def __init__(self, config, setup: bool = True):
    super().__init__(config, setup)
    self.plnx_tool = config["PLNX_TOOL"]
    self.bsp_path = config["BSP_PATH"]
    self.plnx_pkg = None
    self.plnx_tmp = config["PLNX_TMP_PATH"]
    self.plnx_proj = config["plnx_proj"]
    self.workDir = config["workDir"]
    self.imagesDir = config["imagesDir"]
    self.wsDir = config["wsDir"]
    self.config["platform"] = config["platform"]
    self.proj_dir = f"{self.workDir}/{self.plnx_proj}"
    self.petalinux_images = f"{self.proj_dir}/images"
    self.qemu_boot = False
    # Acquire bash console.
    self.runner = Xexpect(log, exit_nzero_ret=True)
    atexit.register(self.__del__)
    myconfs = [
        "RECIPE_NAME",
        "RECIPE_NEW_NAME",
        "FETCHURI",
        "SOURCE_PATH",
        "RECIPE_DESTINATION",
        "IMAGE_RECIPE",
        "WORKSPACE_LAYERPATH",
        "EXISTING_RECIPENAME",
        "RECIPE_UPGRADE",
    ]
    for myconf in myconfs:
        if myconf in config:
            setattr(self, myconf.lower(), getattr(config, myconf))
def __init__(self, config, app_name, setup=True):
    super().__init__(config, setup=setup)
    self.console = Xexpect(log, exit_nzero_ret=True)
    self._setup_args()
    self.app_name = app_name
def run_on_host(self, cmd, expected="root", timeout=60, expected_failures=None, **kwargs):
    self.test_host_console = Xexpect(
        hostname=self.config["eth_host_name"],
        non_interactive=False,
        log=log,
    )
    if cmd == "gettest_hostmac":
        self.test_host_console.runcmd(
            cmd="ifconfig enp9s0 | awk '/HWaddr/ {print substr($5,1)}'", timeout=30
        )
        self.targetMac = self.test_host_console.output()
    self.test_host_console.runcmd(cmd="sudo su -", timeout=timeout, expected="root")
    if isinstance(cmd, list):
        self.test_host_console.runcmd_list(
            cmd_list=cmd,
            expected_failures=expected_failures,
            timeout=timeout,
            expected=expected,
        )
    else:
        self.test_host_console.runcmd(
            cmd=f"{cmd}",
            expected=expected,
            timeout=timeout,
            expected_failures=expected_failures,
        )
def __init__(self, config):
    super().__init__(config)
    self.component = config["component"]
    self.config = config
    self.src_path = None
    self.build_path = f'{config["workDir"]}/{self.component}-build/'
    self.console = Xexpect(log, exit_nzero_ret=True)
    self._setup(self.component)
def __init__(self, config, setup: bool = True):
    super().__init__(config, setup=setup)
    self.console = Xexpect(log, exit_nzero_ret=True)
    self.config_var_dict = self.set_config_var()
    self.defconfig_var_dict = self.set_defconfig_var()
    self.supported_components_list = self.get_defconfig_var().keys()
    self.user_var_dict = ""
    self.yaml_file_path = ""
    self.CONFIG_FILE_SET = False
    self.extra_env = ["scriptsDir"]
    self.cmd = ""
    self.config_var()
def __init__(self, config, app_name, setup=True):
    super(CrossCompile, self).__init__(config, setup=setup)
    if has_key(config, "console"):
        self.console = config.console
    else:
        self.console = Xexpect(log, exit_nzero_ret=True)
        setattr(config, "console", self.console)
    self._copy_src()
    self.srcDir = os.path.join(self.config["workDir"], "src")
    self.app_name = app_name
    self._setup_args()
    self._configure()
def __init__(self, config, setup: bool = False):
    super().__init__(config, setup=config.yocto_reset)
    super().configure()
    self.repo_path = config.repo_path
    self.console = Xexpect(log=log, exit_nzero_ret=True)
    self.yocto_url = config.yocto_url
    self.yocto_branch = config.yocto_branch
    self.yocto_manifest_xml = config.yocto_manifest_xml
    self.repo_bundle_url = config.repo_bundle_url
    self.workdir = config.workDir
    self.imagesdir = config.imagesDir
    self.deploy_dir = f"{config['yocto.conf.TMPDIR']}/deploy"
    if not self.repo_path:
        self.repo_path = "repo"
def test_xexpect_init(logger, mocker):
    mock_ssh_login = mocker.patch("roast.xexpect.ssh_login")
    Xexpect.sendline = mocker.Mock("sendline")
    Xexpect.expect = mocker.Mock("expect", return_value=3)
    x = Xexpect(logger)
    mock_ssh_login.assert_called_with(x)
    assert x.ip == socket.gethostname()
    assert x.prompt == x.ip
    assert Xexpect.sendline.call_count == 2
    x = Xexpect(logger, hostip="hostip")
    assert x.ip == "hostip"
    assert x.prompt == "(%|#|>|\\$|# )"
    mock_ssh_login_user = mocker.patch("roast.xexpect.ssh_login_user")
    x = Xexpect(logger, userid="user", password="******")
    mock_ssh_login_user.assert_called_with(x, "user", "password")
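# A minimal, hypothetical usage sketch of Xexpect on the local host. It assumes only the
# constructor and the runcmd()/output() calls exercised in the snippets above; the
# command and logger name are illustrative placeholders, not part of the test suite.
import logging

log = logging.getLogger(__name__)
console = Xexpect(log, exit_nzero_ret=True)
console.runcmd(cmd="uname -r", timeout=30)   # run a command and wait for the prompt
kernel_version = console.output()            # capture the command output
log.info(f"host kernel: {kernel_version}")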
def __init__(self, config, setup: bool = True):
    super().__init__(config, setup)
    self.plnx_tool = config["PLNX_TOOL"]
    self.bsp_path = config["BSP_PATH"]
    self.plnx_pkg = None
    self.plnx_tmp = config["PLNX_TMP_PATH"]
    self.plnx_proj = config["plnx_proj"]
    self.workDir = config["workDir"]
    self.imagesDir = config["imagesDir"]
    self.wsDir = config["wsDir"]
    self.config["platform"] = config["platform"]
    self.proj_dir = f"{self.workDir}/{self.plnx_proj}"
    self.petalinux_images = f"{self.proj_dir}/images"
    self.qemu_boot = False
    # Acquire bash console.
    self.runner = Xexpect(log, exit_nzero_ret=True)
def eth_telnet(self, **kwargs):
    self.terminal.runcmd(cmd="telnetd", timeout=20)
    self.test_host_console = Xexpect(
        hostname=self.config["eth_host_name"],
        non_interactive=False,
        log=log,
    )
    self.test_host_console.runcmd(
        cmd=f"telnet {self.hostIp}",
        timeout=20,
        expected="Peta",
        wait_for_prompt=False,
    )
    self.test_host_console.sendline("root")
    self.test_host_console.sendline("root")
    time.sleep(1)
    self.test_host_console.sendline("root")
    self.test_host_console.runcmd(cmd="ls", timeout=20, expected=":~# ", wait_for_prompt=False)
def generate_pdi(config) -> None:
    console = Xexpect(log)
    remove(os.path.join(config["imagesDir"], config["pdiFile"]))
    if is_file(config["bifFile"]):
        copy_file(config["bifFile"], config["imagesDir"])
    console.runcmd(f"cd {config['imagesDir']}")
    bootgen_cmd = [
        config["bootgenCmd"],
        "-arch",
        config["platform"].lower(),
        config["bootgenExtraArgs"],
        "-log",
        "info",
        "-image",
        config["bifFile"],
        "-o",
        config["pdiFile"],
    ]
    cmd = " ".join(bootgen_cmd)
    console.runcmd(cmd, expected="Bootimage generated successfully", wait_for_prompt=False)
    if is_file(os.path.join(config["imagesDir"], config["pdiFile"])):
        log.info("PDI generated successfully")
    else:
        err_msg = f"{config['pdiFile']} creation failed"
        log.error(err_msg)
        raise Exception(err_msg)
def gen_boot_bin(config) -> None:
    if has_key(config, "console"):
        console = config["console"]
    else:
        console = Xexpect(log)
        setattr(config, "console", console)
    remove(os.path.join(config["imagesDir"], config["binFile"]))
    if config["bifFile"]:
        copy_file(config["bifFile"], config["imagesDir"])
    console.runcmd(f"cd {config['imagesDir']}")
    bootgen_cmd = [
        config["bootgenCmd"],
        "-arch",
        config["platform"].lower(),
        config["bootgenExtraArgs"],
        "-log",
        "info",
        "-image",
        config["bifFile"],
        "-o",
        config["binFile"],
    ]
    cmd = " ".join(bootgen_cmd)
    console.runcmd(cmd, expected="Bootimage generated successfully")
    if is_file(os.path.join(config["imagesDir"], config["binFile"])):
        log.info("Bootable image generated successfully")
    else:
        err_msg = f"{config['binFile']} creation failed"
        log.error(err_msg)
        raise Exception(err_msg)
def get_client_console(self):
    if self.config["eth_host_name"]:
        self.client_console = Xexpect(
            hostname=self.config["eth_host_name"],
            hostip=None,
            userid=None,
            password=None,
            non_interactive=False,
            log=log,
        )
    elif self.config["board_interface"] == "systest":
        client_ip = self.config["systest_host"]
        self.client_console = Xexpect(hostname=client_ip, non_interactive=False, log=log)
    else:
        client_ip = self.client_ip
        self.client_console = Xexpect(
            hostip=client_ip,
            userid=self.client_user,
            password=self.client_password,
            non_interactive=False,
            log=log,
        )
    self.client_console.prompt = "bash-"
    self.client_console.runcmd("/bin/bash --norc")

    def _sudo_login():
        self.client_console.prompt = r"root(.*?)# "
        self.client_console.sendline(cmd="sudo su -")
        index = self.client_console.expect(
            expected=["password", "root"],
            expected_failures="Permission denied",
            err_msg="fail to login with sudo",
            wait_for_prompt=False,
        )
        if index == 1:
            self.client_console.runcmd(
                cmd=self.client_password,
                expected="root",
                expected_failures=["Permission denied", "Sorry", "not allowed"],
                err_msg="fail to login with sudo",
                wait_for_prompt=False,
            )

    def _set_status_init():
        self.client_console.sync()
        self.client_console._setup_init()
        self.client_console.exit_nzero_ret = True

    if self.client_sudo_login:
        _sudo_login()
    _set_status_init()
def _setup_host_target(self) -> None:
    self._set_host()
    if not self.isLive:
        self.host_console = Xexpect(
            hlog,
            hostname=self.host,
            non_interactive=False,
        )
        if self.invoke_hwserver:
            self.xsdb_hwserver = Xsdb(self.config, hostname=self.host, setup_hwserver=True)
        if self.invoke_xsdb:
            self.xsdb = Xsdb(self.config, hwserver=self.host)
        self.isLive = True
    else:
        self.serial.exit()
        if self.invoke_xsdb:
            self.xsdb = Xsdb(self.config, hwserver=self.host)
def __init__(self, config) -> None:
    self.config = config
    self.hostname = ""
    self.is_live = False
    self._configure()
    self.console = Xexpect(log, hostname=self.hostname, non_interactive=False)
    self.expect = self.console.expect
    self.sendline = self.console.sendline
    self.runcmd = self.console.runcmd
    self.runcmd_list = self.console.runcmd_list
    self.sendcontrol = self.console.sendcontrol
    self.send = self.console.send
    self.output = self.console.output
    self._setup_init = self.console._setup_init
    self.search = self.console.search
    self.sync = self.console.sync
    atexit.register(self.exit)
    self._connect()
    self.is_live = True
def gen_boot_bin(config) -> None:
    console = Xexpect(log)
    remove(os.path.join(config["imagesDir"], config["binFile"]))
    if config["bifFile"]:
        copy_file(config["bifFile"], config["imagesDir"])
    console.runcmd(f"cd {config['imagesDir']}")
    bootgen_cmd = [
        config["bootgenCmd"],
        "-arch",
        config["platform"].lower(),
        config["bootgenExtraArgs"],
        "-log",
        "info",
        "-image",
        config["bifFile"],
        "-o",
        config["binFile"],
    ]
    cmd = " ".join(bootgen_cmd)
    if "BOOTGEN_ENV" in config:
        for env in config["BOOTGEN_ENV"]:
            console.runcmd(f"export {env}")
    console.runcmd(cmd, expected="Bootimage generated successfully", wait_for_prompt=False)
    if is_file(os.path.join(config["imagesDir"], config["binFile"])):
        log.info("Bootable image generated successfully")
    else:
        err_msg = f"{config['binFile']} creation failed"
        log.error(err_msg)
        raise Exception(err_msg)
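# A hypothetical sketch of the config keys this gen_boot_bin() variant reads; every path,
# flag, and environment entry below is a placeholder chosen for illustration, not a
# default shipped by the framework.
config = {
    "imagesDir": "/tmp/images",             # where BOOT.BIN is written and checked
    "binFile": "BOOT.BIN",
    "bifFile": "/tmp/images/boot.bif",      # copied into imagesDir before running bootgen
    "bootgenCmd": "bootgen",
    "platform": "ZYNQMP",                   # lower-cased into the -arch argument
    "bootgenExtraArgs": "-w on",
    "BOOTGEN_ENV": ["LC_ALL=C"],            # exported in the console before bootgen runs
}
gen_boot_bin(config)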
def start(self) -> None:
    self.interface = self.config.get("board_interface")
    self.relay_type = self.config.get("relay_type")
    if self.interface == "host_target":
        self._setup_host_target()
        self.relay = Relay(self.relay_type, session=self.host_console).driver
        self._reboot()
        self.serial = Serial("host", self.config).driver
    elif self.interface == "network_target":
        self._set_nw_target()
        self.target_console = Xexpect(
            tlog,
            hostip=self.ip,
            userid=self.user,
            password=self.password,
            non_interactive=False,
        )
    elif self.interface == "qemu":
        log.info("Running Qemu Interface")
    else:
        raise Exception(f"ERROR: invalid board_interface {self.interface}")
class BuildOsl(Basebuild):
    def __init__(self, config):
        super().__init__(config)
        super().configure()
        self.component = config["component"]
        self.config = config
        self.src_path = None
        self.build_path = f'{config["workDir"]}/{self.component}-build/'
        self.console = Xexpect(log, exit_nzero_ret=True)
        self._setup(self.component)

    def _setup(self, component):
        self.src_reference = self.config.get(f"git.{component}.reference")
        self.external_src = self.config.get(f"external_{component}_src")
        self.arch = self.config[f"{component}_arch"]
        self.compiler = self.config[f"{component}_compiler"]
        if f"{component}_defconfig" in self.config:
            self.defconfig = self.config[f"{component}_defconfig"]
        else:
            self.defconfig = None
        if f"{component}_devicetree" in self.config:
            self.console.runcmd(
                f"export DEVICE_TREE={self.config[f'{component}_devicetree']}"
            )
        self.console.runcmd(f"source {self.config['vitisPath']}/settings64.sh")
        if "sysroot_env" in self.config:
            self.console.runcmd(f"source {self.config['sysroot_env']}")
        if "sysroot_tool" in self.config:
            self.console.runcmd(f"export PATH={self.config['sysroot_tool']}:$PATH")
        self.console.runcmd(f"export ARCH={self.arch}")
        self.console.runcmd(f"export CROSS_COMPILE={self.compiler}")
        if "local_version" in self.config:
            self.console.runcmd(f"export LOCALVERSION={self.config.local_version}")
        mkdir(self.build_path)
        # Export default environment variables for the component.
        if f"{component}_env" in self.config:
            for env_var, value in self.config[f"{component}_env"].items():
                self.console.runcmd(f"export {env_var}={value}")

    def setup_src(self):
        if not self.external_src:
            self.console.runcmd(f"cd {self.config['workDir']}")
            if self.src_reference:
                clone(
                    self.config.git[f"{self.component}"],
                    f"{self.config['workDir']}/{self.component}",
                    workDir=self.config["workDir"],
                    reference=self.src_reference,
                )
            else:
                clone(
                    self.config.git[f"{self.component}"],
                    f"{self.config['workDir']}/{self.component}",
                    workDir=self.config["workDir"],
                )
            self.src_path = f"{self.config['workDir']}/{self.component}/"
        else:
            if self.component not in ["kernel", "kernel_allmodconfig", "uboot"]:
                rsync(self.console, self.external_src, self.config["workDir"])
                self.src_path = (
                    f"{self.config['workDir']}/{get_base_name(self.external_src)}"
                )
            else:
                self.src_path = self.external_src

    def configure(self):
        # Configure the component.
        if self.component == "kernel" or self.component == "rootfs":
            mkfile(f"{self.src_path}/.scmversion")
        if self.defconfig:
            cmd = f"make -C {self.src_path} {self.defconfig} O={self.build_path}"
            self.console.runcmd(cmd)

    def compile(self):
        extra_flags = ""
        if f"{self.component}_compile_flags" in self.config:
            extra_flags = f'{self.config[f"{self.component}_compile_flags"]}'
        if not self.config["outoftreebuild"]:
            self.build_path = self.src_path
        cmd = f'make -j {self.config["parallel_make"]} -C {self.src_path} O={self.build_path} {extra_flags}'
        self.console.runcmd(cmd, timeout=3600)

    def deploy(self):
        mkdir(self.config["deploy_artifacts"])
        for image in self.config[f"{self.component}_artifacts"]:
            image = parse_config(self.config, image)
            copy_file(
                os.path.join(self.build_path, image),
                self.config["deploy_artifacts"],
            )
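# A possible end-to-end driver for BuildOsl, assuming a ROAST-style config that carries
# the keys referenced above (component, workDir, vitisPath, <component>_arch,
# <component>_compiler, parallel_make, deploy_artifacts, ...). It only illustrates the
# call order of the class's own methods; it is not an official entry point.
def build_component(config):
    builder = BuildOsl(config)
    builder.setup_src()    # clone or rsync the component sources into workDir
    builder.configure()    # apply the defconfig, if one is configured
    builder.compile()      # make -j <parallel_make> ... O=<build_path>
    builder.deploy()       # copy <component>_artifacts to deploy_artifacts
    return builder.build_path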
class Ethernet(BaseLinux): def __init__(self, console, config, eth_interface): super().__init__(console, config) self.platform = None self.client_ip = "10.10.70.101" self.client_mac = "2c:2b:59:cf:7c:00" self.board_ip = "10.10.70.1" self.ping_intervel = "1" self.ping_size = "45" self.ping_count = "10" self.file_size = "4096" self.pktgen_size = "200" self.pktgen_count = "1" self.pktgen_burst = "1" self.pktgen_delay = "0" self.pktgen_frags = "4" self.pktgen_vlan_id = "0" self.eth_interface = "eth0" self.host_interface = "enp9s0" self.updown_count = "3" self.mtu = "1500" self.udp_mtu = "1500" self.iperf3_binary = "iperf3" self.extra_iperf3_args = "" self.timeout = "60" self.client_user = None self.client_password = None self.client_sudo_login = None self.server_user = "******" self.server_password = "******" self.config = config self.terminal = console self.log = log self.eth_interface = eth_interface self.terminal.prompt = r"root(.*?)# " if self.config.get("client_user"): self.client_user = self.config.client_user if self.config.get("client_password"): self.client_password = self.config.client_password if self.config.get("client_sudo_login"): self.client_sudo_login = self.config.client_sudo_login if self.config.get("target_ip"): self.client_ip = self.config.target_ip if self.config.get("board_ip"): self.board_ip = self.config.board_ip if self.config.get("client_mac"): self.client_mac = self.config.client_mac def _setip_config(self, config): self.config = config def ping_test(self): cmd = f"ping {self.client_ip} -s {self.ping_size} -c {self.ping_count}" self.terminal.runcmd(cmd=str(cmd), timeout=60, expected=" 0% packet loss") def ping_flood(self): self.get_client_console() self.client_console.runcmd( cmd=f"ping -f {self.board_ip} -i 5 -c 5", timeout=60, expected=" 0% packet loss", ) self.terminal.runcmd(cmd=f"ping {self.client_ip} -c 5", timeout=30, expected=" 0% packet loss") def ifupdown(self): for count in self.updown_count: self.terminal.runcmd(cmd=f"ifconfig {self.eth_interface} down", timeout=15) self.terminal.runcmd( cmd=f"ifconfig {self.eth_interface} up;sleep 5", timeout=15, expected_failures="link is not ready", ) self.ping_test() def eth_pktgen(self): commands = [ "echo 'stop' > /proc/net/pktgen/pgctrl", "echo 'rem_device_all' > /proc/net/pktgen/kpktgend_0", f"echo 'add_device {self.eth_interface}' > /proc/net/pktgen/kpktgend_0", f"echo 'count {self.pktgen_count}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'clone_skb 100' > /proc/net/pktgen/{self.eth_interface}", f"echo 'pkt_size {self.pktgen_size}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'burst {self.pktgen_burst}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'delay {self.pktgen_delay}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'vlan_id {self.pktgen_vlan_id}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'vlan_p 0' > /proc/net/pktgen/{self.eth_interface}", f"echo 'vlan_cfi 0' > /proc/net/pktgen/{self.eth_interface}", f"echo 'frags {self.pktgen_frags}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'dst {self.client_ip}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'dst_mac {self.client_mac}' > /proc/net/pktgen/{self.eth_interface}", "echo 'start' > /proc/net/pktgen/pgctrl", f"cat /proc/net/pktgen/{self.eth_interface}", "paramsCount=$(grep -E 'Params: count' /proc/net/pktgen/eth0 | awk '{print substr($3,1)}')", "pktSofar=$(grep -E 'pkts-sofar' /proc/net/pktgen/eth0 | awk '{print substr($2,1)}')", '[ "$paramsCount" -eq "$pktSofar" ]', ] self.terminal.runcmd_list(cmd_list=commands, timeout=60) def 
eth_scp(self): self.terminal.runcmd(cmd="scp_file=$(mktemp scp.XXXXXXXXX)") self.terminal.runcmd( cmd= f"dd if=/dev/zero of=$scp_file bs=1 count=0 seek={self.file_size}") scp_cmd = f"scp -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -r $scp_file {self.client_user}@{self.client_ip}:/home/{self.client_user}" index = self.terminal.runcmd( cmd=scp_cmd, expected=["(y/n)", "password"], wait_for_prompt=False, timeout=120, ) if index == "1": self.terminal.runcmd(cmd=f"{self.client_password}", timeout=120) else: self.terminal.runcmd(cmd="y", expected="password", wait_for_prompt=False, timeout=300) self.terminal.runcmd(cmd=f"{self.client_password}") self.terminal.runcmd(cmd="rm $scp_file") def iperf_tcp_host_client(self): self.log.info("Starting an iperf3 server on the client...") self.get_client_console() cmd = f"{self.iperf3_binary} -s &" self.client_console.runcmd(cmd=cmd, expected="Server listening", wait_for_prompt=False, timeout=60) self.log.info(f"Measuring {self.iperf3_binary} TCP throughput...") cmd = f"{self.iperf3_binary} -c {self.client_ip} -f m {self.extra_iperf3_args}" self.terminal.runcmd(cmd=cmd, timeout=60) time.sleep(5) self.client_console.runcmd(cmd=f"pkill {self.iperf3_binary}", timeout=60) def iperf_tcp_client_host(self): self.get_client_console() self.log.info("Starting an iperf3 server on the board...") self.terminal.runcmd(cmd=f"{self.iperf3_binary} -s &", timeout=60) time.sleep(15) self.log.info(f"Starting an iperf3 client...") self.client_console.runcmd( cmd= f"{self.iperf3_binary} -c {self.board_ip} -f m {self.extra_iperf3_args}", timeout=60, ) time.sleep(5) self.terminal.runcmd(cmd=f"pkill {self.iperf3_binary}", timeout=60) def iperf_udp_host_client(self): self.log.info("Starting an iperf3 server on the client...") self.get_client_console() cmd = f"{self.iperf3_binary} -s &" self.client_console.runcmd(cmd=cmd, expected="Server listening", wait_for_prompt=False, timeout=60) time.sleep(15) self.log.info(f"Measuring {self.iperf3_binary} UDP throughput...") cmd = ( f"{self.iperf3_binary} -c {self.client_ip} -f m -u {self.extra_iperf3_args}" ) self.terminal.runcmd(cmd=cmd, timeout=60) time.sleep(5) self.client_console.runcmd(cmd=f"pkill {self.iperf3_binary}", timeout=60) def iperf_udp_client_host(self): self.get_client_console() self.log.info("Starting an iperf3 server on the board...") self.terminal.runcmd(cmd=f"{self.iperf3_binary} -s &", timeout=60) time.sleep(15) self.log.info(f"Starting an iperf3 client...") self.client_console.runcmd( cmd= f"{self.iperf3_binary} -c {self.board_ip} -f m -u {self.extra_iperf3_args}", timeout=60, ) time.sleep(5) self.terminal.runcmd(cmd=f"pkill {self.iperf3_binary}", timeout=60) def netperf_tcp_host_client(self): self.log.info("Starting an netperf server on the client...") self.get_client_console() cmd = f"ps -ef | grep netserver || netserver -D -4 &" self.client_console.runcmd(cmd=cmd, timeout=20) if self.mtu != "1500": time.sleep(2) self.log.info( f"Updating MTU size on host and device... 
: {self.mtu}") self.terminal.runcmd( cmd= f"ifconfig {self.eth_interface} down; ifconfig {self.eth_interface} mtu {self.mtu} up; ifconfig" ) time.sleep(5) self.client_console.runcmd( cmd= f"ifconfig {self.eth_interface} down; ifconfig {self.eth_interface} mtu {self.mtu} up; ifconfig" ) time.sleep(5) self.udp_mtu = f"{self.mtu} - 28" time.sleep(5) self.log.info( f"Device Netperf Output for TCP(Tx Mode) with MTU size {self.mtu}") time.sleep(1) cmd = f"netperf -c -C -H {self.client_ip} -t TCP_STREAM" failures = ["No space left on device", "recv_response:"] self.terminal.runcmd(cmd=cmd, expected_failures=failures, timeout=60) time.sleep(5) def netperf_udp_host_client(self): self.log.info("Starting an netperf server on the client...") self.get_client_console() cmd = f"ps -ef | grep netserver || netserver -D -4 &" self.client_console.runcmd(cmd=cmd, timeout=20) if self.mtu != "1500": time.sleep(2) self.log.info( f"Updating MTU size on host and device... : {self.mtu}") self.terminal.runcmd( cmd= f"ifconfig {self.eth_interface} down; ifconfig {self.eth_interface} mtu {self.mtu} up; ifconfig" ) time.sleep(5) self.client_console.runcmd( cmd= f"ifconfig {self.eth_interface} down; ifconfig {self.eth_interface} mtu {self.mtu} up; ifconfig" ) time.sleep(5) self.udp_mtu = f"{self.mtu} - 28" time.sleep(5) self.log.info( f"Device Netperf Output for UDP(Tx Mode) with MTU Size: {self.mtu}" ) time.sleep(1) cmd = f"netperf -c -C -H {self.client_ip} -t UDP_STREAM" failures = ["No space left on device", "recv_response:"] self.terminal.runcmd(cmd=cmd, expected_failures=failures, timeout=60) time.sleep(5) def netperf_tcp_client_host(self): self.get_client_console() failures = ["No space left on device", "recv_response:"] cmd = f"ps -ef | grep netserver || netserver -D -4 &" self.terminal.runcmd(cmd=cmd, timeout=20) time.sleep(5) cmd = f"netperf -c -C -H {self.board_ip} -t TCP_STREAM -- -m {self.mtu} -M {self.mtu}" self.client_console.runcmd(cmd=cmd, expected_failures=failures, timeout=50) def netperf_udp_client_host(self): self.get_client_console() failures = ["No space left on device", "recv_response:"] cmd = f"ps -ef | grep netserver || netserver -D -4 &" self.terminal.runcmd(cmd=cmd, timeout=20) time.sleep(5) self.log.info( f"Host Netperf Output for UDP(Rx Mode) with MTU Size: {self.udp_mtu}" ) cmd = f"netperf -c -C -H {self.board_ip} -t UDP_STREAM -- -m {self.mtu} -M {self.udp_mtu}" self.client_console.runcmd(cmd=cmd, timeout=50) time.sleep(5) def eth_dhcp(self): cmd = f"udhcpc -i {self.eth_interface}" self.terminal.runcmd(cmd=cmd, timeout=30, expected_failures=["not found"]) self.ping_test() def eth_speed(self): for speed in [1000, 10, 100]: cmd = f"ethtool -s {self.eth_interface} speed {speed} duplex full; sleep 15" self.terminal.runcmd(cmd=cmd) self.terminal.sendline("\r\n") self.terminal.runcmd(cmd=f"ethtool {self.eth_interface}") self.ping_test() cmd = f"ethtool -s {self.eth_interface} autoneg on; sleep 15" self.terminal.runcmd(cmd=cmd) self.terminal.sendline("\r\n") self.terminal.runcmd(cmd=f"ethtool {self.eth_interface}") self.ping_test() def eth_tftp(self): failures = [ "server error:", "(2) Access violation", "No such file or directory", "ERROR", ] tftp_file = "system.bit" cmdd = f"tftp -g -r {tftp_file} {self.client_ip}" self.terminal.runcmd(cmd=cmdd, timeout=300, expected_failures=failures) def eth_telnet(self): self.terminal.runcmd(cmd="telnetd", timeout=20) self.get_client_console() self.client_console.runcmd( cmd=f"telnet {self.board_ip}", timeout=20, expected="Peta", wait_for_prompt=False, ) 
self.client_console.sendline("root") self.client_console.sendline("root") time.sleep(1) self.client_console.sendline("root") self.client_console.runcmd(cmd=f"ls", timeout=20, expected=":~# ", wait_for_prompt=False) def eth_gravcat(self, **kwargs): self.get_client_console() cmd = f"gravecat -l 9999 &" self.terminal.sync() self.terminal.runcmd(cmd=cmd, timeout=20, expected_failures=["ommand", "failed"]) self.client_console.runcmd( f"/usr/bin/gravecat_x86_64 -s {self.boardIp} 9999 4 1000 500", expected="using", timeout=10, wait_for_prompt=False, ) time.sleep(600) self.client_console.sendline("\x03") self.client_console.runcmd( cmd="if pgrep gravecat;then kill -9 `pgrep -f gravecat`; fi") self.terminal.sync() self.ifupdown() def suspend_resume_eth_wkp(self, platform): eth_nodes = ["ff0b0000", "ff0c0000", "ff0d0000", "ff0e0000"] self.log.info("Starting an netperf server on the client...") self.terminal.runcmd("cat /proc/cpuinfo") self.terminal.runcmd("ifconfig -a") self.terminal.runcmd("echo 8 > /proc/sys/kernel/printk") self.terminal.runcmd( "echo 0 > /sys/module/printk/parameters/console_suspend") if platform == "versal": self.terminal.runcmd( f"echo disabled > {self.sys_axi}/ff000000.serial/tty/ttyAMA0/power/wakeup" ) self.terminal.runcmd( f"echo enabled > {self.sys_axi}/ff0c0000.ethernet/net/eth0/power/wakeup" ) self.terminal.runcmd( "echo mem > /sys/power/state", expected="CPU1 killed", wait_for_prompt=False, ) else: self.terminal.runcmd( f"echo disabled > {self.sys_axi}/ff000000.serial/tty/ttyPS0/power/wakeup" ) for node in eth_nodes: self.terminal.runcmd( f"cat /proc/device-tree/axi/ethernet\@{node}/status", expected="\r\n", ) node_status = self.terminal.output() if node_status == "okay": ethernet_node = node self.terminal.runcmd( f"echo enabled > {self.sys_axi}/{ethernet_node}.ethernet/net/eth0/power/wakeup" ) self.terminal.runcmd( "echo mem > /sys/power/state", expected="CPU3 killed", wait_for_prompt=False, ) self.get_client_console() self.client_console.runcmd(cmd=f"ping {self.board_ip} -c 5", expected_failures="0 received", retries=2) time.sleep(15) is_linux_cons(self.terminal) self.terminal.runcmd("pwd") if platform == "versal": self.terminal.runcmd( f"echo enabled > {self.sys_axi}/ff000000.serial/tty/ttyAMA0/power/wakeup" ) self.terminal.runcmd('bootmode="sd_boot"') else: self.terminal.runcmd( f"echo enabled > {self.sys_axi}/ff000000.serial/tty/ttyPS0/power/wakeup" ) def get_client_console(self): if self.config["eth_host_name"]: self.client_console = Xexpect( hostname=self.config["eth_host_name"], hostip=None, userid=None, password=None, non_interactive=False, log=log, ) elif self.config["board_interface"] == "systest": client_ip = self.config["systest_host"] self.client_console = Xexpect(hostname=client_ip, non_interactive=False, log=log) else: client_ip = self.client_ip self.client_console = Xexpect( hostip=client_ip, userid=self.client_user, password=self.client_password, non_interactive=False, log=log, ) self.client_console.prompt = "bash-" self.client_console.runcmd("/bin/bash --norc") def _sudo_login(): self.client_console.prompt = r"root(.*?)# " self.client_console.sendline(cmd="sudo su -") index = self.client_console.expect( expected=["password", "root"], expected_failures="Permission denied", err_msg="fail to login with sudo", wait_for_prompt=False, ) if index == 1: self.client_console.runcmd( cmd=self.client_password, expected="root", expected_failures=[ "Permission denied", "Sorry", "not allowed" ], err_msg="fail to login with sudo", wait_for_prompt=False, ) def 
_set_status_init(): self.client_console.sync() self.client_console._setup_init() self.client_console.exit_nzero_ret = True if self.client_sudo_login: _sudo_login() _set_status_init() def ifplugd(self): self.terminal.runcmd(cmd="ifplugd") cmd = f'pgrep -f ifplugd >/dev/null || echo "ifplugd demon not running"' self.terminal.runcmd(cmd=cmd, timeout=30, expected_failures=["ifplugd demon not running"]) self.ifupdown() cmd = f"ifconfig {self.eth_interface} {self.board_ip} netmask 255.255.255.0" self.terminal.runcmd(cmd=cmd, timeout=self.timeout) def eth_nfs(self): self.log.info("Output test files and clean up..") cmd_list = [ "out_dir=/tmp/nfs_temp_output", "mkdir -p ${out_dir} > /dev/null 2>&1", "out_prefix=${out_dir}/nfs_test", "out_mount=${out_prefix}.mount", "out_mount_prefix=${out_mount}/nfs_test", "unmount ${out_mount} && rm -rf ${out_prefix}*", ] self.terminal.runcmd_list(cmd_list=cmd_list, timeout=60) self.log.info("Mounting NFS...") cmd_list = [ "mkdir -p ${out_mount}", f'rpcinfo "{self.client_ip}" | grep "nfs"', f"mount -o port=2049,nolock,proto=tcp,vers=2 {self.client_ip}:/exports/root $out_mount", ] self.terminal.runcmd_list(cmd_list=cmd_list, timeout=200) self.log.info("Creating large pattern data files..") cmd_list = [ " [ -c /dev/urandom ] || mknod -m 777 /dev/urandom c 1 9 > /dev/null 2>&1;", "dd if=/dev/urandom of=${out_prefix}.r2m-pattern.bin bs=1024 count=4096;", "dd if=/dev/urandom of=${out_mount_prefix}.m2r-pattern.bin bs=1024 count=4096;", "cp ${out_mount_prefix}.m2r-pattern.bin ${out_prefix}.m2r-pattern.bin;", "cp ${out_prefix}.r2m-pattern.bin ${out_mount_prefix}.r2m-pattern.bin;", ] self.terminal.runcmd_list(cmd_list=cmd_list, timeout=200) self.log.info("Re-mounting the NFS.. Verifying the read back data...") cmd_list = [ "umount ${out_mount};", f"mount -o port=2049,nolock,proto=tcp,vers=2 {self.client_ip}:/exports/root $out_mount;" "diff -q ${out_prefix}.m2r-pattern.bin ${out_mount_prefix}.m2r-pattern.bin", "diff -q ${out_mount_prefix}.r2m-pattern.bin ${out_prefix}.r2m-pattern.bin", ] self.terminal.runcmd_list(cmd_list=cmd_list, timeout=200) def vlan_test(self): cmd = "board_mac=$(ifconfig eth0 | awk '/HWaddr/ {print substr($5,1)}')" self.terminal.runcmd(cmd=cmd, timeout=60) self.BoardMac = self.terminal.output() time.sleep(2) self.log.info(f"Board HWaddr : {self.BoardMac}... 
") cmd = 'tcpdump -i {self.eth_interface} "vlan and icmp" and ip host 10.10.70.2 and ether host "$board_mac" -n -ev &' self.terminal.runcmd(cmd=cmd, timeout=60) time.sleep(2) cmd_list = [ "ip link set dev eth2.5 down &", "ip link del eth2.5 &", "modprobe 8021q &", "ip link add link eth2 name eth2.5 type vlan id 5 &", "ip addr add 10.10.70.2 brd 10.10.70.255 dev eth2.5 &", "ip link set dev eth2.5 up &", f"arp -s {self.client_ip} $board_mac dev eth2.5 &", f"{self.client_ip} -I eth2.5 -c 3 &", ] self.get_client_console() self.client_console.runcmd(cmd=cmd_list, timeout=120) time.sleep(10) self.log.info("=================== rx output ===================") self.terminal.runcmd(cmd="killall -s INT tcpdump &", timeout=60) time.sleep(2) cmd_list = [ "ip link set dev eth2.5 down &", "ip link del eth2.5 &", f"route del {self.client_ip} &", f"route add {self.client_ip} dev eth2 &", f"tcpdump -n -i eth2 dst port 9 and ip host {self.client_ip} -e -v > ~/tx_tcpdump_vlan.txt &", ] self.client_console.runcmd(cmd=cmd_list, timeout=120) time.sleep(10) self.client_console.runcmd(cmd="gettest_hostmac", timeout=20) time.sleep(1) commands = [ "echo 'stop' > /proc/net/pktgen/pgctrl", "echo 'rem_device_all' > /proc/net/pktgen/kpktgend_0", "echo 'add_device eth0' > /proc/net/pktgen/kpktgend_0", "echo 'count 1' > /proc/net/pktgen/eth0", "echo 'clone_skb 0' > /proc/net/pktgen/eth0", "echo 'pkt_size 200' > /proc/net/pktgen/eth0", "echo 'delay 0' > /proc/net/pktgen/eth0", "echo 'frags 4' > /proc/net/pktgen/eth0", "echo 'vlan_id 0' > /proc/net/pktgen/eth0", "echo 'vlan_p 0' > /proc/net/pktgen/eth0", "echo 'vlan_cfi 0' > /proc/net/pktgen/eth0", f"echo 'dst {self.board_ip}' > /proc/net/pktgen/eth0", f"echo 'dst_mac {self.client_mac}' > /proc/net/pktgen/eth0", "echo 'start' > /proc/net/pktgen/pgctrl", ] self.terminal.runcmd_list(cmd_list=commands, timeout=60) time.sleep(4) self.log.info("=================== tx output ===================") cmd_list = [ "killall -s INT tcpdump &", "cat ~/tx_tcpdump_vlan.txt", "rm ~/tx_tcpdump_vlan.txt", ] self.client_console.runcmd(cmd=cmd_list, timeout=120) time.sleep(10) def update_mtu(self, **kwargs): self.get_client_console() self.log.info(f"Updating MTU size on host and device... : {self.mtu}") self.terminal.runcmd_list(cmd_list=[ f"ifconfig {self.eth_interface} down", "sleep 10", f"ifconfig {self.eth_interface} mtu {self.mtu} {self.boardIp} up", "sleep 5", "ifconfig", ]) time.sleep(5) self.client_console.runcmd_list(cmd_list=[ f"sudo ifconfig {self.host_interface} down", "sleep 10", f"sudo ifconfig {self.host_interface} mtu {self.mtu} {self.clientIp} up", "sleep 5", "sudo ifconfig", ]) time.sleep(5) self.terminal.sync() def ping_jumbo_frame(self, **kwargs): self.get_client_console() for mtu in [1500, 2048, 4096, 8192]: self.log.info(f"Updating MTU size on device... 
: {mtu}") self.mtu = mtu self.update_mtu() self.terminal.sync() time.sleep(3) self.terminal.runcmd( cmd=f"ping {self.clientIp} -s {mtu-28} -c 5", timeout=30, expected=" 0% packet loss", ) time.sleep(2) self.mtu = 1500 self.update_mtu() def mii_test(self, **kwargs): self.terminal.runcmd(cmd=f"mii-tool -v {self.eth_interface}", expected="link ok") self.terminal.runcmd( cmd=f"mii-tool --force 10baseT-FD {self.eth_interface}", expected=" ") self.terminal.runcmd(cmd=f"mii-tool {self.eth_interface}", expected=": 10 Mbit") self.terminal.runcmd(cmd=f"mii-tool --restart {self.eth_interface}", expected=" ") self.terminal.sync() time.sleep(10) self.ping_test() self.terminal.sync() def eth_pqueue(self): self.client_console.runcmd(cmd="gettest_hostmac", timeout=20) self.client_console.runcmd(cmd="killall -s INT tcpdump &", timeout=20) self.client_console.runcmd(cmd="rm ~/priority.txt &", timeout=20) self.client_console.runcmd( cmd= f"tcpdump -n -i {self.host_interface} ip host {self.board_ip} -ev > ~/priority.txt &", timeout=20, ) time.sleep(5) self.log.info(f"targetMac = {self.client_mac}") commands = [ "echo 'stop' > /proc/net/pktgen/pgctrl", "echo 'rem_device_all' > /proc/net/pktgen/kpktgend_0", "echo 'add_device eth0@0' > /proc/net/pktgen/kpktgend_0", "echo 'count 500' > /proc/net/pktgen/eth0@0", "echo 'burst 50' > /proc/net/pktgen/eth0@0", "echo 'clone_skb 0' > /proc/net/pktgen/eth0@0", "echo 'pkt_size 1500' > /proc/net/pktgen/eth0@0", "echo 'delay 0' > /proc/net/pktgen/eth0@0", "echo 'frags 0' > /proc/net/pktgen/eth0@0", f"echo 'dst {self.board_ip}' > /proc/net/pktgen/eth0@0", f"echo 'dst_mac {self.client_mac}' > /proc/net/pktgen/eth0@0", "echo 'skb_priority 1' > /proc/net/pktgen/eth0@0", "echo 'queue_map_min 0' > /proc/net/pktgen/eth0@0", "echo 'queue_map_max 0' > /proc/net/pktgen/eth0@0", ] self.terminal.runcmd_list(cmd_list=commands, timeout=60) time.sleep(5) commands = [ "echo 'rem_device_all' > /proc/net/pktgen/kpktgend_1", "echo 'add_device eth0@1' > /proc/net/pktgen/kpktgend_1", "echo 'count 20' > /proc/net/pktgen/eth0@1", "echo 'burst 20' > /proc/net/pktgen/eth0@1", "echo 'clone_skb 0' > /proc/net/pktgen/eth0@1", "echo 'pkt_size 1400' > /proc/net/pktgen/eth0@1", "echo 'delay 0' > /proc/net/pktgen/eth0@1", "echo 'frags 0' > /proc/net/pktgen/eth0@1", f"echo 'dst {self.board_ip}' > /proc/net/pktgen/eth0@1", f"echo 'dst_mac {self.client_mac}' > /proc/net/pktgen/eth0@1", "echo 'skb_priority 1' > /proc/net/pktgen/eth0@1", "echo 'queue_map_min 1' > /proc/net/pktgen/eth0@1", "echo 'queue_map_max 1' > /proc/net/pktgen/eth0@1", ] self.terminal.runcmd_list(cmd_list=commands, timeout=60) time.sleep(5) commands = [ "echo 'rem_device_all' > /proc/net/pktgen/kpktgend_2", "echo 'add_device eth0@2' > /proc/net/pktgen/kpktgend_2", "echo 'count 500' > /proc/net/pktgen/eth0@2", "echo 'burst 50' > /proc/net/pktgen/eth0@2", "echo 'clone_skb 0' > /proc/net/pktgen/eth0@2", "echo 'pkt_size 1200' > /proc/net/pktgen/eth0@2", "echo 'delay 0' > /proc/net/pktgen/eth0@2", "echo 'frags 0' > /proc/net/pktgen/eth0@2", f"echo 'dst {self.board_ip}' > /proc/net/pktgen/eth0@2", f"echo 'dst_mac {self.client_mac}' > /proc/net/pktgen/eth0@2", "echo 'skb_priority 1' > /proc/net/pktgen/eth0@2", "echo 'queue_map_min 0' > /proc/net/pktgen/eth0@2", "echo 'queue_map_max 0' > /proc/net/pktgen/eth0@2", ] self.terminal.runcmd_list(cmd_list=commands, timeout=60) time.sleep(5) commands = [ "echo 'rem_device_all' > /proc/net/pktgen/kpktgend_3", "echo 'add_device eth0@3' > /proc/net/pktgen/kpktgend_3", "echo 'count 20' > 
/proc/net/pktgen/eth0@3", "echo 'burst 20' > /proc/net/pktgen/eth0@3", "echo 'clone_skb 0' > /proc/net/pktgen/eth0@3", "echo 'pkt_size 1100' > /proc/net/pktgen/eth0@3", "echo 'delay 0' > /proc/net/pktgen/eth0@3", "echo 'frags 0' > /proc/net/pktgen/eth0@3", f"echo 'dst {self.board_ip}' > /proc/net/pktgen/eth0@3", f"echo 'dst_mac {self.client_mac}' > /proc/net/pktgen/eth0@3", "echo 'skb_priority 1' > /proc/net/pktgen/eth0@3", "echo 'queue_map_min 1' > /proc/net/pktgen/eth0@3", "echo 'queue_map_max 1' > /proc/net/pktgen/eth0@3", "echo 'start' > /proc/net/pktgen/pgctrl &", ] self.terminal.runcmd_list(cmd_list=commands, timeout=60) time.sleep(5) self.client_console.runcmd(cmd="killall -s INT tcpdump &", timeout=20) self.client_console.runcmd(cmd="cat ~/priority.txt &", timeout=20)
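# A possible smoke-test sequence for the Ethernet helper above, assuming `console` is an
# already logged-in target console (Xexpect-like) and `config` provides target_ip,
# board_ip, and client credentials; the particular tests chosen here are illustrative.
eth = Ethernet(console, config, eth_interface="eth0")
eth.ping_test()                 # basic reachability with the default packet size/count
eth.ifupdown()                  # interface down/up cycles followed by a ping check
eth.iperf_tcp_host_client()     # TCP throughput with the iperf3 server on the client host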
class BuildOsl(Basebuild):
    def __init__(self, config):
        super().__init__(config)
        self.component = config["component"]
        self.config = config
        self.src_path = None
        self.build_path = f'{config["workDir"]}/{self.component}-build/'
        self.console = Xexpect(log, exit_nzero_ret=True)
        self._setup(self.component)

    def _setup(self, component):
        self.src_reference = get_config_data(self.config, f"git.{component}.reference")
        self.external_src = self.config[f"external_{component}_src"]
        self.arch = self.config[f"{component}_arch"]
        self.compiler = self.config[f"{component}_compiler"]
        if has_key(self.config, f"{component}_defconfig"):
            self.defconfig = self.config[f"{component}_defconfig"]
        else:
            self.defconfig = None
        if has_key(self.config, f"{component}_devicetree"):
            self.console.runcmd(
                f"export DEVICE_TREE={self.config[f'{component}_devicetree']}"
            )
        self.console.runcmd(f"source {self.config['VITIS_SETTINGS_SH']}")
        self.console.runcmd(f"source {self.config['sysroot_env']}")
        # FIXME: Remove explicit addition of tools to env.
        self.console.runcmd("export PATH=/group/siv2_xhd/work/lovek/tools/:$PATH")
        self.console.runcmd(f"export ARCH={self.arch}")
        self.console.runcmd(f"export CROSS_COMPILE={self.compiler}")
        mkdir(self.build_path)
        # Export default environment variables for the component.
        if has_key(self.config, f"{component}_env"):
            for env_var, value in self.config[f"{component}_env"].items():
                self.console.runcmd(f"export {env_var}={value}")

    def setup_src(self):
        if not self.external_src:
            self.console.runcmd(f"cd {self.config['workDir']}")
            clone(
                self.config.git_params[f"{self.component}"],
                f"{self.config['workDir']}/{self.component}",
                "build_osl.log",
                workDir=self.config["workDir"],
                reference=self.src_reference,
            )
            self.src_path = f"{self.config['workDir']}/{self.component}/"
        else:
            self.src_path = f"{self.external_src}"

    def configure(self):
        # Configure the component.
        if self.defconfig:
            cmd = f"make -C {self.src_path} {self.defconfig} O={self.build_path}"
            self.console.runcmd(cmd)

    def compile(self):
        extra_flags = ""
        if has_key(self.config, f"{self.component}_compile_flags"):
            extra_flags = f'{self.config[f"{self.component}_compile_flags"]}'
        if not self.config["outoftreebuild"]:
            self.build_path = self.src_path
        cmd = f'make -j {self.config["parallel_make"]} -C {self.src_path} O={self.build_path} {extra_flags}'
        self.console.runcmd(cmd, timeout=1000)

    def deploy(self):
        mkdir(self.config["deploy_artifacts"])
        for image in self.config[f"{self.component}_artifacts"]:
            image = parse_config(self.config, image)
            copy_file(
                os.path.join(self.build_path, image), self.config["deploy_artifacts"]
            )
class Ethernet: attributes = [ "hostIp", "targetIp", "ping_intervel", "ping_size", "ping_count", "file_size", "pktgen_size", "pktgen_count", "pktgen_burst", "pktgen_delay", "pktgen_frags", "pktgen_vlan_id", "host_interface", "ifupdwn_count", "mtu", "eth_interface", "user", "password", "iperf_binary", "extra_iperf_args", ] targetIp = "10.10.70.101" hostIp = "10.10.70.21" ping_intervel = "1" ping_size = "45" ping_count = "10" file_size = "4096" pktgen_size = "200" pktgen_count = "1" pktgen_burst = "1" pktgen_delay = "0" pktgen_frags = "4" pktgen_vlan_id = "0" eth_interface = "eth0" host_interface = "enp9s0" updown_count = "3" mtu = "1500" udp_mtu = "1500" iperf_binary = "iperf" extra_iperf_args = "" timeout = "60" user = "" password = "" def __init__(self, config, terminal, eth_interface): self.config = config self.terminal = terminal self.log = log self.eth_interface = eth_interface self.terminal.prompt = "~# " self.terminal._setup_init() if config["user"]: self.user = config["user"] if config["password"]: self.password = config["password"] def _setip_config(self, config): self.config = config def eth_default_runner(func): @functools.wraps(func) def override_defaults(self, **kwargs): ts = time.time() self.log.info(f"Start ethernet test {func.__name__} ...") for attr, value in kwargs.items(): if attr not in self.attributes: raise ValueError( "Attribute [{}] not supported.".format(attr)) else: setattr(self, attr, value) self.log.info(f"key: {attr}, value: {value}") func(self, **kwargs) te = time.time() self.log.info( f"Ethernet test {func.__name__} end .. Total time taken:{round((te -ts),1)}, sec" ) self.__init__(self.config, self.terminal, self.eth_interface) return True return override_defaults @eth_default_runner def ping_test(self, **kwargs): cmd = f"ping {self.targetIp} -s {self.ping_size} -c {self.ping_count}" self.terminal.runcmd(cmd=str(cmd), timeout=20, expected=" 0% packet loss") @eth_default_runner def eth_speed(self, **kwargs): import random for speed in [1000, 10, 100]: cmd = f"ethtool -s {self.eth_interface} speed {speed} duplex full" self.terminal.runcmd(cmd=cmd, timeout=30, expected="link") self.terminal.sendline("\r\n") self.terminal.runcmd(cmd=f"ethtool {self.eth_interface}") self.ping_test() cmd = f"ethtool -s {self.eth_interface} autoneg on" self.terminal.runcmd(cmd=cmd, timeout=30, expected="link") self.terminal.sendline("\r\n") self.terminal.runcmd(cmd=f"ethtool {self.eth_interface}") self.ping_test() @eth_default_runner def ifupdown(self, **kwargs): for count in self.updown_count: self.terminal.runcmd(cmd=f"ifconfig {self.eth_interface} down", timeout=15, expected="root") self.terminal.runcmd( cmd=f"ifconfig {self.eth_interface} up;sleep 3", timeout=15, expected="root", expected_failures="link is not ready", ) self.ping_test() @eth_default_runner def ifplugd(self, **kwargs): cmd = f'pgrep -f ifplugd >/dev/null || echo "ifplugd demon not running"' self.terminal.runcmd(cmd=cmd, timeout=30, expected_failures=["ifplugd demon not running"]) self.ifupdown() cmd = f"ifconfig {self.eth_interface} {self.hostIp} netmask 255.255.255.0" self.terminal.runcmd(cmd=cmd, timeout=self.timeout) @eth_default_runner def eth_tftp(self, **kwargs): failures = [ "server error:", "(2) Access violation", "No such file or directory", "ERROR", ] cmd_list = [ "tftp_file=$(mktemp tftp.XXXXXX)", f"dd if=/dev/zero of=$tftp_file bs=1 count=0 seek={self.file_size}", ] self.terminal.runcmd_list(cmd_list=cmd_list, timeout=30, expected_failures=failures) self.run_on_host(cmd="chmod 777 -R /tftpboot/") 
cmd_list = [ f"tftp -p -r $tftp_file {self.targetIp}", "file1=$(md5sum $tftp_file | awk '{print $1}')", "rm $tftp_file", ] self.terminal.runcmd_list(cmd_list=cmd_list, timeout=300, expected_failures=failures) self.run_on_host(cmd="chmod 777 -R /tftpboot/") cmd_list = [ f"tftp -g -r $tftp_file {self.targetIp}", "file2=$(md5sum $tftp_file | awk '{print $1}')", "[ $file1 == $file2 ] || return 1", ] self.terminal.runcmd_list(cmd_list=cmd_list, timeout=300, expected_failures=failures) self.run_on_host(cmd="rm -rf /tftpboot/tftp*") @eth_default_runner def eth_nfs(self, **kwargs): self.log.info("Output test files and clean up..") cmd_list = [ "out_dir=/tmp/nfs_temp_output", "mkdir -p ${out_dir} > /dev/null 2>&1", "out_prefix=${out_dir}/nfs_test", "out_mount=${out_prefix}.mount", "out_mount_prefix=${out_mount}/nfs_test", "unmount ${out_mount} && rm -rf ${out_prefix}*", ] self.terminal.runcmd_list(cmd_list=cmd_list, timeout=60) self.log.info("Mounting NFS...") cmd_list = [ "mkdir -p ${out_mount}", f'rpcinfo "{self.targetIp}" | grep "nfs"', f"mount -o port=2049,nolock,proto=tcp,vers=2 {self.targetIp}:/exports/root $out_mount", ] self.terminal.runcmd_list(cmd_list=cmd_list, timeout=200) self.log.info("Creating large pattern data files..") cmd_list = [ " [ -c /dev/urandom ] || mknod -m 777 /dev/urandom c 1 9 > /dev/null 2>&1;", "dd if=/dev/urandom of=${out_prefix}.r2m-pattern.bin bs=1024 count=4096;", "dd if=/dev/urandom of=${out_mount_prefix}.m2r-pattern.bin bs=1024 count=4096;", "cp ${out_mount_prefix}.m2r-pattern.bin ${out_prefix}.m2r-pattern.bin;", "cp ${out_prefix}.r2m-pattern.bin ${out_mount_prefix}.r2m-pattern.bin;", ] self.terminal.runcmd_list(cmd_list=cmd_list, timeout=200) self.log.info("Re-mounting the NFS.. Verifying the read back data...") cmd_list = [ "umount ${out_mount};", f"mount -o port=2049,nolock,proto=tcp,vers=2 {self.targetIp}:/exports/root $out_mount;" "diff -q ${out_prefix}.m2r-pattern.bin ${out_mount_prefix}.m2r-pattern.bin", "diff -q ${out_mount_prefix}.r2m-pattern.bin ${out_prefix}.r2m-pattern.bin", ] self.terminal.runcmd_list(cmd_list=cmd_list, timeout=200) @eth_default_runner def eth_scp(self, **kwargs): cmd_list = [ "scp_file=$(mktemp scp.XXXXXXXXX)", f"dd if=/dev/zero of=$scp_file bs=1 count=0 seek={self.file_size}", f'expect -c "spawn scp -r $scp_file {self.user}@{self.targetIp}:/home/{self.user}/;expect "password"; send "{self.password}"; interact"', "file1=$(md5sum $scp_file | awk '{print $1}') && rm $scp_file", f'expect -c "spawn scp {self.user}@{self.targetIp}:/home/{self.user}/$scp_file ./;expect "password:"******"{self.password}"; interact"', "file2=$(md5sum $scp_file | awk '{print $1})'", "[ $file1 == $file2 ] || echo 'scp test failed'", ] self.terminal.runcmd_list(cmd_list=cmd_list, timeout=120, expected_failure=["scp test failed"]) @eth_default_runner def eth_telnet(self, **kwargs): self.terminal.runcmd(cmd="telnetd", timeout=20) self.test_host_console = Xexpect( hostname=self.config["eth_host_name"], non_interactive=False, log=log, ) self.test_host_console.runcmd( cmd=f"telnet {self.hostIp}", timeout=20, expected="Peta", wait_for_prompt=False, ) self.test_host_console.sendline("root") self.test_host_console.sendline("root") time.sleep(1) self.test_host_console.sendline("root") self.test_host_console.runcmd(cmd=f"ls", timeout=20, expected=":~# ", wait_for_prompt=False) @eth_default_runner def eth_dhcp(self, **kwargs): cmd = f"udhcp -i {self.eth_interface}" self.terminal.runcmd(cmd=cmd, timeout=30, expected_failures=["not found"]) self.ping_test() 
@eth_default_runner def iperf_test(self, **kwargs): self.log.info("Starting an iperf server on the host...") cmd = f"{self.iperf_binary} -s &" self.run_on_host(cmd=cmd, timeout=50) time.sleep(3) self.terminal.runcmd( cmd=f"tftp -g -r {self.iperf_binary} {self.targetIp}", timeout=60) self.terminal.runcmd( cmd= f"chmod 777 {self.iperf_binary}; mv {self.iperf_binary} /usr/sbin/", timeout=60, ) time.sleep(3) self.log.info(f"Measuring {self.iperf_binary} TCP throughput...") cmd = f"{self.iperf_binary} -c {self.targetIp} -f m {self.extra_iperf_args}" self.terminal.runcmd(cmd=cmd, timeout=60) time.sleep(3) self.log.info(f"Measuring {self.iperf_binary} UDP throughput...") cmd = f"{self.iperf_binary} -c {self.targetIp} -f m -u {self.extra_iperf_args}" self.terminal.runcmd(cmd=cmd, timeout=60) self.log.info( f"Running {self.iperf_binary} with option -d | --dualtest ....") cmd = f"{self.iperf_binary} -c {self.targetIp} -f m -d {self.extra_iperf_args}" self.terminal.runcmd(cmd=cmd, timeout=60) time.sleep(3) self.run_on_host(cmd=f"pkill {self.iperf_binary}", timeout=50) self.log.info( f"Starting an {self.iperf_binary} server on the target...") # self.terminal.runcmd(cmd="tftp -g -r {self.iperf_binary} {self.targetIp}",timeout=60) self.terminal.runcmd(cmd=f"{self.iperf_binary} -s &", timeout=60) time.sleep(3) self.log.info(f"Starting an iperf client on the host...") self.run_on_host( cmd= f"{self.iperf_binary} -c {self.hostIp} -f m {self.extra_iperf_args}", timeout=50, ) time.sleep(3) def run_on_host(self, cmd, expected="root", timeout=60, expected_failures=None, **kwargs): self.test_host_console = Xexpect( hostname=self.config["eth_host_name"], non_interactive=False, log=log, ) if cmd == "gettest_hostmac": self.test_host_console.runcmd( cmd="ifconfig enp9s0 | awk '/HWaddr {print substr($5,1)}'", timeout=30) self.targetMac = self.test_host_console.output() self.test_host_console.runcmd(cmd="sudo su -", timeout=timeout, expected="root") if type(cmd) is list: self.test_host_console.runcmd_list( cmd_list=f"{cmd}", expected_failures=expected_failures, timeout=timeout, expected=expected, ) else: self.test_host_console.runcmd( cmd=f"{cmd}", expected=expected, timeout=timeout, expected_failures=expected_failures, ) @eth_default_runner def netperf(self, **kwargs): failures = ["No space left on device", "recv_response:"] cmd = f"ps -ef | grep netserver || netserver -D -4 &" self.terminal.runcmd(cmd=cmd, timeout=20) if self.mtu != "1500": time.sleep(2) self.log.info( f"Updating MTU size on host and device... 
: {self.mtu}") self.terminal.runcmd( cmd= f"ifconfig {self.eth_interface} down; ifconfig {self.eth_interface} mtu {self.mtu} up; ifconfig" ) time.sleep(3) self.run_on_host( cmd= f"ifconfig {self.host_interface} down; ifconfig {self.host_interface} mtu {self.mtu} up; ifconfig" ) self.udp_mtu = f"{self.mtu} - 28" time.sleep(3) self.log.info( f"Device Netperf Output for TCP(Tx Mode) with MTU size {self.mtu}") time.sleep(1) cmd = f"netperf -c -C -H {self.targetIp} -t TCP_STREAM" self.terminal.runcmd(cmd=cmd, expected_failures=failures, timeout=50) time.sleep(3) self.log.info( f"Device Netperf Output for UDP(Tx Mode) with MTU Size: {self.mtu}" ) time.sleep(1) cmd = f"netperf -c -C -H {self.targetIp} -t UDP_STREAM" self.terminal.runcmd(cmd=cmd, timeout=50) time.sleep(3) cmd = f"ps -ef | grep netserver" self.run_on_host(cmd=cmd, timeout=50) time.sleep(1) self.log.info( f"Host Netperf Output for TCP(Rx Mode) with MTU size: {self.mtu}") cmd = f"netperf -c -C -H {self.hostIp} -t TCP_STREAM -- -m {self.mtu} -M {self.mtu}" self.run_on_host(cmd=cmd, expected_failures=failures, timeout=50) time.sleep(3) self.log.info( f"Host Netperf Output for UDP(Rx Mode) with MTU Size: {self.udp_mtu}" ) cmd = f"netperf -c -C -H {self.hostIp} -t UDP_STREAM -- -m {self.mtu} -M {self.udp_mtu}" self.run_on_host(cmd=cmd, timeout=50) time.sleep(2) @eth_default_runner def vlan_test(self, **kwargs): cmd = "board_mac=$(ifconfig eth0 | awk '/HWaddr {print substr($5,1)}')" self.terminal.runcmd(cmd=cmd, timeout=60) self.BoardMac = self.terminal.output() time.sleep(2) self.log.info(f"Board HWaddr : {self.BoardMac}... ") cmd = 'tcpdump -i {self.eth_interface} "vlan and icmp" and ip host 10.10.70.2 and ether host "$board_mac" -n -ev &' self.terminal.runcmd(cmd=cmd, timeout=60) time.sleep(2) cmd_list = [ "ip link set dev eth2.5 down &", "ip link del eth2.5 &", "modprobe 8021q &", "ip link add link eth2 name eth2.5 type vlan id 5 &", "ip addr add 10.10.70.2 brd 10.10.70.255 dev eth2.5 &", "ip link set dev eth2.5 up &", f"arp -s {self.hostIp} $board_mac dev eth2.5 &", f"{self.hostIp} -I eth2.5 -c 3 &", ] self.run_on_host(cmd=cmd_list, timeout=120) time.sleep(10) self.log.info("=================== rx output ===================") self.terminal.runcmd(cmd="killall -s INT tcpdump &", timeout=60) time.sleep(2) cmd_list = [ "ip link set dev eth2.5 down &", "ip link del eth2.5 &", f"route del {self.hostIp} &", f"route add {self.hostIp} dev eth2 &", f"tcpdump -n -i eth2 dst port 9 and ip host {self.hostIp} -e -v > ~/tx_tcpdump_vlan.txt &", ] self.run_on_host(cmd=cmd_list, timeout=120) time.sleep(10) self.run_on_host(cmd="gettest_hostmac", timeout=20) time.sleep(1) commands = [ "echo 'stop' > /proc/net/pktgen/pgctrl", "echo 'rem_device_all' > /proc/net/pktgen/kpktgend_0", "echo 'add_device eth0' > /proc/net/pktgen/kpktgend_0", "echo 'count 1' > /proc/net/pktgen/eth0", "echo 'clone_skb 0' > /proc/net/pktgen/eth0", "echo 'pkt_size 200' > /proc/net/pktgen/eth0", "echo 'delay 0' > /proc/net/pktgen/eth0", "echo 'frags 4' > /proc/net/pktgen/eth0", "echo 'vlan_id 0' > /proc/net/pktgen/eth0", "echo 'vlan_p 0' > /proc/net/pktgen/eth0", "echo 'vlan_cfi 0' > /proc/net/pktgen/eth0", f"echo 'dst {self.targetIp}' > /proc/net/pktgen/eth0", f"echo 'dst_mac {self.targetMac}' > /proc/net/pktgen/eth0", "echo 'start' > /proc/net/pktgen/pgctrl", ] self.terminal.runcmd_list(cmd_list=commands, timeout=60) time.sleep(4) self.log.info("=================== tx output ===================") cmd_list = [ "killall -s INT tcpdump &", "cat ~/tx_tcpdump_vlan.txt", "rm 
~/tx_tcpdump_vlan.txt", ] self.run_on_host(cmd=cmd_list, timeout=120) time.sleep(10) @eth_default_runner def ping_flood(self, **kwargs): self.terminal.runcmd( cmd=f"tftp -g -r ping 10.10.70.101; chmod 777 ping") self.terminal.runcmd( cmd=f"./ping -f {self.hostIp} -i 5 -c 5", timeout=60, expected=" 0% packet loss", ) self.terminal.runcmd(cmd=f"ping {self.targetIp} -c 5", timeout=30, expected=" 0% packet loss") @eth_default_runner def ping_jumbo_frame(self, **kwargs): for mtu in [2048, 4096, 8192]: self.log.info(f"Updating MTU size on device... : {mtu}") self.terminal.runcmd( cmd= f"ifconfig {self.eth_interface} down; ifconfig {self.eth_interface} mtu {mtu} up; ifconfig" ) time.sleep(2) self.log.info(f"Updating MTU size on test host... : {mtu}") self.run_on_host( cmd= f"ifconfig {self.host_interface} down; ifconfig {self.host_interface} mtu {mtu} up; ifconfig" ) time.sleep(2) self.run_on_host( cmd=f"ping {self.hostIp} -l {mtu-28} -c 5", timeout=30, expected=" 0% packet loss", ) time.sleep(2) self.terminal.runcmd( cmd= f"ifconfig {self.eth_interface} down; ifconfig {self.eth_interface} mtu 1500 up; ifconfig" ) time.sleep(3) self.run_on_host( cmd= f"ifconfig {self.host_interface} down; ifconfig {self.host_interface} mtu {mtu} up; ifconfig" ) @eth_default_runner def eth_pktgen(self, **kwargs): self.run_on_host(cmd="gettest_hostmac", timeout=20) commands = [ "echo 'stop' > /proc/net/pktgen/pgctrl", "echo 'rem_device_all' > /proc/net/pktgen/kpktgend_0", f"echo 'add_device {self.eth_interface}' > /proc/net/pktgen/kpktgend_0", f"echo 'count {self.pktgen_count}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'clone_skb 100' > /proc/net/pktgen/{self.eth_interface}", f"echo 'pkt_size {self.pktgen_size}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'burst {self.pktgen_burst} > /proc/net/pktgen/{self.eth_interface}", f"echo 'delay {self.pktgen_delay}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'vlan_id {self.pktgen_vlan_id}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'vlan_p 0' > /proc/net/pktgen/{self.eth_interface}", f"echo 'vlan_cfi 0' > /proc/net/pktgen/{self.eth_interface}", f"echo 'frags {self.pktgen_frags}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'dst {self.targetIp}' > /proc/net/pktgen/{self.eth_interface}", f"echo 'dst_mac {self.targetMac}' > /proc/net/pktgen/{self.eth_interface}", "echo 'start' > /proc/net/pktgen/pgctrl", f"cat /proc/net/pktgen/{self.eth_interface}", "paramsCount=$(grep -E 'Params: count' /proc/net/pktgen/eth0 | awk '{print substr($3,1)}')", "pktSofar=$(grep -E 'pkts-sofar' /proc/net/pktgen/eth0 | awk '{print substr($2,1)}')", '[ "$paramsCount" -eq "$pktSofar" ] || echo "fail"', ] self.terminal.runcmd_list(cmd_list=commands, timeout=60) @eth_default_runner def eth_pqueue(self, **kwargs): self.run_on_host(cmd="gettest_hostmac", timeout=20) self.run_on_host(cmd="killall -s INT tcpdump &", timeout=20) self.run_on_host(cmd="rm ~/priority.txt &", timeout=20) self.run_on_host( cmd= f"tcpdump -n -i {self.host_interface} ip host {self.targetIp} -ev > ~/priority.txt &", timeout=20, ) time.sleep(5) self.log.info(f"targetMac = {self.targetMac}") commands = [ "echo 'stop' > /proc/net/pktgen/pgctrl", "echo 'rem_device_all' > /proc/net/pktgen/kpktgend_0", "echo 'add_device eth0@0' > /proc/net/pktgen/kpktgend_0", "echo 'count 500' > /proc/net/pktgen/eth0@0", "echo 'burst 50' > /proc/net/pktgen/eth0@0", "echo 'clone_skb 0' > /proc/net/pktgen/eth0@0", "echo 'pkt_size 1500' > /proc/net/pktgen/eth0@0", "echo 'delay 0' > /proc/net/pktgen/eth0@0", "echo 'frags 0' > 
/proc/net/pktgen/eth0@0", f"echo 'dst {self.targetIp}' > /proc/net/pktgen/eth0@0", f"echo 'dst_mac {self.targetMac}' > /proc/net/pktgen/eth0@0", "echo 'skb_priority 1' > /proc/net/pktgen/eth0@0", "echo 'queue_map_min 0' > /proc/net/pktgen/eth0@0", "echo 'queue_map_max 0' > /proc/net/pktgen/eth0@0", ] self.terminal.runcmd_list(cmd_list=commands, timeout=60) time.sleep(5) commands = [ "echo 'rem_device_all' > /proc/net/pktgen/kpktgend_1", "echo 'add_device eth0@1' > /proc/net/pktgen/kpktgend_1", "echo 'count 20' > /proc/net/pktgen/eth0@1", "echo 'burst 20' > /proc/net/pktgen/eth0@1", "echo 'clone_skb 0' > /proc/net/pktgen/eth0@1", "echo 'pkt_size 1400' > /proc/net/pktgen/eth0@1", "echo 'delay 0' > /proc/net/pktgen/eth0@1", "echo 'frags 0' > /proc/net/pktgen/eth0@1", f"echo 'dst {self.targetIp}' > /proc/net/pktgen/eth0@1", f"echo 'dst_mac {self.targetMac}' > /proc/net/pktgen/eth0@1", "echo 'skb_priority 1' > /proc/net/pktgen/eth0@1", "echo 'queue_map_min 1' > /proc/net/pktgen/eth0@1", "echo 'queue_map_max 1' > /proc/net/pktgen/eth0@1", ] self.terminal.runcmd_list(cmd_list=commands, timeout=60) time.sleep(5) commands = [ "echo 'rem_device_all' > /proc/net/pktgen/kpktgend_2", "echo 'add_device eth0@2' > /proc/net/pktgen/kpktgend_2", "echo 'count 500' > /proc/net/pktgen/eth0@2", "echo 'burst 50' > /proc/net/pktgen/eth0@2", "echo 'clone_skb 0' > /proc/net/pktgen/eth0@2", "echo 'pkt_size 1200' > /proc/net/pktgen/eth0@2", "echo 'delay 0' > /proc/net/pktgen/eth0@2", "echo 'frags 0' > /proc/net/pktgen/eth0@2", f"echo 'dst {self.targetIp}' > /proc/net/pktgen/eth0@2", f"echo 'dst_mac {self.targetMac}' > /proc/net/pktgen/eth0@2", "echo 'skb_priority 1' > /proc/net/pktgen/eth0@2", "echo 'queue_map_min 0' > /proc/net/pktgen/eth0@2", "echo 'queue_map_max 0' > /proc/net/pktgen/eth0@2", ] self.terminal.runcmd_list(cmd_list=commands, timeout=60) time.sleep(5) commands = [ "echo 'rem_device_all' > /proc/net/pktgen/kpktgend_3", "echo 'add_device eth0@3' > /proc/net/pktgen/kpktgend_3", "echo 'count 20' > /proc/net/pktgen/eth0@3", "echo 'burst 20' > /proc/net/pktgen/eth0@3", "echo 'clone_skb 0' > /proc/net/pktgen/eth0@3", "echo 'pkt_size 1100' > /proc/net/pktgen/eth0@3", "echo 'delay 0' > /proc/net/pktgen/eth0@3", "echo 'frags 0' > /proc/net/pktgen/eth0@3", f"echo 'dst {self.targetIp}' > /proc/net/pktgen/eth0@3", f"echo 'dst_mac {self.targetMac}' > /proc/net/pktgen/eth0@3", "echo 'skb_priority 1' > /proc/net/pktgen/eth0@3", "echo 'queue_map_min 1' > /proc/net/pktgen/eth0@3", "echo 'queue_map_max 1' > /proc/net/pktgen/eth0@3", "echo 'start' > /proc/net/pktgen/pgctrl &", ] self.terminal.runcmd_list(cmd_list=commands, timeout=60) time.sleep(5) self.run_on_host(cmd="killall -s INT tcpdump &", timeout=20) self.run_on_host(cmd="cat ~/priority.txt &", timeout=20)
class Yocto(Basebuild): def __init__(self, config, setup: bool = False): super().__init__(config, setup=config.yocto_reset) super().configure() self.repo_path = config.repo_path self.console = Xexpect(log=log, exit_nzero_ret=True) self.yocto_url = config.yocto_url self.yocto_branch = config.yocto_branch self.yocto_manifest_xml = config.yocto_manifest_xml self.repo_bundle_url = config.repo_bundle_url self.workdir = config.workDir self.imagesdir = config.imagesDir self.deploy_dir = f"{config['yocto.conf.TMPDIR']}/deploy" if not self.repo_path: self.repo_path = "repo" def fetch(self): init_cmd = f"{self.repo_path} init -u {self.yocto_url} -b {self.yocto_branch} -m {self.yocto_manifest_xml}" if self.repo_bundle_url: init_cmd += f" --repo-url {self.repo_bundle_url}" self.console.runcmd(init_cmd) log.info("INFO: repo init -> DONE") def repo_sync(self, timeout=500): self.console.runcmd(f"{self.repo_path} sync -d -j 20", timeout=timeout) log.info("INFO: repo sync -> DONE") def repo_start(self): self.console.runcmd( f"{self.repo_path} start {self.yocto_branch} --all") log.info("INFO: repo start -> DONE") def repo_reset(self, timeout=500): self.console.runcmd( f"{self.repo_path} forall -c 'git reset --hard @{{u}}; git clean -fdx'", timeout=timeout, ) log.info("INFO: repo reset -> DONE") def sdk_setup(self, setupfile=""): self.console.runcmd("pwd") if not setupfile: setupfile = "setupsdk" log.info(f"INFO: sourcing {setupfile}") if is_file(setupfile): self.console.runcmd(f"source {setupfile}") else: log.error("ERROR:Yocto setup failed.Check repo sync") assert False, "Setup for yocto failed, Check repo init/sync." def reset_conf_file(self): remove(f"{self.workdir}/build/conf/auto.conf") def set_conf_vals(self): for key in self.config["yocto.conf"].keys(): value = self.config[f"yocto.conf.{key}"] newline = f'{key} = "{value}"' add_newline(f"{self.workdir}/build/conf/auto.conf", newline) for entries in self.config["yocto.exactconf"]: if "{{" in entries: entries = entries.format() add_newline(f"{self.workdir}/build/conf/auto.conf", entries) def image_builder(self, recipe, extra_args="", timeout=1000): bitbake_cmnd = f"bitbake {recipe} {extra_args}" self.console.runcmd(bitbake_cmnd, timeout=timeout) def deploy(self, deploy_dir=""): """This Function deploy the generated yocto build images to specific location Parameters: yocto_artifacts - to copy any specific files yocto_deploy_dir : to copy images to specific location deploy_dir : From location """ ret = True if self.config.get("yocto_deploy_dir"): yocto_deploy_dir = self.config.yocto_deploy_dir if not is_dir(yocto_deploy_dir): mkdir(yocto_deploy_dir) else: yocto_deploy_dir = self.imagesdir if not deploy_dir: deploy_dir = self.deploy_dir if "yocto_artifacts" in self.config: for image in self.config["yocto_artifacts"]: image_file = find_file(image, deploy_dir) if image_file: if is_file(image_file): copy_file(image_file, yocto_deploy_dir) elif is_dir(image_file): copyDirectory(image_file, yocto_deploy_dir, symlinks=True) else: log.error(f"{image} does not exists in {deploy_dir}") ret = False else: copyDirectory(deploy_dir, yocto_deploy_dir, symlinks=True) return ret
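# A minimal usage sketch of the Yocto class above: the typical fetch -> sync ->
# setup -> bitbake -> deploy sequence. The `config` object is assumed to be a
# fully populated configuration of the kind this library passes around, and the
# recipe name is illustrative; only methods defined in the class are called.
def yocto_build_flow(config, recipe="petalinux-image-minimal"):
    yocto = Yocto(config)
    yocto.fetch()              # repo init with yocto_url / yocto_branch / manifest
    yocto.repo_sync()          # repo sync -d -j 20
    yocto.sdk_setup()          # source setupsdk in the synced tree
    yocto.reset_conf_file()    # drop any stale conf/auto.conf
    yocto.set_conf_vals()      # write yocto.conf.* and yocto.exactconf entries
    yocto.image_builder(recipe)
    return yocto.deploy()      # copy yocto_artifacts (or the whole deploy dir) to imagesDir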
class BaseCrossCompile(Basebuild): def __init__(self, config, app_name, setup=True): super().__init__(config, setup=setup) self.console = Xexpect(log, exit_nzero_ret=True) self._setup_args() self.app_name = app_name def _copy_src(self): src = (f"{self.config['ROOT']}/" + "/".join(self.config["test_param_path_list"][0:3]) + "/src") dest = os.path.join(self.config["workDir"], "src") if is_dir(src): copyDirectory(src, dest) def _setup_args(self): if "CARDANO_ROOT" in self.config: self.cardano_root = self.config["CARDANO_ROOT"] self.sysroot = f"{self.config['aie_lib_wsdir']}/images/mini_sysroot" self.lib_file = self.config["lib_file"] def pre_configure(self): super().configure() self.srcDir = os.path.join(self.workDir, "src") self._copy_src() self.compiler_flags = "" self.linker_flags = "" self.include_dir = [self.workDir] self.lib_dir = [] def configure(self): VITIS_DIR = self.config.vitisPath XLNX_LICENSE = self.config["XILINXD_LICENSE_FILE"] cmdlist = [ f"export XILINXD_LICENSE_FILE={XLNX_LICENSE}", f"export VITIS_DIR={VITIS_DIR}", f"source {self.config.vitisPath}/settings64.sh", f"cd {self.workDir}", ] self.console.runcmd_list(cmdlist) if "AIETOOLS_ROOT" in self.config: AIETOOLS_ROOT = self.config.AIETOOLS_ROOT SYSROOT = self.config.SYSROOT cmdlist = [ f"export AIETOOLS_ROOT={AIETOOLS_ROOT}", f"export SYSROOT={SYSROOT}", f"export PATH={VITIS_DIR}/bin:$PATH", ] self.console.runcmd_list(cmdlist) self.include_dir.append(f"{AIETOOLS_ROOT}/include") self.lib_dir += [f"{AIETOOLS_ROOT}/lib/{self.lib_file}.o/"] if "user_include_path" in self.config: for path in self.config.user_include_path: path = parse_config(self.config, path) self.include_dir += [path] if "user_compiler_flags" in self.config: self.compiler_flags += self.config.user_compiler_flags # Add Cardano Source path to include if self.config.get("cardano_app"): self.include_dir.append( f"{self.config.cardano_base_ws_dir}/images/src") self.include = ["-I" + dir for dir in self.include_dir] self.include = " ".join(self.include) if "user_lib_path" in self.config: for path in self.config.user_lib_path: path = parse_config(self.config, path) self.lib_dir += [path] self.lib = ["-L" + dir for dir in self.lib_dir] self.lib = " ".join(self.lib) if "user_linker_flags" in self.config: self.linker_flags += self.config.user_linker_flags def deploy(self, src_file_name): if "deploy_artifacts" in self.config: for artifact in self.config["deploy_artifacts"]: artifact_path = os.path.join(self.workDir, artifact) if is_dir(artifact_path): copyDirectory(artifact_path, os.path.join(self.imagesDir, artifact)) else: copy_match_files(self.workDir, self.imagesDir, artifact, follow_src_dir=True) cmd_list = [ f"cd {self.imagesDir}", f"tar cvfJ deploy_artifacts.tar.xz ./*", ] self.console.runcmd_list(cmd_list) aie_path = os.path.join(self.workDir, "aie_control." + self.app_name["exe"]) if is_file(f'{self.workDir}/{src_file_name}.{self.app_name["exe"]}'): copy_file( f'{self.workDir}/{src_file_name}.{self.app_name["exe"]}', self.imagesDir, ) log.info(f"{self.app_name['exe']} created successfully") else: raise Exception(f"Error: {self.app_name['exe']} creation failed")
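# Standalone sketch of the flag assembly done in BaseCrossCompile.configure():
# include and library search paths are collected as plain lists and joined into
# "-I..." / "-L..." strings just before the compile and link commands are built.
# The helper and example paths below are illustrative only; this is not a
# method of the class.
def assemble_search_flags(include_dirs, lib_dirs):
    include = " ".join("-I" + d for d in include_dirs)
    lib = " ".join("-L" + d for d in lib_dirs)
    return include, lib

# assemble_search_flags(["/ws/work", "/opt/aietools/include"], ["/opt/aietools/lib/libadf.o/"])
# -> ("-I/ws/work -I/opt/aietools/include", "-L/opt/aietools/lib/libadf.o/")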
class CrossCompile(Basebuild): def __init__(self, config, app_name, setup=True): super(CrossCompile, self).__init__(config, setup=setup) if has_key(config, "console"): self.console = config.console else: self.console = Xexpect(log, exit_nzero_ret=True) setattr(config, "console", self.console) self._copy_src() self.srcDir = os.path.join(self.config["workDir"], "src") self.app_name = app_name self._setup_args() self._configure() def _copy_src(self): src = (f"{self.config['ROOT']}/" + "/".join(self.config["test_param_path_list"][0:3]) + "/src") dest = os.path.join(self.config["workDir"], "src") if is_dir(src): copyDirectory(src, dest) def _setup_args(self): if has_key(self.config, "CARDANO_ROOT"): self.cardano_root = self.config["CARDANO_ROOT"] self.sysroot = self.config["mini_sysroot"] self.lib_file = self.config["lib_file"] def _configure(self): self.common_compiler_flags = "-Wall -O0 -g3 -c -fmessage-length=0" self.common_linker_flags = "" self.include_dir = [self.config["workDir"] + "/"] self.lib_dir = [] if has_key(self.config, "CARDANO_ROOT"): self.lib_dir.append( os.path.join(self.cardano_root, "lib", self.lib_file + ".o")) self.include_dir.append(os.path.join(self.cardano_root, "include")) VITIS_PATH = self.config["vitisPath"] cmdlist = [ f"source {VITIS_PATH}/settings64.sh", f"cd {self.config['workDir']}" ] self.console.runcmd_list(cmdlist) def compile(self, src_file_name): if has_key(self.config, "user_include_path"): for path in self.config.user_include_path: path = parse_config(self.config, path) self.include_dir += [path] self.include = ["-I" + dir for dir in self.include_dir] self.include = " ".join(self.include) compile_cmd = [ self.app_name["compiler"][self.config["param"]], self.common_compiler_flags, self.app_name["compile_flags"], f'-MT"{self.srcDir}/{src_file_name}.cpp"', self.app_name["procname"][self.config["param"]], f'-MF"{self.srcDir}/{src_file_name}.d"', f'-MT"{self.srcDir}/{src_file_name}.o"', "-o", f'"{self.srcDir}/{src_file_name}.o"', f"{self.include}", f"{self.srcDir}/{src_file_name}.cpp", ] if has_key(self.config, "file_format") and self.config["file_format"] == "eabi": compile_cmd += [self.config["abi_cmd"]] cmd = " ".join(compile_cmd) self.console.runcmd(cmd) time.sleep(5) def link(self): if has_key(self.config, "user_lib_path"): for path in self.config.user_lib_path: path = parse_config(self.config, path) self.lib_dir += [path] self.lib = ["-L" + dir for dir in self.lib_dir] self.lib = " ".join(self.lib) link_cmd = [ self.app_name["compiler"][self.config["param"]], "-v", self.app_name["procname"][self.config["param"]], self.common_linker_flags, f"{self.lib}", "-o", f"{self.config['workDir']}/aie_control.{self.app_name['exe']}", f"{self.srcDir}/aie_control.o", ] if has_key(self.config, "cardano_app") and self.config["cardano_app"]: link_cmd += [ f'{self.wsDir}../cardano/work/src/{self.config["cardano_src"]}.o' ] if has_key(self.config, "file_format") and self.config["file_format"] == "eabi": link_cmd += [self.config["abi_cmd"]] link_cmd += [self.app_name["link_flags"]] cmd = " ".join(link_cmd) self.console.runcmd(cmd) time.sleep(5) def deploy(self): if has_key(self.config, "deploy_artifacts"): for artifact in self.config["deploy_artifacts"]: artifact_path = os.path.join(self.config["workDir"], artifact) if is_dir(artifact_path): copyDirectory( artifact_path, os.path.join(self.config["imagesDir"], artifact)) elif is_file(artifact_path): copy_file(artifact_path, self.config["imagesDir"]) cmd_list = [ f"cd {self.config['imagesDir']}", f"tar cvfJ 
deploy_artifacts.tar.xz ./*", ] self.console.runcmd_list(cmd_list) aie_path = os.path.join(self.config["workDir"], "aie_control." + self.app_name["exe"]) if is_file(aie_path): copy_file(aie_path, self.config["imagesDir"]) log.info(f"{self.app_name['exe']} created successfully") else: raise Exception(f"Error: {self.app_name['exe']} creation failed")
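# A minimal usage sketch of the CrossCompile flow above: compile one source
# file, link it into aie_control.<exe>, then deploy the artifacts. `config` and
# `app_name` are assumed to be prepared exactly as the class expects, and the
# source file name is illustrative; only methods defined in the class are used.
def cross_compile_flow(config, app_name, src_file_name="aie_control"):
    builder = CrossCompile(config, app_name)  # sources settings64.sh and cd's into workDir
    builder.compile(src_file_name)            # build <src_file_name>.o with the selected compiler
    builder.link()                            # link into aie_control.<exe>
    builder.deploy()                          # copy deploy_artifacts and the elf into imagesDir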
class AppBuilder(Basebuild): def __init__(self, config, setup: bool = True): super().__init__(config, setup=setup) if has_key(config, "console"): self.console = config["console"] else: self.console = Xexpect(log, exit_nzero_ret=True) config["console"] = self.console self.config_var_dict = self.set_config_var() self.defconfig_var_dict = self.set_defconfig_var() self.supported_components_list = self.get_defconfig_var().keys() self.user_var_dict = "" self.yaml_file_path = "" self.CONFIG_FILE_SET = False self.extra_env = ["scriptsDir"] self.cmd = "" self.config_var() def set_config_var(self): """Loads config.yaml from absolute path set in configuration. If path not set in configuration, use config.yaml included in library. """ if has_key(self.config, "XSCT_CONFIG"): xsct_path = self.config["XSCT_CONFIG"] else: dir_path = os.path.dirname(os.path.realpath(__file__)) xsct_path = os.path.join(dir_path, "config.yaml") data = load_yaml(xsct_path) if data is None or data == "": print_err_exit("File: config.yaml is empty or not found") for key in data: data[key] = set(data[key]) return data def config_var(self): """Exports configuration variables needed by tcl scripts. If scriptsDir is not specified in config, use this file's directory. """ if not has_key(self.config, "scriptsDir"): self.config["scriptsDir"] = os.path.dirname(os.path.realpath(__file__)) if has_key(self.config, "extra_xsct_env"): if isinstance(self.config["extra_xsct_env"], list): self.extra_env.extend(self.config["extra_xsct_env"]) else: self.extra_env.extend(self.config["extra_xsct_env"].split(" ")) for key in self.extra_env: value = self.config[key] self.console.runcmd(f"export {key}={value}") def set_defconfig_var(self): """Loads defconfig.yaml from absolute path set in configuration. If path not set in configuration, use defconfig.yaml included in library. 
""" if has_key(self.config, "DEFAULT_XSCT_CONFIG"): defconfig_path = self.config["DEFAULT_XSCT_CONFIG"] else: dir_path = os.path.dirname(os.path.realpath(__file__)) defconfig_path = os.path.join(dir_path, "defconfig.yaml") data = load_yaml(defconfig_path) if data is None or data == "": err_msg = "defconfig.yaml is empty or not found or file reading failed" log.error(err_msg) raise Exception(err_msg) # Find the supported components valid_components = list(data.keys()) # Remove the common key from the supported components del_element("common", valid_components) # Add the common variables to each component for component in valid_components: try: # Find the common dictionary values in common Vs specified component common_vals = list( set(data["common"].keys()) & set(data[component].keys()) ) # Check if there are some common values present in common and individual component correction = {} for val in common_vals: correction[val] = data[component][val] # Update the data with the common values data[component].update(data["common"]) # Apply the correction data[component].update(correction) except: print_err_exit("Setting defconfig variables failed !") del_key("common", data) return data def get_user_args(self, argv=None): parser = argparse.ArgumentParser( description="This script is used to build elf for any component using xsct" ) parser.add_argument("--app", help="App Name to be build") parser.add_argument("--arch", default="64", help="Platform Architecture") parser.add_argument("--bspname", help="Standalone BSP Name") parser.add_argument( "--component", choices=self.get_supported_components(), help="Supported Components", ) parser.add_argument( "--do_compile", default="1", choices=["0", "1"], help="Build the project" ) parser.add_argument( "--do_cleanup", default="1", choices=["0", "1"], help="Clean up after building the project", ) parser.add_argument("--driver", help="Driver Name to be build") parser.add_argument("--elf_name", help="Cumtome Elf Name to be used") parser.add_argument("--example_name", help="Example to be build") parser.add_argument("--extension", help="Run time functions to perform") parser.add_argument("--extra_args", nargs="*") parser.add_argument("--file", type=open, action=LoadFromFile) parser.add_argument("--hdf", help="Path of the directory containing hdf") parser.add_argument("--hwpname", default="hw0", help="Name of HW project") parser.add_argument("--lib", help="Libraries Required") parser.add_argument("--library_name", help="Library name to be build") parser.add_argument("--thirdparty_name", help="ThirdParty name to be build") parser.add_argument( "--thirdparty_dir", help="ThirdParty Directory where apps/libs are present" ) parser.add_argument("--osname", default="standalone", help="Operating System") parser.add_argument( "--out_dir", help="Output directory path where elfs needs to be copied" ) parser.add_argument("--pname", help="Name of the project") parser.add_argument("--processor", help="Processor used for compilation") parser.add_argument("--rp", help="Repo Path") parser.add_argument("--rp_intg", help="INTG Repo Path") parser.add_argument( "--import_sources", help="Path of directory where source to be imported is present", ) parser.add_argument( "--import_args", help='"Import sources arguments like : -soft-link, etc' ) parser.add_argument("--ws", help="Workspace used for compilation") parser.add_argument("--xsct_path", help="Xsct Bin Path") parser.add_argument("--functest_name", help="Name of the function test") parser.add_argument( "--build_till_bsp", 
default="0", choices=["0", "1"], help="Build bsp only" ) args = parser.parse_args(argv) # Fixme: convert args to type class dict before retunring it return args def map_procname(self, proc): proc_dict = { "a72_0_versal": "psv_cortexa72_0", "a72_1_versal": "psv_cortexa72_1", "r5_0_versal": "psv_cortexr5_0", "r5_1_versal": "psv_cortexr5_1", "pmc_versal": "psv_pmc_0", "psm_versal": "psv_psm_0", "a53_0_zynqmp": "psu_cortexa53_0", "a53_1_zynqmp": "psu_cortexa53_1", "r5_0_zynqmp": "psu_cortexr5_0", "r5_1_zynqmp": "psu_cortexr5_1", "a9_0_zynq": "ps7_cortexa9_0", "a9_1_zynq": "ps7_cortexa9_1", "mb_0": "microblaze_0", } if proc in proc_dict.keys(): return proc_dict[proc] else: return proc def parser(self, config): args = {} if has_key(config, "do_compile"): args["do_compile"] = config["do_compile"] else: args["do_compile"] = 1 if has_key(config, "do_cleanup"): args["do_cleanup"] = config["do_cleanup"] if has_key(config, "extension"): args["extension"] = config["extension"] if has_key(config, "xsct_proj_name"): args["pname"] = config["xsct_proj_name"] if has_key(config, "xsct_proc_name"): args["processor"] = self.map_procname(config["xsct_proc_name"]) if has_key(config, "XSCT_PATH"): args["xsct_path"] = config["XSCT_PATH"] if has_key(config, "xsct_os_name"): args["osname"] = config["xsct_os_name"] if has_key(config, "xsct_xsa"): args["hdf"] = config["xsct_xsa"] if has_key(config, "xsct_app_name"): args["app"] = config["xsct_app_name"] if has_key(config, "xsct_platform_name"): args["hwpname"] = config["xsct_platform_name"] if has_key(config, "build_till_bsp"): args["build_till_bsp"] = config["build_till_bsp"] else: args["build_till_bsp"] = 0 if has_key(config, "extra_args"): args["extra_args"] = config["extra_args"] else: args["extra_args"] = None if has_key(config, "xsct_outDir"): args["out_dir"] = config["xsct_outDir"] else: args["out_dir"] = f"{self.wsDir}/images" if has_key(config, "xsct_elf_name"): args["elf_name"] = config["xsct_elf_name"] if has_key(config, "xsct_lib"): args["lib"] = config["xsct_lib"] if has_key(config, "xsct_library_name"): args["library_name"] = config["xsct_library_name"] if config["xsct_import_sources"]: args["import_sources"] = config["xsct_import_sources"] if config["xsct_import_args"]: args["import_args"] = config["xsct_import_args"] if has_key(config, "xsct_thirdparty_name"): args["thirdparty_name"] = config["xsct_thirdparty_name"] if has_key(config, "xsct_thirdparty_dir"): args["thirdparty_dir"] = config["xsct_thirdparty_dir"] if has_key(config, "xsct_extention_tcl"): if config["xsct_extention_tcl"]: args["extension"] = config["xsct_extention_tcl"] if has_key(config, "component"): args["component"] = config["component"] if has_key(config, "xsct_driver"): args["driver"] = config["xsct_driver"] if has_key(config, "xsct_example_name"): args["example_name"] = config["xsct_example_name"] if has_key(config, "repo_exists") and config["repo_exists"] == 1: args["rp"] = f"{self.workDir}/src/" else: if config["externalEmbeddedsw"]: args["rp"] = config["externalEmbeddedsw"] args["ws"] = f"{self.workDir}/{config['component']}/" dir_path = os.path.dirname(os.path.realpath(__file__)) self.console.runcmd(f"export SCRIPTS_PYTHON={dir_path}") self.console.runcmd(f"export test_log_path={self.wsDir}/logs") self.console.runcmd(f"export test_work_path={self.wsDir}/work") self.console.runcmd(f"export test_image_path={self.wsDir}/images") if has_key(config, "BUILD_SOURCE"): self.console.runcmd(f"export BUILD_SOURCE={config['BUILD_SOURCE']}") if has_key(config, "usr_src_path"): 
self.console.runcmd(f"export usr_src_path={config['usr_src_path']}") return args def set_user_args(self, args): # Extra args might be present in config file appconfig = self.get_config_var()["APP_CONFIG"] bspconfig = self.get_config_var()["BSP_CONFIG"] yaml_file_path = os.path.join(self.workDir, "yamlconf.yaml") if verify_extra_args(args["extra_args"]): if write_extra_args_to_yaml_conf( args, "cmdline", appconfig, bspconfig, yaml_file_path ): self.CONFIG_FILE_SET = True self.yaml_file_path = yaml_file_path elif args["extra_args"] is None: if write_extra_args_to_yaml_conf( args, "file", appconfig, bspconfig, yaml_file_path ): self.CONFIG_FILE_SET = True self.yaml_file_path = yaml_file_path # Verify if all the mandatory args are defined is_defined(args, list(self.get_config_var()["MANDATORY_ARGS"])) arg_dict = {} for arg in args.keys(): value = args[arg] if value is not None and value != "": arg_dict[arg] = value # Collect few mandatory args if "xsct_path" not in arg_dict: arg_dict["xsct_path"] = self.config["xsctCmd"] if "rp" not in arg_dict: arg_dict["rp"] = self.config["ESW_REPO"] # Collect the params from the default settings component = arg_dict["component"] config_dict = self.get_defconfig_var() for key in config_dict: if key == component: subdict = config_dict[key] for subkey in subdict: if subkey not in arg_dict: arg_dict[subkey] = subdict[subkey] self.user_var_dict = arg_dict def set_cmd(self): user_var_dict = self.get_user_var() tclargs = self.get_config_var()["TCL_SUPPORTED_ARGS"] cmd = "" for key in user_var_dict: if key in tclargs: value = user_var_dict[key] if value is not None and value != "": str1 = "-" + str(key) str2 = surround_double_quotes(str(value)) cmd += str1 + " " + str2 + " " dir_path = os.path.dirname(os.path.realpath(__file__)) app_path = os.path.join(dir_path, self.config["APP_TCL"]) cmd = str(user_var_dict["xsct_path"]) + " " + app_path + " " + cmd + " " if self.get_config_file(): cmd += "-yamlconf" + " " + surround_double_quotes(self.get_yaml_file()) self.cmd = cmd # Getter def get_config_var(self): return self.config_var_dict def get_defconfig_var(self): return self.defconfig_var_dict def get_supported_components(self): return self.supported_components_list def get_user_var(self): return self.user_var_dict def get_config_file(self): return self.CONFIG_FILE_SET def get_yaml_file(self): return self.yaml_file_path def get_cmd(self): return self.cmd def build_app(self, config) -> bool: self.set_cmd() cmd = self.get_cmd() log.info(f"APP.TCL COMMAND: {cmd}") if has_key(config, "source_cardano") and config["source_cardano"] == 1: CARDANO_ROOT = config["CARDANO_ROOT"] self.console.runcmd(f"export CARDANO_ROOT={CARDANO_ROOT}") self.console.runcmd(f"source {CARDANO_ROOT}/scripts/cardano_env.sh") self.console.runcmd("unset DISPLAY") self.console.runcmd( f"export _JAVA_OPTIONS='-Duser.home={config['workDir']}/.xsct'" ) self.console.runcmd(cmd, timeout=1500) if config["build_till_bsp"] or check_if_string_in_file( f"{self.wsDir}/images/results.txt", "PASS" ): return True else: self.console.logfile.error("ELF creation Failed") return False
class Petalinux(Basebuild): """This Petalinux class contains api's for all common petalinux options""" configs_dir = "project-spec/configs" meta_user = "******" recipes_apps = f"{meta_user}/recipes-apps" recipes_bsp = f"{meta_user}/recipes-bsp" project_config = f"{configs_dir}/config" rootfs_config = f"{configs_dir}/rootfs_config" user_rootfs_config = f"{meta_user}/conf/user-rootfsconfig" plnxbspconf_file = f"{meta_user}/conf/petalinuxbsp.conf" devicetree_dir = f"{recipes_bsp}/device-tree" fsbl_dir = f"{recipes_bsp}/fsbl" pmufw_dir = f"{recipes_bsp}/pmu-firmware" plm_dir = f"{recipes_bsp}/plm" psmfw_dir = f"{recipes_bsp}/psm-firmware" system_user_file = f"{devicetree_dir}/files/system-user.dtsi" kernel_dir = f"{meta_user}/recipes-kernel/linux/linux-xlnx" kernel_bbappend = f"{meta_user}/recipes-kernel/linux/linux-xlnx_%.bbappend" devicetree_append = f"{devicetree_dir}/device-tree.bbappend" fsbl_bbappend = f"{fsbl_dir}/fsbl-firmware_%.bbappend" atf_dir = f"{recipes_bsp}/arm-trusted-firmware" atf_bbappend = f"{atf_dir}/arm-trusted-firmware_%.bbappend" uboot_dir = f"{meta_user}/recipes-bsp/u-boot" uboot_bbappend = f"{uboot_dir}/u-boot-xlnx_%.bbappend" pmufw_bbappend = f"{pmufw_dir}/pmu-firmware_%.bbappend" plm_bbappend = f"{plm_dir}/plm-firmware_%.bbappend" psmfw_bbappend = f"{psmfw_dir}/psm-firmware_%.bbappend" openamp_dir = f"{meta_user}/recipes-openamp" libmetal_dir = openamp_dir openamp_bbappend = f"{openamp_dir}/open-amp/open-amp_%.bbappend" libmetal_bbappend = f"{openamp_dir}/libmetal/libmetal_%.bbappend" xen_dir = f"{meta_user}/recipes-extended/xen" xen_bbappend = f"{xen_dir}/xen_%.bbappend" recipesmm_dir = f"{meta_user}/recipes-multimedia/" gst_dir = f"{recipesmm_dir}/gstreamer/" vcu_dir = f"{recipesmm_dir}/vcu/" vcu_firmware_dir = f"{recipesmm_dir}/vcu/vcu-firmware" vcu_firmware_bbappend = f"{vcu_dir}/vcu-firmware.bbappend" vcu_omxil_dir = f"{recipesmm_dir}/vcu/libomxil-xlnx" vcu_omxil_bbappend = f"{vcu_dir}/libomxil-xlnx.bbappend" vcu_ctrlsw_dir = f"{recipesmm_dir}/vcu/libvcu-xlnx" vcu_ctrlsw_bbappend = f"{vcu_dir}/libvcu-xlnx.bbappend" vcu_modules_dir = f"{recipesmm_dir}/vcu/kernel-module-vcu" vcu_modules_bbappend = f"{vcu_dir}/kernel-module-vcu.bbappend" gstreamer_dir = f"{recipesmm_dir}/gstreamer/gstreamer1.0" gstreamer_bbappend = f"{gst_dir}/gstreamer1.0_%.bbappend" gst_plugins_bad_dir = f"{recipesmm_dir}/gstreamer/gstreamer1.0-plugins-bad" gst_plugins_bad_bbappend = f"{gst_dir}/gstreamer1.0-plugins-bad_%.bbappend" gst_plugins_base_dir = f"{recipesmm_dir}/gstreamer/gstreamer1.0-plugins-base" gst_plugins_base_bbappend = f"{gst_dir}/gstreamer1.0-plugins-base_%.bbappend" gst_plugins_good_dir = f"{recipesmm_dir}/gstreamer/gstreamer1.0-plugins-good" gst_plugins_good_bbappend = f"{gst_dir}/gstreamer1.0-plugins-good_%.bbappend" gst_omx_dir = f"{recipesmm_dir}/gstreamer/gstreamer1.0-omx" gst_omx_bbappend = f"{gst_dir}/gstreamer1.0-omx_%.bbappend" def __init__(self, config, setup: bool = True): super().__init__(config, setup) self.plnx_tool = config["PLNX_TOOL"] self.bsp_path = config["BSP_PATH"] self.plnx_pkg = None self.plnx_tmp = config["PLNX_TMP_PATH"] self.plnx_proj = config["plnx_proj"] self.workDir = config["workDir"] self.imagesDir = config["imagesDir"] self.wsDir = config["wsDir"] self.config["platform"] = config["platform"] self.proj_dir = f"{self.workDir}/{self.plnx_proj}" self.petalinux_images = f"{self.proj_dir}/images" self.qemu_boot = False # Acquire bash console. 
self.runner = Xexpect(log, exit_nzero_ret=True) atexit.register(self.__del__) myconfs = [ "RECIPE_NAME", "RECIPE_NEW_NAME", "FETCHURI", "SOURCE_PATH", "RECIPE_DESTINATION", "IMAGE_RECIPE", "WORKSPACE_LAYERPATH", "EXISTING_RECIPENAME", "RECIPE_UPGRADE", ] for myconf in myconfs: if myconf in config: setattr(self, myconf.lower(), getattr(config, myconf)) def source_tool(self, timeout: int = 120) -> None: """This Function source the petalinux tool. Parameters: PLNX_TOOL - by default set to petalinux daily_latest """ if not is_file(self.plnx_tool): raise Exception(f"Error: ({self.plnx_tool}) is not a file") cmd = f"source {self.plnx_tool}" self.runner.runcmd(cmd=str(cmd), timeout=timeout) log.info(f"Petalinux Tool : {self.runner.runcmd(f'echo $PETALINUX')}") def create_project(self, timeout: int = 300) -> None: """This Function sources the creates petalinux project based on user configuration, Parameters: PLNX_TOOL : petalinux tool path platform : versal, zynqMP, zynq, microblaze plnx_flow : BSP, template (by default BSP) BSP_PATH : bsp path (default set to petalinux daily_latest) PLNX_BSP : bsp name plnx_proj : project name """ remove(f"{self.proj_dir}") self.source_tool() cmd = f"petalinux-create -t project " if self.config.get("plnx_flow") == "template": log.info("Using templete flow to create petalinux project...") cmd += f"--template {self.config['platform']}" else: log.info("Using bsp flow to create petalinux project...") self.plnx_bsp = self.config["PLNX_BSP"] self.plnx_bsp_path = get_original_path( f"{self.bsp_path}/{self.plnx_bsp}") if not is_file(self.plnx_bsp_path): log.error(f"Petalinux BSP {self.plnx_bsp_path} Not found") assert False, "Petalinux BSP Not found" cmd += f"-s {self.plnx_bsp_path} " if "plnx_proj" in self.config: cmd += f" -n {self.plnx_proj}" self.runner.runcmd(f"cd {self.workDir}") self.runner.runcmd(cmd=str(cmd), timeout=timeout) self.runner.runcmd(f"cd {self.plnx_proj}") os.chdir(self.proj_dir) if self.config.get("plnx_init_cmds"): self.runner.runcmd_list(self.config.plnx_init_cmds) def fetch_project(self): """This Function clones petalinux project from git, Parameters: PLNX_TOOL : petalinux tool path plnx_proj : project name bsp_src : bsp source >>> git.bsp.url : "https://gitenterprise.xilinx.com/bsp_src.git" git.bsp.branch : "master" """ self.source_tool() if "git.bsp.url" in self.config: url = self.config.git.bsp.url if "git.bsp.branch" not in self.config: self.config.git.bsp.branch = "master" clone( self.config.git.bsp, self.proj_dir, recurse_submodules=self.config.git.bsp.recurse_submodules, ) os.chdir(self.proj_dir) self.runner.runcmd(f"cd {self.proj_dir}") else: err_msg = "git.bsp.url not found in config" assert False, err_msg def create_apps(self, timeout: int = 300) -> None: """This Function creates user applications in petalinux project, Parameters: user_apps : it is dictionary >>> Usage: 1. user_apps = { 'appname' : [ 'app files1', 'app file2' ] } 2. 
user_apps = { 'appname' : [ 'app files' ], 'appname_bbfile' : 'userspecfic bbfile path'} """ plnx_apps = self.config["user_apps"] if plnx_apps: for app_name, files in plnx_apps.items(): app_name = app_name.lower() if "bbfile" not in app_name: files = convert_list(files) if self.config.get("plnx_flow") == "template": create_apps_cmd = f"petalinux-create -t apps --template install -n {app_name.strip()} --enable" self.runner.runcmd(cmd=str(create_apps_cmd), timeout=timeout) remove_all_files( f"{self.recipes_apps}/{app_name}/files/") else: create_apps_cmd = ( f"petalinux-create -t apps -n {app_name.strip()} --enable" ) self.runner.runcmd(cmd=str(create_apps_cmd), timeout=timeout) for data in files: data = parse_config(self.config, data) if is_dir(data): copyDirectory( data, f"{self.proj_dir}/{self.recipes_apps}/{app_name}/files/", ) else: copy_file( data, f"{self.proj_dir}/{self.recipes_apps}/{app_name}/files/", ) for app_name, files in plnx_apps.items(): app_name = app_name.lower() if "bbfile" in app_name: files = parse_config(self.config, files) app_name = os.path.splitext(os.path.basename(files))[0] copy_file( files, f"{self.proj_dir}/{self.recipes_apps}/{app_name}/") else: log.warning("No user apps to create ") def set_tmp_path(self): """This Function to set temp path for petalinux project Parameters: PLNX_TMP_PATH : Default it set to '/tmp/petalinux' """ if self.plnx_tmp: tmp_dir = (self.plnx_proj + "-" + datetime.now().strftime("%Y.%m.%d-%H.%M.%S")) if os.getenv("JOB_NAME"): tmp_dir = f"{os.getenv('JOB_NAME')}/{tmp_dir}" self.plnx_tmp = os.path.join(self.plnx_tmp, tmp_dir) mkdir(self.plnx_tmp) os.chmod(self.plnx_tmp, 0o777) add_newline(f"{self.project_config}", f'CONFIG_TMP_DIR_LOCATION="{self.plnx_tmp}"') if is_filesystem_nfs(self.proj_dir): self.plnx_tmp = self.get_tmp_path() def get_tmp_path(self): with open(f"{self.proj_dir}/{self.project_config}", "r") as read_obj: for line in read_obj: if "CONFIG_TMP_DIR_LOCATION" in line: matchObj = re.search('CONFIG_TMP_DIR_LOCATION="(.*)"', line) tmp_path = matchObj.group(1) return tmp_path def silent_config(self, timeout: int = 600) -> None: """This Function apply the user configuration to petalinux project Parameter: plnx_config_component (optional) """ plnx_silent_cmd = "yes | petalinux-config" if "plnx_config_component" in self.config: plnx_silent_cmd += f" -c {self.config['plnx_config_component']}" plnx_silent_cmd += " --silentconfig" self.runner.runcmd(cmd=str(plnx_silent_cmd), timeout=timeout) def get_hwdesign(self, timeout: int = 600) -> None: """This Function apply hardware design file(.xsa) on petalinux project Parameter: hw_design_path : .xsa file path """ hw_design = get_original_path(self.config["hw_design_path"]) hwdesign_cmd = ( f"yes | petalinux-config --get-hw-description={hw_design} --silentconfig" ) self.runner.runcmd(cmd=str(hwdesign_cmd), timeout=timeout) def plnx_build(self, timeout: int = 3600) -> None: """This Function runs petalinux build command on project Parameter: None """ build_cmd = "petalinux-build" if self.config.get("plnx_build_timeout", ""): timeout = self.config.plnx_build_timeout self.runner.runcmd(cmd=str(build_cmd), timeout=timeout) def set_config(self): """This Function apply the user configs on petalinux project >>> Usage: plnx_configs = { rootfs : [ 'CONFIG_xen=y', 'CONFIG_open-amp_demo is not set' ], project : [ 'CONFIG_ROOTFS_INITRD=y' ], kernel : [ 'CONFIG_XILINX_ETHERNET=y'], user-rootfs : ['CONFIG_kernel-module-hdmi'], bspconf : ['IMAGE_INSTALL_append = "iperf3"'] } """ component_map = { 
"user-rootfs": { "conf": f"{self.user_rootfs_config}" }, "rootfs": { "conf": f"{self.rootfs_config}" }, "project": { "conf": f"{self.project_config}" }, "kernel": { "conf": f"{self.kernel_dir}/bsp.cfg", "conf_dir": f"{self.kernel_dir}", "bbappend": [ 'FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"', 'SRC_URI += "file://bsp.cfg"', ], }, "uboot": { "conf": f"{self.uboot_dir}/files/bsp.cfg", "conf_dir": f"{self.uboot_dir}", "bbappend": ['SRC_URI += "file://bsp.cfg"'], }, "bspconf": { "conf": f"{self.plnxbspconf_file}" }, } components = ("plm", "pmufw", "fsbl", "psmfw") for key, value in self.config["plnx_configs"].items(): value = convert_list(value) if key in component_map: if "conf_dir" in component_map[key].keys(): mkdir(str(component_map[key]["conf_dir"])) if "bbappend" in component_map[key].keys(): append_list = convert_list(component_map[key]["bbappend"]) for val in append_list: add_newline(getattr(self, f"{key}_bbappend"), str(val)) for itr in value: itr = parse_config(self.config, itr) add_newline(str(component_map[key]["conf"]), str(itr)) elif key in components: mkdir(getattr(self, f"{key}_dir")) for itr in value: itr = parse_config(self.config, itr) add_newline(getattr(self, f"{key}_bbappend"), str(itr)) else: err_msg = f"Invalid arg {key} in plnx_configs" assert False, err_msg def apply_patch(self): components = ("atf", "pmufw", "fsbl", "uboot") for key, value in self.config["apply_patches"].items(): value = convert_list(value) component_dir = f"{key}_dir" mycomponent_dir = getattr(self, component_dir) append_file = f"{key}_bbappend" myappend_file = getattr(self, append_file) mkdir(mycomponent_dir) if "kernel" in key: add_newline(f"{self.kernel_bbappend}", 'SRC_URI += "file://bsp.cfg"') add_newline( myappend_file, 'FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"', ) if key in components: add_newline( myappend_file, 'FILESEXTRAPATHS_prepend := "${THISDIR}:"', ) else: add_newline( myappend_file, 'FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"', ) for itr in value: if itr.endswith(".patch"): if is_file(f"{itr}"): copy_file(f"{itr}", mycomponent_dir) itr = os.path.basename(f"{itr}") add_newline(myappend_file, f'SRC_URI += "file://{itr}"') else: copy_file(f"{self.config['workDir']}/{itr}", mycomponent_dir) add_newline(myappend_file, f'SRC_URI += "file://{itr}"') else: log.info("Invalid patch...") def apply_external_component(self): """This function adds support to apply external src on petalinux project for kernel, uboot, atf, fsbl, xen, openamp components Parameters: url - component git url externalsrc - local src path srcrev - commid id/tag branch - git branch checksum - source checksum >>> Usage: plnx.component.uboot.url = "<git url>" plnx.component.uboot.branch = "master" plnx.component.uboot.srcrev = "12223434222" plnx.component.uboot.checksum = "<checksum>" plnx.component.xen.externalsrc= "<external source path>" plnx.component.openamp.url= "<source url>" """ def _external_repo_setup(self, key): if key == "openamp": key = "open-amp" component = re.sub("-", "", key) comp = f"{component}_bbappend" comp_dir = f"{component}_dir" mycomp_dir = getattr(self, comp_dir) mkdir(f"{mycomp_dir}/{key}") bbappend = getattr(self, comp) return bbappend def _component_map(self): component_map = {} component_map["atf"] = "ARM__TRUSTED__FIRMWARE" component_map["uboot"] = "U__BOOT" component_map["kernel"] = "LINUX__KERNEL" return component_map for key, value in self.config.plnx.component.items(): if key in ["kernel", "uboot", "atf"]: component_map = _component_map(self) if value["url"]: add_newline( 
f"{self.project_config}", f"CONFIG_SUBSYSTEM_COMPONENT_{component_map[key]}_NAME_REMOTE=y", ) add_newline( f"{self.project_config}", f"# CONFIG_SUBSYSTEM_COMPONENT_{component_map[key]}_NAME_EXT__LOCAL__SRC is not set", ) add_newline( f"{self.project_config}", f"CONFIG_SUBSYSTEM_COMPONENT_{component_map[key]}_NAME_REMOTE_DOWNLOAD_PATH=\"{value['url']};protocol=https\"", ) if value["srcrev"]: add_newline( f"{self.project_config}", f"CONFIG_SUBSYSTEM_COMPONENT_{component_map[key]}_NAME_REMOTE_REFERENCE=\"{value['srcrev']}\"", ) else: add_newline( f"{self.project_config}", f'CONFIG_SUBSYSTEM_COMPONENT_{component_map[key]}_NAME_REMOTE_REFERENCE="${{AUTOREV}}"', ) if value["branch"]: add_newline( f"{self.project_config}", f"CONFIG_SUBSYSTEM_COMPONENT_{component_map[key]}_NAME_REMOTE_BRANCH=\"{value['branch']}\"", ) if value["checksum"]: add_newline( f"{self.project_config}", f"CONFIG_SUBSYSTEM_COMPONENT_{component_map[key]}_LIC_FILES_CHKSUM_REMOTE=\"{value['checksum']}\"", ) else: if value["externalsrc"]: add_newline( f"{self.project_config}", f"# CONFIG_SUBSYSTEM_COMPONENT_{component_map[key]}_NAME_REMOTE is not set", ) add_newline( f"{self.project_config}", f"CONFIG_SUBSYSTEM_COMPONENT_{component_map[key]}_NAME_EXT__LOCAL__SRC=y", ) add_newline( f"{self.project_config}", f"CONFIG_SUBSYSTEM_COMPONENT_{component_map[key]}_NAME_EXT__LOCAL__SRC_PATH=\"{value['externalsrc']}\"", ) else: bbappend = _external_repo_setup(self, key) if value["externalsrc"]: repo_name = get_base_name(value["externalsrc"]) self.runner.runcmd( cmd= f"rsync -av --exclude '.git*' {value['externalsrc']} {self.workDir}", timeout=120, ) add_newline(bbappend, f"inherit externalsrc") add_newline(bbappend, f'EXTERNALSRC = "{self.workDir}/{repo_name}"') add_newline( bbappend, f'EXTERNALSRC_BUILD = "{self.workDir}/{repo_name}"') if value["checksum"]: add_newline( bbappend, f"LIC_FILES_CHKSUM = \"file://license.txt;md5={value['checksum']}\"", ) elif value["url"]: if value["checksum"]: add_newline( bbappend, f"LIC_FILES_CHKSUM = \"file://license.txt;md5={value['checksum']}\"", ) add_newline(bbappend, f"REPO = \"{value['url']};protocol=https\"") if value["srcrev"]: if value["srcrev"] == "AUTOREV": add_newline(bbappend, 'SRCREV = "{AUTOREV}"') else: add_newline(bbappend, f"SRCREV = \"{value['srcrev']}\"") if value["branch"]: add_newline(bbappend, f"BRANCH = \"{value['branch']}\"") # Add build dependency using this var, for the component being built if value["depends"]: add_newline(bbappend, f"DEPENDS += \"{value['depends']}\"") else: log.info( "Invalid external component src option.... 
using default petalinux sources" ) def update_dtsi_file(self): """This function adds user dtsi file to petalinux project Parameter: plnx_user_dtsi_files : user .dtsi file >>> Usage: plnx_user_dtsi_files = ['<path to dtsi file>'] """ mkdir(self.devicetree_dir) if "plnx_user_dtsi_files" in self.config: dt_files = convert_list(self.config["plnx_user_dtsi_files"]) for file in dt_files: copy_file(file, f"{self.devicetree_dir}/files/") dt_file = os.path.basename(file) if dt_file.endswith(".dtsi"): add_newline(f"{self.system_user_file}", f'/include/ "{dt_file}"') add_newline(f"{self.devicetree_append}", f'SRC_URI += "file://{dt_file}"') else: log.error("Invalid dtsi input") def link_dtsi_file(self): """This function softlink system-user.dtsi file with user provided dtsi file Parameter: plnx_user_dtsi_file : user .dtsi file >>> Usage: plnx_user_dtsi_file = 'path to user dtsi file' """ if "plnx_user_dtsi_file" in self.config: plnx_user_dtsi_file = self.config["plnx_user_dtsi_file"] system_dt_file = os.path.basename(self.system_user_file) user_dt_file = os.path.basename(plnx_user_dtsi_file) if os.path.isabs(plnx_user_dtsi_file): if is_file(plnx_user_dtsi_file): copy_file(plnx_user_dtsi_file, f"{self.devicetree_dir}/files/") else: err_msg = "ERROR: No {plnx_user_dtsi_file} exists" assert False, err_msg os.chdir(f"{self.devicetree_dir}/files/") symlink(system_dt_file, user_dt_file) os.chdir(self.proj_dir) def pack_bsp(self, timeout: int = 300) -> None: """This Function packages the petalinux project into BSP""" self.plnx_pkg = self.config["plnx_pkg"] if not self.plnx_pkg.split(): self.plnx_pkg = self.plnx_bsp pack_cmd = f"petalinux-package --bsp -p ./ -o {self.workDir}/{self.plnx_pkg}" self.runner.runcmd(cmd=str(pack_cmd), timeout=timeout) def run_user_script(self, timeout: int = 600) -> None: """This Function can be useful to run any user script after petalinux project creation. 
Parameters: user_script : supports .sh and .py >>> Usage: user_script = { "file_name" : "example.sh", "args" : "<script args if any>", "expected" : "<any script to match to know the script execution complete>" } """ userSuite = self.config["user_script"] script = userSuite["file_name"] args = userSuite["args"] expected = userSuite["expected"] if is_file(f"{self.workDir}/{script}") == True: if script.endswith(".sh"): config_cmd = f"sh {self.workDir}/{script} {args}" elif script.endswith(".py"): config_cmd = f"python {self.workDir}/{script} {args}" else: log.error(f"{script} not supported to execute") else: raise Exception(f"ERROR: {self.workDir}/{script} not exist") self.runner.runcmd(cmd=str(config_cmd), expected=expected, timeout=timeout) def plnx_package_boot(self, timeout: int = 600) -> None: """This Function creates BOOT.BIN""" if self.config["platform"].strip().lower() == "zynqmp": plnx_bin_cmd = "petalinux-package --boot %s %s %s %s" % ( "--fsbl images/linux/zynqmp_fsbl.elf", "--u-boot images/linux/u-boot.elf", "--pmufw images/linux/pmufw.elf", "--fpga images/linux/system.bit --force", ) elif self.config["platform"].strip().lower() == "zynq": plnx_bin_cmd = "petalinux-package --boot %s %s %s" % ( "--fsbl images/linux/zynq_fsbl.elf", "--u-boot images/linux/u-boot.elf", "--fpga images/linux/system.bit --force", ) elif self.config["platform"].strip().lower() == "versal": plnx_bin_cmd = "petalinux-package --boot --force --u-boot" elif self.config["platform"].strip().lower() == "microblaze": plnx_bin_cmd = "" else: log.error(f"{(self.config['platform'])} not supported") raise Exception( f"Error: {(self.config['platform'])} not supported") self.runner.runcmd(cmd=str(plnx_bin_cmd), timeout=timeout) def build_sdk(self, timeout: int = 3600) -> None: """This Function runs petalinux sdk command on project""" build_sdk_cmd = "petalinux-build --sdk" self.runner.runcmd(cmd=str(build_sdk_cmd), timeout=timeout) def extract_sdk(self, timeout: int = 3600) -> None: """This Function extracts the sdk on project""" extract_sdk_cmd = "petalinux-package --sysroot" self.runner.runcmd(cmd=str(extract_sdk_cmd), timeout=timeout) # Fix me def plnx_package_wic(self, wic_args=None, timeout=600): """This Function can be useful to generate wic image to prepare sd card. 
Parameters: wic_args : None (or) --bootfiles "BOOT.BIN boot.scr system.dtb image.ub" """ wic_cmd = "petalinux-package --wic" if wic_args: wic_cmd += f" {wic_args}" self.runner.runcmd(cmd=str(wic_cmd), timeout=timeout) def deploy(self): """This Function deploy the generated petalinux build images to specfic location Parameters: plnx_artifacts - to copy any specific files deploy_dir : to copy images to specific location >>> Usage: plnx_artifacts = ['image.ub', 'BOOT.BIN', 'Image', 'system.xsa' ] deploy_dir = "<path>" """ ret = True if "deploy_dir" in self.config: deploy_dir = self.config["deploy_dir"] if not is_dir(deploy_dir): mkdir(deploy_dir) log.info(f"Checking petalinux artifacts in {self.proj_dir}") if "plnx_artifacts" in self.config: for image in self.config["plnx_artifacts"]: image_file = find_file(image, f"{self.proj_dir}/images/linux") if image_file: if is_file(image_file): copy_file(image_file, deploy_dir) elif is_dir(image_file): copyDirectory(image_file, f"{deploy_dir}/{image}", symlinks=True) else: log.error(f"{image} does not exist in {self.workDir}") ret = False copyDirectory(f"{self.proj_dir}/images/linux", self.imagesDir, symlinks=True) return ret def deploy_bsp(self): """This Function deploys the petalinux bsp to specified location Parameter: deploy_dir : <path> """ deploy_dir = self.config["deploy_dir"] if deploy_dir: if is_dir(deploy_dir) == False: mkdir(deploy_dir) else: remove(deploy_dir) mkdir(deploy_dir) log.info(f"Copying plnx packed bsp to {deploy_dir}") shutil.copy(f"{self.workDir}/{self.plnx_pkg}", deploy_dir) shutil.copy(f"{self.workDir}/{self.plnx_pkg}", self.imagesDir) def _run_boot( self, cmd: str, proj_path: Optional[str] = None, hwserver: Optional[str] = None, bitfile: Optional[str] = None, rootfs: Optional[str] = None, ) -> None: """This function create petalinux boot command""" if proj_path: self.proj_dir = proj_path if not bitfile: bitfile = f"{self.proj_dir}/pre-built/linux/images/system.bit" if bitfile.endswith(".bit"): if self.config.get("platform") != "versal": if self.config.get("plnx_no_rev_check"): cmd += f' --after-connect "fpga -no-revision-check {bitfile}"' else: cmd += f" --bitstream {bitfile}" if rootfs: cmd += f" --rootfs {rootfs}" if hwserver: cmd += f" --hw_server-url {hwserver}:3121" if not is_dir(self.proj_dir): log.error(f"Petalinux Project Directory {self.proj_dir} not found") assert False, "Petalinux project directory not found" self.runner.runcmd(f"cd {self.proj_dir}") self.runner.runcmd(cmd=str(cmd), timeout=3600) def _run_qemu_boot(self, cmd, proj_path=None, qemu_args=None, rootfs=None): """This function create petalinux qemu boot command""" if proj_path: self.proj_dir = proj_path if rootfs: cmd += f" --rootfs {rootfs}" if qemu_args: cmd += f" {qemu_args}" if not is_dir(self.proj_dir): log.error(f"Petalinux Project Directory {self.proj_dir} not found") assert False, "Petalinux project directory not found" self.runner.runcmd(f"cd {self.proj_dir}") self.qemu_boot = True self.runner.sendline(cmd=str(cmd)) def plnx_boot( self, boottype=None, proj_path=None, hwserver=None, bitfile=None, qemu_args=None, rootfs=None, ): if boottype not in ("prebuilt 2", "prebuilt 3", "kernel", "uboot"): raise Exception("""Invalid petalinux boot type selected support types are: prebuilt 2, prebuilt 3, kernel, uboot""") if hwserver: cmd = f"petalinux-boot --jtag -v --{boottype}" self._run_boot(cmd, proj_path, hwserver, bitfile, rootfs) else: cmd = f"petalinux-boot --qemu --{boottype}" self._run_qemu_boot(cmd, proj_path, qemu_args, rootfs) def __del__(self): 
"""This function deletes the petalinux project created under TEMP path. Parameters: PLNX_TMP_PATH : tmp path to to build petalinux project skip_clean_temp : to skip temp clean >>> skip_clean_temp = true """ if self.config.get("skip_clean_temp"): log.info("Skipped petaliux project temp clean...") else: if self.plnx_tmp: log.info( f"Petaliunx project temp clean successful on path : {self.plnx_tmp} ..." ) remove(self.plnx_tmp) if self.qemu_boot: self.runner.sendcontrol("a") self.runner.sendline("x") def devtool(self, operation, timeout=12000): """This function performs petalinux-devtool options""" self.source_tool() self.create_project() if operation == "add": self.runner.runcmd( cmd= f" petalinux-devtool {operation} {self.recipe_name} {self.fetchuri}", timeout=2000, ) elif operation == "modify": self.runner.runcmd( cmd= f" petalinux-devtool {operation} {self.existing_recipename} ", timeout=1000, ) elif operation == "upgrade": self.runner.runcmd( cmd=f" petalinux-devtool {operation} {self.recipe_upgrade}", timeout=2000, ) elif operation in ("status", "export"): self.runner.runcmd_list( cmd_list=[ f"petalinux-devtool add {self.recipe_name} {self.fetchuri}", f"petalinux-devtool {operation}", ], timeout=600, ) elif operation in ( "latest-version", "check-upgrade-status", "search", "build", "find-recipe", "configure-help", "update-recipe", "configure", ): self.runner.runcmd_list( cmd_list=[ f"petalinux-devtool add {self.recipe_name} {self.fetchuri}", f"petalinux-devtool {operation} {self.recipe_name}", ], timeout=2000, ) elif operation == "rename": self.runner.runcmd_list( cmd_list=[ f"petalinux-devtool add {self.recipe_name} {self.fetchuri}", f"petalinux-devtool {operation} {self.recipe_name} {self.recipe_new_name}", f"petalinux-devtool search {self.recipe_new_name}", ], timeout=2000, ) elif operation == "reset": self.runner.runcmd_list( cmd_list=[ f"petalinux-devtool add {self.recipe_name} {self.fetchuri}", f" petalinux-devtool {operation} {self.recipe_name} ", f"cd {self.source_path} && rm -rf {self.recipe_name}", f"cd && cd {self.workDir}/{self.plnx_proj}", ], timeout=2000, ) elif operation == "finish": self.runner.runcmd_list( cmd_list=[ f"petalinux-devtool add {self.recipe_name} {self.fetchuri}", f" petalinux-devtool {operation} {self.recipe_name} {self.workDir}/{self.plnx_proj}/{self.recipe_destination}", f"cd {self.source_path} && rm -rf {self.recipe_name}", f"petalinux-devtool find-recipe {self.recipe_name}", f"cd && cd {self.workDir}/{self.plnx_proj}", ], timeout=2000, ) elif operation == "build-image": self.runner.runcmd( cmd=f" petalinux-devtool {operation} {self.image_recipe}", timeout=2000) elif operation == "create-workspace": self.runner.runcmd( cmd= f" petalinux-devtool {operation} {self.workspace_layerpath}", timeout=600, ) elif operation == "import": self.plnx_bsp = self.config["PLNX_BSP"] self.runner.runcmd_list( cmd_list=[ f"petalinux-devtool add {self.recipe_name} {self.fetchuri}", f" petalinux-devtool export && cd ..", f"yes | petalinux-create -t project -s {self.bsp_path}{self.plnx_bsp}", f"cd {self.plnx_proj} && petalinux-devtool {operation} {self.workDir}/{self.plnx_proj}.old/build/workspace-export-*tar.gz -o", ], timeout=600, ) elif operation == "extract": self.runner.runcmd( cmd= f"petalinux-devtool {operation} {self.existing_recipename} {self.workspace_layerpath}", timeout=2000, ) elif operation == "--help": self.runner.runcmd(cmd=f"petalinux-devtool {operation} ")
class Cardano(Basebuild): def __init__(self, config, setup=True): super().__init__(config, setup) self.console = Xexpect(log, exit_nzero_ret=True) def configure(self): super().configure() self.srcDir = os.path.join(self.workDir, "src") self.cdoDir = os.path.join(self.workDir, "Work/ps/cdo") VITIS_DIR = self.config.vitisPath AIETOOLS_ROOT = self.config.AIETOOLS_ROOT SYSROOT = self.config.SYSROOT version_dotless = self.config.version.replace(".", "") base_platform = f"{self.config.BASE_PLATFORM_NAME}_{version_dotless}0_{self.config.BASE_PLATFORM_VERSION_EXTENSION}" self.PFM_XPFM = os.path.join(self.config.PLATFORMS_PATH, base_platform, f"{base_platform}.xpfm") XLNX_LICENSE = self.config["XILINXD_LICENSE_FILE"] cmdlist = [ f"export XILINXD_LICENSE_FILE={XLNX_LICENSE}", f"export VITIS_DIR={VITIS_DIR}", f"export AIETOOLS_ROOT={AIETOOLS_ROOT}", f"export XILINX_VITIS_AIETOOLS={AIETOOLS_ROOT}", f"export SYSROOT={SYSROOT}", f"export PFM_XPFM={self.PFM_XPFM}", f"export PATH={VITIS_DIR}/bin:$PATH", f"source {self.config.vitisPath}/settings64.sh", ] self.console.runcmd_list(cmdlist) def compile_cardano_app(self): self.console.runcmd(f"cd {self.workDir}") # compile cardano and generate cdo compile_cmd = [ "aiecompiler -v", self.config["CARDANO_FLAGS"], f'-include="{self.srcDir}/kernels"', f'-include="{self.srcDir}"', f'{self.srcDir}/{self.config["cardano_src"]}.cpp', ] cmd = " ".join(compile_cmd) timeout = int(self.config.get("cardano_timeout", 800)) self.console.runcmd(cmd, timeout=timeout, err_msg="Cardano App Compilation Failure") def gen_xclbin(self): gen_cmd = [ "v++ -s -p -t hw", f"--platform {self.PFM_XPFM}", f"--package.out_dir {self.workDir} --package.defer_aie_run", f"--config {self.config.vpp_package_path}/package.cfg", f"-o aie_xrt.xclbin", f"{self.workDir}/libadf.a", ] cmd = " ".join(gen_cmd) timeout = int(self.config.get("vpp_timeout", 600)) self.console.runcmd(cmd, timeout=timeout, err_msg="xclbin generation failed") def generate_pdis(self): self.config["console"] = self.console assert pdi(self.config, "new"), "ERROR: PDI Generation failed" def copy_images(self): ret = False try: mkdir(f"{self.imagesDir}/elfs") mkdir(f"{self.imagesDir}/src") self.elfs = glob.glob( f"{self.workDir}/Work/aie/{self.config['tiles']}") for elf in self.elfs: name = os.path.basename(elf) copy_file(f"{elf}/Release/{name}", f"{self.imagesDir}/elfs") self.cdos = get_files(self.cdoDir, extension="bin") for cdo in self.cdos: copy_file(f"{self.cdoDir}/{cdo}", f"{self.imagesDir}") self.control_cpps = get_files(f"{self.workDir}/Work/ps/c_rts/", extension="cpp") for cpp in self.control_cpps: copy_file( f"{self.workDir}/Work/ps/c_rts/{cpp}", f"{self.imagesDir}/src", ) copyDirectory(f"{self.workDir}/src", f"{self.imagesDir}/src") self.xclbin = get_files(self.workDir, extension="xclbin") for xclbin in self.xclbin: copy_file(f"{self.workDir}/{xclbin}", self.imagesDir) copyDirectory(f"{self.workDir}/Work", f"{self.imagesDir}/Work") ret = True except Exception as err: log.error(err) return ret def simulate_cardano_app(self): if self.config.external_aienginev2 and self.config.get( "external_cardano_src"): self.config.AIETOOLS_ROOT = ( f"{self.config.external_cardano_src}/prep/rdi/aietools") self.console.runcmd( f"export AIETOOLS_ROOT={self.config.AIETOOLS_ROOT}") self.console.runcmd( f"source {self.config.AIETOOLS_ROOT}/scripts/aietools_env.sh") self.console.runcmd(f"cd {self.workDir}") if self.config.get("generate_input_data") and self.config.get( "generate_src"): gen_cmd = [ 
f"{self.config.AIETOOLS_ROOT}/tps/lnx64/gcc/bin/g++", "-static-libstdc++ -std=c++11", f"-I {self.workDir}/Work/temp/", f"{self.workDir}/src/{self.config.generate_src}.cpp", f"-o {self.workDir}/{self.config.generate_src}.out", ] cmd = " ".join(gen_cmd) self.console.runcmd(cmd) self.console.runcmd( f"{self.workDir}/{self.config.generate_src}.out") timeout = int(self.config.get("simulation_timeout", 700)) self.console.runcmd( f"aiesimulator --pkg-dir={self.workDir}/Work", timeout=timeout, err_msg="Cardano app Simulation Failed", ) def incremental_build(self): if self.config.external_aienginev2 and self.config.get( "external_cardano_src"): self.config.AIETOOLS_ROOT = ( f"{self.config.external_cardano_src}/prep/rdi/aietools") self.console.runcmd( f"export AIETOOLS_ROOT={self.config.AIETOOLS_ROOT}") self.console.runcmd( f"source {self.config.AIETOOLS_ROOT}/scripts/aietools_env.sh") lock_path = os.path.join( get_abs_path(self.config.external_cardano_src), "src/products/cardano/", "incremental_build.lock", ) lock = FileLock(lock_path) with lock: build_number = os.getenv("BUILD_NUMBER") if not check_if_string_in_file( f"{self.config.external_cardano_src}/logs/incremetal_build.log", build_number, ): self.console.runcmd( f"sh {self.config.external_cardano_src}/incremental_build.sh {self.config.external_aienginev2}", expected="Incremental Build SUCCESSFUL", expected_failures=["Incremental Build FAILED"], timeout=3000, ) else: if check_if_string_in_file( f"{self.config.external_cardano_src}/logs/incremetal_build.log", "Incremental Build FAILED", ): raise Exception("ERROR: Incremental Build FAILED")
class AppBuilder(Basebuild): def __init__(self, config, setup: bool = True): super().__init__(config, setup=setup) self.console = Xexpect(log, exit_nzero_ret=True) self.config_var_dict = self.set_config_var() self.defconfig_var_dict = self.set_defconfig_var() self.supported_components_list = self.get_defconfig_var().keys() self.user_var_dict = "" self.yaml_file_path = "" self.CONFIG_FILE_SET = False self.extra_env = ["scriptsDir"] self.cmd = "" self.config_var() def set_config_var(self): """Loads config.yaml from absolute path set in configuration. If path not set in configuration, use config.yaml included in library. """ if "XSCT_CONFIG" in self.config: xsct_path = self.config["XSCT_CONFIG"] else: dir_path = os.path.dirname(os.path.realpath(__file__)) xsct_path = os.path.join(dir_path, "config.yaml") data = load_yaml(xsct_path) if data is None or data == "": print_err_exit("File: config.yaml is empty or not found") for key in data: data[key] = set(data[key]) return data def config_var(self): """Exports configuration variables needed by tcl scripts. If scriptsDir is not specified in config, use this file's directory. """ if "scriptsDir" not in self.config: self.config["scriptsDir"] = os.path.dirname( os.path.realpath(__file__)) # Set toolchain in env, default=linaro if self.config.get("XSCT_TOOLCHAIN") == "armcc": self.console.runcmd(f"export XSCT_TOOLCHAIN=armcc") self.console.runcmd( f"export ARMLMD_LICENSE_FILE={self.config['ARMLMD_LICENSE_FILE']}" ) self.console.runcmd( f"export PATH={self.config['ARMCC_BIN_PATH']}" + ":$PATH") if "extra_xsct_env" in self.config: if isinstance(self.config["extra_xsct_env"], list): self.extra_env.extend(self.config["extra_xsct_env"]) else: self.extra_env.extend(self.config["extra_xsct_env"].split(" ")) for key in self.extra_env: value = self.config[key] self.console.runcmd(f"export {key}={value}") def set_defconfig_var(self): """Loads defconfig.yaml from absolute path set in configuration. If path not set in configuration, use defconfig.yaml included in library. 
""" if "DEFAULT_XSCT_CONFIG" in self.config: defconfig_path = self.config["DEFAULT_XSCT_CONFIG"] else: dir_path = os.path.dirname(os.path.realpath(__file__)) defconfig_path = os.path.join(dir_path, "defconfig.yaml") data = load_yaml(defconfig_path) if data is None or data == "": err_msg = "defconfig.yaml is empty or not found or file reading failed" log.error(err_msg) raise Exception(err_msg) # Find the supported components valid_components = list(data.keys()) # Remove the common key from the supported components del_element("common", valid_components) # Add the common variables to each component for component in valid_components: try: # Find the common dictionary values in common Vs specified component common_vals = list( set(data["common"].keys()) & set(data[component].keys())) # Check if there are some common values present in common and individual component correction = {} for val in common_vals: correction[val] = data[component][val] # Update the data with the common values data[component].update(data["common"]) # Apply the correction data[component].update(correction) except: print_err_exit("Setting defconfig variables failed !") del_key("common", data) return data def get_user_args(self, argv=None): parser = argparse.ArgumentParser( description= "This script is used to build elf for any component using xsct") parser.add_argument("--app", help="App Name to be build") parser.add_argument("--arch", default="64", help="Platform Architecture") parser.add_argument("--bspname", help="Standalone BSP Name") parser.add_argument( "--component", choices=self.get_supported_components(), help="Supported Components", ) parser.add_argument("--do_compile", default="1", choices=["0", "1"], help="Build the project") parser.add_argument( "--use_dependency_props", default="1", choices=["0", "1"], help="Use dependency.props", ) parser.add_argument( "--do_cleanup", default="1", choices=["0", "1"], help="Clean up after building the project", ) parser.add_argument("--driver", help="Driver Name to be build") parser.add_argument("--elf_name", help="Cumtome Elf Name to be used") parser.add_argument("--example_name", help="Example to be build") parser.add_argument("--extension", help="Run time functions to perform") parser.add_argument("--extra_args", nargs="*") parser.add_argument("--file", type=open, action=LoadFromFile) parser.add_argument("--hdf", help="Path of the directory containing hdf") parser.add_argument("--hwpname", default="hw0", help="Name of HW project") parser.add_argument("--lib", help="Libraries Required") parser.add_argument("--library_name", help="Library name to be build") parser.add_argument("--thirdparty_name", help="ThirdParty name to be build") parser.add_argument( "--thirdparty_dir", help="ThirdParty Directory where apps/libs are present") parser.add_argument("--osname", default="standalone", help="Operating System") parser.add_argument( "--out_dir", help="Output directory path where elfs needs to be copied") parser.add_argument("--pname", help="Name of the project") parser.add_argument("--processor", help="Processor used for compilation") parser.add_argument("--rp", help="Repo Path") parser.add_argument("--rp_intg", help="INTG Repo Path") parser.add_argument( "--import_sources", help="Path of directory where source to be imported is present", ) parser.add_argument( "--import_args", help='"Import sources arguments like : -soft-link, etc') parser.add_argument("--ws", help="Workspace used for compilation") parser.add_argument("--xsct_path", help="Xsct Bin Path") 
parser.add_argument("--functest_name", help="Name of the function test") parser.add_argument("--build_till_bsp", default="0", choices=["0", "1"], help="Build bsp only") parser.add_argument("--use_hypervisor", default="0", choices=["0", "1"], help="Use Hypervisor") parser.add_argument( "--iar_compilation", default="0", choices=["0", "1"], help="Perform IAR Compilation", ) args = parser.parse_args(argv) # Fixme: convert args to type class dict before retunring it return args def map_procname(self, proc): proc_dict = { "a72_0_versal": "psv_cortexa72_0", "a72_1_versal": "psv_cortexa72_1", "r5_0_versal": "psv_cortexr5_0", "r5_1_versal": "psv_cortexr5_1", "pmc_versal": "psv_pmc_0", "psm_versal": "psv_psm_0", "a53_0_zynqmp": "psu_cortexa53_0", "a53_1_zynqmp": "psu_cortexa53_1", "r5_0_zynqmp": "psu_cortexr5_0", "r5_1_zynqmp": "psu_cortexr5_1", "a9_0_zynq": "ps7_cortexa9_0", "a9_1_zynq": "ps7_cortexa9_1", "mb_0": "microblaze_0", } if proc in proc_dict.keys(): return proc_dict[proc] else: return proc def parser(self, config): args = {} if "do_compile" in config: args["do_compile"] = config["do_compile"] else: args["do_compile"] = 1 if "do_cleanup" in config: args["do_cleanup"] = config["do_cleanup"] if "extension" in config: args["extension"] = config["extension"] if "xsct_proj_name" in config: args["pname"] = config["xsct_proj_name"] if "xsct_proc_name" in config: args["processor"] = self.map_procname(config["xsct_proc_name"]) if "XSCT_PATH" in config: args["xsct_path"] = config["XSCT_PATH"] if "xsct_os_name" in config: args["osname"] = config["xsct_os_name"] if "xsct_xsa" in config: args["hdf"] = config["xsct_xsa"] if "xsct_app_name" in config: args["app"] = config["xsct_app_name"] if "xsct_platform_name" in config: args["hwpname"] = config["xsct_platform_name"] if "build_till_bsp" in config: args["build_till_bsp"] = config["build_till_bsp"] else: args["build_till_bsp"] = 0 if "extra_args" in config: args["extra_args"] = config["extra_args"] else: args["extra_args"] = None if "xsct_outDir" in config: args["out_dir"] = config["xsct_outDir"] else: args["out_dir"] = f"{config.wsDir}/images" if "xsct_elf_name" in config: args["elf_name"] = config["xsct_elf_name"] if "xsct_lib" in config: args["lib"] = config["xsct_lib"] if "xsct_library_name" in config: args["library_name"] = config["xsct_library_name"] if "xsct_import_sources" in config: args["import_sources"] = config["xsct_import_sources"] if "xsct_import_args" in config: args["import_args"] = config["xsct_import_args"] if "xsct_thirdparty_name" in config: args["thirdparty_name"] = config["xsct_thirdparty_name"] if "xsct_use_hypervisor" in config: args["use_hypervisor"] = config["xsct_use_hypervisor"] if "xsct_thirdparty_dir" in config: args["thirdparty_dir"] = config["xsct_thirdparty_dir"] if "xsct_extention_tcl" in config: if config["xsct_extention_tcl"]: args["extension"] = config["xsct_extention_tcl"] if "component" in config: args["component"] = config["component"] if "rp_intg" in config: args["rp_intg"] = config["rp_intg"] if "xsct_driver" in config: args["driver"] = config["xsct_driver"] if "iar_compilation" in config: args["iar_compilation"] = config["iar_compilation"] args["use_dependency_props"] = config.get("use_dependency_props", "1") if "xsct_example_name" in config: args["example_name"] = config["xsct_example_name"] if "repo_exists" in config and config["repo_exists"] == 1: args["rp"] = f"{config.workDir}/src/" else: if config.get("external_embeddedsw"): if is_dir(config["external_embeddedsw"]): args["rp"] = 
config["external_embeddedsw"] else: raise Exception( f"ERROR: {config['external_embeddedsw']} does not exist" ) args["ws"] = f"{config.workDir}/{config['component']}/" if "XSCT_HYPERVISOR" in config: self.console.runcmd( f"export XSCT_HYPERVISOR={config['XSCT_HYPERVISOR']}") dir_path = os.path.dirname(os.path.realpath(__file__)) self.console.runcmd(f"export SCRIPTS_PYTHON={dir_path}") self.console.runcmd(f"export test_log_path={config.logDir}") self.console.runcmd(f"export test_work_path={config.workDir}") self.console.runcmd(f"export test_image_path={config.imagesDir}") if "BUILD_SOURCE" in config: self.console.runcmd( f"export BUILD_SOURCE={config['BUILD_SOURCE']}") if "usr_src_path" in config: self.console.runcmd( f"export usr_src_path={config['usr_src_path']}") return args def set_user_args(self, args): # Extra args might be present in config file appconfig = self.get_config_var()["APP_CONFIG"] bspconfig = self.get_config_var()["BSP_CONFIG"] yaml_file_path = os.path.join(self.config.workDir, "yamlconf.yaml") if verify_extra_args(args["extra_args"]): if write_extra_args_to_yaml_conf(args, "cmdline", appconfig, bspconfig, yaml_file_path): self.CONFIG_FILE_SET = True self.yaml_file_path = yaml_file_path elif args["extra_args"] is None: if write_extra_args_to_yaml_conf(args, "file", appconfig, bspconfig, yaml_file_path): self.CONFIG_FILE_SET = True self.yaml_file_path = yaml_file_path # Verify if all the mandatory args are defined is_defined(args, list(self.get_config_var()["MANDATORY_ARGS"])) arg_dict = {} for arg in args.keys(): value = args[arg] if value is not None and value != "": arg_dict[arg] = value # Check for vitisPath mandatory args if not is_dir(self.config["vitisPath"]): raise Exception( f"Error: ({self.config['vitisPath']}) is not a directory") if not is_file(f"{self.config['vitisPath']}/bin/xsct"): raise Exception( f"Error: ({self.config['vitisPath']}/bin/xsct) is not a valid file" ) # Collect few mandatory args if "xsct_path" not in arg_dict: arg_dict["xsct_path"] = f"{self.config['vitisPath']}/bin/xsct" if "rp" not in arg_dict: arg_dict["rp"] = f"{self.config['vitisPath']}/data/embeddedsw" # Collect the params from the default settings component = arg_dict["component"] config_dict = self.get_defconfig_var() for key in config_dict: if key == component: subdict = config_dict[key] for subkey in subdict: if subkey not in arg_dict: arg_dict[subkey] = subdict[subkey] self.user_var_dict = arg_dict def set_cmd(self): user_var_dict = self.get_user_var() tclargs = self.get_config_var()["TCL_SUPPORTED_ARGS"] cmd = "" for key in user_var_dict: if key in tclargs: value = user_var_dict[key] if value is not None and value != "": str1 = "-" + str(key) str2 = surround_double_quotes(str(value)) cmd += str1 + " " + str2 + " " if "APP_TCL" in self.config: app_path = self.config["APP_TCL"] else: dir_path = os.path.dirname(os.path.realpath(__file__)) app_path = os.path.join(dir_path, "scout_app.tcl") cmd = app_path + " " + cmd + " " if self.config.get("iar_compilation") != "1": cmd = str(user_var_dict["xsct_path"]) + " " + cmd if self.get_config_file(): cmd += "-yamlconf" + " " + surround_double_quotes( self.get_yaml_file()) self.cmd = cmd # Getter def get_config_var(self): return self.config_var_dict def get_defconfig_var(self): return self.defconfig_var_dict def get_supported_components(self): return self.supported_components_list def get_user_var(self): return self.user_var_dict def get_config_file(self): return self.CONFIG_FILE_SET def get_yaml_file(self): return self.yaml_file_path def 
    def get_cmd(self):
        return self.cmd

    def clone_esw(self, args):
        self.esw_path = os.path.join(self.config["sharedWs"], "embeddedsw")
        if (
            self.config["XSCT_BUILD_SOURCE"] == "git"
            and not self.config["external_embeddedsw"]
            and not self.config["repo_exists"]
        ):
            mkdir(self.config["sharedWs"])
            args["rp"] = self.esw_path
            clone(
                self.config.git.embeddedsw,
                self.esw_path,
                clone_once=True,
            )
        elif self.config.get("external_embeddedsw"):
            # Get log prints when an external esw is given
            git = Git(
                self.config.git.embeddedsw,
                self.config.external_embeddedsw,
                True,
            )
            log.info("Using external embeddedsw")
            git.log()
        return args

    def build_app(self, config) -> bool:
        cmd = ""
        if config.get("iar_compilation") != "1":
            self.console.runcmd("unset DISPLAY")
            self.set_cmd()
            cmd = self.get_cmd()
        if config.get("iar_compilation") == "1":
            xtfci_path = f"{config.wsDir}/work"
            if not is_file(xtfci_path + "/xtfci.py"):
                os.system(
                    f"wget https://raw.gitenterprise.xilinx.com/regressions/xtfci/master/xtfci.py -P {xtfci_path}"
                )
            from roast.component import iar

            result = iar.run_iar(
                self.config["component"],
                cmd,
                f"{config.workDir}",
                f"{config.wsDir}/images",
                self.config,
            )
            return bool(result)
        print_msg("APP.TCL COMMAND")
        print_msg("*******************************************")
        print_msg(cmd)
        print_msg("*******************************************")
        print_msg("")
        self.console.runcmd(
            f"export XSDK_DEFAULT_TRACE={config.get('XSDK_DEFAULT_TRACE', '')}"
        )
        self.console.runcmd(
            f"export _JAVA_OPTIONS='-Duser.home={config['workDir']}/.xsct'"
        )
        self.console.runcmd(
            cmd,
            timeout=1500,
            expected_failures=expected_failures,
        )
        if config.get("build_till_bsp", 0) or check_if_string_in_file(
            f"{config.wsDir}/images/results.txt", "PASS"
        ):
            return True
        log.error("ELF creation failed")
        return False
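# --- Usage sketch (illustrative only, not part of the library) ----------------
# A minimal example of how AppBuilder might be driven end to end, assuming a
# `config` object populated the way the constructors above expect (workDir,
# wsDir, vitisPath, component, xsct_* keys, ...). The helper name
# build_component_elf is hypothetical.
def build_component_elf(config):
    """Sketch: build one component's ELF with AppBuilder."""
    builder = AppBuilder(config)      # loads config.yaml / defconfig.yaml, exports env
    args = builder.parser(config)     # map config keys onto tcl-style arguments
    args = builder.clone_esw(args)    # fetch embeddedsw when building from git
    builder.set_user_args(args)       # validate mandatory args and merge defaults
    return builder.build_app(config)  # run scout_app.tcl via xsct; True on PASS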