Пример #1
0
    def __init__(self, d, sdktestdir, sdkenv, tcname, *args):
        """Test context for SDK tests.

        Loads the target and host package manifests produced by the SDK
        build and aborts the build if either manifest is missing.
        """
        super(SDKTestContext, self).__init__(d)

        self.sdktestdir = sdktestdir
        self.sdkenv = sdkenv
        self.tcname = tcname

        # Target manifest: one "<pkg> <arch> <version>" entry per line,
        # stored as pkg -> (version, arch).
        if not hasattr(self, 'target_manifest'):
            self.target_manifest = d.getVar("SDK_TARGET_MANIFEST", True)
        try:
            self.pkgmanifest = {}
            with open(self.target_manifest) as f:
                for line in f:
                    (pkg, arch, version) = line.strip().split()
                    self.pkgmanifest[pkg] = (version, arch)
        except IOError as e:
            bb.fatal("No package manifest file found. Did you build the sdk image?\n%s" % e)

        # Host manifest is kept verbatim as a single string.
        if not hasattr(self, 'host_manifest'):
            self.host_manifest = d.getVar("SDK_HOST_MANIFEST", True)
        try:
            with open(self.host_manifest) as f:
                self.hostpkgmanifest = f.read()
        except IOError as e:
            bb.fatal("No host package manifest file found. Did you build the sdk image?\n%s" % e)
Пример #2
0
 def _wait_until_booted(self):
     """Block until the target has booted, detected by watching the serial
     console for a "login:" prompt (up to 120 seconds)."""
     try:
         serialconn = pexpect.spawn(self.serialcontrol_cmd, env=self.origenv, logfile=sys.stdout)
         serialconn.expect("login:", timeout=120)
         serialconn.close()
     except pexpect.ExceptionPexpect as e:
         bb.fatal('Serial interaction failed: %s' % str(e))
Пример #3
0
def load_supported_recipes(d):
    """Read the recipe whitelist files listed in SUPPORTED_RECIPES.

    Returns a tuple (supported_recipes, files) where supported_recipes is
    a SupportedRecipes collection of SupportedRecipe entries and files is
    the list of files that were successfully read.  Aborts the build if
    SUPPORTED_RECIPES is unset or a listed file cannot be read.
    """
    files = []
    supported_files = d.getVar('SUPPORTED_RECIPES', True)
    if not supported_files:
        bb.fatal('SUPPORTED_RECIPES is not set')
    supported_recipes = SupportedRecipes()
    for filename in supported_files.split():
        try:
            with open(filename) as f:
                # enumerate() keeps the reported line number correct even
                # when comment lines are skipped; the original manual
                # counter was not incremented on the '#' continue path, so
                # every comment line made subsequent numbers drift.
                for linenumber, line in enumerate(f, 1):
                    if line.startswith('#'):
                        continue
                    # TODO (?): sanity check the content to catch
                    # obsolete entries or typos.
                    pn = line.strip()
                    if pn:
                        supported_recipes.append(SupportedRecipe(pn,
                                                                 filename,
                                                                 linenumber))
            files.append(filename)
        except (OSError, IOError) as ex:
            # open() raises IOError on Python 2, which is not an OSError
            # subclass there; catch both.
            bb.fatal('Could not read SUPPORTED_RECIPES = %s: %s' % (supported_files, str(ex)))

    return (supported_recipes, files)
Пример #4
0
    def _unpackdep(self, ud, pkg, data, destdir, dldir, d):
        """Unpack the tarball for *pkg* from *dldir* into *destdir*, then
        recurse into its 'deps' entries under node_modules/.

        Raises UnpackError when the tar command fails; aborts the build if
        the downloaded file is not a recognised tarball.
        """
        # renamed from 'file', which shadows the builtin
        pkg_file = data[pkg]['tgz']
        logger.debug(2, "file to extract is %s" % pkg_file)
        if pkg_file.endswith('.tgz') or pkg_file.endswith('.tar.gz') or pkg_file.endswith('.tar.Z'):
            cmd = 'tar xz --strip 1 --no-same-owner -f %s/%s' % (dldir, pkg_file)
        else:
            bb.fatal("NPM package %s downloaded not a tarball!" % pkg_file)

        # Change to subdir before executing command
        save_cwd = os.getcwd()
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        os.chdir(destdir)
        try:
            path = d.getVar('PATH', True)
            if path:
                cmd = "PATH=\"%s\" %s" % (path, cmd)
            bb.note("Unpacking %s to %s/" % (pkg_file, os.getcwd()))
            ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True)
        finally:
            # always restore the working directory, even if the call raises
            os.chdir(save_cwd)

        if ret != 0:
            raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), ud.url)

        if 'deps' not in data[pkg]:
            return
        for dep in data[pkg]['deps']:
            self._unpackdep(ud, dep, data[pkg]['deps'], "%s/node_modules/%s" % (destdir, dep), dldir, d)
Пример #5
0
    def __init__(self, machine, rootfs, display = None, tmpdir = None, logfile = None, boottime = 400, runqemutime = 60):
        """Prepare a qemu runner for *machine*/*rootfs*.

        Opens a non-blocking listening TCP socket on an ephemeral localhost
        port for the qemu serial console to connect back to; aborts the
        build if the socket cannot be created.
        """
        # Popen object
        self.runqemu = None

        self.machine = machine
        self.rootfs = rootfs

        self.qemupid = None
        self.ip = None

        self.display = display
        self.tmpdir = tmpdir
        self.logfile = logfile
        self.boottime = boottime
        self.runqemutime = runqemutime

        self.bootlog = ''
        self.qemusock = None

        try:
            self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server_socket.setblocking(0)
            # port 0: let the kernel pick a free ephemeral port
            self.server_socket.bind(("127.0.0.1",0))
            self.server_socket.listen(2)
            self.serverport = self.server_socket.getsockname()[1]
            bb.note("Created listening socket for qemu serial console on: 127.0.0.1:%s" % self.serverport)
        except socket.error as msg:
            # "except X, e" is Python-2-only syntax; "as" works on 2.6+ and 3.
            # msg.strerror is the portable spelling of the old msg[1].
            self.server_socket.close()
            bb.fatal("Failed to create listening socket: %s" % msg.strerror)
Пример #6
0
def load_supported_recipes(d):
    """Read the recipe whitelist files listed in SUPPORTED_RECIPES.

    Each file may carry a SUPPORTED_RECIPES varflag naming who supports
    it; otherwise the file's base name (minus a .txt extension) is used.
    Returns a tuple (supported_recipes, files).
    """
    files = []
    supported_files = d.getVar("SUPPORTED_RECIPES", True)
    if not supported_files:
        bb.fatal("SUPPORTED_RECIPES is not set")
    supported_recipes = SupportedRecipes()
    for filename in supported_files.split():
        try:
            base = os.path.basename(filename)
            supportedby = d.getVarFlag("SUPPORTED_RECIPES", base, True)
            if not supportedby:
                # rstrip(".txt") strips *characters* from the right-hand
                # end (e.g. "fast.txt" -> "fas"), so remove the extension
                # explicitly instead.
                if base.endswith(".txt"):
                    supportedby = base[:-len(".txt")]
                else:
                    supportedby = base
            with open(filename) as f:
                # enumerate() keeps the reported line number correct even
                # for comment lines, which the manual counter skipped.
                for linenumber, line in enumerate(f, 1):
                    if line.startswith("#"):
                        continue
                    # TODO (?): sanity check the content to catch
                    # obsolete entries or typos.
                    pn = line.strip()
                    if pn:
                        supported_recipes.append(SupportedRecipe(pn, supportedby, filename, linenumber))
            files.append(filename)
        except (OSError, IOError) as ex:
            # open() raises IOError on Python 2, not OSError; catch both.
            bb.fatal("Could not read SUPPORTED_RECIPES = %s: %s" % (supported_files, str(ex)))

    return (supported_recipes, files)
Пример #7
0
File: ast.py  Project: ilbers/isar
def finalize(fn, d, variant = None):
    """Finish parsing recipe *fn*: register its event handlers, fire the
    pre-finalise event, run anonymous functions and register tasks.

    The previously registered event handlers are always restored, even if
    finalisation fails, so one recipe's handlers cannot leak into the next.
    """
    saved_handlers = bb.event.get_handlers().copy()
    try:
        for var in d.getVar('__BBHANDLERS', False) or []:
            # try to add the handler
            handlerfn = d.getVarFlag(var, "filename", False)
            if not handlerfn:
                bb.fatal("Undefined event handler function '%s'" % var)
            handlerln = int(d.getVarFlag(var, "lineno", False))
            bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)

        bb.event.fire(bb.event.RecipePreFinalise(fn), d)

        bb.data.expandKeys(d)
        runAnonFuncs(d)

        tasklist = d.getVar('__BBTASKS', False) or []
        bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d)
        bb.build.add_tasks(tasklist, d)

        bb.parse.siggen.finalise(fn, d, variant)

        d.setVar('BBINCLUDED', bb.parse.get_file_depends(d))

        bb.event.fire(bb.event.RecipeParsed(fn), d)
    finally:
        bb.event.set_handlers(saved_handlers)
Пример #8
0
 def power_ctl(self, msg):
     """Run the configured power-control command with *msg* appended as an
     argument; a no-op when no TEST_POWERCONTROL_CMD is configured."""
     if self.powercontrol_cmd:
         cmd = "%s %s" % (self.powercontrol_cmd, msg)
         try:
             commands.runCmd(cmd, assert_error=False, preexec_fn=os.setsid, env=self.origenv)
         except CommandError as e:
             bb.fatal(str(e))
Пример #9
0
def finalize(fn, d, variant=None):
    """Finish parsing recipe *fn*: register its event handlers, run
    anonymous functions and register its tasks.

    The handler set is restored in a finally block so that a failure
    during finalisation cannot leak this recipe's handlers into the parse
    of the next recipe (previously they were only restored on success).
    """
    saved_handlers = bb.event.get_handlers().copy()
    try:
        for var in d.getVar("__BBHANDLERS", False) or []:
            # try to add the handler
            handlerfn = d.getVarFlag(var, "filename", False)
            if not handlerfn:
                bb.fatal("Undefined event handler function '%s'" % var)
            handlerln = int(d.getVarFlag(var, "lineno", False))
            bb.event.register(
                var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln
            )

        bb.event.fire(bb.event.RecipePreFinalise(fn), d)

        bb.data.expandKeys(d)
        bb.data.update_data(d)
        # Run every anonymous python function collected during parsing.
        code = []
        for funcname in d.getVar("__BBANONFUNCS", False) or []:
            code.append("%s(d)" % funcname)
        bb.utils.better_exec("\n".join(code), {"d": d})
        bb.data.update_data(d)

        tasklist = d.getVar("__BBTASKS", False) or []
        bb.build.add_tasks(tasklist, d)

        bb.parse.siggen.finalise(fn, d, variant)

        d.setVar("BBINCLUDED", bb.parse.get_file_depends(d))

        bb.event.fire(bb.event.RecipeParsed(fn), d)
    finally:
        bb.event.set_handlers(saved_handlers)
Пример #10
0
    def eval(self, data):
        """Implement EXPORT_FUNCTIONS: for each exported name, make the
        plain function a thin wrapper that dispatches to the class-prefixed
        implementation, so subclasses may override it cleanly."""
        for func in self.n:
            calledfunc = self.classname + "_" + func

            # An existing definition that was not itself generated by
            # EXPORT_FUNCTIONS takes precedence - leave it alone.
            if data.getVar(func, False) and not data.getVarFlag(func, "export_func", False):
                continue

            if data.getVar(func, False):
                data.setVarFlag(func, "python", None)
                data.setVarFlag(func, "func", None)

            # Propagate function-type flags from the implementation to the
            # wrapper, and "dirs" from the wrapper to the implementation.
            for flag in ["func", "python"]:
                if data.getVarFlag(calledfunc, flag, False):
                    data.setVarFlag(func, flag, data.getVarFlag(calledfunc, flag, False))
            for flag in ["dirs"]:
                if data.getVarFlag(func, flag, False):
                    data.setVarFlag(calledfunc, flag, data.getVarFlag(func, flag, False))
            data.setVarFlag(func, "filename", "autogenerated")
            data.setVarFlag(func, "lineno", 1)

            if data.getVarFlag(calledfunc, "python", False):
                data.setVar(func, "    bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True)
            else:
                if "-" in self.classname:
                    bb.fatal(
                        "The classname %s contains a dash character and is calling an sh function %s using EXPORT_FUNCTIONS. Since a dash is illegal in sh function names, this cannot work, please rename the class or don't use EXPORT_FUNCTIONS."
                        % (self.classname, calledfunc)
                    )
                data.setVar(func, "    " + calledfunc + "\n", parsing=True)
            data.setVarFlag(func, "export_func", "1")
Пример #11
0
def get_signer(d, backend):
    """Get signer object for the specified backend"""
    # Only the local signing backend is implemented; anything else is a
    # fatal configuration error.
    if backend != 'local':
        bb.fatal("Unsupported signing backend '%s'" % backend)
    return LocalSigner(d)
Пример #12
0
def get_target_controller(d):
    """Instantiate the test target controller selected by TEST_TARGET
    ("qemu" or "simpleremote"); aborts the build on any other value."""
    # Read the variable once instead of expanding it twice.
    testtarget = d.getVar("TEST_TARGET", True)
    if testtarget == "qemu":
        return QemuTarget(d)
    elif testtarget == "simpleremote":
        return SimpleRemoteTarget(d)
    else:
        bb.fatal("Please set a valid TEST_TARGET")
Пример #13
0
def get_target_controller(d):
    """Instantiate the test target controller named by TEST_TARGET.

    Accepts the legacy short names "qemu" and "simpleremote"; any other
    value is treated as a controller class name, looked up first in this
    module and then in the BBPATH layers.  Aborts the build when no such
    controller can be found.
    """
    testtarget = d.getVar("TEST_TARGET", True)
    # old, simple names
    if testtarget == "qemu":
        return QemuTarget(d)
    elif testtarget == "simpleremote":
        return SimpleRemoteTarget(d)
    else:
        # use the class name
        try:
            # is it a core class defined here?
            controller = getattr(sys.modules[__name__], testtarget)
        except AttributeError:
            # nope, perhaps a layer defined one
            try:
                bbpath = d.getVar("BBPATH", True).split(":")
                testtargetloader = TestTargetLoader()
                controller = testtargetloader.get_controller_module(testtarget, bbpath)
            except ImportError as e:
                bb.fatal(
                    "Failed to import {0} from available controller modules:\n{1}".format(
                        testtarget, traceback.format_exc()
                    )
                )
            except AttributeError as e:
                bb.fatal("Invalid TEST_TARGET - " + str(e))
        return controller(d)
Пример #14
0
def sanity(d):
    """Validate the SDK and MACHINE configuration, reporting every problem
    found and aborting the build if any exist."""
    import bb

    problems = []

    if not d.get("SDK_CPU"):
        problems.append("SDK_CPU not set")
    if not d.get("SDK_OS"):
        problems.append("SDK_OS not set")

    machine = d.get("MACHINE")
    machine_cpu = d.get("MACHINE_CPU")
    machine_os = d.get("MACHINE_OS")
    # MACHINE alone, or the MACHINE_CPU/MACHINE_OS pair, is acceptable.
    if not machine and not (machine_cpu and machine_os):
        if machine_cpu:
            problems.append("MACHINE_CPU set, but not MACHINE_OS")
        elif machine_os:
            problems.append("MACHINE_OS set, but not MACHINE_CPU")
        else:
            problems.append("MACHINE or MACHINE_CPU and MACHINE_OS must be set")

    for message in problems:
        bb.error(message)
    if problems:
        bb.fatal("Invalid MACHINE and/or SDK specification\n"
                 "Check your conf/local.conf file and/or machine and distro config files.")
    return
Пример #15
0
    def eval(self, data):
        """Implement EXPORT_FUNCTIONS: turn the plain function name into a
        wrapper that dispatches to the class-prefixed implementation, so
        subclasses may override the plain name cleanly."""
        for func in self.n:
            calledfunc = self.classname + "_" + func

            # An existing, non-generated definition takes precedence.
            if data.getVar(func) and not data.getVarFlag(func, 'export_func'):
                continue

            if data.getVar(func):
                data.setVarFlag(func, 'python', None)
                data.setVarFlag(func, 'func', None)

            # Propagate function-type flags to the wrapper, and "dirs"
            # back to the implementation.
            for flag in [ "func", "python" ]:
                if data.getVarFlag(calledfunc, flag):
                    data.setVarFlag(func, flag, data.getVarFlag(calledfunc, flag))
            for flag in [ "dirs" ]:
                if data.getVarFlag(func, flag):
                    data.setVarFlag(calledfunc, flag, data.getVarFlag(func, flag))

            if data.getVarFlag(calledfunc, "python"):
                data.setVar(func, "    bb.build.exec_func('" + calledfunc + "', d)\n")
            else:
                if "-" in self.classname:
                   bb.fatal("The classname %s contains a dash character and is calling an sh function %s using EXPORT_FUNCTIONS. Since a dash is illegal in sh function names, this cannot work, please rename the class or don't use EXPORT_FUNCTIONS." % (self.classname, calledfunc))
                data.setVar(func, "    " + calledfunc + "\n")
            data.setVarFlag(func, 'export_func', '1')
Пример #16
0
def unpack_file(file, destdir, parameters, env=None):
    """Validate unpack parameters for *file* before extracting it into
    *destdir*; aborts the build on an invalid 'dos' parameter value."""
    import subprocess, shutil

    try:
        dos = to_boolean(parameters.get("dos"), False)
    except ValueError:
        # 'filename' was an undefined name here (NameError in the error
        # path) - report the actual argument; also replaced the
        # Python-2-only "except ValueError, exc" syntax.
        bb.fatal("Invalid value for 'dos' parameter for %s: %s" %
                 (file, parameters.get("dos")))
Пример #17
0
 def add_auto_list(path):
     """Discover test modules under *path* and append their dotted module
     names to the enclosing scope's testslist, skipping private files."""
     if not os.path.exists(os.path.join(path, '__init__.py')):
         bb.fatal('Tests directory %s exists but is missing __init__.py' % path)
     files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
     for f in files:
         # 'type' is the suite name captured from the enclosing scope
         # (shadowing the builtin), not the builtin type().
         module = 'oeqa.' + type + '.' + f[:-3]
         if module not in testslist:
             testslist.append(module)
Пример #18
0
 def get_image_fstype(self, d, image_fstypes=None):
     """Return the first entry of self.supported_image_fstypes that occurs
     in *image_fstypes* (defaulting to IMAGE_FSTYPES from *d*); aborts the
     build when none of the configured fstypes is supported."""
     if not image_fstypes:
         image_fstypes = d.getVar('IMAGE_FSTYPES', True).split(' ')
     possible_image_fstypes = [fstype for fstype in self.supported_image_fstypes if fstype in image_fstypes]
     if possible_image_fstypes:
         return possible_image_fstypes[0]
     else:
         # fixed the garbled double negative in the original message
         # ("no possible image_fstype could not be determined")
         bb.fatal("No supported image fstype could be determined. IMAGE_FSTYPES=\"%s\" and supported_image_fstypes=\"%s\": " % (', '.join(map(str, image_fstypes)), ', '.join(map(str, self.supported_image_fstypes))))
Пример #19
0
 def deploy(self):
     """Deploy the test image onto the target over SSH, aborting the build
     on failure."""
     bb.plain("%s - deploying image on target" % self.pn)
     # base class just sets the ssh log file for us
     super(GummibootTarget, self).deploy()
     self.master = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port)
     try:
         self._deploy()
     except Exception as e:
         bb.fatal("Failed deploying test image: %s" % e)
Пример #20
0
 def add_controller_list(path):
     """Append controller modules found under *path* to the enclosing
     scope's controllerslist, warning about duplicate module names."""
     if not os.path.exists(os.path.join(path, '__init__.py')):
         bb.fatal('Controllers directory %s exists but is missing __init__.py' % path)
     files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
     for f in files:
         module = 'oeqa.controllers.' + f[:-3]
         if module not in controllerslist:
             controllerslist.append(module)
         else:
             bb.warn("Duplicate controller module found for %s, only one added. Layers should create unique controller module names" % module)
Пример #21
0
def prserv_make_conn(d):
    """Connect to the PR service at PRSERV_HOST:PRSERV_PORT and store the
    connection in the datastore as __PRSERV_CONN; aborts on failure."""
    import prserv.serv
    host = d.getVar("PRSERV_HOST",True)
    port = d.getVar("PRSERV_PORT",True)
    try:
        conn = None
        conn = prserv.serv.PRServerConnection(host,int(port))
        d.setVar("__PRSERV_CONN",conn)
    except Exception as exc:
        # "except Exception, exc" is Python-2-only syntax; "as" works on
        # both Python 2.6+ and Python 3.
        bb.fatal("Connecting to PR service %s:%s failed: %s" % (host, port, str(exc)))
Пример #22
0
 def _read_testlist(self, fpath, builddir):
     if not os.path.isabs(fpath):
         fpath = os.path.join(builddir, "conf", fpath)
     if not os.path.exists(fpath):
         bb.fatal("No such manifest file: ", fpath)
     tcs = []
     for line in open(fpath).readlines():
         line = line.strip()
         if line and not line.startswith("#"):
             tcs.append(line)
     return " ".join(tcs)
Пример #23
0
 def _start(self, params=None):
     """Power-cycle the master image and drive the boot over the serial
     console until a login prompt appears (120 second timeout)."""
     self.power_cycle(self.master)
     try:
         serialconn = pexpect.spawn(self.serialcontrol_cmd, env=self.origenv, logfile=sys.stdout)
         # Waits for GRUB then kernel output, then sends "OB\r" -
         # presumably selecting the test boot entry; confirm against the
         # serial/GRUB setup for this target.
         serialconn.expect("GNU GRUB  version 2.00")
         serialconn.expect("Linux")
         serialconn.sendline("OB\r")
         serialconn.expect("login:", timeout=120)
         serialconn.close()
     except pexpect.ExceptionPexpect as e:
         bb.fatal('Serial interaction failed: %s' % str(e))
Пример #24
0
 def __init__(self, d):
     """Remote target reachable at TEST_TARGET_IP; derives the server-side
     IP from the routing table when TEST_SERVER_IP is not set."""
     super(SimpleRemoteTarget, self).__init__(d)
     self.ip = d.getVar("TEST_TARGET_IP", True) or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
     bb.note("Target IP: %s" % self.ip)
     self.server_ip = d.getVar("TEST_SERVER_IP", True)
     if not self.server_ip:
         try:
             # assumes the 7th whitespace-separated token of
             # 'ip route get <target>' is the local source address -
             # TODO confirm across iproute2 versions
             self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).split()[6]
         except Exception as e:
             bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
     bb.note("Server IP: %s" % self.server_ip)
Пример #25
0
    def __init__(self, d):
        """Target controller that deploys a kernel and rootfs through the
        VLM command-line tool; requires TEST_SERIALCONTROL_CMD."""
        super(IntelshumwayTarget, self).__init__(d)

        # NOTE(review): the target id 22025 and the /folk/... paths look
        # hard-coded for a specific lab setup - confirm before reuse.
        self.deploy_cmds = [
                '/folk/vlm/commandline/vlmTool copyFile -s amazon -t 22025  -k /folk/lyang0/kernel',
                '/folk/vlm/commandline/vlmTool copyFile -s amazon -t 22025  -r /folk/lyang0/rootfs',
                ]


        if not self.serialcontrol_cmd:
            bb.fatal("This TEST_TARGET needs a TEST_SERIALCONTROL_CMD defined in local.conf.")
Пример #26
0
 def create_socket(self):
     """Connect to the qemu serial console socket, retrying up to three
     times before giving up.

     The original decremented the retry counter *after* calling
     bb.fatal(), so the loop never actually retried; now only the final
     failed attempt is fatal.
     """
     tries = 3
     while tries > 0:
         try:
             self.server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
             self.server_socket.connect(self.socketfile)
             bb.note("Created listening socket for qemu serial console.")
             return
         except socket.error as msg:
             self.server_socket.close()
             tries -= 1
             if tries == 0:
                 bb.fatal("Failed to create listening socket.")
Пример #27
0
def prserv_check_avail(d):
    """Validate PRSERV_HOST/PRSERV_PORT and open a checked (pinged)
    connection to the PR service."""
    host = d.getVar("PRSERV_HOST",True)
    port = d.getVar("PRSERV_PORT",True)
    try:
        # A missing host is turned into the same TypeError that int(None)
        # raises for a missing port, so both cases share one error path.
        # NOTE(review): a non-numeric port string raises ValueError and
        # would propagate uncaught - confirm whether that is intended.
        if not host:
            raise TypeError
        else:
            port = int(port)
    except TypeError:
        bb.fatal("Undefined or incorrect values of PRSERV_HOST or PRSERV_PORT")
    else:
        prserv_make_conn(d, True)
Пример #28
0
    def __init__(self, d):
        """GRUB-booted hardware target: deployment copies the test kernel
        into /boot and unpacks the test rootfs onto the testrootfs
        partition; requires TEST_SERIALCONTROL_CMD."""
        super(GrubTarget, self).__init__(d)
        self.deploy_cmds = [
                'mount -L boot /boot',
                'mkdir -p /mnt/testrootfs',
                'mount -L testrootfs /mnt/testrootfs',
                'cp ~/test-kernel /boot',
                'rm -rf /mnt/testrootfs/*',
                'tar xvf ~/test-rootfs.%s -C /mnt/testrootfs' % self.image_fstype,
                ]

        if not self.serialcontrol_cmd:
            bb.fatal("This TEST_TARGET needs a TEST_SERIALCONTROL_CMD defined in local.conf.")
Пример #29
0
def prserv_make_conn(d, check = False):
    """Connect to the PR service at PRSERV_HOST:PRSERV_PORT, optionally
    pinging it first, and store the connection as __PRSERV_CONN.

    Aborts the build when the connection (or the ping, if *check* is
    true) fails.
    """
    import prserv.serv
    host = d.getVar("PRSERV_HOST",True)
    port = d.getVar("PRSERV_PORT",True)
    try:
        conn = None
        conn = prserv.serv.PRServerConnection(host,int(port))
        if check:
            if not conn.ping():
                raise Exception('service not available')
        d.setVar("__PRSERV_CONN",conn)
    except Exception as exc:
        # "except Exception, exc" is Python-2-only syntax; "as" works on
        # both Python 2.6+ and Python 3.
        bb.fatal("Connecting to PR service %s:%s failed: %s" % (host, port, str(exc)))
Пример #30
0
 def extract_binary(pth_to_pkg, dest_pth=None):
     """Extract the rpm package *pth_to_pkg* with rpm2cpio | cpio into
     *dest_pth* (or the filesystem root for native packages); returns the
     runCmd result of the extraction."""
     cpio_command = runCmd("which cpio")
     rpm2cpio_command = runCmd("ls /usr/bin/rpm2cpio")
     # The pipeline below needs *both* tools, so missing either one is
     # fatal (the original used "and", which only fired when both were
     # missing, contradicting its own "Either ..." message).
     if (cpio_command.status != 0) or (rpm2cpio_command.status != 0):
         # separate the implicitly-concatenated sentences with spaces;
         # the original message ran them together
         bb.fatal("Either \"rpm2cpio\" or \"cpio\" tools are not available on your system. "
                 "All binaries extraction processes will not be available, crashing all related tests. "
                 "Please install them according to your OS recommendations") # will exit here
     if dest_pth:
         os.chdir(dest_pth)
     else:
         os.chdir(os.sep)  # this is for native package
     extract_bin_command = runCmd("%s %s | %s -idm" % (rpm2cpio_command.output, pth_to_pkg, cpio_command.output)) # semi-hardcoded because of a bug on poky's rpm2cpio
     return extract_bin_command
Пример #31
0
def feeder(lineno, s, fn, root, statements, eof=False):
    """Feed one line *s* (line *lineno* of file *fn*) into the .bb parser,
    appending parsed statements to *statements*.

    Relies on module-level parser state: __infunc__/__body__ accumulate a
    shell or python function body, __inpython__ a "def"-style python
    method, and __residue__ collects backslash-continued fragments until
    the logical line is complete.
    """
    global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, bb, __residue__, __classname__

    # Check tabs in python functions:
    # - def py_funcname(): covered by __inpython__
    # - python(): covered by '__anonymous' == __infunc__[0]
    # - python funcname(): covered by __infunc__[3]
    if __inpython__ or (__infunc__ and
                        ('__anonymous' == __infunc__[0] or __infunc__[3])):
        tab = __python_tab_regexp__.match(s)
        if tab:
            bb.warn(
                'python should use 4 spaces indentation, but found tabs in %s, line %s'
                % (root, lineno))

    # Inside a shell/python-style function body: accumulate until '}'.
    if __infunc__:
        if s == '}':
            __body__.append('')
            ast.handleMethod(statements, fn, lineno, __infunc__[0], __body__,
                             __infunc__[3], __infunc__[4])
            __infunc__ = []
            __body__ = []
        else:
            __body__.append(s)
        return

    # Inside a "def" python method: accumulate until dedent or EOF.
    if __inpython__:
        m = __python_func_regexp__.match(s)
        if m and not eof:
            __body__.append(s)
            return
        else:
            ast.handlePythonMethod(statements, fn, lineno, __inpython__, root,
                                   __body__)
            __body__ = []
            __inpython__ = False

            if eof:
                return

    if s and s[0] == '#':
        if len(__residue__) != 0 and __residue__[0][0] != "#":
            bb.fatal(
                "There is a comment on line %s of file %s (%s) which is in the middle of a multiline expression.\nBitbake used to ignore these but no longer does so, please fix your metadata as errors are likely as a result of this change."
                % (lineno, fn, s))

    if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s
                                                               or s[0] != "#"):
        bb.fatal(
            "There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed."
            % (lineno, fn, s))

    # Backslash continuation: stash the fragment and wait for more.
    if s and s[-1] == '\\':
        __residue__.append(s[:-1])
        return

    s = "".join(__residue__) + s
    __residue__ = []

    # Skip empty lines
    if s == '':
        return

    # Skip comments
    if s[0] == '#':
        return

    m = __func_start_regexp__.match(s)
    if m:
        __infunc__ = [
            m.group("func") or "__anonymous", fn, lineno,
            m.group("py") is not None,
            m.group("fr") is not None
        ]
        return

    m = __def_regexp__.match(s)
    if m:
        __body__.append(s)
        __inpython__ = m.group(1)

        return

    m = __export_func_regexp__.match(s)
    if m:
        ast.handleExportFuncs(statements, fn, lineno, m, __classname__)
        return

    m = __addtask_regexp__.match(s)
    if m:
        if len(m.group().split()) == 2:
            # Check and warn for "addtask task1 task2"
            m2 = re.match(r"addtask\s+(?P<func>\w+)(?P<ignores>.*)", s)
            if m2 and m2.group('ignores'):
                logger.warning('addtask ignored: "%s"' % m2.group('ignores'))

        # Check and warn for "addtask task1 before task2 before task3", the
        # similar to "after"
        taskexpression = s.split()
        for word in ('before', 'after'):
            if taskexpression.count(word) > 1:
                logger.warning(
                    "addtask contained multiple '%s' keywords, only one is supported"
                    % word)

        ast.handleAddTask(statements, fn, lineno, m)
        return

    m = __deltask_regexp__.match(s)
    if m:
        # Check and warn "for deltask task1 task2"
        if m.group('ignores'):
            logger.warning('deltask ignored: "%s"' % m.group('ignores'))
        ast.handleDelTask(statements, fn, lineno, m)
        return

    m = __addhandler_regexp__.match(s)
    if m:
        ast.handleBBHandlers(statements, fn, lineno, m)
        return

    m = __inherit_regexp__.match(s)
    if m:
        ast.handleInherit(statements, fn, lineno, m)
        return

    # Anything else is plain configuration syntax.
    return ConfHandler.feeder(lineno, s, fn, statements)
Пример #32
0
    def parseConfigurationFiles(self, prefiles, postfiles, mc="default"):
        """Build the configuration datastore for multiconfig *mc*.

        Parses *prefiles*, then bblayers.conf and every layer's
        layer.conf, then bitbake.conf, then *postfiles*; inherits the
        INHERIT classes and registers any event handlers found.  Returns
        the resulting datastore.
        """
        data = bb.data.createCopy(self.basedata)
        data.setVar("BB_CURRENT_MC", mc)

        # Parse files for loading *before* bitbake.conf and any includes
        for f in prefiles:
            data = parse_config_file(f, data)

        layerconf = self._findLayerConf(data)
        if layerconf:
            parselog.debug(2, "Found bblayers.conf (%s)", layerconf)
            # By definition bblayers.conf is in conf/ of TOPDIR.
            # We may have been called with cwd somewhere else so reset TOPDIR
            data.setVar("TOPDIR", os.path.dirname(os.path.dirname(layerconf)))
            data = parse_config_file(layerconf, data)

            layers = (data.getVar('BBLAYERS') or "").split()

            data = bb.data.createCopy(data)
            approved = bb.utils.approved_variables()
            for layer in layers:
                if not os.path.isdir(layer):
                    parselog.critical("Layer directory '%s' does not exist! "
                                      "Please check BBLAYERS in %s" %
                                      (layer, layerconf))
                    sys.exit(1)
                parselog.debug(2, "Adding layer %s", layer)
                if 'HOME' in approved and '~' in layer:
                    layer = os.path.expanduser(layer)
                if layer.endswith('/'):
                    layer = layer.rstrip('/')
                data.setVar('LAYERDIR', layer)
                data.setVar('LAYERDIR_RE', re.escape(layer))
                data = parse_config_file(
                    os.path.join(layer, "conf", "layer.conf"), data)
                data.expandVarref('LAYERDIR')
                data.expandVarref('LAYERDIR_RE')

            data.delVar('LAYERDIR_RE')
            data.delVar('LAYERDIR')

            # BBFILES_DYNAMIC entries only apply when their collection is
            # actually enabled.
            bbfiles_dynamic = (data.getVar('BBFILES_DYNAMIC') or "").split()
            collections = (data.getVar('BBFILE_COLLECTIONS') or "").split()
            invalid = []
            for entry in bbfiles_dynamic:
                parts = entry.split(":", 1)
                if len(parts) != 2:
                    invalid.append(entry)
                    continue
                l, f = parts
                if l in collections:
                    data.appendVar("BBFILES", " " + f)
            if invalid:
                bb.fatal(
                    "BBFILES_DYNAMIC entries must be of the form <collection name>:<filename pattern>, not:\n    %s"
                    % "\n    ".join(invalid))

            # Check each layer's declared compatibility against the core
            # layer's series names.
            layerseries = set((data.getVar("LAYERSERIES_CORENAMES")
                               or "").split())
            for c in collections:
                compat = set((data.getVar("LAYERSERIES_COMPAT_%s" % c)
                              or "").split())
                if compat and not (compat & layerseries):
                    bb.fatal(
                        "Layer %s is not compatible with the core layer which only supports these series: %s (layer is compatible with %s) %s"
                        % (c, " ".join(layerseries), " ".join(compat), compat))
                elif not compat and not data.getVar("BB_WORKERCONTEXT"):
                    bb.warn(
                        "Layer %s should set LAYERSERIES_COMPAT_%s in its conf/layer.conf file to list the core layer names it is compatible with %s %s."
                        % (c, c, " ".join(compat), compat))

        if not data.getVar("BBPATH"):
            msg = "The BBPATH variable is not set"
            if not layerconf:
                msg += (
                    " and bitbake did not find a conf/bblayers.conf file in"
                    " the expected location.\nMaybe you accidentally"
                    " invoked bitbake from the wrong directory?")
            raise SystemExit(msg)

        data = parse_config_file(os.path.join("conf", "bitbake.conf"), data)

        # Parse files for loading *after* bitbake.conf and any includes
        for p in postfiles:
            data = parse_config_file(p, data)

        # Handle any INHERITs and inherit the base class
        bbclasses = ["base"] + (data.getVar('INHERIT') or "").split()
        for bbclass in bbclasses:
            data = _inherit(bbclass, data)

        # Normally we only register event handlers at the end of parsing .bb files
        # We register any handlers we've found so far here...
        for var in data.getVar('__BBHANDLERS', False) or []:
            handlerfn = data.getVarFlag(var, "filename", False)
            if not handlerfn:
                parselog.critical("Undefined event handler function '%s'" %
                                  var)
                sys.exit(1)
            handlerln = int(data.getVarFlag(var, "lineno", False))
            bb.event.register(var, data.getVar(var, False),
                              (data.getVarFlag(var, "eventmask")
                               or "").split(), handlerfn, handlerln)

        data.setVar('BBINCLUDED', bb.parse.get_file_depends(data))

        return data
Пример #33
0
    def install_complementary(self, globs=None):
        """
        Install complementary packages based upon the list of currently installed
        packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
        these packages, if they don't exist then no error will occur.  Note: every
        backend needs to call this function explicitly after the normal package
        installation
        """
        if globs is None:
            globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
            split_linguas = set()

            # Collect both full locale names ("en-gb") and their language
            # prefixes ("en").
            for translation in self.d.getVar('IMAGE_LINGUAS').split():
                split_linguas.add(translation)
                split_linguas.add(translation.split('-')[0])

            split_linguas = sorted(split_linguas)

            # NOTE(review): if IMAGE_INSTALL_COMPLEMENTARY is unset (globs
            # still None) and IMAGE_LINGUAS is non-empty, the "globs +="
            # below would raise TypeError - confirm callers guarantee the
            # variable is set in that case.
            for lang in split_linguas:
                globs += " *-locale-%s" % lang
                for complementary_linguas in (
                        self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY')
                        or "").split():
                    globs += (" " + complementary_linguas) % lang

        if globs is None:
            return

        # we need to write the list of installed packages to a file because the
        # oe-pkgdata-util reads it from a file
        with tempfile.NamedTemporaryFile(
                mode="w+", prefix="installed-pkgs") as installed_pkgs:
            pkgs = self.list_installed()

            provided_pkgs = set()
            for pkg in pkgs.values():
                provided_pkgs |= set(pkg.get('provs', []))

            output = oe.utils.format_pkg_list(pkgs, "arch")
            installed_pkgs.write(output)
            installed_pkgs.flush()

            cmd = [
                "oe-pkgdata-util", "-p",
                self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
                globs
            ]
            exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
            if exclude:
                cmd.extend(['--exclude=' + '|'.join(exclude.split())])
            try:
                bb.note('Running %s' % cmd)
                complementary_pkgs = subprocess.check_output(
                    cmd, stderr=subprocess.STDOUT).decode("utf-8")
                complementary_pkgs = set(complementary_pkgs.split())
                # Skip anything already provided by an installed package.
                skip_pkgs = sorted(complementary_pkgs & provided_pkgs)
                install_pkgs = sorted(complementary_pkgs - provided_pkgs)
                bb.note(
                    "Installing complementary packages ... %s (skipped already provided packages %s)"
                    % (' '.join(install_pkgs), ' '.join(skip_pkgs)))
                self.install(install_pkgs, attempt_only=True)
            except subprocess.CalledProcessError as e:
                bb.fatal(
                    "Could not compute complementary packages list. Command "
                    "'%s' returned %d:\n%s" %
                    (' '.join(cmd), e.returncode, e.output.decode("utf-8")))

        # Optionally archive the installed locales and drop the binary
        # locale packages.
        if self.d.getVar('IMAGE_LOCALES_ARCHIVE') == '1':
            target_arch = self.d.getVar('TARGET_ARCH')
            localedir = oe.path.join(self.target_rootfs,
                                     self.d.getVar("libdir"), "locale")
            if os.path.exists(localedir) and os.listdir(localedir):
                generate_locale_archive(self.d, self.target_rootfs,
                                        target_arch, localedir)
                # And now delete the binary locales
                self.remove(
                    fnmatch.filter(self.list_installed(),
                                   "glibc-binary-localedata-*"), False)
Пример #34
0
def create_packages_dir(d, subrepo_dir, deploydir, taskname,
                        filterbydependencies):
    """
    Go through our do_package_write_X dependencies and hardlink the packages we depend
    upon into the repo directory. This prevents us seeing other packages that may
    have been built that we don't depend upon and also packages for architectures we don't
    support.
    """
    import errno

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    mytaskname = d.getVar("BB_RUNTASK")
    pn = d.getVar("PN")
    seendirs = set()
    multilibs = {}

    bb.utils.remove(subrepo_dir, recurse=True)
    bb.utils.mkdirhier(subrepo_dir)

    # Detect bitbake -b usage
    nodeps = d.getVar("BB_LIMITEDDEPS") or False
    if nodeps or not filterbydependencies:
        oe.path.symlink(deploydir, subrepo_dir, True)
        return

    # Locate our own task entry in the dependency data so we can walk
    # outwards from it.
    start = None
    for dep in taskdepdata:
        data = taskdepdata[dep]
        if data[1] == mytaskname and data[0] == pn:
            start = dep
            break
    if start is None:
        bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
    pkgdeps = set()
    start = [start]
    seen = set(start)
    # Support direct dependencies (do_rootfs -> do_package_write_X)
    # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X)
    while start:
        # 'pending' rather than 'next' -- avoids shadowing the builtin next()
        pending = []
        for dep2 in start:
            for dep in taskdepdata[dep2][3]:
                if taskdepdata[dep][0] != pn:
                    if "do_" + taskname in dep:
                        pkgdeps.add(dep)
                elif dep not in seen:
                    pending.append(dep)
                    seen.add(dep)
        start = pending

    # deploydir is loop-invariant; normalize it once instead of per manifest line.
    deploydir = os.path.normpath(deploydir)
    for dep in pkgdeps:
        c = taskdepdata[dep][0]
        manifest, d2 = oe.sstatesig.find_sstate_manifest(
            c, taskdepdata[dep][2], taskname, d, multilibs)
        if not manifest:
            bb.fatal("No manifest generated from: %s in %s" %
                     (c, taskdepdata[dep][2]))
        if not os.path.exists(manifest):
            continue
        with open(manifest, "r") as f:
            for l in f:
                l = l.strip()
                if bb.data.inherits_class('packagefeed-stability', d):
                    dest = l.replace(deploydir + "-prediff", "")
                else:
                    dest = l.replace(deploydir, "")
                dest = subrepo_dir + dest
                if l.endswith("/"):
                    if dest not in seendirs:
                        bb.utils.mkdirhier(dest)
                        seendirs.add(dest)
                    continue
                # Try to hardlink the file, copy if that fails
                # (e.g. EXDEV: source and destination on different devices)
                destdir = os.path.dirname(dest)
                if destdir not in seendirs:
                    bb.utils.mkdirhier(destdir)
                    seendirs.add(destdir)
                try:
                    os.link(l, dest)
                except OSError as err:
                    if err.errno == errno.EXDEV:
                        bb.utils.copyfile(l, dest)
                    else:
                        raise
Пример #35
0
    def check_stamps(self):
        """Classify every runqueue task by stamp-file currency.

        A task is 'current' when its stamp file exists, it is not a
        'nostamp' task, and its stamp is at least as new as the stamps of
        all the dependencies it shares a file with (or of all dependencies
        when the stamp policy requests a full dependency tree).

        Returns the list of current task ids; aborts via bb.fatal() if any
        task could not be classified (internal error).
        """
        unchecked = {}
        current = []
        notcurrent = []
        buildable = []

        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True
            stampwhitelist = []
            if self.stamppolicy == "whitelist":
                # Fixed: was 'self.self.stampfnwhitelist', which raised
                # AttributeError whenever the whitelist policy was used.
                stampwhitelist = self.stampfnwhitelist

        for task in range(len(self.runq_fnid)):
            unchecked[task] = ""
            if len(self.runq_depends[task]) == 0:
                buildable.append(task)

        def check_buildable(self, task, buildable):
            # A reverse dependency becomes checkable once all of its own
            # dependencies have been classified (no longer in 'unchecked').
            for revdep in self.runq_revdeps[task]:
                alldeps = 1
                for dep in self.runq_depends[revdep]:
                    if dep in unchecked:
                        alldeps = 0
                if alldeps == 1:
                    if revdep in unchecked:
                        buildable.append(revdep)

        for task in range(len(self.runq_fnid)):
            if task not in unchecked:
                continue
            fn = self.taskData.fn_index[self.runq_fnid[task]]
            taskname = self.runq_task[task]
            stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
            # If the stamp is missing its not current
            if not os.access(stampfile, os.F_OK):
                del unchecked[task]
                notcurrent.append(task)
                check_buildable(self, task, buildable)
                continue
            # If its a 'nostamp' task, it's not current
            taskdep = self.dataCache.task_deps[fn]
            if 'nostamp' in taskdep and task in taskdep['nostamp']:
                del unchecked[task]
                notcurrent.append(task)
                check_buildable(self, task, buildable)
                continue

        # Breadth-first sweep: classify tasks whose dependencies are all
        # classified, which in turn may make their reverse deps checkable.
        while (len(buildable) > 0):
            nextbuildable = []
            for task in buildable:
                if task in unchecked:
                    fn = self.taskData.fn_index[self.runq_fnid[task]]
                    taskname = self.runq_task[task]
                    stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
                    iscurrent = True

                    t1 = os.stat(stampfile)[stat.ST_MTIME]
                    for dep in self.runq_depends[task]:
                        if iscurrent:
                            fn2 = self.taskData.fn_index[self.runq_fnid[dep]]
                            taskname2 = self.runq_task[dep]
                            stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2],
                                                    taskname2)
                            if fn == fn2 or (fulldeptree
                                             and fn2 not in stampwhitelist):
                                if dep in notcurrent:
                                    iscurrent = False
                                else:
                                    t2 = os.stat(stampfile2)[stat.ST_MTIME]
                                    if t1 < t2:
                                        iscurrent = False
                    del unchecked[task]
                    if iscurrent:
                        current.append(task)
                    else:
                        notcurrent.append(task)

                check_buildable(self, task, nextbuildable)

            buildable = nextbuildable

        if len(unchecked) > 0:
            bb.fatal("check_stamps fatal internal error")
        return current
Пример #36
0
def bitbake_main(configParams, configuration):
    """Top-level BitBake entry point: start or connect to a server and run the UI.

    Validates the environment and the mutually-exclusive server options,
    starts (or connects to) a cooker server, then hands control to the
    selected UI module.  Returns the UI's exit code (0 on success).
    Raises BBMainException for invalid option combinations.
    """

    # Python multiprocessing requires /dev/shm on Linux
    if sys.platform.startswith('linux') and not os.access(
            '/dev/shm', os.W_OK | os.X_OK):
        raise BBMainException(
            "FATAL: /dev/shm does not exist or is not writable")

    # Unbuffer stdout to avoid log truncation in the event
    # of an unorderly exit as well as to provide timely
    # updates to log files for use with tail
    try:
        if sys.stdout.name == '<stdout>':
            # NOTE: buffering=0 is only valid for binary streams on
            # Python 3; the except below tolerates that failure.
            sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed here.
        pass

    configuration.setConfigParameters(configParams)

    ui_module = import_extension_module(bb.ui, configParams.ui, 'main')
    servermodule = import_extension_module(bb.server, configParams.servertype,
                                           'BitBakeServer')

    # --server-only requires the xmlrpc server type plus a bind address,
    # and conflicts with connecting to a remote server.
    if configParams.server_only:
        if configParams.servertype != "xmlrpc":
            raise BBMainException(
                "FATAL: If '--server-only' is defined, we must set the "
                "servertype as 'xmlrpc'.\n")
        if not configParams.bind:
            raise BBMainException(
                "FATAL: The '--server-only' option requires a name/address "
                "to bind to with the -B option.\n")
        if configParams.remote_server:
            raise BBMainException("FATAL: The '--server-only' option conflicts with %s.\n" %
                                  ("the BBSERVER environment variable" if "BBSERVER" in os.environ \
                                   else "the '--remote-server' option" ))

    if configParams.bind and configParams.servertype != "xmlrpc":
        raise BBMainException("FATAL: If '-B' or '--bind' is defined, we must "
                              "set the servertype as 'xmlrpc'.\n")

    if configParams.remote_server and configParams.servertype != "xmlrpc":
        raise BBMainException(
            "FATAL: If '--remote-server' is defined, we must "
            "set the servertype as 'xmlrpc'.\n")

    if configParams.observe_only and (not configParams.remote_server
                                      or configParams.bind):
        raise BBMainException(
            "FATAL: '--observe-only' can only be used by UI clients "
            "connecting to a server.\n")

    if configParams.kill_server and not configParams.remote_server:
        raise BBMainException(
            "FATAL: '--kill-server' can only be used to terminate a remote server"
        )

    # BBDEBUG in the environment can only raise the debug level, never lower it.
    if "BBDEBUG" in os.environ:
        level = int(os.environ["BBDEBUG"])
        if level > configuration.debug:
            configuration.debug = level

    bb.msg.init_msgconfig(configParams.verbose, configuration.debug,
                          configuration.debug_domains)

    # Ensure logging messages get sent to the UI as events
    handler = bb.event.LogHandler()
    if not configParams.status_only:
        # In status only mode there are no logs and no UI
        logger.addHandler(handler)

    # Clear away any spurious environment variables while we stoke up the cooker
    cleanedvars = bb.utils.clean_environment()

    featureset = []
    if not configParams.server_only:
        # Collect the feature set for the UI
        featureset = getattr(ui_module, "featureSet", [])

    if configParams.server_only:
        for param in ('prefile', 'postfile'):
            value = getattr(configParams, param)
            if value:
                setattr(configuration, "%s_server" % param, value)
                param = "%s_server" % param

    if not configParams.remote_server:
        # we start a server with a given configuration
        server = start_server(servermodule, configParams, configuration,
                              featureset)
        bb.event.ui_queue = []
    else:
        # we start a stub server that is actually a XMLRPClient that connects to a real server
        server = servermodule.BitBakeXMLRPCClient(configParams.observe_only,
                                                  configParams.xmlrpctoken)
        server.saveConnectionDetails(configParams.remote_server)

    if not configParams.server_only:
        try:
            server_connection = server.establishConnection(featureset)
        except Exception as e:
            bb.fatal("Could not connect to server %s: %s" %
                     (configParams.remote_server, str(e)))

        if configParams.kill_server:
            server_connection.connection.terminateServer()
            bb.event.ui_queue = []
            return 0

        server_connection.setupEventQueue()

        # Restore the environment in case the UI needs it
        for k in cleanedvars:
            os.environ[k] = cleanedvars[k]

        logger.removeHandler(handler)

        if configParams.status_only:
            server_connection.terminate()
            return 0

        try:
            return ui_module.main(server_connection.connection,
                                  server_connection.events, configParams)
        finally:
            bb.event.ui_queue = []
            server_connection.terminate()
    else:
        print("Bitbake server address: %s, server port: %s" %
              (server.serverImpl.host, server.serverImpl.port))
        return 0
    # The former trailing 'return 1' was unreachable (both branches above
    # return) and has been removed.
Пример #37
0
def _exec_task(fn, task, d, quieterr):
    """Execute a BB 'task'

    Execution of a task involves a bit more setup than executing a function,
    running it with its own local metadata, and with some useful variables set.

    Arguments:
        fn       -- recipe filename the task belongs to
        task     -- task name (e.g. 'do_compile')
        d        -- datastore the task-local metadata is derived from
        quieterr -- when true, fire TaskFailedSilent instead of TaskFailed

    Returns 0 on success, 1 on failure or when the task does not exist.
    """
    if not d.getVarFlag(task, 'task', False):
        event.fire(TaskInvalid(task, d), d)
        logger.error("No such task: %s" % task)
        return 1

    logger.debug("Executing task %s", task)

    localdata = _task_data(fn, task, d)
    tempdir = localdata.getVar('T')
    if not tempdir:
        bb.fatal("T variable not set, unable to build")

    # Change nice level if we're asked to
    nice = localdata.getVar("BB_TASK_NICE_LEVEL")
    if nice:
        # os.nice() applies a relative increment, so convert the requested
        # absolute level into a delta from the current one.
        curnice = os.nice(0)
        nice = int(nice) - curnice
        newnice = os.nice(nice)
        logger.debug("Renice to %s " % newnice)
    ionice = localdata.getVar("BB_TASK_IONICE_LEVEL")
    if ionice:
        try:
            # Expected format "class.priority", e.g. "2.7"
            cls, prio = ionice.split(".", 1)
            bb.utils.ioprio_set(os.getpid(), int(cls), int(prio))
        except:
            bb.warn("Invalid ionice level %s" % ionice)

    bb.utils.mkdirhier(tempdir)

    # Determine the logfile to generate
    logfmt = localdata.getVar('BB_LOGFMT') or 'log.{task}.{pid}'
    logbase = logfmt.format(task=task, pid=os.getpid())

    # Document the order of the tasks...
    logorder = os.path.join(tempdir, 'log.task_order')
    try:
        with open(logorder, 'a') as logorderfile:
            logorderfile.write('{0} ({1}): {2}\n'.format(
                task, os.getpid(), logbase))
    except OSError:
        # Best-effort: failing to record the task order is not fatal.
        logger.exception("Opening log file '%s'", logorder)
        pass

    # Setup the courtesy link to the logfn
    loglink = os.path.join(tempdir, 'log.{0}'.format(task))
    logfn = os.path.join(tempdir, logbase)
    if loglink:
        bb.utils.remove(loglink)

        try:
            os.symlink(logbase, loglink)
        except OSError:
            pass

    prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True)
    postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True)

    class ErrorCheckHandler(logging.Handler):
        # Tracks whether any ERROR-level record was logged while the task
        # ran, so TaskFailed can report if the error was already printed.
        def __init__(self):
            self.triggered = False
            logging.Handler.__init__(self, logging.ERROR)

        def emit(self, record):
            # Records marked 'forcelog' clear the flag instead of setting it.
            if getattr(record, 'forcelog', False):
                self.triggered = False
            else:
                self.triggered = True

    # Handle logfiles
    try:
        bb.utils.mkdirhier(os.path.dirname(logfn))
        logfile = open(logfn, 'w')
    except OSError:
        # NOTE(review): if this open() fails, 'logfile' stays unbound and
        # the dup2 below raises NameError -- pre-existing behaviour.
        logger.exception("Opening log file '%s'", logfn)
        pass

    # Dup the existing fds so we dont lose them
    osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
    oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
    ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]

    # Replace those fds with our own: stdin from /dev/null, stdout and
    # stderr into the task logfile.
    with open('/dev/null', 'r') as si:
        os.dup2(si.fileno(), osi[1])
    os.dup2(logfile.fileno(), oso[1])
    os.dup2(logfile.fileno(), ose[1])

    # Ensure Python logging goes to the logfile
    handler = logging.StreamHandler(logfile)
    handler.setFormatter(logformatter)
    # Always enable full debug output into task logfiles
    handler.setLevel(logging.DEBUG - 2)
    bblogger.addHandler(handler)

    errchk = ErrorCheckHandler()
    bblogger.addHandler(errchk)

    localdata.setVar('BB_LOGFILE', logfn)
    localdata.setVar('BB_RUNTASK', task)
    localdata.setVar('BB_TASK_LOGGER', bblogger)

    flags = localdata.getVarFlags(task)

    try:
        try:
            event.fire(TaskStarted(task, fn, logfn, flags, localdata),
                       localdata)

            # Run prefuncs, then the task itself, then postfuncs, all in
            # the task-local datastore.
            for func in (prefuncs or '').split():
                exec_func(func, localdata)
            exec_func(task, localdata)
            for func in (postfuncs or '').split():
                exec_func(func, localdata)
        finally:
            # Need to flush and close the logs before sending events where the
            # UI may try to look at the logs.
            sys.stdout.flush()
            sys.stderr.flush()

            bblogger.removeHandler(handler)

            # Restore the backup fds
            os.dup2(osi[0], osi[1])
            os.dup2(oso[0], oso[1])
            os.dup2(ose[0], ose[1])

            # Close the backup fds
            os.close(osi[0])
            os.close(oso[0])
            os.close(ose[0])

            logfile.close()
            if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
                logger.debug2("Zero size logfn %s, removing", logfn)
                bb.utils.remove(logfn)
                bb.utils.remove(loglink)
    except (Exception, SystemExit) as exc:
        handled = False
        if isinstance(exc, bb.BBHandledException):
            handled = True

        if quieterr:
            if not handled:
                logger.warning(repr(exc))
            event.fire(TaskFailedSilent(task, fn, logfn, localdata), localdata)
        else:
            errprinted = errchk.triggered
            # If the output is already on stdout, we've printed the information in the
            # logs once already so don't duplicate
            if verboseStdoutLogging or handled:
                errprinted = True
            if not handled:
                logger.error(repr(exc))
            event.fire(TaskFailed(task, fn, logfn, localdata, errprinted),
                       localdata)
        return 1

    event.fire(TaskSucceeded(task, fn, logfn, localdata), localdata)

    # Write the stamp file unless the task opts out ('nostamp') or manages
    # its own stamp ('selfstamp').
    if not localdata.getVarFlag(task, 'nostamp',
                                False) and not localdata.getVarFlag(
                                    task, 'selfstamp', False):
        make_stamp(task, localdata)

    return 0
 def get_image_fstype(self, d):
     """Return the Target Controller supported image fstype for this build.

     Aborts via bb.fatal() when IMAGE_FSTYPES contains no supported fstype.
     """
     fstype = self.match_image_fstype(d)
     if not fstype:
         bb.fatal("IMAGE_FSTYPES should contain a Target Controller supported image fstype: %s " % ', '.join(map(str, self.supported_image_fstypes)))
     return fstype
Пример #39
0
    def run_intercepts(self, populate_sdk=None):
        """Run the executable postinstall intercept scripts in intercepts_dir.

        populate_sdk: None for a normal rootfs, or 'host'/'target' when
        called while populating an SDK -- this selects how a failing
        intercept is treated (fatal, noted, or postponed to first boot).
        Sets D and STAGING_DIR_NATIVE in the environment for the scripts.
        """
        intercepts_dir = self.intercepts_dir

        bb.note("Running intercept scripts:")
        os.environ['D'] = self.target_rootfs
        os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
        for script in os.listdir(intercepts_dir):
            script_full = os.path.join(intercepts_dir, script)

            # Skip the helper itself and anything not executable.
            if script == "postinst_intercept" or not os.access(
                    script_full, os.X_OK):
                continue

            # we do not want to run any multilib variant of this
            if script.startswith("delay_to_first_boot"):
                self._postpone_to_first_boot(script_full)
                continue

            # mingw32 SDK hosts lack the wine support needed to run intercepts.
            if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
                bb.note(
                    "The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
                    % (script, self.d.getVar('T'),
                       self.d.getVar('BB_CURRENTTASK')))
                continue

            bb.note("> Executing %s intercept ..." % script)

            try:
                output = subprocess.check_output(script_full,
                                                 stderr=subprocess.STDOUT)
                if output: bb.note(output.decode("utf-8"))
            except subprocess.CalledProcessError as e:
                # Failure handling depends on context:
                #  - SDK host: always fatal
                #  - SDK target: fatal unless qemu usermode is unavailable
                #  - rootfs: postpone to first boot if qemu usermode is
                #    unavailable, otherwise fatal
                bb.note("Exit code %d. Output:\n%s" %
                        (e.returncode, e.output.decode("utf-8")))
                if populate_sdk == 'host':
                    bb.fatal(
                        "The postinstall intercept hook '%s' failed, details in %s/log.do_%s"
                        % (script, self.d.getVar('T'),
                           self.d.getVar('BB_CURRENTTASK')))
                elif populate_sdk == 'target':
                    if "qemuwrapper: qemu usermode is not supported" in e.output.decode(
                            "utf-8"):
                        bb.note(
                            "The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
                            % (script, self.d.getVar('T'),
                               self.d.getVar('BB_CURRENTTASK')))
                    else:
                        bb.fatal(
                            "The postinstall intercept hook '%s' failed, details in %s/log.do_%s"
                            % (script, self.d.getVar('T'),
                               self.d.getVar('BB_CURRENTTASK')))
                else:
                    if "qemuwrapper: qemu usermode is not supported" in e.output.decode(
                            "utf-8"):
                        bb.note(
                            "The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
                            % (script, self.d.getVar('T'),
                               self.d.getVar('BB_CURRENTTASK')))
                        self._postpone_to_first_boot(script_full)
                    else:
                        bb.fatal(
                            "The postinstall intercept hook '%s' failed, details in %s/log.do_%s"
                            % (script, self.d.getVar('T'),
                               self.d.getVar('BB_CURRENTTASK')))
Пример #40
0
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def unpack_file(file, destdir, parameters, env=None):
    import subprocess, shutil

    try:
        dos = to_boolean(parameters.get("dos"), False)
    except ValueError, exc:
        bb.fatal("Invalid value for 'dos' parameter for %s: %s" %
                 (filename, parameters.get("dos")))

    try:
        unpack = to_boolean(parameters.get("unpack"), True)
    except ValueError, exc:
        bb.fatal("Invalid value for 'unpack' parameter for %s: %s" %
                 (filename, parameters.get("unpack")))

    dest = os.path.join(destdir, os.path.basename(file))
    if os.path.exists(dest):
        if os.path.samefile(file, dest):
            return True

    cmd = None
    if unpack:
        if file.endswith('.tar'):
            cmd = 'tar x --no-same-owner -f %s' % file
        elif file.endswith('.tgz') or file.endswith(
                '.tar.gz') or file.endswith('.tar.Z'):
            cmd = 'tar xz --no-same-owner -f %s' % file
        elif file.endswith('.tbz') or file.endswith('.tbz2') or file.endswith(
                '.tar.bz2'):
Пример #41
0
def is_patch(filename, parameters):
    """Validate the 'apply' fetch parameter for *filename*.

    Aborts via bb.fatal() when the parameter is not a valid boolean.
    NOTE(review): as captured here the function ends without a return --
    presumably the original returned 'apply'; confirm against upstream
    before relying on a return value.
    """
    try:
        apply = to_boolean(parameters.get("apply"))
    except ValueError:
        # Modernized from Python 2's 'except ValueError, exc' syntax;
        # the bound exception was unused.
        bb.fatal("Invalid value for 'apply' parameter for %s: %s" %
                 (filename, parameters.get("apply")))
Пример #42
0
def setup_bitbake(configParams, configuration, extrafeatures=None):
    """Start a local bitbake server or connect to a running/remote one.

    Returns a (server_connection, ui_module) tuple.  Both are None when
    --status-only/--kill-server find no running server, or after a
    --kill-server request has been honoured.
    """
    # Ensure logging messages get sent to the UI as events
    handler = bb.event.LogHandler()
    if not configParams.status_only:
        # In status only mode there are no logs and no UI
        logger.addHandler(handler)

    # Clear away any spurious environment variables while we stoke up the cooker
    cleanedvars = bb.utils.clean_environment()

    if configParams.server_only:
        featureset = []
        ui_module = None
    else:
        ui_module = import_extension_module(bb.ui, configParams.ui, 'main')
        # Collect the feature set for the UI
        featureset = getattr(ui_module, "featureSet", [])

    if extrafeatures:
        for feature in extrafeatures:
            # 'feature not in' rather than 'not feature in' (PEP 8 idiom)
            if feature not in featureset:
                featureset.append(feature)

    server_connection = None

    if configParams.remote_server:
        # Connect to a remote XMLRPC server
        server_connection = bb.server.xmlrpcclient.connectXMLRPC(configParams.remote_server, featureset,
                                                                 configParams.observe_only, configParams.xmlrpctoken)
    else:
        # Up to 8 attempts to start a local server or attach to an
        # existing one via its unix socket.
        retries = 8
        while retries:
            try:
                topdir, lock = lockBitbake()
                sockname = topdir + "/bitbake.sock"
                if lock:
                    if configParams.status_only or configParams.kill_server:
                        logger.info("bitbake server is not running.")
                        lock.close()
                        return None, None
                    # we start a server with a given configuration
                    logger.info("Starting bitbake server...")
                    # Clear the event queue since we already displayed messages
                    bb.event.ui_queue = []
                    server = bb.server.process.BitBakeServer(lock, sockname, configuration, featureset)

                else:
                    logger.info("Reconnecting to bitbake server...")
                    if not os.path.exists(sockname):
                        print("Previous bitbake instance shutting down?, waiting to retry...")
                        i = 0
                        lock = None
                        # Wait for 5s or until we can get the lock
                        while not lock and i < 50:
                            time.sleep(0.1)
                            _, lock = lockBitbake()
                            i += 1
                        if lock:
                            bb.utils.unlockfile(lock)
                        raise bb.server.process.ProcessTimeout("Bitbake still shutting down as socket exists but no lock?")
                if not configParams.server_only:
                    try:
                        server_connection = bb.server.process.connectProcessServer(sockname, featureset)
                    except EOFError:
                        # The server may have been shutting down but not closed the socket yet. If that happened,
                        # ignore it.
                        pass

                if server_connection or configParams.server_only:
                    break
            except BBMainFatal:
                raise
            except (Exception, bb.server.process.ProcessTimeout) as e:
                if not retries:
                    raise
                retries -= 1
                if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError)):
                    logger.info("Retrying server connection...")
                else:
                    logger.info("Retrying server connection... (%s)" % traceback.format_exc())
            if not retries:
                bb.fatal("Unable to connect to bitbake server, or start one")
            if retries < 5:
                # Back off once the first few quick retries have failed.
                time.sleep(5)

    if configParams.kill_server:
        server_connection.connection.terminateServer()
        server_connection.terminate()
        bb.event.ui_queue = []
        logger.info("Terminated bitbake server.")
        return None, None

    # Restore the environment in case the UI needs it
    for k in cleanedvars:
        os.environ[k] = cleanedvars[k]

    logger.removeHandler(handler)

    return server_connection, ui_module
Пример #43
0
def _exec_task(fn, task, d, quieterr):
    """Execute a BB 'task'

    Execution of a task involves a bit more setup than executing a function,
    running it with its own local metadata, and with some useful variables set.
    Returns 0 on success, 1 on failure or when the task does not exist.
    """
    if not data.getVarFlag(task, 'task', d):
        event.fire(TaskInvalid(task, d), d)
        logger.error("No such task: %s" % task)
        return 1

    logger.debug(1, "Executing task %s", task)

    localdata = _task_data(fn, task, d)
    tempdir = localdata.getVar('T', True)
    if not tempdir:
        bb.fatal("T variable not set, unable to build")

    bb.utils.mkdirhier(tempdir)

    # Determine the logfile to generate
    logfmt = localdata.getVar('BB_LOGFMT', True) or 'log.{task}.{pid}'
    logbase = logfmt.format(task=task, pid=os.getpid())

    # Document the order of the tasks...
    logorder = os.path.join(tempdir, 'log.task_order')
    try:
        # Fixed: the write/close used to happen *after* the except block,
        # so a failed open() caused a NameError on the unbound file object.
        # Also replaced Python 2's file() builtin with open().
        with open(logorder, 'a') as logorderfile:
            logorderfile.write('{0} ({1}): {2}\n'.format(task, os.getpid(), logbase))
    except OSError:
        logger.exception("Opening log file '%s'", logorder)
        pass

    # Setup the courtesy link to the logfn
    loglink = os.path.join(tempdir, 'log.{0}'.format(task))
    logfn = os.path.join(tempdir, logbase)
    if loglink:
        bb.utils.remove(loglink)

        try:
            os.symlink(logbase, loglink)
        except OSError:
            pass

    prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True)
    postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True)

    class ErrorCheckHandler(logging.Handler):
        # Tracks whether any ERROR-level record was logged while the task
        # ran, so TaskFailed can report if the error was already printed.
        def __init__(self):
            self.triggered = False
            logging.Handler.__init__(self, logging.ERROR)
        def emit(self, record):
            self.triggered = True

    # Handle logfiles
    si = open('/dev/null', 'r')
    try:
        bb.utils.mkdirhier(os.path.dirname(logfn))
        logfile = open(logfn, 'w')
    except OSError:
        # NOTE(review): if this open() fails, 'logfile' stays unbound and
        # the dup2 below raises NameError -- pre-existing behaviour kept.
        logger.exception("Opening log file '%s'", logfn)
        pass

    # Dup the existing fds so we dont lose them
    osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
    oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
    ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]

    # Replace those fds with our own: stdin from /dev/null, stdout and
    # stderr into the task logfile.
    os.dup2(si.fileno(), osi[1])
    os.dup2(logfile.fileno(), oso[1])
    os.dup2(logfile.fileno(), ose[1])

    # Ensure python logging goes to the logfile
    handler = logging.StreamHandler(logfile)
    handler.setFormatter(logformatter)
    # Always enable full debug output into task logfiles
    handler.setLevel(logging.DEBUG - 2)
    bblogger.addHandler(handler)

    errchk = ErrorCheckHandler()
    bblogger.addHandler(errchk)

    localdata.setVar('BB_LOGFILE', logfn)
    localdata.setVar('BB_RUNTASK', task)

    event.fire(TaskStarted(task, localdata), localdata)
    try:
        for func in (prefuncs or '').split():
            exec_func(func, localdata)
        exec_func(task, localdata)
        for func in (postfuncs or '').split():
            exec_func(func, localdata)
    except FuncFailed as exc:
        if quieterr:
            event.fire(TaskFailedSilent(task, logfn, localdata), localdata)
        else:
            errprinted = errchk.triggered
            logger.error(str(exc))
            event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata)
        return 1
    finally:
        sys.stdout.flush()
        sys.stderr.flush()

        bblogger.removeHandler(handler)

        # Restore the backup fds
        os.dup2(osi[0], osi[1])
        os.dup2(oso[0], oso[1])
        os.dup2(ose[0], ose[1])

        # Close the backup fds
        os.close(osi[0])
        os.close(oso[0])
        os.close(ose[0])
        si.close()

        logfile.close()
        if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
            logger.debug(2, "Zero size logfn %s, removing", logfn)
            bb.utils.remove(logfn)
            bb.utils.remove(loglink)
    event.fire(TaskSucceeded(task, localdata), localdata)

    # Write the stamp file unless the task opts out ('nostamp') or manages
    # its own stamp ('selfstamp').
    if not localdata.getVarFlag(task, 'nostamp') and not localdata.getVarFlag(task, 'selfstamp'):
        make_stamp(task, localdata)

    return 0
Пример #44
0
def feeder(lineno, s, fn, root, statements, eof=False):
    """Feed one line of a BitBake recipe/class file into the parser.

    Acts as a state machine over module-level globals: while inside a
    shell-style function body (``__infunc__``) or an inline ``def``
    (``__inpython__``) it accumulates lines into ``__body__``; otherwise it
    matches the line against the known top-level directives (function
    definitions, export_functions, addtask, deltask, addhandler, inherit)
    and delegates anything unrecognised to ``ConfHandler.feeder`` for
    variable-assignment handling.

    Args:
        lineno: line number within ``fn``, used in error messages.
        s: the raw line text (without trailing newline).
        fn: path of the file being parsed.
        root: base name passed to ``ast.handlePythonMethod``.
        statements: AST statement group collecting the parse results.
        eof: True on the final flush call at end of file.
    """
    global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, bb, __residue__, __classname__
    # Inside a shell-style function body: accumulate until the closing '}'.
    if __infunc__:
        if s == '}':
            __body__.append('')
            ast.handleMethod(statements, fn, lineno, __infunc__[0], __body__, __infunc__[3], __infunc__[4])
            __infunc__ = []
            __body__ = []
        else:
            __body__.append(s)
        return

    # Inside a 'def' python method: keep consuming lines while they still
    # look like part of the function (indented/continuation), flush otherwise.
    if __inpython__:
        m = __python_func_regexp__.match(s)
        if m and not eof:
            __body__.append(s)
            return
        else:
            ast.handlePythonMethod(statements, fn, lineno, __inpython__,
                                   root, __body__)
            __body__ = []
            __inpython__ = False

            if eof:
                return

    # A comment line appearing while a non-comment multiline continuation is
    # pending is ambiguous and rejected outright.
    if s and s[0] == '#':
        if len(__residue__) != 0 and __residue__[0][0] != "#":
            bb.fatal("There is a comment on line %s of file %s (%s) which is in the middle of a multiline expression.\nBitbake used to ignore these but no longer does so, please fix your metadata as errors are likely as a result of this change." % (lineno, fn, s))

    # Conversely, a comment continuation followed by a non-comment line is
    # also rejected.
    if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s or s[0] != "#"):
        bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))

    # Line continuation: stash the line (minus backslash) and wait for more.
    if s and s[-1] == '\\':
        __residue__.append(s[:-1])
        return

    # Join any pending continuation lines into one logical line.
    s = "".join(__residue__) + s
    __residue__ = []

    # Skip empty lines
    if s == '':
        return

    # Skip comments
    if s[0] == '#':
        return

    # Start of a shell-style (or 'python'/'fakeroot') function definition.
    m = __func_start_regexp__.match(s)
    if m:
        __infunc__ = [m.group("func") or "__anonymous", fn, lineno, m.group("py") is not None, m.group("fr") is not None]
        return

    # Start of a plain 'def' python method.
    m = __def_regexp__.match(s)
    if m:
        __body__.append(s)
        __inpython__ = m.group(1)

        return

    m = __export_func_regexp__.match(s)
    if m:
        ast.handleExportFuncs(statements, fn, lineno, m, __classname__)
        return

    m = __addtask_regexp__.match(s)
    if m:
        ast.handleAddTask(statements, fn, lineno, m)
        return

    m = __deltask_regexp__.match(s)
    if m:
        ast.handleDelTask(statements, fn, lineno, m)
        return

    m = __addhandler_regexp__.match(s)
    if m:
        ast.handleBBHandlers(statements, fn, lineno, m)
        return

    m = __inherit_regexp__.match(s)
    if m:
        ast.handleInherit(statements, fn, lineno, m)
        return

    # Not a directive: treat as a configuration/variable statement.
    return ConfHandler.feeder(lineno, s, fn, statements)
Пример #45
0
def failed_postinsts_abort(pkgs, log_path):
    """Abort the build because postinstall scriptlets failed at rootfs time.

    Args:
        pkgs: the package name(s) whose scriptlets failed.
        log_path: path to the log file containing the failure details.
    """
    message = """Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
then please place them into pkg_postinst_ontarget_${PN} ().
Deferring to first boot via 'exit 1' is no longer supported.
Details of the failure are in %s.""" % (pkgs, log_path)
    bb.fatal(message)
Пример #46
0
def feeder(lineno, s, fn, root, statements, eof=False):
    """Feed one line of a BitBake recipe/class file into the parser.

    Newer variant of the BBHandler feeder: same state machine over the
    module-level globals (function body accumulation, ``def`` accumulation,
    multiline continuation handling, directive dispatch) plus extra
    validation — it warns about tab indentation inside python functions and
    about malformed/ambiguous ``addtask`` expressions.

    Args:
        lineno: line number within ``fn``, used in diagnostics.
        s: the raw line text (without trailing newline).
        fn: path of the file being parsed.
        root: base name used in diagnostics and python-method handling.
        statements: AST statement group collecting the parse results.
        eof: True on the final flush call at end of file.
    """
    global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, bb, __residue__, __classname__

    # Check tabs in python functions:
    # - def py_funcname(): covered by __inpython__
    # - python(): covered by '__anonymous' == __infunc__[0]
    # - python funcname(): covered by __infunc__[3]
    if __inpython__ or (__infunc__ and
                        ('__anonymous' == __infunc__[0] or __infunc__[3])):
        tab = __python_tab_regexp__.match(s)
        if tab:
            bb.warn(
                'python should use 4 spaces indentation, but found tabs in %s, line %s'
                % (root, lineno))

    # Inside a shell-style function body: accumulate until the closing '}'.
    if __infunc__:
        if s == '}':
            __body__.append('')
            ast.handleMethod(statements, fn, lineno, __infunc__[0], __body__,
                             __infunc__[3], __infunc__[4])
            __infunc__ = []
            __body__ = []
        else:
            __body__.append(s)
        return

    # Inside a 'def' python method: keep consuming while the line still
    # looks like part of the function, flush it to the AST otherwise.
    if __inpython__:
        m = __python_func_regexp__.match(s)
        if m and not eof:
            __body__.append(s)
            return
        else:
            ast.handlePythonMethod(statements, fn, lineno, __inpython__, root,
                                   __body__)
            __body__ = []
            __inpython__ = False

            if eof:
                return

    # Reject a comment interrupting a pending non-comment continuation.
    if s and s[0] == '#':
        if len(__residue__) != 0 and __residue__[0][0] != "#":
            bb.fatal(
                "There is a comment on line %s of file %s:\n'''\n%s\n'''\nwhich is in the middle of a multiline expression. This syntax is invalid, please correct it."
                % (lineno, fn, s))

    # Reject a comment continuation followed by a non-comment line.
    if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s
                                                               or s[0] != "#"):
        bb.fatal(
            "There is a confusing multiline partially commented expression on line %s of file %s:\n%s\nPlease clarify whether this is all a comment or should be parsed."
            % (lineno - len(__residue__), fn, "\n".join(__residue__)))

    # Line continuation: stash the line (minus backslash) and wait for more.
    if s and s[-1] == '\\':
        __residue__.append(s[:-1])
        return

    # Join any pending continuation lines into one logical line.
    s = "".join(__residue__) + s
    __residue__ = []

    # Skip empty lines
    if s == '':
        return

    # Skip comments
    if s[0] == '#':
        return

    # Start of a shell-style (or 'python'/'fakeroot') function definition.
    m = __func_start_regexp__.match(s)
    if m:
        __infunc__ = [
            m.group("func") or "__anonymous", fn, lineno,
            m.group("py") is not None,
            m.group("fr") is not None
        ]
        return

    # Start of a plain 'def' python method.
    m = __def_regexp__.match(s)
    if m:
        __body__.append(s)
        __inpython__ = m.group(1)

        return

    m = __export_func_regexp__.match(s)
    if m:
        ast.handleExportFuncs(statements, fn, lineno, m, __classname__)
        return

    m = __addtask_regexp__.match(s)
    if m:
        if len(m.group().split()) == 2:
            # Check and warn for "addtask task1 task2"
            m2 = re.match(r"addtask\s+(?P<func>\w+)(?P<ignores>.*)", s)
            if m2 and m2.group('ignores'):
                logger.warning('addtask ignored: "%s"' % m2.group('ignores'))

        # Check and warn for "addtask task1 before task2 before task3", the
        # similar to "after"
        taskexpression = s.split()
        for word in ('before', 'after'):
            if taskexpression.count(word) > 1:
                logger.warning(
                    "addtask contained multiple '%s' keywords, only one is supported"
                    % word)

        # Check and warn for having task with exprssion as part of task name
        for te in taskexpression:
            if any(("%s_" % keyword) in te
                   for keyword in bb.data_smart.__setvar_keyword__):
                raise ParseError(
                    "Task name '%s' contains a keyword which is not recommended/supported.\nPlease rename the task not to include the keyword.\n%s"
                    %
                    (te,
                     ("\n".join(map(str, bb.data_smart.__setvar_keyword__)))),
                    fn)
        ast.handleAddTask(statements, fn, lineno, m)
        return

    m = __deltask_regexp__.match(s)
    if m:
        ast.handleDelTask(statements, fn, lineno, m)
        return

    m = __addhandler_regexp__.match(s)
    if m:
        ast.handleBBHandlers(statements, fn, lineno, m)
        return

    m = __inherit_regexp__.match(s)
    if m:
        ast.handleInherit(statements, fn, lineno, m)
        return

    # Not a directive: treat as a configuration/variable statement.
    return ConfHandler.feeder(lineno, s, fn, statements)
Пример #47
0
def check_build(d, event):
    """Check that every recipe pulled into the build is on a supported list.

    Intended to run on a dependency-tree event: ``event._depgraph`` mirrors
    the information dumped by ``bitbake -g``.  Non-native recipes that no
    SUPPORTED_RECIPES entry covers are collected and reported through the
    logger named by SUPPORTED_RECIPES_CHECK (one of note/warn/error/fatal).
    When SUPPORTED_RECIPES_SOURCES is set, also writes a CSV report of all
    recipe sources, annotated with their supported status.

    Args:
        d: the BitBake datastore.
        event: event object carrying a ``_depgraph`` attribute.
    """
    supported_recipes, files = load_supported_recipes(d)
    supported_recipes_check = d.getVar('SUPPORTED_RECIPES_CHECK', True)
    # An empty/unset SUPPORTED_RECIPES_CHECK disables the check entirely.
    if not supported_recipes_check:
        return

    isnative = IsNative(d)
    valid = ('note', 'warn', 'error', 'fatal')
    if supported_recipes_check not in valid:
        bb.fatal(
            'SUPPORTED_RECIPES_CHECK must be set to one of %s, currently is: %s'
            % ('/'.join(valid), supported_recipes_check))
    # Resolve the severity name to the matching bb.note/bb.warn/... callable.
    logger = bb.__dict__[supported_recipes_check]

    # See bitbake/lib/bb/cooker.py buildDependTree() for the content of the depgraph hash.
    # Basically it mirrors the information dumped by "bitbake -g".
    depgraph = event._depgraph
    # import pprint
    # bb.note('depgraph: %s' % pprint.pformat(depgraph))

    dirname = d.getVar('SUPPORTED_RECIPES_SOURCES_DIR', True)
    report_sources = d.getVar('SUPPORTED_RECIPES_SOURCES', True)

    unsupported = {}
    sources = []
    for pn, pndata in depgraph['pn'].items():
        # We only care about recipes compiled for the target.
        # Most native ones can be detected reliably because they inherit native.bbclass,
        # but some special cases have to be hard-coded.
        # Image recipes also do not matter.
        if not isnative(pn, pndata):
            filename = pndata['filename']
            collection = bb.utils.get_file_layer(filename, d)
            supportedby = supported_recipes.recipe_supportedby(pn, collection)
            if not supportedby:
                unsupported[pn] = collection
            if report_sources:
                # NOTE(review): 'pn + filename' concatenates the recipe name
                # with the full recipe path — presumably matching how the dump
                # files were named elsewhere; confirm against the producer.
                dumpfile = os.path.join(dirname, pn + filename)
                with open(dumpfile) as f:
                    reader = csv.reader(f)
                    for row in reader:
                        # Map each CSV row onto the named SOURCE_FIELDS.
                        row_hash = {
                            f: row[i]
                            for i, f in enumerate(SOURCE_FIELDS)
                        }
                        row_hash['supported'] = 'yes (%s)' % ' '.join(supportedby) \
                                                if supportedby else 'no'
                        sources.append(row_hash)

    if report_sources:
        with open(report_sources, 'w') as f:
            fields = SOURCE_FIELDS[:]
            # Insert after 'collection'.
            fields.insert(fields.index('collection') + 1, 'supported')
            # Load report-extension plugins from the supportedrecipesreport
            # package; each Columns subclass may add columns to the CSV.
            extensions = []
            for importer, modname, ispkg in pkgutil.iter_modules(
                    supportedrecipesreport.__path__):
                module = __import__('supportedrecipesreport.' + modname,
                                    fromlist="dummy")
                for name, clazz in inspect.getmembers(module, inspect.isclass):
                    if issubclass(clazz, Columns):
                        extensions.append(clazz(d, sources))
            for e in extensions:
                e.extend_header(fields)
            writer = csv.DictWriter(f, fields)
            writer.writeheader()
            for row in sources:
                for e in extensions:
                    e.extend_row(row)
            # Sort by first column, then second column, etc., after extending all rows.
            for row in sorted(sources,
                              key=lambda r: [r.get(f, None) for f in fields]):
                writer.writerow(row)
        bb.note('Created SUPPORTED_RECIPES_SOURCES = %s file.' %
                report_sources)

    if unsupported:
        max_lines = int(
            d.getVar('SUPPORTED_RECIPES_CHECK_DEPENDENCY_LINES', True))
        dependencies, truncated = dump_dependencies(depgraph, max_lines,
                                                    unsupported)

        # Assemble a multi-part human-readable report, then emit it in one
        # call through the configured logger (which may be bb.fatal).
        output = []
        output.append(
            'The following unsupported recipes are required for the build:')
        output.extend([
            '  ' + line
            for line in dump_unsupported(unsupported, supported_recipes)
        ])
        output.append('''
Each unsupported recipe is identified by the recipe name and the collection
in which it occurs and has to be marked as supported (see below) using that
format. Typically each layer has exactly one collection.''')
        if dependencies:
            # Add the optional dependency dump.
            output.append('''
Here are the dependency chains (including DEPENDS and RDEPENDS)
which include one or more of the unsupported recipes. -> means "depends on"
and * marks unsupported recipes:''')
            for line in dependencies:
                line_entries = [('*' if pn in unsupported else '') + pn
                                for pn in line]
                output.append('  ' + ' -> '.join(line_entries))
            if truncated:
                output.append('''...
Output truncated, to see more increase SUPPORTED_RECIPES_CHECK_DEPENDENCY_LINES (currently %d).'''
                              % max_lines)

        output.append('''
To avoid this message, several options exist:
* Check the dependency chain(s) to see why a recipe gets pulled in and perhaps
  change recipe configurations or image content to avoid pulling in undesired
  components.
* If the recipe is supported in some other layer, disable the unsupported one
  with BBMASK.
* Add the unsupported recipes to one of the following files:
  %s
  Regular expressions are supported on both sides of the @ separator.
* Create a new file which lists the unsupported recipes and extend SUPPORTED_RECIPES:
    SUPPORTED_RECIPES_append = " <path>/recipes-supported-by-me.txt"
  See meta-ostro/conf/layer.conf and ostro.conf for an example how the path can be
  derived automatically. The expectation is that SUPPORTED_RECIPES gets set in
  distro configuration files, depending on the support provided by the distro
  creator.
* Disable the check with SUPPORTED_RECIPES_CHECK = "" in local.conf.
  'bitbake -g <build target>' produces .dot files showing these dependencies.
''' % '\n  '.join(files))

        logger('\n'.join(output))
Пример #48
0
    def __init__(self, d):
        """Initialise the hardware test target from the datastore *d*.

        Resolves the target/server IP addresses, locates the rootfs tarball
        and kernel image in DEPLOY_DIR_IMAGE, and prepares the optional
        power- and serial-control commands (including the original user
        environment they may need for ssh).  Calls bb.fatal on any missing
        prerequisite.
        """
        super(MasterImageHardwareTarget, self).__init__(d)

        # target ip (mandatory; may carry an optional ":port" suffix)
        addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal(
            'Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.'
        )
        self.ip = addr.split(":")[0]
        try:
            self.port = addr.split(":")[1]
        except IndexError:
            # No ":port" suffix given — leave the port unset.
            self.port = None
        bb.note("Target IP: %s" % self.ip)
        self.server_ip = d.getVar("TEST_SERVER_IP", True)
        if not self.server_ip:
            # Derive the host-side IP from the route used to reach the target.
            try:
                self.server_ip = subprocess.check_output(
                    ['ip', 'route', 'get', self.ip]).split("\n")[0].split()[-1]
            except Exception as e:
                bb.fatal(
                    "Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s"
                    % e)
        bb.note("Server IP: %s" % self.server_ip)

        # test rootfs + kernel
        self.rootfs = os.path.join(
            d.getVar("DEPLOY_DIR_IMAGE", True),
            d.getVar("IMAGE_LINK_NAME", True) + '.tar.gz')
        self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True),
                                   d.getVar("KERNEL_IMAGETYPE"))
        if not os.path.isfile(self.rootfs):
            # we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be
            # the same as the config with which the image was build, ie
            # you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz"
            # and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage
            bb.fatal(
                "No rootfs found. Did you build the image ?\nIf yes, did you build it with IMAGE_FSTYPES += \"tar.gz\" ? \
                      \nExpected path: %s" % self.rootfs)
        if not os.path.isfile(self.kernel):
            bb.fatal("No kernel found. Expected path: %s" % self.kernel)

        # master ssh connection
        self.master = None
        # if the user knows what they are doing, then by all means...
        self.user_cmds = d.getVar("TEST_DEPLOY_CMDS", True)
        self.deploy_cmds = None

        # this is the name of the command that controls the power for a board
        # e.g: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants"
        # the command should take as the last argument "off" and "on" and "cycle" (off, on)
        self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD", True) or None
        self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""

        self.serialcontrol_cmd = d.getVar("TEST_SERIALCONTROL_CMD",
                                          True) or None
        self.serialcontrol_args = d.getVar(
            "TEST_SERIALCONTROL_EXTRA_ARGS") or ""

        # NOTE(review): this aliases os.environ rather than copying it, so
        # the loop below mutates the real process environment — confirm
        # that is intended (e.g. so child processes inherit the values).
        self.origenv = os.environ
        if self.powercontrol_cmd or self.serialcontrol_cmd:
            # the external script for controlling power might use ssh
            # ssh + keys means we need the original user env
            # NOTE(review): the '{}' fallback has no getVar(); safe only
            # because iterating an empty dict skips the loop body.
            bborigenv = d.getVar("BB_ORIGENV", False) or {}
            for key in bborigenv:
                val = bborigenv.getVar(key, True)
                if val is not None:
                    self.origenv[key] = str(val)

        # Fold any extra arguments into the control command strings.
        if self.powercontrol_cmd:
            if self.powercontrol_args:
                self.powercontrol_cmd = "%s %s" % (self.powercontrol_cmd,
                                                   self.powercontrol_args)
        if self.serialcontrol_cmd:
            if self.serialcontrol_args:
                self.serialcontrol_cmd = "%s %s" % (self.serialcontrol_cmd,
                                                    self.serialcontrol_args)
Пример #49
0
    def setVar(self, var, value, **loginfo):
        """Set datastore variable *var* to *value*, recording history.

        Handles the keyword forms (``VAR:append``/``:prepend``/``:remove``
        with optional override suffix) by accumulating them as var-flags on
        the base variable; rejects old-style ``_append`` override syntax;
        flags renamed variables; and keeps the override bookkeeping
        (overridedata/overridevars) consistent.  ``loginfo`` keys such as
        'file', 'line', 'op' and 'parsing' feed the variable history.
        """
        # Reject the pre-overrides-syntax-change operators outright (except
        # on internal anonymous-function variables).
        if not var.startswith("__anon_") and ("_append" in var or "_prepend"
                                              in var or "_remove" in var):
            info = "%s" % var
            if "file" in loginfo:
                info += " file: %s" % loginfo["file"]
            if "line" in loginfo:
                info += " line: %s" % loginfo["line"]
            bb.fatal(
                "Variable %s contains an operation using the old override syntax. Please convert this layer/metadata before attempting to use with a newer bitbake."
                % info)

        # Warn/fail on variables that have been renamed upstream.
        shortvar = var.split(":", 1)[0]
        if shortvar in self._var_renames:
            _print_rename_error(shortvar,
                                loginfo,
                                self._var_renames,
                                fullvar=var)
            # Mark that we have seen a renamed variable
            self.setVar("_FAILPARSINGERRORHANDLED", True)

        # Any assignment invalidates the expansion cache.
        self.expand_cache = {}
        parsing = False
        if 'parsing' in loginfo:
            parsing = True

        if 'op' not in loginfo:
            loginfo['op'] = "set"

        # Keyword form: VAR:append[:override] etc. — store as a var-flag on
        # the base variable instead of setting a value directly.
        match = __setvar_regexp__.match(var)
        if match and match.group("keyword") in __setvar_keyword__:
            base = match.group('base')
            keyword = match.group("keyword")
            override = match.group('add')
            l = self.getVarFlag(base, keyword, False) or []
            l.append([value, override])
            self.setVarFlag(base, keyword, l, ignore=True)
            # And cause that to be recorded:
            loginfo['detail'] = value
            loginfo['variable'] = base
            if override:
                loginfo['op'] = '%s[%s]' % (keyword, override)
            else:
                loginfo['op'] = keyword
            self.varhistory.record(**loginfo)
            # todo make sure keyword is not __doc__ or __module__
            # pay the cookie monster

            # more cookies for the cookie monster
            if ':' in var:
                self._setvar_update_overrides(base, **loginfo)

            if base in self.overridevars:
                self._setvar_update_overridevars(var, value)
            return

        if not var in self.dict:
            self._makeShadowCopy(var)

        # Outside parsing, a direct assignment wipes any accumulated
        # append/prepend/remove flags and pending override data.
        if not parsing:
            if ":append" in self.dict[var]:
                del self.dict[var][":append"]
            if ":prepend" in self.dict[var]:
                del self.dict[var][":prepend"]
            if ":remove" in self.dict[var]:
                del self.dict[var][":remove"]
            if var in self.overridedata:
                active = []
                self.need_overrides()
                for (r, o) in self.overridedata[var]:
                    if o in self.overridesset:
                        active.append(r)
                    elif ":" in o:
                        if set(o.split(":")).issubset(self.overridesset):
                            active.append(r)
                for a in active:
                    self.delVar(a)
                del self.overridedata[var]

        # more cookies for the cookie monster
        if ':' in var:
            self._setvar_update_overrides(var, **loginfo)

        # setting var
        self.dict[var]["_content"] = value
        self.varhistory.record(**loginfo)

        if var in self.overridevars:
            self._setvar_update_overridevars(var, value)
Пример #50
0
def setup_bitbake(configParams, configuration, extrafeatures=None):
    """Start (or connect to) a BitBake server and set up the UI connection.

    Depending on ``configParams`` this either spawns a local server of the
    configured server type, or connects to a remote XMLRPC server
    (auto-starting one when BBSERVER=autostart).  Unless running in
    server-only mode it also loads the UI module and establishes the event
    connection to the server.

    Args:
        configParams: parsed command-line/server parameters.
        configuration: cooker configuration object passed to the server.
        extrafeatures: optional extra feature identifiers to request.

    Returns:
        (server, server_connection, ui_module); server_connection and
        ui_module are None in server-only mode, and (None, None, None) is
        returned when the server was asked to shut down (--kill-server).
    """
    # Ensure logging messages get sent to the UI as events
    handler = bb.event.LogHandler()
    if not configParams.status_only:
        # In status only mode there are no logs and no UI
        logger.addHandler(handler)

    # Clear away any spurious environment variables while we stoke up the cooker
    cleanedvars = bb.utils.clean_environment()

    if configParams.server_only:
        featureset = []
        ui_module = None
    else:
        ui_module = import_extension_module(bb.ui, configParams.ui, 'main')
        # Collect the feature set for the UI
        featureset = getattr(ui_module, "featureSet", [])

    if configParams.server_only:
        # Redirect pre/post file options to their server-side equivalents.
        for param in ('prefile', 'postfile'):
            value = getattr(configParams, param)
            if value:
                setattr(configuration, "%s_server" % param, value)
                param = "%s_server" % param

    if extrafeatures:
        for feature in extrafeatures:
            if not feature in featureset:
                featureset.append(feature)

    servermodule = import_extension_module(bb.server, configParams.servertype,
                                           'BitBakeServer')
    if configParams.remote_server:
        # Auto-start a local server if requested and none is reachable yet.
        if os.getenv('BBSERVER') == 'autostart':
            if configParams.remote_server == 'autostart' or \
               not servermodule.check_connection(configParams.remote_server, timeout=2):
                configParams.bind = 'localhost:0'
                srv = start_server(servermodule, configParams, configuration,
                                   featureset)
                configParams.remote_server = '%s:%d' % tuple(
                    configuration.interface)
                bb.event.ui_queue = []
        # we start a stub server that is actually a XMLRPClient that connects to a real server
        from bb.server.xmlrpc import BitBakeXMLRPCClient
        server = servermodule.BitBakeXMLRPCClient(configParams.observe_only,
                                                  configParams.xmlrpctoken)
        server.saveConnectionDetails(configParams.remote_server)
    else:
        # we start a server with a given configuration
        server = start_server(servermodule, configParams, configuration,
                              featureset)
        bb.event.ui_queue = []

    if configParams.server_only:
        server_connection = None
    else:
        try:
            server_connection = server.establishConnection(featureset)
        except Exception as e:
            bb.fatal("Could not connect to server %s: %s" %
                     (configParams.remote_server, str(e)))

        if configParams.kill_server:
            # Connection was only needed to deliver the shutdown request.
            server_connection.connection.terminateServer()
            bb.event.ui_queue = []
            return None, None, None

        server_connection.setupEventQueue()

        # Restore the environment in case the UI needs it
        for k in cleanedvars:
            os.environ[k] = cleanedvars[k]

        logger.removeHandler(handler)

    return server, server_connection, ui_module