Example #1
def build_dependencies(key, keys, shelldeps, d):
    deps = set()
    try:
        if d.getVarFlag(key, "func"):
            if d.getVarFlag(key, "python"):
                parsedvar = d.expandWithRefs(d.getVar(key, False), key)
                parser = bb.codeparser.PythonParser()
                parser.parse_python(parsedvar.value)
                deps = deps | parser.references
            else:
                parsedvar = d.expandWithRefs(d.getVar(key, False), key)
                parser = bb.codeparser.ShellParser()
                parser.parse_shell(parsedvar.value)
                deps = deps | shelldeps
            deps = deps | parsedvar.references
            deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
        else:
            parser = d.expandWithRefs(d.getVar(key, False), key)
            deps |= parser.references
            deps = deps | (keys & parser.execs)
        deps |= set((d.getVarFlag(key, "vardeps") or "").split())
        deps -= set((d.getVarFlag(key, "vardepsexclude") or "").split())
    except:
        bb.note("Error expanding variable %s" % key)
        raise
    return deps
Example #2
 def readfifo(data):
     lines = data.split('\0')
     for line in lines:
         splitval = line.split(' ', 1)
         cmd = splitval[0]
         if len(splitval) > 1:
             value = splitval[1]
         else:
             value = ''
         if cmd == 'bbplain':
             bb.plain(value)
         elif cmd == 'bbnote':
             bb.note(value)
         elif cmd == 'bbwarn':
             bb.warn(value)
         elif cmd == 'bberror':
             bb.error(value)
         elif cmd == 'bbfatal':
             # The caller will call exit themselves, so bb.error() is
             # what we want here rather than bb.fatal()
             bb.error(value)
         elif cmd == 'bbfatal_log':
             bb.error(value, forcelog=True)
         elif cmd == 'bbdebug':
             splitval = value.split(' ', 1)
             level = int(splitval[0])
             value = splitval[1]
             bb.debug(level, value)
Example #3
def build_dependencies(key, keys, shelldeps, vardepvals, d):
    deps = set()
    vardeps = d.getVarFlag(key, "vardeps", True)
    try:
        value = d.getVar(key, False)
        if key in vardepvals:
            value = d.getVarFlag(key, "vardepvalue", True)
        elif d.getVarFlag(key, "func"):
            if d.getVarFlag(key, "python"):
                parsedvar = d.expandWithRefs(value, key)
                parser = bb.codeparser.PythonParser(key, logger)
                parser.parse_python(parsedvar.value)
                deps = deps | parser.references
            else:
                parsedvar = d.expandWithRefs(value, key)
                parser = bb.codeparser.ShellParser(key, logger)
                parser.parse_shell(parsedvar.value)
                deps = deps | shelldeps
            if vardeps is None:
                parser.log.flush()
            deps = deps | parsedvar.references
            deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
        else:
            parser = d.expandWithRefs(value, key)
            deps |= parser.references
            deps = deps | (keys & parser.execs)
        deps |= set((vardeps or "").split())
        deps -= set((d.getVarFlag(key, "vardepsexclude", True) or "").split())
    except:
        bb.note("Error expanding variable %s" % key)
        raise
    return deps, value
Example #4
 def readfifo(data):
     lines = data.split(b'\0')
     for line in lines:
         # Just skip empty commands
         if not line:
             continue
         splitval = line.split(b' ', 1)
         cmd = splitval[0].decode("utf-8")
         if len(splitval) > 1:
             value = splitval[1].decode("utf-8")
         else:
             value = ''
         if cmd == 'bbplain':
             bb.plain(value)
         elif cmd == 'bbnote':
             bb.note(value)
         elif cmd == 'bbwarn':
             bb.warn(value)
         elif cmd == 'bberror':
             bb.error(value)
         elif cmd == 'bbfatal':
             # The caller will call exit themselves, so bb.error() is
             # what we want here rather than bb.fatal()
             bb.error(value)
         elif cmd == 'bbfatal_log':
             bb.error(value, forcelog=True)
         elif cmd == 'bbdebug':
             splitval = value.split(' ', 1)
             level = int(splitval[0])
             value = splitval[1]
             bb.debug(level, value)
         else:
             bb.warn("Unrecognised command '%s' on FIFO" % cmd)
Example #5
    def try_mirror(d, tarfn):
        """
        Try to use a mirrored version of the sources. We do this
        to avoid massive loads on foreign cvs and svn servers.
        This method will be used by the different fetcher
        implementations.

        d Is a bb.data instance
        tarfn is the name of the tarball
        """
        pn = data.getVar('PN', d, True)
        src_tarball_stash = []
        if pn:
            src_tarball_stash = (data.getVar('SRC_TARBALL_STASH_%s' % pn, d, True) or data.getVar('CVS_TARBALL_STASH_%s' % pn, d, True) or data.getVar('SRC_TARBALL_STASH', d, True) or data.getVar('CVS_TARBALL_STASH', d, True) or "").split()

        for stash in src_tarball_stash:
            fetchcmd = data.getVar("FETCHCOMMAND_mirror", d, True) or data.getVar("FETCHCOMMAND_wget", d, True)
            uri = stash + tarfn
            bb.note("fetch " + uri)
            fetchcmd = fetchcmd.replace("${URI}", uri)
            ret = os.system(fetchcmd)
            if ret == 0:
                bb.note("Fetched %s from tarball stash, skipping checkout" % tarfn)
                return True
        return False
Example #6
    def __init__(self, machine, rootfs, display = None, tmpdir = None, logfile = None, boottime = 400, runqemutime = 60):
        # Popen object
        self.runqemu = None

        self.machine = machine
        self.rootfs = rootfs

        self.qemupid = None
        self.ip = None

        self.display = display
        self.tmpdir = tmpdir
        self.logfile = logfile
        self.boottime = boottime
        self.runqemutime = runqemutime

        self.bootlog = ''
        self.qemusock = None

        try:
            self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server_socket.setblocking(0)
            self.server_socket.bind(("127.0.0.1",0))
            self.server_socket.listen(2)
            self.serverport = self.server_socket.getsockname()[1]
            bb.note("Created listening socket for qemu serial console on: 127.0.0.1:%s" % self.serverport)
        except socket.error as msg:
            self.server_socket.close()
            bb.fatal("Failed to create listening socket: %s" % msg[1])
Example #7
    def execute(graph, item):
        if data.getVarFlag(item, 'task', d):
            if item in task_cache:
                return 1

            if task != item:
                # deeper than toplevel, exec w/ deps
                exec_task(item, d)
                return 1

            try:
                debug(1, "Executing task %s" % item)
                old_overrides = data.getVar('OVERRIDES', d, 0)
                localdata = data.createCopy(d)
                data.setVar('OVERRIDES', 'task_%s:%s' % (item, old_overrides), localdata)
                data.update_data(localdata)
                event.fire(TaskStarted(item, localdata))
                exec_func(item, localdata)
                event.fire(TaskSucceeded(item, localdata))
                task_cache.append(item)
            except FuncFailed as reason:
                note("Task failed: %s" % reason)
                failedevent = TaskFailed(item, d)
                event.fire(failedevent)
                raise EventException(None, failedevent)
Example #8
        def fetch_uri(uri, basename, dl, md5, d):
            if os.path.exists(dl):
                # file exists, but we didn't complete it, so try again
                fetchcmd = bb.data.getVar("RESUMECOMMAND", d, 1)
            else:
                fetchcmd = bb.data.getVar("FETCHCOMMAND", d, 1)

            bb.note("fetch " + uri)
            fetchcmd = fetchcmd.replace("${URI}", uri)
            fetchcmd = fetchcmd.replace("${FILE}", basename)
            bb.debug(2, "executing " + fetchcmd)
            ret = os.system(fetchcmd)
            if ret != 0:
                return False

            # supposedly complete; write out the md5sum
            if bb.which(bb.data.getVar("PATH", d), "md5sum"):
                try:
                    md5pipe = os.popen("md5sum " + dl)
                    md5data = (md5pipe.readline().split() or [""])[0]
                    md5pipe.close()
                except OSError:
                    md5data = ""
                md5out = open(md5, "w")
                md5out.write(md5data)
                md5out.close()
            else:
                md5out = open(md5, "w")
                md5out.write("")
                md5out.close()
            return True
Example #9
 def readfifo(data):
     lines = data.split(b"\0")
     for line in lines:
         splitval = line.split(b" ", 1)
         cmd = splitval[0].decode("utf-8")
         if len(splitval) > 1:
             value = splitval[1].decode("utf-8")
         else:
             value = ""
         if cmd == "bbplain":
             bb.plain(value)
         elif cmd == "bbnote":
             bb.note(value)
         elif cmd == "bbwarn":
             bb.warn(value)
         elif cmd == "bberror":
             bb.error(value)
         elif cmd == "bbfatal":
             # The caller will call exit themselves, so bb.error() is
             # what we want here rather than bb.fatal()
             bb.error(value)
         elif cmd == "bbfatal_log":
             bb.error(value, forcelog=True)
         elif cmd == "bbdebug":
             splitval = value.split(" ", 1)
             level = int(splitval[0])
             value = splitval[1]
             bb.debug(level, value)
Example #10
    def _unpackdep(self, ud, pkg, data, destdir, dldir, d):
        file = data[pkg]['tgz']
        logger.debug(2, "file to extract is %s" % file)
        if file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'):
            cmd = 'tar xz --strip 1 --no-same-owner -f %s/%s' % (dldir, file)
        else:
            bb.fatal("NPM package %s downloaded is not a tarball!" % file)

        # Change to subdir before executing command
        save_cwd = os.getcwd()
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        os.chdir(destdir)
        path = d.getVar('PATH', True)
        if path:
            cmd = "PATH=\"%s\" %s" % (path, cmd)
        bb.note("Unpacking %s to %s/" % (file, os.getcwd()))
        ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True)
        os.chdir(save_cwd)

        if ret != 0:
            raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), ud.url)

        if 'deps' not in data[pkg]:
            return
        for dep in data[pkg]['deps']:
            self._unpackdep(ud, dep, data[pkg]['deps'], "%s/node_modules/%s" % (destdir, dep), dldir, d)
Example #11
    def _index_unpack_to(self, ud, rootdir, d, cargo_index):
        """
        Unpacks the index
        """
        thefile = ud.localpath

        cmd = "tar -xz --no-same-owner --strip-components 1 -f %s -C %s" % (thefile, cargo_index)

        # change to the rootdir to unpack but save the old working dir
        save_cwd = os.getcwd()
        os.chdir(rootdir)

        # ensure we've got these paths made
        bb.utils.mkdirhier(cargo_index)

        # path it
        path = d.getVar('PATH', True)
        if path:
            cmd = "PATH=\"%s\" %s" % (path, cmd)
        bb.note("Unpacking %s to %s/" % (thefile, cargo_index))

        ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True)

        os.chdir(save_cwd)

        if ret != 0:
            raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), ud.url)
Example #12
    def go(self, d, urls = []):
        """Fetch urls"""
        if not urls:
            urls = self.urls

        for loc in urls:
            (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(loc, d))

            tag = gettag(parm)
            proto = getprotocol(parm)

            gitsrcname = '%s%s' % (host, path.replace('/', '.'))

            repofilename = 'git_%s.tar.gz' % (gitsrcname)
            repofile = os.path.join(data.getVar("DL_DIR", d, 1), repofilename)
            repodir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)

            coname = '%s' % (tag)
            codir = os.path.join(repodir, coname)

            cofile = self.localpath(loc, d)

            # tag=="master" must always update
            if (tag != "master") and Fetch.try_mirror(d, localfile(loc, d)):
                bb.debug(1, "%s already exists (or was stashed). Skipping git checkout." % cofile)
                continue

            if not os.path.exists(repodir):
                if Fetch.try_mirror(d, repofilename):    
                    bb.mkdirhier(repodir)
                    os.chdir(repodir)
                    rungitcmd("tar -xzf %s" % (repofile),d)
                else:
                    rungitcmd("git clone -n %s://%s%s %s" % (proto, host, path, repodir),d)

            os.chdir(repodir)
            rungitcmd("git pull %s://%s%s" % (proto, host, path),d)
            rungitcmd("git pull --tags %s://%s%s" % (proto, host, path),d)
            rungitcmd("git prune-packed", d)
            rungitcmd("git pack-redundant --all | xargs -r rm", d)
            # Remove all but the .git directory
            rungitcmd("rm * -Rf", d)
            # old method of downloading tags
            #rungitcmd("rsync -a --verbose --stats --progress rsync://%s%s/ %s" % (host, path, os.path.join(repodir, ".git", "")),d)

            os.chdir(repodir)
            bb.note("Creating tarball of git repository")
            rungitcmd("tar -czf %s %s" % (repofile, os.path.join(".", ".git", "*") ),d)

            if os.path.exists(codir):
                prunedir(codir)

            bb.mkdirhier(codir)
            os.chdir(repodir)
            rungitcmd("git read-tree %s" % (tag),d)
            rungitcmd("git checkout-index -q -f --prefix=%s -a" % (os.path.join(codir, "git", "")),d)

            os.chdir(codir)
            bb.note("Creating tarball of git checkout")
            rungitcmd("tar -czf %s %s" % (cofile, os.path.join(".", "*") ),d)
Example #13
    def deploy(self):

        self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
        sshloglink = os.path.join(self.testdir, "ssh_target_log")
        if os.path.islink(sshloglink):
            os.unlink(sshloglink)
        os.symlink(self.sshlog, sshloglink)
        bb.note("SSH log file: %s" %  self.sshlog)
Example #14
 def restart(self, qemuparams = None):
     bb.note("Restarting qemu process")
     if self.runqemu.poll() is None:
         self.stop()
     self.create_socket()
     if self.start(qemuparams):
         return True
     return False
Example #15
def runTests(tc):

    suite = loadTests(tc)
    bb.note("Test modules  %s" % tc.testslist)
    bb.note("Found %s tests" % suite.countTestCases())
    runner = unittest.TextTestRunner(verbosity=2)
    result = runner.run(suite)

    return result
Example #16
 def __init__(self, d):
     super(SimpleRemoteTarget, self).__init__(d)
     self.ip = d.getVar("TEST_TARGET_IP", True) or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
     bb.note("Target IP: %s" % self.ip)
     self.server_ip = d.getVar("TEST_SERVER_IP", True)
     if not self.server_ip:
         try:
             self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).split()[6]
         except Exception as e:
             bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
     bb.note("Server IP: %s" % self.server_ip)
Example #17
    def deploy(self):
        bb.utils.mkdirhier(self.testdir)

        qemuloglink = os.path.join(self.testdir, "qemu_boot_log")
        if os.path.islink(qemuloglink):
            os.unlink(qemuloglink)
        os.symlink(self.qemulog, qemuloglink)

        bb.note("rootfs file: %s" %  self.rootfs)
        bb.note("Qemu log file: %s" % self.qemulog)
        super(QemuTarget, self).deploy()
Example #18
    def parse_full_manifest(self):
        installed_pkgs = list()
        if not os.path.exists(self.full_manifest):
            bb.note('full manifest does not exist')
            return installed_pkgs

        with open(self.full_manifest, 'r') as manifest:
            for pkg in manifest.read().split('\n'):
                installed_pkgs.append(pkg.strip())

        return installed_pkgs
Example #19
    def _crate_unpack(self, ud, rootdir, d):
        """
        Unpacks a crate
        """
        thefile = ud.localpath

        # possible metadata we need to write out
        metadata = {}

        # change to the rootdir to unpack but save the old working dir
        save_cwd = os.getcwd()
        os.chdir(rootdir)

        pn = d.getVar('BPN')
        if pn == ud.parm.get('name'):
            cmd = "tar -xz --no-same-owner -f %s" % thefile
        else:
            cargo_bitbake = self._cargo_bitbake_path(rootdir)

            cmd = "tar -xz --no-same-owner -f %s -C %s" % (thefile, cargo_bitbake)

            # ensure we've got these paths made
            bb.utils.mkdirhier(cargo_bitbake)

            # generate metadata necessary
            with open(thefile, 'rb') as f:
                # get the SHA256 of the original tarball
                tarhash = hashlib.sha256(f.read()).hexdigest()

            metadata['files'] = {}
            metadata['package'] = tarhash

        # path it
        path = d.getVar('PATH')
        if path:
            cmd = "PATH=\"%s\" %s" % (path, cmd)
        bb.note("Unpacking %s to %s/" % (thefile, os.getcwd()))

        ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True)

        os.chdir(save_cwd)

        if ret != 0:
            raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), ud.url)

        # if we have metadata to write out..
        if len(metadata) > 0:
            cratepath = os.path.splitext(os.path.basename(thefile))[0]
            bbpath = self._cargo_bitbake_path(rootdir)
            mdfile = '.cargo-checksum.json'
            mdpath = os.path.join(bbpath, cratepath, mdfile)
            with open(mdpath, "w") as f:
                json.dump(metadata, f)
Example #20
 def create_socket(self):
     tries = 3
     while tries > 0:
         try:
             self.server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
             self.server_socket.connect(self.socketfile)
             bb.note("Created listening socket for qemu serial console.")
             return
         except socket.error:
             self.server_socket.close()
             tries -= 1
     # Only report a fatal error once every connection attempt has failed.
     bb.fatal("Failed to create listening socket.")
Example #21
def build_dependencies(key, keys, shelldeps, vardepvals, d):
    deps = set()
    vardeps = d.getVarFlag(key, "vardeps", True)
    try:
        if key[-1] == ']':
            vf = key[:-1].split('[')
            value = d.getVarFlag(vf[0], vf[1], False)
        else:
            value = d.getVar(key, False)

        if key in vardepvals:
            value = d.getVarFlag(key, "vardepvalue", True)
        elif d.getVarFlag(key, "func"):
            if d.getVarFlag(key, "python"):
                parsedvar = d.expandWithRefs(value, key)
                parser = bb.codeparser.PythonParser(key, logger)
                if parsedvar.value and "\t" in parsedvar.value:
                    logger.warn("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE", True)))
                parser.parse_python(parsedvar.value)
                deps = deps | parser.references
            else:
                parsedvar = d.expandWithRefs(value, key)
                parser = bb.codeparser.ShellParser(key, logger)
                parser.parse_shell(parsedvar.value)
                deps = deps | shelldeps
            if vardeps is None:
                parser.log.flush()
            deps = deps | parsedvar.references
            deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
        else:
            parser = d.expandWithRefs(value, key)
            deps |= parser.references
            deps = deps | (keys & parser.execs)

        # Add varflags, assuming an exclusion list is set
        varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS', True)
        if varflagsexcl:
            varfdeps = []
            varflags = d.getVarFlags(key)
            if varflags:
                for f in varflags:
                    if f not in varflagsexcl:
                        varfdeps.append('%s[%s]' % (key, f))
            if varfdeps:
                deps |= set(varfdeps)

        deps |= set((vardeps or "").split())
        deps -= set((d.getVarFlag(key, "vardepsexclude", True) or "").split())
    except:
        bb.note("Error expanding variable %s" % key)
        raise
    return deps, value
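The vardeps, vardepsexclude and vardepvalue names read by build_dependencies() above are ordinary variable flags on the datastore. A minimal sketch of setting them, assuming BitBake's bb module is importable; the variable names are made up for illustration:

import bb.data

d = bb.data.init()
d.setVar("do_install", "install -d ${D}${bindir}")
# Extra variable the signature of do_install should also depend on
d.setVarFlag("do_install", "vardeps", "EXTRA_TOOL")
# Variable whose changes should not invalidate the signature
d.setVarFlag("do_install", "vardepsexclude", "DATETIME")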
Example #22
    def deploy(self):
        try:
            shutil.copyfile(self.origrootfs, self.rootfs)
        except Exception as e:
            bb.fatal("Error copying rootfs: %s" % e)

        qemuloglink = os.path.join(self.testdir, "qemu_boot_log")
        if os.path.islink(qemuloglink):
            os.unlink(qemuloglink)
        os.symlink(self.qemulog, qemuloglink)

        bb.note("rootfs file: %s" %  self.rootfs)
        bb.note("Qemu log file: %s" % self.qemulog)
        super(QemuTarget, self).deploy()
Example #23
    def start(self, qemuparams=None, ssh=True, extra_bootparams=None):

        if self.display:
            os.environ["DISPLAY"] = self.display
        else:
            bb.error("To start qemu I need a X desktop, please set DISPLAY correctly (e.g. DISPLAY=:1)")
            return False
        if not os.path.exists(self.rootfs):
            bb.error("Invalid rootfs %s" % self.rootfs)
            return False
        if not os.path.exists(self.tmpdir):
            bb.error("Invalid TMPDIR path %s" % self.tmpdir)
            return False
        else:
            os.environ["OE_TMPDIR"] = self.tmpdir
        if not os.path.exists(self.deploy_dir_image):
            bb.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
            return False
        else:
            os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image

        # Set this flag so that Qemu doesn't do any grabs as SDL grabs interact
        # badly with screensavers.
        os.environ["QEMU_DONT_GRAB"] = "1"
        self.qemuparams = (
            '--append "root=/dev/ram0 console=ttyS0" -nographic -serial unix:%s,server,nowait' % self.socketfile
        )

        launch_cmd = "qemu-system-i386 -kernel %s -initrd %s %s" % (self.kernel, self.rootfs, self.qemuparams)
        self.runqemu = subprocess.Popen(
            launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setpgrp
        )

        bb.note("runqemu started, pid is %s" % self.runqemu.pid)
        bb.note("waiting at most %s seconds for qemu pid" % self.runqemutime)
        endtime = time.time() + self.runqemutime
        while not self.is_alive() and time.time() < endtime:
            time.sleep(1)

        if self.is_alive():
            bb.note("qemu started - qemu procces pid is %s" % self.qemupid)
            self.create_socket()
        else:
            bb.note("Qemu pid didn't appeared in %s seconds" % self.runqemutime)
            output = self.runqemu.stdout
            self.stop()
            bb.note("Output from runqemu:\n%s" % output.read().decode("utf-8"))
            return False

        return self.is_alive()
Example #24
def expand(s, d = _data, varname = None):
    """Variable expansion using the data store.

    Example:
        Standard expansion:
        >>> setVar('A', 'sshd')
        >>> print expand('/usr/bin/${A}')
        /usr/bin/sshd

        Python expansion:
        >>> print expand('result: ${@37 * 72}')
        result: 2664
    """
    def var_sub(match):
        key = match.group()[2:-1]
        if varname and key:
            if varname == key:
                raise Exception("variable %s references itself!" % varname)
        var = getVar(key, d, 1)
        if var is not None:
            return var
        else:
            return match.group()

    def python_sub(match):
        import bb
        code = match.group()[3:-1]
        locals()['d'] = d
        s = eval(code)
        if type(s) == types.IntType: s = str(s)
        return s

    if type(s) is not types.StringType: # sanity check
        return s

    while s.find('$') != -1:
        olds = s
        try:
            s = __expand_var_regexp__.sub(var_sub, s)
            s = __expand_python_regexp__.sub(python_sub, s)
            if s == olds: break
            if type(s) is not types.StringType: # sanity check
                import bb
                bb.error('expansion of %s returned non-string %s' % (olds, s))
        except KeyboardInterrupt:
            raise
        except:
            note("%s:%s while evaluating:\n%s" % (sys.exc_info()[0], sys.exc_info()[1], s))
            raise
    return s
Example #25
def copy_from_sysroots(pathnames, sysroots, mirrors, installdest):
    '''Copy the specified files from the specified sysroots, also checking the
    specified mirror patterns as alternate paths, to the specified destination.'''
    import subprocess

    expanded_pathnames = expand_paths(pathnames, mirrors)
    searched_paths = search_sysroots(expanded_pathnames, sysroots)
    for path, files in searched_paths:
        if not files:
            bb.debug(1, 'Failed to find `{}`'.format(path))
        else:
            destdir = oe.path.join(installdest, os.path.dirname(path))
            bb.utils.mkdirhier(destdir)
            subprocess.check_call(['cp', '-pPR'] + list(files) + [destdir + '/'])
            bb.note('Copied `{}`  to `{}/`'.format(', '.join(files), destdir))
Example #26
    def create_socket(self):

        self.bootlog = ''
        self.qemusock = None

        try:
            self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server_socket.setblocking(0)
            self.server_socket.bind(("127.0.0.1",0))
            self.server_socket.listen(2)
            self.serverport = self.server_socket.getsockname()[1]
            bb.note("Created listening socket for qemu serial console on: 127.0.0.1:%s" % self.serverport)
        except socket.error as msg:
            self.server_socket.close()
            bb.fatal("Failed to create listening socket: %s" % msg[1])
Example #27
    def stop(self):

        if self.runqemu:
            bb.note("Sending SIGTERM to runqemu")
            os.killpg(self.runqemu.pid, signal.SIGTERM)
            endtime = time.time() + self.runqemutime
            while self.runqemu.poll() is None and time.time() < endtime:
                time.sleep(1)
            if self.runqemu.poll() is None:
                bb.note("Sending SIGKILL to runqemu")
                os.killpg(self.runqemu.pid, signal.SIGKILL)
            self.runqemu = None
        if self.server_socket:
            self.server_socket.close()
            self.server_socket = None
        self.qemupid = None
        self.ip = None
Example #28
    def start(self, qemuparams = None, ssh=True, extra_bootparams=None, runqemuparams='', discard_writes=True):

        if self.display:
            os.environ["DISPLAY"] = self.display
        else:
            bb.error("To start qemu I need a X desktop, please set DISPLAY correctly (e.g. DISPLAY=:1)")
            return False
        if not os.path.exists(self.rootfs):
            bb.error("Invalid rootfs %s" % self.rootfs)
            return False
        if not os.path.exists(self.tmpdir):
            bb.error("Invalid TMPDIR path %s" % self.tmpdir)
            return False
        else:
            os.environ["OE_TMPDIR"] = self.tmpdir
        if not os.path.exists(self.deploy_dir_image):
            bb.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
            return False
        else:
            os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image

        # Set this flag so that Qemu doesn't do any grabs as SDL grabs interact
        # badly with screensavers.
        os.environ["QEMU_DONT_GRAB"] = "1"
        self.qemuparams = '--append "root=/dev/ram0 console=ttyS0" -nographic -serial unix:%s,server,nowait' % self.socketfile

        launch_cmd = 'qemu-system-i386 -kernel %s -initrd %s %s' % (self.kernel, self.rootfs, self.qemuparams)
        self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setpgrp)

        bb.note("runqemu started, pid is %s" % self.runqemu.pid)
        bb.note("waiting at most %s seconds for qemu pid" % self.runqemutime)
        endtime = time.time() + self.runqemutime
        while not self.is_alive() and time.time() < endtime:
            time.sleep(1)

        if self.is_alive():
            bb.note("qemu started - qemu procces pid is %s" % self.qemupid)
            self.create_socket()
        else:
            bb.note("Qemu pid didn't appeared in %s seconds" % self.runqemutime)
            output = self.runqemu.stdout
            self.stop()
            bb.note("Output from runqemu:\n%s" % output.read().decode("utf-8"))
            return False

        return self.is_alive()
Example #29
    def latest_versionstring(self, ud, d):
        """
        Compute the latest release name like "x.y.x" in "x.y.x+gitHASH"
        by searching through the tags output of ls-remote, comparing
        versions and returning the highest match.
        """
        pupver = ('', '')

        tagregex = re.compile(
            d.getVar('UPSTREAM_CHECK_GITTAGREGEX')
            or "(?P<pver>([0-9][\.|_]?)+)")
        try:
            output = self._lsremote(ud, d, "refs/tags/*")
        except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess) as e:
            bb.note("Could not list remote: %s" % str(e))
            return pupver

        verstring = ""
        revision = ""
        for line in output.split("\n"):
            if not line:
                break

            tag_head = line.split("/")[-1]
            # Ignore non-released branches
            m = re.search("(alpha|beta|rc|final)+", tag_head)
            if m:
                continue

            # search for version in the line
            tag = tagregex.search(tag_head)
            if tag == None:
                continue

            tag = tag.group('pver')
            tag = tag.replace("_", ".")

            if verstring and bb.utils.vercmp(("0", tag, ""),
                                             ("0", verstring, "")) < 0:
                continue

            verstring = tag
            revision = line.split()[0]
            pupver = (verstring, revision)

        return pupver
Example #30
    def stop(self):

        if self.runqemu:
            bb.note("Sending SIGTERM to runqemu")
            os.killpg(self.runqemu.pid, signal.SIGTERM)
            endtime = time.time() + self.runqemutime
            while self.runqemu.poll() is None and time.time() < endtime:
                time.sleep(1)
            if self.runqemu.poll() is None:
                bb.note("Sending SIGKILL to runqemu")
                os.killpg(self.runqemu.pid, signal.SIGKILL)
            self.runqemu = None
        if self.server_socket:
            self.server_socket.close()
            self.server_socket = None
        self.qemupid = None
        self.ip = None
Example #31
    def create_socket(self):

        self.bootlog = ''
        self.qemusock = None

        try:
            self.server_socket = socket.socket(socket.AF_INET,
                                               socket.SOCK_STREAM)
            self.server_socket.setblocking(0)
            self.server_socket.bind(("127.0.0.1", 0))
            self.server_socket.listen(2)
            self.serverport = self.server_socket.getsockname()[1]
            bb.note(
                "Created listening socket for qemu serial console on: 127.0.0.1:%s"
                % self.serverport)
        except socket.error as msg:
            self.server_socket.close()
            bb.fatal("Failed to create listening socket: %s" % msg[1])
Example #32
File: git.py Project: zedian/poky
    def latest_versionstring(self, ud, d):
        """
        Compute the latest release name like "x.y.x" in "x.y.x+gitHASH"
        by searching through the tags output of ls-remote, comparing
        versions and returning the highest match.
        """
        pupver = ('', '')

        tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or r"(?P<pver>([0-9][\.|_]?)+)")
        try:
            output = self._lsremote(ud, d, "refs/tags/*")
        except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess) as e:
            bb.note("Could not list remote: %s" % str(e))
            return pupver

        verstring = ""
        revision = ""
        for line in output.split("\n"):
            if not line:
                break

            tag_head = line.split("/")[-1]
            # Ignore non-released branches
            m = re.search(r"(alpha|beta|rc|final)+", tag_head)
            if m:
                continue

            # search for version in the line
            tag = tagregex.search(tag_head)
            if tag == None:
                continue

            tag = tag.group('pver')
            tag = tag.replace("_", ".")

            if verstring and bb.utils.vercmp(("0", tag, ""), ("0", verstring, "")) < 0:
                continue

            verstring = tag
            revision = line.split()[0]
            pupver = (verstring, revision)

        return pupver
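As a small worked illustration of the default UPSTREAM_CHECK_GITTAGREGEX fallback used above (the tag name is a made-up sample):

import re

tagregex = re.compile(r"(?P<pver>([0-9][\.|_]?)+)")
m = tagregex.search("v1_2_3")   # e.g. a tag name taken from ls-remote output
if m:
    print(m.group("pver").replace("_", "."))   # prints: 1.2.3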
Example #33
    def install_complementary(self, globs=None):
        installed_pkgs_file = os.path.join(oeRuntimeTest.tc.d.getVar('WORKDIR'),
                                           "installed_pkgs.txt")
        self.pkgs_list = RpmPkgsList(oeRuntimeTest.tc.d, oeRuntimeTest.tc.d.getVar('IMAGE_ROOTFS'), oeRuntimeTest.tc.d.getVar('arch_var'), oeRuntimeTest.tc.d.getVar('os_var'))
        with open(installed_pkgs_file, "w+") as installed_pkgs:
            installed_pkgs.write(self.pkgs_list.list("arch"))

        cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
               "-p", oeRuntimeTest.tc.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs_file,
               globs]
        try:
            bb.note("Installing complementary packages ...")
            complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Could not compute complementary packages list. Command "
                     "'%s' returned %d:\n%s" %
                     (' '.join(cmd), e.returncode, e.output))

        return complementary_pkgs.split()
Example #34
def runTests(tc):

    # set the context object passed from the test class
    setattr(oeRuntimeTest, "tc", tc)
    # set ps command to use
    setattr(oeRuntimeTest, "pscmd", "ps -ef" if oeRuntimeTest.hasPackage("procps") else "ps")
    # prepare test suite, loader and runner
    suite = unittest.TestSuite()
    testloader = unittest.TestLoader()
    testloader.sortTestMethodsUsing = None
    runner = unittest.TextTestRunner(verbosity=2)

    bb.note("Test modules  %s" % tc.testslist)
    suite = testloader.loadTestsFromNames(tc.testslist)
    bb.note("Found %s tests" % suite.countTestCases())

    result = runner.run(suite)

    return result
Example #35
    def create_socket(self):
        bb.note("waiting at most %s seconds for qemu pid" % self.runqemutime)
        tries = self.runqemutime
        while tries > 0:
            time.sleep(1)
            try:
                self.server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                self.server_socket.connect(self.socketname)
                bb.note("Created listening socket for qemu serial console.")
                break

            except socket.error:
                self.server_socket.close()
                tries -= 1

        if tries == 0:
            bb.error("Failed to create listening socket %s: " % (self.socketname))
            return False
        return True
Example #36
    def install_complementary(self, globs=None):
        installed_pkgs_file = os.path.join(oeRuntimeTest.tc.d.getVar('WORKDIR'),
                                           "installed_pkgs.txt")
        self.pkgs_list = RpmPkgsList(oeRuntimeTest.tc.d, oeRuntimeTest.tc.d.getVar('IMAGE_ROOTFS'), oeRuntimeTest.tc.d.getVar('arch_var'), oeRuntimeTest.tc.d.getVar('os_var'))
        with open(installed_pkgs_file, "w+") as installed_pkgs:
            installed_pkgs.write(self.pkgs_list.list("arch"))

        cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
               "-p", oeRuntimeTest.tc.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs_file,
               globs]
        try:
            bb.note("Installing complementary packages ...")
            complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Could not compute complementary packages list. Command "
                     "'%s' returned %d:\n%s" %
                     (' '.join(cmd), e.returncode, e.output))

        return complementary_pkgs.split()
Example #37
    def checkout_sysroot(self):
        """
        Replicate the ostree repository into the OSTree rootfs and make a checkout/deploy.
        """
        if os.path.isdir(self.OSTREE_ROOTFS):
            shutil.rmtree(self.OSTREE_ROOTFS)

        bb.note(self.format('Initializing OSTree rootfs {OSTREE_ROOTFS} ...'))
        bb.utils.mkdirhier(self.OSTREE_ROOTFS)
        self.run_ostree(
            'admin --sysroot={OSTREE_ROOTFS} init-fs {OSTREE_ROOTFS}')
        self.run_ostree('admin --sysroot={OSTREE_ROOTFS} os-init {OSTREE_OS}')

        bb.note(
            self.format(
                'Replicating primary OSTree repository {OSTREE_BARE} branch {OSTREE_BRANCHNAME} into OSTree rootfs {OSTREE_ROOTFS} ...'
            ))
        self.run_ostree(
            '--repo={OSTREE_ROOTFS}/ostree/repo pull-local --remote=updates {OSTREE_BARE} {OSTREE_BRANCHNAME}'
        )

        bb.note('Deploying sysroot from OSTree sysroot repository...')
        self.run_ostree(
            'admin --sysroot={OSTREE_ROOTFS} deploy --os={OSTREE_OS} updates:{OSTREE_BRANCHNAME}'
        )

        # OSTree initialized var for our OS, but we want the original rootfs content instead.
        src = os.path.join(self.IMAGE_ROOTFS, 'var')
        dst = os.path.join(self.OSTREE_ROOTFS, 'ostree', 'deploy',
                           self.OSTREE_OS, 'var')
        bb.note(
            self.format('Copying /var from rootfs to OSTree rootfs as {} ...',
                        dst))
        shutil.rmtree(dst)
        oe.path.copyhardlinktree(src, dst)

        if self.OSTREE_REMOTE:
            bb.note(
                self.format('Setting OSTree remote to {OSTREE_REMOTE} ...'))
            self.run_ostree('remote add --repo={OSTREE_ROOTFS}/ostree/repo '
                            '--gpg-import={OSTREE_GPGDIR}/pubring.gpg '
                            'updates {OSTREE_REMOTE}')
Example #38
def runTests(tc):

    # set the context object passed from the test class
    setattr(oeTest, "tc", tc)
    # set ps command to use
    setattr(oeRuntimeTest, "pscmd",
            "ps -ef" if oeTest.hasPackage("procps") else "ps")
    # prepare test suite, loader and runner
    suite = unittest.TestSuite()
    testloader = unittest.TestLoader()
    testloader.sortTestMethodsUsing = None
    runner = unittest.TextTestRunner(verbosity=2)

    bb.note("Test modules  %s" % tc.testslist)
    suite = testloader.loadTestsFromNames(tc.testslist)
    bb.note("Found %s tests" % suite.countTestCases())

    result = runner.run(suite)

    return result
Example #39
    def _initialize_intercepts(self):
        bb.note("Initializing intercept dir for %s" % self.target_rootfs)
        # As there might be more than one instance of PackageManager operating at the same time
        # we need to isolate the intercept_scripts directories from each other,
        # hence the ugly hash digest in dir name.
        self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" %
                                           (hashlib.sha256(self.target_rootfs.encode()).hexdigest()))

        postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split()
        if not postinst_intercepts:
            postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH")
            if not postinst_intercepts_path:
                postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts")
            postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path)

        bb.debug(1, 'Collected intercepts:\n%s' % ''.join('  %s\n' % i for i in postinst_intercepts))
        bb.utils.remove(self.intercepts_dir, True)
        bb.utils.mkdirhier(self.intercepts_dir)
        for intercept in postinst_intercepts:
            shutil.copy(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
Example #40
    def _make_repo_shallow(self, revisions, git_dir, gitcmd, d, branches=None):
        if branches is not None:
            refs = branches
        else:
            ref_output = runfetchcmd('%s for-each-ref --format="%%(refname)	%%(*objecttype)"' % gitcmd, d)
            ref_split = (iter_extend(l.rstrip().rsplit('\t', 1), 2) for l in ref_output.splitlines())
            refs = (ref for ref, objtype in ref_split if not objtype or objtype == 'commit')

        parsed_revs = runfetchcmd('%s rev-parse %s' % (gitcmd, ' '.join('%s^{}' % i for i in revisions)), d)
        queue = collections.deque(r.rstrip() for r in parsed_revs.splitlines())
        seen = set()

        shallow_file = os.path.join(git_dir, 'shallow')
        try:
            os.unlink(shallow_file)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                raise

        for rev in iter_except(queue.popleft, IndexError):
            if rev in seen:
                continue

            bb.note("Processing shallow revision: %s" % rev)
            parent_output = runfetchcmd('%s rev-parse %s^@' % (gitcmd, rev), d)
            parents = [p.rstrip() for p in parent_output.splitlines()]
            with open(shallow_file, 'a') as f:
                f.write(rev + '\n')
            seen.add(rev)

            for parent in parents:
                for ref in refs:
                    try:
                        merge_base = runfetchcmd('%s merge-base %s %s' % (gitcmd, parent, ref), d).rstrip()
                    except bb.fetch2.FetchError:
                        continue
                    queue.append(merge_base)

        runfetchcmd('%s reflog expire --expire-unreachable=now --all' % gitcmd, d)
        runfetchcmd('%s repack -ad' % gitcmd, d)
        runfetchcmd('%s prune-packed' % gitcmd, d)
Example #41
 def __init__(self, d):
     super(SimpleRemoteTarget, self).__init__(d)
     addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal(
         'Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.'
     )
     self.ip = addr.split(":")[0]
     try:
         self.port = addr.split(":")[1]
     except IndexError:
         self.port = None
     bb.note("Target IP: %s" % self.ip)
     self.server_ip = d.getVar("TEST_SERVER_IP", True)
     if not self.server_ip:
         try:
             self.server_ip = subprocess.check_output(
                 ['ip', 'route', 'get', self.ip]).split("\n")[0].split()[-1]
         except Exception as e:
             bb.fatal(
                 "Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s"
                 % e)
     bb.note("Server IP: %s" % self.server_ip)
Example #42
    def install_glob(self, globs, sdk=False):
        """
        Install all packages that match a glob.
        """
        # TODO don't have sdk here but have a property on the superclass
        # (and respect in install_complementary)
        if sdk:
            pkgdatadir = self.d.getVar("PKGDATA_DIR_SDK")
        else:
            pkgdatadir = self.d.getVar("PKGDATA_DIR")

        try:
            bb.note("Installing globbed packages...")
            cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
            bb.note('Running %s' % cmd)
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = proc.communicate()
            if stderr: bb.note(stderr.decode("utf-8"))
            pkgs = stdout.decode("utf-8")
            self.install(pkgs.split(), attempt_only=True)
        except subprocess.CalledProcessError as e:
            # Return code 1 means no packages matched
            if e.returncode != 1:
                bb.fatal("Could not compute globbed packages list. Command "
                         "'%s' returned %d:\n%s" %
                         (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
Example #43
    def populate_repo(self):
        """
        Populate primary OSTree repository (bare-user mode) with the given sysroot.
        """
        bb.note(
            self.format(
                'Populating OSTree primary repository {OSTREE_BARE} ...'))

        if os.path.isdir(self.OSTREE_BARE):
            shutil.rmtree(self.OSTREE_BARE)
        bb.utils.mkdirhier(self.OSTREE_BARE)
        self.run_ostree('--repo={OSTREE_BARE} init --mode=bare-user')
        self.run_ostree('--repo={OSTREE_BARE} commit '
                        '{gpg_sign} '
                        '--tree=dir={OSTREE_SYSROOT} '
                        '--branch={OSTREE_BRANCHNAME} '
                        '--subject="{OSTREE_COMMIT_SUBJECT}"')
        output = self.run_ostree('--repo={OSTREE_BARE} summary -u')
        bb.note(
            self.format(
                'OSTree primary repository {OSTREE_BARE} summary:\n{0}',
                output))
Example #44
    def export_repo(self):
        """
        Export data from a primary OSTree repository to the given (archive-z2) one.
        """

        bb.note(
            self.format(
                'Exporting primary repository {OSTREE_BARE} to export repository {OSTREE_REPO}...'
            ))
        if not os.path.isdir(self.OSTREE_REPO):
            bb.note("Initializing repository %s for exporting..." %
                    self.OSTREE_REPO)
            bb.utils.mkdirhier(self.OSTREE_REPO)
            self.run_ostree('--repo={OSTREE_REPO} init --mode=archive-z2')

        self.run_ostree(
            '--repo={OSTREE_REPO} pull-local --remote={OSTREE_OS} {OSTREE_BARE} {OSTREE_BRANCHNAME}'
        )
        self.run_ostree(
            '--repo={OSTREE_REPO} commit {gpg_sign} --branch={OSTREE_BRANCHNAME} --tree=ref={OSTREE_OS}:{OSTREE_BRANCHNAME}'
        )
        self.run_ostree('--repo={OSTREE_REPO} summary {gpg_sign} -u')
Example #45
 def readfifo(data):
     nonlocal fifobuffer
     fifobuffer.extend(data)
     while fifobuffer:
         message, token, nextmsg = fifobuffer.partition(b"\00")
         if token:
             splitval = message.split(b' ', 1)
             cmd = splitval[0].decode("utf-8")
             if len(splitval) > 1:
                 value = splitval[1].decode("utf-8")
             else:
                 value = ''
             if cmd == 'bbplain':
                 bb.plain(value)
             elif cmd == 'bbnote':
                 bb.note(value)
             elif cmd == 'bbverbnote':
                 bb.verbnote(value)
             elif cmd == 'bbwarn':
                 bb.warn(value)
             elif cmd == 'bberror':
                 bb.error(value)
             elif cmd == 'bbfatal':
                 # The caller will call exit themselves, so bb.error() is
                 # what we want here rather than bb.fatal()
                 bb.error(value)
             elif cmd == 'bbfatal_log':
                 bb.error(value, forcelog=True)
             elif cmd == 'bbdebug':
                 splitval = value.split(' ', 1)
                 level = int(splitval[0])
                 value = splitval[1]
                 bb.debug(level, value)
             else:
                 bb.warn("Unrecognised command '%s' on FIFO" % cmd)
             fifobuffer = nextmsg
         else:
             break
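All of the readfifo() variants above consume the same framing: NUL-separated records, each holding a command name and an optional space-separated payload. A minimal, self-contained sketch of that record format follows; the sample messages are made up for illustration and this is not BitBake's actual writer side:

# Illustrative only: demonstrates the "<cmd> <value>\0" framing that the
# readfifo() implementations above expect to receive from the logging FIFO.
records = b"bbplain hello\0bbnote build step done\0bbdebug 2 extra detail\0"

for record in records.split(b"\0"):
    if not record:
        continue  # skip the trailing empty chunk after the final NUL
    cmd, _, value = record.decode("utf-8").partition(" ")
    print(cmd, "->", value)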
Example #46
    def _ensure_npm():
        """Check if the 'npm' command is available in the recipes"""
        if not TINFOIL.recipes_parsed:
            TINFOIL.parse_recipes()

        try:
            d = TINFOIL.parse_recipe("nodejs-native")
        except bb.providers.NoProvider:
            bb.error("Nothing provides 'nodejs-native' which is required for the build")
            bb.note("You will likely need to add a layer that provides nodejs")
            sys.exit(14)

        bindir = d.getVar("STAGING_BINDIR_NATIVE")
        npmpath = os.path.join(bindir, "npm")

        if not os.path.exists(npmpath):
            TINFOIL.build_targets("nodejs-native", "addto_recipe_sysroot")

            if not os.path.exists(npmpath):
                bb.error("Failed to add 'npm' to sysroot")
                sys.exit(14)

        return bindir
Example #47
    def _unpackdep(self, ud, pkg, data, destdir, dldir, d):
        file = data[pkg]['tgz']
        logger.debug(2, "file to extract is %s" % file)
        if file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'):
            cmd = 'tar xz --strip 1 --no-same-owner --warning=no-unknown-keyword -f %s/%s' % (dldir, file)
        else:
            bb.fatal("NPM package %s downloaded not a tarball!" % file)

        # Change to subdir before executing command
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        path = d.getVar('PATH', True)
        if path:
            cmd = "PATH=\"%s\" %s" % (path, cmd)
        bb.note("Unpacking %s to %s/" % (file, destdir))
        ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=destdir)

        if ret != 0:
            raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), ud.url)

        if 'deps' not in data[pkg]:
            return
        for dep in data[pkg]['deps']:
            self._unpackdep(ud, dep, data[pkg]['deps'], "%s/node_modules/%s" % (destdir, dep), dldir, d)
Example #48
    def install_glob(self, globs, sdk=False):
        """
        Install all packages that match a glob.
        """
        # TODO don't have sdk here but have a property on the superclass
        # (and respect in install_complementary)
        if sdk:
            pkgdatadir = self.d.expand("${TMPDIR}/pkgdata/${SDK_SYS}")
        else:
            pkgdatadir = self.d.getVar("PKGDATA_DIR")

        try:
            bb.note("Installing globbed packages...")
            cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
            pkgs = subprocess.check_output(
                cmd, stderr=subprocess.STDOUT).decode("utf-8")
            self.install(pkgs.split(), attempt_only=True)
        except subprocess.CalledProcessError as e:
            # Return code 1 means no packages matched
            if e.returncode != 1:
                bb.fatal(
                    "Could not compute globbed packages list. Command "
                    "'%s' returned %d:\n%s" %
                    (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
Example #49
def runTests(tc, type="runtime"):

    suite = loadTests(tc, type)
    bb.note("Test modules  %s" % tc.testslist)
    if hasattr(tc, "tagexp") and tc.tagexp:
        bb.note("Filter test cases by tags: %s" % tc.tagexp)
    bb.note("Found %s tests" % suite.countTestCases())
    runner = unittest.TextTestRunner(verbosity=2)
    result = runner.run(suite)

    return result
Example #50
    def finalize_sysroot(self):
        """
        Finalize the physical root directory after the ostree checkout.
        """
        if False:
            bb.note(self.format('Creating EFI mount point /boot/efi in OSTree rootfs {OSTREE_ROOTFS} ...'))
            bb.utils.mkdirhier(os.path.join(self.OSTREE_ROOTFS, 'boot', 'efi'))

        if True:
            bb.note(self.format('Creating U-Boot mount point /boot/uboot in OSTree rootfs {OSTREE_ROOTFS} ...'))
            bb.utils.mkdirhier(os.path.join(self.OSTREE_ROOTFS, 'boot', 'uboot'))

        bb.note(self.format('Copying pristine rootfs {IMAGE_ROOTFS}/home to OSTree rootfs {OSTREE_ROOTFS} ...'))
        oe.path.copyhardlinktree(os.path.join(self.IMAGE_ROOTFS, 'home'),
                                 os.path.join(self.OSTREE_ROOTFS, 'home'))
Example #51
    def _crate_unpack(self, ud, rootdir, d):
        """
        Unpacks a crate
        """
        thefile = ud.localpath

        # change to the rootdir to unpack but save the old working dir
        save_cwd = os.getcwd()
        os.chdir(rootdir)

        pn = d.getVar('PN', True)
        if pn == ud.parm.get('name'):
            cmd = "tar -xz --no-same-owner -f %s" % thefile
        else:
            cargo_src = self._cargo_src_path(rootdir)
            cargo_cache = self._cargo_cache_path(rootdir)
            cargo_registry = self._cargo_registry_path(rootdir)

            cmd = "tar -xz --no-same-owner -f %s -C %s" % (thefile, cargo_src)

            # ensure we've got these paths made
            bb.utils.mkdirhier(cargo_cache)
            bb.utils.mkdirhier(cargo_registry)
            bb.utils.mkdirhier(cargo_src)

            bb.note("Copying %s to %s/" % (thefile, cargo_cache))
            shutil.copy(thefile, cargo_cache)

            bb.note("Copying %s to %s/" % (thefile, cargo_registry))
            shutil.copy(thefile, cargo_registry)

        # path it
        path = d.getVar('PATH', True)
        if path:
            cmd = "PATH=\"%s\" %s" % (path, cmd)
        bb.note("Unpacking %s to %s/" % (thefile, os.getcwd()))

        ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True)

        os.chdir(save_cwd)

        if ret != 0:
            raise UnpackError(
                "Unpack command %s failed with return value %s" % (cmd, ret),
                ud.url)
Example #52
    def unpack(self, ud, destdir, d):
        """ unpack the downloaded src to destdir"""

        subdir = ud.parm.get("subpath", "")
        if subdir != "":
            readpathspec = ":%s" % subdir
            def_destsuffix = "%s/" % os.path.basename(subdir.rstrip('/'))
        else:
            readpathspec = ""
            def_destsuffix = "git/"

        destsuffix = ud.parm.get("destsuffix", def_destsuffix)
        destdir = ud.destdir = os.path.join(destdir, destsuffix)
        if os.path.exists(destdir):
            bb.utils.prunedir(destdir)

        need_lfs = ud.parm.get("lfs", "1") == "1"

        if not need_lfs:
            ud.basecmd = "GIT_LFS_SKIP_SMUDGE=1 " + ud.basecmd

        source_found = False
        source_error = []

        if not source_found:
            clonedir_is_up_to_date = not self.clonedir_need_update(ud, d)
            if clonedir_is_up_to_date:
                runfetchcmd(
                    "%s clone %s %s/ %s" %
                    (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)
                source_found = True
            else:
                source_error.append(
                    "clone directory not available or not up to date: " +
                    ud.clonedir)

        if not source_found:
            if ud.shallow:
                if os.path.exists(ud.fullshallow):
                    bb.utils.mkdirhier(destdir)
                    runfetchcmd("tar -xzf %s" % ud.fullshallow,
                                d,
                                workdir=destdir)
                    source_found = True
                else:
                    source_error.append("shallow clone not available: " +
                                        ud.fullshallow)
            else:
                source_error.append("shallow clone not enabled")

        if not source_found:
            raise bb.fetch2.UnpackError(
                "No up to date source found: " + "; ".join(source_error),
                ud.url)

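        # Point the checkout's 'origin' remote at the upstream URL rather than the local clone directory.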
        repourl = self._get_repo_url(ud)
        runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, repourl),
                    d,
                    workdir=destdir)

        if self._contains_lfs(ud, d, destdir):
            if need_lfs and not self._find_git_lfs(d):
                raise bb.fetch2.FetchError(
                    "Repository %s has LFS content, install git-lfs on host to download (or set lfs=0 to ignore it)"
                    % (repourl))
            elif not need_lfs:
                bb.note(
                    "Repository %s has LFS content but it is not being fetched"
                    % (repourl))

        if not ud.nocheckout:
            if subdir != "":
                runfetchcmd(
                    "%s read-tree %s%s" %
                    (ud.basecmd, ud.revisions[ud.names[0]], readpathspec),
                    d,
                    workdir=destdir)
                runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd,
                            d,
                            workdir=destdir)
            elif not ud.nobranch:
                branchname = ud.branches[ud.names[0]]
                runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \
                            ud.revisions[ud.names[0]]), d, workdir=destdir)
                runfetchcmd("%s branch %s --set-upstream-to origin/%s" % (ud.basecmd, branchname, \
                            branchname), d, workdir=destdir)
            else:
                runfetchcmd("%s checkout %s" %
                            (ud.basecmd, ud.revisions[ud.names[0]]),
                            d,
                            workdir=destdir)

        return True
Example #53
0
    def __init__(self, lock, sockname, configuration, featureset):

        self.configuration = configuration
        self.featureset = featureset
        self.sockname = sockname
        self.bitbake_lock = lock
        self.readypipe, self.readypipein = os.pipe()

        # Create server control socket
        if os.path.exists(sockname):
            os.unlink(sockname)

        # Place the log in the builddirectory alongside the lock file
        logfile = os.path.join(os.path.dirname(self.bitbake_lock.name),
                               "bitbake-cookerdaemon.log")

        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # AF_UNIX has path length issues so chdir here to workaround
        cwd = os.getcwd()
        try:
            os.chdir(os.path.dirname(sockname))
            self.sock.bind(os.path.basename(sockname))
        finally:
            os.chdir(cwd)
        self.sock.listen(1)

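        # Mark the listening socket inheritable so it survives the daemonize fork below.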
        os.set_inheritable(self.sock.fileno(), True)
        startdatetime = datetime.datetime.now()
        bb.daemonize.createDaemon(self._startServer, logfile)
        self.sock.close()
        self.bitbake_lock.close()
        os.close(self.readypipein)

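        # Wait for the daemonized server to report readiness over the pipe.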
        ready = ConnectionReader(self.readypipe)
        r = ready.poll(5)
        if not r:
            bb.note(
                "Bitbake server didn't start within 5 seconds, waiting for 90")
            r = ready.poll(90)
        if r:
            try:
                r = ready.get()
            except EOFError:
                # Trap the child exiting/closing the pipe and error out
                r = None
        if not r or r[0] != "r":
            ready.close()
            bb.error("Unable to start bitbake server (%s)" % str(r))
            if os.path.exists(logfile):
                logstart_re = re.compile(self.start_log_format %
                                         ('([0-9]+)', '([0-9-]+ [0-9:.]+)'))
                started = False
                lines = []
                lastlines = []
                with open(logfile, "r") as f:
                    for line in f:
                        if started:
                            lines.append(line)
                        else:
                            lastlines.append(line)
                            res = logstart_re.match(line.rstrip())
                            if res:
                                ldatetime = datetime.datetime.strptime(
                                    res.group(2),
                                    self.start_log_datetime_format)
                                if ldatetime >= startdatetime:
                                    started = True
                                    lines.append(line)
                        if len(lastlines) > 60:
                            lastlines = lastlines[-60:]
                if lines:
                    if len(lines) > 60:
                        bb.error(
                            "Last 60 lines of server log for this session (%s):\n%s"
                            % (logfile, "".join(lines[-60:])))
                    else:
                        bb.error("Server log for this session (%s):\n%s" %
                                 (logfile, "".join(lines)))
                elif lastlines:
                    bb.error(
                        "Server didn't start, last 60 loglines (%s):\n%s" %
                        (logfile, "".join(lastlines)))
            else:
                bb.error("%s doesn't exist" % logfile)

            raise SystemExit(1)

        ready.close()
Example #54
0
    def __init__(self, lock, sockname, featureset, server_timeout, xmlrpcinterface):

        self.server_timeout = server_timeout
        self.xmlrpcinterface = xmlrpcinterface
        self.featureset = featureset
        self.sockname = sockname
        self.bitbake_lock = lock
        self.readypipe, self.readypipein = os.pipe()

        # Place the log in the builddirectory alongside the lock file
        logfile = os.path.join(os.path.dirname(self.bitbake_lock.name), "bitbake-cookerdaemon.log")
        self.logfile = logfile

        startdatetime = datetime.datetime.now()
        bb.daemonize.createDaemon(self._startServer, logfile)
        self.bitbake_lock.close()
        os.close(self.readypipein)

        ready = ConnectionReader(self.readypipe)
        r = ready.poll(5)
        if not r:
            bb.note("Bitbake server didn't start within 5 seconds, waiting for 90")
            r = ready.poll(90)
        if r:
            try:
                r = ready.get()
            except EOFError:
                # Trap the child exiting/closing the pipe and error out
                r = None
        if not r or r[0] != "r":
            ready.close()
            bb.error("Unable to start bitbake server (%s)" % str(r))
            if os.path.exists(logfile):
                logstart_re = re.compile(start_log_format % ('([0-9]+)', '([0-9-]+ [0-9:.]+)'))
                started = False
                lines = []
                lastlines = []
                with open(logfile, "r") as f:
                    for line in f:
                        if started:
                            lines.append(line)
                        else:
                            lastlines.append(line)
                            res = logstart_re.search(line.rstrip())
                            if res:
                                ldatetime = datetime.datetime.strptime(res.group(2), start_log_datetime_format)
                                if ldatetime >= startdatetime:
                                    started = True
                                    lines.append(line)
                        if len(lastlines) > 60:
                            lastlines = lastlines[-60:]
                if lines:
                    if len(lines) > 60:
                        bb.error("Last 60 lines of server log for this session (%s):\n%s" % (logfile, "".join(lines[-60:])))
                    else:
                        bb.error("Server log for this session (%s):\n%s" % (logfile, "".join(lines)))
                elif lastlines:
                        bb.error("Server didn't start, last 60 loglines (%s):\n%s" % (logfile, "".join(lastlines)))
            else:
                bb.error("%s doesn't exist" % logfile)

            raise SystemExit(1)

        ready.close()
Example #55
0
    def install_complementary(self, globs=None):
        """
        Install complementary packages based upon the list of currently installed
        packages e.g. locales, *-dev, *-dbg, etc. Note: every backend needs to
        call this function explicitly after the normal package installation.
        """
        if globs is None:
            globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
            split_linguas = set()

            for translation in self.d.getVar('IMAGE_LINGUAS').split():
                split_linguas.add(translation)
                split_linguas.add(translation.split('-')[0])

            split_linguas = sorted(split_linguas)

            for lang in split_linguas:
                globs += " *-locale-%s" % lang
                for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split():
                    globs += (" " + complementary_linguas) % lang

        if globs is None:
            return

        # we need to write the list of installed packages to a file because the
        # oe-pkgdata-util reads it from a file
        with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
            pkgs = self.list_installed()

            provided_pkgs = set()
            for pkg in pkgs.values():
                provided_pkgs |= set(pkg.get('provs', []))

            output = oe.utils.format_pkg_list(pkgs, "arch")
            installed_pkgs.write(output)
            installed_pkgs.flush()

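            # oe-pkgdata-util expands the complementary globs against the list of installed packages.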
            cmd = ["oe-pkgdata-util",
                   "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
                   globs]
            exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
            if exclude:
                cmd.extend(['--exclude=' + '|'.join(exclude.split())])
            try:
                bb.note('Running %s' % cmd)
                proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                stdout, stderr = proc.communicate()
                if stderr: bb.note(stderr.decode("utf-8"))
                complementary_pkgs = stdout.decode("utf-8")
                complementary_pkgs = set(complementary_pkgs.split())
                skip_pkgs = sorted(complementary_pkgs & provided_pkgs)
                install_pkgs = sorted(complementary_pkgs - provided_pkgs)
                bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % (
                    ' '.join(install_pkgs),
                    ' '.join(skip_pkgs)))
                self.install(install_pkgs)
            except subprocess.CalledProcessError as e:
                bb.fatal("Could not compute complementary packages list. Command "
                         "'%s' returned %d:\n%s" %
                         (' '.join(cmd), e.returncode, e.output.decode("utf-8")))

        if self.d.getVar('IMAGE_LOCALES_ARCHIVE') == '1':
            target_arch = self.d.getVar('TARGET_ARCH')
            localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
            if os.path.exists(localedir) and os.listdir(localedir):
                generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
                # And now delete the binary locales
                self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
Example #56
0
    def start(self, params=None, extra_bootparams=None):
        if not os.path.exists(self.tmpdir):
            bb.error("Invalid TMPDIR path %s" % self.tmpdir)
            #logger.error("Invalid TMPDIR path %s" % self.tmpdir)
            return False
        else:
            os.environ["OE_TMPDIR"] = self.tmpdir
        if not os.path.exists(self.deploy_dir_image):
            bb.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
            return False
        else:
            os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image

        if not os.path.exists(self.kernel):
            bb.error("Invalid kernel path: %s" % self.kernel)
            return False

        self.qemuparams = '-nographic -serial unix:%s,server' % (self.socketname)
        qemu_binary = ""
        if 'arm' in self.machine or 'cortex' in self.machine:
            qemu_binary = 'qemu-system-arm'
            qemu_machine_args = '-machine lm3s6965evb'
        elif 'x86' in self.machine:
            qemu_binary = 'qemu-system-i386'
            qemu_machine_args = '-machine type=pc-0.14'
        elif 'nios2' in self.machine:
            qemu_binary = 'qemu-system-nios2'
            qemu_machine_args = '-machine altera_10m50_zephyr'
        else:
            bb.error("Unsupported QEMU: %s" % self.machine)
            return False

        self.origchldhandler = signal.getsignal(signal.SIGCHLD)
        signal.signal(signal.SIGCHLD, self.handleSIGCHLD)

        launch_cmd = '%s -kernel %s %s %s' % (qemu_binary, self.kernel, self.qemuparams, qemu_machine_args)
        bb.note(launch_cmd)
        self.runqemu = subprocess.Popen(launch_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,preexec_fn=os.setpgrp)

        #
        # We need the preexec_fn above so that all runqemu processes can easily be killed
        # (by killing their process group). This presents a problem if this controlling
        # process itself is killed however since those processes don't notice the death
        # of the parent and merrily continue on.
        #
        # Rather than hack runqemu to deal with this, we add something here instead.
        # Basically we fork off another process which holds an open pipe to the parent
        # and also is setpgrp. If/when the pipe sees EOF from the parent dying, it kills
        # the process group. This is like prctl's PDEATHSIG but for a process group
        # rather than a single process.
        #
        r, w = os.pipe()
        self.monitorpid = os.fork()
        if self.monitorpid:
            os.close(r)
            self.monitorpipe = os.fdopen(w, "w")
        else:
            # child process
            os.setpgrp()
            os.close(w)
            r = os.fdopen(r)
            x = r.read()
            os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
            sys.exit(0)

        bb.note("qemu started, pid is %s" % self.runqemu.pid)
        return self.create_socket()
Example #57
0
    def run_intercepts(self, populate_sdk=None):
        intercepts_dir = self.intercepts_dir

        bb.note("Running intercept scripts:")
        os.environ['D'] = self.target_rootfs
        os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
        for script in os.listdir(intercepts_dir):
            script_full = os.path.join(intercepts_dir, script)

            if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
                continue

            # we do not want to run any multilib variant of this
            if script.startswith("delay_to_first_boot"):
                self._postpone_to_first_boot(script_full)
                continue

            if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
                bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
                                % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                continue

            bb.note("> Executing %s intercept ..." % script)

            try:
                output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
                if output: bb.note(output.decode("utf-8"))
            except subprocess.CalledProcessError as e:
                bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
                if populate_sdk == 'host':
                    bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                elif populate_sdk == 'target':
                    if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
                        bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
                                % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                    else:
                        bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                else:
                    if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
                        bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
                                % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                        self._postpone_to_first_boot(script_full)
                    else:
                        bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
Example #58
0
    def unpack(self, ud, destdir, d):
        """ unpack the downloaded src to destdir"""

        subdir = ud.parm.get("subpath", "")
        if subdir != "":
            readpathspec = ":%s" % subdir
            def_destsuffix = "%s/" % os.path.basename(subdir.rstrip('/'))
        else:
            readpathspec = ""
            def_destsuffix = "git/"

        destsuffix = ud.parm.get("destsuffix", def_destsuffix)
        destdir = ud.destdir = os.path.join(destdir, destsuffix)
        if os.path.exists(destdir):
            bb.utils.prunedir(destdir)

        source_found = False
        source_error = []

        if not source_found:
            clonedir_is_up_to_date = not self.clonedir_need_update(ud, d)
            if clonedir_is_up_to_date:
                runfetchcmd(
                    "%s clone %s %s/ %s" %
                    (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)
                source_found = True
            else:
                source_error.append(
                    "clone directory not available or not up to date: " +
                    ud.clonedir)

        if not source_found:
            if ud.shallow:
                if os.path.exists(ud.fullshallow):
                    bb.utils.mkdirhier(destdir)
                    runfetchcmd("tar -xzf %s" % ud.fullshallow,
                                d,
                                workdir=destdir)
                    source_found = True
                else:
                    source_error.append("shallow clone not available: " +
                                        ud.fullshallow)
            else:
                source_error.append("shallow clone not enabled")

        if not source_found:
            raise bb.fetch2.UnpackError(
                "No up to date source found: " + "; ".join(source_error),
                ud.url)

        repourl = self._get_repo_url(ud)
        runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, repourl),
                    d,
                    workdir=destdir)

        if self._contains_lfs(ud, d, destdir):
            path = d.getVar('PATH')
            if path:
                gitlfstool = bb.utils.which(path, "git-lfs", executable=True)
                if not gitlfstool:
                    raise bb.fetch2.FetchError(
                        "Repository %s has lfs content, install git-lfs plugin on host to download"
                        % (repourl))
            else:
                bb.note("Could not find 'PATH'")

        if not ud.nocheckout:
            if subdir != "":
                runfetchcmd(
                    "%s read-tree %s%s" %
                    (ud.basecmd, ud.revisions[ud.names[0]], readpathspec),
                    d,
                    workdir=destdir)
                runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd,
                            d,
                            workdir=destdir)
            elif not ud.nobranch:
                branchname = ud.branches[ud.names[0]]
                runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \
                            ud.revisions[ud.names[0]]), d, workdir=destdir)
                runfetchcmd("%s branch %s --set-upstream-to origin/%s" % (ud.basecmd, branchname, \
                            branchname), d, workdir=destdir)
            else:
                runfetchcmd("%s checkout %s" %
                            (ud.basecmd, ud.revisions[ud.names[0]]),
                            d,
                            workdir=destdir)

        return True
Example #59
0
    def __init__(self, d):
        super(MasterImageHardwareTarget, self).__init__(d)

        # target ip
        addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal(
            'Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.'
        )
        self.ip = addr.split(":")[0]
        try:
            self.port = addr.split(":")[1]
        except IndexError:
            self.port = None
        bb.note("Target IP: %s" % self.ip)
        self.server_ip = d.getVar("TEST_SERVER_IP", True)
        if not self.server_ip:
            try:
                self.server_ip = subprocess.check_output(
                    ['ip', 'route', 'get', self.ip]).decode("utf-8").split("\n")[0].split()[-1]
            except Exception as e:
                bb.fatal(
                    "Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s"
                    % e)
        bb.note("Server IP: %s" % self.server_ip)

        # test rootfs + kernel
        self.image_fstype = self.get_image_fstype(d)
        self.rootfs = os.path.join(
            d.getVar("DEPLOY_DIR_IMAGE", True),
            d.getVar("IMAGE_LINK_NAME", True) + '.' + self.image_fstype)
        self.kernel = os.path.join(
            d.getVar("DEPLOY_DIR_IMAGE", True),
            d.getVar("KERNEL_IMAGETYPE", False) + '-' +
            d.getVar('MACHINE', False) + '.bin')
        if not os.path.isfile(self.rootfs):
            # we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be
            # the same as the config with which the image was built, i.e.
            # you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz"
            # and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage
            bb.fatal(
                "No rootfs found. Did you build the image ?\nIf yes, did you build it with IMAGE_FSTYPES += \"tar.gz\" ? \
                      \nExpected path: %s" % self.rootfs)
        if not os.path.isfile(self.kernel):
            bb.fatal("No kernel found. Expected path: %s" % self.kernel)

        # master ssh connection
        self.master = None
        # if the user knows what they are doing, then by all means...
        self.user_cmds = d.getVar("TEST_DEPLOY_CMDS", True)
        self.deploy_cmds = None

        # this is the name of the command that controls the power for a board
        # e.g: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants"
        # the command should take as the last argument "off" and "on" and "cycle" (off, on)
        self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD", True) or None
        self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS",
                                          False) or ""

        self.serialcontrol_cmd = d.getVar("TEST_SERIALCONTROL_CMD",
                                          True) or None
        self.serialcontrol_args = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS",
                                           False) or ""

        self.origenv = os.environ
        if self.powercontrol_cmd or self.serialcontrol_cmd:
            # the external script for controlling power might use ssh
            # ssh + keys means we need the original user env
            bborigenv = d.getVar("BB_ORIGENV", False) or {}
            for key in bborigenv:
                val = bborigenv.getVar(key, True)
                if val is not None:
                    self.origenv[key] = str(val)

        if self.powercontrol_cmd:
            if self.powercontrol_args:
                self.powercontrol_cmd = "%s %s" % (self.powercontrol_cmd,
                                                   self.powercontrol_args)
        if self.serialcontrol_cmd:
            if self.serialcontrol_args:
                self.serialcontrol_cmd = "%s %s" % (self.serialcontrol_cmd,
                                                    self.serialcontrol_args)
Example #60
0
    def launch(self, qemuparams=None):

        if self.display:
            os.environ["DISPLAY"] = self.display
        else:
            bb.error(
                "To start qemu I need a X desktop, please set DISPLAY correctly (e.g. DISPLAY=:1)"
            )
            return False
        if not os.path.exists(self.rootfs):
            bb.error("Invalid rootfs %s" % self.rootfs)
            return False
        if not os.path.exists(self.tmpdir):
            bb.error("Invalid TMPDIR path %s" % self.tmpdir)
            return False
        else:
            os.environ["OE_TMPDIR"] = self.tmpdir
        if not os.path.exists(self.deploy_dir_image):
            bb.error("Invalid DEPLOY_DIR_IMAGE path %s" %
                     self.deploy_dir_image)
            return False
        else:
            os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image

        # Set this flag so that Qemu doesn't do any grabs as SDL grabs interact
        # badly with screensavers.
        os.environ["QEMU_DONT_GRAB"] = "1"
        self.qemuparams = 'bootparams="console=tty1 console=ttyS0,115200n8" qemuparams="-serial tcp:127.0.0.1:%s"' % self.serverport
        if qemuparams:
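            # Splice any extra options inside the closing quote of the qemuparams="..." argument.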
            self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'

        launch_cmd = 'runqemu %s %s %s' % (self.machine, self.rootfs,
                                           self.qemuparams)
        self.runqemu = subprocess.Popen(launch_cmd,
                                        shell=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT,
                                        preexec_fn=os.setpgrp)

        bb.note("runqemu started, pid is %s" % self.runqemu.pid)
        bb.note("waiting at most %s seconds for qemu pid" % self.runqemutime)
        endtime = time.time() + self.runqemutime
        while not self.is_alive() and time.time() < endtime:
            time.sleep(1)

        if self.is_alive():
            bb.note("qemu started - qemu procces pid is %s" % self.qemupid)
            cmdline = open('/proc/%s/cmdline' % self.qemupid).read()
            self.ip, _, self.host_ip = cmdline.split('ip=')[1].split(
                ' ')[0].split(':')[0:3]
            if not re.search("^((?:[0-9]{1,3}\.){3}[0-9]{1,3})$", self.ip):
                bb.note(
                    "Couldn't get ip from qemu process arguments, I got '%s'" %
                    self.ip)
                bb.note("Here is the ps output:\n%s" % cmdline)
                self.kill()
                return False
            bb.note("IP found: %s" % self.ip)
            bb.note("Waiting at most %d seconds for login banner" %
                    self.boottime)
            endtime = time.time() + self.boottime
            socklist = [self.server_socket]
            reachedlogin = False
            stopread = False
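            # Read the boot console over the serial socket until a login prompt appears or the boot timeout expires.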
            while time.time() < endtime and not stopread:
                sread, swrite, serror = select.select(socklist, [], [], 5)
                for sock in sread:
                    if sock is self.server_socket:
                        self.qemusock, addr = self.server_socket.accept()
                        self.qemusock.setblocking(0)
                        socklist.append(self.qemusock)
                        socklist.remove(self.server_socket)
                        bb.note("Connection from %s:%s" % addr)
                    else:
                        data = sock.recv(1024)
                        if data:
                            self.log(data)
                            self.bootlog += data
                            lastlines = "\n".join(
                                self.bootlog.splitlines()[-2:])
                            if re.search("login:"******"Reached login banner")
                        else:
                            socklist.remove(sock)
                            sock.close()
                            stopread = True

            if not reachedlogin:
                bb.note("Target didn't reached login boot in %d seconds" %
                        self.boottime)
                lines = "\n".join(self.bootlog.splitlines()[-5:])
                bb.note("Last 5 lines of text:\n%s" % lines)
                bb.note("Check full boot log: %s" % self.logfile)
                self.kill()
                return False
        else:
            bb.note("Qemu pid didn't appeared in %s seconds" %
                    self.runqemutime)
            output = self.runqemu.stdout
            self.kill()
            bb.note("Output from runqemu:\n%s" % output.read())
            return False

        return self.is_alive()