Example #1
    def wait_task(self, poll, task):
        """Wait for a specific task to finish baking. Returns pair (result,
        delta), or None in case poll=True and the task is not yet
        done.

        """
        assert task in self
        result = task.wait(poll)
        if result is None:
            return None
        delta = self.remove(task)
        task.task_time = delta

        task.recipe.remaining_tasks -= 1
        if result:
            info("%s finished - %.3f s" % (task, delta))
            task.build_done(self.baker.runq.get_task_buildhash(task))
            self.baker.runq.mark_done(task)
            self.completed_tasks.append(task)
        else:
            err("%s failed - %.3f s" % (task, delta))
            self.failed_tasks.append(task)
            task.build_failed()
            # If any task for a recipe fails, ensure that we don't do rmwork.
            task.recipe.rmwork = False

        if task.recipe.remaining_tasks == 0:
            task.recipe.do_rmwork()

        return (task, result, delta)
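
A minimal usage sketch of the poll mode described in the docstring; "runner" and "pending" are illustrative names assumed here, not part of the original API:

    # Hypothetical caller: poll each in-flight task without blocking.
    still_running = []
    for task in pending:
        finished = runner.wait_task(True, task)
        if finished is None:
            # poll=True and the task is not done yet; check it again later.
            still_running.append(task)
            continue
        task, result, delta = finished
        # result is truthy on success, falsy on failure; delta is seconds spent.
    pending = still_running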
Example #2
    def wait_task(self, poll, task):
        """Wait for a specific task to finish baking. Returns pair (result,
        delta), or None in case poll=True and the task is not yet
        done.

        """
        assert task in self
        result = task.wait(poll)
        if result is None:
            return None
        delta = self.remove(task)
        task.task_time = delta

        task.recipe.remaining_tasks -= 1
        if result:
            info("%s finished - %.3f s" % (task, delta))
            task.build_done(self.baker.runq.get_task_buildhash(task))
            self.baker.runq.mark_done(task)
            self.completed_tasks.append(task)
        else:
            err("%s failed - %.3f s" % (task, delta))
            self.failed_tasks.append(task)
            task.build_failed()
            # If any task for a recipe fails, ensure that we don't do rmwork.
            task.recipe.rmwork = False

        if task.recipe.remaining_tasks == 0:
            task.recipe.do_rmwork()

        return (task, result, delta)
Example #3
 def __init__(self, baker):
     self.baker = baker
     self.config = baker.config
     self.oeparser = baker.oeparser
     self.init_layer_meta()
     self.db = sqlite.connect(":memory:", isolation_level=None)
     if not self.db:
         raise Exception("could not create in-memory sqlite db")
     self.db.text_factory = str
     self.dbc = CursorWrapper(self.db.cursor(), profile=False)
     self.init_db()
     self.recipes = {}
     self.packages = {}
     self.tasks = {}
     self.cachedir = self.config.get("CACHEDIR") or ""
     self.debug = self.baker.debug
     fail = False
     recipefiles = self.list_recipefiles()
     total = len(recipefiles)
     count = 0
     rusage = oelite.profiling.Rusage("recipe parsing")
     for recipefile in recipefiles:
         count += 1
         if self.debug:
             debug("Adding %s to cookbook [%s/%s]"%(
                     self.shortfilename(recipefile), count, total))
         else:
             oelite.util.progress_info("Adding recipes to cookbook",
                                       total, count)
         try:
             if not self.add_recipefile(recipefile):
                 fail = True
         except KeyboardInterrupt:
             if os.isatty(sys.stdout.fileno()) and not self.debug:
                 print
             die("Aborted while building cookbook")
         except oelite.parse.ParseError, e:
             if os.isatty(sys.stdout.fileno()) and not self.debug:
                 print
             e.print_details()
             err("Parse error in %s"%(self.shortfilename(recipefile)))
             fail = True
         except Exception, e:
             import traceback
             if os.isatty(sys.stdout.fileno()) and not self.debug:
                 print
             traceback.print_exc()
             err("Uncaught Python exception in %s"%(
                     self.shortfilename(recipefile)))
             fail = True
Example #4
File: task.py Project: kimrhh/core
 def do_cleandirs(self, name=None):
     if not name:
         name = self.name
     cleandirs = self.meta().get_flag(name, "cleandirs", oelite.meta.FULL_EXPANSION)
     if cleandirs:
         for cleandir in cleandirs.split():
             if not os.path.exists(cleandir):
                 continue
             try:
                 # print "cleandir %s"%(cleandir)
                 if os.path.islink(cleandir):
                     os.unlink(cleandir)
                 else:
                     shutil.rmtree(cleandir)
             except Exception, e:
                 err("cleandir %s failed: %s" % (cleandir, e))
                 raise
Example #5
 def do_cleandirs(self, name=None):
     if not name:
         name = self.name
     cleandirs = (self.meta().get_flag(name, "cleandirs",
                                       oelite.meta.FULL_EXPANSION))
     if cleandirs:
         for cleandir in cleandirs.split():
             if not os.path.exists(cleandir):
                 continue
             try:
                 #print "cleandir %s"%(cleandir)
                 if os.path.islink(cleandir):
                     os.unlink(cleandir)
                 else:
                     shutil.rmtree(cleandir)
             except Exception, e:
                 err("cleandir %s failed: %s" % (cleandir, e))
                 raise
Example #6
                self.runq.set_task_stamp(task, stamp_mtime, stamp_signature)

            task = self.runq.get_metahashable_task()
            count += 1
            continue

        oelite.util.progress_info("Calculating task metadata hashes",
                                  total, count)

        if self.debug:
            timing_info("Calculation task metadata hashes", start)

        if count != total:
            print ""
            self.runq.print_metahashable_tasks()
            err("Circular task dependencies detected. Remaining tasks:")
            for task in self.runq.get_unhashed_tasks():
                print "  %s"%(task)
            die("Unable to handle circular task dependencies")

        self.runq.set_task_build_on_nostamp_tasks()
        self.runq.set_task_build_on_retired_tasks()
        self.runq.set_task_build_on_hashdiff()

        # check for availability of prebaked packages, and set package
        # filename for all packages.
        depend_packages = self.runq.get_depend_packages()
        url_prefix = self.config.get("PREBAKE_URL")
        if url_prefix is not None:
            info("Trying to use prebakes from url: %s"%(url_prefix))
        for package in depend_packages:
Example #7
                # detected at runq task level.  If we cannot build a
                # recipe because of circular task dependencies, it is
                # clearly a bug.  Improve runq detection of this by
                # always simulating runq execution before starting,
                # and checking that all tasks can be completed, and if
                # some tasks are unbuildable, print out remaining
                # tasks and their dependencies.

                # on the other hand.... circular dependencies can be
                # arbitrarily complex, and it is pretty hard to handle
                # them generally, so better refuse to handle any of
                # them, to avoid having to add more and more complex
                # code to handle increasingly sophisticated types of
                # circular dependencies.

                err("circular dependency while resolving %s"%(item))
                depends = []
                recursion_path[0].append(package)
                recursion_path[1].append(item)
                for i in xrange(len(recursion_path[0])):
                    depend_package = str(recursion_path[0][i])
                    depend_item = str(recursion_path[1][i])
                    if depend_item == depend_package:
                        depends.append(depend_package)
                    else:
                        depends.append("%s (%s)"%(depend_package, depend_item))
                # raise RecursiveDepends(depends)
                # Simply break the circular dependency here. It is not
                # possible to determine if it is a problem or not here, as
                # this has to be done at task level instead.
                return set([])
Example #8
                # detected at runq task level.  If we cannot build a
                # recipe because of circular task dependencies, it is
                # clearly a bug.  Improve runq detection of this by
                # always simulating runq execution before starting,
                # and checking that all tasks can be completed, and if
                # some tasks are unbuildable, print out remaining
                # tasks and their dependencies.

                # on the other hand.... circular dependencies can be
                # arbitrarily complex, and it is pretty hard to handle
                # them generally, so better refuse to handle any of
                # them, to avoid having to add more and more complex
                # code to handle increasingly sophisticated types of
                # circular dependencies.

                err("circular dependency while resolving %s" % (item))
                depends = []
                recursion_path[0].append(package)
                recursion_path[1].append(item)
                for i in xrange(len(recursion_path[0])):
                    depend_package = str(recursion_path[0][i])
                    depend_item = str(recursion_path[1][i])
                    if depend_item == depend_package:
                        depends.append(depend_package)
                    else:
                        depends.append("%s (%s)" %
                                       (depend_package, depend_item))
                # raise RecursiveDepends(depends)
                # Simply break the circular dependency here. It is not
                # possible to determine if it is a problem or not here, as
                # this has to be done at task level instead.
Example #9
    def add_recipe(self, recipe):
        self.dbc.execute(
            "INSERT INTO recipe "
            "(file, type, name, version, priority) "
            "VALUES (?, ?, ?, ?, ?)",
            (recipe.filename, recipe.type, recipe.name,
             recipe.version, recipe.priority))
        recipe_id = self.dbc.lastrowid
        recipe.set_id(recipe_id)
        self.recipes[recipe_id] = recipe

        task_names = recipe.get_task_names()
        taskseq = []
        for task_name in task_names:
            task_nostamp = recipe.meta.get_boolean_flag(task_name, "nostamp")
            taskseq.append((recipe_id, task_name, task_nostamp))
        if taskseq:
            self.dbc.executemany(
                "INSERT INTO task (recipe, name, nostamp) VALUES (?, ?, ?)",
                taskseq)

        for deptype in ("DEPENDS", "RDEPENDS", "FDEPENDS"):
            recipe_depends = []
            for item in (recipe.meta.get(deptype) or "").split():
                item = oelite.item.OEliteItem(item, (deptype, recipe.type))
                recipe_depends.append((recipe_id, deptype, item.type, item.name, item.version))
            for item in (recipe.meta.get("CLASS_"+deptype) or "").split():
                item = oelite.item.OEliteItem(item, (deptype, recipe.type))
                recipe_depends.append((recipe_id, deptype, item.type, item.name, item.version))
            if recipe_depends:
                self.dbc.executemany(
                    "INSERT INTO recipe_depend (recipe, deptype, type, item, version) "
                    "VALUES (?, ?, ?, ?, ?)", recipe_depends)

        for task_name in task_names:
            task_id = flatten_single_value(self.dbc.execute(
                    "SELECT id FROM task WHERE recipe=? AND name=?",
                    (recipe_id, task_name)))

            for parent in recipe.meta.get_list_flag(task_name, "deps"):
                self.dbc.execute(
                    "INSERT INTO task_parent (recipe, task, parent) "
                    "VALUES (:recipe_id, :task_name, :parent)",
                    locals())

            for _deptask in recipe.meta.get_list_flag(task_name, "deptask"):
                deptask = _deptask.split(":", 1)
                if len(deptask) != 2:
                    bb.fatal("invalid deptask:", _deptask)
                assert deptask[0] in ("DEPENDS", "RDEPENDS", "FDEPENDS")
                self.dbc.execute(
                    "INSERT INTO task_deptask (task, deptype, deptask) "
                    "VALUES (?, ?, ?)", ([task_id] + deptask))

            for _recdeptask in recipe.meta.get_list_flag(task_name,
                                                        "recdeptask"):
                recdeptask = _recdeptask.split(":", 1)
                if len(recdeptask) != 2:
                    bb.fatal("invalid deptask:", _recdeptask)
                assert recdeptask[0] in ("DEPENDS", "RDEPENDS", "FDEPENDS")
                self.dbc.execute(
                    "INSERT INTO task_recdeptask (task, deptype, recdeptask) "
                    "VALUES (?, ?, ?)", ([task_id] + recdeptask))

            for depends in recipe.meta.get_list_flag(task_name, "depends"):
                try:
                    (parent_item, parent_task) = depends.split(":")
                    self.dbc.execute(
                        "INSERT INTO task_depend "
                        "(task, parent_item, parent_task) "
                        "VALUES (?, ?, ?)",
                        (task_id, parent_item, parent_task))
                except ValueError:
                    err("invalid task 'depends' value for %s "
                        "(valid syntax is item:task): %s"%(
                            task_name, depends))

        packages = recipe.meta.get_list("PACKAGES")
        if not packages:
            warn("no packages defined for recipe %s"%(recipe))
        else:
            for package in packages:
                arch = (recipe.meta.get("PACKAGE_ARCH_" + package) or
                        recipe.meta.get("RECIPE_ARCH"))
                type = (recipe.meta.get("PACKAGE_TYPE_" + package) or
                        recipe.meta.get("RECIPE_TYPE"))
                package_id = self.add_package(recipe, package, type, arch)
            
                provides = recipe.meta.get("PROVIDES_" + package) or ""
                provides = provides.split()
                if package not in provides:
                    provides.append(package)
                for item in provides:
                    self.dbc.execute(
                        "INSERT INTO provide (package, item) "
                        "VALUES (?, ?)", (package_id, item))
            
                for deptype in ("DEPENDS", "RDEPENDS"):
                    depends = recipe.meta.get("%s_%s"%(deptype , package)) or ""
                    for item in depends.split():
                        self.dbc.execute(
                            "INSERT INTO package_depend "
                            "(package, deptype, item) "
                            "VALUES (?, ?, ?)", (package_id, deptype, item))

        return
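
For orientation, a hedged sketch of the table layout these INSERT statements assume; the column names are taken from the statements above, while types and constraints are guesses, and the real schema lives in init_db():

    # Sketch only: columns inferred from the INSERTs in add_recipe(); the
    # package table filled by add_package() is not shown in this snippet.
    import sqlite3

    SCHEMA_SKETCH = """
    CREATE TABLE recipe (id INTEGER PRIMARY KEY,
                         file, type, name, version, priority);
    CREATE TABLE task (id INTEGER PRIMARY KEY, recipe, name, nostamp);
    CREATE TABLE recipe_depend (recipe, deptype, type, item, version);
    CREATE TABLE task_parent (recipe, task, parent);
    CREATE TABLE task_deptask (task, deptype, deptask);
    CREATE TABLE task_recdeptask (task, deptype, recdeptask);
    CREATE TABLE task_depend (task, parent_item, parent_task);
    CREATE TABLE provide (package, item);
    CREATE TABLE package_depend (package, deptype, item);
    """

    db = sqlite3.connect(":memory:")
    db.executescript(SCHEMA_SKETCH)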
Example #10
            else:
                self.runq.set_task_stamp(task, stamp_mtime, stamp_signature)

            task = self.runq.get_metahashable_task()
            count += 1
            continue

        oelite.util.progress_info("Calculating task metadata hashes", total,
                                  count)

        rusage.end()

        if count != total:
            print ""
            self.runq.print_metahashable_tasks()
            err("Circular task dependencies detected. Remaining tasks:")
            for task in self.runq.get_unhashed_tasks():
                print "  %s" % (task)
            die("Unable to handle circular task dependencies")

        self.runq.set_task_build_on_nostamp_tasks()
        self.runq.set_task_build_on_retired_tasks()
        self.runq.set_task_build_on_hashdiff()

        # check for availability of prebaked packages, and set package
        # filename for all packages.
        depend_packages = self.runq.get_depend_packages()
        url_prefix = self.config.get("PREBAKE_URL")
        if url_prefix is not None:
            info("Trying to use prebakes from url: %s" % (url_prefix))
        for package in depend_packages:
Example #11
def error(*args):
    oebakery.err(" ".join(args))
Example #12
def grab(url, filename, timeout=120, retry=5, proxies=None, passive_ftp=True):
    print "Grabbing", url

    if proxies:
        env = os.environ.copy()
        env.update(proxies)
    else:
        env = None  # default: the subprocess inherits the current environment

    if passive_ftp:
        psvftp = '--passive-ftp'
    else:
        psvftp = '--no-passive-ftp'

    d = os.path.dirname(filename)
    f = os.path.basename(filename)
    if not os.path.exists(d):
        os.makedirs(d)

    # Use mkstemp to create and open a guaranteed unique file. We use
    # the file descriptor as wget's stdout. We must download to the
    # actual ingredient dir rather than e.g. /tmp to ensure that we
    # can do a link(2) call without encountering EXDEV.
    (fd, dl_tgt) = tempfile.mkstemp(prefix=f + ".", dir=d)
    # Unfortunately, mkstemp() uses mode 0o600 when opening the file,
    # but we'd rather have used 0o644. So we get to do a little syscall
    # dance, yay.
    mask = os.umask(0o022)
    os.fchmod(fd, 0o644 & ~mask)
    os.umask(mask)

    cmd = ['wget', '-t', str(retry), '-T', str(timeout), psvftp,
           '--no-check-certificate', '--progress=dot:mega', '-v',
           url, '-O', '-']

    try:
        returncode = subprocess.call(cmd, env=env, stdout=fd)

        if returncode != 0:
            err("Error %s %d" % (cmd, returncode))
            return False

        if os.fstat(fd).st_size == 0:
            err("The fetch of %s resulted in a zero size file?! Failing since this isn't right." % (url))
            return False

        # We use link(2) rather than rename(2), since the latter would
        # replace an existing target. Although that's still done
        # atomically and the new file should be identical to the old,
        # it's better that once created, the target dentry is
        # "immutable". For example, there might be some code that,
        # when opening a file, first does a stat(2), then actually
        # opens the file, and then does an fstat() and compares the
        # inode numbers. We don't want such code to fail. It's also
        # slightly simpler that we need to do an unlink(2) on all exit
        # paths.
        try:
            os.link(dl_tgt, filename)
        except OSError as e:
            if e.errno == errno.EEXIST:
                # Some other fetcher beat us to it, signature checking
                # should ensure we don't end up using a wrong
                # file. But do make a note of this in the log file so
                # that we can see that the races do occur, and that
                # this works as intended.
                info("Fetching %s raced with another process - this is harmless" % url)
                pass
            else:
                err("os.link(%s, %s) failed: %s", dl_tgt, filename, str(e))
                return False
    finally:
        # Regardless of how all of the above went, we have to delete
        # the temporary dentry and close the file descriptor. We do
        # not wrap these in ignoreall-try-except, since something is
        # really broken if either fails (in particular, subprocess is
        # not supposed to close the fd we give it; it should only dup2
        # it to 1, and then close the original _in the child_).
        os.unlink(dl_tgt)
        os.close(fd)


    return True
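
An illustrative call; the URL, target path, and proxy value are made-up examples, not taken from the source:

    # Hypothetical values for illustration only.
    ok = grab("http://example.com/sources/foo-1.0.tar.gz",
              "/work/ingredients/foo-1.0.tar.gz",
              timeout=60, retry=3,
              proxies={"http_proxy": "http://proxy.example.com:8080"})
    if not ok:
        err("fetch of foo-1.0.tar.gz failed")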
Example #13
    def add_recipe(self, recipe):
        self.dbc.execute(
            "INSERT INTO recipe "
            "(file, type, name, version, priority) "
            "VALUES (?, ?, ?, ?, ?)",
            (recipe.filename, recipe.type, recipe.name, recipe.version,
             recipe.priority))
        recipe_id = self.dbc.lastrowid
        recipe.set_id(recipe_id)
        self.recipes[recipe_id] = recipe

        task_names = recipe.get_task_names()
        taskseq = []
        for task_name in task_names:
            task_nostamp = recipe.meta.get_boolean_flag(task_name, "nostamp")
            taskseq.append((recipe_id, task_name, task_nostamp))
        if taskseq:
            self.dbc.executemany(
                "INSERT INTO task (recipe, name, nostamp) VALUES (?, ?, ?)",
                taskseq)

        for deptype in ("DEPENDS", "RDEPENDS", "FDEPENDS"):
            recipe_depends = []
            for item in (recipe.meta.get(deptype) or "").split():
                item = oelite.item.OEliteItem(item, (deptype, recipe.type))
                recipe_depends.append(
                    (recipe_id, deptype, item.type, item.name, item.version))
            for item in (recipe.meta.get("CLASS_" + deptype) or "").split():
                item = oelite.item.OEliteItem(item, (deptype, recipe.type))
                recipe_depends.append(
                    (recipe_id, deptype, item.type, item.name, item.version))
            if recipe_depends:
                self.dbc.executemany(
                    "INSERT INTO recipe_depend (recipe, deptype, type, item, version) "
                    "VALUES (?, ?, ?, ?, ?)", recipe_depends)

        for task_name in task_names:
            task_id = flatten_single_value(
                self.dbc.execute(
                    "SELECT id FROM task WHERE recipe=? AND name=?",
                    (recipe_id, task_name)))

            for parent in recipe.meta.get_list_flag(task_name, "deps"):
                self.dbc.execute(
                    "INSERT INTO task_parent (recipe, task, parent) "
                    "VALUES (:recipe_id, :task_name, :parent)", locals())

            for _deptask in recipe.meta.get_list_flag(task_name, "deptask"):
                deptask = _deptask.split(":", 1)
                if len(deptask) != 2:
                    bb.fatal("invalid deptask:", _deptask)
                assert deptask[0] in ("DEPENDS", "RDEPENDS", "FDEPENDS")
                self.dbc.execute(
                    "INSERT INTO task_deptask (task, deptype, deptask) "
                    "VALUES (?, ?, ?)", ([task_id] + deptask))

            for _recdeptask in recipe.meta.get_list_flag(
                    task_name, "recdeptask"):
                recdeptask = _recdeptask.split(":", 1)
                if len(recdeptask) != 2:
                    bb.fatal("invalid deptask:", _recdeptask)
                assert recdeptask[0] in ("DEPENDS", "RDEPENDS", "FDEPENDS")
                self.dbc.execute(
                    "INSERT INTO task_recdeptask (task, deptype, recdeptask) "
                    "VALUES (?, ?, ?)", ([task_id] + recdeptask))

            for depends in recipe.meta.get_list_flag(task_name, "depends"):
                try:
                    (parent_item, parent_task) = depends.split(":")
                    self.dbc.execute(
                        "INSERT INTO task_depend "
                        "(task, parent_item, parent_task) "
                        "VALUES (?, ?, ?)",
                        (task_id, parent_item, parent_task))
                except ValueError:
                    err("invalid task 'depends' value for %s "
                        "(valid syntax is item:task): %s" %
                        (task_name, depends))

        packages = recipe.meta.get_list("PACKAGES")
        if not packages:
            warn("no packages defined for recipe %s" % (recipe))
        else:
            for package in packages:
                arch = (recipe.meta.get("PACKAGE_ARCH_" + package)
                        or recipe.meta.get("RECIPE_ARCH"))
                type = (recipe.meta.get("PACKAGE_TYPE_" + package)
                        or recipe.meta.get("RECIPE_TYPE"))
                package_id = self.add_package(recipe, package, type, arch)

                provides = recipe.meta.get("PROVIDES_" + package) or ""
                provides = provides.split()
                if package not in provides:
                    provides.append(package)
                for item in provides:
                    self.dbc.execute(
                        "INSERT INTO provide (package, item) "
                        "VALUES (?, ?)", (package_id, item))

                for deptype in ("DEPENDS", "RDEPENDS"):
                    depends = recipe.meta.get("%s_%s" %
                                              (deptype, package)) or ""
                    for item in depends.split():
                        self.dbc.execute(
                            "INSERT INTO package_depend "
                            "(package, deptype, item) "
                            "VALUES (?, ?, ?)", (package_id, deptype, item))

        return
Example #14
                self.runq.set_task_stamp(task, stamp_mtime, stamp_signature)

            task = self.runq.get_metahashable_task()
            count += 1
            continue

        oelite.util.progress_info("Calculating task metadata hashes", total,
                                  count)

        if self.debug:
            timing_info("Calculation task metadata hashes", start)

        if count != total:
            print ""
            self.runq.print_metahashable_tasks()
            err("Circular task dependencies detected. Remaining tasks:")
            for task in self.runq.get_unhashed_tasks():
                print "  %s" % (task)
            die("Unable to handle circular task dependencies")

        self.runq.set_task_build_on_nostamp_tasks()
        self.runq.set_task_build_on_retired_tasks()
        self.runq.set_task_build_on_hashdiff()

        # check for availability of prebaked packages, and set package
        # filename for all packages.
        depend_packages = self.runq.get_depend_packages()
        url_prefix = self.config.get("PREBAKE_URL")
        if url_prefix is not None:
            info("Trying to use prebakes from url: %s" % (url_prefix))
        for package in depend_packages: