Example #1
    def extract_tarball(self, unpack_dir):
        try:
            shell.unpack(self.download_path, unpack_dir, logfile=get_logfile(self))
        except (IOError, EOFError, tarfile.ReadError):
            m.action(_('Corrupted or partial tarball, redownloading...'))
            run_until_complete(self.fetch(redownload=True))
            shell.unpack(self.download_path, unpack_dir, logfile=get_logfile(self))
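Every example in this collection funnels async work through run_until_complete, which appears to be a project-level helper (these snippets look like they come from Cerbero, GStreamer's build aggregator) that accepts either a single awaitable or a list of them. A minimal sketch of such a helper on top of asyncio, under that assumption:

import asyncio

def run_until_complete(tasks):
    # Sketch only: the real helper's exact behavior is an assumption here.
    # Accept a single awaitable or a list of awaitables and block until done.
    loop = asyncio.get_event_loop()
    if isinstance(tasks, (list, tuple)):
        return loop.run_until_complete(asyncio.gather(*tasks))
    return loop.run_until_complete(tasks)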
Example #2
    def run(self, config, args):
        if args.build_tools_only:
            # --build-tools-only meant '--system=no --toolchains=no --build-tools=yes'
            args.toolchains = False
            args.system = False
            m.deprecation(
                'Replace --build-tools-only with --system=no --toolchains=no')
        if args.system_only:
            # --system-only meant '--system=yes --toolchains=yes --build-tools=no'
            args.build_tools = False
            m.deprecation('Replace --system-only with --build-tools=no')
        bootstrappers = Bootstrapper(config, args.system, args.toolchains,
                                     args.build_tools, args.offline,
                                     args.assume_yes)
        tasks = []

        async def bootstrap_fetch_extract(bs):
            await bs.fetch()
            await bs.extract()

        for bootstrapper in bootstrappers:
            tasks.append(bootstrap_fetch_extract(bootstrapper))
        run_until_complete(tasks)

        for bootstrapper in bootstrappers:
            bootstrapper.start(jobs=args.jobs)
Example #3
    def start_cooking(self):
        '''
        Cooks the recipe and all its dependencies
        '''
        recipes = [self.cookbook.get_recipe(x) for x in self.recipes]

        if self.no_deps:
            ordered_recipes = recipes
        else:
            ordered_recipes = []
            for recipe in self.recipes:
                deps = self.cookbook.list_recipe_deps(recipe)
                # remove recipes already scheduled to be built
                deps = [x for x in deps if x not in ordered_recipes]
                ordered_recipes.extend(deps)

        if self.deps_only:
            ordered_recipes = [x for x in ordered_recipes if x not in recipes]

        m.message(
            _("Building the following recipes: %s") %
            ' '.join([x.name for x in ordered_recipes]))

        steps = [step[1] for step in recipes[0].steps]
        self._build_status_printer = BuildStatusPrinter(
            steps, self.interactive)
        self._static_libraries_built = []

        run_until_complete(self._cook_recipes(ordered_recipes))
Example #4
    def run(self, config, args):
        cookbook = CookBook(config)
        recipe_name = args.recipe[0]
        recursive = args.recursive

        recipe = cookbook.get_recipe(recipe_name)

        if recursive:
            ordered_recipes = cookbook.list_recipe_deps(recipe_name)
        else:
            ordered_recipes = [recipe]

        for recipe in ordered_recipes:
            if cookbook.recipe_needs_build(recipe.name):
                raise FatalError(_("Recipe %s is not built yet" % recipe.name))

        for recipe in ordered_recipes:
            # call step function
            stepfunc = None
            try:
                stepfunc = getattr(recipe, 'check')
            except AttributeError:
                m.message('%s has no check step, skipped' % recipe.name)

            if stepfunc:
                try:
                    if asyncio.iscoroutinefunction(stepfunc):
                        run_until_complete(stepfunc())
                    else:
                        stepfunc()
                except FatalError:
                    raise
                except Exception as ex:
                    raise FatalError(
                        _("Error running %s checks: %s") % (recipe.name, ex))
Example #5
    def run(self, config, args):
        bootstrappers = Bootstrapper(config, args.build_tools_only,
                                     offline=False, assume_yes=False,
                                     system_only=False)
        tasks = []
        for bootstrapper in bootstrappers:
            bootstrapper.fetch_recipes(args.jobs)
            tasks.append(bootstrapper.fetch())
        run_until_complete(tasks)
Example #6
    def _do_step(self, step):
        if step in BuildSteps.FETCH:
            arch, recipe = list(self._recipes.items())[0]
            # No, really, let's not download a million times...
            run_until_complete(self._async_run_step(recipe, step, arch))
            return

        # For the universal build we need to configure both architectures
        # with the same final prefix, but we want to install each
        # architecture on a different path (eg: /path/to/prefix/x86).

        archs_prefix = list(self._recipes.keys())

        for arch, recipe in self._recipes.items():
            # Create a stamp file to list installed files based on the
            # modification time of this file
            if step in [BuildSteps.INSTALL[1], BuildSteps.POST_INSTALL[1]]:
                # Wait 2 seconds to make sure new files get the proper time
                # difference; this fixes an issue where the next recipe to be
                # built listed the previous recipe's files as its own.
                time.sleep(2)
                tmp = tempfile.NamedTemporaryFile()
                # The modification time resolution depends on the filesystem:
                # FAT32 has a resolution of 2 seconds and ext4 of 1 second.
                t = time.time() - 2
                os.utime(tmp.name, (t, t))

            # Call the step function
            stepfunc = getattr(recipe, step)
            if asyncio.iscoroutinefunction(stepfunc):
                run_until_complete(self._async_run_step(recipe, step, arch))
            else:
                self._run_step(recipe, step, arch)

            # Move installed files to the architecture prefix
            if step in [BuildSteps.INSTALL[1], BuildSteps.POST_INSTALL[1]]:
                installed_files = shell.find_newer_files(
                    self._config.prefix, tmp.name, True)
                tmp.close()
                def in_arch_prefix(src):
                    for p in archs_prefix + ['Libraries']:
                        if src.startswith(p):
                            return True
                    return False

                for f in installed_files:
                    # skip files that are already installed in an arch prefix
                    if in_arch_prefix(f):
                        continue
                    src = os.path.join(self._config.prefix, f)

                    dest = os.path.join(self._config.prefix,
                                        recipe.config.target_arch, f)
                    if not os.path.exists(os.path.dirname(dest)):
                        os.makedirs(os.path.dirname(dest))
                    shutil.move(src, dest)
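The stamp-file trick above (back-date a temp file, run the install step, then collect everything newer) relies on Cerbero's shell.find_newer_files helper. A self-contained sketch of the same technique using only the standard library; files_newer_than is a hypothetical stand-in:

import os
import tempfile
import time

def files_newer_than(root, stamp_path):
    # Hypothetical stand-in for shell.find_newer_files(): list paths under
    # root whose modification time is newer than the stamp file's.
    stamp_mtime = os.path.getmtime(stamp_path)
    newer = []
    for dirpath, _dirs, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            if os.path.getmtime(path) > stamp_mtime:
                newer.append(os.path.relpath(path, root))
    return newer

# Back-date the stamp by 2 seconds to stay under FAT32's mtime resolution,
# run the install step, then list what it created:
stamp = tempfile.NamedTemporaryFile()
t = time.time() - 2
os.utime(stamp.name, (t, t))
# ... install step runs here ...
installed = files_newer_than('.', stamp.name)
stamp.close()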
Example #7
    def run(self, config, args):
        BaseCache.run(self, config, args)

        sha = self.get_git_sha(args)
        deps = self.get_deps(config, args)
        dep = self.find_dep(deps, sha)
        if dep:
            run_until_complete(self.fetch_dep(config, dep, args.namespace))
        m.message('All done!')
Example #8
    def _cook_recipe(self, recipe, count, total):
        # A Recipe depending on a static library that has been rebuilt
        # also needs to be rebuilt to pick up the latest build.
        if recipe.library_type != LibraryType.STATIC:
            if len(set(self._static_libraries_built) & set(recipe.deps)) != 0:
                self.cookbook.reset_recipe_status(recipe.name)
        if not self.cookbook.recipe_needs_build(recipe.name) and \
                not self.force:
            m.build_step(count, total, recipe.name, _("already built"))
            return

        if self.missing_files:
            # create a temp file that will be used to find newer files
            tmp = tempfile.NamedTemporaryFile()

        recipe.force = self.force
        for desc, step in recipe.steps:
            m.build_step(count, total, recipe.name, step)
            # check if the current step needs to be done
            if self.cookbook.step_done(recipe.name, step) and not self.force:
                m.action(_("Step done"))
                continue
            try:
                # call step function
                stepfunc = getattr(recipe, step)
                if not stepfunc:
                    raise FatalError(_('Step %s not found') % step)
                if asyncio.iscoroutinefunction(stepfunc):
                    run_until_complete(stepfunc())
                else:
                    stepfunc()
                # update status successfully
                self.cookbook.update_step_status(recipe.name, step)
            except FatalError as e:
                exc_traceback = sys.exc_info()[2]
                trace = ''
                # Don't print trace if the FatalError is merely that the
                # subprocess exited with a non-zero status. The traceback
                # is just confusing and useless in that case.
                if not isinstance(e.__context__, CalledProcessError):
                    tb = traceback.extract_tb(exc_traceback)[-1]
                    if tb.filename.endswith('.recipe'):
                        # Print the recipe and line number of the exception
                        # if it starts in a recipe
                        trace += 'Exception at {}:{}\n'.format(
                            tb.filename, tb.lineno)
                    trace += e.args[0] + '\n'
                self._handle_build_step_error(recipe, step, trace, e.arch)
            except Exception:
                raise BuildStepError(recipe, step, traceback.format_exc())
        self.cookbook.update_build_status(recipe.name, recipe.built_version())
        if recipe.library_type == LibraryType.STATIC:
            self._static_libraries_built.append(recipe.name)

        if self.missing_files:
            self._print_missing_files(recipe, tmp)
            tmp.close()
Example #9
    def strip_dir(self, dir_path):
        if not self.strip_cmd:
            m.warning('Strip command is not defined')
            return

        tasks = []
        for dirpath, dirnames, filenames in os.walk(dir_path):
            for f in filenames:
                tasks.append(self._async_strip_file(os.path.join(dirpath, f)))
        run_until_complete(tasks)
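strip_dir above queues one subprocess per file and lets run_until_complete start them all; elsewhere Cerbero throttles such calls with shell.set_max_non_cpu_bound_calls. A minimal sketch of the same throttling idea using a plain asyncio.Semaphore (the direct strip invocation is illustrative, not Cerbero's actual command line):

import asyncio
import os

async def strip_tree(dir_path, max_jobs=8):
    # Bound the number of concurrent strip subprocesses with a semaphore
    # instead of launching one per file all at once.
    sem = asyncio.Semaphore(max_jobs)

    async def strip_one(path):
        async with sem:
            proc = await asyncio.create_subprocess_exec('strip', path)
            await proc.wait()

    tasks = []
    for dirpath, _dirs, filenames in os.walk(dir_path):
        tasks.extend(strip_one(os.path.join(dirpath, f)) for f in filenames)
    await asyncio.gather(*tasks)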
Example #10
    def run(self, config, args):
        bootstrappers = Bootstrapper(config, args.build_tools_only,
                args.offline, args.assume_yes, args.system_only)
        tasks = []
        for bootstrapper in bootstrappers:
            tasks.append(bootstrapper.fetch())
        run_until_complete(tasks)

        for bootstrapper in bootstrappers:
            bootstrapper.extract()
            bootstrapper.start()
Example #11
    def fetch(cookbook, recipes, no_deps, reset_rdeps, full_reset, print_only,
              jobs):
        fetch_recipes = []
        if not recipes:
            fetch_recipes = cookbook.get_recipes_list()
        elif no_deps:
            fetch_recipes = [cookbook.get_recipe(x) for x in recipes]
        else:
            for recipe in recipes:
                fetch_recipes += cookbook.list_recipe_deps(recipe)
            fetch_recipes = remove_list_duplicates(fetch_recipes)
        m.message(
            _("Fetching the following recipes using %s async job(s): %s") %
            (jobs, ' '.join([x.name for x in fetch_recipes])))
        shell.set_max_non_cpu_bound_calls(jobs)
        to_rebuild = []
        tasks = []
        for i, recipe in enumerate(fetch_recipes):
            if print_only:
                # For now just print tarball URLs
                if isinstance(recipe, Tarball):
                    m.message("TARBALL: {} {}".format(recipe.url,
                                                      recipe.tarball_name))
                continue
            stepfunc = getattr(recipe, 'fetch')
            if asyncio.iscoroutinefunction(stepfunc):
                tasks.append(stepfunc())
            else:
                m.build_step(i + 1, len(fetch_recipes), recipe.name,
                             'fetch started')
                stepfunc()
            bv = cookbook.recipe_built_version(recipe.name)
            cv = recipe.built_version()
            if bv != cv:
                # On different versions, only reset recipe if:
                #  * forced
                #  * OR it was fully built already
                if full_reset or not cookbook.recipe_needs_build(recipe.name):
                    to_rebuild.append(recipe)
                    cookbook.reset_recipe_status(recipe.name)
                    if recipe.library_type == LibraryType.STATIC or reset_rdeps:
                        for r in cookbook.list_recipe_reverse_deps(
                                recipe.name):
                            to_rebuild.append(r)
                            cookbook.reset_recipe_status(r.name)

        run_until_complete(tasks)
        m.message("All async fetch jobs finished")
        if to_rebuild:
            to_rebuild = sorted(list(set(to_rebuild)), key=lambda r: r.name)
            m.message(
                _("These recipes have been updated and will "
                  "be rebuilt:\n%s") % '\n'.join([x.name for x in to_rebuild]))
Example #12
    def _do_step(self, step):
        if step in BuildSteps.FETCH:
            arch, recipe = list(self._recipes.items())[0]
            run_until_complete(self._async_run_step(recipe, step, arch))
            return

        for arch, recipe in self._recipes.items():
            stepfunc = getattr(recipe, step)
            if asyncio.iscoroutinefunction(stepfunc):
                run_until_complete(self._async_run_step(recipe, step, arch))
            else:
                self._run_step(recipe, step, arch)
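This sync-or-async dispatch on asyncio.iscoroutinefunction recurs in almost every example here. It could be factored into a single helper; a minimal sketch:

import asyncio

def call_step(stepfunc, *args):
    # Run a step that may be either a plain function or a coroutine
    # function, blocking until it finishes either way.
    if asyncio.iscoroutinefunction(stepfunc):
        return asyncio.get_event_loop().run_until_complete(stepfunc(*args))
    return stepfunc(*args)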
Example #13
    def run(self, config, args):
        if not config.uninstalled:
            raise FatalError(
                _("fetch-cache is only available with "
                  "cerbero-uninstalled"))

        git_dir = os.path.dirname(sys.argv[0])
        sha = git.get_hash(git_dir, args.commit)
        deps = self.get_deps(config, args)
        if not args.skip_fetch:
            dep = self.find_dep(deps, sha)
            if dep:
                run_until_complete(self.fetch_dep(config, dep, args.namespace))
        if args.job_id:
            self.update_log(config, args, deps, sha)
Example #14
    def json_get(self, url):
        m.message("GET %s" % url)

        tmpdir = tempfile.mkdtemp()
        tmpfile = os.path.join(tmpdir, 'deps.json')
        with open(os.devnull, 'w') as devnull:
            run_until_complete(
                shell.download(url, destination=tmpfile, logfile=devnull))

        with open(tmpfile, 'r') as f:
            resp = f.read()
        shutil.rmtree(tmpdir)

        return json.loads(resp)
Example #15
    def run(self, config, args):
        bootstrappers = Bootstrapper(config, args.build_tools_only,
                                     args.offline, args.assume_yes,
                                     args.system_only)
        tasks = []

        async def bootstrap_fetch_extract(bs):
            await bs.fetch()
            await bs.extract()

        for bootstrapper in bootstrappers:
            tasks.append(bootstrap_fetch_extract(bootstrapper))
        run_until_complete(tasks)

        for bootstrapper in bootstrappers:
            bootstrapper.start(jobs=args.jobs)
Example #16
    def run(self, config, args):
        if args.build_tools_only:
            # --build-tools-only meant '--toolchains=no --build-tools=yes'
            args.toolchains = False
            m.deprecation('Replace --build-tools-only with --toolchains=no')
        bootstrappers = Bootstrapper(config,
                                     False,
                                     args.toolchains,
                                     args.build_tools,
                                     offline=False,
                                     assume_yes=False)
        tasks = []
        for bootstrapper in bootstrappers:
            bootstrapper.fetch_recipes(args.jobs)
            tasks.append(bootstrapper.fetch())
        run_until_complete(tasks)
Example #17
def local_checkout(git_dir, local_git_dir, commit, logfile=None):
    '''
    Clone a repository for a given commit in a different location

    @param git_dir: destination path of the git repository
    @type git_dir: str
    @param local_git_dir: path of the source git repository
    @type local_git_dir: str
    @param commit: the commit to checkout
    @type commit: str
    '''
    branch_name = 'cerbero_build'
    shell.call('%s checkout %s -B %s' % (GIT, commit, branch_name),
               local_git_dir, logfile=logfile)
    shell.call('%s clone %s -s -b %s .' % (GIT, local_git_dir, branch_name),
               git_dir, logfile=logfile)
    ensure_user_is_set(local_git_dir, logfile=logfile)
    run_until_complete(submodules_update(git_dir, local_git_dir, logfile=logfile))
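Note that git clone -s shares the object store with the source repository instead of copying it, which keeps this local checkout fast and cheap, and checkout -B creates the cerbero_build branch or resets it if it already exists.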
Example #18
    def self_update(self):
        '''Update this instance of cerbero git repository'''

        if not self.args.self_update:
            return

        try:
            manifest = Manifest(self.args.self_update)
            manifest.parse()
            project = manifest.find_project('cerbero')
            git_dir = os.path.dirname(sys.argv[0])
            git.add_remote(git_dir, project.remote, project.fetch_uri)
            run_until_complete(git.fetch(git_dir))
            run_until_complete(git.checkout(git_dir, project.revision))
        except FatalError as ex:
            self.log_error(
                _("ERROR: Failed to proceed with self update %s") % ex)
        sys.exit(0)
Example #19
    def request(self, url, values, token=None):
        headers = {}
        if token:
            headers = {"Private-Token": token}

        data = urllib.parse.urlencode(values)
        url = "%s?%s" % (url, data)

        m.message("GET %s" % url)

        tmpdir = tempfile.mkdtemp()
        tmpfile = os.path.join(tmpdir, 'deps.json')

        try:
            run_until_complete(shell.download(url, destination=tmpfile))
        except urllib.error.URLError as e:
            raise FatalError(_(e.reason))

        with open(tmpfile, 'r') as f:
            resp = f.read()
        shutil.rmtree(tmpdir)

        return json.loads(resp)
Example #20
    def strip_file(self, path):
        run_until_complete(self._async_strip_file(path))
Example #21
    def _create_framework_library(self, libraries):
        tmpdir = tempfile.mkdtemp()

        libname = os.path.basename(self.libname)  # just to make sure

        if self.arch == Architecture.UNIVERSAL:
            archs = self.universal_archs
        else:
            archs = [self.arch]

        archs = [a if a != Architecture.X86 else 'i386' for a in archs]

        split_queue = asyncio.Queue()
        join_queues = collections.defaultdict(asyncio.Queue)
        for thin_arch in archs:
            shell.call('mkdir -p %s' % thin_arch, tmpdir, env=self.env)

        status = BuildStatusPrinter(archs, m.console_is_interactive())
        for lib in libraries:
            for thin_arch in archs:
                split_queue.put_nowait((lib, thin_arch))
                status.arch_total[thin_arch] += 1

        async def split_library_worker():
            while True:
                lib, thin_arch = await split_queue.get()

                tmpdir_thinarch = os.path.join(tmpdir, thin_arch)
                libprefix = os.path.split(lib)[-1].replace('.', '_')

                if len(archs) > 1:  # should be a fat file, split out only the arch we want
                    libprefix += '_%s_' % thin_arch
                    lib_tmpdir = await self._split_static_lib(lib, thin_arch)
                else:
                    lib_tmpdir = await self._split_static_lib(lib)

                if lib_tmpdir is None:
                    # arch is not supported in the static lib, skip it
                    status.inc_arch(thin_arch)
                    split_queue.task_done()
                    continue

                obj_files = shell.ls_files(['*.o'], lib_tmpdir)
                obj_dict = {}
                for obj_f in obj_files:
                    obj_path = os.path.join(lib_tmpdir, obj_f)
                    md5 = (await shell.async_call_output(['md5', '-q', obj_path], env=self.env)).split('\n')[0]
                    md5 = '%s-%s' % (md5, os.path.getsize(obj_path))
                    obj_dict[obj_f] = md5

                join_queues[thin_arch].put_nowait((lib, lib_tmpdir, obj_dict))
                split_queue.task_done()

        async def join_library_worker(q, thin_arch):
            object_files_md5 = []
            while True:
                lib, lib_tmpdir, obj_dict = await q.get()

                status.inc_arch(thin_arch)

                tmpdir_thinarch = os.path.join(tmpdir, thin_arch)
                libprefix = os.path.split(lib)[-1].replace('.', '_')

                target_objs = []
                for obj_f, md5 in obj_dict.items():
                    obj_path = os.path.join(lib_tmpdir, obj_f)
                    if md5 not in object_files_md5:
                        target_name = '%s-%s' % (libprefix, obj_f)
                        try:
                            # Hard link source file to the target name
                            os.link(obj_path, tmpdir_thinarch + '/' + target_name)
                        except OSError:
                            # Fall back to cp if hard linking fails for any reason
                            await shell.async_call(['cp', obj_path, target_name], tmpdir_thinarch, env=self.env)

                        # If we have a duplicate object, commit any collected ones
                        if target_name in target_objs:
                            m.warning("Committing %d objects due to dup %s" % (len(target_objs), target_name))
                            await shell.async_call(['ar', '-cqS', libname] + target_objs, tmpdir_thinarch, env=self.env)
                            target_objs = []

                        target_objs.append(target_name)
                        object_files_md5.append(md5)

                # Put all the collected target_objs in the archive. cmdline limit is 262k args on OSX.
                if target_objs:
                    await shell.async_call(['ar', '-cqS', libname] + target_objs, tmpdir_thinarch, env=self.env)
                shutil.rmtree(lib_tmpdir)
                q.task_done()

        async def post_join_worker(thin_arch):
            tmpdir_thinarch = os.path.join(tmpdir, thin_arch)
            await shell.async_call(['ar', '-s', libname], tmpdir_thinarch, env=self.env)
            lib = os.path.join(tmpdir, thin_arch, libname)
            await self._check_duplicated_symbols(lib, tmpdir)

        async def split_join_task():
            tasks = [asyncio.ensure_future(join_library_worker(join_queues[arch], arch)) for arch in archs]
            for _ in range(len(archs)):
                tasks.append(asyncio.ensure_future(split_library_worker()))
            async def split_join_queues_done():
                await split_queue.join()
                for arch in archs:
                    await join_queues[arch].join()
            await run_tasks(tasks, split_join_queues_done())

            tasks = [asyncio.ensure_future(post_join_worker(thin_arch)) for thin_arch in archs]
            await run_tasks(tasks)
        run_until_complete(split_join_task())

        if len(archs) > 1:
            # merge the final libs into a fat file again
            files = [os.path.join(tmpdir, arch, libname) for arch in archs]
            shell.new_call(['lipo'] + files + ['-create', '-output', self.install_name], tmpdir, env=self.env)
        else:
            shell.new_call(['cp', os.path.join(tmpdir, self.arch, libname), self.install_name], tmpdir, env=self.env)
        shutil.rmtree(tmpdir)
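The md5 -q call in the split worker above is the macOS command-line tool. A portable sketch of the same content-plus-size key using hashlib (object_key is a hypothetical name):

import hashlib
import os

def object_key(path, chunk_size=65536):
    # Hash the file contents and append the size, mirroring the
    # '<md5>-<size>' key built by the split worker above.
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return '%s-%d' % (digest.hexdigest(), os.path.getsize(path))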
Example #22
    def fetch(cookbook, recipes, no_deps, reset_rdeps, full_reset, print_only,
              jobs):
        fetch_recipes = []
        if not recipes:
            fetch_recipes = cookbook.get_recipes_list()
        elif no_deps:
            fetch_recipes = [cookbook.get_recipe(x) for x in recipes]
        else:
            for recipe in recipes:
                fetch_recipes += cookbook.list_recipe_deps(recipe)
            fetch_recipes = remove_list_duplicates(fetch_recipes)
        m.message(
            _("Fetching the following recipes using %s async job(s): %s") %
            (jobs, ' '.join([x.name for x in fetch_recipes])))
        shell.set_max_non_cpu_bound_calls(jobs)
        to_rebuild = []
        tasks = []
        printer = BuildStatusPrinter(('fetch',),
                                     cookbook.get_config().interactive)
        printer.total = len(fetch_recipes)

        async def fetch_print_wrapper(recipe_name, stepfunc):
            printer.update_recipe_step(printer.count, printer.total,
                                       recipe_name, 'fetch')
            await stepfunc()
            printer.count += 1
            printer.remove_recipe(recipe_name)

        for recipe in fetch_recipes:
            if print_only:
                # For now just print tarball URLs
                if isinstance(recipe, Tarball):
                    m.message("TARBALL: {} {}".format(recipe.url,
                                                      recipe.tarball_name))
                continue
            stepfunc = getattr(recipe, 'fetch')
            if asyncio.iscoroutinefunction(stepfunc):
                tasks.append(fetch_print_wrapper(recipe.name, stepfunc))
            else:
                printer.update_recipe_step(printer.count, printer.total,
                                           recipe.name, 'fetch')
                stepfunc()
                printer.count += 1
                printer.remove_recipe(recipe.name)

        run_until_complete(tasks)
        m.message("All async fetch jobs finished")

        # Checking the current built version against the fetched one
        # needs to be done *after* actually fetching
        for recipe in fetch_recipes:
            bv = cookbook.recipe_built_version(recipe.name)
            cv = recipe.built_version()
            if bv != cv:
                # On different versions, only reset recipe if:
                #  * forced
                #  * OR it was fully built already
                if full_reset or not cookbook.recipe_needs_build(recipe.name):
                    to_rebuild.append(recipe)
                    cookbook.reset_recipe_status(recipe.name)
                    if recipe.library_type == LibraryType.STATIC or reset_rdeps:
                        for r in cookbook.list_recipe_reverse_deps(
                                recipe.name):
                            to_rebuild.append(r)
                            cookbook.reset_recipe_status(r.name)

        if to_rebuild:
            to_rebuild = sorted(list(set(to_rebuild)), key=lambda r: r.name)
            m.message(
                _("These recipes have been updated and will "
                  "be rebuilt:\n%s") % '\n'.join([x.name for x in to_rebuild]))