def run(self, repo): keyword = "["+repo+"]" # import repo.conf self.read_conf_file() if keyword in self.data: first = self.data.index(keyword) for line in self.data[first+1:]: if line.startswith("["): continue if self._type is None and line.startswith("type"): self._type = line.split("@")[1].strip() if self._type == 'local': return elif self.remote is None and line.startswith("remote"): self.remote = line.split("@")[1].strip() if self._type == "git": from lpms.syncers import git as syncer lpms.logger.info("synchronizing %s from %s" % (repo, self.remote)) out.notify("synchronizing %s from %s" % (out.color(repo, "green"), self.remote)) syncer.run(repo, self.remote)
def function_collisions(self):
    '''Checks the build environment to deal with function collisions
    if primary_library is not defined.

    Scans every library's stage functions (``<library>_<stage>``) in the
    raw environment; when two or more libraries define the same stage and
    no plain stage function overrides them, warns and terminates.
    '''
    if self.environment.primary_library:
        return
    # FIX: the original list had a missing comma ('post_install' 'remove'),
    # which Python concatenates into 'post_installremove' — collisions in
    # the post_install and remove stages were never detected.
    preserved_names = [
        'extract',
        'prepare',
        'configure',
        'build',
        'install',
        'collision_check',
        'pre_merge',
        'post_install',
        'remove'
    ]
    race_list = {}
    for library in self.environment.libraries:
        for preserved_name in preserved_names:
            # a plain stage function in the spec overrides any
            # library-specific variant, so no collision is possible
            if preserved_name in self.environment.raw:
                continue
            if library + "_" + preserved_name in self.environment.raw:
                if preserved_name in race_list:
                    if not library in race_list[preserved_name]:
                        race_list[preserved_name].append(library)
                else:
                    race_list.update({preserved_name: [library]})
    # a stage "races" only when more than one library defines it
    result = [(key, race_list[key]) for key in race_list if len(race_list[key]) > 1]
    if result:
        out.warn("function collision detected in these stages. you should use primary_library keyword.")
        for item in result:
            stage, libraries = item
            out.notify(stage + ": " + ", ".join(libraries))
        lpms.terminate("please contact the package maintainer.")
def linstall(parameters='', arg='install'):
    '''Runs standard installation function with given parameters and commands'''
    values = {
        'prefix': install_dir,
        'defaultprefix': cst.prefix,
        'man': cst.man,
        'info': cst.info,
        'localstate': cst.localstate,
        'conf': cst.conf,
        'data': cst.data,
        'parameters': parameters,
        'argument': arg,
    }
    command = ('make prefix=%(prefix)s/%(defaultprefix)s '
               'datadir=%(prefix)s/%(data)s '
               'infodir=%(prefix)s/%(info)s '
               'localstatedir=%(prefix)s/%(localstate)s '
               'mandir=%(prefix)s/%(man)s '
               'sysconfdir=%(prefix)s/%(conf)s '
               '%(parameters)s '
               '%(argument)s') % values
    # collapse any double spaces left behind when parameters is empty
    command = " ".join(part for part in command.split(" ") if part != "")
    out.notify("running %s" % command)
    if not system(command):
        raise BuildError("linstall failed.")
    # remove /usr/share/info/dir file if it exists
    dir_file = "%s/usr/share/info/dir" % install_dir
    if os.path.isfile(dir_file):
        shelltools.remove_file(dir_file)
def run(self, repo): keyword = "[" + repo + "]" # import repo.conf self.read_conf_file() if keyword in self.data: first = self.data.index(keyword) for line in self.data[first + 1:]: if line.startswith("["): continue if self._type is None and line.startswith("type"): self._type = line.split("@")[1].strip() if self._type == 'local': return elif self.remote is None and line.startswith("remote"): self.remote = line.split("@")[1].strip() if self._type == "git": from lpms.syncers import git as syncer lpms.logger.info("synchronizing %s from %s" % (repo, self.remote)) out.notify("synchronizing %s from %s" % (out.color(repo, "green"), self.remote)) syncer.run(repo, self.remote)
def linstall(parameters='', arg='install'):
    '''Runs standard installation function with given parameters and commands'''
    values = {
        'prefix': install_dir,
        'defaultprefix': cst.prefix,
        'man': cst.man,
        'info': cst.info,
        'localstate': cst.localstate,
        'conf': cst.conf,
        'data': cst.data,
        'parameters': parameters,
        'argument': arg,
    }
    command = ('make prefix=%(prefix)s/%(defaultprefix)s '
               'datadir=%(prefix)s/%(data)s '
               'infodir=%(prefix)s/%(info)s '
               'localstatedir=%(prefix)s/%(localstate)s '
               'mandir=%(prefix)s/%(man)s '
               'sysconfdir=%(prefix)s/%(conf)s '
               '%(parameters)s '
               '%(argument)s') % values
    # collapse any double spaces left behind when parameters is empty
    command = " ".join(part for part in command.split(" ") if part != "")
    out.notify("running %s" % command)
    if not system(command):
        raise BuildError("linstall failed.")
    # remove /usr/share/info/dir file if it exists
    dir_file = "%s/usr/share/info/dir" % install_dir
    if os.path.isfile(dir_file):
        shelltools.remove_file(dir_file)
def run_extract(self):
    """Extract the package archives unless they were already extracted."""
    env = self.environment
    # nothing to do when no extraction is planned and none is forced
    if not getattr(env, "extract_nevertheless", False) and \
            not hasattr(env, "extract_plan"):
        return
    marker_dir = os.path.dirname(os.path.dirname(env.build_dir))
    extracted_file = os.path.join(marker_dir, ".extracted")
    if os.path.isfile(extracted_file):
        if env.force_extract:
            # drop the stale marker and extract again
            shelltools.remove_file(extracted_file)
        else:
            out.write("%s %s/%s-%s had been already extracted.\n" % (
                out.color(">>", "brightyellow"),
                env.category, env.name, env.version))
            return True
    utils.xterm_title("lpms: extracting %s/%s/%s-%s" % (
        env.repo, env.category, env.name, env.version))
    out.notify("extracting archive(s) to %s" % os.path.dirname(env.build_dir))
    # now, extract the archives
    self.run_stage("extract")
    out.notify("%s has been extracted." % env.fullname)
    shelltools.touch(extracted_file)
    if env.stage == "extract":
        lpms.terminate()
def conf(*args, **kwargs):
    '''Runs configure script with standard and given parameters'''
    conf_command = './configure'
    if "run_dir" in kwargs:
        conf_command = os.path.join(kwargs["run_dir"], "configure")
    # guard clauses: missing script is only a warning, a non-executable
    # script is a hard failure
    if not os.access(conf_command, os.F_OK):
        out.warn("no configure script found.")
        return
    if not os.access(conf_command, os.X_OK):
        raise BuildError("configure script is not executable.")
    command_line = '%s --prefix=/%s --build=%s --mandir=/%s --infodir=/%s ' \
            '--datadir=/%s --sysconfdir=/%s --localstatedir=/%s ' \
            '--libexecdir=/%s --libdir=/%s %s' % (
                conf_command, cst.prefix, cfg.LPMSConfig().CHOST,
                cst.man, cst.info, cst.data, cst.conf, cst.localstate,
                cst.libexec, cst.libdir, " ".join(args))
    # drop empty tokens (e.g. when no extra args were given)
    tokens = [token for token in command_line.split(" ") if token.strip()]
    out.notify("running %s" % "\n\t".join(tokens))
    if not system(" ".join(tokens)):
        raise BuildError("conf failed.")
def run_configure(self):
    """Run the configure stage unless a resumed build already did it."""
    env = self.environment
    utils.xterm_title("(%s/%s) lpms: configuring %s/%s-%s from %s" % (
        env.index, env.count, env.category,
        env.name, env.version, env.repo))
    out.normal("configuring source in %s" % env.build_dir)
    configured_file = os.path.join(
        os.path.dirname(os.path.dirname(env.build_dir)), ".configured")
    # a resumed build skips stages whose marker file already exists
    if os.path.isfile(configured_file) and env.resume_build:
        out.warn_notify("%s had been already configured." % env.fullname)
        return True
    lpms.logger.info("configuring in %s" % env.build_dir)
    self.run_stage("configure")
    out.notify("%s has been configured." % env.fullname)
    if not os.path.isfile(configured_file):
        touch(configured_file)
    # stop here when the user asked for the configure stage only
    if env.stage == "configure":
        lpms.terminate()
def function_collisions(self):
    '''Checks the build environment to deal with function collisions
    if primary_library is not defined.

    Scans every library's stage functions (``<library>_<stage>``) in the
    raw environment; when two or more libraries define the same stage and
    no plain stage function overrides them, warns and terminates.
    '''
    if self.environment.primary_library:
        return
    # FIX: the original list had a missing comma ('post_install' 'remove'),
    # which Python concatenates into 'post_installremove' — collisions in
    # the post_install and remove stages were never detected.
    preserved_names = [
        'extract',
        'prepare',
        'configure',
        'build',
        'install',
        'collision_check',
        'pre_merge',
        'post_install',
        'remove'
    ]
    race_list = {}
    for library in self.environment.libraries:
        for preserved_name in preserved_names:
            # a plain stage function in the spec overrides any
            # library-specific variant, so no collision is possible
            if preserved_name in self.environment.raw:
                continue
            if library + "_" + preserved_name in self.environment.raw:
                if preserved_name in race_list:
                    if not library in race_list[preserved_name]:
                        race_list[preserved_name].append(library)
                else:
                    race_list.update({preserved_name: [library]})
    # a stage "races" only when more than one library defines it
    result = [(key, race_list[key]) for key in race_list if len(race_list[key]) > 1]
    if result:
        out.warn("function collision detected in these stages. you should use primary_library keyword.")
        for item in result:
            stage, libraries = item
            out.notify(stage + ": " + ", ".join(libraries))
        lpms.terminate("please contact the package maintainer.")
def run_install(self):
    """Run the install stage and copy any extra documentation files."""
    env = self.environment
    utils.xterm_title("(%s/%s) lpms: installing %s/%s-%s from %s" % (
        env.index, env.count, env.category,
        env.name, env.version, env.repo))
    out.normal("installing %s to %s" % (env.fullname, env.install_dir))
    installed_file = os.path.join(
        os.path.dirname(os.path.dirname(env.build_dir)), ".installed")
    # a resumed build skips stages whose marker file already exists
    if os.path.isfile(installed_file) and env.resume_build:
        out.warn_notify("%s had been already installed." % env.fullname)
        return True
    lpms.logger.info("installing to %s" % env.build_dir)
    self.run_stage("install")
    if env.docs is not None:
        for doc in env.docs:
            # only (source, target) pairs are handled here; the plain
            # insdoc path for bare entries is currently disabled
            if isinstance(doc, (list, tuple)):
                source_file, target_file = doc
                # slotted packages get a versioned doc directory
                namestr = env.fullname if env.slot != "0" else env.name
                target = env.fix_target_path(
                    "/usr/share/doc/%s/%s" % (namestr, target_file))
                source = os.path.join(env.build_dir, source_file)
                insfile(source, target)
    out.notify("%s has been installed." % env.fullname)
    if not os.path.isfile(installed_file):
        touch(installed_file)
    # stop here when the user asked for the install stage only
    if env.stage == "install":
        lpms.terminate()
def conf(*args, **kwargs):
    '''Runs configure script with standard and given parameters'''
    conf_command = './configure'
    if "run_dir" in kwargs:
        conf_command = os.path.join(kwargs["run_dir"], "configure")
    # guard clauses: missing script is only a warning, a non-executable
    # script is a hard failure
    if not os.access(conf_command, os.F_OK):
        out.warn("no configure script found.")
        return
    if not os.access(conf_command, os.X_OK):
        raise BuildError("configure script is not executable.")
    command_line = '%s --prefix=/%s --build=%s --mandir=/%s --infodir=/%s ' \
            '--datadir=/%s --sysconfdir=/%s --localstatedir=/%s ' \
            '--libexecdir=/%s --libdir=/%s %s' % (
                conf_command, cst.prefix, cfg.LPMSConfig().CHOST,
                cst.man, cst.info, cst.data, cst.conf, cst.localstate,
                cst.libexec, cst.libdir, " ".join(args))
    # drop empty tokens (e.g. when no extra args were given)
    tokens = [token for token in command_line.split(" ") if token.strip()]
    out.notify("running %s" % "\n\t".join(tokens))
    if not system(" ".join(tokens)):
        raise BuildError("conf failed.")
def extract_gz(self, path): """Extracts GZIP archives. It generally ships single files like a patch. """ with gzip.open(path, "rb") as archive: out.notify("extracting %s to %s" % (os.path.basename(path), self.location)) file_content = archive.read() with open(os.path.join(self.location, "".join(os.path.basename(path).split(".gz"))), "w+") as myfile: myfile.write(file_content)
def make(*parameters, **kwargs):
    '''Runs standard build command with given parameters'''
    # an explicit j=N keyword overrides the configured MAKEOPTS
    jobs = "-j" + str(kwargs["j"]) if "j" in kwargs else cfg.LPMSConfig().MAKEOPTS
    command = "make %s %s" % (str(jobs), " ".join(parameters))
    out.notify("running %s" % command)
    if not system(command):
        raise BuildError("make failed")
def raw_configure(*parameters, **kwargs):
    '''Runs configure script with only given parameters'''
    conf_command = './configure'
    # NOTE(review): the pretty-printed command always shows './configure'
    # even when run_dir later overrides the actual path — confirm intended
    out.notify("running %s" % (conf_command + " " + "\n\t".join(parameters)))
    if "run_dir" in kwargs:
        conf_command = os.path.join(kwargs["run_dir"], "configure")
    if not system("%s %s" % (conf_command, " ".join(parameters))):
        raise BuildError("raw_configure failed.")
def raw_configure(*parameters, **kwargs):
    '''Runs configure script with only given parameters'''
    conf_command = './configure'
    # NOTE(review): the pretty-printed command always shows './configure'
    # even when run_dir later overrides the actual path — confirm intended
    out.notify("running %s" % (conf_command + " " + "\n\t".join(parameters)))
    if "run_dir" in kwargs:
        conf_command = os.path.join(kwargs["run_dir"], "configure")
    if not system("%s %s" % (conf_command, " ".join(parameters))):
        raise BuildError("raw_configure failed.")
def make(*parameters, **kwargs):
    '''Runs standard build command with given parameters'''
    # an explicit j=N keyword overrides the configured MAKEOPTS
    jobs = "-j" + str(kwargs["j"]) if "j" in kwargs else cfg.LPMSConfig().MAKEOPTS
    command = "make %s %s" % (str(jobs), " ".join(parameters))
    out.notify("running %s" % command)
    if not system(command):
        raise BuildError("make failed")
def raw_install(parameters='', arg='install'):
    '''Runs installation function with only given parameters'''
    command = "make %s %s" % (parameters, arg)
    out.notify("running %s" % command)
    if not system(command):
        raise BuildError("raw_install failed.")
    # remove /usr/share/info/dir file if it exists
    dir_file = "%s/usr/share/info/dir" % install_dir
    if os.path.isfile(dir_file):
        shelltools.remove_file(dir_file)
def raw_install(parameters='', arg='install'):
    '''Runs installation function with only given parameters'''
    command = "make %s %s" % (parameters, arg)
    out.notify("running %s" % command)
    if not system(command):
        raise BuildError("raw_install failed.")
    # remove /usr/share/info/dir file if it exists
    dir_file = "%s/usr/share/info/dir" % install_dir
    if os.path.isfile(dir_file):
        shelltools.remove_file(dir_file)
def extract_gz(self, path):
    """Extracts GZIP archives. It generally ships single files
    like a patch.

    The decompressed content is written into self.location under the
    original file name with every ".gz" removed.
    """
    output_path = os.path.join(
        self.location, "".join(os.path.basename(path).split(".gz")))
    with gzip.open(path, "rb") as archive:
        out.notify("extracting %s to %s" % (os.path.basename(path), self.location))
        file_content = archive.read()
    # FIX: gzip.open(..., "rb") yields bytes, but the output was opened in
    # text mode ("w+"), which breaks on Python 3 and may mangle newlines.
    # Write in binary mode so the content round-trips unchanged.
    with open(output_path, "wb") as target:
        target.write(file_content)
def run_prepare(self):
    """Run the prepare stage once, tracking completion with a marker file."""
    env = self.environment
    out.normal("preparing source...")
    prepared_file = os.path.join(
        os.path.dirname(os.path.dirname(env.build_dir)), ".prepared")
    # skip if a previous run already prepared this source
    if os.path.isfile(prepared_file):
        out.warn_notify("%s had been already prepared." % env.fullname)
        return True
    self.run_stage("prepare")
    out.notify("%s has been prepared." % env.fullname)
    if not os.path.isfile(prepared_file):
        touch(prepared_file)
    # stop here when the user asked for the prepare stage only
    if env.stage == "prepare":
        lpms.terminate()
def main():
    """List every repository under cst.repos with its status and metadata."""
    available_repositories = utils.available_repositories()
    for item in os.listdir(cst.repos):
        repo_conf = os.path.join(cst.repos, item, cst.repo_file)
        if not os.access(repo_conf, os.F_OK):
            continue
        # parse the repo.conf key@value pairs (avoid shadowing the file
        # handle with the parsed config)
        with open(repo_conf) as conf_file:
            data = conf.ReadConfig(conf_file.read().splitlines(), delimiter="@")
        status = out.color("enabled", "brightgreen") if item in \
                available_repositories else out.color("disabled", "brightred")
        out.normal("%s [%s]" % (item, status))
        out.notify("system name: %s" % item)

        def show(attribute, label):
            # print the attribute if defined, otherwise warn about it
            if hasattr(data, attribute):
                out.notify("%s: %s" % (label, getattr(data, attribute)))
            else:
                out.warn("%s is not defined!" % label)

        show("name", "development name")
        show("summary", "summary")
        show("maintainer", "maintainer")
        out.write("\n")
def main():
    """List every repository under cst.repos with its status and metadata."""
    available_repositories = utils.available_repositories()
    for item in os.listdir(cst.repos):
        repo_conf = os.path.join(cst.repos, item, cst.repo_file)
        if not os.access(repo_conf, os.F_OK):
            continue
        # parse the repo.conf key@value pairs (avoid shadowing the file
        # handle with the parsed config)
        with open(repo_conf) as conf_file:
            data = conf.ReadConfig(conf_file.read().splitlines(), delimiter="@")
        status = out.color("enabled", "brightgreen") if item in \
                available_repositories else out.color("disabled", "brightred")
        out.normal("%s [%s]" % (item, status))
        out.notify("system name: %s" % item)

        def show(attribute, label):
            # print the attribute if defined, otherwise warn about it
            if hasattr(data, attribute):
                out.notify("%s: %s" % (label, getattr(data, attribute)))
            else:
                out.warn("%s is not defined!" % label)

        show("name", "development name")
        show("summary", "summary")
        show("maintainer", "maintainer")
        out.write("\n")
def gnome2_icon_cache_update(*args, **kwargs):
    """Refresh the GTK+ icon cache, defaulting to the hicolor theme."""
    # positional args replace the default flags entirely
    parameters = " ".join(args) if args else "-q -t -f"
    target = kwargs.get("target", "/usr/share/icons/hicolor")
    out.notify("updating GTK+ icon cache...")
    command = "gtk-update-icon-cache %s %s" % (parameters, target)
    if not shelltools.system(command, sandbox=False):
        out.write(out.color("\n\tFAILED\n", "red"))
def run_prepare(self):
    """Run the prepare stage once, tracking completion with a marker file."""
    env = self.environment
    out.normal("preparing source...")
    prepared_file = os.path.join(
        os.path.dirname(os.path.dirname(env.build_dir)), ".prepared")
    # skip if a previous run already prepared this source
    if os.path.isfile(prepared_file):
        out.warn_notify("%s had been already prepared." % env.fullname)
        return True
    self.run_stage("prepare")
    out.notify("%s has been prepared." % env.fullname)
    if not os.path.isfile(prepared_file):
        touch(prepared_file)
    # stop here when the user asked for the prepare stage only
    if env.stage == "prepare":
        lpms.terminate()
def select_pkgs(self):
    """Select installed packages whose best repository version differs.

    Fills self.packages with "category/name:slot" entries that need an
    update, and self.notfound_pkg with installed packages missing from
    the repository database (reported at the end).
    """
    for pkg in self.instdb.get_all_packages():
        self.repo, self.category, self.name, self.version, self.slot = pkg
        # FIX: the original issued the same find_package query twice and
        # the second miss-path appended to notfound_pkg unconditionally,
        # duplicating the first (guarded) append. Query once, guard once.
        repository_items = self.repodb.find_package(package_name=self.name, \
                package_category=self.category)
        if not repository_items:
            # installed but absent from the repository database
            if not (self.category, self.name) in self.notfound_pkg:
                self.notfound_pkg.append((self.category, self.name))
            continue
        # collect available package versions keyed by slot
        available_versions = {}
        for item in repository_items:
            available_versions.setdefault(item.slot, []).append(item.version)
        # compare the installed version against the best available one
        for item in repository_items:
            if item.slot == self.slot:
                best_version = utils.best_version(available_versions[item.slot])
                if utils.vercmp(best_version, self.version) != 0:
                    self.packages.append(os.path.join(self.category, self.name)+":"+self.slot)
                break
    if self.notfound_pkg:
        out.write("%s: the following packages were installed but they could not be found in the database:\n\n" %
                out.color("WARNING", "brightyellow"))
        for no_category, no_name in self.notfound_pkg:
            out.notify("%s/%s" % (no_category, no_name))
        out.write("\n")
def run_build(self):
    """Run the build stage unless a resumed build already compiled it."""
    env = self.environment
    utils.xterm_title("(%s/%s) lpms: building %s/%s-%s from %s" % (
        env.index, env.count, env.category,
        env.name, env.version, env.repo))
    out.normal("compiling source in %s" % env.build_dir)
    built_file = os.path.join(
        os.path.dirname(os.path.dirname(env.build_dir)), ".built")
    # a resumed build skips stages whose marker file already exists
    if os.path.isfile(built_file) and env.resume_build:
        out.warn_notify("%s had been already built." % env.fullname)
        return True
    lpms.logger.info("building in %s" % env.build_dir)
    self.run_stage("build")
    out.notify("%s has been built." % env.fullname)
    if not os.path.isfile(built_file):
        touch(built_file)
    # stop here when the user asked for the build stage only
    if env.stage == "build":
        lpms.terminate()
def collision_check():
    # TODO: This is a temporary solution. collision_check function
    # must be a reusable part for using in remove operation
    # NOTE(review): reads `self` and `environment` without taking them as
    # parameters — this must be a closure inside an enclosing method; confirm.
    out.normal("checking file collisions...")
    lpms.logger.info("checking file collisions")
    collision_object = file_collisions.CollisionProtect(
        environment.category, environment.name, environment.slot,
        real_root=environment.real_root,
        source_dir=environment.install_dir)
    collision_object.handle_collisions()
    if collision_object.orphans:
        out.write(out.color(" > ", "brightyellow") +
                  "these files are orphan. the package will adopt the files:\n")
        # show at most ~100 orphans, then truncate the listing
        for index, orphan in enumerate(collision_object.orphans, 1):
            out.notify(orphan)
            if index > 100:
                # FIXME: the files must be logged
                out.write(out.color(" > ", "brightyellow") + "...and many others.")
                break
    if collision_object.collisions:
        out.write(out.color(" > ", "brightyellow") + "file collisions detected:\n")
        for item in collision_object.collisions:
            (category, name, slot, version), path = item
            out.write(out.color(" -- ", "red")+category+"/"+name+"-"\
                    +version+":"+slot+" -> "+path+"\n")
    if collision_object.collisions and self.config.collision_protect:
        if environment.force_file_collision:
            out.warn("Disregarding these collisions, you have been warned!")
        else:
            return False
    return True
def select_pkgs(self):
    """Select installed packages whose best repository version differs.

    Fills self.packages with "category/name:slot" entries that need an
    update, and self.notfound_pkg with installed packages missing from
    the repository database (reported at the end).
    """
    for pkg in self.instdb.get_all_packages():
        self.repo, self.category, self.name, self.version, self.slot = pkg
        # FIX: the original issued the same find_package query twice and
        # the second miss-path appended to notfound_pkg unconditionally,
        # duplicating the first (guarded) append. Query once, guard once.
        repository_items = self.repodb.find_package(package_name=self.name, \
                package_category=self.category)
        if not repository_items:
            # installed but absent from the repository database
            if not (self.category, self.name) in self.notfound_pkg:
                self.notfound_pkg.append((self.category, self.name))
            continue
        # collect available package versions keyed by slot
        available_versions = {}
        for item in repository_items:
            available_versions.setdefault(item.slot, []).append(item.version)
        # compare the installed version against the best available one
        for item in repository_items:
            if item.slot == self.slot:
                best_version = utils.best_version(available_versions[item.slot])
                if utils.vercmp(best_version, self.version) != 0:
                    self.packages.append(os.path.join(self.category, self.name)+":"+self.slot)
                break
    if self.notfound_pkg:
        out.write("%s: the following packages were installed but they could not be found in the database:\n\n" %
                out.color("WARNING", "brightyellow"))
        for no_category, no_name in self.notfound_pkg:
            out.notify("%s/%s" % (no_category, no_name))
        out.write("\n")
def update_repository(self, repo_name):
    """Re-scan a repository tree and rebuild its database entries."""
    # non-package directories living next to the categories
    exceptions = ('scripts', 'licenses', 'news', 'info',
                  'libraries', '.git', '.svn')
    # first, drop the repository from the database, then re-add everything
    self.repodb.database.delete_repository(repo_name, commit=True)
    repo_path = os.path.join(cst.repos, repo_name)
    for category in os.listdir(repo_path):
        target_directory = os.path.join(repo_path, category)
        if category in exceptions or not os.path.isdir(target_directory):
            continue
        packages = os.listdir(target_directory)
        # info.xml is category metadata, not a package
        try:
            packages.remove("info.xml")
        except ValueError:
            pass
        if lpms.getopt("--verbose"):
            out.notify("%s" % out.color(category, "brightwhite"))
        for package_name in packages:
            try:
                self.update_package(repo_path, category, package_name)
            except IntegrityError:
                # skip entries that violate database integrity
                continue
def update_repository(self, repo_name):
    """Re-scan a repository tree and rebuild its database entries."""
    # non-package directories living next to the categories
    exceptions = ('scripts', 'licenses', 'news', 'info',
                  'libraries', '.git', '.svn')
    # first, drop the repository from the database, then re-add everything
    self.repodb.database.delete_repository(repo_name, commit=True)
    repo_path = os.path.join(cst.repos, repo_name)
    for category in os.listdir(repo_path):
        target_directory = os.path.join(repo_path, category)
        if category in exceptions or not os.path.isdir(target_directory):
            continue
        packages = os.listdir(target_directory)
        # info.xml is category metadata, not a package
        try:
            packages.remove("info.xml")
        except ValueError:
            pass
        if lpms.getopt("--verbose"):
            out.notify("%s" % out.color(category, "brightwhite"))
        for package_name in packages:
            try:
                self.update_package(repo_path, category, package_name)
            except IntegrityError:
                # skip entries that violate database integrity
                continue
def run_extract(self):
    """Extract the package archives unless they were already extracted."""
    env = self.environment
    # nothing to do when no extraction is planned and none is forced
    if not getattr(env, "extract_nevertheless", False) and \
            not hasattr(env, "extract_plan"):
        return
    marker_dir = os.path.dirname(os.path.dirname(env.build_dir))
    extracted_file = os.path.join(marker_dir, ".extracted")
    if os.path.isfile(extracted_file):
        if env.force_extract:
            # drop the stale marker and extract again
            shelltools.remove_file(extracted_file)
        else:
            out.write("%s %s/%s-%s had been already extracted.\n" % (
                out.color(">>", "brightyellow"),
                env.category, env.name, env.version))
            return True
    utils.xterm_title("lpms: extracting %s/%s/%s-%s" % (
        env.repo, env.category, env.name, env.version))
    out.notify("extracting archive(s) to %s" % os.path.dirname(env.build_dir))
    # now, extract the archives
    self.run_stage("extract")
    out.notify("%s has been extracted." % env.fullname)
    shelltools.touch(extracted_file)
    if env.stage == "extract":
        lpms.terminate()
def run_install(self):
    """Run the install stage and copy any extra documentation files."""
    env = self.environment
    utils.xterm_title("(%s/%s) lpms: installing %s/%s-%s from %s" % (
        env.index, env.count, env.category,
        env.name, env.version, env.repo))
    out.normal("installing %s to %s" % (env.fullname, env.install_dir))
    installed_file = os.path.join(
        os.path.dirname(os.path.dirname(env.build_dir)), ".installed")
    # a resumed build skips stages whose marker file already exists
    if os.path.isfile(installed_file) and env.resume_build:
        out.warn_notify("%s had been already installed." % env.fullname)
        return True
    lpms.logger.info("installing to %s" % env.build_dir)
    self.run_stage("install")
    if env.docs is not None:
        for doc in env.docs:
            # only (source, target) pairs are handled here; the plain
            # insdoc path for bare entries is currently disabled
            if isinstance(doc, (list, tuple)):
                source_file, target_file = doc
                # slotted packages get a versioned doc directory
                namestr = env.fullname if env.slot != "0" else env.name
                target = env.fix_target_path(
                    "/usr/share/doc/%s/%s" % (namestr, target_file))
                source = os.path.join(env.build_dir, source_file)
                insfile(source, target)
    out.notify("%s has been installed." % env.fullname)
    if not os.path.isfile(installed_file):
        touch(installed_file)
    # stop here when the user asked for the install stage only
    if env.stage == "install":
        lpms.terminate()
def aclocal(*parameters):
    """Run the aclocal tool with the given parameters."""
    command = "aclocal %s" % " ".join(parameters)
    out.notify("running %s" % command)
    if not system(command):
        raise BuildError("aclocal failed.")
def perform_operation(self):
    '''Handles command line arguments and drive building operation'''
    self.set_environment_variables()
    # Check /proc and /dev. These filesystems must be mounted
    # to perform operations properly.
    for item in ('/proc', '/dev'):
        if not os.path.ismount(item):
            out.warn("%s is not mounted. You have been warned." % item)

    # clean source code extraction directory if it is wanted
    # TODO: check the following condition when resume functionality is back
    if self.instruction.clean_tmp:
        if self.instruction.resume_build is not None:
            out.warn("clean-tmp is disabled because of resume-build is enabled.")
        else:
            self.clean_temporary_directory()

    # we want to save starting time of the build operation to calculate building time
    # The starting point of logging
    lpms.logger.info("starting build (%s/%s) %s/%s/%s-%s" %
            (self.instruction.index, self.instruction.count,
                self.internals.env.repo, self.internals.env.category,
                self.internals.env.name, self.internals.env.version))
    out.normal("(%s/%s) building %s/%s from %s" %
            (self.instruction.index, self.instruction.count,
                out.color(self.internals.env.category, "green"),
                out.color(self.internals.env.name + "-" + self.internals.env.version, "green"),
                self.internals.env.repo))

    if self.internals.env.sandbox:
        lpms.logger.info("sandbox enabled build")
        out.notify("sandbox is enabled")
    else:
        lpms.logger.warning("sandbox disabled build")
        out.warn_notify("sandbox is disabled")

    # fetch packages which are in download_plan list
    if self.internals.env.src_url is not None:
        # preprocess url shortcuts such as $name, $version and etc
        self.parse_src_url_field()
        # if the package is revisioned, override build_dir and install_dir.
        # remove revision number from these variables.
        if self.revisioned:
            for variable in ("build_dir", "install_dir"):
                # strip the revision suffix from the directory's basename
                new_variable = "".join(os.path.basename(getattr(self.internals.env,
                    variable)).split(self.revision))
                setattr(self.internals.env, variable,
                        os.path.join(os.path.dirname(getattr(self.internals.env,
                            variable)), new_variable))

        utils.xterm_title("lpms: downloading %s/%s/%s-%s" %
                (self.internals.env.repo, self.internals.env.category,
                    self.internals.env.name, self.internals.env.version))

        self.prepare_download_plan(self.internals.env.applied_options)

        if not fetcher.URLFetcher().run(self.download_plan):
            lpms.terminate("\nplease check the spec")

    if self.internals.env.applied_options is not None and self.internals.env.applied_options:
        out.notify("applied options: %s" %
                " ".join(self.internals.env.applied_options))

    if self.internals.env.src_url is None and not self.extract_plan \
            and hasattr(self.internals.env, "extract"):
        # Workaround for #208
        self.internals.env.extract_nevertheless = True

    # Remove previous sandbox log if it is exist.
    if os.path.exists(cst.sandbox_log):
        shelltools.remove_file(cst.sandbox_log)

    # Enter the building directory
    os.chdir(self.internals.env.build_dir)

    # Manage ccache
    if hasattr(self.config, "ccache") and self.config.ccache:
        if utils.drive_ccache(config=self.config):
            out.notify("ccache is enabled.")
        else:
            out.warn("ccache could not be enabled. so you should check dev-util/ccache")

    # record the start time so the caller can compute total build time
    self.internals.env.start_time = time.time()
    return True, self.internals.env
def libtoolize(*parameters):
    """Run the libtoolize tool with the given parameters."""
    command = "libtoolize %s" % " ".join(parameters)
    out.notify("running %s" % command)
    if not system(command):
        raise BuildError("libtoolize failed.")
def libtoolize(*parameters):
    """Run the libtoolize tool with the given parameters."""
    command = "libtoolize %s" % " ".join(parameters)
    out.notify("running %s" % command)
    if not system(command):
        raise BuildError("libtoolize failed.")
def autoheader(*parameters):
    """Run the autoheader tool with the given parameters."""
    command = "autoheader %s" % " ".join(parameters)
    out.notify("running %s" % command)
    if not system(command):
        raise BuildError("autoheader failed.")
def aclocal(*parameters):
    """Run the aclocal tool with the given parameters."""
    command = "aclocal %s" % " ".join(parameters)
    out.notify("running %s" % command)
    if not system(command):
        raise BuildError("aclocal failed.")
def search(self):
    """Search packages by keyword in the repository database.

    With --only-installed and no keyword, lists installed packages instead.
    Falls back to the installed-packages database when the repository
    search yields nothing, and optionally lets the user pick results to
    build with --interactive.
    """
    # no keyword given: optionally dump every installed package and exit
    if not list(self.keyword) and lpms.getopt("--only-installed"):
        total = 0
        for package in self.instdb.get_all_names():
            repo, category, name = package
            version_data = self.instdb.get_version(name, repo_name=repo, \
                    pkg_category=category)
            total += 1
            for slot in version_data:
                out.notify("%s/%s/%s [slot:%s] -> %s" % (repo, category, name, \
                        slot, ", ".join(version_data[slot])))
        out.write("\npackage count: %d\n" % total)
        lpms.terminate()

    if lpms.getopt("--help") or len(self.keyword) == 0:
        self.usage()

    available = True
    results = []
    # choose which columns to match depending on --in-summary / --in-name
    if not lpms.getopt("--in-summary") and not lpms.getopt("--in-name"):
        self.cursor.execute('''SELECT repo, category, name, version, summary, slot FROM \
                package WHERE name LIKE (?) OR summary LIKE (?)''',
                ("%" + self.keyword + "%", "%" + self.keyword + "%"))
        results.extend(self.cursor.fetchall())
    elif lpms.getopt("--in-summary"):
        self.cursor.execute('''SELECT repo, category, name, version, summary, slot FROM \
                package WHERE summary LIKE (?)''', ("%" + self.keyword + "%", ))
        results.extend(self.cursor.fetchall())
    else:
        self.cursor.execute('''SELECT repo, category, name, version, summary, slot FROM \
                package WHERE name LIKE (?)''', ("%" + self.keyword + "%", ))
        results.extend(self.cursor.fetchall())

    if not results:
        # if no result, search given keyword in installed packages database
        connection = sqlite3.connect(cst.installdb_path)
        cursor = connection.cursor()
        cursor.execute('''SELECT repo, category, name, version, summary, slot FROM \
                package WHERE name LIKE (?) OR summary LIKE (?)''',
                ("%" + self.keyword + "%", "%" + self.keyword + "%"))
        results.extend(cursor.fetchall())
        if results:
            out.notify("these packages are installed but no longer available.")
            available = False

    # group the raw rows by (category, name) and print one line per package
    packages = self.classificate_packages(results)
    for index, package in enumerate(packages, 1):
        category, name = package
        if lpms.getopt("--interactive"):
            # number the entries so the user can pick them below
            out.write("[" + str(index) + "] " + out.color(category, "green") +
                    "/" + out.color(name, "green") + " - ")
        else:
            out.write(out.color(category, "green") + "/" +
                    out.color(name, "green") + " - ")
        # map repo -> list of versions for this package
        items = {}
        for item in packages[package]:
            if item[0] in items:
                items[item[0]].append(item[3])
            else:
                items[item[0]] = [item[3]]
        for item in items:
            out.write(out.color(item, "yellow") + "(" +
                    ", ".join(items[item]) + ") ")
        out.write("\n")
        # summary line (column 4 of the first row)
        out.write("    " + packages[package][0][4] + "\n")

    # shows a dialogue, selects the packages and triggers api's build function
    if results and lpms.getopt("--interactive"):
        my_packages = []

        def ask():
            out.write("\ngive number(s):\n")
            out.write("in order to select more than one package, use space between numbers:\n")
            out.write("to exit, press Q or q.\n")

        while True:
            ask()
            answers = sys.stdin.readline().strip()
            if answers == "Q" or answers == "q":
                lpms.terminate()
            else:
                targets = set()
                for answer in answers.split(" "):
                    if not answer.isdigit():
                        out.warn("%s is invalid. please give a number!" %
                                out.color(answer, "red"))
                        continue
                    else:
                        targets.add(answer)
                try:
                    # map the chosen 1-based indexes back to package keys
                    my_items = packages.keys()
                    for target in targets:
                        my_packages.append("/".join(my_items[int(target) - 1]))
                    break
                except (IndexError, ValueError):
                    out.warn("invalid command.")
                    continue

        if my_packages:
            api.pkgbuild(my_packages, self.instruct)
def notify(msg):
    """Forward *msg* to the shared output handler's notify channel."""
    out.notify(msg)
def apply_patch(patches, level, reverse):
    """Apply a sequence of patch files with the system `patch` tool.

    Args:
        patches: iterable of patch file paths.
        level:   strip depth passed to patch as -p<level> (an int).
        reverse: extra flag string spliced into the command line
                 (e.g. "-R" to reverse, or "" for a normal apply).

    Returns:
        True when every patch applies cleanly; False as soon as one
        fails (remaining patches are not attempted).

    Fix: the original fell through returning None (falsy) on success
    while returning False (also falsy) on failure, so callers testing
    truthiness could not tell success from failure. Now returns True
    explicitly on success; the False failure path is unchanged.
    """
    for patch in patches:
        out.notify("applying patch %s" % out.color(basename(patch), "green"))
        # --remove-empty-files: delete files emptied by the patch;
        # --no-backup-if-mismatch: avoid .orig litter on fuzzy applies.
        ret = shelltools.system("patch --remove-empty-files --no-backup-if-mismatch %s -p%d -i \"%s\"" %
                                (reverse, level, patch), show=False)
        if not ret:
            return False
    return True
def create_operation_plan(self):
    '''Resolve dependencies and prepares a convenient operation plan'''
    # Packages discovered to have no dependencies at all; handled specially
    # at the end so they still appear (first) in the final plan.
    single_packages = PackageItem()

    # --- Phase 1: breadth-first dependency collection ------------------
    for package in self.packages:
        self.parent_package = package
        self.current_package = None
        self.package_heap[package.id] = package
        dependencies = []
        package_dependencies = self.collect_dependencies(package)
        if not package_dependencies:
            single_packages.add(package)
            continue
        # Create a list that consists of parent and child items
        for dependency in package_dependencies:
            dependency.parent = package.category+"/"+package.name+"/"+package.slot
            dependencies.append((package.id, dependency))
        while True:
            buff = []
            for parent, dependency in dependencies:
                self.current_package = dependency
                self.parent_package = None
                # package_query accumulates (child_id, parent_id) edges for topsort.
                self.package_query.append((dependency.id, parent))
                if dependency.id in self.processed:
                    if self.processed[dependency.id] == self.package_options.get(dependency.id, None):
                        # This package was processed and it has no option changes
                        continue
                # Keep the package options to prevent extra transaction
                self.processed[dependency.id] = self.package_options.get(dependency.id, None)
                # Keep the package information for the next operations.
                # We don't want to create a new transaction for it.
                self.package_heap[dependency.id] = dependency
                # Get its dependencies
                package_collection = self.collect_dependencies(dependency)
                if not package_collection:
                    # The item has no dependency
                    continue
                # Create a list that consists of parent and child items
                for item in package_collection:
                    item.parent = package.category+"/"+package.name+"/"+package.slot
                    buff.append((dependency.id, item))
            if not buff:
                # End of the node
                break
            dependencies = buff

    # --- Phase 2: topological sort; report cycles and abort ------------
    try:
        # Sort packages for building operation
        plan = sorter.topsort(self.package_query)
    except sorter.CycleError as err:
        # NOTE(review): Python 2 style — unpacking the exception object
        # directly; relies on CycleError carrying a 3-tuple payload.
        answer, num_parents, children = err
        out.brightred("Circular dependency detected:\n")
        for items in sorter.find_cycles(parent_children=children):
            for item in items:
                package = self.repodb.find_package(package_id=item).get(0)
                out.write(package.repo+"/"+package.category+"/"+package.name+"-"\
                        +package.version+":"+package.slot+" ")
            out.write("\n")
        raise DependencyError

    # --- Phase 3: inline option conflict detection ---------------------
    # This part detects inline option conflicts
    # removed maps "-option" -> set of (package_id, target) that disable it;
    # a conflict is the same target both adding and removing one option.
    removed = {}
    option_conflict = set()
    for package_id in self.inline_option_targets:
        for target in self.inline_option_targets[package_id]:
            for option in self.inline_option_targets[package_id][target]:
                if option.startswith("-"):
                    if option in removed:
                        removed[option].add((package_id, target))
                    else:
                        removed[option] = set([(package_id, target)])
                else:
                    if "-"+option in removed:
                        for (my_pkg_id, my_target) in removed["-"+option]:
                            if my_target == target:
                                option_conflict.add((my_target, \
                                        self.package_heap[package_id], \
                                        self.package_heap[my_pkg_id],\
                                        option))
    if option_conflict:
        out.error("option conflict detected:\n")
        for (pkg, add, remove, option) in option_conflict:
            out.error(out.color(option, "red")+" option on "+pkg+"\n")
            out.warn("%s/%s/%s/%s adds the option." % (add.repo, add.category, \
                    add.name, add.version))
            out.warn("%s/%s/%s/%s removes the option." % (remove.repo, remove.category, \
                    remove.name, remove.version))
        lpms.terminate()

    # --- Phase 4: index conditional version constraints by owner -------
    self.conditional_versions = {}
    for (key, values) in self.conditional_packages.items():
        for value in values:
            target_package = self.package_heap[key]
            my_item = {
                    "type": value["type"],
                    "version": value["version"],
                    "target": target_package.category+"/"+target_package.name+\
                            "/"+target_package.slot,
            }
            if not value["owner_id"] in self.conditional_versions:
                self.conditional_versions[value["owner_id"]] = [my_item]
            else:
                self.conditional_versions[value["owner_id"]].append(my_item)

    # TODO: I think I must use most professional way for ignore-depends feature.
    if lpms.getopt("--ignore-deps"):
        # Short-circuit: hand back the raw requested packages untouched.
        result = LCollect()
        result.packages = self.packages
        result.dependencies = self.package_dependencies
        result.options = self.package_options
        result.inline_option_targets = self.inline_option_targets
        result.conditional_versions = self.conditional_versions
        result.conflicts = self.conflicts
        return result

    # Workaround for postmerge dependencies
    # Move each postmerge dependency to just after its dependent package.
    for (id_dependency, id_package) in self.postmerge_dependencies:
        plan.remove(id_dependency)
        plan.insert(plan.index(id_package)+1, id_dependency)

    # --- Phase 5: filter the sorted plan into the final plan -----------
    final_plan = PackageItem()
    required_package_ids = [package.id for package in self.packages]
    for package_id in plan:
        package = self.package_heap[package_id]
        continue_conditional = False
        # If a package has a conditional decision point,
        # we should consider the condition
        if package.id not in self.conditional_packages:
            for c_package_id in self.conditional_packages:
                c_package = self.package_heap[c_package_id]
                if package.pk == c_package.pk:
                    # A conditional twin (same pk) exists; prefer it.
                    continue_conditional = True
                    if package_id in required_package_ids:
                        final_plan.add_by_pk(c_package)
                    break
            if package_id in required_package_ids:
                if continue_conditional is False:
                    final_plan.add_by_pk(package)
        if continue_conditional:
            continue

        installed_package = self.instdb.find_package(
                package_category=package.category,
                package_name=package.name,
                package_slot=package.slot
        )
        if installed_package:
            # Rebuild if any requested inline option is not yet applied.
            if package.id in self.inline_options:
                if installed_package.get(0).applied_options is None:
                    final_plan.add_by_pk(package)
                    continue
                continue_inline = False
                for inline_option in self.inline_options[package.id]:
                    if not inline_option in installed_package.get(0).applied_options:
                        final_plan.add_by_pk(package)
                        continue_inline = True
                        break
                if continue_inline:
                    continue
            try:
                # Merge conditional version records stored in the installed
                # database into self.conditional_packages for this package.
                conditional_versions_query = self.instdb.find_conditional_versions(
                        target=package.category+"/"+package.name+"/"+package.slot)
                if conditional_versions_query:
                    for item in conditional_versions_query:
                        item.decision_point["package_id"] = item.package_id
                        if package.id in self.conditional_packages:
                            if not item.decision_point in self.conditional_packages[package.id]:
                                self.conditional_packages[package.id].append(item.decision_point)
                        else:
                            self.conditional_packages[package.id] = [item.decision_point]
                if package.id in self.conditional_packages:
                    decision_points = self.conditional_packages[package.id]
                    for decision_point in decision_points:
                        # comparison: -1/0/1 for installed <,=,> constraint version.
                        comparison = utils.vercmp(installed_package.get(0).version, \
                                decision_point["version"])
                        if decision_point["type"] == ">=":
                            if self.handle_condition_conflict(decision_point, final_plan, \
                                    package.pk, ("<", ">"), (0, 1)) is False:
                                continue
                            if not comparison in (1, 0) or package.id in required_package_ids:
                                final_plan.add_by_pk(package)
                        elif decision_point["type"] == "<":
                            if self.handle_condition_conflict(decision_point, final_plan, \
                                    package.pk, (">", "<"), (0, -1)) is False:
                                continue
                            if comparison != -1:
                                final_plan.add_by_pk(package)
                        elif decision_point["type"] == ">":
                            if self.handle_condition_conflict(decision_point, final_plan, \
                                    package.pk, ("<", ">"), (0, 1)) is False:
                                continue
                            if comparison != 1 or package.id in required_package_ids:
                                final_plan.add_by_pk(package)
                        elif decision_point["type"] == "<=":
                            if self.handle_condition_conflict(decision_point, final_plan, \
                                    package.pk, (">", "<"), (0, -1)) is False:
                                continue
                            if not comparison in (-1, 0) or package.id in required_package_ids:
                                final_plan.add_by_pk(package)
                        elif decision_point["type"] == "==":
                            if comparison != 0 or package.id in required_package_ids:
                                final_plan.add_by_pk(package)
            except ConditionConflict:
                # NOTE(review): relies on `decision_point` still being bound
                # from the loop above when the exception fires — fragile if
                # ConditionConflict can be raised before the loop runs.
                if not "owner_package" in decision_point:
                    conflict_package = self.instdb.find_package(package_id=\
                            decision_point["package_id"]).get(0)
                    decision_point["owner_package"] = conflict_package.repo+"/"+ \
                            conflict_package.category+"/"+ \
                            conflict_package.name+"/"+ \
                            conflict_package.version
                out.error("while selecting a convenient version of %s, a conflict detected:\n" % \
                        out.color(package.pk, "red"))
                out.notify(decision_point["owner_package"]+" wants "+\
                        decision_point["type"]+decision_point["version"])
                out.notify(self.conflict_point["owner_package"]+" wants "+\
                        self.conflict_point["type"]+self.conflict_point["version"])
                lpms.terminate("\nplease contact the package maintainers.")

            # Use new options if the package is effected
            if self.use_new_options and not package in final_plan:
                if package.id in self.package_options:
                    for option in self.package_options[package.id]:
                        if not option in installed_package.get(0).applied_options:
                            final_plan.add_by_pk(package)
                            break
        else:
            # Not installed at all: it must be built.
            final_plan.add_by_pk(package)

    # Oh my god! Some packages have no dependency.
    if single_packages:
        # Drop singles that already appear in the sorted plan by pk.
        # NOTE(review): removes from `single_packages` while iterating it —
        # depends on PackageItem's iteration semantics; verify.
        for single_package in single_packages:
            for item_id in plan:
                if self.package_heap[item_id].pk == single_package.pk:
                    single_packages.remove(single_package)
                    break
        for single_package in single_packages:
            final_plan.insert_into(0, single_package)

    # Create LCollect object to manage package dependency data
    operation_plan = LCollect()
    operation_plan.packages = final_plan
    operation_plan.dependencies = self.package_dependencies
    operation_plan.options = self.package_options
    operation_plan.inline_option_targets = self.inline_option_targets
    operation_plan.conditional_versions = self.conditional_versions
    operation_plan.conflicts = self.conflicts
    return operation_plan
def perform_operation(self):
    '''Handles command line arguments and drive building operation'''
    # Returns (True, env) on success; side effects include chdir into the
    # build directory and downloading sources.
    self.set_environment_variables()
    # Check /proc and /dev. These filesystems must be mounted
    # to perform operations properly.
    for item in ('/proc', '/dev'):
        if not os.path.ismount(item):
            out.warn("%s is not mounted. You have been warned." % item)

    # clean source code extraction directory if it is wanted
    # TODO: check the following condition when resume functionality is back
    if self.instruction.clean_tmp:
        if self.instruction.resume_build is not None:
            out.warn("clean-tmp is disabled because of resume-build is enabled.")
        else:
            self.clean_temporary_directory()

    # we want to save starting time of the build operation to calculate building time
    # The starting point of logging
    lpms.logger.info("starting build (%s/%s) %s/%s/%s-%s" % (
        self.instruction.index,
        self.instruction.count,
        self.internals.env.repo,
        self.internals.env.category,
        self.internals.env.name,
        self.internals.env.version
        )
    )
    out.normal("(%s/%s) building %s/%s from %s" % (
        self.instruction.index,
        self.instruction.count,
        out.color(self.internals.env.category, "green"),
        out.color(self.internals.env.name+"-"+self.internals.env.version, "green"),
        self.internals.env.repo
        )
    )

    # Report sandbox state both to the log file and to the console.
    if self.internals.env.sandbox:
        lpms.logger.info("sandbox enabled build")
        out.notify("sandbox is enabled")
    else:
        lpms.logger.warning("sandbox disabled build")
        out.warn_notify("sandbox is disabled")

    # fetch packages which are in download_plan list
    if self.internals.env.src_url is not None:
        # preprocess url shortcuts such as $name, $version and etc
        self.parse_src_url_field()
        # if the package is revisioned, override build_dir and install_dir.
        # remove revision number from these variables.
        if self.revisioned:
            for variable in ("build_dir", "install_dir"):
                # Strip the revision suffix from the basename, then rebuild
                # the full path with the same directory component.
                new_variable = "".join(os.path.basename(getattr(self.internals.env, \
                        variable)).split(self.revision))
                setattr(self.internals.env, variable, \
                        os.path.join(os.path.dirname(getattr(self.internals.env, \
                        variable)), new_variable))

        utils.xterm_title("lpms: downloading %s/%s/%s-%s" % (
            self.internals.env.repo,
            self.internals.env.category,
            self.internals.env.name,
            self.internals.env.version
            )
        )

        self.prepare_download_plan(self.internals.env.applied_options)
        if not fetcher.URLFetcher().run(self.download_plan):
            lpms.terminate("\nplease check the spec")

    if self.internals.env.applied_options is not None and self.internals.env.applied_options:
        out.notify("applied options: %s" %
                " ".join(self.internals.env.applied_options))

    if self.internals.env.src_url is None and not self.extract_plan \
            and hasattr(self.internals.env, "extract"):
        # Workaround for #208
        self.internals.env.extract_nevertheless = True

    # Remove previous sandbox log if it is exist.
    if os.path.exists(cst.sandbox_log):
        shelltools.remove_file(cst.sandbox_log)

    # Enter the building directory
    os.chdir(self.internals.env.build_dir)

    # Manage ccache
    if hasattr(self.config, "ccache") and self.config.ccache:
        if utils.drive_ccache(config=self.config):
            out.notify("ccache is enabled.")
        else:
            out.warn("ccache could not be enabled. so you should check dev-util/ccache")

    # Record the start time so build duration can be computed later.
    self.internals.env.start_time = time.time()
    return True, self.internals.env
def autoheader(*parameters):
    '''Invoke the autoheader tool with the given extra parameters.

    Raises BuildError when the autoheader command exits unsuccessfully.
    '''
    # Build the full command line once and reuse it for both the
    # notification message and the actual invocation.
    extra = " ".join(parameters)
    out.notify("running autoheader %s" % extra)
    if not system("autoheader %s" % extra):
        raise BuildError("autoheader failed.")