def system(cmd, show=False, stage=None, sandbox=None):
    """Run *cmd* through run_cmd, honouring sandbox and verbosity settings.

    cmd     -- the shell command to execute
    show    -- force-show command output even when the config hides it
    stage   -- build stage label; when set and the command fails with
               output, the combined output is returned alongside False
    sandbox -- tri-state: None takes the configured default, True/False
               force it; the CLI flags override either way

    Returns True on success; False (or ``(False, output + err)`` when
    *stage* is set and output was captured) on failure.
    """
    cfg = conf.LPMSConfig()
    if sandbox is None:
        sandbox = bool(cfg.sandbox)
    # command line flags override both the argument and the config file
    if lpms.getopt('--enable-sandbox'):
        sandbox = True
    elif lpms.getopt('--disable-sandbox'):
        sandbox = False
    if lpms.getopt("--verbose"):
        # NOTE(review): the verbose path does not pass enable_sandbox;
        # presumably intentional -- confirm against run_cmd's signature.
        ret, output, err = run_cmd(cmd, True)
    elif (not cfg.print_output or lpms.getopt("--quiet")) \
            and not show:
        ret, output, err = run_cmd(cmd, show=False, enable_sandbox=sandbox)
    else:
        ret, output, err = run_cmd(cmd, show=True, enable_sandbox=sandbox)
    if ret != 0:
        # Fix: reuse the already-created cfg instead of instantiating
        # conf.LPMSConfig() a second time.
        if not cfg.print_output or lpms.getopt("--quiet"):
            out.brightred("\n>> error messages:\n")
            out.write(err)
        out.warn("command failed: %s" % out.color(cmd, "red"))
        if stage and output and err:
            return False, output + err
        return False
    return True
def __init__(self, package, instruction, **kwargs):
    """Store the package, its instruction and optional resolver metadata.

    package and instruction are mandatory; every keyword listed below
    defaults to None when the caller does not supply it.
    """
    self.package = package
    self.instruction = instruction
    # optional metadata coming from the dependency resolver
    for keyword in ("dependencies", "options", "conditional_versions",
                    "conflicts", "inline_option_targets"):
        setattr(self, keyword, kwargs.get(keyword))
    # internal state
    self.repodb = api.RepositoryDB()
    self.instdb = api.InstallDB()
    self.download_plan = []
    self.extract_plan = []
    self.urls = []
    self.internals = internals.InternalFunctions()
    self.internals.env.raw.update({
        "get": self.internals.get,
        "cmd_options": [],
        "options": []
    })
    self.config = conf.LPMSConfig()
    if self.instruction.unset_env_variables is not None:
        utils.set_environment_variables()
    self.revisioned = False
    self.revision = None
def __init__(self, package, installdb=False):
    """Prepare package lookup state against the repository or install DB.

    package   -- the raw package request string
    installdb -- when True, query the install database instead of the
                 repository database (and skip the user arch/lock files)
    """
    self.package = package
    self.repo = None
    self.category = None
    self.name = None
    self.version = None
    self.slot = None
    self.conf = conf.LPMSConfig()
    self.custom_arch_request = {}
    self.locked_packages = []
    if installdb:
        self.database = dbapi.InstallDB()
        return
    self.database = dbapi.RepositoryDB()
    # user defined architecture overrides
    arch_file = os.path.join(cst.user_dir, "arch")
    if os.path.isfile(arch_file):
        with open(arch_file) as handle:
            for raw in handle.readlines():
                entry = raw.strip()
                if not entry:
                    continue
                self.custom_arch_request.update(
                    utils.ParseArchFile(entry, self.database).parse())
    # user defined package locks
    lock_file = os.path.join(cst.user_dir, "lock")
    if os.path.isfile(lock_file):
        with open(lock_file) as handle:
            for raw in handle.readlines():
                entry = raw.strip()
                if not entry:
                    continue
                self.locked_packages.extend(
                    utils.ParseUserDefinedFile(entry, self.database).parse())
def __init__(self):
    """Wire up the command line parser, configuration and script engine."""
    # parse the command line immediately so later attributes can rely on it
    request = CommandLineParser()
    request.start()
    self.request = request
    self.config = conf.LPMSConfig()
    # ScriptEngine runs the individual build stages
    self.interpreter = interpreter.ScriptEngine()
def __init__(self, environment):
    """Collect database handles and merge state for the given environment.

    environment -- the build environment object; real_root, repo,
                   category, name, previous_version, no_strip and
                   applied_options are read from it.
    """
    self.symlinks = []
    self.backup = []
    self.environment = environment
    self.instdb = api.InstallDB()
    self.repodb = api.RepositoryDB()
    self.conf = conf.LPMSConfig()
    self.info_files = []
    self.filesdb = api.FilesDB()
    self.file_relationsdb = api.FileRelationsDB()
    self.reverse_dependsdb = api.ReverseDependsDB()
    # MIME types treated as binaries (candidates for symbol stripping)
    self.binary_filetypes = ('application/x-executable', 'application/x-archive', \
            'application/x-sharedlib')
    self.merge_conf_file = os.path.join(self.environment.real_root, \
            cst.merge_conf_file)
    # files installed by the previously installed version of this package
    # (the redundant `self.previous_files = []` initialisation was removed:
    # it was unconditionally overwritten here)
    self.previous_files = self.filesdb.get_paths_by_package(self.environment.name, \
            repo=self.environment.repo, category=self.environment.category, \
            version=self.environment.previous_version)
    # Fix: the original read "debug" from self.env.applied_options, but the
    # attribute assigned above is self.environment -- self.env would raise
    # AttributeError whenever that branch was evaluated.
    # NOTE(review): stripping when no_strip is *set* looks inverted --
    # confirm the intended semantics of environment.no_strip.
    self.strip_debug_symbols = bool(
        self.environment.no_strip is not None and (
            (self.environment.applied_options is not None and
             "debug" in self.environment.applied_options) or
            utils.check_cflags("-g") or utils.check_cflags("-ggdb") or
            utils.check_cflags("-g3")))
def conf(*args, **kwargs):
    '''Runs configure script with standard and given parameters'''
    script = './configure'
    if "run_dir" in kwargs:
        script = os.path.join(kwargs["run_dir"], "configure")
    # guard clauses: the script must exist and be executable
    if not os.access(script, os.F_OK):
        out.warn("no configure script found.")
        return
    if not os.access(script, os.X_OK):
        raise BuildError("configure script is not executable.")
    # assemble the standard lpms directory layout flags
    command = '%s \
            --prefix=/%s \
            --build=%s \
            --mandir=/%s \
            --infodir=/%s \
            --datadir=/%s \
            --sysconfdir=/%s \
            --localstatedir=/%s \
            --libexecdir=/%s \
            --libdir=/%s \
            %s' % (script, cst.prefix,
                   cfg.LPMSConfig().CHOST, cst.man,
                   cst.info, cst.data,
                   cst.conf, cst.localstate,
                   cst.libexec, cst.libdir, " ".join(args))
    # collapse the padding whitespace into a clean argument list
    argv = [atom for atom in command.split(" ") if atom.strip()]
    out.notify("running %s" % "\n\t".join(argv))
    if not system(" ".join(argv)):
        raise BuildError("conf failed.")
def __init__(self):
    """Hold the lpms configuration for the interpreter.

    The interpreter/environment wiring that used to live here is
    currently disabled; only the configuration object is kept.
    """
    self.config = conf.LPMSConfig()
def standard_extract():
    """ Runs standard extract procedure.

    Extracts every archive in extract_plan from the source cache into the
    parent directory of build_dir. When the build script defines a
    `partial` variable, only the listed members are extracted.
    """
    target = os.path.dirname(build_dir)
    # hoist the config lookup out of the loop (it was computed twice
    # per iteration)
    cache_dir = cfg.LPMSConfig().src_cache
    for url in extract_plan:
        archive_path = os.path.join(cache_dir, os.path.basename(url))
        out.write(" %s %s\n" % (out.color(">", "green"), archive_path))
        try:
            # Fix: the original assigned the parsed list back to `partial`,
            # which made `partial` a local variable and turned its first
            # read into an UnboundLocalError (a NameError subclass) -- the
            # partial-extraction branch could never run. Binding to a new
            # name keeps `partial` a free variable resolved from the build
            # script's namespace.
            wanted = [atom.strip() for atom in partial.split(" ") if atom != "#"]
            archive.extract(str(archive_path), str(target), wanted)
        except NameError:
            # no `partial` defined by the build script: extract everything
            archive.extract(str(archive_path), str(target))
def set_environment_variables():
    """Export the toolchain environment variables from the lpms config."""
    config = conf.LPMSConfig()
    chost = config.CHOST
    # (variable, value) pairs exported in this exact order
    exports = (
        ('HOST', chost),
        ('CFLAGS', config.CFLAGS),
        ('CXXFLAGS', config.CXXFLAGS),
        ('LDFLAGS', config.LDFLAGS),
        ('JOBS', config.MAKEOPTS),
        ('CC', chost + "-" + "gcc"),
        ('CXX', chost + "-" + "g++"),
    )
    for variable, value in exports:
        export(variable, value)
def __init__(self, packages, command_line_options=None, custom_options=None,
             use_new_options=False):
    """Initialize dependency resolution state for the given packages.

    packages             -- iterable of package requests to resolve
    command_line_options -- options passed on the command line (default: [])
    custom_options       -- per-package option overrides (default: {})
    use_new_options      -- flag consumed by the resolution machinery
    """
    self.packages = packages
    # Fix: the original used mutable literals ([] and {}) as parameter
    # defaults, which are shared across calls; use None sentinels instead.
    self.command_line_options = [] if command_line_options is None \
            else command_line_options
    self.custom_options = {} if custom_options is None else custom_options
    self.use_new_options = use_new_options
    self.conflicts = {}
    self.current_package = None
    self.parent_package = None
    self.conf = conf.LPMSConfig()
    self.instdb = api.InstallDB()
    self.repodb = api.RepositoryDB()
    self.conditional_packages = {}
    self.processed = {}
    self.package_heap = {}
    self.control_chars = ["||"]
    self.inline_options = {}
    self.inline_option_targets = {}
    self.package_dependencies = {}
    self.postmerge_dependencies = set()
    self.package_options = {}
    self.repository_cache = {}
    self.user_defined_options = {}
    self.package_query = []
    self.locked_packages = []
    self.global_options = set()
    self.forbidden_options = set()
    # database keys for the four dependency classes, static and optional
    self.dependency_keywords = (
        'static_depends_build',
        'static_depends_runtime',
        'static_depends_conflict',
        'static_depends_postmerge',
        'optional_depends_build',
        'optional_depends_runtime',
        'optional_depends_conflict',
        'optional_depends_postmerge'
    )
    # split configured options: a leading '-' marks an option as forbidden
    for option in self.conf.options.split(" "):
        if option.strip():
            if not option.startswith("-"):
                self.global_options.add(option)
            else:
                self.forbidden_options.add(option[1:])
    self.get_user_defined_files()
    self.parse_user_defined_options_file()
    # user level package locks
    if hasattr(self, "user_defined_lock_file"):
        for locked_item in self.user_defined_lock_file:
            self.locked_packages.extend(self.parse_user_defined_file(locked_item))
    # user level architecture overrides
    self.custom_arch_requests = {}
    if hasattr(self, "user_defined_arch_file"):
        for arch_item in self.user_defined_arch_file:
            self.custom_arch_requests.update(utils.ParseArchFile(arch_item, \
                    self.repodb).parse())
def make(*parameters, **kwargs):
    '''Runs standard build command with given parameters'''
    # 'j' overrides the configured MAKEOPTS job count
    jobs = "-j" + str(kwargs["j"]) if "j" in kwargs else cfg.LPMSConfig().MAKEOPTS
    extra = " ".join(parameters)
    out.notify("running make %s %s" % (str(jobs), extra))
    if not system("make %s %s" % (str(jobs), extra)):
        raise BuildError("make failed")
def write_archive_hash(urls, file_name):
    """Record name, sha1 checksum and size for each source archive.

    urls      -- the raw url tag of the package
    file_name -- the package file name, parsed for name and version
    """
    name, version = utils.parse_pkgname(file_name)
    for url in utils.parse_url_tag(urls, name, version):
        basename = os.path.basename(url)
        path = os.path.join(conf.LPMSConfig().src_cache, basename)
        # fetch the archive first when it is not in the source cache yet
        if not os.access(path, os.F_OK):
            fetcher.URLFetcher().run([url])
        checksum = utils.sha1sum(path)
        shelltools.echo(
            "hashes",
            "%s %s %s" % (basename, checksum, os.path.getsize(path)))
def get_mimetype(path):
    """Return the MIME type of *path*, or False when it is not readable."""
    if not os.access(path, os.R_OK):
        return False
    if conf.LPMSConfig().userland == "BSD":
        # NOTE(review): *path* is interpolated into a shell command without
        # quoting -- a path containing spaces or shell metacharacters is
        # mishandled here. Flagged, not changed (os.popen never raises on
        # command failure, unlike a subprocess-based replacement).
        raw = os.popen("file -i %s" % path).read().strip()
        # "file -i" prints "<path>: <mime>; <charset>" -- keep the mime part
        return raw.split(":", 1)[1].split(";")[0].strip()
    handle = magic.open(magic.MIME_TYPE)
    handle.load()
    mimetype = handle.file(path.encode('utf-8'))
    handle.close()
    return mimetype
def drive_ccache(config=None):
    '''Set ccache related environment variables.

    config -- an LPMSConfig-like object; when None the global lpms
              configuration is loaded.

    Returns True when ccache was enabled, False when the ccache path is
    not readable.
    '''
    if config is None:
        config = conf.LPMSConfig()
    ccache_path = config.ccache_path if hasattr(
        config, "ccache_path") else cst.ccache_path
    if not os.access(ccache_path, os.R_OK):
        return False
    # Fix: the original mixed positional and mapping %-formatting in a
    # single pattern ("%s:%(PATH)s" % (ccache_path, os.environ)), which
    # raises TypeError ("format requires a mapping"). Build the value
    # from the explicit PATH entry instead.
    os.environ["PATH"] = "%s:%s" % (ccache_path, os.environ["PATH"])
    if hasattr(config, "ccache_dir"):
        os.environ["CCACHE_DIR"] = config.ccache_dir
    else:
        os.environ["CCACHE_DIR"] = cst.ccache_dir
    # sandboxed processes must be able to access CCACHE_DIR.
    # NOTE(review): this assumes SANDBOX_PATHS is already present in the
    # environment; a missing key raises KeyError (unchanged behaviour).
    os.environ["SANDBOX_PATHS"] = os.environ[
        'CCACHE_DIR'] + ":%(SANDBOX_PATHS)s" % os.environ
    return True
def color(msg, cl):
    """Wrap *msg* in the ANSI color named *cl*, unless coloring is off."""
    colorless = (lpms.getopt("--no-color") or lpms.getopt("-n")
                 or not conf.LPMSConfig().colorize)
    if colorless:
        return msg
    return colors[cl] + msg + colors['default']
from lpms import out
from lpms import constants
from lpms import shelltools
from lpms import conf
from lpms import utils

# Simple file downloader for lpms.
# Based on http://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
# TODO LIST:
#   1 - add resume support
#   2 - show estimated time and download speed
# URLFetcher only works with urls; the url must come in as a list.

# module-level lpms configuration shared by the fetcher
config = conf.LPMSConfig()

class URLFetcher:
    # Downloads source archives for lpms.
    def __init__(self):
        # download in 8 KiB chunks
        self.chunk_size = 8192
        # NOTE(review): `time` is used here but not imported in this chunk --
        # presumably imported in a part of the file not shown; confirm.
        self.begining = time.time()

    def estimated_time(self, current_size, total_size, time):
        # Estimate remaining download time from bytes fetched so far.
        # NOTE(review): the `time` parameter shadows the module used in
        # __init__; here it is the elapsed seconds value.
        # odun, great job! :p
        if current_size == 0:
            # avoid division by zero on the very first chunk
            current_size = 1
        # remaining = total * (seconds per byte) - seconds already spent
        elapsed = (total_size * (time / current_size) - time)
        # NOTE(review): the body appears truncated in this chunk -- no value
        # is returned after `elapsed` is computed; confirm against the full
        # file.
        # 1 = >> hour
        # 2 = >> minute
        # 3 = >> second
def remove_package(pkgnames, instruction):
    '''Triggers remove operation for given packages'''
    if instruction.like:
        # handle shortened package names: expand each LIKE pattern into the
        # matching installed package names
        database = dbapi.InstallDB()
        for item in instruction.like:
            query = database.db.cursor.execute(
                "SELECT name FROM package where name LIKE ?", (item, ))
            results = query.fetchall()
            if results:
                for result in results:
                    pkgnames.append(result[0])
        del database
    file_relationsdb = dbapi.FileRelationsDB()
    # NOTE(review): the PackageNotFound handling below is disabled; a
    # missing package currently propagates whatever GetPackage raises.
    #try:
    packages = [
        GetPackage(pkgname, installdb=True).select() for pkgname in pkgnames
    ]
    #except PackageNotFound as package_name:
    #    out.error("%s seems not installed." % package_name)
    #    lpms.terminate()
    instruction.count = len(packages)
    index = 0
    # FIXME: I must create a new reverse dependency handler implementation
    #if instruct["show-reverse-depends"]:
    #    instruct["ask"] = True
    #    # WARNING: the mechanism only shows directly reverse dependencies
    #    # supposing that if A is a reverse dependency of B and C is depends on A.
    #    # when the user removes B, A and C will be broken. But lpms will warn the user about A.
    #    broken_packages = []
    #    reversedb = dbapi.ReverseDependsDB()
    #    out.normal("resolving primary reverse dependencies...\n")
    #    for package in packages:
    #        category, name, version = package[1:]
    #        if lpms.getopt("--use-file-relations"):
    #            broken_packages.extend(file_relations.get_packages(category, name, version))
    #        else:
    #            broken_packages.extend(reversedb.get_reverse_depends(category, name))
    #    if broken_packages:
    #        out.warn("the following packages will be broken:\n")
    #        for broken_package in broken_packages:
    #            broken_repo, broken_category, broken_name, broken_version = broken_package
    #            out.write("  %s %s/%s/%s-%s\n" % (out.color(">", "brightred"), broken_repo, broken_category, \
    #                    broken_name, broken_version))
    #    else:
    #        out.warn("no reverse dependency found.")
    if instruction.ask:
        # interactive confirmation: list everything that will be removed
        out.write("\n")
        for package in packages:
            out.write(
                " %s %s/%s/%s-%s\n" % (out.color(
                    ">", "brightgreen"), out.color(package.repo, "green"),
                                       out.color(package.category, "green"),
                                       out.color(package.name, "green"),
                                       out.color(package.version, "green")))
        utils.xterm_title("lpms: confirmation request")
        out.write("\nTotal %s package will be removed.\n\n" %
                  out.color(str(instruction.count), "green"))
        if not utils.confirm("Would you like to continue?"):
            out.write("quitting...\n")
            utils.xterm_title_reset()
            lpms.terminate()
    # target root: an alternative root from the command line, or the default
    realroot = instruction.new_root if instruction.new_root else cst.root
    config = conf.LPMSConfig()
    for package in packages:
        # check whether any of this package's files are also owned by
        # other installed packages
        fdb = file_collisions.CollisionProtect(package.category, package.name, \
                package.slot, version=package.version, real_root=realroot)
        fdb.handle_collisions()
        if fdb.collisions:
            out.write(out.color(" > ", "brightyellow")+"file collisions detected while removing %s/%s/%s-%s\n\n" \
                    % (package.repo, package.category, package.name, package.version))
            for (c_package, c_path) in fdb.collisions:
                c_category, c_name, c_slot, c_version = c_package
                out.write(out.color(" -- ", "red")+c_category+"/"+c_name+"-"\
                        +c_version+":"+c_slot+" -> "+c_path+"\n")
        # abort on collisions unless the user forces the removal
        if fdb.collisions and config.collision_protect and not \
                lpms.getopt('--force-file-collision'):
            out.write(
                "\nquitting... use '--force-file-collision' to continue.\n"
            )
            lpms.terminate()
        index += 1
        instruction.index = index
        # run the 'remove' stage through the interpreter; on success drop
        # the package's file relation records
        if not initpreter.InitializeInterpreter(
                package, instruction, ['remove'], remove=True).initialize():
            out.warn("an error occured during remove operation: %s/%s/%s-%s" % (package.repo, package.category, \
                    package.name, package.version))
        else:
            file_relationsdb.delete_item_by_pkgdata(package.category,
                                                    package.name,
                                                    package.version,
                                                    commit=True)
def check_cflags(flag):
    """Return True when *flag* is one of the configured CFLAGS atoms.

    Fix: the original built the list from CFLAGS.strip(" "), which returns
    a *string*, so the comprehension iterated over single characters and a
    multi-character flag such as "-g" could never match. Split the string
    into whitespace-separated atoms instead.
    """
    return flag in [atom.strip() for \
            atom in conf.LPMSConfig().CFLAGS.split(" ")]