def unpackinfo(self, mydest):
    """Unpacks all the files from the dataSegment into 'mydest'.

    Entries whose names would escape 'mydest' via ../ components are
    skipped.  Returns 1 on success, 0 if the xpak could not be scanned.
    """
    if not self.scan():
        return 0
    # The trailing os.sep makes the startswith() containment check below
    # reject sibling-directory prefixes (e.g. /dest-evil for /dest).
    mydest = normalize_path(mydest) + os.sep
    a = open(_unicode_encode(self.file,
        encoding=_encodings['fs'], errors='strict'), 'rb')
    try:
        if not os.path.exists(mydest):
            os.makedirs(mydest)
        startpos = 0
        while (startpos + 8) < self.indexsize:
            namelen = decodeint(self.index[startpos:startpos + 4])
            datapos = decodeint(
                self.index[startpos + 4 + namelen:startpos + 8 + namelen])
            datalen = decodeint(
                self.index[startpos + 8 + namelen:startpos + 12 + namelen])
            myname = self.index[startpos + 4:startpos + 4 + namelen]
            # Advance to the next index record *before* any 'continue':
            # the original incremented only at the end of the loop body,
            # so a rejected ../ entry spun forever on the same record.
            startpos = startpos + namelen + 12
            myname = _unicode_decode(myname,
                encoding=_encodings['repo.content'], errors='replace')
            filename = os.path.join(mydest, myname.lstrip(os.sep))
            filename = normalize_path(filename)
            if not filename.startswith(mydest):
                # myname contains invalid ../ component(s)
                continue
            dirname = os.path.dirname(filename)
            if dirname and not os.path.exists(dirname):
                os.makedirs(dirname)
            mydat = open(_unicode_encode(filename,
                encoding=_encodings['fs'], errors='strict'), 'wb')
            try:
                a.seek(self.datapos + datapos)
                mydat.write(a.read(datalen))
            finally:
                mydat.close()
    finally:
        # Close the source archive even if an extraction step raised.
        a.close()
    return 1
def xpand(myid, mydest):
    """Unpack the (index, data) pair 'myid' into directory 'mydest'.

    Writes each file named in the index segment; entries whose names
    would escape 'mydest' via ../ components are skipped.
    """
    # Trailing os.sep makes the startswith() containment check safe
    # against sibling-directory prefix collisions.
    mydest = normalize_path(mydest) + os.sep
    myindex = myid[0]
    mydata = myid[1]
    myindexlen = len(myindex)
    startpos = 0
    while (startpos + 8) < myindexlen:
        namelen = decodeint(myindex[startpos:startpos + 4])
        datapos = decodeint(
            myindex[startpos + 4 + namelen:startpos + 8 + namelen])
        datalen = decodeint(
            myindex[startpos + 8 + namelen:startpos + 12 + namelen])
        myname = myindex[startpos + 4:startpos + 4 + namelen]
        # Advance to the next record *before* any 'continue': the
        # original incremented only at loop end, so a rejected ../
        # entry made this loop spin forever on the same record.
        startpos = startpos + namelen + 12
        myname = _unicode_decode(myname,
            encoding=_encodings['repo.content'], errors='replace')
        filename = os.path.join(mydest, myname.lstrip(os.sep))
        filename = normalize_path(filename)
        if not filename.startswith(mydest):
            # myname contains invalid ../ component(s)
            continue
        dirname = os.path.dirname(filename)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        mydat = open(_unicode_encode(filename,
            encoding=_encodings['fs'], errors='strict'), 'wb')
        try:
            mydat.write(mydata[datapos:datapos + datalen])
        finally:
            # Close the output file even if the write raised.
            mydat.close()
def addtolist(mylist, curdir):
    """(list, dir) --- Takes an array(list) and appends all files from dir
    down the directory tree. Returns nothing. list is modified."""
    curdir = normalize_path(
        _unicode_decode(curdir, encoding=_encodings['fs'], errors='strict'))
    for parent, dirs, files in os.walk(curdir):

        parent = _unicode_decode(parent,
            encoding=_encodings['fs'], errors='strict')
        if parent != curdir:
            # Record the directory itself, relative to curdir, with a
            # trailing separator to mark it as a directory entry.
            mylist.append(parent[len(curdir) + 1:] + os.sep)

        # Prune undecodable directory names.  The original called
        # dirs.remove() while iterating over dirs, which silently skips
        # the element that follows each removal.  Build the surviving
        # list first, then assign in place so os.walk still prunes.
        valid_dirs = []
        for x in dirs:
            try:
                _unicode_decode(x, encoding=_encodings['fs'],
                    errors='strict')
            except UnicodeDecodeError:
                continue
            valid_dirs.append(x)
        dirs[:] = valid_dirs

        for x in files:
            try:
                x = _unicode_decode(x,
                    encoding=_encodings['fs'], errors='strict')
            except UnicodeDecodeError:
                # Skip files whose names cannot be decoded.
                continue
            mylist.append(os.path.join(parent, x)[len(curdir) + 1:])
def addtolist(mylist, curdir):
    """(list, dir) --- Takes an array(list) and appends all files from dir
    down the directory tree. Returns nothing. list is modified."""
    curdir = normalize_path(_unicode_decode(curdir,
        encoding=_encodings['fs'], errors='strict'))
    for parent, dirs, files in os.walk(curdir):

        parent = _unicode_decode(parent,
            encoding=_encodings['fs'], errors='strict')
        if parent != curdir:
            # Append the directory itself (relative path, trailing sep).
            mylist.append(parent[len(curdir) + 1:] + os.sep)

        # BUGFIX: the original removed items from 'dirs' while iterating
        # it, which skips the entry after each removal.  Collect the
        # decodable names and assign back via a slice so that os.walk's
        # in-place pruning contract is preserved.
        keep = []
        for x in dirs:
            try:
                _unicode_decode(x, encoding=_encodings['fs'],
                    errors='strict')
            except UnicodeDecodeError:
                continue
            keep.append(x)
        dirs[:] = keep

        for x in files:
            try:
                x = _unicode_decode(x,
                    encoding=_encodings['fs'], errors='strict')
            except UnicodeDecodeError:
                # Undecodable file names are silently skipped.
                continue
            mylist.append(os.path.join(parent, x)[len(curdir) + 1:])
def unpackinfo(self, mydest):
    """Unpacks all the files from the dataSegment into 'mydest'.

    Entries whose names would escape 'mydest' via ../ components are
    skipped.  Returns 1 on success, 0 if the xpak could not be scanned.
    """
    if not self.scan():
        return 0
    # Trailing os.sep makes the startswith() containment check below
    # reject sibling-directory prefixes (e.g. /dest-evil for /dest).
    mydest = normalize_path(mydest) + os.sep
    a = open(
        _unicode_encode(self.file, encoding=_encodings["fs"], errors="strict"), "rb"
    )
    try:
        if not os.path.exists(mydest):
            os.makedirs(mydest)
        startpos = 0
        while (startpos + 8) < self.indexsize:
            namelen = decodeint(self.index[startpos : startpos + 4])
            datapos = decodeint(
                self.index[startpos + 4 + namelen : startpos + 8 + namelen]
            )
            datalen = decodeint(
                self.index[startpos + 8 + namelen : startpos + 12 + namelen]
            )
            myname = self.index[startpos + 4 : startpos + 4 + namelen]
            # Advance to the next record *before* any 'continue': the
            # original incremented only at the end of the loop body, so
            # a rejected ../ entry spun forever on the same record.
            startpos = startpos + namelen + 12
            myname = _unicode_decode(
                myname, encoding=_encodings["repo.content"], errors="replace"
            )
            filename = os.path.join(mydest, myname.lstrip(os.sep))
            filename = normalize_path(filename)
            if not filename.startswith(mydest):
                # myname contains invalid ../ component(s)
                continue
            dirname = os.path.dirname(filename)
            if dirname and not os.path.exists(dirname):
                os.makedirs(dirname)
            mydat = open(
                _unicode_encode(filename, encoding=_encodings["fs"], errors="strict"),
                "wb",
            )
            try:
                a.seek(self.datapos + datapos)
                mydat.write(a.read(datalen))
            finally:
                mydat.close()
    finally:
        # Close the source archive even if extraction raised.
        a.close()
    return 1
def __init__(self, arch, status, sub_path, tree_path):
    """Record one profile entry: its arch, status, and location.

    sub_path is normalized (leading separators stripped) only when it
    is non-empty; abs_path is derived from tree_path when one is given,
    otherwise it mirrors the falsy tree_path value.
    """
    self.arch = arch
    self.status = status
    # Normalize non-empty sub paths; leave falsy values untouched.
    self.sub_path = (
        normalize_path(sub_path.lstrip(os.sep)) if sub_path else sub_path)
    self.tree_path = tree_path
    # Without a tree path there is nothing to join against.
    self.abs_path = (
        os.path.join(tree_path, 'profiles', self.sub_path)
        if tree_path else tree_path)
def __init__(self, arch, status, sub_path, tree_path):
    """Store one profile description and derive its absolute path."""
    self.arch = arch
    self.status = status
    if sub_path:
        # Strip any leading separator before normalizing the
        # profile-relative path.
        sub_path = normalize_path(sub_path.lstrip(os.sep))
    self.sub_path = sub_path
    self.tree_path = tree_path
    if not tree_path:
        # No tree to anchor against: propagate the falsy value as-is.
        self.abs_path = tree_path
    else:
        self.abs_path = os.path.join(tree_path, 'profiles', self.sub_path)
def execute(self, path=None):
    """Run the hook scripts found in 'path' (defaults to self.path).

    No-op unless "hooks" is in FEATURES.  Raises InvalidLocation when
    the path exists but is not a directory, and PortageException when
    the spawned hook command exits non-zero.
    """
    if "hooks" not in self.settings['FEATURES']:
        return
    if not path:
        path = self.path
    path = normalize_path(path)

    # A missing hook directory is silently ignored (warned only under
    # --debug), matching hook.sh behavior.
    if not os.path.exists(path):
        if self.myopts and "--debug" in self.myopts:
            # behavior mimicked by hook.sh
            self.output.ewarn(
                'This hook path could not be found; ignored: ' + path)
        return

    if not os.path.isdir(path):
        raise InvalidLocation(
            'This hook path ought to be a directory: ' + path)

    # Assemble the hooks.sh invocation with its repeated
    # --opt/--action/--target arguments.
    command = [HOOKS_SH_BINARY]
    for myopt in (self.myopts or []):
        command.extend(['--opt', myopt])
    if self.myaction:
        command.extend(['--action', self.myaction])
    for mytarget in (self.mytargets or []):
        command.extend(['--target', mytarget])

    # Run through bash so isolated-functions.sh is sourced first, from
    # inside the hook directory itself.
    command = [
        BASH_BINARY, '-c',
        'cd "' + path + '" && source "' + PORTAGE_BIN_PATH
        + '/isolated-functions.sh" && source ' + ' '.join(command)]

    if self.myopts and "--verbose" in self.myopts:
        self.output.einfo(
            'Executing hooks directory "' + self.path + '"...')

    code = spawn(mycommand=command, env=self.settings.environ())
    if code:  # if failure
        # behavior mimicked by hook.sh
        raise PortageException(
            '!!! Hook directory %s failed with exit code %s'
            % (self.path, code))
def execute (self, path=None):
    """Run the hook scripts found in 'path' (defaults to self.path).

    Does nothing unless "hooks" is in FEATURES.  Raises
    InvalidLocation when the path exists but is not a directory, and
    PortageException when the spawned hook command exits non-zero.
    """
    if "hooks" not in self.settings['FEATURES']:
        return
    if not path:
        path = self.path
    path = normalize_path(path)
    # A missing hook directory is ignored; warn only under --debug.
    if not os.path.exists(path):
        if self.myopts and "--debug" in self.myopts:
            # behavior mimicked by hook.sh
            self.output.ewarn('This hook path could not be found; ignored: ' + path)
        return
    if os.path.isdir(path):
        # Build the hooks.sh invocation with its repeated
        # --opt/--action/--target arguments.
        command=[HOOKS_SH_BINARY]
        if self.myopts:
            for myopt in self.myopts:
                command.extend(['--opt', myopt])
        if self.myaction:
            command.extend(['--action', self.myaction])
        if self.mytargets:
            for mytarget in self.mytargets:
                command.extend(['--target', mytarget])
        # Run through bash so isolated-functions.sh is sourced first,
        # from inside the hook directory itself.
        command=[BASH_BINARY, '-c', 'cd "'+path+'" && source "' + PORTAGE_BIN_PATH + '/isolated-functions.sh" && source ' + ' '.join(command)]
        if self.myopts and "--verbose" in self.myopts:
            self.output.einfo('Executing hooks directory "' + self.path + '"...')
        code = spawn(mycommand=command, env=self.settings.environ())
        if code:  # if failure
            # behavior mimicked by hook.sh
            raise PortageException('!!! Hook directory %s failed with exit code %s' % (self.path, code))
    else:
        raise InvalidLocation('This hook path ought to be a directory: ' + path)
def __init__(self, repo_settings, myreporoot, config_root, options,
        vcs_settings, mydir, env):
    '''Class __init__

    Prepares a repoman scan: gathers categories and repo metadata,
    trims the portdb aux cache, computes the scan list, and builds the
    kwargs dict handed to the plugin checks.

    @param repo_settings: repository settings instance (also supplies
        portdb and repoman_settings)
    @param myreporoot: path split on os.path.sep to derive repolevel
    @param config_root: configuration root path (stored as-is)
    @param options: parsed command line options
    @param vcs_settings: VCS settings (supplies qatracker and changes)
    @param mydir: current directory, used for startdir when scanning
        below the repository root
    @param env: environment dict passed through to the plugin kwargs
    '''
    self.repo_settings = repo_settings
    self.config_root = config_root
    self.options = options
    self.vcs_settings = vcs_settings
    self.env = env

    # Repoman sets its own ACCEPT_KEYWORDS and we don't want it to
    # behave incrementally.
    self.repoman_incrementals = tuple(x for x in
        portage.const.INCREMENTALS if x != 'ACCEPT_KEYWORDS')

    # Collect the valid categories from every tree known to the
    # eclass db, then stack them into a single frozenset.
    self.categories = []
    for path in self.repo_settings.repo_config.eclass_db.porttrees:
        self.categories.extend(
            portage.util.grabfile(
                os.path.join(path, 'profiles', 'categories')))
    self.repo_settings.repoman_settings.categories = frozenset(
        portage.util.stack_lists([self.categories], incremental=1))
    self.categories = self.repo_settings.repoman_settings.categories

    self.portdb = repo_settings.portdb
    self.portdb.settings = self.repo_settings.repoman_settings

    digest_only = self.options.mode != 'manifest-check' \
        and self.options.digest == 'y'
    self.generate_manifest = digest_only or self.options.mode in \
        ("manifest", 'commit', 'fix')

    # We really only need to cache the metadata that's necessary for visibility
    # filtering. Anything else can be discarded to reduce memory consumption.
    if not self.generate_manifest:
        # Don't do this when generating manifests, since that uses
        # additional keys if spawn_nofetch is called (RESTRICT and
        # DEFINED_PHASES).
        self.portdb._aux_cache_keys.clear()
        self.portdb._aux_cache_keys.update(
            ["EAPI", "IUSE", "KEYWORDS", "repository", "SLOT"])

    self.reposplit = myreporoot.split(os.path.sep)
    self.repolevel = len(self.reposplit)

    if self.options.mode == 'commit':
        repochecks.commit_check(self.repolevel, self.reposplit)
        repochecks.conflict_check(self.vcs_settings, self.options)

    # Make startdir relative to the canonical repodir, so that we can pass
    # it to digestgen and it won't have to be canonicalized again.
    if self.repolevel == 1:
        startdir = self.repo_settings.repodir
    else:
        startdir = normalize_path(mydir)
        startdir = os.path.join(
            self.repo_settings.repodir,
            *startdir.split(os.sep)[-2 - self.repolevel + 3:])

    # get lists of valid keywords, licenses, and use
    new_data = repo_metadata(self.portdb, self.repo_settings.repoman_settings)
    kwlist, liclist, uselist, profile_list, \
        global_pmaskdict, liclist_deprecated = new_data
    self.repo_metadata = {
        'kwlist': kwlist,
        'liclist': liclist,
        'uselist': uselist,
        'profile_list': profile_list,
        'pmaskdict': global_pmaskdict,
        'lic_deprecated': liclist_deprecated,
        # Stacked package.deprecated atoms from every tree.
        'package.deprecated': InternalPackageSet(
            initial_atoms=portage.util.stack_lists([
                portage.util.grabfile_package(os.path.join(
                    path, 'profiles', 'package.deprecated'), recursive=True)
                for path in self.portdb.porttrees
            ], incremental=True))
    }

    self.repo_settings.repoman_settings['PORTAGE_ARCHLIST'] = ' '.join(
        sorted(kwlist))
    self.repo_settings.repoman_settings.backup_changes('PORTAGE_ARCHLIST')

    profiles = setup_profile(profile_list)

    check_profiles(profiles, self.repo_settings.repoman_settings.archlist())

    scanlist = scan(self.repolevel, self.reposplit, startdir,
        self.categories, self.repo_settings)

    self.dev_keywords = dev_profile_keywords(profiles)

    self.qatracker = self.vcs_settings.qatracker

    if self.options.echangelog is None and self.repo_settings.repo_config.update_changelog:
        self.options.echangelog = 'y'

    # Without a VCS there is no changelog automation to drive.
    if self.vcs_settings.vcs is None:
        self.options.echangelog = 'n'

    # Initialize the ModuleConfig class here
    # TODO Add layout.conf masters repository.yml config to the list to load/stack
    self.moduleconfig = ModuleConfig(
        self.repo_settings.masters_list,
        self.repo_settings.repoman_settings.valid_versions,
        repository_modules=self.options.experimental_repository_modules == 'y')

    checks = {}
    # The --echangelog option causes automatic ChangeLog generation,
    # which invalidates changelog.ebuildadded and changelog.missing
    # checks.
    # Note: Some don't use ChangeLogs in distributed SCMs.
    # It will be generated on server side from scm log,
    # before package moves to the rsync server.
    # This is needed because they try to avoid merge collisions.
    # Gentoo's Council decided to always use the ChangeLog file.
    # TODO: shouldn't this just be switched on the repo, iso the VCS?
    is_echangelog_enabled = self.options.echangelog in ('y', 'force')
    self.vcs_settings.vcs_is_cvs_or_svn = self.vcs_settings.vcs in ('cvs', 'svn')
    checks['changelog'] = not is_echangelog_enabled and self.vcs_settings.vcs_is_cvs_or_svn

    if self.options.mode == "manifest" or self.options.quiet:
        pass
    elif self.options.pretend:
        print(green("\nRepoMan does a once-over of the neighborhood..."))
    else:
        print(green("\nRepoMan scours the neighborhood..."))

    self.changed = self.vcs_settings.changes
    # bypass unneeded VCS operations if not needed
    if (self.options.if_modified == "y"
            or self.options.mode not in ("manifest", "manifest-check")):
        self.changed.scan()

    self.have = {
        'pmasked': False,
        'dev_keywords': False,
    }

    # NOTE: match-all caches are not shared due to potential
    # differences between profiles in _get_implicit_iuse.
    self.caches = {
        'arch': {},
        'arch_xmatch': {},
        'shared_xmatch': {
            "cp-list": {}
        },
    }

    self.include_arches = None
    if self.options.include_arches:
        self.include_arches = set()
        self.include_arches.update(
            *[x.split() for x in self.options.include_arches])

    self.include_profiles = None
    if self.options.include_profiles:
        self.include_profiles = set()
        self.include_profiles.update(
            *[x.split() for x in self.options.include_profiles])

    # Disable the "self.modules['Ebuild'].notadded" check when not in commit mode and
    # running `svn status` in every package dir will be too expensive.
    checks['ebuild_notadded'] = not \
        (self.vcs_settings.vcs == "svn"
            and self.repolevel < 3
            and self.options.mode != "commit")

    self.effective_scanlist = scanlist
    if self.options.if_modified == "y":
        self.effective_scanlist = sorted(
            vcs_files_to_cps(
                chain(self.changed.changed, self.changed.new,
                    self.changed.removed),
                self.repo_settings.repodir,
                self.repolevel, self.reposplit, self.categories))

    # Create our kwargs dict here to initialize the plugins with
    self.kwargs = {
        "repo_settings": self.repo_settings,
        "portdb": self.portdb,
        "qatracker": self.qatracker,
        "vcs_settings": self.vcs_settings,
        "options": self.options,
        "metadata_xsd": get_metadata_xsd(self.repo_settings),
        "uselist": uselist,
        "checks": checks,
        "repo_metadata": self.repo_metadata,
        "profiles": profiles,
        "include_arches": self.include_arches,
        "include_profiles": self.include_profiles,
        "caches": self.caches,
        "repoman_incrementals": self.repoman_incrementals,
        "env": self.env,
        "have": self.have,
        "dev_keywords": self.dev_keywords,
        "linechecks": self.moduleconfig.linechecks,
    }
    # initialize the plugin checks here
    self.modules = {}
    self._ext_futures = {}
    self.pkg_level_futures = None
def package_from_ebuild(ebuild):
    """Resolve an ebuild file path to its cpv ("category/pkg-version").

    Side effects: may extend the PORTDIR_OVERLAY environment variable
    and reload the portage module so the ebuild's tree is recognized,
    and trims portage.portdb.porttrees.  Verifies that findname()
    resolves the cpv back to this exact ebuild before returning it.

    @param ebuild: path to a file ending in ".ebuild"
    @return: the cpv string, or False on invalid input or any mismatch
    """
    pf = None
    if ebuild.endswith(".ebuild"):
        # PF is the file name minus the ".ebuild" suffix (7 chars).
        pf = os.path.basename(ebuild)[:-7]
    else:
        return False

    if not os.path.isabs(ebuild):
        mycwd = os.getcwd()
        # Try to get the non-canonical path from the PWD environment
        # variable, since the canonical path returned from os.getcwd()
        # may be unusable in cases where the directory structure is
        # built from symlinks.
        pwd = os.environ.get('PWD', '')
        if sys.hexversion < 0x3000000:
            pwd = _unicode_decode(pwd, encoding=_encodings['content'],
                errors='strict')
        if pwd and pwd != mycwd and \
            os.path.realpath(pwd) == mycwd:
            mycwd = portage.normalize_path(pwd)
        ebuild = os.path.join(mycwd, ebuild)

    ebuild = portage.normalize_path(ebuild)
    # portdbapi uses the canonical path for the base of the portage tree, but
    # subdirectories of the base can be built from symlinks (like crossdev
    # does).
    ebuild_portdir = os.path.realpath(
        os.path.dirname(os.path.dirname(os.path.dirname(ebuild))))
    ebuild = os.path.join(ebuild_portdir, *ebuild.split(os.path.sep)[-3:])
    vdb_path = os.path.join(portage.settings['ROOT'], VDB_PATH)

    # Make sure that portdb.findname() returns the correct ebuild.
    if ebuild_portdir != vdb_path and \
        ebuild_portdir not in portage.portdb.porttrees:
        # Unknown tree: append it to PORTDIR_OVERLAY and reload
        # portage so the new overlay takes effect.
        if sys.hexversion >= 0x3000000:
            os.environ["PORTDIR_OVERLAY"] = \
                os.environ.get("PORTDIR_OVERLAY", "") + \
                " " + _shell_quote(ebuild_portdir)
        else:
            os.environ["PORTDIR_OVERLAY"] = \
                os.environ.get("PORTDIR_OVERLAY", "") + \
                " " + _unicode_encode(_shell_quote(ebuild_portdir),
                encoding=_encodings['content'], errors='strict')
        portage.close_portdbapi_caches()
        imp.reload(portage)
    # Drop every overlay except the primary tree, then re-add ours.
    del portage.portdb.porttrees[1:]
    if ebuild_portdir != portage.portdb.porttree_root:
        portage.portdb.porttrees.append(ebuild_portdir)

    if not os.path.exists(ebuild):
        return False

    ebuild_split = ebuild.split("/")
    cpv = "%s/%s" % (ebuild_split[-3], pf)

    if not portage.catpkgsplit(cpv):
        return False

    # Confirm the db resolves cpv back to exactly this ebuild file.
    if ebuild.startswith(os.path.join(portage.root, portage.const.VDB_PATH)):
        mytree = "vartree"
        portage_ebuild = portage.db[portage.root][mytree].dbapi.findname(cpv)
        if os.path.realpath(portage_ebuild) != ebuild:
            return False
    else:
        mytree = "porttree"
        portage_ebuild = portage.portdb.findname(cpv)
        if not portage_ebuild or portage_ebuild != ebuild:
            return False

    return cpv
def package_from_ebuild(ebuild):
    """Resolve an ebuild file path to its cpv ("category/pkg-version").

    Side effects: may extend PORTDIR_OVERLAY in os.environ and reload
    the portage module so the ebuild's tree is recognized, and trims
    portage.portdb.porttrees.  Verifies that findname() resolves the
    cpv back to this exact ebuild before returning it.

    @param ebuild: path to a file ending in ".ebuild"
    @return: the cpv string, or False on invalid input or any mismatch
    """
    pf = None
    if ebuild.endswith(".ebuild"):
        # PF is the file name minus the ".ebuild" suffix (7 chars).
        pf = os.path.basename(ebuild)[:-7]
    else:
        return False

    if not os.path.isabs(ebuild):
        mycwd = os.getcwd()
        # Try to get the non-canonical path from the PWD environment
        # variable, since the canonical path returned from os.getcwd()
        # may be unusable in cases where the directory structure is
        # built from symlinks.
        pwd = os.environ.get('PWD', '')
        if sys.hexversion < 0x3000000:
            pwd = _unicode_decode(pwd, encoding=_encodings['content'],
                errors='strict')
        if pwd and pwd != mycwd and \
            os.path.realpath(pwd) == mycwd:
            mycwd = portage.normalize_path(pwd)
        ebuild = os.path.join(mycwd, ebuild)

    ebuild = portage.normalize_path(ebuild)
    # portdbapi uses the canonical path for the base of the portage tree, but
    # subdirectories of the base can be built from symlinks (like crossdev
    # does).
    ebuild_portdir = os.path.realpath(
        os.path.dirname(os.path.dirname(os.path.dirname(ebuild))))
    ebuild = os.path.join(ebuild_portdir, *ebuild.split(os.path.sep)[-3:])
    vdb_path = os.path.join(portage.settings['ROOT'], VDB_PATH)

    # Make sure that portdb.findname() returns the correct ebuild.
    if ebuild_portdir != vdb_path and \
        ebuild_portdir not in portage.portdb.porttrees:
        # Unknown tree: append it to PORTDIR_OVERLAY and reload
        # portage so the new overlay takes effect.
        if sys.hexversion >= 0x3000000:
            os.environ["PORTDIR_OVERLAY"] = \
                os.environ.get("PORTDIR_OVERLAY", "") + \
                " " + _shell_quote(ebuild_portdir)
        else:
            os.environ["PORTDIR_OVERLAY"] = \
                os.environ.get("PORTDIR_OVERLAY", "") + \
                " " + _unicode_encode(_shell_quote(ebuild_portdir),
                encoding=_encodings['content'], errors='strict')
        portage.close_portdbapi_caches()
        imp.reload(portage)
    # Drop every overlay except the primary tree, then re-add ours.
    del portage.portdb.porttrees[1:]
    if ebuild_portdir != portage.portdb.porttree_root:
        portage.portdb.porttrees.append(ebuild_portdir)

    if not os.path.exists(ebuild):
        return False

    ebuild_split = ebuild.split("/")
    cpv = "%s/%s" % (ebuild_split[-3], pf)

    if not portage.catpkgsplit(cpv):
        return False

    # Confirm the db resolves cpv back to exactly this ebuild file.
    if ebuild.startswith(os.path.join(portage.root, portage.const.VDB_PATH)):
        mytree = "vartree"
        portage_ebuild = portage.db[portage.root][mytree].dbapi.findname(cpv)
        if os.path.realpath(portage_ebuild) != ebuild:
            return False
    else:
        mytree = "porttree"
        portage_ebuild = portage.portdb.findname(cpv)
        if not portage_ebuild or portage_ebuild != ebuild:
            return False

    return cpv
def __init__(self, repo_settings, myreporoot, config_root, options,
        vcs_settings, mydir, env):
    '''Class __init__

    Prepares a repoman scan: gathers categories and repo metadata,
    trims the portdb aux cache, computes the scan list, and builds the
    kwargs dict handed to the plugin checks.

    @param repo_settings: repository settings instance (also supplies
        portdb and repoman_settings)
    @param myreporoot: path split on os.path.sep to derive repolevel
    @param config_root: configuration root path (stored as-is)
    @param options: parsed command line options
    @param vcs_settings: VCS settings (supplies qatracker and changes)
    @param mydir: current directory, used for startdir when scanning
        below the repository root
    @param env: environment dict passed through to the plugin kwargs
    '''
    self.repo_settings = repo_settings
    self.config_root = config_root
    self.options = options
    self.vcs_settings = vcs_settings
    self.env = env

    # Repoman sets its own ACCEPT_KEYWORDS and we don't want it to
    # behave incrementally.
    self.repoman_incrementals = tuple(
        x for x in portage.const.INCREMENTALS if x != 'ACCEPT_KEYWORDS')

    # Collect valid categories from every tree known to the eclass db,
    # then stack them into a single frozenset.
    self.categories = []
    for path in self.repo_settings.repo_config.eclass_db.porttrees:
        self.categories.extend(portage.util.grabfile(
            os.path.join(path, 'profiles', 'categories')))
    self.repo_settings.repoman_settings.categories = frozenset(
        portage.util.stack_lists([self.categories], incremental=1))
    self.categories = self.repo_settings.repoman_settings.categories

    self.portdb = repo_settings.portdb
    self.portdb.settings = self.repo_settings.repoman_settings

    digest_only = self.options.mode != 'manifest-check' \
        and self.options.digest == 'y'
    self.generate_manifest = digest_only or self.options.mode in \
        ("manifest", 'commit', 'fix')

    # We really only need to cache the metadata that's necessary for visibility
    # filtering. Anything else can be discarded to reduce memory consumption.
    if not self.generate_manifest:
        # Don't do this when generating manifests, since that uses
        # additional keys if spawn_nofetch is called (RESTRICT and
        # DEFINED_PHASES).
        self.portdb._aux_cache_keys.clear()
        self.portdb._aux_cache_keys.update(
            ["EAPI", "IUSE", "KEYWORDS", "repository", "SLOT"])

    self.reposplit = myreporoot.split(os.path.sep)
    self.repolevel = len(self.reposplit)

    if self.options.mode == 'commit':
        repochecks.commit_check(self.repolevel, self.reposplit)
        repochecks.conflict_check(self.vcs_settings, self.options)

    # Make startdir relative to the canonical repodir, so that we can pass
    # it to digestgen and it won't have to be canonicalized again.
    if self.repolevel == 1:
        startdir = self.repo_settings.repodir
    else:
        startdir = normalize_path(mydir)
        startdir = os.path.join(
            self.repo_settings.repodir,
            *startdir.split(os.sep)[-2 - self.repolevel + 3:])

    # get lists of valid keywords, licenses, and use
    new_data = repo_metadata(self.portdb, self.repo_settings.repoman_settings)
    kwlist, liclist, uselist, profile_list, \
        global_pmaskdict, liclist_deprecated = new_data
    self.repo_metadata = {
        'kwlist': kwlist,
        'liclist': liclist,
        'uselist': uselist,
        'profile_list': profile_list,
        'pmaskdict': global_pmaskdict,
        'lic_deprecated': liclist_deprecated,
    }

    self.repo_settings.repoman_settings['PORTAGE_ARCHLIST'] = ' '.join(sorted(kwlist))
    self.repo_settings.repoman_settings.backup_changes('PORTAGE_ARCHLIST')

    profiles = setup_profile(profile_list)

    check_profiles(profiles, self.repo_settings.repoman_settings.archlist())

    scanlist = scan(self.repolevel, self.reposplit, startdir,
        self.categories, self.repo_settings)

    self.dev_keywords = dev_profile_keywords(profiles)

    self.qatracker = self.vcs_settings.qatracker

    if self.options.echangelog is None and self.repo_settings.repo_config.update_changelog:
        self.options.echangelog = 'y'

    # Without a VCS there is no changelog automation to drive.
    if self.vcs_settings.vcs is None:
        self.options.echangelog = 'n'

    # Initialize the ModuleConfig class here
    # TODO Add layout.conf masters repository.yml config to the list to load/stack
    self.moduleconfig = ModuleConfig(self.repo_settings.masters_list,
        self.repo_settings.repoman_settings.valid_versions,
        repository_modules=self.options.experimental_repository_modules == 'y')

    checks = {}
    # The --echangelog option causes automatic ChangeLog generation,
    # which invalidates changelog.ebuildadded and changelog.missing
    # checks.
    # Note: Some don't use ChangeLogs in distributed SCMs.
    # It will be generated on server side from scm log,
    # before package moves to the rsync server.
    # This is needed because they try to avoid merge collisions.
    # Gentoo's Council decided to always use the ChangeLog file.
    # TODO: shouldn't this just be switched on the repo, iso the VCS?
    is_echangelog_enabled = self.options.echangelog in ('y', 'force')
    self.vcs_settings.vcs_is_cvs_or_svn = self.vcs_settings.vcs in ('cvs', 'svn')
    checks['changelog'] = not is_echangelog_enabled and self.vcs_settings.vcs_is_cvs_or_svn

    if self.options.mode == "manifest" or self.options.quiet:
        pass
    elif self.options.pretend:
        print(green("\nRepoMan does a once-over of the neighborhood..."))
    else:
        print(green("\nRepoMan scours the neighborhood..."))

    self.changed = self.vcs_settings.changes
    # bypass unneeded VCS operations if not needed
    if (self.options.if_modified == "y"
            or self.options.mode not in ("manifest", "manifest-check")):
        self.changed.scan()

    self.have = {
        'pmasked': False,
        'dev_keywords': False,
    }

    # NOTE: match-all caches are not shared due to potential
    # differences between profiles in _get_implicit_iuse.
    self.caches = {
        'arch': {},
        'arch_xmatch': {},
        'shared_xmatch': {"cp-list": {}},
    }

    self.include_arches = None
    if self.options.include_arches:
        self.include_arches = set()
        self.include_arches.update(
            *[x.split() for x in self.options.include_arches])

    # Disable the "self.modules['Ebuild'].notadded" check when not in commit mode and
    # running `svn status` in every package dir will be too expensive.
    checks['ebuild_notadded'] = not \
        (self.vcs_settings.vcs == "svn"
            and self.repolevel < 3
            and self.options.mode != "commit")

    self.effective_scanlist = scanlist
    if self.options.if_modified == "y":
        self.effective_scanlist = sorted(vcs_files_to_cps(
            chain(self.changed.changed, self.changed.new, self.changed.removed),
            self.repo_settings.repodir,
            self.repolevel, self.reposplit, self.categories))

    # Create our kwargs dict here to initialize the plugins with
    self.kwargs = {
        "repo_settings": self.repo_settings,
        "portdb": self.portdb,
        "qatracker": self.qatracker,
        "vcs_settings": self.vcs_settings,
        "options": self.options,
        "metadata_xsd": get_metadata_xsd(self.repo_settings),
        "uselist": uselist,
        "checks": checks,
        "repo_metadata": self.repo_metadata,
        "profiles": profiles,
        "include_arches": self.include_arches,
        "caches": self.caches,
        "repoman_incrementals": self.repoman_incrementals,
        "env": self.env,
        "have": self.have,
        "dev_keywords": self.dev_keywords,
        "linechecks": self.moduleconfig.linechecks,
    }
    # initialize the plugin checks here
    self.modules = {}
    self._ext_futures = {}
    self.pkg_level_futures = None
def __init__(self, repo_settings, myreporoot, config_root, options,
        vcs_settings, mydir, env):
    '''Class __init__

    Prepares a repoman scan: gathers categories and repo metadata,
    trims the portdb aux cache, computes the scan list, and constructs
    every checker object used by the per-package loop.

    @param repo_settings: repository settings instance (also supplies
        portdb and repoman_settings)
    @param myreporoot: path split on os.path.sep to derive repolevel
    @param config_root: configuration root path (stored as-is)
    @param options: parsed command line options
    @param vcs_settings: VCS settings instance
    @param mydir: current directory, used for startdir when scanning
        below the repository root
    @param env: environment dict (stored as-is)
    '''
    self.repo_settings = repo_settings
    self.config_root = config_root
    self.options = options
    self.vcs_settings = vcs_settings
    self.env = env

    # Repoman sets its own ACCEPT_KEYWORDS and we don't want it to
    # behave incrementally.
    self.repoman_incrementals = tuple(
        x for x in portage.const.INCREMENTALS if x != 'ACCEPT_KEYWORDS')

    # Collect valid categories from every tree known to the eclass db,
    # then stack them into a single frozenset.
    self.categories = []
    for path in self.repo_settings.repo_config.eclass_db.porttrees:
        self.categories.extend(portage.util.grabfile(
            os.path.join(path, 'profiles', 'categories')))
    self.repo_settings.repoman_settings.categories = frozenset(
        portage.util.stack_lists([self.categories], incremental=1))
    self.categories = self.repo_settings.repoman_settings.categories

    self.portdb = repo_settings.portdb
    self.portdb.settings = self.repo_settings.repoman_settings

    # We really only need to cache the metadata that's necessary for visibility
    # filtering. Anything else can be discarded to reduce memory consumption.
    self.portdb._aux_cache_keys.clear()
    self.portdb._aux_cache_keys.update(
        ["EAPI", "IUSE", "KEYWORDS", "repository", "SLOT"])

    self.reposplit = myreporoot.split(os.path.sep)
    self.repolevel = len(self.reposplit)

    if self.options.mode == 'commit':
        repochecks.commit_check(self.repolevel, self.reposplit)
        repochecks.conflict_check(self.vcs_settings, self.options)

    # Make startdir relative to the canonical repodir, so that we can pass
    # it to digestgen and it won't have to be canonicalized again.
    if self.repolevel == 1:
        startdir = self.repo_settings.repodir
    else:
        startdir = normalize_path(mydir)
        startdir = os.path.join(
            self.repo_settings.repodir,
            *startdir.split(os.sep)[-2 - self.repolevel + 3:])

    # get lists of valid keywords, licenses, and use
    new_data = repo_metadata(self.portdb, self.repo_settings.repoman_settings)
    kwlist, liclist, uselist, profile_list, \
        global_pmaskdict, liclist_deprecated = new_data
    self.repo_metadata = {
        'kwlist': kwlist,
        'liclist': liclist,
        'uselist': uselist,
        'profile_list': profile_list,
        'pmaskdict': global_pmaskdict,
        'lic_deprecated': liclist_deprecated,
    }

    self.repo_settings.repoman_settings['PORTAGE_ARCHLIST'] = ' '.join(sorted(kwlist))
    self.repo_settings.repoman_settings.backup_changes('PORTAGE_ARCHLIST')

    self.profiles = setup_profile(profile_list)

    check_profiles(self.profiles, self.repo_settings.repoman_settings.archlist())

    scanlist = scan(self.repolevel, self.reposplit, startdir,
        self.categories, self.repo_settings)

    self.dev_keywords = dev_profile_keywords(self.profiles)

    self.qatracker = QATracker()

    if self.options.echangelog is None and self.repo_settings.repo_config.update_changelog:
        self.options.echangelog = 'y'

    # Without a VCS there is no changelog automation to drive.
    if self.vcs_settings.vcs is None:
        self.options.echangelog = 'n'

    self.check = {}
    # The --echangelog option causes automatic ChangeLog generation,
    # which invalidates changelog.ebuildadded and changelog.missing
    # checks.
    # Note: Some don't use ChangeLogs in distributed SCMs.
    # It will be generated on server side from scm log,
    # before package moves to the rsync server.
    # This is needed because they try to avoid merge collisions.
    # Gentoo's Council decided to always use the ChangeLog file.
    # TODO: shouldn't this just be switched on the repo, iso the VCS?
    is_echangelog_enabled = self.options.echangelog in ('y', 'force')
    self.vcs_settings.vcs_is_cvs_or_svn = self.vcs_settings.vcs in ('cvs', 'svn')
    self.check['changelog'] = not is_echangelog_enabled and self.vcs_settings.vcs_is_cvs_or_svn

    if self.options.mode == "manifest":
        pass
    elif self.options.pretend:
        print(green("\nRepoMan does a once-over of the neighborhood..."))
    else:
        print(green("\nRepoMan scours the neighborhood..."))

    self.changed = Changes(self.options)
    self.changed.scan(self.vcs_settings)

    self.have = {
        'pmasked': False,
        'dev_keywords': False,
    }

    # NOTE: match-all caches are not shared due to potential
    # differences between profiles in _get_implicit_iuse.
    self.caches = {
        'arch': {},
        'arch_xmatch': {},
        'shared_xmatch': {"cp-list": {}},
    }

    self.include_arches = None
    if self.options.include_arches:
        self.include_arches = set()
        self.include_arches.update(
            *[x.split() for x in self.options.include_arches])

    # Disable the "ebuild.notadded" check when not in commit mode and
    # running `svn status` in every package dir will be too expensive.
    self.check['ebuild_notadded'] = not \
        (self.vcs_settings.vcs == "svn"
            and self.repolevel < 3
            and self.options.mode != "commit")

    self.effective_scanlist = scanlist
    if self.options.if_modified == "y":
        self.effective_scanlist = sorted(vcs_files_to_cps(
            chain(self.changed.changed, self.changed.new, self.changed.removed),
            self.repolevel, self.reposplit, self.categories))

    self.live_eclasses = portage.const.LIVE_ECLASSES

    # initialize our checks classes here before the big xpkg loop
    self.manifester = Manifests(self.options, self.qatracker,
        self.repo_settings.repoman_settings)
    self.is_ebuild = IsEbuild(self.repo_settings.repoman_settings,
        self.repo_settings, self.portdb, self.qatracker)
    self.filescheck = FileChecks(
        self.qatracker, self.repo_settings.repoman_settings,
        self.repo_settings, self.portdb, self.vcs_settings)
    self.status_check = VCSStatus(self.vcs_settings, self.qatracker)
    self.fetchcheck = FetchChecks(
        self.qatracker, self.repo_settings, self.portdb, self.vcs_settings)
    self.pkgmeta = PkgMetadata(self.options, self.qatracker,
        self.repo_settings.repoman_settings)
    self.thirdparty = ThirdPartyMirrors(
        self.repo_settings.repoman_settings, self.qatracker)
    self.use_flag_checks = USEFlagChecks(self.qatracker, uselist)
    self.keywordcheck = KeywordChecks(self.qatracker, self.options)
    self.liveeclasscheck = LiveEclassChecks(self.qatracker)
    self.rubyeclasscheck = RubyEclassChecks(self.qatracker)
    self.eapicheck = EAPIChecks(self.qatracker, self.repo_settings)
    self.descriptioncheck = DescriptionChecks(self.qatracker)
    self.licensecheck = LicenseChecks(self.qatracker, liclist, liclist_deprecated)
    self.restrictcheck = RestrictChecks(self.qatracker)
def __init__(self, repo_settings, myreporoot, config_root, options, vcs_settings, mydir, env):
    """Initialize the scanner: load repo categories, configure the
    portdb metadata cache, compute the scan list, and construct every
    per-run QA check instance used by the main xpkg loop.

    @param repo_settings: repository settings instance (provides
        repo_config, repoman_settings, portdb, repodir)
    @param myreporoot: path of the repo root, split to derive repolevel
    @param config_root: config root path (stored for later use)
    @param options: parsed command-line options
    @param vcs_settings: VCS plugin settings (vcs type, status class, ...)
    @param mydir: current working directory, used to compute startdir
    @param env: environment mapping (stored for later use)
    """
    self.repo_settings = repo_settings
    self.config_root = config_root
    self.options = options
    self.vcs_settings = vcs_settings
    self.env = env

    # Repoman sets its own ACCEPT_KEYWORDS and we don't want it to
    # behave incrementally.
    self.repoman_incrementals = tuple(
        x for x in portage.const.INCREMENTALS if x != 'ACCEPT_KEYWORDS')

    # Collect valid categories from every porttree's profiles/categories
    # file, then freeze them on the repoman settings instance.
    self.categories = []
    for path in self.repo_settings.repo_config.eclass_db.porttrees:
        self.categories.extend(portage.util.grabfile(
            os.path.join(path, 'profiles', 'categories')))
    self.repo_settings.repoman_settings.categories = frozenset(
        portage.util.stack_lists([self.categories], incremental=1))
    self.categories = self.repo_settings.repoman_settings.categories

    # Locate metadata.dtd: reversed() so the highest-priority porttree
    # that provides one wins. Passed to PkgMetadata below.
    metadata_dtd = None
    for path in reversed(self.repo_settings.repo_config.eclass_db.porttrees):
        path = os.path.join(path, 'metadata/dtd/metadata.dtd')
        if os.path.exists(path):
            metadata_dtd = path
            break

    self.portdb = repo_settings.portdb
    self.portdb.settings = self.repo_settings.repoman_settings
    # We really only need to cache the metadata that's necessary for visibility
    # filtering. Anything else can be discarded to reduce memory consumption.
    if self.options.mode != "manifest" and self.options.digest != "y":
        # Don't do this when generating manifests, since that uses
        # additional keys if spawn_nofetch is called (RESTRICT and
        # DEFINED_PHASES).
        self.portdb._aux_cache_keys.clear()
        self.portdb._aux_cache_keys.update(
            ["EAPI", "IUSE", "KEYWORDS", "repository", "SLOT"])

    # repolevel: depth of the current directory within the repo
    # (1 = repo root, 2 = category, 3 = package dir).
    self.reposplit = myreporoot.split(os.path.sep)
    self.repolevel = len(self.reposplit)

    if self.options.mode == 'commit':
        repochecks.commit_check(self.repolevel, self.reposplit)
        repochecks.conflict_check(self.vcs_settings, self.options)

    # Make startdir relative to the canonical repodir, so that we can pass
    # it to digestgen and it won't have to be canonicalized again.
    if self.repolevel == 1:
        startdir = self.repo_settings.repodir
    else:
        # Keep only the trailing path components below the repo root
        # (repolevel 2 -> last 1 component, repolevel 3 -> last 2).
        startdir = normalize_path(mydir)
        startdir = os.path.join(
            self.repo_settings.repodir, *startdir.split(os.sep)[-2 - self.repolevel + 3:])

    # get lists of valid keywords, licenses, and use
    new_data = repo_metadata(self.portdb, self.repo_settings.repoman_settings)
    kwlist, liclist, uselist, profile_list, \
        global_pmaskdict, liclist_deprecated = new_data
    self.repo_metadata = {
        'kwlist': kwlist,
        'liclist': liclist,
        'uselist': uselist,
        'profile_list': profile_list,
        'pmaskdict': global_pmaskdict,
        'lic_deprecated': liclist_deprecated,
    }

    # Expose the full arch list to ebuild helpers via the environment.
    self.repo_settings.repoman_settings['PORTAGE_ARCHLIST'] = ' '.join(sorted(kwlist))
    self.repo_settings.repoman_settings.backup_changes('PORTAGE_ARCHLIST')

    self.profiles = setup_profile(profile_list)

    check_profiles(self.profiles, self.repo_settings.repoman_settings.archlist())

    # Full list of category/package dirs under startdir to scan.
    scanlist = scan(self.repolevel, self.reposplit, startdir, self.categories, self.repo_settings)

    self.dev_keywords = dev_profile_keywords(self.profiles)

    self.qatracker = QATracker()

    # Resolve the effective --echangelog value: default from the repo
    # config, forced off when there is no VCS to generate it from.
    if self.options.echangelog is None and self.repo_settings.repo_config.update_changelog:
        self.options.echangelog = 'y'

    if self.vcs_settings.vcs is None:
        self.options.echangelog = 'n'

    self.check = {}
    # The --echangelog option causes automatic ChangeLog generation,
    # which invalidates changelog.ebuildadded and changelog.missing
    # checks.
    # Note: Some don't use ChangeLogs in distributed SCMs.
    # It will be generated on server side from scm log,
    # before package moves to the rsync server.
    # This is needed because they try to avoid merge collisions.
    # Gentoo's Council decided to always use the ChangeLog file.
    # TODO: shouldn't this just be switched on the repo, iso the VCS?
    is_echangelog_enabled = self.options.echangelog in ('y', 'force')
    self.vcs_settings.vcs_is_cvs_or_svn = self.vcs_settings.vcs in ('cvs', 'svn')
    self.check['changelog'] = not is_echangelog_enabled and self.vcs_settings.vcs_is_cvs_or_svn

    # Greeting banner — suppressed in manifest/quiet modes.
    if self.options.mode == "manifest" or self.options.quiet:
        pass
    elif self.options.pretend:
        print(green("\nRepoMan does a once-over of the neighborhood..."))
    else:
        print(green("\nRepoMan scours the neighborhood..."))

    self.changed = Changes(self.options)
    # bypass unneeded VCS operations if not needed
    if (self.options.if_modified == "y" or
            self.options.mode not in ("manifest", "manifest-check")):
        self.changed.scan(self.vcs_settings)

    self.have = {
        'pmasked': False,
        'dev_keywords': False,
    }

    # NOTE: match-all caches are not shared due to potential
    # differences between profiles in _get_implicit_iuse.
    self.caches = {
        'arch': {},
        'arch_xmatch': {},
        'shared_xmatch': {"cp-list": {}},
    }

    # Optional restriction of keyword checks to the given arches;
    # each option value may itself be a space-separated list.
    self.include_arches = None
    if self.options.include_arches:
        self.include_arches = set()
        self.include_arches.update(*[x.split() for x in self.options.include_arches])

    # Disable the "ebuild.notadded" check when not in commit mode and
    # running `svn status` in every package dir will be too expensive.
    self.check['ebuild_notadded'] = not \
        (self.vcs_settings.vcs == "svn" and self.repolevel < 3 and self.options.mode != "commit")

    # With --if-modified, narrow the scan list to only the packages
    # touched by the working-copy changes the VCS reported.
    self.effective_scanlist = scanlist
    if self.options.if_modified == "y":
        self.effective_scanlist = sorted(vcs_files_to_cps(
            chain(self.changed.changed, self.changed.new, self.changed.removed),
            self.repolevel, self.reposplit, self.categories))

    self.live_eclasses = portage.const.LIVE_ECLASSES

    # initialize our checks classes here before the big xpkg loop
    self.manifester = Manifests(self.options, self.qatracker, self.repo_settings.repoman_settings)
    self.is_ebuild = IsEbuild(self.repo_settings.repoman_settings, self.repo_settings, self.portdb, self.qatracker)
    self.filescheck = FileChecks(
        self.qatracker, self.repo_settings.repoman_settings, self.repo_settings, self.portdb,
        self.vcs_settings)
    self.status_check = VCSStatus(self.vcs_settings, self.qatracker)
    self.fetchcheck = FetchChecks(
        self.qatracker, self.repo_settings, self.portdb, self.vcs_settings)
    self.pkgmeta = PkgMetadata(self.options, self.qatracker,
        self.repo_settings.repoman_settings, metadata_dtd=metadata_dtd)
    self.thirdparty = ThirdPartyMirrors(self.repo_settings.repoman_settings, self.qatracker)
    self.use_flag_checks = USEFlagChecks(self.qatracker, uselist)
    self.keywordcheck = KeywordChecks(self.qatracker, self.options)
    self.liveeclasscheck = LiveEclassChecks(self.qatracker)
    self.rubyeclasscheck = RubyEclassChecks(self.qatracker)
    self.eapicheck = EAPIChecks(self.qatracker, self.repo_settings)
    self.descriptioncheck = DescriptionChecks(self.qatracker)
    self.licensecheck = LicenseChecks(self.qatracker, liclist, liclist_deprecated)
    self.restrictcheck = RestrictChecks(self.qatracker)