def store(self):
    """
    Atomically write the registry data to self._filename.

    The existing inode is replaced atomically, so any lock held on the
    old inode becomes useless; callers must only invoke this when the
    current lock is ready to be released immediately.
    """
    # Skip when sandboxed, or when nothing changed since load().
    if os.environ.get("SANDBOX_ON") == "1" or self._data == self._data_orig:
        return
    try:
        stream = atomic_ofstream(self._filename, "wb")
        if self._json_write:
            serialized = json.dumps(self._data, **self._json_write_opts)
            stream.write(_unicode_encode(
                serialized,
                encoding=_encodings["repo.content"],
                errors="strict"))
        else:
            pickle.dump(self._data, stream, protocol=2)
        stream.close()
    except EnvironmentError as e:
        # Permission errors are expected for unprivileged users and are
        # silently ignored; anything else is reported but not fatal.
        if e.errno != PermissionDenied.errno:
            writemsg_level(
                "!!! %s %s\n" % (e, self._filename),
                level=logging.ERROR, noiselevel=-1)
    else:
        self._data_orig = self._data.copy()
def update_index(self, mymanifests, myupdates):
    """Refresh the vcs's modified index if it is needed.

    @param mymanifests: manifest files updated
    @param myupdates: other files updated
    """
    # git commit -a is not safe here: it would sweep in unrelated
    # modified files elsewhere in the working tree.  Run git
    # update-index on just the files we touched so the index holds
    # their latest versions.
    changed = sorted(mymanifests + myupdates)
    cmd = ["git", "update-index"]
    cmd.extend(path.lstrip("./") for path in changed)
    if self.options.pretend:
        print("(%s)" % " ".join(cmd))
        return
    retval = spawn(cmd, env=os.environ)
    if retval != os.EX_OK:
        writemsg_level(
            "!!! Exiting on %s (shell) error code: %s\n"
            % (self.vcs_settings.vcs, retval),
            level=logging.ERROR, noiselevel=-1)
        sys.exit(retval)
def new(self, **kwargs):
    """Perform the initial CVS checkout of the repository.

    @return: (return code, cache update needed) — cache update is
        always False here.
    """
    if kwargs:
        self._kwargs(kwargs)
    # Initial checkout into the parent of repo.location.
    cvs_root = self.repo.sync_uri
    command = "cd %s; exec cvs -z0 -d %s co -P -d %s %s" % (
        portage._shell_quote(os.path.dirname(self.repo.location)),
        portage._shell_quote(cvs_root),
        portage._shell_quote(os.path.basename(self.repo.location)),
        portage._shell_quote(
            self.repo.module_specific_options["sync-cvs-repo"]),
    )
    exitcode = portage.process.spawn_bash(command, **self.spawn_kwargs)
    if exitcode != os.EX_OK:
        msg = "!!! cvs checkout error; exiting."
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n", noiselevel=-1, level=logging.ERROR)
        return (1, False)
    return (0, False)
def _poll(self, timeout=None): """ All poll() calls pass through here. The poll events are added directly to self._poll_event_queue. In order to avoid endless blocking, this raises StopIteration if timeout is None and there are no file descriptors to poll. """ if timeout is None and \ not self._poll_event_handlers: raise StopIteration( "timeout is None and there are no poll() event handlers") while True: try: self._poll_event_queue.extend(self._poll_obj.poll(timeout)) break except (IOError, select.error) as e: # Silently handle EINTR, which is normal when we have # received a signal such as SIGINT (epoll objects may # raise IOError rather than select.error, at least in # Python 3.2). if not (e.args and e.args[0] == errno.EINTR): writemsg_level("\n!!! select error: %s\n" % (e, ), level=logging.ERROR, noiselevel=-1) del e # This typically means that we've received a SIGINT, so # raise StopIteration in order to break out of our current # iteration and respond appropriately to the signal as soon # as possible. raise StopIteration("interrupted")
def repo_name_check(trees):
    """Warn about repositories lacking a profiles/repo_name entry.

    @return: True if any repository is missing its repo_name.
    """
    missing_repo_names = set()
    for root, root_trees in trees.items():
        if "porttree" not in root_trees:
            continue
        portdb = root_trees["porttree"].dbapi
        missing_repo_names.update(portdb.porttrees)
        for repo in portdb.getRepositories():
            missing_repo_names.discard(portdb.getRepositoryPath(repo))
        # An empty $PORTDIR has no profiles/ directory; that is normal,
        # so don't warn about it.
        if (portdb.porttree_root in missing_repo_names
                and not os.path.exists(os.path.join(
                    portdb.porttree_root, "profiles"))):
            missing_repo_names.remove(portdb.porttree_root)

    if missing_repo_names:
        lines = [
            "WARNING: One or more repositories "
            "have missing repo_name entries:",
            "",
        ]
        lines.extend("\t%s/profiles/repo_name" % (p,)
            for p in missing_repo_names)
        lines.append("")
        lines.extend(textwrap.wrap(
            "NOTE: Each repo_name entry should be a plain text file "
            "containing a unique name for the repository on the first "
            "line.", 70))
        writemsg_level("".join("%s\n" % l for l in lines),
            level=logging.WARNING, noiselevel=-1)

    return bool(missing_repo_names)
def add_manifest(self, mymanifests, myheaders, myupdates, myremoved,
        commitmessage):
    """Commit the Manifest files (plus pending files when there are no
    header changes)."""
    myfiles = mymanifests[:]
    # Without header (SVN/CVS keyword) changes, this Manifest commit
    # must also carry the other, still-uncommitted files.
    if not myheaders:
        myfiles += myupdates
        myfiles += myremoved
    myfiles.sort()
    fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
    with os.fdopen(fd, "wb") as handle:
        handle.write(_unicode_encode(commitmessage))
    retval = self.vcs_settings.changes.commit(myfiles, commitmessagefile)
    # Remove the commit-message tempfile before possibly exiting.
    try:
        os.unlink(commitmessagefile)
    except OSError:
        pass
    if retval != os.EX_OK:
        writemsg_level(
            "!!! Exiting on %s (shell) error code: %s\n"
            % (self.vcs_settings.vcs, retval),
            level=logging.ERROR, noiselevel=-1)
        sys.exit(retval)
def priming_commit(self, myupdates, myremoved, commitmessage):
    """Perform a priming commit of the given updated/removed files."""
    myfiles = myupdates + myremoved
    commitmessagedir = tempfile.mkdtemp(".repoman.msg")
    commitmessagefile = os.path.join(commitmessagedir, "COMMIT_EDITMSG")
    with open(commitmessagefile, "wb") as handle:
        handle.write(_unicode_encode(commitmessage))

    separator = '-' * 78
    print()
    print(green("Using commit message:"))
    print(green(separator))
    print(commitmessage)
    print(green(separator))
    print()

    # A leading ./ on file paths can trigger a bug in the cvs server
    # when committing to multiple directories, so drop the prefix.
    myfiles = [f.lstrip("./") for f in myfiles]
    retval = self.vcs_settings.changes.commit(myfiles, commitmessagefile)
    # Remove the commit-message directory before possibly exiting.
    try:
        shutil.rmtree(commitmessagedir)
    except OSError:
        pass
    if retval != os.EX_OK:
        writemsg_level(
            "!!! Exiting on %s (shell) error code: %s\n"
            % (self.vcs_settings.vcs, retval),
            level=logging.ERROR, noiselevel=-1)
        sys.exit(retval)
def linux_ro_checker(dir_list):
    """
    Use /proc/self/mountinfo to check that no directories installed by
    the ebuild live on a read-only filesystem.

    @param dir_list: A list of directories installed by the ebuild.
    @type dir_list: List
    @return: A set of filesystems which are both set to be written to
        and are mounted read-only; may be empty.
    """
    ro_filesystems = set()
    try:
        with io.open("/proc/self/mountinfo", mode='r',
            encoding=_encodings['content'], errors='replace') as f:
            for line in f:
                # The mount dir and both attribute fields (which always
                # begin with 'ro' or 'rw') are of interest, e.g.:
                # 14 1 8:3 / / rw,noatime - ext3 /dev/root rw,errors=continue,...
                # The number of fields before the ' - ' separator can
                # vary, so split on that marker first.
                mount = line.split(' - ', 1)
                mount_dir, attr1 = mount[0].split()[4:6]
                attr2 = mount[1].split()[2]
                if attr1.startswith('ro') or attr2.startswith('ro'):
                    ro_filesystems.add(mount_dir)
    except EnvironmentError:
        # If mountinfo is unreadable, assume there are no read-only
        # filesystems and return.
        writemsg_level(_("!!! /proc/self/mountinfo cannot be read"),
            level=logging.WARNING, noiselevel=-1)
        return []

    # Map the device of each read-only mount point ...
    ro_devs = {}
    for mount_dir in ro_filesystems:
        try:
            ro_devs[os.stat(mount_dir).st_dev] = mount_dir
        except OSError:
            pass

    # ... then report the RO mount points that host installed dirs.
    ro_filesystems.clear()
    for installed_dir in set(dir_list):
        try:
            dev = os.stat(installed_dir).st_dev
        except OSError:
            continue
        mount_dir = ro_devs.get(dev)
        if mount_dir is not None:
            ro_filesystems.add(mount_dir)
    return ro_filesystems
def perform_post_sync_hook(self, reponame, dosyncuri='', repolocation=''):
    """Run the postsync.d hooks (repo-specific hooks when reponame is
    non-empty).

    @return: os.EX_OK on success, otherwise the return code of the
        last failing hook.
    """
    succeeded = os.EX_OK
    if reponame:
        hooks = self.hooks["repo.postsync.d"]
    else:
        hooks = self.hooks["postsync.d"]
    for filepath in hooks:
        # NOTE(review): this informational message is emitted at
        # logging.ERROR with noiselevel=4 — presumably intentional so
        # it is always shown; confirm before changing.
        writemsg_level("Spawning post_sync hook: %s\n"
            % (_unicode_decode(hooks[filepath])),
            level=logging.ERROR, noiselevel=4)
        if reponame:
            argv = [filepath, reponame, dosyncuri, repolocation]
        else:
            argv = [filepath]
        retval = portage.process.spawn(argv, env=self.settings.environ())
        if retval != os.EX_OK:
            writemsg_level(
                " %s Spawn failed for: %s, %s\n"
                % (bad("*"), _unicode_decode(hooks[filepath]), filepath),
                level=logging.ERROR, noiselevel=-1)
            succeeded = retval
    return succeeded
def linux_ro_checker(dir_list):
    """
    Use /proc/mounts to check that no directories installed by the
    ebuild are set to be installed to a read-only filesystem.

    @param dir_list: A list of directories installed by the ebuild.
    @type dir_list: List
    @return: A set of filesystems which are both set to be written to
        and are mounted read-only; may be empty.
    """
    ro_filesystems = set()
    try:
        with io.open("/proc/mounts", mode='r',
            encoding=_encodings['content'], errors='replace') as f:
            # Match a standalone "ro" flag within the comma-separated
            # mount options field.
            roregex = re.compile(r'(\A|,)ro(\Z|,)')
            for line in f:
                fields = line.split(" ")
                if roregex.search(fields[3].strip()) is not None:
                    ro_filesystems.add(fields[1].strip())
    except EnvironmentError:
        # If /proc/mounts is unreadable, assume there are no read-only
        # filesystems and return.
        writemsg_level(_("!!! /proc/mounts cannot be read"),
            level=logging.WARNING, noiselevel=-1)
        return []

    return ro_filesystems.intersection(dir_list)
def _poll(self, timeout=None): """ All poll() calls pass through here. The poll events are added directly to self._poll_event_queue. In order to avoid endless blocking, this raises StopIteration if timeout is None and there are no file descriptors to poll. """ if not self._poll_event_handlers: self._schedule() if timeout is None and \ not self._poll_event_handlers: raise StopIteration( "timeout is None and there are no poll() event handlers") # The following error is known to occur with Linux kernel versions # less than 2.6.24: # # select.error: (4, 'Interrupted system call') # # This error has been observed after a SIGSTOP, followed by SIGCONT. # Treat it similar to EAGAIN if timeout is None, otherwise just return # without any events. while True: try: self._poll_event_queue.extend(self._poll_obj.poll(timeout)) break except select.error as e: writemsg_level("\n!!! select error: %s\n" % (e,), level=logging.ERROR, noiselevel=-1) del e if timeout is not None: break
def store(self):
    """
    Persist the registry data to self._filename.

    The existing inode is replaced atomically; any lock based on that
    inode becomes useless, so only call this when the current lock is
    about to be released.
    """
    # No-op under sandbox, or when the data is unchanged since load().
    if os.environ.get("SANDBOX_ON") == "1":
        return
    if self._data == self._data_orig:
        return
    try:
        out = atomic_ofstream(self._filename, 'wb')
        if self._json_write:
            out.write(_unicode_encode(
                json.dumps(self._data, **self._json_write_opts),
                encoding=_encodings['repo.content'],
                errors='strict'))
        else:
            pickle.dump(self._data, out, protocol=2)
        out.close()
    except EnvironmentError as e:
        # Permission errors (unprivileged user) are ignored; everything
        # else is reported but not fatal.
        if e.errno != PermissionDenied.errno:
            writemsg_level("!!! %s %s\n" % (e, self._filename),
                level=logging.ERROR, noiselevel=-1)
    else:
        self._data_orig = self._data.copy()
def async_output(self, msg, log_file=None, background=None,
        level=0, noiselevel=-1, loop=None):
    """
    Output a msg to stdio (if not in background) and to a log file if
    provided.  This is a generator-based coroutine: the log write is
    yielded so the caller's event loop can await it.

    @param msg: a message string, including newline if appropriate
    @type msg: str
    @param log_file: log file in binary mode
    @type log_file: file
    @param background: send messages only to log (not to stdio)
    @type background: bool
    @param level: a numeric logging level (see the logging module)
    @type level: int
    @param noiselevel: passed directly to writemsg
    @type noiselevel: int
    """
    # A global background setting overrides the per-call argument.
    global_background = self._is_background()
    if background is None or global_background:
        background = global_background

    if not background:
        writemsg_level(msg, level=level, noiselevel=noiselevel)

    if log_file is not None:
        yield _writer(log_file, _unicode_encode(msg), loop=loop)
def check_cvs_repo(self):
    """Warn when a sync-type=cvs repo lacks the sync-cvs-repo
    attribute."""
    if self.repo.module_specific_options.get('sync-cvs-repo') is None:
        writemsg_level(
            "!!! %s\n" % _(
                "Repository '%s' has sync-type=cvs, but is missing "
                "sync-cvs-repo attribute")
            % self.repo.name,
            level=self.logger.ERROR, noiselevel=-1)
def _task_output(self, msg, log_path=None, level=0, noiselevel=-1): """ Output msg to stdout if not self._background. If log_path is not None then append msg to the log (appends with compression if the filename extension of log_path corresponds to a supported compression type). """ if not self._background: writemsg_level(msg, level=level, noiselevel=noiselevel) if log_path is not None: f = open(_unicode_encode(log_path, encoding=_encodings['fs'], errors='strict'), mode='ab') if log_path.endswith('.gz'): # NOTE: The empty filename argument prevents us from triggering # a bug in python3 which causes GzipFile to raise AttributeError # if fileobj.name is bytes instead of unicode. f = gzip.GzipFile(filename='', mode='ab', fileobj=f) f.write(_unicode_encode(msg)) f.close()
def _sync(self):
    """
    Sync an existing CVS repository checkout.

    @return: tuple of return code (0=success), whether the cache
        needs to be updated
    @rtype: (int, bool)
    """
    cvs_root = self.repo.sync_uri
    # Strip the 6-character "cvs://" scheme prefix if present.
    if cvs_root.startswith("cvs://"):
        cvs_root = cvs_root[6:]

    # cvs update
    msg = ">>> Starting cvs update with %s..." % self.repo.sync_uri
    self.logger(self.xterm_titles, msg)
    writemsg_level(msg + "\n")
    exitcode = portage.process.spawn_bash(
        "cd %s; exec cvs -z0 -q update -dP"
        % (portage._shell_quote(self.repo.location),),
        **portage._native_kwargs(self.spawn_kwargs))
    if exitcode != os.EX_OK:
        msg = "!!! cvs update error; exiting."
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n", noiselevel=-1, level=logging.ERROR)
    return (exitcode, False)
def _poll(self, timeout=None): """ All poll() calls pass through here. The poll events are added directly to self._poll_event_queue. In order to avoid endless blocking, this raises StopIteration if timeout is None and there are no file descriptors to poll. """ if not self._poll_event_handlers: self._schedule() if timeout is None and \ not self._poll_event_handlers: raise StopIteration( "timeout is None and there are no poll() event handlers") # The following error is known to occur with Linux kernel versions # less than 2.6.24: # # select.error: (4, 'Interrupted system call') # # This error has been observed after a SIGSTOP, followed by SIGCONT. # Treat it similar to EAGAIN if timeout is None, otherwise just return # without any events. while True: try: self._poll_event_queue.extend(self._poll_obj.poll(timeout)) break except select.error as e: writemsg_level("\n!!! select error: %s\n" % (e, ), level=logging.ERROR, noiselevel=-1) del e if timeout is not None: break
def _validate(self): """ Implements unknown-features-warn and unknown-features-filter. """ if "unknown-features-warn" in self._features: unknown_features = self._features.difference(SUPPORTED_FEATURES) if unknown_features: unknown_features = unknown_features.difference( self._settings._unknown_features) if unknown_features: self._settings._unknown_features.update(unknown_features) writemsg_level( colorize( "BAD", _("FEATURES variable contains unknown value(s): %s" ) % ", ".join(sorted(unknown_features)), ) + "\n", level=logging.WARNING, noiselevel=-1, ) if "unknown-features-filter" in self._features: unknown_features = self._features.difference(SUPPORTED_FEATURES) if unknown_features: self.difference_update(unknown_features) self._prune_overrides()
def show_invalid_depstring_notice(parent_node, error_msg):
    """Print guidance for an invalid/corrupt dependency specification."""
    header = ("\n\n!!! Invalid or corrupt dependency specification: "
              "\n\n%s\n\n%s\n\n" % (error_msg, parent_node))
    p_key = parent_node.cpv
    if parent_node.operation == "nomerge":
        # Installed package: point at its vdb *DEPEND files.
        category, pf = portage.catsplit(p_key)
        pkg_location = os.path.join(
            parent_node.root_config.settings['EROOT'],
            portage.VDB_PATH, category, pf)
        advice = ("Portage is unable to process the dependencies of the "
                  "'%s' package. In order to correct this problem, the "
                  "package should be uninstalled, reinstalled, or "
                  "upgraded. As a temporary workaround, the --nodeps "
                  "option can be used to ignore all dependencies. For "
                  "reference, the problematic dependencies can be found "
                  "in the *DEPEND files located in '%s/'."
                  % (p_key, pkg_location))
    else:
        advice = ("This package can not be installed. Please notify the "
                  "'%s' package maintainer about this problem." % p_key)
    body = "".join("%s\n" % line for line in textwrap.wrap(advice, 72))
    writemsg_level(header + body, level=logging.ERROR, noiselevel=-1)
def _poll(self, timeout=None): """ All poll() calls pass through here. The poll events are added directly to self._poll_event_queue. In order to avoid endless blocking, this raises StopIteration if timeout is None and there are no file descriptors to poll. """ if timeout is None and \ not self._poll_event_handlers: raise StopIteration( "timeout is None and there are no poll() event handlers") while True: try: self._poll_event_queue.extend(self._poll_obj.poll(timeout)) break except select.error as e: # Silently handle EINTR, which is normal when we have # received a signal such as SIGINT. if not (e.args and e.args[0] == errno.EINTR): writemsg_level("\n!!! select error: %s\n" % (e,), level=logging.ERROR, noiselevel=-1) del e # This typically means that we've received a SIGINT, so # raise StopIteration in order to break out of our current # iteration and respond appropriately to the signal as soon # as possible. raise StopIteration("interrupted")
def new(self, **kwargs):
    '''Do the initial clone of the repository.

    Fix: stripping the "file://" scheme previously used sync_uri[6:],
    which removes only 6 of the prefix's 7 characters and leaves a
    spurious leading "//" on the path (contrast the cvs module, where
    "cvs://" really is 6 characters).
    '''
    if kwargs:
        self._kwargs(kwargs)
    try:
        if not os.path.exists(self.repo.location):
            os.makedirs(self.repo.location)
            self.logger(self.xterm_titles,
                'Created new directory %s' % self.repo.location)
    except IOError:
        return (1, False)

    sync_uri = self.repo.sync_uri
    if sync_uri.startswith("file://"):
        # Strip the full scheme prefix so a plain filesystem path
        # remains.
        sync_uri = sync_uri[len("file://"):]

    depth_arg = ''
    if self.repo.sync_depth is not None:
        depth_arg = '--depth %d ' % self.repo.sync_depth

    git_cmd = "%s clone %s%s ." % (self.bin_command, depth_arg,
        portage._shell_quote(sync_uri))
    writemsg_level(git_cmd + "\n")

    exitcode = portage.process.spawn_bash("cd %s ; exec %s" % (
        portage._shell_quote(self.repo.location), git_cmd),
        **portage._native_kwargs(self.spawn_kwargs))
    if exitcode != os.EX_OK:
        msg = "!!! git clone error in %s" % self.repo.location
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
        return (exitcode, False)
    return (os.EX_OK, True)
def add_manifest(self, mymanifests, myheaders, myupdates, myremoved,
        commitmessage):
    """Commit the Manifest files, folding in the other pending files
    when there are no header (SVN/CVS keyword) changes."""
    files_to_commit = mymanifests[:]
    if not myheaders:
        # No header changes, so this Manifest commit must also include
        # the other (yet uncommitted) files.
        files_to_commit += myupdates
        files_to_commit += myremoved
    files_to_commit.sort()

    fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
    message_handle = os.fdopen(fd, "wb")
    message_handle.write(_unicode_encode(commitmessage))
    message_handle.close()

    retval = self.vcs_settings.changes.commit(
        files_to_commit, commitmessagefile)
    # Clean up the commit message tempfile before possibly exiting.
    try:
        os.unlink(commitmessagefile)
    except OSError:
        pass
    if retval == os.EX_OK:
        return
    writemsg_level(
        "!!! Exiting on %s (shell) error code: %s\n"
        % (self.vcs_settings.vcs, retval),
        level=logging.ERROR, noiselevel=-1)
    sys.exit(retval)
def new(self, **kwargs):
    '''Do the initial clone of the repository.

    Fix: stripping the "file://" scheme previously used sync_uri[6:],
    which removes only 6 of the prefix's 7 characters and leaves a
    spurious leading "//" on the path.
    '''
    if kwargs:
        self._kwargs(kwargs)
    try:
        if not os.path.exists(self.repo.location):
            os.makedirs(self.repo.location)
            self.logger(self.xterm_titles,
                'Created new directory %s' % self.repo.location)
    except IOError:
        return (1, False)

    sync_uri = self.repo.sync_uri
    if sync_uri.startswith("file://"):
        # Strip the full scheme prefix so a plain filesystem path
        # remains.
        sync_uri = sync_uri[len("file://"):]

    git_cmd_opts = ""
    if self.settings.get("PORTAGE_QUIET") == "1":
        git_cmd_opts += " --quiet"
    if self.repo.sync_depth is not None:
        git_cmd_opts += " --depth %d" % self.repo.sync_depth

    git_cmd = "%s clone%s %s ." % (self.bin_command, git_cmd_opts,
        portage._shell_quote(sync_uri))
    writemsg_level(git_cmd + "\n")

    exitcode = portage.process.spawn_bash("cd %s ; exec %s" % (
        portage._shell_quote(self.repo.location), git_cmd),
        **portage._native_kwargs(self.spawn_kwargs))
    if exitcode != os.EX_OK:
        msg = "!!! git clone error in %s" % self.repo.location
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
        return (exitcode, False)
    return (os.EX_OK, True)
def update(self):
    '''
    Update existing git repository, and ignore the syncuri. We are
    going to trust the user and assume that the user is in the branch
    that he/she wants updated. We'll let the user manage branches with
    git directly.
    '''
    opts = ""
    if self.settings.get("PORTAGE_QUIET") == "1":
        opts += " --quiet"
    git_cmd = "%s pull%s" % (self.bin_command, opts)
    writemsg_level(git_cmd + "\n")

    # Record HEAD before and after the pull so callers can tell
    # whether anything changed.
    rev_cmd = [self.bin_command, "rev-list", "--max-count=1", "HEAD"]
    repo_dir = portage._unicode_encode(self.repo.location)
    previous_rev = subprocess.check_output(rev_cmd, cwd=repo_dir)

    exitcode = portage.process.spawn_bash(
        "cd %s ; exec %s"
        % (portage._shell_quote(self.repo.location), git_cmd),
        **portage._native_kwargs(self.spawn_kwargs))
    if exitcode != os.EX_OK:
        msg = "!!! git pull error in %s" % self.repo.location
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
        return (exitcode, False)

    current_rev = subprocess.check_output(rev_cmd, cwd=repo_dir)
    return (os.EX_OK, current_rev != previous_rev)
def update(self):
    '''
    Update existing git repository, and ignore the syncuri. We are
    going to trust the user and assume that the user is in the branch
    that he/she wants updated. We'll let the user manage branches with
    git directly.
    '''
    quiet_opt = ""
    if self.settings.get("PORTAGE_QUIET") == "1":
        quiet_opt = " --quiet"
    git_cmd = "%s pull%s" % (self.bin_command, quiet_opt)
    writemsg_level(git_cmd + "\n")

    # Compare HEAD before/after the pull to report whether anything
    # changed (the second tuple element).
    rev_cmd = [self.bin_command, "rev-list", "--max-count=1", "HEAD"]
    location = portage._unicode_encode(self.repo.location)
    previous_rev = subprocess.check_output(rev_cmd, cwd=location)

    exitcode = portage.process.spawn_bash("cd %s ; exec %s" % (
        portage._shell_quote(self.repo.location), git_cmd),
        **portage._native_kwargs(self.spawn_kwargs))
    if exitcode != os.EX_OK:
        msg = "!!! git pull error in %s" % self.repo.location
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
        return (exitcode, False)

    current_rev = subprocess.check_output(rev_cmd, cwd=location)
    return (os.EX_OK, current_rev != previous_rev)
def add_manifest(self, mymanifests, myheaders, myupdates, myremoved,
        commitmessage):
    """Commit the Manifest files (plus the other pending files when
    there are no header changes)."""
    files_to_commit = mymanifests[:]
    # When there are no header (SVN/CVS keyword) changes, this Manifest
    # commit must also include the other, still-uncommitted files.
    if not myheaders:
        files_to_commit += myupdates
        files_to_commit += myremoved
    files_to_commit.sort()

    commitmessagedir = tempfile.mkdtemp(".repoman.msg")
    commitmessagefile = os.path.join(commitmessagedir, "COMMIT_EDITMSG")
    with open(commitmessagefile, "wb") as handle:
        handle.write(_unicode_encode(commitmessage))

    retval = self.vcs_settings.changes.commit(
        files_to_commit, commitmessagefile)
    # Remove the commit-message directory before possibly exiting.
    try:
        shutil.rmtree(commitmessagedir)
    except OSError:
        pass
    if retval != os.EX_OK:
        writemsg_level(
            "!!! Exiting on %s (shell) error code: %s\n"
            % (self.vcs_settings.vcs, retval),
            level=logging.ERROR, noiselevel=-1)
        sys.exit(retval)
def create_overlay_package(config=None, repo=None, logger=None,
        xterm_titles=None):
    '''
    Creates a layman overlay object from the given repos.conf repo
    info.

    @params config: layman.config class object
    @params repo: portage.repo class object
    @rtype tuple: overlay name and layman.overlay object or None
    '''
    if not repo:
        msg = '!!! layman.plugin.create_overlay(), Error: repo not found.'
        if logger and xterm_titles:
            logger(xterm_titles, msg)
        writemsg_level(msg + '\n', level=logging.ERROR, noiselevel=-1)
        return None

    if not config:
        config = BareConfig()
    if not repo.branch:
        repo.branch = ''
    desc = ('Defined and created from info in %(repo)s config file...'
        % ({'repo': repo.name}))
    overlay = {
        'name': repo.name,
        'descriptions': [desc],
        'owner_name': 'repos.conf',
        'owner_email': '127.0.0.1',
        'sources': [[repo.sync_uri, repo.layman_type, repo.branch]],
        'priority': repo.priority,
    }
    ovl = Overlay.Overlay(config=config, ovl_dict=overlay, ignore=1)
    return (repo.name, ovl)
def parse(self):
    """Parse the news item file: populate self.restrictions, record
    invalid lines, and set self._valid / self._parsed."""
    with io.open(
        _unicode_encode(self.path, encoding=_encodings["fs"], errors="strict"),
        mode="r",
        encoding=_encodings["content"],
        errors="replace",
    ) as f:
        lines = f.readlines()
    self.restrictions = {}
    invalids = []
    news_format = None

    # Look for News-Item-Format
    for i, line in enumerate(lines):
        format_match = _formatRE.match(line)
        if format_match is not None:
            news_format = format_match.group(1)
            if fnmatch.fnmatch(news_format, "[12].*"):
                break
            # Header found but its version is unsupported; record it.
            invalids.append((i + 1, line.rstrip("\n")))

    if news_format is None:
        invalids.append((0, "News-Item-Format unspecified"))
    else:
        # Parse the rest
        for i, line in enumerate(lines):
            # Optimization to ignore regex matches on lines that
            # will never match
            if not line.startswith("D"):
                continue
            restricts = {
                _installedRE: DisplayInstalledRestriction,
                _profileRE: DisplayProfileRestriction,
                _keywordRE: DisplayKeywordRestriction,
            }
            for regex, restriction in restricts.items():
                match = regex.match(line)
                if match:
                    # Restrictions are grouped per restriction class,
                    # keyed by id() of the class object.
                    restrict = restriction(match.groups()[0].strip(), news_format)
                    if not restrict.isValid():
                        invalids.append((i + 1, line.rstrip("\n")))
                    else:
                        self.restrictions.setdefault(id(restriction), []).append(restrict)
                    continue

    if invalids:
        self._valid = False
        msg = [
            _(f"Invalid news item: {self.path}"),
            *(_(f" line {lineno}: {line}") for lineno, line in invalids),
        ]
        writemsg_level("".join(f"!!! {x}\n" for x in msg), level=logging.ERROR, noiselevel=-1)

    self._parsed = True
def repo_name_duplicate_check(trees):
    """Warn about repositories ignored due to duplicate repo_name
    entries.

    @return: True if any repository was ignored.
    """
    ignored_repos = {}
    for root, root_trees in trees.items():
        if 'porttree' not in root_trees:
            continue
        portdb = root_trees['porttree'].dbapi
        if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') == '0':
            continue
        for repo_name, paths in portdb._ignored_repos:
            key = (root, repo_name, portdb.getRepositoryPath(repo_name))
            ignored_repos.setdefault(key, []).extend(paths)

    if ignored_repos:
        lines = [
            'WARNING: One or more repositories '
            'have been ignored due to duplicate',
            ' profiles/repo_name entries:',
            '',
        ]
        for key in sorted(ignored_repos):
            lines.append(' %s overrides' % (key,))
            for path in ignored_repos[key]:
                lines.append(' %s' % (path,))
            lines.append('')
        lines.extend(' ' + x for x in textwrap.wrap(
            "All profiles/repo_name entries must be unique in order "
            "to avoid having duplicates ignored. "
            "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in "
            "/etc/make.conf if you would like to disable this warning."))
        writemsg_level(''.join('%s\n' % l for l in lines),
            level=logging.WARNING, noiselevel=-1)

    return bool(ignored_repos)
def load(self):
    """Reload the registry data from file."""
    self._data = None
    f = None
    try:
        f = open(_unicode_encode(self._filename,
            encoding=_encodings['fs'], errors='strict'), 'rb')
        # An empty file is a stale lock file; treat it as an empty
        # registry instead of unpickling it.
        if os.fstat(f.fileno()).st_size > 0:
            self._data = pickle.load(f)
    except (AttributeError, EOFError, ValueError,
            pickle.UnpicklingError) as e:
        # Corrupt or incompatible pickle data: report it and fall back
        # to an empty registry.
        writemsg_level(_("!!! Error loading '%s': %s\n")
            % (self._filename, e),
            level=logging.ERROR, noiselevel=-1)
    except EnvironmentError as e:
        if not hasattr(e, 'errno'):
            raise
        elif e.errno == errno.ENOENT:
            # Missing file simply means an empty registry.
            pass
        elif e.errno == PermissionDenied.errno:
            raise PermissionDenied(self._filename)
        else:
            raise
    finally:
        if f is not None:
            f.close()
    if self._data is None:
        self._data = {}
    self._data_orig = self._data.copy()
    self.pruneNonExisting()
def has_bin(self):
    """Check for existence of the external binary and for storage
    driver configuration problems.

    MUST only be called after _kwargs() has set the logger.
    """
    if self.bin_command is None:
        for line in (
            "Command not found: %s" % self._bin_command,
            'Type "emerge %s" to enable %s support.'
            % (self.bin_pkg, self._bin_command),
        ):
            writemsg_level("!!! %s\n" % line,
                level=logging.ERROR, noiselevel=-1)
        return False

    # Accessing repo_storage can raise if the storage driver is
    # misconfigured.
    try:
        self.repo_storage
    except RepoStorageException as e:
        writemsg_level("!!! %s\n" % (e,),
            level=logging.ERROR, noiselevel=-1)
        return False

    return True
def __init__(self, settings, logger):
    """Collect executable postsync hooks and set a sane umask."""
    self.settings = settings
    self.logger = logger
    # Similar to emerge, sync needs a default umask so that created
    # files have sane permissions.
    os.umask(0o22)
    self.module_controller = portage.sync.module_controller
    self.module_names = self.module_controller.module_names
    self.hooks = {}
    for _dir in ["repo.postsync.d", "postsync.d"]:
        postsync_dir = os.path.join(
            self.settings["PORTAGE_CONFIGROOT"],
            portage.USER_CONFIG_PATH, _dir)
        hooks = OrderedDict()
        for filepath in util._recursive_file_list(postsync_dir):
            name = filepath.split(postsync_dir)[1].lstrip(os.sep)
            if os.access(filepath, os.X_OK):
                hooks[filepath] = name
            else:
                # Non-executable files are skipped with a warning.
                writemsg_level(
                    " %s %s hook: '%s' is not executable\n"
                    % (warn("*"), _dir, _unicode_decode(name)),
                    level=logging.WARN, noiselevel=2)
        self.hooks[_dir] = hooks
def show_invalid_depstring_notice(parent_node, depstring, error_msg):
    """Print guidance for an invalid/corrupt dependency specification.

    depstring is accepted for interface compatibility but is not used
    in the message itself.
    """
    header = ("\n\n!!! Invalid or corrupt dependency specification: "
              "\n\n%s\n\n%s\n\n" % (error_msg, parent_node))
    p_key = parent_node.cpv
    if parent_node.operation == "nomerge":
        # Installed package: point at its vdb *DEPEND files.
        category, pf = portage.catsplit(p_key)
        pkg_location = os.path.join(
            parent_node.root_config.settings['EROOT'],
            portage.VDB_PATH, category, pf)
        advice = ("Portage is unable to process the dependencies of the "
                  "'%s' package. In order to correct this problem, the "
                  "package should be uninstalled, reinstalled, or "
                  "upgraded. As a temporary workaround, the --nodeps "
                  "option can be used to ignore all dependencies. For "
                  "reference, the problematic dependencies can be found "
                  "in the *DEPEND files located in '%s/'."
                  % (p_key, pkg_location))
    else:
        advice = ("This package can not be installed. Please notify the "
                  "'%s' package maintainer about this problem." % p_key)
    body = "".join("%s\n" % line for line in textwrap.wrap(advice, 72))
    writemsg_level(header + body, level=logging.ERROR, noiselevel=-1)
def update_index(self, mymanifests, myupdates):
    """Refresh the vcs's modified index if it is needed.

    @param mymanifests: manifest files updated
    @param myupdates: other files updated
    """
    # git commit -a is not safe: unrelated modified files elsewhere in
    # the working tree would be swept in.  Run git update-index on just
    # the files we touched (--add/--remove keep additions and removals
    # in sync).
    changed = sorted(mymanifests + myupdates)
    cmd = ["git", "update-index", "--add", "--remove"]
    cmd.extend(path.lstrip("./") for path in changed)
    if self.options.pretend:
        print("(%s)" % " ".join(cmd))
        return
    retval = spawn(cmd, env=os.environ)
    if retval != os.EX_OK:
        writemsg_level(
            "!!! Exiting on %s (shell) error code: %s\n"
            % (self.vcs_settings.vcs, retval),
            level=logging.ERROR, noiselevel=-1)
        sys.exit(retval)
def add_manifest(self, mymanifests, myheaders, myupdates, myremoved,
        commitmessage):
    """Commit the Manifest files, folding in the other pending files
    when there are no header (SVN/CVS keyword) changes."""
    myfiles = mymanifests[:]
    if not myheaders:
        # No header changes, so this Manifest commit must also include
        # the other (yet uncommitted) files.
        myfiles += myupdates
        myfiles += myremoved
    myfiles.sort()

    commitmessagedir = tempfile.mkdtemp(".repoman.msg")
    commitmessagefile = os.path.join(commitmessagedir, "COMMIT_EDITMSG")
    with open(commitmessagefile, "wb") as handle:
        handle.write(_unicode_encode(commitmessage))

    retval = self.vcs_settings.changes.commit(myfiles, commitmessagefile)
    # Clean up the commit-message directory before possibly exiting.
    try:
        shutil.rmtree(commitmessagedir)
    except OSError:
        pass
    if retval == os.EX_OK:
        return
    writemsg_level(
        "!!! Exiting on %s (shell) error code: %s\n"
        % (self.vcs_settings.vcs, retval),
        level=logging.ERROR, noiselevel=-1)
    sys.exit(retval)
def priming_commit(self, myupdates, myremoved, commitmessage):
    """Perform a priming commit of updated/removed files before the
    Manifest commit, showing the commit message to the user.

    Exits the process with the VCS error code if the commit fails.
    """
    myfiles = myupdates + myremoved
    commitmessagedir = tempfile.mkdtemp(".repoman.msg")
    commitmessagefile = os.path.join(commitmessagedir, "COMMIT_EDITMSG")
    with open(commitmessagefile, "wb") as mymsg:
        mymsg.write(_unicode_encode(commitmessage))

    # Echo the commit message between colored separator rules.
    separator = '-' * 78
    for output_line in (
            "",
            green("Using commit message:"),
            green(separator),
            commitmessage,
            green(separator),
            ""):
        print(output_line)

    # Having a leading ./ prefix on file paths can trigger a bug in
    # the cvs server when committing files to multiple directories,
    # so strip the prefix.
    myfiles = [f.lstrip("./") for f in myfiles]

    retval = self.vcs_settings.changes.commit(myfiles, commitmessagefile)
    # cleanup the commit message before possibly exiting
    try:
        shutil.rmtree(commitmessagedir)
    except OSError:
        pass
    if retval != os.EX_OK:
        writemsg_level(
            "!!! Exiting on %s (shell) "
            "error code: %s\n" % (self.vcs_settings.vcs, retval),
            level=logging.ERROR, noiselevel=-1)
        sys.exit(retval)
def parse(self):
    """Parse this news item file.

    Populates self.restrictions from Display-If-* lines, records
    invalid lines, and logs an error (setting self._valid = False)
    when the item is malformed.  Marks the item parsed by setting
    self._parsed.
    """
    # Fix: use a context manager so the file handle is closed even if
    # readlines() raises (the original leaked the handle on error).
    with io.open(_unicode_encode(self.path,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['content'],
            errors='replace') as f:
        lines = f.readlines()
    self.restrictions = {}
    invalids = []
    news_format = None

    # Look for News-Item-Format
    for i, line in enumerate(lines):
        format_match = _formatRE.match(line)
        if format_match is not None:
            news_format = format_match.group(1)
            if fnmatch.fnmatch(news_format, '[12].*'):
                break
            # Unsupported format version: record it as invalid.
            invalids.append((i + 1, line.rstrip('\n')))

    if news_format is None:
        invalids.append((0, 'News-Item-Format unspecified'))
    else:
        # Parse the rest
        for i, line in enumerate(lines):
            # Optimization to ignore regex matches on lines that
            # will never match
            if not line.startswith('D'):
                continue
            restricts = {
                _installedRE: DisplayInstalledRestriction,
                _profileRE: DisplayProfileRestriction,
                _keywordRE: DisplayKeywordRestriction
            }
            for regex, restriction in restricts.items():
                match = regex.match(line)
                if match:
                    restrict = restriction(
                        match.groups()[0].strip(), news_format)
                    if not restrict.isValid():
                        invalids.append((i + 1, line.rstrip("\n")))
                    else:
                        # Group restrictions by their restriction class.
                        self.restrictions.setdefault(
                            id(restriction), []).append(restrict)
                    continue

    if invalids:
        self._valid = False
        msg = []
        msg.append(_("Invalid news item: %s") % (self.path,))
        for lineno, line in invalids:
            msg.append(_(" line %d: %s") % (lineno, line))
        writemsg_level("".join("!!! %s\n" % x for x in msg),
            level=logging.ERROR, noiselevel=-1)

    self._parsed = True
def load(self):
    """Reload the registry data from file.

    Reads self._filename and decodes it as JSON, falling back to the
    legacy pickle format.  A missing file yields an empty registry;
    EACCES is re-raised as PermissionDenied.  On success,
    self._data_orig is set so store() can detect changes, and
    pruneNonExisting() is invoked.
    """
    self._data = None
    f = None
    content = None
    try:
        f = open(
            _unicode_encode(self._filename,
                encoding=_encodings["fs"], errors="strict"),
            "rb",
        )
        content = f.read()
    except EnvironmentError as e:
        # Tolerate a missing registry file; propagate everything else.
        if not hasattr(e, "errno"):
            raise
        elif e.errno == errno.ENOENT:
            pass
        elif e.errno == PermissionDenied.errno:
            raise PermissionDenied(self._filename)
        else:
            raise
    finally:
        if f is not None:
            f.close()

    # content is empty if it's an empty lock file
    if content:
        try:
            self._data = json.loads(
                _unicode_decode(content,
                    encoding=_encodings["repo.content"],
                    errors="strict"))
        except SystemExit:
            raise
        except Exception as e:
            # Fall back to the old pickle format; if that also fails,
            # report the original JSON decode error.
            try:
                self._data = pickle.loads(content)
            except SystemExit:
                raise
            except Exception:
                writemsg_level(
                    _("!!! Error loading '%s': %s\n") % (self._filename, e),
                    level=logging.ERROR,
                    noiselevel=-1,
                )

    if self._data is None:
        self._data = {}
    else:
        for k, v in self._data.items():
            if (isinstance(v, (list, tuple)) and len(v) == 3
                    and isinstance(v[2], set)):
                # convert set to list, for write with JSONEncoder
                self._data[k] = (v[0], v[1], list(v[2]))

    self._data_orig = self._data.copy()
    self.pruneNonExisting()
def config_protect_check(trees):
    """Warn for every configured root whose CONFIG_PROTECT is empty.

    @param trees: mapping of root path -> trees dict containing a
        'root_config' object whose settings provide get()
    """
    for root, root_trees in trees.items():
        if root_trees["root_config"].settings.get("CONFIG_PROTECT"):
            continue
        warning = "!!! CONFIG_PROTECT is empty"
        if root != "/":
            warning += " for '%s'" % root
        warning += "\n"
        writemsg_level(warning, level=logging.WARN, noiselevel=-1)
def check_uri(self):
    '''Check the sync_uri setting'''
    # A repo with a sync-type must also define where to sync from.
    if self.repo.sync_uri is not None:
        return
    writemsg_level(
        "!!! %s\n" % _(
            "Repository '%s' has sync-type attribute, but is missing sync-uri attribute"
        ) % self.repo.name,
        level=self.logger.ERROR, noiselevel=-1)
def new(self, **kwargs):
    '''Do the initial clone of the repository'''
    if kwargs:
        self._kwargs(kwargs)
    # Bail out early when the git binary is unavailable.
    if not self.has_bin:
        return (1, False)
    try:
        if not os.path.exists(self.repo.location):
            os.makedirs(self.repo.location)
            self.logger(self.xterm_titles,
                'Created new directory %s' % self.repo.location)
    except IOError:
        return (1, False)

    sync_uri = self.repo.sync_uri
    if sync_uri.startswith("file://"):
        sync_uri = sync_uri[7:]

    git_cmd_opts = ""

    # Apply user-provided environment overrides: first the ones for
    # all git operations, then the clone-specific ones on top.
    if self.repo.module_specific_options.get('sync-git-env'):
        shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-env'])
        env = dict((k, v) for k, _, v in (assignment.partition('=')
            for assignment in shlexed_env) if k)
        self.spawn_kwargs['env'].update(env)

    if self.repo.module_specific_options.get('sync-git-clone-env'):
        shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-clone-env'])
        clone_env = dict((k, v) for k, _, v in (assignment.partition('=')
            for assignment in shlexed_env) if k)
        self.spawn_kwargs['env'].update(clone_env)

    if self.settings.get("PORTAGE_QUIET") == "1":
        git_cmd_opts += " --quiet"
    # clone-depth takes precedence over sync-depth; a value of 0 means
    # unlimited history (no --depth flag), and the default is --depth 1.
    if self.repo.clone_depth is not None:
        if self.repo.clone_depth != 0:
            git_cmd_opts += " --depth %d" % self.repo.clone_depth
    elif self.repo.sync_depth is not None:
        if self.repo.sync_depth != 0:
            git_cmd_opts += " --depth %d" % self.repo.sync_depth
    else:
        # default
        git_cmd_opts += " --depth 1"
    if self.repo.module_specific_options.get('sync-git-clone-extra-opts'):
        git_cmd_opts += " %s" % self.repo.module_specific_options['sync-git-clone-extra-opts']

    git_cmd = "%s clone%s %s ." % (self.bin_command, git_cmd_opts,
        portage._shell_quote(sync_uri))
    writemsg_level(git_cmd + "\n")

    exitcode = portage.process.spawn_bash("cd %s ; exec %s" % (
        portage._shell_quote(self.repo.location), git_cmd),
        **self.spawn_kwargs)
    if exitcode != os.EX_OK:
        msg = "!!! git clone error in %s" % self.repo.location
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
        return (exitcode, False)
    # Verify the cloned HEAD (e.g. signature check) before reporting success.
    if not self.verify_head():
        return (1, False)
    return (os.EX_OK, True)
def check_procfs():
    """Verify that /proc is mounted on Linux.

    @return: os.EX_OK when /proc is mounted or the platform does not
        use procfs; 1 after printing a warning otherwise
    """
    procfs_path = '/proc'
    not_linux = platform.system() not in ("Linux",)
    if not_linux or os.path.ismount(procfs_path):
        return os.EX_OK
    msg = "It seems that %s is not mounted. You have been warned." % procfs_path
    warning = "".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70))
    writemsg_level(warning, level=logging.ERROR, noiselevel=-1)
    return 1
def new(self, **kwargs):
    """Do the initial clone of the repository"""
    if kwargs:
        self._kwargs(kwargs)
    try:
        if not os.path.exists(self.repo.location):
            os.makedirs(self.repo.location)
            self.logger(self.xterm_titles,
                "Created new directory %s" % self.repo.location)
    except IOError:
        return (1, False)

    sync_uri = self.repo.sync_uri
    if sync_uri.startswith("file://"):
        sync_uri = sync_uri[7:]

    hg_cmd_opts = ""

    # Apply user-provided environment overrides: first the ones for
    # all mercurial operations, then the clone-specific ones on top.
    if self.repo.module_specific_options.get("sync-mercurial-env"):
        shlexed_env = shlex_split(
            self.repo.module_specific_options["sync-mercurial-env"])
        env = dict(
            (k, v) for k, _, v in
            (assignment.partition("=") for assignment in shlexed_env)
            if k)
        self.spawn_kwargs["env"].update(env)

    if self.repo.module_specific_options.get("sync-mercurial-clone-env"):
        shlexed_env = shlex_split(
            self.repo.module_specific_options["sync-mercurial-clone-env"])
        clone_env = dict(
            (k, v) for k, _, v in
            (assignment.partition("=") for assignment in shlexed_env)
            if k)
        self.spawn_kwargs["env"].update(clone_env)

    if self.settings.get("PORTAGE_QUIET") == "1":
        hg_cmd_opts += " --quiet"
    if self.repo.module_specific_options.get(
            "sync-mercurial-clone-extra-opts"):
        hg_cmd_opts += (
            " %s" % self.repo.
            module_specific_options["sync-mercurial-clone-extra-opts"])
    hg_cmd = "%s clone%s %s ." % (
        self.bin_command,
        hg_cmd_opts,
        portage._shell_quote(sync_uri),
    )
    writemsg_level(hg_cmd + "\n")

    exitcode = portage.process.spawn(
        shlex_split(hg_cmd),
        cwd=portage._unicode_encode(self.repo.location),
        **self.spawn_kwargs)
    if exitcode != os.EX_OK:
        msg = "!!! hg clone error in %s" % self.repo.location
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
        return (exitcode, False)
    return (os.EX_OK, True)
def check_auto_sync(self):
    '''Check the auto_sync setting'''
    if self.repo.auto_sync is None:
        # Attribute missing entirely.
        writemsg_level(
            "!!! %s\n" % _("Repository '%s' is missing auto_sync attribute")
            % self.repo.name,
            level=self.logger.ERROR, noiselevel=-1)
        return
    if self.repo.auto_sync.lower() not in ("yes", "true", "no", "false"):
        # Attribute present but not a recognized boolean word.
        writemsg_level(
            "!!! %s\n" % _("Repository '%s' auto_sync attribute must be one of: %s")
            % (self.repo.name, '{yes, true, no, false}'),
            level=self.logger.ERROR, noiselevel=-1)
def add_manifest(self, mymanifests, myheaders, myupdates, myremoved, commitmessage): myfiles = mymanifests[:] # If there are no header (SVN/CVS keywords) changes in # the files, this Manifest commit must include the # other (yet uncommitted) files. if not myheaders: myfiles += myupdates myfiles += myremoved myfiles.sort() fd, commitmessagefile = tempfile.mkstemp(".repoman.msg") mymsg = os.fdopen(fd, "wb") mymsg.write(_unicode_encode(commitmessage)) mymsg.close() commit_cmd = [] if self.options.pretend and self.vcs_settings.vcs is None: # substitute a bogus value for pretend output commit_cmd.append("cvs") else: commit_cmd.append(self.vcs_settings.vcs) commit_cmd.extend(self.vcs_settings.vcs_global_opts) commit_cmd.append("commit") commit_cmd.extend(self.vcs_settings.vcs_local_opts) if self.vcs_settings.vcs == "hg": commit_cmd.extend(["--logfile", commitmessagefile]) commit_cmd.extend(myfiles) else: commit_cmd.extend(["-F", commitmessagefile]) commit_cmd.extend(f.lstrip("./") for f in myfiles) try: if self.options.pretend: print("(%s)" % (" ".join(commit_cmd), )) else: retval = spawn(commit_cmd, env=self.repo_settings.commit_env) if retval != os.EX_OK: if self.repo_settings.repo_config.sign_commit and self.vcs_settings.vcs == 'git' and \ not git_supports_gpg_sign(): # Inform user that newer git is needed (bug #403323). logging.error( "Git >=1.7.9 is required for signed commits!") writemsg_level("!!! Exiting on %s (shell) " "error code: %s\n" % (self.vcs_settings.vcs, retval), level=logging.ERROR, noiselevel=-1) sys.exit(retval) finally: try: os.unlink(commitmessagefile) except OSError: pass
def update(self):
    '''
    Update existing git repository, and ignore the syncuri. We are
    going to trust the user and assume that the user is in the branch
    that he/she wants updated. We'll let the user manage branches with
    git directly.
    '''
    git_cmd_opts = ""
    quiet = self.settings.get("PORTAGE_QUIET") == "1"
    if quiet:
        git_cmd_opts += " --quiet"
    if self.repo.module_specific_options.get('sync-git-pull-extra-opts'):
        git_cmd_opts += " %s" % self.repo.module_specific_options['sync-git-pull-extra-opts']
    if self.repo.sync_depth is None:
        git_cmd = "%s pull%s" % (self.bin_command, git_cmd_opts)
    else:
        # Since the default merge strategy typically fails when
        # the depth is not unlimited, use `git fetch` followed by
        # `git reset --merge`.
        remote_branch = portage._unicode_decode(
            subprocess.check_output([self.bin_command, 'rev-parse',
                '--abbrev-ref', '--symbolic-full-name', '@{upstream}'],
                cwd=portage._unicode_encode(self.repo.location))).rstrip('\n')

        git_cmd_opts += " --depth %d" % self.repo.sync_depth
        # Fetch from the remote that the upstream branch belongs to.
        git_cmd = "%s fetch %s%s" % (self.bin_command,
            remote_branch.partition('/')[0], git_cmd_opts)

    writemsg_level(git_cmd + "\n")

    # Record HEAD before syncing so the caller can tell whether
    # anything actually changed.
    rev_cmd = [self.bin_command, "rev-list", "--max-count=1", "HEAD"]
    previous_rev = subprocess.check_output(rev_cmd,
        cwd=portage._unicode_encode(self.repo.location))

    exitcode = portage.process.spawn_bash("cd %s ; exec %s" % (
        portage._shell_quote(self.repo.location), git_cmd),
        **self.spawn_kwargs)

    if exitcode == os.EX_OK and self.repo.sync_depth is not None:
        # Second phase of the shallow-sync strategy: move the local
        # branch to the fetched upstream state.
        reset_cmd = [self.bin_command, 'reset', '--merge', remote_branch]
        if quiet:
            reset_cmd.append('--quiet')
        exitcode = subprocess.call(reset_cmd,
            cwd=portage._unicode_encode(self.repo.location))

    if exitcode != os.EX_OK:
        msg = "!!! git pull error in %s" % self.repo.location
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
        return (exitcode, False)

    current_rev = subprocess.check_output(rev_cmd,
        cwd=portage._unicode_encode(self.repo.location))

    # Second element signals whether the cache needs updating.
    return (os.EX_OK, current_rev != previous_rev)
def update_manifest(self, checkdir):
    """Perform a manifest generation for the pkg

    @param checkdir: the current package directory
    @rtype: bool
    @return: True if successful, False otherwise
    """
    self.generated_manifest = False
    failed = False
    self.auto_assumed = set()
    fetchlist_dict = portage.FetchlistDict(checkdir,
        self.repoman_settings, self.portdb)
    if self.options.mode == "manifest" and self.options.force:
        # --force regenerates digests from scratch.
        self._discard_dist_digests(checkdir, fetchlist_dict)
    self.repoman_settings["O"] = checkdir
    try:
        self.generated_manifest = digestgen(
            mysettings=self.repoman_settings, myportdb=self.portdb)
    except portage.exception.PermissionDenied as e:
        self.generated_manifest = False
        writemsg_level(
            "!!! Permission denied: '%s'\n" % (e, ),
            level=logging.ERROR,
            noiselevel=-1,
        )
    if not self.generated_manifest:
        writemsg_level(
            "!!! Unable to generate manifest for '%s'.\n" % (checkdir, ),
            level=logging.ERROR,
            noiselevel=-1,
        )
        failed = True
    if self.options.mode == "manifest":
        if (not failed and self.options.force and self.auto_assumed
                and "assume-digests" in self.repoman_settings.features):
            # Show which digests were assumed despite the --force option
            # being given. This output will already have been shown by
            # digestgen() if assume-digests is not enabled, so only show
            # it here if assume-digests is enabled.
            pkgs = list(fetchlist_dict)
            pkgs.sort()
            portage.writemsg_stdout(
                " digest.assumed %s" % portage.output.colorize(
                    "WARN", str(len(self.auto_assumed)).rjust(18)) + "\n")
            for cpv in pkgs:
                fetchmap = fetchlist_dict[cpv]
                pf = portage.catsplit(cpv)[1]
                for distfile in sorted(fetchmap):
                    if distfile in self.auto_assumed:
                        portage.writemsg_stdout(" %s::%s\n" % (pf, distfile))
    return not failed
def update(self):
    '''
    Update existing git repository, and ignore the syncuri. We are
    going to trust the user and assume that the user is in the branch
    that he/she wants updated. We'll let the user manage branches with
    git directly.
    '''
    # Bail out early when the git binary is unavailable.
    if not self.has_bin:
        return (1, False)
    git_cmd_opts = ""
    # Apply user-provided environment overrides: first the ones for
    # all git operations, then the pull-specific ones on top.
    if self.repo.module_specific_options.get('sync-git-env'):
        shlexed_env = shlex_split(
            self.repo.module_specific_options['sync-git-env'])
        env = dict(
            (k, v) for k, _, v in
            (assignment.partition('=') for assignment in shlexed_env)
            if k)
        self.spawn_kwargs['env'].update(env)

    if self.repo.module_specific_options.get('sync-git-pull-env'):
        shlexed_env = shlex_split(
            self.repo.module_specific_options['sync-git-pull-env'])
        pull_env = dict(
            (k, v) for k, _, v in
            (assignment.partition('=') for assignment in shlexed_env)
            if k)
        self.spawn_kwargs['env'].update(pull_env)

    if self.settings.get("PORTAGE_QUIET") == "1":
        git_cmd_opts += " --quiet"
    if self.repo.module_specific_options.get('sync-git-pull-extra-opts'):
        git_cmd_opts += " %s" % self.repo.module_specific_options[
            'sync-git-pull-extra-opts']
    git_cmd = "%s pull%s" % (self.bin_command, git_cmd_opts)
    writemsg_level(git_cmd + "\n")

    # Record HEAD before pulling so the caller can tell whether
    # anything actually changed.
    rev_cmd = [self.bin_command, "rev-list", "--max-count=1", "HEAD"]
    previous_rev = subprocess.check_output(rev_cmd,
        cwd=portage._unicode_encode(self.repo.location))

    exitcode = portage.process.spawn_bash(
        "cd %s ; exec %s" % (portage._shell_quote(self.repo.location),
            git_cmd),
        **self.spawn_kwargs)
    if exitcode != os.EX_OK:
        msg = "!!! git pull error in %s" % self.repo.location
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
        return (exitcode, False)
    # Verify the new HEAD (e.g. signature check) before reporting success.
    if not self.verify_head():
        return (1, False)

    current_rev = subprocess.check_output(rev_cmd,
        cwd=portage._unicode_encode(self.repo.location))

    # Second element signals whether the cache needs updating.
    return (os.EX_OK, current_rev != previous_rev)
def run(self, checkdir, portdb):
    """Generate or check the Manifest for a package directory.

    @param checkdir: the current package directory
    @param portdb: portdb instance to resolve fetch lists
    @return: True when manifest mode handled the package (caller
        should skip the remaining main-loop code), False otherwise.
        Exits the process when manifest generation fails in
        manifest mode.
    """
    self.generated_manifest = False
    self.digest_only = self.options.mode != 'manifest-check' \
        and self.options.digest == 'y'
    if self.options.pretend:
        return False
    if self.options.mode in ("manifest", 'commit', 'fix') or self.digest_only:
        failed = False
        self.auto_assumed = set()
        fetchlist_dict = portage.FetchlistDict(
            checkdir, self.repoman_settings, portdb)
        if self.options.mode == 'manifest' and self.options.force:
            # --force regenerates the manifest from scratch, bypassing
            # the dependency check for existing digests.
            portage._doebuild_manifest_exempt_depend += 1
            self.create_manifest(checkdir, fetchlist_dict)
        self.repoman_settings["O"] = checkdir
        try:
            self.generated_manifest = digestgen(
                mysettings=self.repoman_settings, myportdb=portdb)
        except portage.exception.PermissionDenied as e:
            self.generated_manifest = False
            writemsg_level(
                "!!! Permission denied: '%s'\n" % (e,),
                level=logging.ERROR, noiselevel=-1)
        if not self.generated_manifest:
            writemsg_level(
                "Unable to generate manifest.",
                level=logging.ERROR, noiselevel=-1)
            failed = True

        if self.options.mode == "manifest":
            if not failed and self.options.force and self.auto_assumed and \
                    'assume-digests' in self.repoman_settings.features:
                # Show which digests were assumed despite the --force option
                # being given. This output will already have been shown by
                # digestgen() if assume-digests is not enabled, so only show
                # it here if assume-digests is enabled.
                pkgs = list(fetchlist_dict)
                pkgs.sort()
                portage.writemsg_stdout(
                    " digest.assumed %s" % portage.output.colorize(
                        "WARN", str(len(self.auto_assumed)).rjust(18)) + "\n")
                for cpv in pkgs:
                    fetchmap = fetchlist_dict[cpv]
                    pf = portage.catsplit(cpv)[1]
                    for distfile in sorted(fetchmap):
                        if distfile in self.auto_assumed:
                            portage.writemsg_stdout(
                                " %s::%s\n" % (pf, distfile))
            # continue, skip remaining main loop code
            return True
        elif failed:
            sys.exit(1)
    return False
def do_callback(self, result):
    """Record the result of a sync task and forward it.

    @param result: (exitcode, updatecache_flg) tuple produced by the
        sync operation
    """
    exitcode, updatecache_flg = result
    self.exitcode = exitcode
    if exitcode == 0:
        # Only announce successful syncs.
        msg = "=== Sync completed for %s" % self.repo.name
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n")
    if self.callback:
        self.callback(exitcode, updatecache_flg)
    return
def noisy_refresh_keys():
    """
    Since retry does not help for some types of errors, display errors
    as soon as they occur.
    """
    try:
        openpgp_env.refresh_keys(keyserver=self.repo.sync_openpgp_keyserver)
    except Exception as e:
        # Surface the failure immediately, then re-raise so the
        # caller's retry wrapper can still decide to retry.
        writemsg_level("%s\n" % (e,),
            level=logging.ERROR, noiselevel=-1)
        raise  # retry
def add_manifest(self, mymanifests, myheaders, myupdates, myremoved, commitmessage): myfiles = mymanifests[:] # If there are no header (SVN/CVS keywords) changes in # the files, this Manifest commit must include the # other (yet uncommitted) files. if not myheaders: myfiles += myupdates myfiles += myremoved myfiles.sort() fd, commitmessagefile = tempfile.mkstemp(".repoman.msg") mymsg = os.fdopen(fd, "wb") mymsg.write(_unicode_encode(commitmessage)) mymsg.close() commit_cmd = [] if self.options.pretend and self.vcs_settings.vcs is None: # substitute a bogus value for pretend output commit_cmd.append("cvs") else: commit_cmd.append(self.vcs_settings.vcs) commit_cmd.extend(self.vcs_settings.vcs_global_opts) commit_cmd.append("commit") commit_cmd.extend(self.vcs_settings.vcs_local_opts) if self.vcs_settings.vcs == "hg": commit_cmd.extend(["--logfile", commitmessagefile]) commit_cmd.extend(myfiles) else: commit_cmd.extend(["-F", commitmessagefile]) commit_cmd.extend(f.lstrip("./") for f in myfiles) try: if self.options.pretend: print("(%s)" % (" ".join(commit_cmd),)) else: retval = spawn(commit_cmd, env=self.repo_settings.commit_env) if retval != os.EX_OK: if self.repo_settings.repo_config.sign_commit and self.vcs_settings.vcs == 'git' and \ not git_supports_gpg_sign(): # Inform user that newer git is needed (bug #403323). logging.error( "Git >=1.7.9 is required for signed commits!") writemsg_level( "!!! Exiting on %s (shell) " "error code: %s\n" % (self.vcs_settings.vcs, retval), level=logging.ERROR, noiselevel=-1) sys.exit(retval) finally: try: os.unlink(commitmessagefile) except OSError: pass
def perform_hooks(rel_directory, *argv, prefix="/"):
    """Run every hook found under rel_directory, passing argv as
    command-line arguments, and report any hook that fails to spawn.

    @param rel_directory: hook directory relative to prefix
    @param argv: arguments appended to each hook invocation
    @param prefix: filesystem prefix under which hooks are located
    """
    for filepath, name in get_hooks_from_dir(rel_directory, prefix).items():
        # NOTE(review): the command is built by string concatenation;
        # confirm argv values can never contain shell metacharacters.
        hook_command = filepath + " " + " ".join(map(str, argv))
        retval = portage.process.spawn(hook_command)
        if retval == portage.os.EX_OK:
            continue
        writemsg_level(
            " %s Spawn failed for: %s, %s\n" % (bad("*"), name, filepath),
            level=logging.ERROR,
            noiselevel=-1,
        )
def update_manifest(self, checkdir):
    '''Perform a manifest generation for the pkg

    @param checkdir: the current package directory
    @returns: True when manifest mode handled the package (caller
        should skip the remaining main-loop code), False otherwise.
        Exits the process when manifest generation fails in
        manifest mode.
    '''
    self.generated_manifest = False
    failed = False
    self.auto_assumed = set()
    fetchlist_dict = portage.FetchlistDict(
        checkdir, self.repoman_settings, self.portdb)
    if self.options.mode == 'manifest' and self.options.force:
        # --force regenerates digests from scratch.
        self._discard_dist_digests(checkdir, fetchlist_dict)
    self.repoman_settings["O"] = checkdir
    try:
        self.generated_manifest = digestgen(
            mysettings=self.repoman_settings, myportdb=self.portdb)
    except portage.exception.PermissionDenied as e:
        self.generated_manifest = False
        writemsg_level(
            "!!! Permission denied: '%s'\n" % (e,),
            level=logging.ERROR, noiselevel=-1)
    if not self.generated_manifest:
        writemsg_level(
            "Unable to generate manifest.",
            level=logging.ERROR, noiselevel=-1)
        failed = True

    if self.options.mode == "manifest":
        if not failed and self.options.force and self.auto_assumed and \
                'assume-digests' in self.repoman_settings.features:
            # Show which digests were assumed despite the --force option
            # being given. This output will already have been shown by
            # digestgen() if assume-digests is not enabled, so only show
            # it here if assume-digests is enabled.
            pkgs = list(fetchlist_dict)
            pkgs.sort()
            portage.writemsg_stdout(
                " digest.assumed %s" % portage.output.colorize(
                    "WARN", str(len(self.auto_assumed)).rjust(18)) + "\n")
            for cpv in pkgs:
                fetchmap = fetchlist_dict[cpv]
                pf = portage.catsplit(cpv)[1]
                for distfile in sorted(fetchmap):
                    if distfile in self.auto_assumed:
                        portage.writemsg_stdout(
                            " %s::%s\n" % (pf, distfile))
        # continue, skip remaining main loop code
        return True
    elif failed:
        sys.exit(1)
    return False
def create_depgraph_params(myopts, myaction):
    """Translate emerge options into depgraph engine parameters.

    Parameter meanings:
      self: include _this_ package regardless of if it is merged.
      selective: exclude the package if it is merged
      recurse: go into the dependencies
      deep: go into the dependencies of already merged packages
      empty: pretend nothing is merged
      complete: completely account for all known dependencies
      remove: build graph for use in removing packages
      rebuilt_binaries: replace installed packages with rebuilt binaries

    @param myopts: parsed emerge option mapping
    @param myaction: the emerge action (e.g. "remove")
    @return: dict of depgraph parameters
    """
    myparams = {"recurse": True}

    if myaction == "remove":
        myparams["remove"] = True
        myparams["complete"] = True
        return myparams

    selective_triggers = ("--update", "--newuse", "--reinstall", "--noreplace")
    if any(opt in myopts for opt in selective_triggers) or \
            myopts.get("--selective", "n") != "n":
        myparams["selective"] = True

    if "--emptytree" in myopts:
        # An empty tree implies everything is rebuilt, so selective
        # behavior is meaningless.
        myparams["empty"] = True
        myparams.pop("selective", None)

    if "--nodeps" in myopts:
        myparams.pop("recurse", None)

    if "--deep" in myopts:
        myparams["deep"] = myopts["--deep"]

    if "--complete-graph" in myopts:
        myparams["complete"] = True

    rebuilt_binaries = myopts.get('--rebuilt-binaries')
    if rebuilt_binaries is True or \
            rebuilt_binaries != 'n' and \
            '--usepkgonly' in myopts and \
            myopts.get('--deep') is True and \
            '--update' in myopts:
        myparams['rebuilt_binaries'] = True

    if myopts.get("--selective") == "n":
        # --selective=n can be used to remove selective
        # behavior that may have been implied by some
        # other option like --update.
        myparams.pop("selective", None)

    if '--debug' in myopts:
        writemsg_level('\n\nmyparams %s\n\n' % myparams,
            noiselevel=-1, level=logging.DEBUG)

    return myparams
def parse(self):
    """Parse this news item file.

    Populates self.restrictions from Display-If-* lines, records
    invalid lines, and logs an error (setting self._valid = False)
    when the item is malformed.  Marks the item parsed by setting
    self._parsed.
    """
    # Fix: use a context manager so the file handle is closed even if
    # readlines() raises (the original leaked the handle on error).
    with io.open(_unicode_encode(self.path,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['content'],
            errors='replace') as f:
        lines = f.readlines()
    self.restrictions = {}
    invalids = []
    news_format = None

    # Look for News-Item-Format
    for i, line in enumerate(lines):
        format_match = _formatRE.match(line)
        if format_match is not None:
            news_format = format_match.group(1)
            if fnmatch.fnmatch(news_format, '[12].*'):
                break
            # Unsupported format version: record it as invalid.
            invalids.append((i + 1, line.rstrip('\n')))

    if news_format is None:
        invalids.append((0, 'News-Item-Format unspecified'))
    else:
        # Parse the rest
        for i, line in enumerate(lines):
            # Optimization to ignore regex matches on lines that
            # will never match
            if not line.startswith('D'):
                continue
            restricts = {
                _installedRE: DisplayInstalledRestriction,
                _profileRE: DisplayProfileRestriction,
                _keywordRE: DisplayKeywordRestriction
            }
            for regex, restriction in restricts.items():
                match = regex.match(line)
                if match:
                    restrict = restriction(
                        match.groups()[0].strip(), news_format)
                    if not restrict.isValid():
                        invalids.append((i + 1, line.rstrip("\n")))
                    else:
                        # Group restrictions by their restriction class.
                        self.restrictions.setdefault(
                            id(restriction), []).append(restrict)
                    continue

    if invalids:
        self._valid = False
        msg = []
        msg.append(_("Invalid news item: %s") % (self.path,))
        for lineno, line in invalids:
            msg.append(_(" line %d: %s") % (lineno, line))
        writemsg_level("".join("!!! %s\n" % x for x in msg),
            level=logging.ERROR, noiselevel=-1)

    self._parsed = True
def _check_depth(self, attr): d = getattr(self.repo, attr) if d is not None: try: d = int(d) except ValueError: writemsg_level("!!! %s\n" % _("%s value is not a number: '%s'") % (attr.replace('_', '-'), d), level=self.logger.ERROR, noiselevel=-1) else: setattr(self.repo, attr, d)