def _sync(self, verbosity, output_fd):
    """Run the sync command, doing an initial pull or updating in place."""
    try:
        st = os.stat(self.basedir)
    except EnvironmentError as exc:
        # Anything other than "does not exist" is a hard failure.
        if exc.errno != errno.ENOENT:
            compatibility.raise_from(
                generic_exception(self, self.basedir, exc))
        command, chdir = self._initial_pull() + self.opts, None
    else:
        if not stat.S_ISDIR(st.st_mode):
            raise generic_exception(self, self.basedir, "isn't a directory")
        command, chdir = self._update_existing() + self.opts, self.basedir
    # we assume syncers support -v and -q for verbose and quiet output
    if verbosity > 0:
        command.append('-' + 'v' * verbosity)
    elif verbosity < 0:
        command.append('-q')
    fd_map = {0: 0, 1: output_fd, 2: output_fd}
    return self._spawn(command, fd_map, cwd=chdir) == 0
def collapse_named_section(self, name, raise_on_missing=True):
    """Collapse a config by name, possibly returning a cached instance.

    @returns: :obj:`CollapsedConfig`.

    If there is no section with this name a ConfigurationError is raised,
    unless raise_on_missing is False in which case None is returned.
    """
    if name in self._refs:
        raise errors.ConfigurationError(
            'Reference to %r is recursive' % (name,))
    self._refs.add(name)
    try:
        cached = self.rendered_sections.get(name)
        if cached is not None:
            return cached
        stack = self.sections_lookup.get(name)
        if stack is None:
            if raise_on_missing:
                raise errors.ConfigurationError(
                    'no section called %r' % (name,))
            return None
        try:
            result = self.collapse_section(stack, name)
            result.name = name
        except compatibility.IGNORED_EXCEPTIONS:
            raise
        except Exception:
            compatibility.raise_from(errors.ConfigurationError(
                "Collapsing section named %r" % (name,)))
        self.rendered_sections[name] = result
        return result
    finally:
        # Always drop the recursion guard, success or failure.
        self._refs.remove(name)
def setup_distfiles(self): if not self.verified_files and self.allow_fetching: ops = self.domain.pkg_operations(self.pkg, observer=self.observer) if not ops.fetch(): raise format.BuildError("failed fetching required distfiles") self.verified_files = ops._fetch_op.verified_files if self.verified_files: try: if os.path.exists(self.env["DISTDIR"]): if (os.path.isdir(self.env["DISTDIR"]) and not os.path.islink(self.env["DISTDIR"])): shutil.rmtree(self.env["DISTDIR"]) else: os.unlink(self.env["DISTDIR"]) except EnvironmentError as oe: raise_from( format.FailedDirectory( self.env["DISTDIR"], "failed removing existing file/dir/link at: exception %s" % oe)) if not ensure_dirs(self.env["DISTDIR"], mode=0770, gid=portage_gid): raise format.FailedDirectory(
def _get_versions(self, catpkg):
    """Return the version components for every ebuild of a (cat, pkg).

    :param catpkg: (category, package) tuple.
    :raises KeyError: if the package directory can't be listed.
    :raises ebuild_errors.InvalidCPV: for nonstandard -scm/-try version
        components unless ignore_paludis_versioning is enabled.
    """
    cppath = pjoin(self.base, catpkg[0], catpkg[1])
    pkg = catpkg[-1] + "-"
    lp = len(pkg)
    extension = self.extension
    ext_len = -len(extension)
    try:
        ret = tuple(x[lp:ext_len] for x in listdir_files(cppath)
                    if x[ext_len:] == extension and x[:lp] == pkg)
        if any(('scm' in x or '-try' in x) for x in ret):
            if not self.ignore_paludis_versioning:
                for x in ret:
                    if 'scm' in x:
                        raise ebuild_errors.InvalidCPV(
                            "%s/%s-%s has nonstandard -scm "
                            "version component" % (catpkg + (x,)))
                    elif 'try' in x:
                        raise ebuild_errors.InvalidCPV(
                            "%s/%s-%s has nonstandard -try "
                            "version component" % (catpkg + (x,)))
                raise AssertionError('unreachable codepoint was reached')
            return tuple(x for x in ret
                         if ('scm' not in x and 'try' not in x))
        return ret
    except EnvironmentError as e:
        # BUG FIX: catpkg is a tuple, so the original
        # catpkg.lstrip(os.path.sep) raised AttributeError and masked the
        # real failure; join the tuple into a path component instead
        # (matching the sibling implementation of this method).
        raise_from(KeyError(
            "failed fetching versions for package %s: %s" %
            (pjoin(self.base, '/'.join(catpkg)), str(e))))
def __init__(self, stream, term=None, forcetty=False, encoding=None):
    """Initialize.

    :type stream: file-like object.
    :param stream: stream to output to, defaulting to :py:class:`sys.stdout`.
    :type term: string.
    :param term: terminal type, pulled from the environment if omitted.
    :type forcetty: bool
    :param forcetty: force output of colors even if the wrapped stream
        is not a tty.
    :param encoding: encoding passed through to PlainTextFormatter.
    """
    PlainTextFormatter.__init__(self, stream, encoding=encoding)
    fd = stream.fileno()
    if term is None:
        # We only apply the remapping if we are guessing the
        # terminal type from the environment. If we get a term
        # type passed explicitly we just use it as-is (if the
        # caller wants the remap just doing the
        # term_alternates lookup there is easy enough.)
        term_env = os.environ.get('TERM')
        term_alt = self.term_alternates.get(term_env)
        # Try the remapped name first, then the raw $TERM value.
        for term in (term_alt, term_env):
            if term is not None:
                try:
                    curses.setupterm(fd=fd, term=term)
                except curses.error:
                    pass
                else:
                    break
        else:
            # for/else: no candidate initialized terminfo successfully.
            raise TerminfoDisabled(
                'no terminfo entries, not even for "dumb"?')
    else:
        # TODO maybe do something more useful than raising curses.error
        # if term is not in the terminfo db here?
        curses.setupterm(fd=fd, term=term)
    self._term = term
    self.width = curses.tigetnum('cols')
    try:
        # Pull the capability strings we need; TerminfoReset/TerminfoMode
        # presumably raise _BogusTerminfo on missing/invalid entries —
        # TODO confirm.
        self.reset = TerminfoReset(curses.tigetstr('sgr0'))
        self.bold = TerminfoMode(curses.tigetstr('bold'))
        self.underline = TerminfoMode(curses.tigetstr('smul'))
        self._color_reset = curses.tigetstr('op')
        self._set_color = (
            curses.tigetstr('setaf'), curses.tigetstr('setab'))
    except (_BogusTerminfo, curses.error):
        compatibility.raise_from(TerminfoHatesOurTerminal(self._term))
    if not all(self._set_color):
        raise TerminfoDisabled(
            'setting background/foreground colors is not supported')
    # NOTE(review): result discarded — presumably a probe that tparm
    # works for this terminal before we rely on it; confirm.
    curses.tparm(self._set_color[0], curses.COLOR_WHITE)
    # [fg, bg]
    self._current_colors = [None, None]
    self._modes = set()
    self._pos = 0
    self._fg_cache = defaultdictkey(partial(TerminfoColor, 0))
    self._bg_cache = defaultdictkey(partial(TerminfoColor, 1))
def _check_magic(self, fd):
    """Validate the xpak trailer/header magic and locate the index/data."""
    # Trailer lives in the final 16 bytes of the file.
    fd.seek(-16, 2)
    try:
        magic, size, post_magic = self.trailer.read(fd)
        if magic != self.trailer_pre_magic or post_magic != self.trailer_post_magic:
            raise MalformedXpak(
                "not an xpak segment, trailer didn't match: %r" % fd)
    except struct.error:
        raise_from(MalformedXpak(
            "not an xpak segment, failed parsing trailer: %r" % fd))
    # this is a bit daft, but the format seems to intentionally
    # have an off by 8 in the offset address. presumably cause the
    # header was added after the fact, either way we go +8 to
    # check the header magic.
    fd.seek(-(size + 8), 2)
    self.xpak_start = fd.tell()
    try:
        magic, index_len, data_len = self.header.read(fd)
        if magic != self.header_pre_magic:
            raise MalformedXpak(
                "not an xpak segment, header didn't match: %r" % fd)
    except struct.error:
        raise_from(MalformedXpak(
            "not an xpak segment, failed parsing header: %r" % fd))
    return self.xpak_start + self.header.size, index_len, data_len
def tree(config, repo_config, cache=(), eclass_override=None,
         default_mirrors=None, ignore_paludis_versioning=False,
         allow_missing_manifests=False):
    """Build an _UnconfiguredTree for repo_config, resolving its masters."""
    eclass_override = _sort_eclasses(config, repo_config, eclass_override)
    repos = config.objects['repo']
    try:
        masters = tuple(repos[r] for r in repo_config.masters)
    except RuntimeError as err:
        # TODO: migrate to RecursionError when going >=py3.5
        if err.message.startswith('maximum recursion depth exceeded'):
            raise_from(errors.InitializationError(
                "'%s' repo has cyclic masters: %s" % (
                    repo_config.repo_id, ', '.join(repo_config.masters))))
        raise
    return _UnconfiguredTree(
        repo_config.location, eclass_override, masters=masters, cache=cache,
        default_mirrors=default_mirrors,
        ignore_paludis_versioning=ignore_paludis_versioning,
        allow_missing_manifests=allow_missing_manifests,
        repo_config=repo_config)
def keys_dict(self):
    """Parse the xpak index into an OrderedDict of key -> location data."""
    fd = self._fd
    index_start, index_len, data_len = self._check_magic(fd)
    data_start = index_start + index_len
    result = OrderedDict()
    key_rewrite = self._reading_key_rewrites.get
    while index_len:
        key_len = struct.unpack(">L", fd.read(4))[0]
        key = fd.read(key_len)
        if compatibility.is_py3k:
            key = key.decode('ascii')
        if len(key) != key_len:
            raise MalformedXpak(
                "tried reading key %i of len %i, but hit EOF" % (
                    len(result) + 1, key_len))
        try:
            offset, data_len = struct.unpack(">LL", fd.read(8))
        except struct.error:
            raise_from(MalformedXpak(
                "key %i, tried reading data offset/len but hit EOF" % (
                    len(result) + 1)))
        key = key_rewrite(key, key)
        result[key] = (
            data_start + offset, data_len,
            compatibility.is_py3k and not key.startswith("environment"))
        # 12 for key_len, offset, data_len longs
        index_len -= (key_len + 12)
    return result
def load_make_conf(vars_dict, path, allow_sourcing=False, required=True,
                   incrementals=False):
    """parse make.conf files

    Args:
        vars_dict (dict): dictionary to add parsed variables to
        path (str): path to the make.conf which can be a regular file or
            directory, if a directory is passed all the non-hidden files within
            that directory are parsed in alphabetical order.
        allow_sourcing (bool): allow the bash `source` command inside the
            parsed files.
        required (bool): when True, a nonexistent file is a ParsingError;
            when False it is silently skipped.
        incrementals (bool): append parsed values to pre-existing values
            for keys listed in econst.incrementals instead of replacing.
    """
    sourcing_command = None
    if allow_sourcing:
        sourcing_command = 'source'
    for fp in sorted_scan(os.path.realpath(path), follow_symlinks=True,
                          nonexistent=True):
        try:
            new_vars = read_bash_dict(
                fp, vars_dict=vars_dict, sourcing_command=sourcing_command)
        except EnvironmentError as e:
            if e.errno == errno.EACCES:
                raise_from(errors.PermissionDeniedError(fp, write=False))
            if e.errno != errno.ENOENT or required:
                raise_from(errors.ParsingError("parsing %r" % (fp,),
                                               exception=e))
            # missing + not required: stop processing entirely.
            return
        if incrementals:
            for key in econst.incrementals:
                if key in vars_dict and key in new_vars:
                    new_vars[key] = "%s %s" % (vars_dict[key], new_vars[key])
        # quirk of read_bash_dict; it returns only what was mutated.
        vars_dict.update(new_vars)
def __init__(self, location, cache_location=None, repo_id='vdb',
             disable_cache=False):
    """Initialize the installed-packages tree rooted at ``location``."""
    prototype.tree.__init__(self, frozen=False)
    self.repo_id = repo_id
    self.location = location
    # Derive the metadata cache location unless caching is disabled.
    if disable_cache:
        cache_location = None
    elif cache_location is None:
        cache_location = pjoin("/var/cache/edb/dep", location.lstrip("/"))
    self.cache_location = cache_location
    self._versions_tmp_cache = {}
    try:
        st = os.stat(self.location)
        if not stat.S_ISDIR(st.st_mode):
            raise errors.InitializationError(
                "base not a dir: %r" % self.location)
        elif not st.st_mode & (os.X_OK|os.R_OK):
            raise errors.InitializationError(
                "base lacks read/executable: %r" % self.location)
    except OSError as err:
        # A missing base directory is tolerated; anything else is fatal.
        if err.errno != errno.ENOENT:
            compatibility.raise_from(errors.InitializationError(
                "lstat failed on base %r" % self.location))
    self.package_class = self.package_factory(self)
def __init__(self, location, cache_location=None, repo_id='vdb',
             disable_cache=False):
    """Initialize the installed-packages tree rooted at ``location``."""
    prototype.tree.__init__(self, frozen=False)
    self.repo_id = repo_id
    self.location = location
    if disable_cache:
        cache_location = None
    elif cache_location is None:
        # Default cache path mirrors the tree location.
        cache_location = pjoin("/var/cache/edb/dep", location.lstrip("/"))
    self.cache_location = cache_location
    self._versions_tmp_cache = {}
    try:
        st = os.stat(self.location)
        if not stat.S_ISDIR(st.st_mode):
            raise errors.InitializationError("base not a dir: %r" % self.location)
        elif not st.st_mode & (os.X_OK | os.R_OK):
            raise errors.InitializationError(
                "base lacks read/executable: %r" % self.location)
    except OSError as err:
        # Only a nonexistent base is tolerated here.
        if err.errno != errno.ENOENT:
            compatibility.raise_from(
                errors.InitializationError("lstat failed on base %r" % self.location))
    self.package_class = self.package_factory(self)
def get_default(self, type_name):
    """Finds the configuration specified default obj of type_name.

    Returns C{None} if no defaults.
    """
    try:
        candidates = self.types.get(type_name, {}).iteritems()
    except compatibility.IGNORED_EXCEPTIONS:
        raise
    except Exception:
        compatibility.raise_from(errors.ConfigurationError(
            "Collapsing defaults for %r" % (type_name,)))
    defaults = [(name, section) for name, section in candidates
                if section.default]
    if not defaults:
        return None
    if len(defaults) > 1:
        names = sorted(x[0] for x in defaults)
        raise errors.ConfigurationError(
            'type %s incorrectly has multiple default sections: %s' % (
                type_name, ', '.join(map(repr, names))))
    name, section = defaults[0]
    try:
        return section.instantiate()
    except compatibility.IGNORED_EXCEPTIONS:
        raise
    except Exception:
        compatibility.raise_from(errors.ConfigurationError(
            "Failed instantiating default %s %r" % (type_name, name)))
    return None
def _visibility_limiters(self):
    """Parse profiles/package.mask into [negated, positive] atom lists."""
    path = pjoin(self.base, 'profiles', 'package.mask')
    pos, neg = [], []
    try:
        if self.config.profile_format not in ['pms', 'portage-2']:
            # Non-standard formats allow a directory of mask files.
            paths = sorted(x.location for x in iter_scan(path) if x.is_reg)
        else:
            paths = [path]
        for path in paths:
            for line in iter_read_bash(path):
                line = line.strip()
                if line in ('-', ''):
                    raise profiles.ProfileError(
                        pjoin(self.base, 'profiles'),
                        'package.mask', "encountered empty negation: -")
                if line.startswith('-'):
                    neg.append(atom.atom(line[1:]))
                else:
                    pos.append(atom.atom(line))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    except ebuild_errors.MalformedAtom as ma:
        raise_from(profiles.ProfileError(
            pjoin(self.base, 'profiles'), 'package.mask', ma))
    return [neg, pos]
def collapse_named_section(self, name, raise_on_missing=True):
    """Collapse a config by name, possibly returning a cached instance.

    @returns: :obj:`CollapsedConfig`.

    If there is no section with this name a ConfigurationError is raised,
    unless raise_on_missing is False in which case None is returned.
    """
    if name in self._refs:
        raise errors.ConfigurationError('Reference to %r is recursive' % (name,))
    self._refs.add(name)
    try:
        hit = self.rendered_sections.get(name)
        if hit is not None:
            return hit
        section_stack = self.sections_lookup.get(name)
        if section_stack is None:
            if not raise_on_missing:
                return None
            raise errors.ConfigurationError('no section called %r' % (name,))
        try:
            collapsed = self.collapse_section(section_stack, name)
            collapsed.name = name
        except compatibility.IGNORED_EXCEPTIONS:
            raise
        except Exception:
            compatibility.raise_from(errors.ConfigurationError(
                "Collapsing section named %r" % (name,)))
        self.rendered_sections[name] = collapsed
        return collapsed
    finally:
        # Clear the recursion guard on every exit path.
        self._refs.remove(name)
def get_default(self, type_name):
    """Finds the configuration specified default obj of type_name.

    Returns C{None} if no defaults.
    """
    try:
        pairs = self.types.get(type_name, {}).iteritems()
    except compatibility.IGNORED_EXCEPTIONS:
        raise
    except Exception:
        compatibility.raise_from(errors.ConfigurationError(
            "Collapsing defaults for %r" % (type_name,)))
    defaults = [(name, section) for name, section in pairs if section.default]
    if not defaults:
        return None
    if len(defaults) > 1:
        # More than one default for a type is a configuration error.
        listing = sorted(x[0] for x in defaults)
        raise errors.ConfigurationError(
            'type %s incorrectly has multiple default sections: %s' % (
                type_name, ', '.join(map(repr, listing))))
    default_name, default_section = defaults[0]
    try:
        return default_section.instantiate()
    except compatibility.IGNORED_EXCEPTIONS:
        raise
    except Exception:
        compatibility.raise_from(errors.ConfigurationError(
            "Failed instantiating default %s %r" % (type_name, default_name)))
    return None
def _visibility_limiters(self):
    """Parse profiles/package.mask into [negated, positive] atom lists."""
    path = pjoin(self.base, 'profiles', 'package.mask')
    pos, neg = [], []
    try:
        if self.config.profile_formats.intersection(['portage-1', 'portage-2']):
            # portage-1/-2 formats permit a directory of mask files.
            paths = sorted_scan(path)
        else:
            paths = [path]
        for path in paths:
            for line in iter_read_bash(path):
                line = line.strip()
                if line in ('-', ''):
                    raise profiles.ProfileError(
                        pjoin(self.base, 'profiles'),
                        'package.mask', "encountered empty negation: -")
                if line.startswith('-'):
                    neg.append(atom.atom(line[1:]))
                else:
                    pos.append(atom.atom(line))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    except ebuild_errors.MalformedAtom as ma:
        raise_from(profiles.ProfileError(
            pjoin(self.base, 'profiles'), 'package.mask', ma))
    return [neg, pos]
def convert_string(central, value, arg_type):
    """Conversion func for a string-based DictConfigSection."""
    if not isinstance(value, basestring):
        raise ValueError('convert_string invoked with non basestring instance:'
                         ' val(%r), arg_type(%r)' % (value, arg_type))
    if arg_type == 'callable':
        try:
            func = modules.load_attribute(value)
        except modules.FailedImport:
            compatibility.raise_from(
                errors.ConfigurationError('Cannot import %r' % (value,)))
        if not callable(func):
            raise errors.ConfigurationError('%r is not callable' % (value,))
        return func
    if arg_type.startswith('refs:'):
        return [LazyNamedSectionRef(central, arg_type, ref)
                for ref in str_to_list(value)]
    if arg_type.startswith('ref:'):
        return LazyNamedSectionRef(central, arg_type, str_to_str(value))
    if arg_type == 'repr':
        return 'str', value
    converter = _str_converters.get(arg_type)
    if converter is None:
        raise errors.ConfigurationError('Unknown type %r' % (arg_type,))
    return converter(value)
def add_profile(config, base_path, user_profile_path=None,
                profile_override=None):
    """Register the 'profile' config section, honoring a user profile dir."""
    if profile_override is None:
        profile = _find_profile_link(base_path)
    else:
        profile = normpath(abspath(profile_override))
        if not os.path.exists(profile):
            raise_from(errors.ComplexInstantiationError(
                "%s doesn't exist" % (profile,)))
    paths = profiles.OnDiskProfile.split_abspath(profile)
    if paths is None:
        raise errors.ComplexInstantiationError(
            '%s expands to %s, but no profile detected' % (
                pjoin(base_path, 'make.profile'), profile))
    if os.path.isdir(user_profile_path):
        # A user profile dir overlays the on-disk profile.
        section = {
            "class": "pkgcore.ebuild.profiles.UserProfile",
            "parent_path": paths[0],
            "parent_profile": paths[1],
            "user_path": user_profile_path,
        }
    else:
        section = {
            "class": "pkgcore.ebuild.profiles.OnDiskProfile",
            "basepath": paths[0],
            "profile": paths[1],
        }
    config["profile"] = basics.AutoConfigSection(section)
def add_profile(config, config_dir, profile_override=None):
    """Register the 'profile' config section for ``config_dir``."""
    if profile_override is None:
        profile = _find_profile_link(config_dir)
    else:
        profile = normpath(abspath(profile_override))
        if not os.path.exists(profile):
            raise_from(
                errors.ComplexInstantiationError("%s doesn't exist" % (profile,)))
    paths = profiles.OnDiskProfile.split_abspath(profile)
    if paths is None:
        raise errors.ComplexInstantiationError(
            '%s expands to %s, but no profile detected' % (
                pjoin(config_dir, 'make.profile'), profile))
    user_profile_path = pjoin(config_dir, 'profile')
    if os.path.isdir(user_profile_path):
        # A local profile dir overlays the parent profile.
        section = {
            "class": "pkgcore.ebuild.profiles.UserProfile",
            "parent_path": paths[0],
            "parent_profile": paths[1],
            "user_path": user_profile_path,
        }
    else:
        section = {
            "class": "pkgcore.ebuild.profiles.OnDiskProfile",
            "basepath": paths[0],
            "profile": paths[1],
        }
    config["profile"] = basics.AutoConfigSection(section)
def reconstruct_eclasses(self, cpv, eclass_string):
    """Turn a string from :obj:`serialize_eclasses` back into eclass data.

    :param cpv: package identifier the data belongs to (used in error
        reporting only).
    :param eclass_string: serialized ``_eclasses_`` cache value.
    :return: list of (eclass_name, chf_value_tuple) pairs, suitable for
        feeding to dict().
    :raises TypeError: if eclass_string isn't a string.
    :raises errors.CacheCorruption: if the string is malformed.
    """
    if not isinstance(eclass_string, basestring):
        raise TypeError("eclass_string must be basestring, got %r" %
                        eclass_string)
    eclass_data = eclass_string.strip().split(self.eclass_splitter)
    if eclass_data == [""]:
        # occasionally this occurs in the fs backends. they suck.
        return []
    # NOTE: removed dead local `l = len(eclass_data)` from the original.
    chf_funcs = self.eclass_chf_deserializers
    tuple_len = len(chf_funcs) + 1
    if len(eclass_data) % tuple_len:
        raise errors.CacheCorruption(
            cpv, "_eclasses_ was of invalid len %i"
            "(must be mod %i)" % (len(eclass_data), tuple_len))
    i = iter(eclass_data)
    # roughly; deserializer grabs the values it needs, resulting
    # in a sequence of key/tuple pairs for each block of chfs;
    # this is in turn fed into the dict kls which converts it
    # to the dict.
    # Finally, the first item, and that chain, is zipped into
    # a dict; in effect, if 2 chfs, this results in a stream of-
    # (eclass_name, ((chf1,chf1_val), (chf2, chf2_val))).
    try:
        return [(eclass, tuple(self._deserialize_eclass_chfs(i)))
                for eclass in i]
    except ValueError:
        raise_from(errors.CacheCorruption(
            cpv, 'ValueError reading %r' % (eclass_string,)))
def reconstruct_eclasses(self, cpv, eclass_string):
    """Turn a string from :obj:`serialize_eclasses` back into eclass data.

    :param cpv: package identifier the data belongs to (for error reporting).
    :param eclass_string: serialized ``_eclasses_`` cache value.
    :return: list of (eclass_name, chf_value_tuple) pairs, suitable for
        feeding to dict().
    :raises TypeError: if eclass_string isn't a string.
    :raises errors.CacheCorruption: if the string is malformed.
    """
    if not isinstance(eclass_string, basestring):
        raise TypeError("eclass_string must be basestring, got %r" %
                        eclass_string)
    eclass_data = eclass_string.strip().split(self.eclass_splitter)
    if eclass_data == [""]:
        # occasionally this occurs in the fs backends. they suck.
        return []
    # NOTE: removed dead local `l = len(eclass_data)` from the original.
    chf_funcs = self.eclass_chf_deserializers
    tuple_len = len(chf_funcs) + 1
    if len(eclass_data) % tuple_len:
        raise errors.CacheCorruption(
            cpv, "_eclasses_ was of invalid len %i"
            "(must be mod %i)" % (len(eclass_data), tuple_len))
    i = iter(eclass_data)
    # roughly; deserializer grabs the values it needs, resulting
    # in a sequence of key/tuple pairs for each block of chfs;
    # this is in turn fed into the dict kls which converts it
    # to the dict.
    # Finally, the first item, and that chain, is zipped into
    # a dict; in effect, if 2 chfs, this results in a stream of-
    # (eclass_name, ((chf1,chf1_val), (chf2, chf2_val))).
    try:
        return [(eclass, tuple(self._deserialize_eclass_chfs(i)))
                for eclass in i]
    except ValueError:
        raise_from(errors.CacheCorruption(
            cpv, 'ValueError reading %r' % (eclass_string,)))
def _get_versions(self, catpkg):
    """Return the version components for every ebuild of a (cat, pkg)."""
    cppath = pjoin(self.base, catpkg[0], catpkg[1])
    prefix = catpkg[-1] + "-"
    lp = len(prefix)
    extension = self.extension
    ext_len = -len(extension)
    try:
        ret = tuple(x[lp:ext_len] for x in listdir_files(cppath)
                    if x[ext_len:] == extension and x[:lp] == prefix)
        if any(('scm' in x or '-try' in x) for x in ret):
            if self.ignore_paludis_versioning:
                # Silently drop the nonstandard versions.
                return tuple(x for x in ret
                             if ('scm' not in x and 'try' not in x))
            for x in ret:
                if 'scm' in x:
                    raise ebuild_errors.InvalidCPV(
                        "%s/%s-%s has nonstandard -scm "
                        "version component" % (catpkg + (x,)))
                elif 'try' in x:
                    raise ebuild_errors.InvalidCPV(
                        "%s/%s-%s has nonstandard -try "
                        "version component" % (catpkg + (x,)))
            raise AssertionError('unreachable codepoint was reached')
        return ret
    except EnvironmentError as e:
        raise_from(KeyError(
            "failed fetching versions for package %s: %s" % (
                pjoin(self.base, '/'.join(catpkg)), str(e))))
def convert_string(central, value, arg_type):
    """Conversion func for a string-based DictConfigSection."""
    if not isinstance(value, basestring):
        raise ValueError(
            'convert_string invoked with non basestring instance:'
            ' val(%r), arg_type(%r)' % (value, arg_type))
    if arg_type == 'callable':
        try:
            func = modules.load_attribute(value)
        except modules.FailedImport:
            compatibility.raise_from(
                errors.ConfigurationError('Cannot import %r' % (value,)))
        if not callable(func):
            raise errors.ConfigurationError('%r is not callable' % (value,))
        return func
    if arg_type.startswith('refs:'):
        return [LazyNamedSectionRef(central, arg_type, ref)
                for ref in str_to_list(value)]
    if arg_type.startswith('ref:'):
        return LazyNamedSectionRef(central, arg_type, str_to_str(value))
    if arg_type == 'repr':
        return 'str', value
    converter = _str_converters.get(arg_type)
    if converter is None:
        raise errors.ConfigurationError('Unknown type %r' % (arg_type,))
    return converter(value)
def load_make_config(vars_dict, path, allow_sourcing=False, required=True,
                     incrementals=False):
    """Parse a single make.conf-style file into vars_dict."""
    sourcing_command = 'source' if allow_sourcing else None
    try:
        new_vars = read_bash_dict(
            path, vars_dict=vars_dict, sourcing_command=sourcing_command)
    except EnvironmentError as err:
        if err.errno == errno.EACCES:
            raise_from(errors.PermissionDeniedError(path, write=False))
        if err.errno != errno.ENOENT or required:
            raise_from(
                errors.ParsingError("parsing %r" % (path,), exception=err))
        # Missing and not required: nothing to merge.
        return
    if incrementals:
        for key in const.incrementals:
            if key in vars_dict and key in new_vars:
                new_vars[key] = "%s %s" % (vars_dict[key], new_vars[key])
    # quirk of read_bash_dict; it returns only what was mutated.
    vars_dict.update(new_vars)
def keys_dict(self):
    """Parse the xpak index into an OrderedDict of key -> location data."""
    fd = self._fd
    index_start, index_len, data_len = self._check_magic(fd)
    data_start = index_start + index_len
    entries = OrderedDict()
    key_rewrite = self._reading_key_rewrites.get
    while index_len:
        key_len = struct.unpack(">L", fd.read(4))[0]
        key = fd.read(key_len)
        if compatibility.is_py3k:
            key = key.decode('ascii')
        if len(key) != key_len:
            raise MalformedXpak(
                "tried reading key %i of len %i, but hit EOF" % (
                    len(entries) + 1, key_len))
        try:
            offset, data_len = struct.unpack(">LL", fd.read(8))
        except struct.error:
            raise_from(MalformedXpak(
                "key %i, tried reading data offset/len but hit EOF" % (
                    len(entries) + 1)))
        key = key_rewrite(key, key)
        entries[key] = (
            data_start + offset, data_len,
            compatibility.is_py3k and not key.startswith("environment"))
        # 12 for key_len, offset, data_len longs
        index_len -= (key_len + 12)
    return entries
def setup_distfiles(self): if not self.verified_files and self.allow_fetching: ops = self.domain.pkg_operations(self.pkg, observer=self.observer) if not ops.fetch(): raise format.BuildError("failed fetching required distfiles") self.verified_files = ops._fetch_op.verified_files if self.verified_files: try: if os.path.exists(self.env["DISTDIR"]): if (os.path.isdir(self.env["DISTDIR"]) and not os.path.islink(self.env["DISTDIR"])): shutil.rmtree(self.env["DISTDIR"]) else: os.unlink(self.env["DISTDIR"]) except EnvironmentError as oe: raise_from(format.FailedDirectory( self.env["DISTDIR"], "failed removing existing file/dir/link at: exception %s" % oe)) if not ensure_dirs(self.env["DISTDIR"], mode=0770, gid=portage_gid): raise format.FailedDirectory(
def _verify(self, file_location, target, all_chksums=True, handlers=None):
    """
    Internal function for derivatives.

    Digs through chksums, and either returns None, or throws an
    errors.FetchFailed exception.
      - -2: file doesn't exist.
      - -1: if (size chksum is available, and file is smaller than
        stated chksum)
      - 0: if all chksums match
      - 1: if file is too large (if size chksums are available)
        or else size is right but a chksum didn't match.

    if all_chksums is True, all chksums must be verified; if false, all
    a handler can be found for are used.
    """
    # NOTE(review): kept for later use — presumably the comparison logic
    # distinguishing caller-supplied vs default handlers sits below this
    # view; confirm against the full file.
    nondefault_handlers = handlers
    if handlers is None:
        try:
            handlers = get_handlers(target.chksums)
        except KeyError, e:
            # py2-only except syntax; `e` is unused here.
            compatibility.raise_from(
                errors.FetchFailed(
                    file_location,
                    "Couldn't find a required checksum handler"))
def existent_path(value):
    """Resolve ``value`` to an absolute path, requiring it to exist."""
    if not os.path.exists(value):
        raise ValueError("path %r doesn't exist on disk" % (value,))
    try:
        return osutils.abspath(value)
    except EnvironmentError as exc:
        compatibility.raise_from(
            ValueError("while resolving path %r, encountered error: %r"
                       % (value, exc)))
def _check_magic(self, fd):
    """Validate xpak trailer/header magic; return (index_start, index_len, data_len)."""
    fd.seek(-16, 2)  # trailer occupies the last 16 bytes
    try:
        magic, size, post_magic = self.trailer.read(fd)
        if magic != self.trailer_pre_magic or post_magic != self.trailer_post_magic:
            raise MalformedXpak(
                "not an xpak segment, trailer didn't match: %r" % fd)
    except struct.error:
        raise_from(MalformedXpak(
            "not an xpak segment, failed parsing trailer: %r" % fd))
    # this is a bit daft, but the format seems to intentionally
    # have an off by 8 in the offset address. presumably cause the
    # header was added after the fact, either way we go +8 to
    # check the header magic.
    fd.seek(-(size + 8), 2)
    self.xpak_start = fd.tell()
    try:
        magic, index_len, data_len = self.header.read(fd)
        if magic != self.header_pre_magic:
            raise MalformedXpak(
                "not an xpak segment, header didn't match: %r" % fd)
    except struct.error:
        raise_from(MalformedXpak(
            "not an xpak segment, failed parsing header: %r" % fd))
    return self.xpak_start + self.header.size, index_len, data_len
def instantiate(self):
    """Return the wrapped instance, creating and caching it on first use.

    :raises errors.InstantiationError: if construction fails for any
        non-fatal reason.
    """
    if self._instance is None:
        try:
            self._instance = self._instantiate()
        except compatibility.IGNORED_EXCEPTIONS:
            raise
        except Exception:
            # py2-only `except Exception, e` with unused binding replaced.
            compatibility.raise_from(errors.InstantiationError(self.name))
    # BUG FIX: the original never returned the instance, so every call
    # yielded None; return the cached value (matches the sibling
    # implementation of this method elsewhere in the codebase).
    return self._instance
def _get_categories(self, *optional_category):
    """Return the category names present in this binpkg tree."""
    # return if optional_category is passed... cause it's not yet supported
    if optional_category:
        return {}
    try:
        return tuple(x for x in listdir_dirs(self.base)
                     if x.lower() != "all")
    except EnvironmentError as exc:
        raise_from(KeyError("failed fetching categories: %s" % str(exc)))
def _delitem(self, cpv):
    """Remove the cache entry for ``cpv``; KeyError if already absent."""
    try:
        os.remove(pjoin(self.location, cpv))
    except OSError as err:
        if err.errno == errno.ENOENT:
            raise KeyError(cpv)
        raise_from(errors.CacheCorruption(cpv, err))
def _acquire_fd(self):
    """Open (optionally creating) the lock file, storing the fd on self.

    BUG FIX: the original passed ``os.R_OK`` (an ``os.access`` mode
    constant, value 4 — not an ``os.open`` flag) as the open flags;
    write access is also needed to acquire LOCK_EX, so open O_RDWR
    (see https://github.com/pkgcore/snakeoil/pull/23).
    """
    flags = os.O_RDWR
    if self.create:
        flags |= os.O_CREAT
    try:
        self.fd = os.open(self.path, flags)
    except OSError as oe:
        compatibility.raise_from(GenericFailed(self.path, oe))
def _acquire_fd(self):
    """Open (optionally creating) the lock file, storing the fd on self.

    BUG FIX: the original passed ``os.R_OK`` (an ``os.access`` mode
    constant, value 4 — not an ``os.open`` flag) as the open flags;
    write access is also needed to acquire LOCK_EX, so open O_RDWR
    (see https://github.com/pkgcore/snakeoil/pull/23).
    """
    flags = os.O_RDWR
    if self.create:
        flags |= os.O_CREAT
    try:
        self.fd = os.open(self.path, flags)
    except OSError as oe:
        compatibility.raise_from(GenericFailed(self.path, oe))
def convert_to_restrict(sequence, default=packages.AlwaysTrue):
    """Convert an iterable to a list of atoms, or return the default"""
    l = []
    try:
        for x in sequence:
            l.append(parserestrict.parse_match(x))
    except parserestrict.ParseError, e:
        # py2-only except syntax; converts a parse failure into an
        # optparse error naming the offending argument.
        compatibility.raise_from(
            optparse.OptionValueError(
                "arg %r isn't a valid atom: %s" % (x, e)))
    # NOTE(review): no return of `l` (or `default`) is visible here —
    # presumably the remainder of this function lies outside this view.
def _sync(self, verbosity, output_fd):
    # Stat the sync target to decide between an initial pull and an
    # update of an existing checkout.
    try:
        st = os.stat(self.basedir)
    except EnvironmentError, ie:
        # py2-only except syntax.  Anything other than "does not exist"
        # is a hard failure.
        if ie.errno != errno.ENOENT:
            compatibility.raise_from(
                generic_exception(self, self.basedir, ie))
        command = self._initial_pull()
        chdir = None
    # NOTE(review): `st` is unused and no else/return branch is visible —
    # presumably the update-existing path and spawn logic continue
    # outside this view (cf. the fuller variant of this method).
def parse_match(text): """generate appropriate restriction for text Parsing basically breaks it down into chunks split by /, with each chunk allowing for prefix/postfix globbing- note that a postfixed glob on package token is treated as package attribute matching, not as necessarily a version match. If only one chunk is found, it's treated as a package chunk. Finally, it supports a nonstandard variation of atom syntax where the category can be dropped. Examples: - `*`: match all - `dev-*/*`: category must start with 'dev-' - `dev-*`: package must start with 'dev-' - `*-apps/portage*`: category must end in '-apps', package must start with 'portage' - `>=portage-2.1`: atom syntax, package 'portage', version greater then or equal to '2.1' :param text: string to attempt to parse :type text: string :return: :obj:`pkgcore.restrictions.packages` derivative """ # Ensure the text var is a string if we're under py3k. if not is_py3k: text = text.encode('ascii') orig_text = text = text.strip() if "!" in text: raise ParseError( "!, or any form of blockers make no sense in this usage: %s" % ( text,)) tsplit = text.rsplit("/", 1) if len(tsplit) == 1: ops, text = collect_ops(text) if not ops: if "*" in text: r = convert_glob(text) if r is None: return packages.AlwaysTrue return packages.PackageRestriction("package", r) elif text.startswith("*"): raise ParseError( "cannot do prefix glob matches with version ops: %s" % ( orig_text,)) # ok... fake category. whee. try: r = list(util.collect_package_restrictions( atom.atom("%scategory/%s" % (ops, text)).restrictions, attrs=("category",), invert=True)) except errors.MalformedAtom, e: raise_from(ParseError(str(e))) if len(r) == 1: return r[0] return packages.AndRestriction(*r)
def __init__(self, location, eclass_cache, cache=(),
             default_mirrors=None, ignore_paludis_versioning=False,
             allow_missing_manifests=False, repo_config=None):
    """
    :param location: on disk location of the tree
    :param cache: sequence of :obj:`pkgcore.cache.template.database`
        instances to use for storing metadata
    :param eclass_cache: If not None, :obj:`pkgcore.ebuild.eclass_cache`
        instance representing the eclasses available,
        if None, generates the eclass_cache itself
    :param default_mirrors: Either None, or sequence of mirrors to try
        fetching from first, then falling back to other uri
    :param ignore_paludis_versioning: If False, fail when -scm is
        encountred.  if True, silently ignore -scm ebuilds.
    :param allow_missing_manifests: stored on the instance as
        _allow_missing_chksums; presumably relaxes Manifest checksum
        enforcement — confirm against package_factory.
    :param repo_config: optional pre-built repo_objs.RepoConfig; one is
        created from ``location`` when omitted.
    """
    prototype.tree.__init__(self)
    if repo_config is None:
        repo_config = repo_objs.RepoConfig(location)
    self.config = repo_config
    self.base = self.location = location
    try:
        if not stat.S_ISDIR(os.stat(self.base).st_mode):
            raise errors.InitializationError(
                "base not a dir: %s" % self.base)
    except OSError:
        raise_from(errors.InitializationError(
            "lstat failed on base %s" % (self.base,)))
    self.eclass_cache = eclass_cache
    self.licenses = repo_objs.Licenses(location)
    fp = pjoin(self.base, metadata_offset, "thirdpartymirrors")
    mirrors = {}
    try:
        for k, v in read_dict(fp, splitter=None).iteritems():
            v = v.split()
            # randomize mirror order; presumably to spread fetch load
            # across mirrors — confirm.
            shuffle(v)
            mirrors[k] = v
    except EnvironmentError as ee:
        # A missing thirdpartymirrors file is fine; anything else isn't.
        if ee.errno != errno.ENOENT:
            raise
    # Normalize cache to a tuple, wrapping a single instance.
    if isinstance(cache, (tuple, list)):
        cache = tuple(cache)
    else:
        cache = (cache,)
    self.mirrors = mirrors
    self.default_mirrors = default_mirrors
    self.cache = cache
    self.ignore_paludis_versioning = ignore_paludis_versioning
    self._allow_missing_chksums = allow_missing_manifests
    self.package_class = self.package_factory(
        self, cache, self.eclass_cache, self.mirrors, self.default_mirrors)
    self._shared_pkg_cache = WeakValCache()
def _getitem(self, cpv):
    """Load and parse the cache entry for ``cpv``."""
    path = pjoin(self.location, cpv)
    try:
        lines = readlines_ascii(path, True, True, True)
        if lines is None:
            raise KeyError(cpv)
        return self._parse_data(lines, lines.mtime)
    except (EnvironmentError, ValueError) as err:
        raise_from(errors.CacheCorruption(cpv, err))
def _recast_exception_decorator(exc_class, name, functor, *args, **kwds):
    """Invoke functor, recasting unexpected exceptions into exc_class(name)."""
    try:
        return functor(*args, **kwds)
    except compatibility.IGNORED_EXCEPTIONS:
        # Fatal/interrupt exceptions pass through untouched.
        raise
    except Exception as err:
        if isinstance(err, exc_class):
            raise
        compatibility.raise_from(exc_class(name))
def render_value(self, central, name, arg_type):
    """Convert the stored argument *name* to *arg_type* via self.func.

    Unexpected failures (including a missing key) are recast into a
    ConfigurationError naming the offending argument.
    """
    try:
        # lookup stays inside the try: a missing key is reported the
        # same way as a conversion failure
        raw_value = self.dict[name]
        return self.func(central, raw_value, arg_type)
    except compatibility.IGNORED_EXCEPTIONS:
        raise
    except Exception:
        compatibility.raise_from(errors.ConfigurationError(
            "Failed converting argument %r to %s" % (name, arg_type)))
def existent_path(value):
    """Return the absolute form of *value*, requiring it to exist on disk.

    :raise ValueError: when the path is missing or cannot be resolved
    """
    if not os.path.exists(value):
        raise ValueError("path %r doesn't exist on disk" % (value,))
    try:
        return osutils.abspath(value)
    except EnvironmentError as err:
        # chain the OS-level failure into the caller-facing ValueError
        compatibility.raise_from(
            ValueError(
                "while resolving path %r, encountered error: %r"
                % (value, err)))
def instantiate(self):
    """Return the lazily-built instance, creating it on first use.

    Failures during creation are recast into an InstantiationError
    carrying this section's name.
    """
    # already built: hand back the cached object
    if self._instance is not None:
        return self._instance
    try:
        self._instance = self._instantiate()
    except compatibility.IGNORED_EXCEPTIONS:
        raise
    except Exception:
        compatibility.raise_from(errors.InstantiationError(self.name))
    return self._instance
def render_value(self, central, name, arg_type):
    """Render argument *name* as *arg_type* using this section's converter.

    Any unexpected exception raised while looking up or converting the
    value is recast as a ConfigurationError for the argument.
    """
    try:
        return self.func(central, self.dict[name], arg_type)
    except compatibility.IGNORED_EXCEPTIONS:
        raise
    except Exception:
        compatibility.raise_from(
            errors.ConfigurationError(
                "Failed converting argument %r to %s" % (name, arg_type)))
def __pull_metadata(self, key):
    """Fetch the metadata value for *key* from this package.

    ``contents`` and ``environment`` are synthesized; any other key is
    delegated to the wrapped package object.

    :raise KeyError: when the wrapped package has no such attribute
    """
    if key == "contents":
        return self.scan_contents(self.image_root)
    if key == "environment":
        return local_source(self.environment_path)
    try:
        return getattr(self.pkg, key)
    except AttributeError:
        # present the missing attribute as a mapping miss
        raise_from(KeyError(key))
def existent_path(value):
    """Check if file argument path exists."""
    if not os.path.exists(value):
        raise argparse.ArgumentTypeError("nonexistent path: %r" % (value, ))
    try:
        return osutils.abspath(value)
    except EnvironmentError as err:
        # resolution failed even though the path exists; chain the cause
        compatibility.raise_from(
            ValueError(
                "while resolving path %r, encountered error: %r"
                % (value, err)))
def _pull_manifest(self):
    """Parse this directory's Manifest file, if a source is configured.

    For thin manifests a missing Manifest file yields empty chksum data;
    any other read failure is recast as a ParseChksumError.
    """
    if self._source is None:
        return
    source, gpg = self._source
    try:
        data = parse_manifest(source, ignore_gpg=gpg)
    # fixed: legacy `except X, e` syntax replaced with `as e`, consistent
    # with the rest of the file and Python 3 compatible
    except EnvironmentError as e:
        if not self.thin or e.errno != errno.ENOENT:
            raise_from(errors.ParseChksumError(source, e))
        data = {}, {}, {}, {}
def _acquire_fd(self):
    """Open (optionally creating) the lock file, storing the fd on self.

    Write access is needed to acquire LOCK_EX:
    https://github.com/pkgcore/snakeoil/pull/23
    """
    open_flags = os.O_RDWR
    if self.create:
        open_flags |= os.O_CREAT
    try:
        self.fd = os.open(self.path, open_flags)
    except OSError as oe:
        # surface the open failure as the lock API's error type
        compatibility.raise_from(GenericFailed(self.path, oe))
def convert_to_restrict(sequence, default=packages.AlwaysTrue):
    """Convert an iterable to a list of atoms, or return the default"""
    restrictions = []
    try:
        for token in sequence:
            restrictions.append(parserestrict.parse_match(token))
    except parserestrict.ParseError as e:
        # fixed: argparse.ArgumentError requires (argument, message); the
        # old single-argument call raised TypeError instead of reporting
        # the bad atom.  None means "no specific argument".
        compatibility.raise_from(
            argparse.ArgumentError(
                None, "arg %r isn't a valid atom: %s" % (token, e)))
    return restrictions or [default]
def unpack(self):
    """execute the unpack phase"""
    # src-based setups must have their distfiles staged first
    if self.setup_is_for_src:
        self.setup_distfiles()
    if self.userpriv:
        try:
            os.chown(self.env["WORKDIR"], portage_uid, -1)
        except OSError as err:
            raise_from(format.GenericBuildError(
                "failed forcing %i uid for WORKDIR: %s"
                % (portage_uid, err)))
    return self._generic_phase("unpack", True, True)
def _enact_change(self, flags, blocking):
    """Apply flock *flags* to the held fd.

    :return: False when non-blocking and the lock is already held
        (EAGAIN); otherwise falls through.
        NOTE(review): no blocking branch or success return is visible
        here — confirm nothing was lost in extraction.
    """
    if self.fd is None:
        self._acquire_fd()
    # we do it this way, due to the fact try/except is a bit of a hit
    if not blocking:
        try:
            fcntl.flock(self.fd, flags | fcntl.LOCK_NB)
        # fixed: legacy `except X, e` replaced with `as e` for
        # consistency with the rest of the file and Python 3
        except IOError as ie:
            if ie.errno == errno.EAGAIN:
                return False
            compatibility.raise_from(GenericFailed(self.path, ie))
def _enact_change(self, flags, blocking):
    """Apply flock *flags* to the held fd.

    :return: False when non-blocking and the lock is already held
        (EAGAIN); otherwise falls through.
        NOTE(review): no blocking branch or success return is visible
        here — confirm nothing was lost in extraction.
    """
    if self.fd is None:
        self._acquire_fd()
    # we do it this way, due to the fact try/except is a bit of a hit
    if not blocking:
        try:
            fcntl.flock(self.fd, flags|fcntl.LOCK_NB)
        # fixed: legacy `except X, e` replaced with `as e` for
        # consistency with the rest of the file and Python 3
        except IOError as ie:
            if ie.errno == errno.EAGAIN:
                return False
            compatibility.raise_from(GenericFailed(self.path, ie))
def run_generic_phase(pkg, phase, env, userpriv, sandbox, fakeroot,
                      extra_handlers=None, failure_allowed=False,
                      logging=None):
    """Execute a single ebuild phase through an ebd processor.

    :param phase: phase to execute
    :param env: environment mapping for the phase
    :param userpriv: will we drop to
        :obj:`pkgcore.os_data.portage_uid` and
        :obj:`pkgcore.os_data.portage_gid` access for this phase?
    :param sandbox: should this phase be sandboxed?
    :param fakeroot: should the phase be fakeroot'd?  Only really useful
        for install phase, and is mutually exclusive with sandbox
    :param extra_handlers: extra command handlers
    :type extra_handlers: mapping from string to callable
    :param failure_allowed: allow failure without raising error
    :type failure_allowed: boolean
    :param logging: None or a filepath to log output to
    :return: True when the phase has finished execution
    """
    # requested privilege/isolation modes are honored only when the host
    # actually supports them
    userpriv = userpriv and is_userpriv_capable()
    sandbox = sandbox and is_sandbox_capable()
    fakeroot = fakeroot and is_fakeroot_capable()
    if env is None:
        env = expected_ebuild_env(pkg)
    ebd = request_ebuild_processor(userpriv=userpriv, sandbox=sandbox,
        fakeroot=fakeroot)
    # this is a bit of a hack; used until ebd accepts observers that handle
    # the output redirection on it's own.  Primary relevance is when
    # stdout/stderr are pointed at a file; we leave buffering on, just
    # force the flush for synchronization.
    sys.stdout.flush()
    sys.stderr.flush()
    try:
        if not ebd.run_phase(phase, env, env.get('T'), sandbox=sandbox,
                logging=logging, additional_commands=extra_handlers):
            if not failure_allowed:
                raise format.GenericBuildError(
                    phase + ": Failed building (False/0 return from handler)")
            # failure_allowed: the failed phase is downgraded to a warning
            logger.warning("executing phase %s: execution failed, ignoring" % (phase,))
    except Exception as e:
        # shut the processor down before recasting/propagating so it is
        # never returned to the pool in an unknown state
        ebd.shutdown_processor()
        release_ebuild_processor(ebd)
        if isinstance(e, IGNORED_EXCEPTIONS + (format.GenericBuildError,)):
            raise
        raise_from(
            format.GenericBuildError("Executing phase %s: Caught exception: "
                "%s" % (phase, e)))
    # success path: hand the processor back for reuse
    release_ebuild_processor(ebd)
    return True