Example no. 1
	def _xmatch(self, level, atom):
		"""
		This method does not expand old-style virtuals because it
		is restricted to returning matches for a single ${CATEGORY}/${PN}
		and old-style virtual matches are unreliable for that when querying
		multiple package databases. If necessary, old-style virtual
		expansion can be performed on atoms prior to calling this method.
		"""
		cp = portage.dep_getkey(atom)
		if level == "match-all":
			matches = set()
			for db in self._dbs:
				if hasattr(db, "xmatch"):
					matches.update(db.xmatch(level, atom))
				else:
					matches.update(db.match(atom))
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "match-visible":
			matches = set()
			for db in self._dbs:
				if hasattr(db, "xmatch"):
					matches.update(db.xmatch(level, atom))
				else:
					db_keys = list(db._aux_cache_keys)
					for cpv in db.match(atom):
						metadata = zip(db_keys,
							db.aux_get(cpv, db_keys))
						if not self._visible(db, cpv, metadata):
							continue
						matches.add(cpv)
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "bestmatch-visible":
			result = None
			for db in self._dbs:
				if hasattr(db, "xmatch"):
					cpv = db.xmatch("bestmatch-visible", atom)
					if not cpv or portage.cpv_getkey(cpv) != cp:
						continue
					if not result or cpv == portage.best([cpv, result]):
						result = cpv
				else:
					db_keys = Package.metadata_keys
					# break out of this loop with highest visible
					# match, checked in descending order
					for cpv in reversed(db.match(atom)):
						if portage.cpv_getkey(cpv) != cp:
							continue
						metadata = zip(db_keys,
							db.aux_get(cpv, db_keys))
						if not self._visible(db, cpv, metadata):
							continue
						if not result or cpv == portage.best([cpv, result]):
							result = cpv
						break
		else:
			raise NotImplementedError(level)
		return result
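
Note: the "bestmatch-visible" branch above leans on two portage primitives. Below is a minimal illustrative sketch (the package names are made up) of how portage.cpv_getkey() and portage.best() combine to pick the best candidate for a single ${CATEGORY}/${PN}:

import portage

# Made-up candidate cpvs, as a composite match might return them.
candidates = ["dev-lang/python-3.10.8", "dev-lang/python-3.11.4",
              "sys-apps/portage-3.0.49"]
cp = "dev-lang/python"
# Keep only cpvs belonging to the requested category/package, as _xmatch does.
same_cp = [cpv for cpv in candidates if portage.cpv_getkey(cpv) == cp]
# portage.best() returns the highest version among the remaining cpvs.
print(portage.best(same_cp))  # dev-lang/python-3.11.4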
Example no. 3
	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		self.root = self.root_config.root
		self._raw_metadata = _PackageMetadataWrapperBase(self.metadata)
		self.metadata = _PackageMetadataWrapper(self, self._raw_metadata)
		if not self.built:
			self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
		self.cp = portage.cpv_getkey(self.cpv)
		slot = self.slot
		if _slot_re.match(slot) is None:
			self._invalid_metadata('SLOT.invalid',
				"SLOT: invalid value: '%s'" % slot)
			# Avoid an InvalidAtom exception when creating slot_atom.
			# This package instance will be masked due to empty SLOT.
			slot = '0'
		if (self.iuse.enabled or self.iuse.disabled) and \
			not eapi_has_iuse_defaults(self.metadata["EAPI"]):
			if not self.installed:
				self._invalid_metadata('EAPI.incompatible',
					"IUSE contains defaults, but EAPI doesn't allow them")
		self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
		self.category, self.pf = portage.catsplit(self.cpv)
		self.cpv_split = portage.catpkgsplit(self.cpv)
		self.pv_split = self.cpv_split[1:]
		self._validate_deps()
		self.masks = self._masks()
		self.visible = self._visible(self.masks)
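
For context, a small sketch (with an illustrative cpv and SLOT, not taken from the example above) of the slot_atom construction used in this constructor:

import portage
from portage.dep import Atom

cpv, slot = "dev-lang/python-3.11.4", "3.11"   # hypothetical values
cp = portage.cpv_getkey(cpv)                   # "dev-lang/python"
slot_atom = Atom("%s:%s" % (cp, slot))         # "dev-lang/python:3.11"
print(slot_atom.cp, slot_atom.slot)            # dev-lang/python 3.11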
Example no. 4
	def cleanPackage(self, vardb, cpv):
		'''
		Before calling this function you should call lock and load.
		After calling this function you should call unlock.
		'''
		if not self._lock:
			raise AssertionError('cleanPackage needs the set to be locked')

		worldlist = list(self._atoms)
		mykey = cpv_getkey(cpv)
		newworldlist = []
		for x in worldlist:
			if x.cp == mykey:
				matches = vardb.match(x, use_cache=0)
				if not matches:
					#zap our world entry
					pass
				elif len(matches) == 1 and matches[0] == cpv:
					#zap our world entry
					pass
				else:
					#others are around; keep it.
					newworldlist.append(x)
			else:
				#this doesn't match the package we're unmerging; keep it.
				newworldlist.append(x)

		newworldlist.extend(self._nonatoms)
		self.replace(newworldlist)
Example no. 5
 def load(self):
     try:
         mtime = os.stat(self._filename).st_mtime
     except (OSError, IOError):
         mtime = None
     if not self._loaded or self._mtime != mtime:
         try:
             data, errors = self.loader.load()
             for fname in errors:
                 for e in errors[fname]:
                     self.errors.append(fname + ": " + e)
         except EnvironmentError as e:
             if e.errno != errno.ENOENT:
                 raise
             del e
             data = {}
         if self.greedy:
             atoms = []
             for a in data:
                 matches = self.dbapi.match(a)
                 for cpv in matches:
                     atoms.append("%s:%s" % (cpv_getkey(cpv), self.dbapi.aux_get(cpv, ["SLOT"])[0]))
                 # In addition to any installed slots, also try to pull
                 # in the latest new slot that may be available.
                 atoms.append(a)
         else:
             atoms = iter(data)
         self._setAtoms(atoms)
         self._mtime = mtime
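
A rough sketch of what the greedy branch above produces for one world entry, with a plain dict standing in for the dbapi match()/aux_get() calls (the cpvs and SLOT values are invented):

import portage

# Hypothetical installed packages: cpv -> SLOT.
installed = {"dev-lang/python-3.10.8": "3.10",
             "dev-lang/python-3.11.4": "3.11"}
world_atom = "dev-lang/python"

atoms = []
for cpv, slot in installed.items():
    # One slot atom per installed slot, e.g. "dev-lang/python:3.10".
    atoms.append("%s:%s" % (portage.cpv_getkey(cpv), slot))
# Plus the original atom, so the newest available slot can still be pulled in.
atoms.append(world_atom)
print(atoms)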
Example no. 6
    def __init__(
        self,
        package,
        keywords_list,
        porttree,
        ignoreslots=False,
        content_align="bottom",
        usebold=False,
        toplist="archlist",
    ):
        """Query all relevant data from portage databases."""
        (
            packages,
            self.repositories,
            self.slots,
            self.eapi,
            self.keywords,
        ) = self.__checkExist(porttree, package)
        # convert repositories from path to name
        self.repositories = [
            porttree.getRepositoryName(x) for x in self.repositories
        ]
        self.slot_length = max([len(x) for x in self.slots])
        repositories_length = max([len(x) for x in self.repositories])
        self.keyword_length = len(keywords_list)
        vers = self.VersionChecker(list(zip(packages, self.repositories)))
        self.versions = vers.versions
        masks = vers.masks
        self.version_length = max([len(x) for x in self.versions])
        self.version_count = len(self.versions)
        self.redundant = self.RedundancyChecker(masks, self.keywords,
                                                self.slots,
                                                ignoreslots).redundant
        redundant_length = max([len(x) for x in self.redundant])

        ver = self.__formatVersions(self.versions, content_align,
                                    self.version_length)
        kws = self.__formatKeywords(self.keywords, keywords_list, usebold,
                                    toplist)
        repos_configs = porttree.repositories.prepos
        eap = self.__formatEapis(self.eapi, self.repositories, repos_configs,
                                 1)
        red = self.__formatAdditional(self.redundant, "purple",
                                      redundant_length)
        slt = self.__formatAdditional(self.slots, "bold", self.slot_length)
        rep = self.__formatAdditional(self.repositories, "yellow",
                                      repositories_length)
        # those + numbers are spaces in printout. keywords are multiplied also because of that
        linesep = "%s+%s+%s+%s" % (
            "".ljust(self.version_length + 1, "-"),
            "".ljust(self.keyword_length * 2 + 1, "-"),
            "".ljust(redundant_length + self.slot_length + 1 + 4, "-"),
            "".ljust(repositories_length + 1, "-"),
        )

        self.content = self.__prepareContentResult(ver, kws, eap, red, slt,
                                                   self.slot_length, rep,
                                                   linesep)
        self.content_length = len(linesep)
        self.cp = port.cpv_getkey(packages[0])
Example no. 7
    def cpv_inject(self, mycpv, metadata=None):
        """Adds a cpv to the list of available packages. See the
		exclusive_slots constructor parameter for behavior with
		respect to SLOT metadata.
		@param mycpv: cpv for the package to inject
		@type mycpv: str
		@param metadata: dictionary of raw metadata for aux_get() calls
		@type metadata: dict
		"""
        self._clear_cache()
        mycp = cpv_getkey(mycpv)
        self.cpvdict[mycpv] = metadata
        myslot = None
        if self._exclusive_slots and metadata:
            myslot = metadata.get("SLOT", None)
        if myslot and mycp in self.cpdict:
            # If necessary, remove another package in the same SLOT.
            for cpv in self.cpdict[mycp]:
                if mycpv != cpv:
                    other_metadata = self.cpvdict[cpv]
                    if other_metadata:
                        if myslot == other_metadata.get("SLOT", None):
                            self.cpv_remove(cpv)
                            break
        if mycp not in self.cpdict:
            self.cpdict[mycp] = []
        if not mycpv in self.cpdict[mycp]:
            self.cpdict[mycp].append(mycpv)
Example no. 8
def _p_to_cp(p):
    """
    Convert a package name or a DEPEND atom to category/package format.
    Raises an exception if program name is ambiguous.
    """
    try:
        ret = portage.dep_getkey(p)
        if ret:
            return ret
    except portage.exception.InvalidAtom:
        pass

    try:
        ret = _porttree().dbapi.xmatch("bestmatch-visible", p)
        if ret:
            return portage.dep_getkey(ret)
    except portage.exception.InvalidAtom:
        pass

    try:
        ret = _porttree().dbapi.xmatch("match-all", p)
        if ret:
            return portage.cpv_getkey(ret[0])
    except portage.exception.InvalidAtom:
        pass

    return None
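
The fallback order above distinguishes dependency atoms from concrete cpvs. A short illustration (the inputs are made up) of why both key functions are tried:

import portage

# dep_getkey() accepts a dependency atom and returns its category/package part.
print(portage.dep_getkey(">=dev-lang/python-3.10"))  # dev-lang/python
# cpv_getkey() accepts a concrete category/package-version string instead.
print(portage.cpv_getkey("dev-lang/python-3.11.4"))  # dev-lang/python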
Example no. 9
	def __init__(self, package, keywords_list, porttree, ignoreslots = False, content_align = 'bottom', usebold = False, toplist = 'archlist'):
		"""Query all relevant data from portage databases."""
		packages, self.repositories, self.slots, self.keywords = self.__checkExist(porttree, package)
		# convert repositories from path to name
		self.repositories = [porttree.getRepositoryName(x) for x in self.repositories]
		self.slot_length = max([len(x) for x in self.slots])
		repositories_length = max([len(x) for x in self.repositories])
		self.keyword_length = len(keywords_list)
		vers = self.VersionChecker(packages)
		self.versions = vers.versions
		masks = vers.masks
		self.version_length = max([len(x) for x in self.versions])
		self.version_count = len(self.versions)
		self.redundant = self.RedundancyChecker(masks, self.keywords, self.slots, ignoreslots).redundant
		redundant_length = max([len(x) for x in self.redundant])

		ver = self.__formatVersions(self.versions, content_align, self.version_length)
		kws = self.__formatKeywords(self.keywords, keywords_list, usebold, toplist)
		red = self.__formatAdditional(self.redundant, 'purple', redundant_length)
		slt = self.__formatAdditional(self.slots, 'bold', self.slot_length)
		rep = self.__formatAdditional(self.repositories, 'yellow', repositories_length)
		# those + numbers are spaces in printout. keywords are multiplied also because of that
		linesep = '%s+%s+%s+%s' % (''.ljust(self.version_length+1, '-'),
			''.ljust(self.keyword_length*2+1, '-'),
			''.ljust(redundant_length+self.slot_length+3, '-'),
			''.ljust(repositories_length+1, '-')
		)

		self.content = self.__prepareContentResult(ver, kws, red, slt, self.slot_length, rep, linesep)
		self.content_length = len(linesep)
		self.cp = port.cpv_getkey(packages[0])
Example no. 10
    def cleanPackage(self, vardb, cpv):
        """
		Before calling this function you should call lock and load.
		After calling this function you should call unlock.
		"""
        if not self._lock:
            raise AssertionError("cleanPackage needs the set to be locked")

        worldlist = list(self._atoms)
        mykey = cpv_getkey(cpv)
        newworldlist = []
        for x in worldlist:
            if x.cp == mykey:
                matches = vardb.match(x, use_cache=0)
                if not matches:
                    # zap our world entry
                    pass
                elif len(matches) == 1 and matches[0] == cpv:
                    # zap our world entry
                    pass
                else:
                    # others are around; keep it.
                    newworldlist.append(x)
            else:
                # this doesn't match the package we're unmerging; keep it.
                newworldlist.append(x)

        newworldlist.extend(self._nonatoms)
        self.replace(newworldlist)
Example no. 12
	def load(self):
		try:
			mtime = os.stat(self._filename).st_mtime
		except (OSError, IOError):
			mtime = None
		if (not self._loaded or self._mtime != mtime):
			try:
				data, errors = self.loader.load()
				for fname in errors:
					for e in errors[fname]:
						self.errors.append(fname+": "+e)
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
				del e
				data = {}
			if self.greedy:
				atoms = []
				for a in data:
					matches = self.dbapi.match(a)
					for cpv in matches:
						atoms.append("%s:%s" % (cpv_getkey(cpv),
							self.dbapi.aux_get(cpv, ["SLOT"])[0]))
					# In addition to any installed slots, also try to pull
					# in the latest new slot that may be available.
					atoms.append(a)
			else:
				atoms = iter(data)
			self._setAtoms(atoms)
			self._mtime = mtime
Example no. 13
def isValidCP(cp):
	"""Check whether a string is a valid cat/pkg-name.

	This is for 2.0.51 vs. CVS HEAD compatibility, I've not found any function
	for that which would exist in both. Weird...

	@param cp: category/package string
	@rtype: bool
	"""

	if not '/' in cp:
		return False
	try:
		portage.cpv_getkey(cp+"-0")
	except:
		return False
	else:
		return True
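
Illustrative calls against the helper above (assuming it is in scope; the package names are arbitrary and the expected results follow directly from the code):

print(isValidCP("app-editors/vim"))  # True  -- "app-editors/vim-0" parses as a cpv
print(isValidCP("vim"))              # False -- rejected early, no '/' in the string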
Example no. 14
def _p_to_cp(p):
    '''
    Convert a package name or a DEPEND atom to category/package format.
    Raises an exception if program name is ambiguous.
    '''
    ret = _porttree().dbapi.xmatch("match-all", p)
    if ret:
        return portage.cpv_getkey(ret[0])
    return None
Example no. 17
def _p_to_cp(p):
    '''
    Convert a package name or a DEPEND atom to category/package format.
    Raises an exception if program name is ambiguous.
    '''
    ret = _porttree().dbapi.xmatch("match-all", p)
    if ret:
        return portage.cpv_getkey(ret[0])
    log.error('_p_to_cp: Package {0} not found by xmatch'.format(p))
    return None
Example no. 18
 def cpv_remove(self, mycpv):
     """Removes a cpv from the list of available packages."""
     self._clear_cache()
     mycp = cpv_getkey(mycpv)
     if mycpv in self.cpvdict:
         del self.cpvdict[mycpv]
     if mycp not in self.cpdict:
         return
     while mycpv in self.cpdict[mycp]:
         del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
     if not len(self.cpdict[mycp]):
         del self.cpdict[mycp]
Example no. 20
    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        # The SlotObject constructor assigns self.root_config from keyword
        # args; it is an instance of the _emerge.RootConfig.RootConfig class.
        self.root = self.root_config.root
        self._raw_metadata = _PackageMetadataWrapperBase(self.metadata)
        self.metadata = _PackageMetadataWrapper(self, self._raw_metadata)
        if not self.built:
            self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
        self.cp = portage.cpv_getkey(self.cpv)
        slot = self.slot
        if _slot_re.match(slot) is None:
            self._invalid_metadata('SLOT.invalid',
                                   "SLOT: invalid value: '%s'" % slot)
            # Avoid an InvalidAtom exception when creating slot_atom.
            # This package instance will be masked due to empty SLOT.
            slot = '0'
        if (self.iuse.enabled or self.iuse.disabled) and \
         not eapi_has_iuse_defaults(self.metadata["EAPI"]):
            if not self.installed:
                self._invalid_metadata(
                    'EAPI.incompatible',
                    "IUSE contains defaults, but EAPI doesn't allow them")
        self.slot_atom = portage.dep.Atom("%s%s%s" %
                                          (self.cp, _slot_separator, slot))
        self.category, self.pf = portage.catsplit(self.cpv)
        self.cpv_split = portage.catpkgsplit(self.cpv)
        self.pv_split = self.cpv_split[1:]
        if self.inherited is None:
            self.inherited = frozenset()
        repo = _gen_valid_repo(self.metadata.get('repository', ''))
        if not repo:
            repo = self.UNKNOWN_REPO
        self.metadata['repository'] = repo

        self._validate_deps()
        self.masks = self._masks()
        self.visible = self._visible(self.masks)
        if self.operation is None:
            if self.onlydeps or self.installed:
                self.operation = "nomerge"
            else:
                self.operation = "merge"

        self._hash_key = Package._gen_hash_key(cpv=self.cpv,
                                               installed=self.installed,
                                               onlydeps=self.onlydeps,
                                               operation=self.operation,
                                               repo_name=repo,
                                               root_config=self.root_config,
                                               type_name=self.type_name)
        self._hash_value = hash(self._hash_key)
Example no. 21
    def __init__(self,
                 package,
                 keywords_list,
                 porttree,
                 ignoreslots=False,
                 content_align='bottom',
                 usebold=False,
                 toplist='archlist'):
        """Query all relevant data from portage databases."""
        packages, self.repositories, self.slots, self.keywords = self.__checkExist(
            porttree, package)
        # convert repositories from path to name
        self.repositories = [
            porttree.getRepositoryName(x) for x in self.repositories
        ]
        self.slot_length = max([len(x) for x in self.slots])
        repositories_length = max([len(x) for x in self.repositories])
        self.keyword_length = len(keywords_list)
        vers = self.VersionChecker(packages)
        self.versions = vers.versions
        masks = vers.masks
        self.version_length = max([len(x) for x in self.versions])
        self.version_count = len(self.versions)
        self.redundant = self.RedundancyChecker(masks, self.keywords,
                                                self.slots,
                                                ignoreslots).redundant
        redundant_length = max([len(x) for x in self.redundant])

        ver = self.__formatVersions(self.versions, content_align,
                                    self.version_length)
        kws = self.__formatKeywords(self.keywords, keywords_list, usebold,
                                    toplist)
        red = self.__formatAdditional(self.redundant, 'purple',
                                      redundant_length)
        slt = self.__formatAdditional(self.slots, 'bold', self.slot_length)
        rep = self.__formatAdditional(self.repositories, 'yellow',
                                      repositories_length)
        # those + numbers are spaces in printout. keywords are multiplied also because of that
        linesep = '%s+%s+%s+%s' % (''.ljust(
            self.version_length + 1, '-'), ''.ljust(
                self.keyword_length * 2 + 1, '-'), ''.ljust(
                    redundant_length + self.slot_length + 3, '-'), ''.ljust(
                        repositories_length + 1, '-'))

        self.content = self.__prepareContentResult(ver, kws, red, slt,
                                                   self.slot_length, rep,
                                                   linesep)
        self.content_length = len(linesep)
        self.cp = port.cpv_getkey(packages[0])
Example no. 22
	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		# The SlotObject constructor assigns self.root_config from keyword
		# args; it is an instance of the _emerge.RootConfig.RootConfig class.
		self.root = self.root_config.root
		self._raw_metadata = _PackageMetadataWrapperBase(self.metadata)
		self.metadata = _PackageMetadataWrapper(self, self._raw_metadata)
		if not self.built:
			self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
		self.cp = portage.cpv_getkey(self.cpv)
		slot = self.slot
		if _slot_re.match(slot) is None:
			self._invalid_metadata('SLOT.invalid',
				"SLOT: invalid value: '%s'" % slot)
			# Avoid an InvalidAtom exception when creating slot_atom.
			# This package instance will be masked due to empty SLOT.
			slot = '0'
		if (self.iuse.enabled or self.iuse.disabled) and \
			not eapi_has_iuse_defaults(self.metadata["EAPI"]):
			if not self.installed:
				self._invalid_metadata('EAPI.incompatible',
					"IUSE contains defaults, but EAPI doesn't allow them")
		self.slot_atom = portage.dep.Atom("%s%s%s" % (self.cp, _slot_separator, slot))
		self.category, self.pf = portage.catsplit(self.cpv)
		self.cpv_split = portage.catpkgsplit(self.cpv)
		self.pv_split = self.cpv_split[1:]
		if self.inherited is None:
			self.inherited = frozenset()
		repo = _gen_valid_repo(self.metadata.get('repository', ''))
		if not repo:
			repo = self.UNKNOWN_REPO
		self.metadata['repository'] = repo

		self._validate_deps()
		self.masks = self._masks()
		self.visible = self._visible(self.masks)
		if self.operation is None:
			if self.onlydeps or self.installed:
				self.operation = "nomerge"
			else:
				self.operation = "merge"

		self._hash_key = Package._gen_hash_key(cpv=self.cpv,
			installed=self.installed, onlydeps=self.onlydeps,
			operation=self.operation, repo_name=repo,
			root_config=self.root_config,
			type_name=self.type_name)
		self._hash_value = hash(self._hash_key)
Example no. 23
def _cpv_to_cp(cpv):
    try:
        ret = portage.dep_getkey(cpv)
        if ret:
            return ret
    except portage.exception.InvalidAtom:
        pass

    try:
        ret = portage.cpv_getkey(cpv)
        if ret:
            return ret
    except portage.exception.InvalidAtom:
        pass

    return cpv
Example no. 24
    def _iter_cp_all(self):
        self._cp_map = cp_map = {}
        previous_cp = None
        for cpv in self._vardb._iter_cpv_all(sort=True):
            cp = portage.cpv_getkey(cpv)
            if cp is not None:
                cp_list = cp_map.get(cp)
                if cp_list is None:
                    cp_list = []
                    cp_map[cp] = cp_list
                cp_list.append(cpv)
                if previous_cp is not None and previous_cp != cp:
                    yield previous_cp
                previous_cp = cp

        if previous_cp is not None:
            yield previous_cp
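
The generator above incrementally builds a cp -> [cpv, ...] map. The same grouping, sketched with a plain dict and made-up cpvs instead of a real vardb iterator:

import portage

cpvs = ["app-editors/vim-9.0.1000", "app-editors/vim-9.0.2092",
        "dev-lang/python-3.11.4"]
cp_map = {}
for cpv in cpvs:
    cp_map.setdefault(portage.cpv_getkey(cpv), []).append(cpv)
print(sorted(cp_map))             # ['app-editors/vim', 'dev-lang/python']
print(cp_map["app-editors/vim"])  # both vim cpvs grouped under one cp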
Example no. 26
	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		self.root = self.root_config.root
		self.metadata = _PackageMetadataWrapper(self, self.metadata)
		if not self.built:
			self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
		self.cp = portage.cpv_getkey(self.cpv)
		slot = self.slot
		if _slot_re.match(slot) is None:
			self._invalid_metadata('SLOT.invalid',
				"SLOT: invalid value: '%s'" % slot)
			# Avoid an InvalidAtom exception when creating slot_atom.
			# This package instance will be masked due to empty SLOT.
			slot = '0'
		self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
		self.category, self.pf = portage.catsplit(self.cpv)
		self.cpv_split = portage.catpkgsplit(self.cpv)
		self.pv_split = self.cpv_split[1:]
		self.masks = self._masks()
		self.visible = self._visible(self.masks)
Example no. 27
 def __init__(self, **kwargs):
     Task.__init__(self, **kwargs)
     self.root = self.root_config.root
     self.metadata = _PackageMetadataWrapper(self, self.metadata)
     if not self.built:
         self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
     self.cp = portage.cpv_getkey(self.cpv)
     slot = self.slot
     if _slot_re.match(slot) is None:
         self._invalid_metadata('SLOT.invalid',
                                "SLOT: invalid value: '%s'" % slot)
         # Avoid an InvalidAtom exception when creating slot_atom.
         # This package instance will be masked due to empty SLOT.
         slot = '0'
     self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
     self.category, self.pf = portage.catsplit(self.cpv)
     self.cpv_split = portage.catpkgsplit(self.cpv)
     self.pv_split = self.cpv_split[1:]
     self.masks = self._masks()
     self.visible = self._visible(self.masks)
Example no. 28
def _p_to_cp(p):
    try:
        ret = portage.dep_getkey(p)
        if ret:
            return ret
    except portage.exception.InvalidAtom:
        pass

    try:
        ret = _porttree().dbapi.xmatch("bestmatch-visible", p)
        if ret:
            return portage.dep_getkey(ret)
    except portage.exception.InvalidAtom:
        pass

    try:
        ret = _porttree().dbapi.xmatch("match-all", p)
        if ret:
            return portage.cpv_getkey(ret[0])
    except portage.exception.InvalidAtom:
        pass

    return None
Example no. 29
def _unmerge_display(root_config, myopts, unmerge_action,
	unmerge_files, clean_delay=1, ordered=0,
	writemsg_level=portage.util.writemsg_level):
	"""
	Returns a tuple of (returncode, pkgmap) where returncode is
	os.EX_OK if no errors occur, and 1 otherwise.
	"""

	quiet = "--quiet" in myopts
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs=[]
	global_unmerge=0
	out = portage.output.EOutput()
	pkg_cache = {}
	db_keys = list(vartree.dbapi._aux_cache_keys)

	def _pkg(cpv):
		pkg = pkg_cache.get(cpv)
		if pkg is None:
			pkg = Package(built=True, cpv=cpv, installed=True,
				metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
				operation="uninstall", root_config=root_config,
				type_name="installed")
			pkg_cache[cpv] = pkg
		return pkg

	vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
	try:
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
		pass
	vdb_lock = None
	try:
		if os.access(vdb_path, os.W_OK):
			vartree.dbapi.lock()
			vdb_lock = True

		realsyslist = []
		sys_virt_map = {}
		for x in sets["system"].getAtoms():
			for atom in expand_new_virt(vartree.dbapi, x):
				if not atom.blocker:
					realsyslist.append(atom)
					if atom.cp != x.cp:
						sys_virt_map[atom.cp] = x.cp

		syslist = []
		for x in realsyslist:
			mycp = x.cp
			# Since Gentoo stopped using old-style virtuals in
			# 2011, typically it's possible to avoid getvirtuals()
			# calls entirely. It will not be triggered here by
			# new-style virtuals since those are expanded to
			# non-virtual atoms above by expand_new_virt().
			if mycp.startswith("virtual/") and \
				mycp in settings.getvirtuals():
				providers = []
				for provider in settings.getvirtuals()[mycp]:
					if vartree.dbapi.match(provider):
						providers.append(provider)
				if len(providers) == 1:
					syslist.extend(providers)
			else:
				syslist.append(mycp)
		syslist = frozenset(syslist)

		if not unmerge_files:
			if unmerge_action in ["rage-clean", "unmerge"]:
				print()
				print(bold("emerge %s" % unmerge_action) +
						" can only be used with specific package names")
				print()
				return 1, {}
			else:
				global_unmerge = 1

		localtree = vartree
		# process all arguments and add all
		# valid db entries to candidate_catpkgs
		if global_unmerge:
			if not unmerge_files:
				candidate_catpkgs.extend(vartree.dbapi.cp_all())
		else:
			#we've got command-line arguments
			if not unmerge_files:
				print("\nNo packages to %s have been provided.\n" %
						unmerge_action)
				return 1, {}
			for x in unmerge_files:
				arg_parts = x.split('/')
				if x[0] not in [".","/"] and \
					arg_parts[-1][-7:] != ".ebuild":
					#possible cat/pkg or dep; treat as such
					candidate_catpkgs.append(x)
				elif unmerge_action in ["prune","clean"]:
					print("\n!!! Prune and clean do not accept individual" + \
						" ebuilds as arguments;\n    skipping.\n")
					continue
				else:
					# it appears that the user is specifying an installed
					# ebuild and we're in "unmerge" mode, so it's ok.
					if not os.path.exists(x):
						print("\n!!! The path '"+x+"' doesn't exist.\n")
						return 1, {}
	
					absx   = os.path.abspath(x)
					sp_absx = absx.split("/")
					if sp_absx[-1][-7:] == ".ebuild":
						del sp_absx[-1]
						absx = "/".join(sp_absx)
	
					sp_absx_len = len(sp_absx)
	
					vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
	
					sp_vdb     = vdb_path.split("/")
					sp_vdb_len = len(sp_vdb)
	
					if not os.path.exists(absx+"/CONTENTS"):
						print("!!! Not a valid db dir: "+str(absx))
						return 1, {}
	
					if sp_absx_len <= sp_vdb_len:
						# The Path is shorter... so it can't be inside the vdb.
						print(sp_absx)
						print(absx)
						print("\n!!!",x,"cannot be inside "+ \
							vdb_path+"; aborting.\n")
						return 1, {}
	
					for idx in range(0,sp_vdb_len):
						if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
							print(sp_absx)
							print(absx)
							print("\n!!!", x, "is not inside "+\
								vdb_path+"; aborting.\n")
							return 1, {}
	
					print("="+"/".join(sp_absx[sp_vdb_len:]))
					candidate_catpkgs.append(
						"="+"/".join(sp_absx[sp_vdb_len:]))
	
		newline=""
		if (not "--quiet" in myopts):
			newline="\n"
		if settings["ROOT"] != "/":
			writemsg_level(darkgreen(newline+ \
				">>> Using system located in ROOT tree %s\n" % \
				settings["ROOT"]))

		if (("--pretend" in myopts) or ("--ask" in myopts)) and \
			not ("--quiet" in myopts):
			writemsg_level(darkgreen(newline+\
				">>> These are the packages that would be unmerged:\n"))

		# Preservation of order is required for --depclean and --prune so
		# that dependencies are respected. Use all_selected to eliminate
		# duplicate packages since the same package may be selected by
		# multiple atoms.
		pkgmap = []
		all_selected = set()
		for x in candidate_catpkgs:
			# cycle through all our candidate deps and determine
			# what will and will not get unmerged
			try:
				mymatch = vartree.dbapi.match(x)
			except portage.exception.AmbiguousPackageName as errpkgs:
				print("\n\n!!! The short ebuild name \"" + \
					x + "\" is ambiguous.  Please specify")
				print("!!! one of the following fully-qualified " + \
					"ebuild names instead:\n")
				for i in errpkgs[0]:
					print("    " + green(i))
				print()
				sys.exit(1)
	
			if not mymatch and x[0] not in "<>=~":
				mymatch = localtree.dep_match(x)
			if not mymatch:
				portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
					(x.replace("null/", ""), unmerge_action), noiselevel=-1)
				continue

			pkgmap.append(
				{"protected": set(), "selected": set(), "omitted": set()})
			mykey = len(pkgmap) - 1
			if unmerge_action in ["rage-clean", "unmerge"]:
					for y in mymatch:
						if y not in all_selected:
							pkgmap[mykey]["selected"].add(y)
							all_selected.add(y)
			elif unmerge_action == "prune":
				if len(mymatch) == 1:
					continue
				best_version = mymatch[0]
				best_slot = vartree.getslot(best_version)
				best_counter = vartree.dbapi.cpv_counter(best_version)
				for mypkg in mymatch[1:]:
					myslot = vartree.getslot(mypkg)
					mycounter = vartree.dbapi.cpv_counter(mypkg)
					if (myslot == best_slot and mycounter > best_counter) or \
						mypkg == portage.best([mypkg, best_version]):
						if myslot == best_slot:
							if mycounter < best_counter:
								# On slot collision, keep the one with the
								# highest counter since it is the most
								# recently installed.
								continue
						best_version = mypkg
						best_slot = myslot
						best_counter = mycounter
				pkgmap[mykey]["protected"].add(best_version)
				pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
					if mypkg != best_version and mypkg not in all_selected)
				all_selected.update(pkgmap[mykey]["selected"])
			else:
				# unmerge_action == "clean"
				slotmap={}
				for mypkg in mymatch:
					if unmerge_action == "clean":
						myslot = localtree.getslot(mypkg)
					else:
						# since we're pruning, we don't care about slots
						# and put all the pkgs in together
						myslot = 0
					if myslot not in slotmap:
						slotmap[myslot] = {}
					slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

				for mypkg in vartree.dbapi.cp_list(
					portage.cpv_getkey(mymatch[0])):
					myslot = vartree.getslot(mypkg)
					if myslot not in slotmap:
						slotmap[myslot] = {}
					slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

				for myslot in slotmap:
					counterkeys = list(slotmap[myslot])
					if not counterkeys:
						continue
					counterkeys.sort()
					pkgmap[mykey]["protected"].add(
						slotmap[myslot][counterkeys[-1]])
					del counterkeys[-1]

					for counter in counterkeys[:]:
						mypkg = slotmap[myslot][counter]
						if mypkg not in mymatch:
							counterkeys.remove(counter)
							pkgmap[mykey]["protected"].add(
								slotmap[myslot][counter])

					#be pretty and get them in order of merge:
					for ckey in counterkeys:
						mypkg = slotmap[myslot][ckey]
						if mypkg not in all_selected:
							pkgmap[mykey]["selected"].add(mypkg)
							all_selected.add(mypkg)
					# ok, now the last-merged package
					# is protected, and the rest are selected
		numselected = len(all_selected)
		if global_unmerge and not numselected:
			portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
			return 1, {}
	
		if not numselected:
			portage.writemsg_stdout(
				"\n>>> No packages selected for removal by " + \
				unmerge_action + "\n")
			return 1, {}
	finally:
		if vdb_lock:
			vartree.dbapi.flush_cache()
			vartree.dbapi.unlock()

	# generate a list of package sets that are directly or indirectly listed in "selected",
	# as there is no persistent list of "installed" sets
	installed_sets = ["selected"]
	stop = False
	pos = 0
	while not stop:
		stop = True
		pos = len(installed_sets)
		for s in installed_sets[pos - 1:]:
			if s not in sets:
				continue
			candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
			if candidates:
				stop = False
				installed_sets += candidates
	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
	del stop, pos

	# we don't want to unmerge packages that are still listed in user-editable package sets
	# listed in "world" as they would be remerged on the next update of "world" or the 
	# relevant package sets.
	unknown_sets = set()
	for cp in range(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
			try:
				pkg = _pkg(cpv)
			except KeyError:
				# It could have been uninstalled
				# by a concurrent process.
				continue

			if unmerge_action != "clean" and root_config.root == "/":
				skip_pkg = False
				if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM,
						[pkg]):
					msg = ("Not unmerging package %s "
							"since there is no valid reason for Portage to "
							"%s itself.") % (pkg.cpv, unmerge_action)
					skip_pkg = True
				elif vartree.dbapi._dblink(cpv).isowner(
						portage._python_interpreter):
					msg = ("Not unmerging package %s since there is no valid "
							"reason for Portage to %s currently used Python "
							"interpreter.") % (pkg.cpv, unmerge_action)
					skip_pkg = True
				if skip_pkg:
					for line in textwrap.wrap(msg, 75):
						out.eerror(line)
					# adjust pkgmap so the display output is correct
					pkgmap[cp]["selected"].remove(cpv)
					all_selected.remove(cpv)
					pkgmap[cp]["protected"].add(cpv)
					continue

			parents = []
			for s in installed_sets:
				# skip sets that the user requested to unmerge, and skip world 
				# user-selected set, since the package will be removed from
				# that set later on.
				if s in root_config.setconfig.active or s == "selected":
					continue

				if s not in sets:
					if s in unknown_sets:
						continue
					unknown_sets.add(s)
					out = portage.output.EOutput()
					out.eerror(("Unknown set '@%s' in %s%s") % \
						(s, root_config.settings['EROOT'], portage.const.WORLD_SETS_FILE))
					continue

				# only check instances of EditablePackageSet as other classes are generally used for
				# special purposes and can be ignored here (and are usually generated dynamically, so the
				# user can't do much about them anyway)
				if isinstance(sets[s], EditablePackageSet):

					# This is derived from a snippet of code in the
					# depgraph._iter_atoms_for_pkg() method.
					for atom in sets[s].iterAtomsForPackage(pkg):
						inst_matches = vartree.dbapi.match(atom)
						inst_matches.reverse() # descending order
						higher_slot = None
						for inst_cpv in inst_matches:
							try:
								inst_pkg = _pkg(inst_cpv)
							except KeyError:
								# It could have been uninstalled
								# by a concurrent process.
								continue

							if inst_pkg.cp != atom.cp:
								continue
							if pkg >= inst_pkg:
								# This is descending order, and we're not
								# interested in any versions <= pkg given.
								break
							if pkg.slot_atom != inst_pkg.slot_atom:
								higher_slot = inst_pkg
								break
						if higher_slot is None:
							parents.append(s)
							break
			if parents:
				print(colorize("WARN", "Package %s is going to be unmerged," % cpv))
				print(colorize("WARN", "but still listed in the following package sets:"))
				print("    %s\n" % ", ".join(parents))

	del installed_sets

	numselected = len(all_selected)
	if not numselected:
		writemsg_level(
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
		return 1, {}

	# Unmerge order only matters in some cases
	if not ordered:
		unordered = {}
		for d in pkgmap:
			selected = d["selected"]
			if not selected:
				continue
			cp = portage.cpv_getkey(next(iter(selected)))
			cp_dict = unordered.get(cp)
			if cp_dict is None:
				cp_dict = {}
				unordered[cp] = cp_dict
				for k in d:
					cp_dict[k] = set()
			for k, v in d.items():
				cp_dict[k].update(v)
		pkgmap = [unordered[cp] for cp in sorted(unordered)]

	for x in range(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		if not selected:
			continue
		for mytype, mylist in pkgmap[x].items():
			if mytype == "selected":
				continue
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(next(iter(selected)))
		for y in localtree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
			continue
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			virt_cp = sys_virt_map.get(cp)
			if virt_cp is None:
				cp_info = "'%s'" % (cp,)
			else:
				cp_info = "'%s' (%s)" % (cp, virt_cp)
			writemsg_level(colorize("BAD","\n\n!!! " + \
				"%s is part of your system profile.\n" % (cp_info,)),
				level=logging.WARNING, noiselevel=-1)
			writemsg_level(colorize("WARN","!!! Unmerging it may " + \
				"be damaging to your system.\n\n"),
				level=logging.WARNING, noiselevel=-1)
		if not quiet:
			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
		else:
			writemsg_level(bold(cp) + ": ", noiselevel=-1)
		for mytype in ["selected","protected","omitted"]:
			if not quiet:
				writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = []
				for mypkg in pkgmap[x][mytype]:
					try:
						sorted_pkgs.append(mypkg.cpv)
					except AttributeError:
						sorted_pkgs.append(_pkg_str(mypkg))
				sorted_pkgs.sort(key=cpv_sort_key())
				for mypkg in sorted_pkgs:
					if mytype == "selected":
						writemsg_level(
							colorize("UNMERGE_WARN", mypkg.version + " "),
							noiselevel=-1)
					else:
						writemsg_level(
							colorize("GOOD", mypkg.version + " "),
							noiselevel=-1)
			else:
				writemsg_level("none ", noiselevel=-1)
			if not quiet:
				writemsg_level("\n", noiselevel=-1)
		if quiet:
			writemsg_level("\n", noiselevel=-1)

	writemsg_level("\nAll selected packages: %s\n" %
				" ".join('=%s' % x for x in all_selected), noiselevel=-1)

	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
			" and " + colorize("GOOD", "'omitted'") + \
			" packages will not be removed.\n\n")

	return os.EX_OK, pkgmap
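
To make the "clean" branch above easier to follow, here is a reduced sketch of its per-slot decision (counters and cpvs are invented, and the extra protection of packages outside mymatch is ignored): the highest counter in each slot is protected, everything else in that slot is selected for unmerge.

# Hypothetical slot -> {counter: cpv} map, as the "clean" branch builds it.
slotmap = {
    "3.10": {101: "dev-lang/python-3.10.8", 95: "dev-lang/python-3.10.4"},
    "3.11": {120: "dev-lang/python-3.11.4"},
}
protected, selected = set(), set()
for myslot, counters in slotmap.items():
    counterkeys = sorted(counters)
    protected.add(counters[counterkeys[-1]])               # most recently installed
    selected.update(counters[k] for k in counterkeys[:-1])
print(sorted(protected))  # ['dev-lang/python-3.10.8', 'dev-lang/python-3.11.4']
print(sorted(selected))   # ['dev-lang/python-3.10.4']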
Example no. 30
File: ebuild.py Project: rubic/salt
def _cpv_to_name(cpv):
    if cpv == "":
        return ""
    return str(portage.cpv_getkey(cpv))
Example no. 31
def findPackages(
		options,
		exclude=None,
		destructive=False,
		time_limit=0,
		package_names=False,
		pkgdir=None,
		port_dbapi=portage.db[portage.root]["porttree"].dbapi,
		var_dbapi=portage.db[portage.root]["vartree"].dbapi
	):
	"""Find all obsolete binary packages.

	XXX: packages are found only by symlinks.
	Maybe I should also return .tbz2 files from All/ that have
	no corresponding symlinks.

	@param options: dict of options determined at runtime
	@param exclude: an exclusion dict as defined in
			exclude.parseExcludeFile class.
	@param destructive: boolean, defaults to False
	@param time_limit: integer time value as returned by parseTime()
	@param package_names: boolean, defaults to False.
			used only if destructive=True
	@param pkgdir: path to the binary package dir being checked
	@param port_dbapi: defaults to portage.db[portage.root]["porttree"].dbapi
					can be overridden for tests.
	@param var_dbapi: defaults to portage.db[portage.root]["vartree"].dbapi
					can be overridden for tests.

	@rtype: dict
	@return clean_me i.e. {'cat/pkg-ver.tbz2': [filepath],}
	"""
	if exclude is None:
		exclude = {}
	clean_me = {}
	# create a full package dictionary

	# now do an access test, os.walk does not error for "no read permission"
	try:
		test = os.listdir(pkgdir)
		del test
	except EnvironmentError as er:
		if options['ignore-failure']:
			exit(0)
		print( pp.error("Error accessing PKGDIR." ), file=sys.stderr)
		print( pp.error("(Check your make.conf file and environment)."), file=sys.stderr)
		print( pp.error("Error: %s" %str(er)), file=sys.stderr)
		exit(1)

	# if portage supports FEATURES=binpkg-multi-instance, then
	# cpv_all can return multiple instances per cpv, where
	# instances are distinguishable by some extra attributes
	# provided by portage's _pkg_str class
	bin_dbapi = portage.binarytree(pkgdir=pkgdir, settings=var_dbapi.settings).dbapi
	for cpv in bin_dbapi.cpv_all():
		mtime = int(bin_dbapi.aux_get(cpv, ['_mtime_'])[0])
		if time_limit and mtime >= time_limit:
			# time-limit exclusion
			continue
		# dict is cpv->[pkgs] (supports binpkg-multi-instance)
		clean_me.setdefault(cpv, []).append(cpv)

	# keep only obsolete ones
	if destructive and package_names:
		cp_all = dict.fromkeys(var_dbapi.cp_all())
	else:
		cp_all = {}
	for cpv in list(clean_me):
		if exclDictMatchCP(exclude,portage.cpv_getkey(cpv)):
			# exclusion because of the exclude file
			del clean_me[cpv]
			continue
		if not destructive and port_dbapi.cpv_exists(cpv):
			# exclusion because pkg still exists (in porttree)
			del clean_me[cpv]
			continue
		if destructive and var_dbapi.cpv_exists(cpv):
			buildtime = var_dbapi.aux_get(cpv, ['BUILD_TIME'])[0]
			clean_me[cpv] = [pkg for pkg in clean_me[cpv]
				# only keep path if BUILD_TIME is identical with vartree
				if bin_dbapi.aux_get(pkg, ['BUILD_TIME'])[0] != buildtime]
			if not clean_me[cpv]:
				# nothing we can clean for this package
				del clean_me[cpv]
				continue
		if portage.cpv_getkey(cpv) in cp_all and port_dbapi.cpv_exists(cpv):
			# exclusion because of --package-names
			del clean_me[cpv]

	# the getname method correctly supports FEATURES=binpkg-multi-instance,
	# allowing for multiple paths per cpv (the API used here is also compatible
	# with older portage which does not support binpkg-multi-instance)
	for cpv, pkgs in clean_me.items():
		clean_me[cpv] = [bin_dbapi.bintree.getname(pkg) for pkg in pkgs]

	return clean_me
Example no. 32
	def __get_path(self,cpv):
		cat,pn = catsplit(cpv_getkey(cpv))
		return os.path.join(self.portdir,cat,pn,os.path.basename(cpv) + ".ebuild")
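
A sketch of the path the helper above computes, using an illustrative cpv and a hypothetical portdir standing in for the helper's self.portdir:

import os
import portage

portdir = "/var/db/repos/gentoo"                     # hypothetical portdir
cpv = "app-editors/vim-9.0.2092"
cat, pn = portage.catsplit(portage.cpv_getkey(cpv))  # ["app-editors", "vim"]
path = os.path.join(portdir, cat, pn, os.path.basename(cpv) + ".ebuild")
print(path)  # /var/db/repos/gentoo/app-editors/vim/vim-9.0.2092.ebuild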
Example no. 33
	def move_ent(self, mylist):
		if not self.populated:
			self.populate()
		origcp = mylist[1]
		newcp = mylist[2]
		# sanity check
		for atom in (origcp, newcp):
			if not isjustname(atom):
				raise InvalidPackageName(str(atom))
		mynewcat = catsplit(newcp)[0]
		origmatches=self.dbapi.cp_list(origcp)
		moves = 0
		if not origmatches:
			return moves
		for mycpv in origmatches:
			mycpv_cp = portage.cpv_getkey(mycpv)
			if mycpv_cp != origcp:
				# Ignore PROVIDE virtual match.
				continue
			mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
			myoldpkg = catsplit(mycpv)[1]
			mynewpkg = catsplit(mynewcpv)[1]

			if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
				writemsg(_("!!! Cannot update binary: Destination exists.\n"),
					noiselevel=-1)
				writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
				continue

			tbz2path = self.getname(mycpv)
			if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
				writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
					noiselevel=-1)
				continue

			moves += 1
			mytbz2 = portage.xpak.tbz2(tbz2path)
			mydata = mytbz2.get_data()
			updated_items = update_dbentries([mylist], mydata)
			mydata.update(updated_items)
			mydata[_unicode_encode('PF',
				encoding=_encodings['repo.content'])] = \
				_unicode_encode(mynewpkg + "\n",
				encoding=_encodings['repo.content'])
			mydata[_unicode_encode('CATEGORY',
				encoding=_encodings['repo.content'])] = \
				_unicode_encode(mynewcat + "\n",
				encoding=_encodings['repo.content'])
			if mynewpkg != myoldpkg:
				ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
					encoding=_encodings['repo.content']), None)
				if ebuild_data is not None:
					mydata[_unicode_encode(mynewpkg + '.ebuild',
						encoding=_encodings['repo.content'])] = ebuild_data

			mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))

			self.dbapi.cpv_remove(mycpv)
			del self._pkg_paths[mycpv]
			new_path = self.getname(mynewcpv)
			self._pkg_paths[mynewcpv] = os.path.join(
				*new_path.split(os.path.sep)[-2:])
			if new_path != mytbz2:
				self._ensure_dir(os.path.dirname(new_path))
				_movefile(tbz2path, new_path, mysettings=self.settings)
				self._remove_symlink(mycpv)
				if new_path.split(os.path.sep)[-2] == "All":
					self._create_symlink(mynewcpv)
			self.inject(mynewcpv)

		return moves
Example no. 34
def unmerge(root_config, myopts, unmerge_action,
	unmerge_files, ldpath_mtimes, autoclean=0,
	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
	scheduler=None, writemsg_level=portage.util.writemsg_level):

	if clean_world:
		clean_world = myopts.get('--deselect') != 'n'
	quiet = "--quiet" in myopts
	enter_invalid = '--ask-enter-invalid' in myopts
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs=[]
	global_unmerge=0
	xterm_titles = "notitles" not in settings.features
	out = portage.output.EOutput()
	pkg_cache = {}
	db_keys = list(vartree.dbapi._aux_cache_keys)

	def _pkg(cpv):
		pkg = pkg_cache.get(cpv)
		if pkg is None:
			pkg = Package(built=True, cpv=cpv, installed=True,
				metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
				operation="uninstall", root_config=root_config,
				type_name="installed")
			pkg_cache[cpv] = pkg
		return pkg

	vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
	try:
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
		pass
	vdb_lock = None
	try:
		if os.access(vdb_path, os.W_OK):
			vdb_lock = portage.locks.lockdir(vdb_path)
		realsyslist = sets["system"].getAtoms()
		syslist = []
		for x in realsyslist:
			mycp = portage.dep_getkey(x)
			if mycp in settings.getvirtuals():
				providers = []
				for provider in settings.getvirtuals()[mycp]:
					if vartree.dbapi.match(provider):
						providers.append(provider)
				if len(providers) == 1:
					syslist.extend(providers)
			else:
				syslist.append(mycp)
	
		mysettings = portage.config(clone=settings)
	
		if not unmerge_files:
			if unmerge_action == "unmerge":
				print()
				print(bold("emerge unmerge") + " can only be used with specific package names")
				print()
				return 0
			else:
				global_unmerge = 1
	
		localtree = vartree
		# process all arguments and add all
		# valid db entries to candidate_catpkgs
		if global_unmerge:
			if not unmerge_files:
				candidate_catpkgs.extend(vartree.dbapi.cp_all())
		else:
			#we've got command-line arguments
			if not unmerge_files:
				print("\nNo packages to unmerge have been provided.\n")
				return 0
			for x in unmerge_files:
				arg_parts = x.split('/')
				if x[0] not in [".","/"] and \
					arg_parts[-1][-7:] != ".ebuild":
					#possible cat/pkg or dep; treat as such
					candidate_catpkgs.append(x)
				elif unmerge_action in ["prune","clean"]:
					print("\n!!! Prune and clean do not accept individual" + \
						" ebuilds as arguments;\n    skipping.\n")
					continue
				else:
					# it appears that the user is specifying an installed
					# ebuild and we're in "unmerge" mode, so it's ok.
					if not os.path.exists(x):
						print("\n!!! The path '"+x+"' doesn't exist.\n")
						return 0
	
					absx   = os.path.abspath(x)
					sp_absx = absx.split("/")
					if sp_absx[-1][-7:] == ".ebuild":
						del sp_absx[-1]
						absx = "/".join(sp_absx)
	
					sp_absx_len = len(sp_absx)
	
					vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
					vdb_len  = len(vdb_path)
	
					sp_vdb     = vdb_path.split("/")
					sp_vdb_len = len(sp_vdb)
	
					if not os.path.exists(absx+"/CONTENTS"):
						print("!!! Not a valid db dir: "+str(absx))
						return 0
	
					if sp_absx_len <= sp_vdb_len:
						# The Path is shorter... so it can't be inside the vdb.
						print(sp_absx)
						print(absx)
						print("\n!!!",x,"cannot be inside "+ \
							vdb_path+"; aborting.\n")
						return 0
	
					for idx in range(0,sp_vdb_len):
						if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
							print(sp_absx)
							print(absx)
							print("\n!!!", x, "is not inside "+\
								vdb_path+"; aborting.\n")
							return 0
	
					print("="+"/".join(sp_absx[sp_vdb_len:]))
					candidate_catpkgs.append(
						"="+"/".join(sp_absx[sp_vdb_len:]))
	
		newline=""
		if (not "--quiet" in myopts):
			newline="\n"
		if settings["ROOT"] != "/":
			writemsg_level(darkgreen(newline+ \
				">>> Using system located in ROOT tree %s\n" % \
				settings["ROOT"]))

		if (("--pretend" in myopts) or ("--ask" in myopts)) and \
			not ("--quiet" in myopts):
			writemsg_level(darkgreen(newline+\
				">>> These are the packages that would be unmerged:\n"))

		# Preservation of order is required for --depclean and --prune so
		# that dependencies are respected. Use all_selected to eliminate
		# duplicate packages since the same package may be selected by
		# multiple atoms.
		pkgmap = []
		all_selected = set()
		for x in candidate_catpkgs:
			# cycle through all our candidate deps and determine
			# what will and will not get unmerged
			try:
				mymatch = vartree.dbapi.match(x)
			except portage.exception.AmbiguousPackageName as errpkgs:
				print("\n\n!!! The short ebuild name \"" + \
					x + "\" is ambiguous.  Please specify")
				print("!!! one of the following fully-qualified " + \
					"ebuild names instead:\n")
				for i in errpkgs.args[0]:
					print("    " + green(i))
				print()
				sys.exit(1)
	
			if not mymatch and x[0] not in "<>=~":
				mymatch = localtree.dep_match(x)
			if not mymatch:
				portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
					(x, unmerge_action), noiselevel=-1)
				continue

			pkgmap.append(
				{"protected": set(), "selected": set(), "omitted": set()})
			mykey = len(pkgmap) - 1
			if unmerge_action == "unmerge":
				for y in mymatch:
					if y not in all_selected:
						pkgmap[mykey]["selected"].add(y)
						all_selected.add(y)
			elif unmerge_action == "prune":
				if len(mymatch) == 1:
					continue
				best_version = mymatch[0]
				best_slot = vartree.getslot(best_version)
				best_counter = vartree.dbapi.cpv_counter(best_version)
				for mypkg in mymatch[1:]:
					myslot = vartree.getslot(mypkg)
					mycounter = vartree.dbapi.cpv_counter(mypkg)
					if (myslot == best_slot and mycounter > best_counter) or \
						mypkg == portage.best([mypkg, best_version]):
						if myslot == best_slot:
							if mycounter < best_counter:
								# On slot collision, keep the one with the
								# highest counter since it is the most
								# recently installed.
								continue
						best_version = mypkg
						best_slot = myslot
						best_counter = mycounter
				pkgmap[mykey]["protected"].add(best_version)
				pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
					if mypkg != best_version and mypkg not in all_selected)
				all_selected.update(pkgmap[mykey]["selected"])
			else:
				# unmerge_action == "clean"
				slotmap={}
				for mypkg in mymatch:
					if unmerge_action == "clean":
						myslot = localtree.getslot(mypkg)
					else:
						# since we're pruning, we don't care about slots
						# and put all the pkgs in together
						myslot = 0
					if myslot not in slotmap:
						slotmap[myslot] = {}
					slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

				for mypkg in vartree.dbapi.cp_list(
					portage.cpv_getkey(mymatch[0])):
					myslot = vartree.getslot(mypkg)
					if myslot not in slotmap:
						slotmap[myslot] = {}
					slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

				for myslot in slotmap:
					counterkeys = list(slotmap[myslot])
					if not counterkeys:
						continue
					counterkeys.sort()
					pkgmap[mykey]["protected"].add(
						slotmap[myslot][counterkeys[-1]])
					del counterkeys[-1]

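					# Installed packages in this slot that the atom did not match
					# are protected as well, so only atom-matched versions remain
					# candidates for selection below.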
					for counter in counterkeys[:]:
						mypkg = slotmap[myslot][counter]
						if mypkg not in mymatch:
							counterkeys.remove(counter)
							pkgmap[mykey]["protected"].add(
								slotmap[myslot][counter])

					#be pretty and get them in order of merge:
					for ckey in counterkeys:
						mypkg = slotmap[myslot][ckey]
						if mypkg not in all_selected:
							pkgmap[mykey]["selected"].add(mypkg)
							all_selected.add(mypkg)
					# ok, now the last-merged package
					# is protected, and the rest are selected
		numselected = len(all_selected)
		if global_unmerge and not numselected:
			portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
			return 0
	
		if not numselected:
			portage.writemsg_stdout(
				"\n>>> No packages selected for removal by " + \
				unmerge_action + "\n")
			return 0
	finally:
		if vdb_lock:
			vartree.dbapi.flush_cache()
			portage.locks.unlockdir(vdb_lock)
	
	from portage._sets.base import EditablePackageSet
	
	# generate a list of package sets that are directly or indirectly listed in "selected",
	# as there is no persistent list of "installed" sets
	installed_sets = ["selected"]
	stop = False
	pos = 0
	while not stop:
		stop = True
		pos = len(installed_sets)
		for s in installed_sets[pos - 1:]:
			if s not in sets:
				continue
			candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
			if candidates:
				stop = False
				installed_sets += candidates
	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
	del stop, pos

	# we don't want to unmerge packages that are still listed in user-editable package sets
	# listed in "world" as they would be remerged on the next update of "world" or the 
	# relevant package sets.
	unknown_sets = set()
	for cp in range(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
			try:
				pkg = _pkg(cpv)
			except KeyError:
				# It could have been uninstalled
				# by a concurrent process.
				continue

			if unmerge_action != "clean" and \
				root_config.root == "/" and \
				portage.match_from_list(
				portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
				msg = ("Not unmerging package %s since there is no valid " + \
				"reason for portage to unmerge itself.") % (pkg.cpv,)
				for line in textwrap.wrap(msg, 75):
					out.eerror(line)
				# adjust pkgmap so the display output is correct
				pkgmap[cp]["selected"].remove(cpv)
				all_selected.remove(cpv)
				pkgmap[cp]["protected"].add(cpv)
				continue

			parents = []
			for s in installed_sets:
				# skip sets that the user requested to unmerge, and skip the
				# user-selected world set ("selected"), since the package will
				# be removed from that set later on.
				if s in root_config.setconfig.active or s == "selected":
					continue

				if s not in sets:
					if s in unknown_sets:
						continue
					unknown_sets.add(s)
					out = portage.output.EOutput()
					out.eerror(("Unknown set '@%s' in %s%s") % \
						(s, root_config.root, portage.const.WORLD_SETS_FILE))
					continue

				# only check instances of EditablePackageSet as other classes are generally used for
				# special purposes and can be ignored here (and are usually generated dynamically, so the
				# user can't do much about them anyway)
				if isinstance(sets[s], EditablePackageSet):

					# This is derived from a snippet of code in the
					# depgraph._iter_atoms_for_pkg() method.
					for atom in sets[s].iterAtomsForPackage(pkg):
						inst_matches = vartree.dbapi.match(atom)
						inst_matches.reverse() # descending order
						higher_slot = None
						for inst_cpv in inst_matches:
							try:
								inst_pkg = _pkg(inst_cpv)
							except KeyError:
								# It could have been uninstalled
								# by a concurrent process.
								continue

							if inst_pkg.cp != atom.cp:
								continue
							if pkg >= inst_pkg:
								# This is descending order, and we're not
								# interested in any versions <= pkg given.
								break
							if pkg.slot_atom != inst_pkg.slot_atom:
								higher_slot = inst_pkg
								break
						if higher_slot is None:
							parents.append(s)
							break
			if parents:
				print(colorize("WARN", "Package %s is going to be unmerged," % cpv))
				print(colorize("WARN", "but still listed in the following package sets:"))
				print("    %s\n" % ", ".join(parents))

	del installed_sets

	numselected = len(all_selected)
	if not numselected:
		writemsg_level(
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
		return 0

	# Unmerge order only matters in some cases
	if not ordered:
		unordered = {}
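		# Merge the per-atom entries so that the preview below shows a
		# single entry per ${CATEGORY}/${PN}.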
		for d in pkgmap:
			selected = d["selected"]
			if not selected:
				continue
			cp = portage.cpv_getkey(next(iter(selected)))
			cp_dict = unordered.get(cp)
			if cp_dict is None:
				cp_dict = {}
				unordered[cp] = cp_dict
				for k in d:
					cp_dict[k] = set()
			for k, v in d.items():
				cp_dict[k].update(v)
		pkgmap = [unordered[cp] for cp in sorted(unordered)]

	for x in range(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		if not selected:
			continue
		for mytype, mylist in pkgmap[x].items():
			if mytype == "selected":
				continue
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(next(iter(selected)))
		for y in localtree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
			continue
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			writemsg_level(colorize("BAD","\a\n\n!!! " + \
				"'%s' is part of your system profile.\n" % cp),
				level=logging.WARNING, noiselevel=-1)
			writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
				"be damaging to your system.\n\n"),
				level=logging.WARNING, noiselevel=-1)
			if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
				countdown(int(settings["EMERGE_WARNING_DELAY"]),
					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
		if not quiet:
			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
		else:
			writemsg_level(bold(cp) + ": ", noiselevel=-1)
		for mytype in ["selected","protected","omitted"]:
			if not quiet:
				writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
				sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
				for pn, ver, rev in sorted_pkgs:
					if rev == "r0":
						myversion = ver
					else:
						myversion = ver + "-" + rev
					if mytype == "selected":
						writemsg_level(
							colorize("UNMERGE_WARN", myversion + " "),
							noiselevel=-1)
					else:
						writemsg_level(
							colorize("GOOD", myversion + " "), noiselevel=-1)
			else:
				writemsg_level("none ", noiselevel=-1)
			if not quiet:
				writemsg_level("\n", noiselevel=-1)
		if quiet:
			writemsg_level("\n", noiselevel=-1)

	writemsg_level("\nAll selected packages: %s\n" % " ".join(all_selected), noiselevel=-1)

	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
			" and " + colorize("GOOD", "'omitted'") + \
			" packages will not be removed.\n\n")

	if "--pretend" in myopts:
		#we're done... return
		return 0
	if "--ask" in myopts:
		if userquery("Would you like to unmerge these packages?",
			enter_invalid) == "No":
			# enter pretend mode for correct formatting of results
			myopts["--pretend"] = True
			print()
			print("Quitting.")
			print()
			return 0
	#the real unmerging begins, after a short delay....
	if clean_delay and not autoclean:
		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

	for x in range(len(pkgmap)):
		for y in pkgmap[x]["selected"]:
			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
			mysplit = y.split("/")
			#unmerge...
			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
				mysettings, unmerge_action not in ["clean","prune"],
				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
				scheduler=scheduler)

			if retval != os.EX_OK:
				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
				if raise_on_error:
					raise UninstallFailure(retval)
				sys.exit(retval)
			else:
				if clean_world and hasattr(sets["selected"], "cleanPackage")\
						and hasattr(sets["selected"], "lock"):
					sets["selected"].lock()
					if hasattr(sets["selected"], "load"):
						sets["selected"].load()
					sets["selected"].cleanPackage(vartree.dbapi, y)
					sets["selected"].unlock()
				emergelog(xterm_titles, " >>> unmerge success: "+y)

	if clean_world and hasattr(sets["selected"], "remove")\
			and hasattr(sets["selected"], "lock"):
		sets["selected"].lock()
		# load is called inside remove()
		for s in root_config.setconfig.active:
			sets["selected"].remove(SETPREFIX + s)
		sets["selected"].unlock()

	return 1
Esempio n. 35
0
	def move_ent(self, mylist, repo_match=None):
		if not self.populated:
			self.populate()
		origcp = mylist[1]
		newcp = mylist[2]
		# sanity check
		for atom in (origcp, newcp):
			if not isjustname(atom):
				raise InvalidPackageName(_unicode(atom))
		mynewcat = catsplit(newcp)[0]
		origmatches=self.dbapi.cp_list(origcp)
		moves = 0
		if not origmatches:
			return moves
		for mycpv in origmatches:
			try:
				mycpv = self.dbapi._pkg_str(mycpv, None)
			except (KeyError, InvalidData):
				continue
			mycpv_cp = portage.cpv_getkey(mycpv)
			if mycpv_cp != origcp:
				# Ignore PROVIDE virtual match.
				continue
			if repo_match is not None \
				and not repo_match(mycpv.repo):
				continue

			# Use isvalidatom() to check if this move is valid for the
			# EAPI (characters allowed in package names may vary).
			if not isvalidatom(newcp, eapi=mycpv.eapi):
				continue

			mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
			myoldpkg = catsplit(mycpv)[1]
			mynewpkg = catsplit(mynewcpv)[1]

			if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
				writemsg(_("!!! Cannot update binary: Destination exists.\n"),
					noiselevel=-1)
				writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
				continue

			tbz2path = self.getname(mycpv)
			if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
				writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
					noiselevel=-1)
				continue

			moves += 1
			mytbz2 = portage.xpak.tbz2(tbz2path)
			mydata = mytbz2.get_data()
			updated_items = update_dbentries([mylist], mydata, parent=mycpv)
			mydata.update(updated_items)
			mydata[b'PF'] = \
				_unicode_encode(mynewpkg + "\n",
				encoding=_encodings['repo.content'])
			mydata[b'CATEGORY'] = \
				_unicode_encode(mynewcat + "\n",
				encoding=_encodings['repo.content'])
			if mynewpkg != myoldpkg:
				ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
					encoding=_encodings['repo.content']), None)
				if ebuild_data is not None:
					mydata[_unicode_encode(mynewpkg + '.ebuild',
						encoding=_encodings['repo.content'])] = ebuild_data

			mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))

			self.dbapi.cpv_remove(mycpv)
			del self._pkg_paths[self.dbapi._instance_key(mycpv)]
			metadata = self.dbapi._aux_cache_slot_dict()
			for k in self.dbapi._aux_cache_keys:
				v = mydata.get(_unicode_encode(k))
				if v is not None:
					v = _unicode_decode(v)
					metadata[k] = " ".join(v.split())
			mynewcpv = _pkg_str(mynewcpv, metadata=metadata)
			new_path = self.getname(mynewcpv)
			self._pkg_paths[
				self.dbapi._instance_key(mynewcpv)] = new_path[len(self.pkgdir)+1:]
			if new_path != tbz2path:
				self._ensure_dir(os.path.dirname(new_path))
				_movefile(tbz2path, new_path, mysettings=self.settings)
			self.inject(mynewcpv)

		return moves
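Only mylist[1] and mylist[2] are read by move_ent() above: the old and the new ${CATEGORY}/${PN}. In the package update files this typically corresponds to a three-element "move" entry, so a call would look roughly like the following sketch (the binarytree instance and the package names are purely illustrative):

    # hypothetical usage; bintree is assumed to be an already-populated binarytree
    moves = bintree.move_ent(["move", "dev-python/pycrypto", "dev-python/pycryptodome"])
    print("updated %d binary packages" % moves)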
Esempio n. 36
0
def _cpv_to_cp(cpv):
    ret = portage.cpv_getkey(cpv)
    if ret:
        return ret
    else:
        return cpv
Esempio n. 37
0
def _cpv_to_cp(cpv):
    ret = portage.cpv_getkey(cpv)
    if ret:
        return ret
    else:
        return cpv
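Both copies of _cpv_to_cp() above simply strip the version from a cpv string with portage.cpv_getkey(), returning the input unchanged when it cannot be parsed. For illustration (the package name is arbitrary):

    portage.cpv_getkey("app-editors/vim-9.0.1000")  # -> "app-editors/vim"
    _cpv_to_cp("app-editors/vim-9.0.1000")          # -> "app-editors/vim"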
Esempio n. 38
0
    def output(self):
        """Outputs the results of the search."""
        msg = []
        msg.append("\b\b  \n[ Results for search key : " + \
         bold(self.searchkey) + " ]\n")
        msg.append("[ Applications found : " + \
         bold(str(self.mlen)) + " ]\n\n")
        vardb = self.vartree.dbapi
        for mtype in self.matches:
            for match, masked in self.matches[mtype]:
                full_package = None
                if mtype == "pkg":
                    catpack = match
                    full_package = self.portdb.xmatch("bestmatch-visible",
                                                      match)
                    if not full_package:
                        #no match found; we don't want to query description
                        masked = 1
                        full_package = portage.best(
                            self.portdb.xmatch("match-all", match))
                elif mtype == "desc":
                    full_package = match
                    match = portage.cpv_getkey(match)
                elif mtype == "set":
                    msg.append(green("*") + "  " + bold(match) + "\n")
                    if self.verbose:
                        msg.append("      " + darkgreen("Description:") + \
                         "   " + \
                         self.sdict[match].getMetadata("DESCRIPTION") \
                         + "\n\n")
                    writemsg_stdout(''.join(msg), noiselevel=-1)
                if full_package:
                    try:
                        desc, homepage, license = self.portdb.aux_get(
                            full_package,
                            ["DESCRIPTION", "HOMEPAGE", "LICENSE"])
                    except KeyError:
                        msg.append(
                            "emerge: search: aux_get() failed, skipping\n")
                        continue
                    if masked:
                        msg.append(green("*") + "  " + \
                         white(match) + " " + red("[ Masked ]") + "\n")
                    else:
                        msg.append(green("*") + "  " + bold(match) + "\n")
                    myversion = self.getVersion(full_package,
                                                search.VERSION_RELEASE)

                    mysum = [0, 0]
                    file_size_str = None
                    mycat = match.split("/")[0]
                    mypkg = match.split("/")[1]
                    mycpv = match + "-" + myversion
                    myebuild = self.portdb.findname(mycpv)
                    if myebuild:
                        pkgdir = os.path.dirname(myebuild)
                        from portage import manifest
                        mf = manifest.Manifest(pkgdir,
                                               self.settings["DISTDIR"])
                        try:
                            uri_map = self.portdb.getFetchMap(mycpv)
                        except portage.exception.InvalidDependString as e:
                            file_size_str = "Unknown (%s)" % (e, )
                            del e
                        else:
                            try:
                                mysum[0] = mf.getDistfilesSize(uri_map)
                            except KeyError as e:
                                file_size_str = "Unknown (missing " + \
                                 "digest for %s)" % (e,)
                                del e

                    available = False
                    for db in self._dbs:
                        if db is not vardb and \
                         db.cpv_exists(mycpv):
                            available = True
                            if not myebuild and hasattr(db, "bintree"):
                                myebuild = db.bintree.getname(mycpv)
                                try:
                                    mysum[0] = os.stat(myebuild).st_size
                                except OSError:
                                    myebuild = None
                            break

                    if myebuild and file_size_str is None:
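                        # mysum[0] holds a size in bytes (distfiles or binary
                        # package); render it as kB with manual thousands
                        # separators.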
                        mystr = str(mysum[0] // 1024)
                        mycount = len(mystr)
                        while (mycount > 3):
                            mycount -= 3
                            mystr = mystr[:mycount] + "," + mystr[mycount:]
                        file_size_str = mystr + " kB"

                    if self.verbose:
                        if available:
                            msg.append("      %s %s\n" % \
                             (darkgreen("Latest version available:"),
                             myversion))
                        msg.append("      %s\n" % \
                         self.getInstallationStatus(mycat+'/'+mypkg))
                        if myebuild:
                            msg.append("      %s %s\n" % \
                             (darkgreen("Size of files:"), file_size_str))
                        msg.append("      " + darkgreen("Homepage:") + \
                         "      " + homepage + "\n")
                        msg.append("      " + darkgreen("Description:") \
                         + "   " + desc + "\n")
                        msg.append("      " + darkgreen("License:") + \
                         "       " + license + "\n\n")
        writemsg_stdout(''.join(msg), noiselevel=-1)
Esempio n. 39
0
def _cpv_to_name(cpv):
    if cpv == '':
        return ''
    return str(portage.cpv_getkey(cpv))
Esempio n. 40
0
def _unmerge_display(root_config,
                     myopts,
                     unmerge_action,
                     unmerge_files,
                     clean_delay=1,
                     ordered=0,
                     writemsg_level=portage.util.writemsg_level):
    """
    Returns a tuple of (returncode, pkgmap) where returncode is
    os.EX_OK if no errors occur, and 1 otherwise.
    """

    quiet = "--quiet" in myopts
    settings = root_config.settings
    sets = root_config.sets
    vartree = root_config.trees["vartree"]
    candidate_catpkgs = []
    global_unmerge = 0
    out = portage.output.EOutput()
    pkg_cache = {}
    db_keys = list(vartree.dbapi._aux_cache_keys)

    def _pkg(cpv):
        pkg = pkg_cache.get(cpv)
        if pkg is None:
            pkg = Package(built=True,
                          cpv=cpv,
                          installed=True,
                          metadata=zip(db_keys,
                                       vartree.dbapi.aux_get(cpv, db_keys)),
                          operation="uninstall",
                          root_config=root_config,
                          type_name="installed")
            pkg_cache[cpv] = pkg
        return pkg

    vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
    try:
        # At least the parent needs to exist for the lock file.
        portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
        pass
    vdb_lock = None
    try:
        if os.access(vdb_path, os.W_OK):
            vartree.dbapi.lock()
            vdb_lock = True

        realsyslist = []
        sys_virt_map = {}
        for x in sets["system"].getAtoms():
            for atom in expand_new_virt(vartree.dbapi, x):
                if not atom.blocker:
                    realsyslist.append(atom)
                    if atom.cp != x.cp:
                        sys_virt_map[atom.cp] = x.cp

        syslist = []
        for x in realsyslist:
            mycp = x.cp
            # Since Gentoo stopped using old-style virtuals in
            # 2011, typically it's possible to avoid getvirtuals()
            # calls entirely. It will not be triggered here by
            # new-style virtuals since those are expanded to
            # non-virtual atoms above by expand_new_virt().
            if mycp.startswith("virtual/") and \
             mycp in settings.getvirtuals():
                providers = []
                for provider in settings.getvirtuals()[mycp]:
                    if vartree.dbapi.match(provider):
                        providers.append(provider)
                if len(providers) == 1:
                    syslist.extend(providers)
            else:
                syslist.append(mycp)
        syslist = frozenset(syslist)

        if not unmerge_files:
            if unmerge_action in ["rage-clean", "unmerge"]:
                print()
                print(
                    bold("emerge %s" % unmerge_action) +
                    " can only be used with specific package names")
                print()
                return 1, {}

            global_unmerge = 1

        # process all arguments and add all
        # valid db entries to candidate_catpkgs
        if global_unmerge:
            if not unmerge_files:
                candidate_catpkgs.extend(vartree.dbapi.cp_all())
        else:
            #we've got command-line arguments
            if not unmerge_files:
                print("\nNo packages to %s have been provided.\n" %
                      unmerge_action)
                return 1, {}
            for x in unmerge_files:
                arg_parts = x.split('/')
                if x[0] not in [".","/"] and \
                 arg_parts[-1][-7:] != ".ebuild":
                    #possible cat/pkg or dep; treat as such
                    candidate_catpkgs.append(x)
                elif unmerge_action in ["prune", "clean"]:
                    print("\n!!! Prune and clean do not accept individual" + \
                     " ebuilds as arguments;\n    skipping.\n")
                    continue
                else:
                    # it appears that the user is specifying an installed
                    # ebuild and we're in "unmerge" mode, so it's ok.
                    if not os.path.exists(x):
                        print("\n!!! The path '" + x + "' doesn't exist.\n")
                        return 1, {}

                    absx = os.path.abspath(x)
                    sp_absx = absx.split("/")
                    if sp_absx[-1][-7:] == ".ebuild":
                        del sp_absx[-1]
                        absx = "/".join(sp_absx)

                    sp_absx_len = len(sp_absx)

                    vdb_path = os.path.join(settings["EROOT"],
                                            portage.VDB_PATH)

                    sp_vdb = vdb_path.split("/")
                    sp_vdb_len = len(sp_vdb)

                    if not os.path.exists(absx + "/CONTENTS"):
                        print("!!! Not a valid db dir: " + str(absx))
                        return 1, {}

                    if sp_absx_len <= sp_vdb_len:
                        # The path is shorter, so it can't be inside the vdb.
                        print(sp_absx)
                        print(absx)
                        print("\n!!!",x,"cannot be inside "+ \
                         vdb_path+"; aborting.\n")
                        return 1, {}

                    for idx in range(0, sp_vdb_len):
                        if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
                            print(sp_absx)
                            print(absx)
                            print("\n!!!", x, "is not inside "+\
                             vdb_path+"; aborting.\n")
                            return 1, {}

                    print("=" + "/".join(sp_absx[sp_vdb_len:]))
                    candidate_catpkgs.append("=" +
                                             "/".join(sp_absx[sp_vdb_len:]))

        newline = ""
        if not quiet:
            newline = "\n"
        if settings["ROOT"] != "/":
            writemsg_level(darkgreen(newline+ \
             ">>> Using system located in ROOT tree %s\n" % \
             settings["ROOT"]))

        if ("--pretend" in myopts or "--ask" in myopts) and not quiet:
            writemsg_level(darkgreen(newline+\
             ">>> These are the packages that would be unmerged:\n"))

        # Preservation of order is required for --depclean and --prune so
        # that dependencies are respected. Use all_selected to eliminate
        # duplicate packages since the same package may be selected by
        # multiple atoms.
        pkgmap = []
        all_selected = set()
        for x in candidate_catpkgs:
            # cycle through all our candidate deps and determine
            # what will and will not get unmerged
            try:
                mymatch = vartree.dbapi.match(x)
            except portage.exception.AmbiguousPackageName as errpkgs:
                print("\n\n!!! The short ebuild name \"" + \
                 x + "\" is ambiguous.  Please specify")
                print("!!! one of the following fully-qualified " + \
                 "ebuild names instead:\n")
                for i in errpkgs.args[0]:
                    print("    " + green(i))
                print()
                sys.exit(1)

            if not mymatch and x[0] not in "<>=~":
                mymatch = vartree.dep_match(x)
            if not mymatch:
                portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
                 (x.replace("null/", ""), unmerge_action), noiselevel=-1)
                continue

            pkgmap.append({
                "protected": set(),
                "selected": set(),
                "omitted": set()
            })
            mykey = len(pkgmap) - 1
            if unmerge_action in ["rage-clean", "unmerge"]:
                for y in mymatch:
                    if y not in all_selected:
                        pkgmap[mykey]["selected"].add(y)
                        all_selected.add(y)
            elif unmerge_action == "prune":
                if len(mymatch) == 1:
                    continue
                best_version = mymatch[0]
                best_slot = vartree.getslot(best_version)
                best_counter = vartree.dbapi.cpv_counter(best_version)
                for mypkg in mymatch[1:]:
                    myslot = vartree.getslot(mypkg)
                    mycounter = vartree.dbapi.cpv_counter(mypkg)
                    if (myslot == best_slot and mycounter > best_counter) or \
                     mypkg == portage.best([mypkg, best_version]):
                        if myslot == best_slot:
                            if mycounter < best_counter:
                                # On slot collision, keep the one with the
                                # highest counter since it is the most
                                # recently installed.
                                continue
                        best_version = mypkg
                        best_slot = myslot
                        best_counter = mycounter
                pkgmap[mykey]["protected"].add(best_version)
                pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
                 if mypkg != best_version and mypkg not in all_selected)
                all_selected.update(pkgmap[mykey]["selected"])
            else:
                # unmerge_action == "clean"
                slotmap = {}
                for mypkg in mymatch:
                    if unmerge_action == "clean":
                        myslot = vartree.getslot(mypkg)
                    else:
                        # since we're pruning, we don't care about slots
                        # and put all the pkgs in together
                        myslot = 0
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

                for mypkg in vartree.dbapi.cp_list(
                        portage.cpv_getkey(mymatch[0])):
                    myslot = vartree.getslot(mypkg)
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

                for myslot in slotmap:
                    counterkeys = list(slotmap[myslot])
                    if not counterkeys:
                        continue
                    counterkeys.sort()
                    pkgmap[mykey]["protected"].add(
                        slotmap[myslot][counterkeys[-1]])
                    del counterkeys[-1]

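                    # Installed packages in this slot that the atom did not match
                    # are protected as well, so only atom-matched versions remain
                    # candidates for selection below.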
                    for counter in counterkeys[:]:
                        mypkg = slotmap[myslot][counter]
                        if mypkg not in mymatch:
                            counterkeys.remove(counter)
                            pkgmap[mykey]["protected"].add(
                                slotmap[myslot][counter])

                    #be pretty and get them in order of merge:
                    for ckey in counterkeys:
                        mypkg = slotmap[myslot][ckey]
                        if mypkg not in all_selected:
                            pkgmap[mykey]["selected"].add(mypkg)
                            all_selected.add(mypkg)
                    # ok, now the last-merged package
                    # is protected, and the rest are selected
        numselected = len(all_selected)
        if global_unmerge and not numselected:
            portage.writemsg_stdout(
                "\n>>> No outdated packages were found on your system.\n")
            return 1, {}

        if not numselected:
            portage.writemsg_stdout(
             "\n>>> No packages selected for removal by " + \
             unmerge_action + "\n")
            return 1, {}
    finally:
        if vdb_lock:
            vartree.dbapi.flush_cache()
            vartree.dbapi.unlock()

    # generate a list of package sets that are directly or indirectly listed in "selected",
    # as there is no persistent list of "installed" sets
    installed_sets = ["selected"]
    stop = False
    pos = 0
    while not stop:
        stop = True
        pos = len(installed_sets)
        for s in installed_sets[pos - 1:]:
            if s not in sets:
                continue
            candidates = [
                x[len(SETPREFIX):] for x in sets[s].getNonAtoms()
                if x.startswith(SETPREFIX)
            ]
            if candidates:
                stop = False
                installed_sets += candidates
    installed_sets = [
        x for x in installed_sets if x not in root_config.setconfig.active
    ]
    del stop, pos

    # we don't want to unmerge packages that are still listed in user-editable package sets
    # listed in "world" as they would be remerged on the next update of "world" or the
    # relevant package sets.
    unknown_sets = set()
    for cp in range(len(pkgmap)):
        for cpv in pkgmap[cp]["selected"].copy():
            try:
                pkg = _pkg(cpv)
            except KeyError:
                # It could have been uninstalled
                # by a concurrent process.
                continue

            if unmerge_action != "clean" and root_config.root == "/":
                skip_pkg = False
                if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM,
                                           [pkg]):
                    msg = ("Not unmerging package %s "
                           "since there is no valid reason for Portage to "
                           "%s itself.") % (pkg.cpv, unmerge_action)
                    skip_pkg = True
                elif vartree.dbapi._dblink(cpv).isowner(
                        portage._python_interpreter):
                    msg = ("Not unmerging package %s since there is no valid "
                           "reason for Portage to %s currently used Python "
                           "interpreter.") % (pkg.cpv, unmerge_action)
                    skip_pkg = True
                if skip_pkg:
                    for line in textwrap.wrap(msg, 75):
                        out.eerror(line)
                    # adjust pkgmap so the display output is correct
                    pkgmap[cp]["selected"].remove(cpv)
                    all_selected.remove(cpv)
                    pkgmap[cp]["protected"].add(cpv)
                    continue

            parents = []
            for s in installed_sets:
                # skip sets that the user requested to unmerge, and skip the
                # user-selected world set ("selected"), since the package will
                # be removed from that set later on.
                if s in root_config.setconfig.active or s == "selected":
                    continue

                if s not in sets:
                    if s in unknown_sets:
                        continue
                    unknown_sets.add(s)
                    out = portage.output.EOutput()
                    out.eerror(("Unknown set '@%s' in %s%s") % \
                     (s, root_config.settings['EROOT'], portage.const.WORLD_SETS_FILE))
                    continue

                # only check instances of EditablePackageSet as other classes are generally used for
                # special purposes and can be ignored here (and are usually generated dynamically, so the
                # user can't do much about them anyway)
                if isinstance(sets[s], EditablePackageSet):

                    # This is derived from a snippet of code in the
                    # depgraph._iter_atoms_for_pkg() method.
                    for atom in sets[s].iterAtomsForPackage(pkg):
                        inst_matches = vartree.dbapi.match(atom)
                        inst_matches.reverse()  # descending order
                        higher_slot = None
                        for inst_cpv in inst_matches:
                            try:
                                inst_pkg = _pkg(inst_cpv)
                            except KeyError:
                                # It could have been uninstalled
                                # by a concurrent process.
                                continue

                            if inst_pkg.cp != atom.cp:
                                continue
                            if pkg >= inst_pkg:
                                # This is descending order, and we're not
                                # interested in any versions <= pkg given.
                                break
                            if pkg.slot_atom != inst_pkg.slot_atom:
                                higher_slot = inst_pkg
                                break
                        if higher_slot is None:
                            parents.append(s)
                            break
            if parents:
                print(
                    colorize("WARN",
                             "Package %s is going to be unmerged," % cpv))
                print(
                    colorize(
                        "WARN",
                        "but still listed in the following package sets:"))
                print("    %s\n" % ", ".join(parents))

    del installed_sets

    numselected = len(all_selected)
    if not numselected:
        writemsg_level(
         "\n>>> No packages selected for removal by " + \
         unmerge_action + "\n")
        return 1, {}

    # Unmerge order only matters in some cases
    if not ordered:
        unordered = {}
        for d in pkgmap:
            selected = d["selected"]
            if not selected:
                continue
            cp = portage.cpv_getkey(next(iter(selected)))
            cp_dict = unordered.get(cp)
            if cp_dict is None:
                cp_dict = {}
                unordered[cp] = cp_dict
                for k in d:
                    cp_dict[k] = set()
            for k, v in d.items():
                cp_dict[k].update(v)
        pkgmap = [unordered[cp] for cp in sorted(unordered)]

    # Sort each set of selected packages
    if ordered:
        for pkg in pkgmap:
            pkg["selected"] = sorted(pkg["selected"], key=cpv_sort_key())

    for x in range(len(pkgmap)):
        selected = pkgmap[x]["selected"]
        if not selected:
            continue
        for mytype, mylist in pkgmap[x].items():
            if mytype == "selected":
                continue
            mylist.difference_update(all_selected)
        cp = portage.cpv_getkey(next(iter(selected)))
        for y in vartree.dep_match(cp):
            if y not in pkgmap[x]["omitted"] and \
             y not in pkgmap[x]["selected"] and \
             y not in pkgmap[x]["protected"] and \
             y not in all_selected:
                pkgmap[x]["omitted"].add(y)
        if global_unmerge and not pkgmap[x]["selected"]:
            #avoid cluttering the preview printout with stuff that isn't getting unmerged
            continue
        if not (pkgmap[x]["protected"]
                or pkgmap[x]["omitted"]) and cp in syslist:
            virt_cp = sys_virt_map.get(cp)
            if virt_cp is None:
                cp_info = "'%s'" % (cp, )
            else:
                cp_info = "'%s' (%s)" % (cp, virt_cp)
            writemsg_level(colorize("BAD","\n\n!!! " + \
             "%s is part of your system profile.\n" % (cp_info,)),
             level=logging.WARNING, noiselevel=-1)
            writemsg_level(colorize("WARN","!!! Unmerging it may " + \
             "be damaging to your system.\n\n"),
             level=logging.WARNING, noiselevel=-1)
        if not quiet:
            writemsg_level("\n %s\n" % (bold(cp), ), noiselevel=-1)
        else:
            writemsg_level(bold(cp) + ": ", noiselevel=-1)
        for mytype in ["selected", "protected", "omitted"]:
            if not quiet:
                writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
            if pkgmap[x][mytype]:
                sorted_pkgs = []
                for mypkg in pkgmap[x][mytype]:
                    try:
                        sorted_pkgs.append(mypkg.cpv)
                    except AttributeError:
                        sorted_pkgs.append(_pkg_str(mypkg))
                sorted_pkgs.sort(key=cpv_sort_key())
                for mypkg in sorted_pkgs:
                    if mytype == "selected":
                        writemsg_level(colorize("UNMERGE_WARN",
                                                mypkg.version + " "),
                                       noiselevel=-1)
                    else:
                        writemsg_level(colorize("GOOD", mypkg.version + " "),
                                       noiselevel=-1)
            else:
                writemsg_level("none ", noiselevel=-1)
            if not quiet:
                writemsg_level("\n", noiselevel=-1)
        if quiet:
            writemsg_level("\n", noiselevel=-1)

    writemsg_level("\nAll selected packages: %s\n" %
                   " ".join('=%s' % x for x in all_selected),
                   noiselevel=-1)

    writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
     " packages are slated for removal.\n")
    writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
      " and " + colorize("GOOD", "'omitted'") + \
      " packages will not be removed.\n\n")

    return os.EX_OK, pkgmap
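For context, a minimal sketch of how the (returncode, pkgmap) result of _unmerge_display() might be consumed; the setup of root_config, myopts and unmerge_files is assumed to exist as in the calling emerge code, and the loop itself is illustrative rather than taken from it:

    rval, pkgmap = _unmerge_display(root_config, myopts, "unmerge", unmerge_files)
    if rval == os.EX_OK:
        for entry in pkgmap:
            # each entry maps "selected"/"protected"/"omitted" to groups of installed cpvs
            for cpv in entry["selected"]:
                print("would unmerge:", cpv)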
Esempio n. 41
0
	def move_ent(self, mylist, repo_match=None):
		if not self.populated:
			self.populate()
		origcp = mylist[1]
		newcp = mylist[2]
		# sanity check
		for atom in (origcp, newcp):
			if not isjustname(atom):
				raise InvalidPackageName(str(atom))
		mynewcat = catsplit(newcp)[0]
		origmatches=self.dbapi.cp_list(origcp)
		moves = 0
		if not origmatches:
			return moves
		for mycpv in origmatches:
			mycpv_cp = portage.cpv_getkey(mycpv)
			if mycpv_cp != origcp:
				# Ignore PROVIDE virtual match.
				continue
			if repo_match is not None \
				and not repo_match(self.dbapi.aux_get(mycpv,
					['repository'])[0]):
				continue
			mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
			myoldpkg = catsplit(mycpv)[1]
			mynewpkg = catsplit(mynewcpv)[1]

			if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
				writemsg(_("!!! Cannot update binary: Destination exists.\n"),
					noiselevel=-1)
				writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
				continue

			tbz2path = self.getname(mycpv)
			if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
				writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
					noiselevel=-1)
				continue

			moves += 1
			mytbz2 = portage.xpak.tbz2(tbz2path)
			mydata = mytbz2.get_data()
			updated_items = update_dbentries([mylist], mydata)
			mydata.update(updated_items)
			mydata[_unicode_encode('PF',
				encoding=_encodings['repo.content'])] = \
				_unicode_encode(mynewpkg + "\n",
				encoding=_encodings['repo.content'])
			mydata[_unicode_encode('CATEGORY',
				encoding=_encodings['repo.content'])] = \
				_unicode_encode(mynewcat + "\n",
				encoding=_encodings['repo.content'])
			if mynewpkg != myoldpkg:
				ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
					encoding=_encodings['repo.content']), None)
				if ebuild_data is not None:
					mydata[_unicode_encode(mynewpkg + '.ebuild',
						encoding=_encodings['repo.content'])] = ebuild_data

			mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))

			self.dbapi.cpv_remove(mycpv)
			del self._pkg_paths[mycpv]
			new_path = self.getname(mynewcpv)
			self._pkg_paths[mynewcpv] = os.path.join(
				*new_path.split(os.path.sep)[-2:])
			if new_path != tbz2path:
				self._ensure_dir(os.path.dirname(new_path))
				_movefile(tbz2path, new_path, mysettings=self.settings)
				self._remove_symlink(mycpv)
				if new_path.split(os.path.sep)[-2] == "All":
					self._create_symlink(mynewcpv)
			self.inject(mynewcpv)

		return moves
Esempio n. 42
0
	def _populate(self, getbinpkgs=0):
		if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
			return 0

		# Clear all caches in case populate is called multiple times
		# as may be the case when _global_updates calls populate()
		# prior to performing package moves since it only wants to
		# operate on local packages (getbinpkgs=0).
		self._remotepkgs = None
		self.dbapi._clear_cache()
		self.dbapi._aux_cache.clear()
		if True:
			pkg_paths = {}
			self._pkg_paths = pkg_paths
			dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
			if "All" in dirs:
				dirs.remove("All")
			dirs.sort()
			dirs.insert(0, "All")
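			# Scan All/ first so that, when the same CPV also exists under a
			# category directory, the All/ copy is the one that gets recorded
			# (duplicates found later are skipped).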
			pkgindex = self._load_pkgindex()
			pf_index = None
			if not self._pkgindex_version_supported(pkgindex):
				pkgindex = self._new_pkgindex()
			header = pkgindex.header
			metadata = {}
			for d in pkgindex.packages:
				metadata[d["CPV"]] = d
			update_pkgindex = False
			for mydir in dirs:
				for myfile in listdir(os.path.join(self.pkgdir, mydir)):
					if not myfile.endswith(".tbz2"):
						continue
					mypath = os.path.join(mydir, myfile)
					full_path = os.path.join(self.pkgdir, mypath)
					s = os.lstat(full_path)
					if stat.S_ISLNK(s.st_mode):
						continue

					# Validate data from the package index and try to avoid
					# reading the xpak if possible.
					if mydir != "All":
						possibilities = None
						d = metadata.get(mydir+"/"+myfile[:-5])
						if d:
							possibilities = [d]
					else:
						if pf_index is None:
							pf_index = {}
							for mycpv in metadata:
								mycat, mypf = catsplit(mycpv)
								pf_index.setdefault(
									mypf, []).append(metadata[mycpv])
						possibilities = pf_index.get(myfile[:-5])
					if possibilities:
						match = None
						for d in possibilities:
							try:
								if long(d["MTIME"]) != s[stat.ST_MTIME]:
									continue
							except (KeyError, ValueError):
								continue
							try:
								if long(d["SIZE"]) != long(s.st_size):
									continue
							except (KeyError, ValueError):
								continue
							if not self._pkgindex_keys.difference(d):
								match = d
								break
						if match:
							mycpv = match["CPV"]
							if mycpv in pkg_paths:
								# discard duplicates (All/ is preferred)
								continue
							pkg_paths[mycpv] = mypath
							# update the path if the package has been moved
							oldpath = d.get("PATH")
							if oldpath and oldpath != mypath:
								update_pkgindex = True
							if mypath != mycpv + ".tbz2":
								d["PATH"] = mypath
								if not oldpath:
									update_pkgindex = True
							else:
								d.pop("PATH", None)
								if oldpath:
									update_pkgindex = True
							self.dbapi.cpv_inject(mycpv)
							if not self.dbapi._aux_cache_keys.difference(d):
								aux_cache = self.dbapi._aux_cache_slot_dict()
								for k in self.dbapi._aux_cache_keys:
									aux_cache[k] = d[k]
								self.dbapi._aux_cache[mycpv] = aux_cache
							continue
					if not os.access(full_path, os.R_OK):
						writemsg(_("!!! Permission denied to read " \
							"binary package: '%s'\n") % full_path,
							noiselevel=-1)
						self.invalids.append(myfile[:-5])
						continue
					metadata_bytes = portage.xpak.tbz2(full_path).get_data()
					mycat = _unicode_decode(metadata_bytes.get(
						_unicode_encode("CATEGORY",
						encoding=_encodings['repo.content']), ""),
						encoding=_encodings['repo.content'], errors='replace')
					mypf = _unicode_decode(metadata_bytes.get(
						_unicode_encode("PF",
						encoding=_encodings['repo.content']), ""),
						encoding=_encodings['repo.content'], errors='replace')
					slot = _unicode_decode(metadata_bytes.get(
						_unicode_encode("SLOT",
						encoding=_encodings['repo.content']), ""),
						encoding=_encodings['repo.content'], errors='replace')
					mypkg = myfile[:-5]
					if not mycat or not mypf or not slot:
						#old-style or corrupt package
						writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
							noiselevel=-1)
						missing_keys = []
						if not mycat:
							missing_keys.append("CATEGORY")
						if not mypf:
							missing_keys.append("PF")
						if not slot:
							missing_keys.append("SLOT")
						msg = []
						if missing_keys:
							missing_keys.sort()
							msg.append(_("Missing metadata key(s): %s.") % \
								", ".join(missing_keys))
						msg.append(_(" This binary package is not " \
							"recoverable and should be deleted."))
						from textwrap import wrap
						for line in wrap("".join(msg), 72):
							writemsg("!!! %s\n" % line, noiselevel=-1)
						self.invalids.append(mypkg)
						continue
					mycat = mycat.strip()
					slot = slot.strip()
					if mycat != mydir and mydir != "All":
						continue
					if mypkg != mypf.strip():
						continue
					mycpv = mycat + "/" + mypkg
					if mycpv in pkg_paths:
						# All is first, so it's preferred.
						continue
					if not self.dbapi._category_re.match(mycat):
						writemsg(_("!!! Binary package has an " \
							"unrecognized category: '%s'\n") % full_path,
							noiselevel=-1)
						writemsg(_("!!! '%s' has a category that is not" \
							" listed in %setc/portage/categories\n") % \
							(mycpv, self.settings["PORTAGE_CONFIGROOT"]),
							noiselevel=-1)
						continue
					pkg_paths[mycpv] = mypath
					self.dbapi.cpv_inject(mycpv)
					update_pkgindex = True
					d = metadata.get(mycpv, {})
					if d:
						try:
							if long(d["MTIME"]) != s[stat.ST_MTIME]:
								d.clear()
						except (KeyError, ValueError):
							d.clear()
					if d:
						try:
							if long(d["SIZE"]) != long(s.st_size):
								d.clear()
						except (KeyError, ValueError):
							d.clear()

					d["CPV"] = mycpv
					d["SLOT"] = slot
					d["MTIME"] = str(s[stat.ST_MTIME])
					d["SIZE"] = str(s.st_size)

					d.update(zip(self._pkgindex_aux_keys,
						self.dbapi.aux_get(mycpv, self._pkgindex_aux_keys)))
					try:
						self._eval_use_flags(mycpv, d)
					except portage.exception.InvalidDependString:
						writemsg(_("!!! Invalid binary package: '%s'\n") % \
							self.getname(mycpv), noiselevel=-1)
						self.dbapi.cpv_remove(mycpv)
						del pkg_paths[mycpv]

					# record location if it's non-default
					if mypath != mycpv + ".tbz2":
						d["PATH"] = mypath
					else:
						d.pop("PATH", None)
					metadata[mycpv] = d
					if not self.dbapi._aux_cache_keys.difference(d):
						aux_cache = self.dbapi._aux_cache_slot_dict()
						for k in self.dbapi._aux_cache_keys:
							aux_cache[k] = d[k]
						self.dbapi._aux_cache[mycpv] = aux_cache

			for cpv in list(metadata):
				if cpv not in pkg_paths:
					del metadata[cpv]

			# Do not bother to write the Packages index if $PKGDIR/All/ exists
			# since it will provide no benefit due to the need to read CATEGORY
			# from xpak.
			if update_pkgindex and os.access(self.pkgdir, os.W_OK):
				del pkgindex.packages[:]
				pkgindex.packages.extend(iter(metadata.values()))
				self._update_pkgindex_header(pkgindex.header)
				f = atomic_ofstream(self._pkgindex_file)
				pkgindex.write(f)
				f.close()

		if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
			writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
				noiselevel=-1)

		if getbinpkgs and 'PORTAGE_BINHOST' in self.settings:
			base_url = self.settings["PORTAGE_BINHOST"]
			from portage.const import CACHE_PATH
			try:
				from urllib.parse import urlparse
			except ImportError:
				from urlparse import urlparse
			urldata = urlparse(base_url)
			pkgindex_file = os.path.join(self.settings["ROOT"], CACHE_PATH, "binhost",
				urldata[1] + urldata[2], "Packages")
			pkgindex = self._new_pkgindex()
			try:
				f = codecs.open(_unicode_encode(pkgindex_file,
					encoding=_encodings['fs'], errors='strict'),
					mode='r', encoding=_encodings['repo.content'],
					errors='replace')
				try:
					pkgindex.read(f)
				finally:
					f.close()
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
			local_timestamp = pkgindex.header.get("TIMESTAMP", None)
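			# The body of the remote index is only downloaded below when its
			# TIMESTAMP header differs from this locally cached value.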
			try:
				from urllib.request import urlopen as urllib_request_urlopen
			except ImportError:
				from urllib import urlopen as urllib_request_urlopen
			rmt_idx = self._new_pkgindex()
			try:
				# urlparse.urljoin() only works correctly with recognized
				# protocols and requires the base url to have a trailing
				# slash, so join manually...
				f = urllib_request_urlopen(base_url.rstrip("/") + "/Packages")
				f_dec = codecs.iterdecode(f,
					_encodings['repo.content'], errors='replace')
				try:
					rmt_idx.readHeader(f_dec)
					remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
					if not remote_timestamp:
						# no timestamp in the header, something's wrong
						pkgindex = None
					else:
						if not self._pkgindex_version_supported(rmt_idx):
							writemsg(_("\n\n!!! Binhost package index version" \
							" is not supported: '%s'\n") % \
							rmt_idx.header.get("VERSION"), noiselevel=-1)
							pkgindex = None
						elif local_timestamp != remote_timestamp:
							rmt_idx.readBody(f_dec)
							pkgindex = rmt_idx
				finally:
					f.close()
			except EnvironmentError as e:
				writemsg(_("\n\n!!! Error fetching binhost package" \
					" info from '%s'\n") % base_url)
				writemsg("!!! %s\n\n" % str(e))
				del e
				pkgindex = None
			if pkgindex is rmt_idx:
				pkgindex.modified = False # don't update the header
				try:
					ensure_dirs(os.path.dirname(pkgindex_file))
					f = atomic_ofstream(pkgindex_file)
					pkgindex.write(f)
					f.close()
				except (IOError, PortageException):
					if os.access(os.path.dirname(pkgindex_file), os.W_OK):
						raise
					# The current user doesn't have permission to cache the
					# file, but that's alright.
			if pkgindex:
				# Organize remote package list as a cpv -> metadata map.
				self._remotepkgs = _pkgindex_cpv_map_latest_build(pkgindex)
				self._remote_has_index = True
				self._remote_base_uri = pkgindex.header.get("URI", base_url)
				self.__remotepkgs = {}
				for cpv in self._remotepkgs:
					self.dbapi.cpv_inject(cpv)
				self.populated = 1
				if True:
					# Remote package instances override local package
					# if they are not identical.
					hash_names = ["SIZE"] + self._pkgindex_hashes
					for cpv, local_metadata in metadata.items():
						remote_metadata = self._remotepkgs.get(cpv)
						if remote_metadata is None:
							continue
						# Use digests to compare identity.
						identical = True
						for hash_name in hash_names:
							local_value = local_metadata.get(hash_name)
							if local_value is None:
								continue
							remote_value = remote_metadata.get(hash_name)
							if remote_value is None:
								continue
							if local_value != remote_value:
								identical = False
								break
						if identical:
							del self._remotepkgs[cpv]
						else:
							# Override the local package in the aux_get cache.
							self.dbapi._aux_cache[cpv] = remote_metadata
				else:
					# Local package instances override remote instances.
					for cpv in metadata:
						self._remotepkgs.pop(cpv, None)
				return
			self._remotepkgs = {}
			try:
				chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
				if chunk_size < 8:
					chunk_size = 8
			except (ValueError, KeyError):
				chunk_size = 3000
			writemsg_stdout("\n")
			writemsg_stdout(
				colorize("GOOD", _("Fetching bininfo from ")) + \
				re.sub(r'//(.+):.+@(.+)/', r'//\1:*password*@\2/', base_url) + "\n")
			self.__remotepkgs = portage.getbinpkg.dir_get_metadata(
				self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
			#writemsg(green("  -- DONE!\n\n"))

			for mypkg in list(self.__remotepkgs):
				if "CATEGORY" not in self.__remotepkgs[mypkg]:
					#old-style or corrupt package
					writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
						noiselevel=-1)
					del self.__remotepkgs[mypkg]
					continue
				mycat = self.__remotepkgs[mypkg]["CATEGORY"].strip()
				fullpkg = mycat+"/"+mypkg[:-5]

				if fullpkg in metadata:
					# When using this old protocol, comparison with the remote
					# package isn't supported, so the local package is always
					# preferred even if getbinpkgsonly is enabled.
					continue

				if not self.dbapi._category_re.match(mycat):
					writemsg(_("!!! Remote binary package has an " \
						"unrecognized category: '%s'\n") % fullpkg,
						noiselevel=-1)
					writemsg(_("!!! '%s' has a category that is not" \
						" listed in %setc/portage/categories\n") % \
						(fullpkg, self.settings["PORTAGE_CONFIGROOT"]),
						noiselevel=-1)
					continue
				mykey = portage.cpv_getkey(fullpkg)
				try:
					# invalid tbz2's can hurt things.
					self.dbapi.cpv_inject(fullpkg)
					remote_metadata = self.__remotepkgs[mypkg]
					for k, v in remote_metadata.items():
						remote_metadata[k] = v.strip()

					# Eliminate metadata values with names that digestCheck
					# uses, since they are not valid when using the old
					# protocol. Typically this is needed for SIZE metadata
					# which corresponds to the size of the unpacked files
					# rather than the binpkg file size, triggering digest
					# verification failures as reported in bug #303211.
					remote_metadata.pop('SIZE', None)
					for k in portage.checksum.hashfunc_map:
						remote_metadata.pop(k, None)

					self._remotepkgs[fullpkg] = remote_metadata
				except SystemExit as e:
					raise
				except:
					writemsg(_("!!! Failed to inject remote binary package: %s\n") % fullpkg,
						noiselevel=-1)
					del self.__remotepkgs[mypkg]
					continue
		self.populated=1
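When both a local Packages index and a remote one are available, the loop above drops remote entries whose digests match the local copy and lets the remaining remote metadata override the aux_get cache. A minimal standalone sketch of that identity check, using plain dicts and illustrative hash names instead of the bintree caches:

def metadata_identical(local, remote, hash_names=("SIZE", "MD5", "SHA1")):
    """Return True when every digest present on both sides matches."""
    for name in hash_names:
        local_value = local.get(name)
        remote_value = remote.get(name)
        if local_value is None or remote_value is None:
            # A value missing on either side is not treated as a mismatch.
            continue
        if local_value != remote_value:
            return False
    return True

local_pkgs = {"app-misc/foo-1.0": {"SIZE": "1024", "MD5": "aa"}}
remote_pkgs = {"app-misc/foo-1.0": {"SIZE": "1024", "MD5": "aa"},
               "app-misc/bar-2.0": {"SIZE": "2048"}}
# Drop remote entries that duplicate a local package, mirroring the loop above.
for cpv, local_metadata in local_pkgs.items():
    remote_metadata = remote_pkgs.get(cpv)
    if remote_metadata is not None and metadata_identical(local_metadata, remote_metadata):
        del remote_pkgs[cpv]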
Esempio n. 43
0
	def move_ent(self, mylist, repo_match=None):
		if not self.populated:
			self.populate()
		origcp = mylist[1]
		newcp = mylist[2]
		# sanity check
		for atom in (origcp, newcp):
			if not isjustname(atom):
				raise InvalidPackageName(_unicode(atom))
		mynewcat = catsplit(newcp)[0]
		origmatches=self.dbapi.cp_list(origcp)
		moves = 0
		if not origmatches:
			return moves
		for mycpv in origmatches:
			try:
				mycpv = self.dbapi._pkg_str(mycpv, None)
			except (KeyError, InvalidData):
				continue
			mycpv_cp = portage.cpv_getkey(mycpv)
			if mycpv_cp != origcp:
				# Ignore PROVIDE virtual match.
				continue
			if repo_match is not None \
				and not repo_match(mycpv.repo):
				continue

			# Use isvalidatom() to check if this move is valid for the
			# EAPI (characters allowed in package names may vary).
			if not isvalidatom(newcp, eapi=mycpv.eapi):
				continue

			mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
			myoldpkg = catsplit(mycpv)[1]
			mynewpkg = catsplit(mynewcpv)[1]

			if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
				writemsg(_("!!! Cannot update binary: Destination exists.\n"),
					noiselevel=-1)
				writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
				continue

			tbz2path = self.getname(mycpv)
			if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
				writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
					noiselevel=-1)
				continue

			moves += 1
			mytbz2 = portage.xpak.tbz2(tbz2path)
			mydata = mytbz2.get_data()
			updated_items = update_dbentries([mylist], mydata, parent=mycpv)
			mydata.update(updated_items)
			mydata[b'PF'] = \
				_unicode_encode(mynewpkg + "\n",
				encoding=_encodings['repo.content'])
			mydata[b'CATEGORY'] = \
				_unicode_encode(mynewcat + "\n",
				encoding=_encodings['repo.content'])
			if mynewpkg != myoldpkg:
				ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
					encoding=_encodings['repo.content']), None)
				if ebuild_data is not None:
					mydata[_unicode_encode(mynewpkg + '.ebuild',
						encoding=_encodings['repo.content'])] = ebuild_data

			mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))

			self.dbapi.cpv_remove(mycpv)
			del self._pkg_paths[self.dbapi._instance_key(mycpv)]
			metadata = self.dbapi._aux_cache_slot_dict()
			for k in self.dbapi._aux_cache_keys:
				v = mydata.get(_unicode_encode(k))
				if v is not None:
					v = _unicode_decode(v)
					metadata[k] = " ".join(v.split())
			mynewcpv = _pkg_str(mynewcpv, metadata=metadata)
			new_path = self.getname(mynewcpv)
			self._pkg_paths[
				self.dbapi._instance_key(mynewcpv)] = new_path[len(self.pkgdir)+1:]
			if new_path != tbz2path:
				self._ensure_dir(os.path.dirname(new_path))
				_movefile(tbz2path, new_path, mysettings=self.settings)
			self.inject(mynewcpv)

		return moves
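move_ent() rewrites the package's xpak data in place: PF and CATEGORY receive the new values and the bundled ebuild entry is renamed. A simplified sketch of that rename on a plain dict of byte keys (the sample values are illustrative, not real xpak contents):

def rename_xpak_entries(mydata, oldpkg, newpkg, newcat, encoding="utf_8"):
    """Rewrite PF/CATEGORY and rename the bundled ebuild entry."""
    mydata[b"PF"] = (newpkg + "\n").encode(encoding)
    mydata[b"CATEGORY"] = (newcat + "\n").encode(encoding)
    if newpkg != oldpkg:
        ebuild_data = mydata.pop((oldpkg + ".ebuild").encode(encoding), None)
        if ebuild_data is not None:
            mydata[(newpkg + ".ebuild").encode(encoding)] = ebuild_data
    return mydata

data = {b"PF": b"foo-1.0\n", b"CATEGORY": b"app-misc\n", b"foo-1.0.ebuild": b"inherit ..."}
rename_xpak_entries(data, "foo-1.0", "bar-1.0", "app-misc")
# data now carries PF "bar-1.0", and the ebuild key is b"bar-1.0.ebuild"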
Esempio n. 44
0
	def output(self):
		"""Outputs the results of the search."""
		msg = []
		msg.append("\b\b  \n[ Results for search key : " + \
			bold(self.searchkey) + " ]\n")
		msg.append("[ Applications found : " + \
			bold(str(self.mlen)) + " ]\n\n")
		vardb = self.vartree.dbapi
		metadata_keys = set(Package.metadata_keys)
		metadata_keys.update(["DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI"])
		metadata_keys = tuple(metadata_keys)
		for mtype in self.matches:
			for match,masked in self.matches[mtype]:
				full_package = None
				if mtype == "pkg":
					full_package = self._xmatch(
						"bestmatch-visible", match)
					if not full_package:
						#no match found; we don't want to query description
						masked=1
						full_package = portage.best(
							self._xmatch("match-all",match))
				elif mtype == "desc":
					full_package = match
					match        = portage.cpv_getkey(match)
				elif mtype == "set":
					msg.append(green("*") + "  " + bold(match) + "\n")
					if self.verbose:
						msg.append("      " + darkgreen("Description:") + \
							"   " + \
							self.sdict[match].getMetadata("DESCRIPTION") \
							+ "\n\n")
				if full_package:
					try:
						metadata = dict(zip(metadata_keys,
							self._aux_get(full_package, metadata_keys)))
					except KeyError:
						msg.append("emerge: search: aux_get() failed, skipping\n")
						continue

					desc = metadata["DESCRIPTION"]
					homepage = metadata["HOMEPAGE"]
					license = metadata["LICENSE"]

					if masked:
						msg.append(green("*") + "  " + \
							white(match) + " " + red("[ Masked ]") + "\n")
					else:
						msg.append(green("*") + "  " + bold(match) + "\n")
					myversion = self.getVersion(full_package, search.VERSION_RELEASE)

					mysum = [0,0]
					file_size_str = None
					mycat = match.split("/")[0]
					mypkg = match.split("/")[1]
					mycpv = match + "-" + myversion
					myebuild = self._findname(mycpv)
					if myebuild:
						pkg = Package(built=False, cpv=mycpv,
							installed=False, metadata=metadata,
							root_config=self.root_config, type_name="ebuild")
						pkgdir = os.path.dirname(myebuild)
						mf = self.settings.repositories.get_repo_for_location(
							os.path.dirname(os.path.dirname(pkgdir)))
						mf = mf.load_manifest(
							pkgdir, self.settings["DISTDIR"])
						try:
							uri_map = _parse_uri_map(mycpv, metadata,
								use=pkg.use.enabled)
						except portage.exception.InvalidDependString as e:
							file_size_str = "Unknown (%s)" % (e,)
							del e
						else:
							try:
								mysum[0] = mf.getDistfilesSize(uri_map)
							except KeyError as e:
								file_size_str = "Unknown (missing " + \
									"digest for %s)" % (e,)
								del e

					available = False
					for db in self._dbs:
						if db is not vardb and \
							db.cpv_exists(mycpv):
							available = True
							if not myebuild and hasattr(db, "bintree"):
								myebuild = db.bintree.getname(mycpv)
								try:
									mysum[0] = os.stat(myebuild).st_size
								except OSError:
									myebuild = None
							break

					if myebuild and file_size_str is None:
						file_size_str = localized_size(mysum[0])

					if self.verbose:
						if available:
							msg.append("      %s %s\n" % \
								(darkgreen("Latest version available:"),
								myversion))
						msg.append("      %s\n" % \
							self.getInstallationStatus(mycat+'/'+mypkg))
						if myebuild:
							msg.append("      %s %s\n" % \
								(darkgreen("Size of files:"), file_size_str))
						msg.append("      " + darkgreen("Homepage:") + \
							"      " + homepage + "\n")
						msg.append("      " + darkgreen("Description:") \
							+ "   " + desc + "\n")
						msg.append("      " + darkgreen("License:") + \
							"       " + license + "\n\n")
		writemsg_stdout(''.join(msg), noiselevel=-1)
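The search output builds its metadata dictionary by zipping a fixed key tuple with the values returned from aux_get(), which always come back in key order. A small sketch of that pattern with a stubbed aux_get (the stub and its data are made up for illustration):

METADATA_KEYS = ("DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI")

def fake_aux_get(cpv, keys):
    # Stand-in for dbapi.aux_get(); returns values in the same order as the keys.
    stored = {"DESCRIPTION": "demo package", "HOMEPAGE": "https://example.org",
              "LICENSE": "MIT", "SRC_URI": "demo-1.0.tar.gz"}
    return [stored.get(k, "") for k in keys]

metadata = dict(zip(METADATA_KEYS, fake_aux_get("app-misc/demo-1.0", METADATA_KEYS)))
print(metadata["DESCRIPTION"], metadata["LICENSE"])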
Esempio n. 45
0
def findPackages(options,
                 exclude=None,
                 destructive=False,
                 time_limit=0,
                 package_names=False,
                 pkgdir=None,
                 port_dbapi=portage.db[portage.root]["porttree"].dbapi,
                 var_dbapi=portage.db[portage.root]["vartree"].dbapi):
    """Find all obsolete binary packages.

	XXX: packages are found only by symlinks.
	Maybe I should also return .tbz2 files from All/ that have
	no corresponding symlinks.

	@param options: dict of options determined at runtime
	@param exclude: an exclusion dict as defined in
			exclude.parseExcludeFile class.
	@param destructive: boolean, defaults to False
	@param time_limit: integer time value as returned by parseTime()
	@param package_names: boolean, defaults to False.
			used only if destructive=True
	@param pkgdir: path to the binary package dir being checked
	@param port_dbapi: defaults to portage.db[portage.root]["porttree"].dbapi
					can be overridden for tests.
	@param var_dbapi: defaults to portage.db[portage.root]["vartree"].dbapi
					can be overridden for tests.

	@rtype: dict
	@return clean_me i.e. {'cat/pkg-ver.tbz2': [filepath],}
	"""
    if exclude is None:
        exclude = {}
    clean_me = {}
    # create a full package dictionary

    # now do an access test, os.walk does not error for "no read permission"
    try:
        test = os.listdir(pkgdir)
        del test
    except EnvironmentError as er:
        if options['ignore-failure']:
            exit(0)
        print(pp.error("Error accessing PKGDIR."), file=sys.stderr)
        print(pp.error("(Check your make.conf file and environment)."),
              file=sys.stderr)
        print(pp.error("Error: %s" % str(er)), file=sys.stderr)
        exit(1)

    # if portage supports FEATURES=binpkg-multi-instance, then
    # cpv_all can return multiple instances per cpv, where
    # instances are distinguishable by some extra attributes
    # provided by portage's _pkg_str class
    bin_dbapi = portage.binarytree(pkgdir=pkgdir,
                                   settings=var_dbapi.settings).dbapi
    for cpv in bin_dbapi.cpv_all():
        mtime = int(bin_dbapi.aux_get(cpv, ['_mtime_'])[0])
        if time_limit and mtime >= time_limit:
            # time-limit exclusion
            continue
        # dict is cpv->[pkgs] (supports binpkg-multi-instance)
        clean_me.setdefault(cpv, []).append(cpv)

    # keep only obsolete ones
    if destructive and package_names:
        cp_all = dict.fromkeys(var_dbapi.cp_all())
    else:
        cp_all = {}
    for cpv in list(clean_me):
        if exclDictMatchCP(exclude, portage.cpv_getkey(cpv)):
            # exclusion because of the exclude file
            del clean_me[cpv]
            continue
        if not destructive and port_dbapi.cpv_exists(cpv):
            # exclusion because pkg still exists (in porttree)
            del clean_me[cpv]
            continue
        if destructive and var_dbapi.cpv_exists(cpv):
            buildtime = var_dbapi.aux_get(cpv, ['BUILD_TIME'])[0]
            clean_me[cpv] = [
                pkg for pkg in clean_me[cpv]
                # keep the path for cleaning only if BUILD_TIME differs from the vartree copy
                if bin_dbapi.aux_get(pkg, ['BUILD_TIME'])[0] != buildtime
            ]
            if not clean_me[cpv]:
                # nothing we can clean for this package
                del clean_me[cpv]
                continue
        if portage.cpv_getkey(cpv) in cp_all and port_dbapi.cpv_exists(cpv):
            # exclusion because of --package-names
            del clean_me[cpv]

    # the getname method correctly supports FEATURES=binpkg-multi-instance,
    # allowing for multiple paths per cpv (the API used here is also compatible
    # with older portage which does not support binpkg-multi-instance)
    for cpv, pkgs in clean_me.items():
        clean_me[cpv] = [bin_dbapi.bintree.getname(pkg) for pkg in pkgs]

    return clean_me
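In destructive mode the function keeps a binpkg instance only when its BUILD_TIME differs from the installed copy, so identical rebuilds are preserved. A simplified sketch of that filter with plain dicts (names and timestamps are illustrative):

def filter_identical_buildtime(candidates, installed_buildtime, buildtime_of):
    """Keep only instances whose BUILD_TIME differs from the installed copy."""
    return [pkg for pkg in candidates if buildtime_of(pkg) != installed_buildtime]

buildtimes = {"app-misc/foo-1.0::instance-1": "1600000000",
              "app-misc/foo-1.0::instance-2": "1700000000"}
kept = filter_identical_buildtime(list(buildtimes), "1600000000", buildtimes.get)
# kept == ["app-misc/foo-1.0::instance-2"]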
Esempio n. 46
0
def findPackages(
    options,
    exclude=None,
    destructive=False,
    time_limit=0,
    package_names=False,
    pkgdir=None,
    port_dbapi=portage.db[portage.root]["porttree"].dbapi,
    var_dbapi=portage.db[portage.root]["vartree"].dbapi,
):
    """Find all obsolete binary packages.

	XXX: packages are found only by symlinks.
	Maybe I should also return .tbz2 files from All/ that have
	no corresponding symlinks.

	@param options: dict of options determined at runtime
	@param exclude: an exclusion dict as defined in
			exclude.parseExcludeFile class.
	@param destructive: boolean, defaults to False
	@param time_limit: integer time value as returned by parseTime()
	@param package_names: boolean, defaults to False.
			used only if destructive=True
	@param pkgdir: path to the binary package dir being checked
	@param port_dbapi: defaults to portage.db[portage.root]["porttree"].dbapi
					can be overridden for tests.
	@param var_dbapi: defaults to portage.db[portage.root]["vartree"].dbapi
					can be overridden for tests.

	@rtype: dict
	@return clean_me i.e. {'cat/pkg-ver.tbz2': [filepath],}
	"""
    if exclude is None:
        exclude = {}
    clean_me = {}
    # create a full package dictionary

    # now do an access test, os.walk does not error for "no read permission"
    try:
        test = os.listdir(pkgdir)
        del test
    except EnvironmentError as er:
        print(pp.error("Error accessing PKGDIR."), file=sys.stderr)
        print(pp.error("(Check your /etc/make.conf and environment)."), file=sys.stderr)
        print(pp.error("Error: %s" % str(er)), file=sys.stderr)
        exit(1)
    for root, dirs, files in os.walk(pkgdir):
        if root[-3:] == "All":
            continue
        for file in files:
            if not file[-5:] == ".tbz2":
                # ignore non-tbz2 files
                continue
            path = os.path.join(root, file)
            category = os.path.split(root)[-1]
            cpv = category + "/" + file[:-5]
            st = os.lstat(path)
            if time_limit and (st[stat.ST_MTIME] >= time_limit):
                # time-limit exclusion
                continue
            # dict is cpv->[files] (2 files in general, because of symlink)
            clean_me[cpv] = [path]
            # if os.path.islink(path):
            if stat.S_ISLNK(st[stat.ST_MODE]):
                clean_me[cpv].append(os.path.realpath(path))
    # keep only obsolete ones
    if destructive:
        dbapi = var_dbapi
        if package_names:
            cp_all = dict.fromkeys(dbapi.cp_all())
        else:
            cp_all = {}
    else:
        dbapi = port_dbapi
        cp_all = {}
    for cpv in list(clean_me):
        if exclDictMatchCP(exclude, portage.cpv_getkey(cpv)):
            # exclusion because of the exclude file
            del clean_me[cpv]
            continue
        if dbapi.cpv_exists(cpv):
            # exclusion because pkg still exists (in porttree or vartree)
            del clean_me[cpv]
            continue
        if portage.cpv_getkey(cpv) in cp_all:
            # exclusion because of --package-names
            del clean_me[cpv]

    return clean_me
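A caller receives the {'cat/pkg-ver': [paths]} mapping and is responsible for deleting the files (usually the .tbz2 and its symlink). A hypothetical, minimal consumer of that return value:

import os

def remove_binpkgs(clean_me, pretend=True):
    """Delete (or just list) every path reported for the obsolete binpkgs."""
    for cpv, paths in sorted(clean_me.items()):
        for path in paths:
            if pretend:
                print("would remove %s (%s)" % (path, cpv))
            else:
                os.unlink(path)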
Esempio n. 47
0
def findPackages(options,
                 exclude=None,
                 destructive=False,
                 time_limit=0,
                 package_names=False,
                 pkgdir=None,
                 port_dbapi=portage.db[portage.root]["porttree"].dbapi,
                 var_dbapi=portage.db[portage.root]["vartree"].dbapi):
    """Find obsolete binary packages.

	@param options: dict of options determined at runtime
	@type  options: dict
	@param exclude: exclusion dict (as defined in the exclude.parseExcludeFile class)
	@type  exclude: dict, optional
	@param destructive: binpkg is obsolete if not installed (default: `False`)
	@type  destructive: bool, optional
	@param time_limit: exclude binpkg if newer than time value as returned by parseTime()
	@type  time_limit: int, optional
	@param package_names: exclude all binpkg versions if package is installed
						  (used with `destructive=True`) (default: `False`)
	@type  package_names: bool, optional
	@param pkgdir: path to the binpkg cache (PKGDIR)
	@type  pkgdir: str
	@param port_dbapi: defaults to portage.db[portage.root]["porttree"].dbapi
					   Can be overridden for tests.
	@param  var_dbapi: defaults to portage.db[portage.root]["vartree"].dbapi
					   Can be overridden for tests.

	@return binary packages to remove. e.g. {'cat/pkg-ver': [filepath]}
	@rtype: dict
	"""
    if exclude is None:
        exclude = {}

    # Access test, os.walk does not error for "no read permission"
    try:
        test = os.listdir(pkgdir)
        del test
    except EnvironmentError as er:
        if options['ignore-failure']:
            exit(0)
        print(pp.error("Error accessing PKGDIR."), file=sys.stderr)
        print(pp.error("(Check your make.conf file and environment)."),
              file=sys.stderr)
        print(pp.error("Error: %s" % str(er)), file=sys.stderr)
        exit(1)

    # Create a dictionary of all installed packages
    if destructive and package_names:
        installed = dict.fromkeys(var_dbapi.cp_all())
    else:
        installed = {}

    # Dictionary of binary packages to clean. Organized as cpv->[pkgs] in order
    # to support FEATURES=binpkg-multi-instance.
    dead_binpkgs = {}

    bin_dbapi = portage.binarytree(pkgdir=pkgdir,
                                   settings=var_dbapi.settings).dbapi
    for cpv in bin_dbapi.cpv_all():
        cp = portage.cpv_getkey(cpv)

        # Exclude per --exclude-file=...
        if exclDictMatchCP(exclude, cp):
            continue

        # Exclude if binpkg is newer than --time-limit=...
        if time_limit:
            mtime = int(bin_dbapi.aux_get(cpv, ['_mtime_'])[0])
            if mtime >= time_limit:
                continue

        # Exclude if binpkg exists in the porttree and not --deep
        if not destructive and port_dbapi.cpv_exists(cpv):
            if not options['changed-deps']:
                continue

            dep_keys = ('RDEPEND', 'PDEPEND')
            keys = ('EAPI', 'USE') + dep_keys
            binpkg_metadata = dict(zip(keys, bin_dbapi.aux_get(cpv, keys)))
            ebuild_metadata = dict(zip(keys, port_dbapi.aux_get(cpv, keys)))

            if _deps_equal(' '.join(binpkg_metadata[key] for key in dep_keys),
                           binpkg_metadata['EAPI'],
                           ' '.join(ebuild_metadata[key] for key in dep_keys),
                           ebuild_metadata['EAPI'],
                           frozenset(binpkg_metadata['USE'].split())):
                continue

        if destructive and var_dbapi.cpv_exists(cpv):
            # Exclude if an instance of the package is installed due to
            # the --package-names option.
            if cp in installed and port_dbapi.cpv_exists(cpv):
                continue

            # Exclude if BUILD_TIME of binpkg is same as vartree
            buildtime = var_dbapi.aux_get(cpv, ['BUILD_TIME'])[0]
            if buildtime == bin_dbapi.aux_get(cpv, ['BUILD_TIME'])[0]:
                continue

        binpkg_path = bin_dbapi.bintree.getname(cpv)
        dead_binpkgs.setdefault(cpv, []).append(binpkg_path)

    return dead_binpkgs
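The --changed-deps handling compares the runtime dependency strings recorded in the binpkg against the current ebuild, evaluated under the binpkg's USE flags; _deps_equal performs the USE-conditional reduction in the real code. A deliberately naive sketch of just the comparison step, using plain dicts:

DEP_KEYS = ("RDEPEND", "PDEPEND")

def deps_changed(binpkg_metadata, ebuild_metadata):
    """Naive comparison of the joined dependency strings (no USE reduction)."""
    old = " ".join(binpkg_metadata[k] for k in DEP_KEYS).split()
    new = " ".join(ebuild_metadata[k] for k in DEP_KEYS).split()
    return old != new

binpkg = {"RDEPEND": "dev-libs/libfoo", "PDEPEND": ""}
ebuild = {"RDEPEND": "dev-libs/libfoo dev-libs/libbar", "PDEPEND": ""}
assert deps_changed(binpkg, ebuild)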
Esempio n. 48
0
	def _populate(self, getbinpkgs=0):
		if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
			return 0

		# Clear all caches in case populate is called multiple times
		# as may be the case when _global_updates calls populate()
		# prior to performing package moves since it only wants to
		# operate on local packages (getbinpkgs=0).
		self._remotepkgs = None
		self.dbapi._clear_cache()
		self.dbapi._aux_cache.clear()
		if True:
			pkg_paths = {}
			self._pkg_paths = pkg_paths
			dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
			if "All" in dirs:
				dirs.remove("All")
			dirs.sort()
			dirs.insert(0, "All")
			pkgindex = self._load_pkgindex()
			pf_index = None
			if not self._pkgindex_version_supported(pkgindex):
				pkgindex = self._new_pkgindex()
			header = pkgindex.header
			metadata = {}
			for d in pkgindex.packages:
				metadata[d["CPV"]] = d
			update_pkgindex = False
			for mydir in dirs:
				for myfile in listdir(os.path.join(self.pkgdir, mydir)):
					if not myfile.endswith(".tbz2"):
						continue
					mypath = os.path.join(mydir, myfile)
					full_path = os.path.join(self.pkgdir, mypath)
					s = os.lstat(full_path)
					if stat.S_ISLNK(s.st_mode):
						continue

					# Validate data from the package index and try to avoid
					# reading the xpak if possible.
					if mydir != "All":
						possibilities = None
						d = metadata.get(mydir+"/"+myfile[:-5])
						if d:
							possibilities = [d]
					else:
						if pf_index is None:
							pf_index = {}
							for mycpv in metadata:
								mycat, mypf = catsplit(mycpv)
								pf_index.setdefault(
									mypf, []).append(metadata[mycpv])
						possibilities = pf_index.get(myfile[:-5])
					if possibilities:
						match = None
						for d in possibilities:
							try:
								if long(d["MTIME"]) != s[stat.ST_MTIME]:
									continue
							except (KeyError, ValueError):
								continue
							try:
								if long(d["SIZE"]) != long(s.st_size):
									continue
							except (KeyError, ValueError):
								continue
							if not self._pkgindex_keys.difference(d):
								match = d
								break
						if match:
							mycpv = match["CPV"]
							if mycpv in pkg_paths:
								# discard duplicates (All/ is preferred)
								continue
							mycpv = _pkg_str(mycpv)
							pkg_paths[mycpv] = mypath
							# update the path if the package has been moved
							oldpath = d.get("PATH")
							if oldpath and oldpath != mypath:
								update_pkgindex = True
							if mypath != mycpv + ".tbz2":
								d["PATH"] = mypath
								if not oldpath:
									update_pkgindex = True
							else:
								d.pop("PATH", None)
								if oldpath:
									update_pkgindex = True
							self.dbapi.cpv_inject(mycpv)
							if not self.dbapi._aux_cache_keys.difference(d):
								aux_cache = self.dbapi._aux_cache_slot_dict()
								for k in self.dbapi._aux_cache_keys:
									aux_cache[k] = d[k]
								self.dbapi._aux_cache[mycpv] = aux_cache
							continue
					if not os.access(full_path, os.R_OK):
						writemsg(_("!!! Permission denied to read " \
							"binary package: '%s'\n") % full_path,
							noiselevel=-1)
						self.invalids.append(myfile[:-5])
						continue
					metadata_bytes = portage.xpak.tbz2(full_path).get_data()
					mycat = _unicode_decode(metadata_bytes.get(b"CATEGORY", ""),
						encoding=_encodings['repo.content'], errors='replace')
					mypf = _unicode_decode(metadata_bytes.get(b"PF", ""),
						encoding=_encodings['repo.content'], errors='replace')
					slot = _unicode_decode(metadata_bytes.get(b"SLOT", ""),
						encoding=_encodings['repo.content'], errors='replace')
					mypkg = myfile[:-5]
					if not mycat or not mypf or not slot:
						#old-style or corrupt package
						writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
							noiselevel=-1)
						missing_keys = []
						if not mycat:
							missing_keys.append("CATEGORY")
						if not mypf:
							missing_keys.append("PF")
						if not slot:
							missing_keys.append("SLOT")
						msg = []
						if missing_keys:
							missing_keys.sort()
							msg.append(_("Missing metadata key(s): %s.") % \
								", ".join(missing_keys))
						msg.append(_(" This binary package is not " \
							"recoverable and should be deleted."))
						for line in textwrap.wrap("".join(msg), 72):
							writemsg("!!! %s\n" % line, noiselevel=-1)
						self.invalids.append(mypkg)
						continue
					mycat = mycat.strip()
					slot = slot.strip()
					if mycat != mydir and mydir != "All":
						continue
					if mypkg != mypf.strip():
						continue
					mycpv = mycat + "/" + mypkg
					if mycpv in pkg_paths:
						# All is first, so it's preferred.
						continue
					if not self.dbapi._category_re.match(mycat):
						writemsg(_("!!! Binary package has an " \
							"unrecognized category: '%s'\n") % full_path,
							noiselevel=-1)
						writemsg(_("!!! '%s' has a category that is not" \
							" listed in %setc/portage/categories\n") % \
							(mycpv, self.settings["PORTAGE_CONFIGROOT"]),
							noiselevel=-1)
						continue
					mycpv = _pkg_str(mycpv)
					pkg_paths[mycpv] = mypath
					self.dbapi.cpv_inject(mycpv)
					update_pkgindex = True
					d = metadata.get(mycpv, {})
					if d:
						try:
							if long(d["MTIME"]) != s[stat.ST_MTIME]:
								d.clear()
						except (KeyError, ValueError):
							d.clear()
					if d:
						try:
							if long(d["SIZE"]) != long(s.st_size):
								d.clear()
						except (KeyError, ValueError):
							d.clear()

					d["CPV"] = mycpv
					d["SLOT"] = slot
					d["MTIME"] = str(s[stat.ST_MTIME])
					d["SIZE"] = str(s.st_size)

					d.update(zip(self._pkgindex_aux_keys,
						self.dbapi.aux_get(mycpv, self._pkgindex_aux_keys)))
					try:
						self._eval_use_flags(mycpv, d)
					except portage.exception.InvalidDependString:
						writemsg(_("!!! Invalid binary package: '%s'\n") % \
							self.getname(mycpv), noiselevel=-1)
						self.dbapi.cpv_remove(mycpv)
						del pkg_paths[mycpv]

					# record location if it's non-default
					if mypath != mycpv + ".tbz2":
						d["PATH"] = mypath
					else:
						d.pop("PATH", None)
					metadata[mycpv] = d
					if not self.dbapi._aux_cache_keys.difference(d):
						aux_cache = self.dbapi._aux_cache_slot_dict()
						for k in self.dbapi._aux_cache_keys:
							aux_cache[k] = d[k]
						self.dbapi._aux_cache[mycpv] = aux_cache

			for cpv in list(metadata):
				if cpv not in pkg_paths:
					del metadata[cpv]

			# Do not bother to write the Packages index if $PKGDIR/All/ exists
			# since it will provide no benefit due to the need to read CATEGORY
			# from xpak.
			if update_pkgindex and os.access(self.pkgdir, os.W_OK):
				del pkgindex.packages[:]
				pkgindex.packages.extend(iter(metadata.values()))
				self._update_pkgindex_header(pkgindex.header)
				self._pkgindex_write(pkgindex)

		if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
			writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
				noiselevel=-1)

		if not getbinpkgs or 'PORTAGE_BINHOST' not in self.settings:
			self.populated=1
			return
		self._remotepkgs = {}
		for base_url in self.settings["PORTAGE_BINHOST"].split():
			parsed_url = urlparse(base_url)
			host = parsed_url.netloc
			port = parsed_url.port
			user = None
			passwd = None
			user_passwd = ""
			if "@" in host:
				user, host = host.split("@", 1)
				user_passwd = user + "@"
				if ":" in user:
					user, passwd = user.split(":", 1)
			port_args = []
			if port is not None:
				port_str = ":%s" % (port,)
				if host.endswith(port_str):
					host = host[:-len(port_str)]
			pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
				host, parsed_url.path.lstrip("/"), "Packages")
			pkgindex = self._new_pkgindex()
			try:
				f = io.open(_unicode_encode(pkgindex_file,
					encoding=_encodings['fs'], errors='strict'),
					mode='r', encoding=_encodings['repo.content'],
					errors='replace')
				try:
					pkgindex.read(f)
				finally:
					f.close()
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
			local_timestamp = pkgindex.header.get("TIMESTAMP", None)
			remote_timestamp = None
			rmt_idx = self._new_pkgindex()
			proc = None
			tmp_filename = None
			try:
				# urlparse.urljoin() only works correctly with recognized
				# protocols and requires the base url to have a trailing
				# slash, so join manually...
				url = base_url.rstrip("/") + "/Packages"
				try:
					f = _urlopen(url, if_modified_since=local_timestamp)
					if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
						remote_timestamp = f.headers.get('timestamp')
				except IOError as err:
					if hasattr(err, 'code') and err.code == 304: # not modified (since local_timestamp)
						raise UseCachedCopyOfRemoteIndex()

					path = parsed_url.path.rstrip("/") + "/Packages"

					if parsed_url.scheme == 'sftp':
						# The sftp command complains about 'Illegal seek' if
						# we try to make it write to /dev/stdout, so use a
						# temp file instead.
						fd, tmp_filename = tempfile.mkstemp()
						os.close(fd)
						if port is not None:
							port_args = ['-P', "%s" % (port,)]
						proc = subprocess.Popen(['sftp'] + port_args + \
							[user_passwd + host + ":" + path, tmp_filename])
						if proc.wait() != os.EX_OK:
							raise
						f = open(tmp_filename, 'rb')
					elif parsed_url.scheme == 'ssh':
						if port is not None:
							port_args = ['-p', "%s" % (port,)]
						proc = subprocess.Popen(['ssh'] + port_args + \
							[user_passwd + host, '--', 'cat', path],
							stdout=subprocess.PIPE)
						f = proc.stdout
					else:
						setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
						fcmd = self.settings.get(setting)
						if not fcmd:
							raise
						fd, tmp_filename = tempfile.mkstemp()
						tmp_dirname, tmp_basename = os.path.split(tmp_filename)
						os.close(fd)
						success = portage.getbinpkg.file_get(url,
						     tmp_dirname, fcmd=fcmd, filename=tmp_basename)
						if not success:
							raise EnvironmentError("%s failed" % (setting,))
						f = open(tmp_filename, 'rb')

				f_dec = codecs.iterdecode(f,
					_encodings['repo.content'], errors='replace')
				try:
					rmt_idx.readHeader(f_dec)
					if not remote_timestamp: # in case it had not been read from HTTP header
						remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
					if not remote_timestamp:
						# no timestamp in the header, something's wrong
						pkgindex = None
						writemsg(_("\n\n!!! Binhost package index " \
						" has no TIMESTAMP field.\n"), noiselevel=-1)
					else:
						if not self._pkgindex_version_supported(rmt_idx):
							writemsg(_("\n\n!!! Binhost package index version" \
							" is not supported: '%s'\n") % \
							rmt_idx.header.get("VERSION"), noiselevel=-1)
							pkgindex = None
						elif local_timestamp != remote_timestamp:
							rmt_idx.readBody(f_dec)
							pkgindex = rmt_idx
				finally:
					# Timeout after 5 seconds, in case close() blocks
					# indefinitely (see bug #350139).
					try:
						try:
							AlarmSignal.register(5)
							f.close()
						finally:
							AlarmSignal.unregister()
					except AlarmSignal:
						writemsg("\n\n!!! %s\n" % \
							_("Timed out while closing connection to binhost"),
							noiselevel=-1)
			except UseCachedCopyOfRemoteIndex:
				writemsg_stdout("\n")
				writemsg_stdout(
					colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
					"\n")
				rmt_idx = pkgindex
			except EnvironmentError as e:
				writemsg(_("\n\n!!! Error fetching binhost package" \
					" info from '%s'\n") % _hide_url_passwd(base_url))
				writemsg("!!! %s\n\n" % str(e))
				del e
				pkgindex = None
			if proc is not None:
				if proc.poll() is None:
					proc.kill()
					proc.wait()
				proc = None
			if tmp_filename is not None:
				try:
					os.unlink(tmp_filename)
				except OSError:
					pass
			if pkgindex is rmt_idx:
				pkgindex.modified = False # don't update the header
				try:
					ensure_dirs(os.path.dirname(pkgindex_file))
					f = atomic_ofstream(pkgindex_file)
					pkgindex.write(f)
					f.close()
				except (IOError, PortageException):
					if os.access(os.path.dirname(pkgindex_file), os.W_OK):
						raise
					# The current user doesn't have permission to cache the
					# file, but that's alright.
			if pkgindex:
				# Organize remote package list as a cpv -> metadata map.
				remotepkgs = _pkgindex_cpv_map_latest_build(pkgindex)
				remote_base_uri = pkgindex.header.get("URI", base_url)
				for cpv, remote_metadata in remotepkgs.items():
					remote_metadata["BASE_URI"] = remote_base_uri
					self._pkgindex_uri[cpv] = url
				self._remotepkgs.update(remotepkgs)
				self._remote_has_index = True
				for cpv in remotepkgs:
					self.dbapi.cpv_inject(cpv)
				if True:
					# Remote package instances override local package
					# if they are not identical.
					hash_names = ["SIZE"] + self._pkgindex_hashes
					for cpv, local_metadata in metadata.items():
						remote_metadata = self._remotepkgs.get(cpv)
						if remote_metadata is None:
							continue
						# Use digests to compare identity.
						identical = True
						for hash_name in hash_names:
							local_value = local_metadata.get(hash_name)
							if local_value is None:
								continue
							remote_value = remote_metadata.get(hash_name)
							if remote_value is None:
								continue
							if local_value != remote_value:
								identical = False
								break
						if identical:
							del self._remotepkgs[cpv]
						else:
							# Override the local package in the aux_get cache.
							self.dbapi._aux_cache[cpv] = remote_metadata
				else:
					# Local package instances override remote instances.
					for cpv in metadata:
						self._remotepkgs.pop(cpv, None)
				continue
			try:
				chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
				if chunk_size < 8:
					chunk_size = 8
			except (ValueError, KeyError):
				chunk_size = 3000
			writemsg_stdout("\n")
			writemsg_stdout(
				colorize("GOOD", _("Fetching bininfo from ")) + \
				_hide_url_passwd(base_url) + "\n")
			remotepkgs = portage.getbinpkg.dir_get_metadata(
				base_url, chunk_size=chunk_size)

			for mypkg, remote_metadata in remotepkgs.items():
				mycat = remote_metadata.get("CATEGORY")
				if mycat is None:
					#old-style or corrupt package
					writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
						noiselevel=-1)
					continue
				mycat = mycat.strip()
				try:
					fullpkg = _pkg_str(mycat+"/"+mypkg[:-5])
				except InvalidData:
					writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
						noiselevel=-1)
					continue

				if fullpkg in metadata:
					# When using this old protocol, comparison with the remote
					# package isn't supported, so the local package is always
					# preferred even if getbinpkgsonly is enabled.
					continue

				if not self.dbapi._category_re.match(mycat):
					writemsg(_("!!! Remote binary package has an " \
						"unrecognized category: '%s'\n") % fullpkg,
						noiselevel=-1)
					writemsg(_("!!! '%s' has a category that is not" \
						" listed in %setc/portage/categories\n") % \
						(fullpkg, self.settings["PORTAGE_CONFIGROOT"]),
						noiselevel=-1)
					continue
				mykey = portage.cpv_getkey(fullpkg)
				try:
					# invalid tbz2's can hurt things.
					self.dbapi.cpv_inject(fullpkg)
					for k, v in remote_metadata.items():
						remote_metadata[k] = v.strip()
					remote_metadata["BASE_URI"] = base_url

					# Eliminate metadata values with names that digestCheck
					# uses, since they are not valid when using the old
					# protocol. Typically this is needed for SIZE metadata
					# which corresponds to the size of the unpacked files
					# rather than the binpkg file size, triggering digest
					# verification failures as reported in bug #303211.
					remote_metadata.pop('SIZE', None)
					for k in portage.checksum.hashfunc_map:
						remote_metadata.pop(k, None)

					self._remotepkgs[fullpkg] = remote_metadata
				except SystemExit as e:
					raise
				except:
					writemsg(_("!!! Failed to inject remote binary package: %s\n") % fullpkg,
						noiselevel=-1)
					continue
		self.populated=1
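The remote index fetch sends the cached TIMESTAMP as an If-Modified-Since hint and treats an HTTP 304 response as "reuse the local copy" (the UseCachedCopyOfRemoteIndex path above). A rough standalone equivalent using urllib; portage's own _urlopen helper and timestamp formatting are not reproduced here:

import urllib.error
import urllib.request

def fetch_packages_index(url, local_timestamp=None):
    """Return the remote index text, or None when the cached copy is still current."""
    request = urllib.request.Request(url)
    if local_timestamp:
        # Let the server answer 304 if nothing changed since our cached copy.
        request.add_header("If-Modified-Since", local_timestamp)
    try:
        with urllib.request.urlopen(request) as response:
            return response.read().decode("utf-8", errors="replace")
    except urllib.error.HTTPError as err:
        if err.code == 304:
            return None  # not modified; reuse the local cache
        raise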
Esempio n. 49
0
def unmerge(root_config,
            myopts,
            unmerge_action,
            unmerge_files,
            ldpath_mtimes,
            autoclean=0,
            clean_world=1,
            clean_delay=1,
            ordered=0,
            raise_on_error=0,
            scheduler=None,
            writemsg_level=portage.util.writemsg_level):

    if clean_world:
        clean_world = myopts.get('--deselect') != 'n'
    quiet = "--quiet" in myopts
    enter_invalid = '--ask-enter-invalid' in myopts
    settings = root_config.settings
    sets = root_config.sets
    vartree = root_config.trees["vartree"]
    candidate_catpkgs = []
    global_unmerge = 0
    xterm_titles = "notitles" not in settings.features
    out = portage.output.EOutput()
    pkg_cache = {}
    db_keys = list(vartree.dbapi._aux_cache_keys)

    def _pkg(cpv):
        pkg = pkg_cache.get(cpv)
        if pkg is None:
            pkg = Package(cpv=cpv,
                          installed=True,
                          metadata=zip(db_keys,
                                       vartree.dbapi.aux_get(cpv, db_keys)),
                          root_config=root_config,
                          type_name="installed")
            pkg_cache[cpv] = pkg
        return pkg

    vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
    try:
        # At least the parent needs to exist for the lock file.
        portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
        pass
    vdb_lock = None
    try:
        if os.access(vdb_path, os.W_OK):
            vdb_lock = portage.locks.lockdir(vdb_path)
        realsyslist = sets["system"].getAtoms()
        syslist = []
        for x in realsyslist:
            mycp = portage.dep_getkey(x)
            if mycp in settings.getvirtuals():
                providers = []
                for provider in settings.getvirtuals()[mycp]:
                    if vartree.dbapi.match(provider):
                        providers.append(provider)
                if len(providers) == 1:
                    syslist.extend(providers)
            else:
                syslist.append(mycp)

        mysettings = portage.config(clone=settings)

        if not unmerge_files:
            if unmerge_action == "unmerge":
                print()
                print(
                    bold("emerge unmerge") +
                    " can only be used with specific package names")
                print()
                return 0
            else:
                global_unmerge = 1

        localtree = vartree
        # process all arguments and add all
        # valid db entries to candidate_catpkgs
        if global_unmerge:
            if not unmerge_files:
                candidate_catpkgs.extend(vartree.dbapi.cp_all())
        else:
            #we've got command-line arguments
            if not unmerge_files:
                print("\nNo packages to unmerge have been provided.\n")
                return 0
            for x in unmerge_files:
                arg_parts = x.split('/')
                if x[0] not in [".","/"] and \
                 arg_parts[-1][-7:] != ".ebuild":
                    #possible cat/pkg or dep; treat as such
                    candidate_catpkgs.append(x)
                elif unmerge_action in ["prune", "clean"]:
                    print("\n!!! Prune and clean do not accept individual" + \
                     " ebuilds as arguments;\n    skipping.\n")
                    continue
                else:
                    # it appears that the user is specifying an installed
                    # ebuild and we're in "unmerge" mode, so it's ok.
                    if not os.path.exists(x):
                        print("\n!!! The path '" + x + "' doesn't exist.\n")
                        return 0

                    absx = os.path.abspath(x)
                    sp_absx = absx.split("/")
                    if sp_absx[-1][-7:] == ".ebuild":
                        del sp_absx[-1]
                        absx = "/".join(sp_absx)

                    sp_absx_len = len(sp_absx)

                    vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
                    vdb_len = len(vdb_path)

                    sp_vdb = vdb_path.split("/")
                    sp_vdb_len = len(sp_vdb)

                    if not os.path.exists(absx + "/CONTENTS"):
                        print("!!! Not a valid db dir: " + str(absx))
                        return 0

                    if sp_absx_len <= sp_vdb_len:
                        # The Path is shorter... so it can't be inside the vdb.
                        print(sp_absx)
                        print(absx)
                        print("\n!!!",x,"cannot be inside "+ \
                         vdb_path+"; aborting.\n")
                        return 0

                    for idx in range(0, sp_vdb_len):
                        if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
                            print(sp_absx)
                            print(absx)
                            print("\n!!!", x, "is not inside "+\
                             vdb_path+"; aborting.\n")
                            return 0

                    print("=" + "/".join(sp_absx[sp_vdb_len:]))
                    candidate_catpkgs.append("=" +
                                             "/".join(sp_absx[sp_vdb_len:]))

        newline = ""
        if (not "--quiet" in myopts):
            newline = "\n"
        if settings["ROOT"] != "/":
            writemsg_level(darkgreen(newline+ \
             ">>> Using system located in ROOT tree %s\n" % \
             settings["ROOT"]))

        if (("--pretend" in myopts) or ("--ask" in myopts)) and \
         not ("--quiet" in myopts):
            writemsg_level(darkgreen(newline+\
             ">>> These are the packages that would be unmerged:\n"))

        # Preservation of order is required for --depclean and --prune so
        # that dependencies are respected. Use all_selected to eliminate
        # duplicate packages since the same package may be selected by
        # multiple atoms.
        pkgmap = []
        all_selected = set()
        for x in candidate_catpkgs:
            # cycle through all our candidate deps and determine
            # what will and will not get unmerged
            try:
                mymatch = vartree.dbapi.match(x)
            except portage.exception.AmbiguousPackageName as errpkgs:
                print("\n\n!!! The short ebuild name \"" + \
                 x + "\" is ambiguous.  Please specify")
                print("!!! one of the following fully-qualified " + \
                 "ebuild names instead:\n")
                for i in errpkgs[0]:
                    print("    " + green(i))
                print()
                sys.exit(1)

            if not mymatch and x[0] not in "<>=~":
                mymatch = localtree.dep_match(x)
            if not mymatch:
                portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
                 (x, unmerge_action), noiselevel=-1)
                continue

            pkgmap.append({
                "protected": set(),
                "selected": set(),
                "omitted": set()
            })
            mykey = len(pkgmap) - 1
            if unmerge_action == "unmerge":
                for y in mymatch:
                    if y not in all_selected:
                        pkgmap[mykey]["selected"].add(y)
                        all_selected.add(y)
            elif unmerge_action == "prune":
                if len(mymatch) == 1:
                    continue
                best_version = mymatch[0]
                best_slot = vartree.getslot(best_version)
                best_counter = vartree.dbapi.cpv_counter(best_version)
                for mypkg in mymatch[1:]:
                    myslot = vartree.getslot(mypkg)
                    mycounter = vartree.dbapi.cpv_counter(mypkg)
                    if (myslot == best_slot and mycounter > best_counter) or \
                     mypkg == portage.best([mypkg, best_version]):
                        if myslot == best_slot:
                            if mycounter < best_counter:
                                # On slot collision, keep the one with the
                                # highest counter since it is the most
                                # recently installed.
                                continue
                        best_version = mypkg
                        best_slot = myslot
                        best_counter = mycounter
                pkgmap[mykey]["protected"].add(best_version)
                pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
                 if mypkg != best_version and mypkg not in all_selected)
                all_selected.update(pkgmap[mykey]["selected"])
            else:
                # unmerge_action == "clean"
                slotmap = {}
                for mypkg in mymatch:
                    if unmerge_action == "clean":
                        myslot = localtree.getslot(mypkg)
                    else:
                        # since we're pruning, we don't care about slots
                        # and put all the pkgs in together
                        myslot = 0
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

                for mypkg in vartree.dbapi.cp_list(
                        portage.cpv_getkey(mymatch[0])):
                    myslot = vartree.getslot(mypkg)
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

                for myslot in slotmap:
                    counterkeys = list(slotmap[myslot])
                    if not counterkeys:
                        continue
                    counterkeys.sort()
                    pkgmap[mykey]["protected"].add(
                        slotmap[myslot][counterkeys[-1]])
                    del counterkeys[-1]

                    for counter in counterkeys[:]:
                        mypkg = slotmap[myslot][counter]
                        if mypkg not in mymatch:
                            counterkeys.remove(counter)
                            pkgmap[mykey]["protected"].add(
                                slotmap[myslot][counter])

                    #be pretty and get them in order of merge:
                    for ckey in counterkeys:
                        mypkg = slotmap[myslot][ckey]
                        if mypkg not in all_selected:
                            pkgmap[mykey]["selected"].add(mypkg)
                            all_selected.add(mypkg)
                    # ok, now the last-merged package
                    # is protected, and the rest are selected
        numselected = len(all_selected)
        if global_unmerge and not numselected:
            portage.writemsg_stdout(
                "\n>>> No outdated packages were found on your system.\n")
            return 0

        if not numselected:
            portage.writemsg_stdout(
             "\n>>> No packages selected for removal by " + \
             unmerge_action + "\n")
            return 0
    finally:
        if vdb_lock:
            vartree.dbapi.flush_cache()
            portage.locks.unlockdir(vdb_lock)

    from portage.sets.base import EditablePackageSet

    # generate a list of package sets that are directly or indirectly listed in "selected",
    # as there is no persistent list of "installed" sets
    installed_sets = ["selected"]
    stop = False
    pos = 0
    while not stop:
        stop = True
        pos = len(installed_sets)
        for s in installed_sets[pos - 1:]:
            if s not in sets:
                continue
            candidates = [
                x[len(SETPREFIX):] for x in sets[s].getNonAtoms()
                if x.startswith(SETPREFIX)
            ]
            if candidates:
                stop = False
                installed_sets += candidates
    installed_sets = [
        x for x in installed_sets if x not in root_config.setconfig.active
    ]
    del stop, pos

    # we don't want to unmerge packages that are still listed in user-editable package sets
    # listed in "world" as they would be remerged on the next update of "world" or the
    # relevant package sets.
    unknown_sets = set()
    for cp in range(len(pkgmap)):
        for cpv in pkgmap[cp]["selected"].copy():
            try:
                pkg = _pkg(cpv)
            except KeyError:
                # It could have been uninstalled
                # by a concurrent process.
                continue

            if unmerge_action != "clean" and \
             root_config.root == "/" and \
             portage.match_from_list(
             portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
                msg = ("Not unmerging package %s since there is no valid " + \
                "reason for portage to unmerge itself.") % (pkg.cpv,)
                for line in textwrap.wrap(msg, 75):
                    out.eerror(line)
                # adjust pkgmap so the display output is correct
                pkgmap[cp]["selected"].remove(cpv)
                all_selected.remove(cpv)
                pkgmap[cp]["protected"].add(cpv)
                continue

            parents = []
            for s in installed_sets:
                # skip sets that the user requested to unmerge, and skip world
                # user-selected set, since the package will be removed from
                # that set later on.
                if s in root_config.setconfig.active or s == "selected":
                    continue

                if s not in sets:
                    if s in unknown_sets:
                        continue
                    unknown_sets.add(s)
                    out = portage.output.EOutput()
                    out.eerror(("Unknown set '@%s' in %s%s") % \
                     (s, root_config.root, portage.const.WORLD_SETS_FILE))
                    continue

                # only check instances of EditablePackageSet as other classes are generally used for
                # special purposes and can be ignored here (and are usually generated dynamically, so the
                # user can't do much about them anyway)
                if isinstance(sets[s], EditablePackageSet):

                    # This is derived from a snippet of code in the
                    # depgraph._iter_atoms_for_pkg() method.
                    for atom in sets[s].iterAtomsForPackage(pkg):
                        inst_matches = vartree.dbapi.match(atom)
                        inst_matches.reverse()  # descending order
                        higher_slot = None
                        for inst_cpv in inst_matches:
                            try:
                                inst_pkg = _pkg(inst_cpv)
                            except KeyError:
                                # It could have been uninstalled
                                # by a concurrent process.
                                continue

                            if inst_pkg.cp != atom.cp:
                                continue
                            if pkg >= inst_pkg:
                                # This is descending order, and we're not
                                # interested in any versions <= pkg given.
                                break
                            if pkg.slot_atom != inst_pkg.slot_atom:
                                higher_slot = inst_pkg
                                break
                        if higher_slot is None:
                            parents.append(s)
                            break
            if parents:
                #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
                #print colorize("WARN", "but still listed in the following package sets:")
                #print "    %s\n" % ", ".join(parents)
                print(
                    colorize("WARN",
                             "Not unmerging package %s as it is" % cpv))
                print(
                    colorize(
                        "WARN",
                        "still referenced by the following package sets:"))
                print("    %s\n" % ", ".join(parents))
                # adjust pkgmap so the display output is correct
                pkgmap[cp]["selected"].remove(cpv)
                all_selected.remove(cpv)
                pkgmap[cp]["protected"].add(cpv)

    del installed_sets

    numselected = len(all_selected)
    if not numselected:
        writemsg_level(
         "\n>>> No packages selected for removal by " + \
         unmerge_action + "\n")
        return 0

    # Unmerge order only matters in some cases
    if not ordered:
        unordered = {}
        for d in pkgmap:
            selected = d["selected"]
            if not selected:
                continue
            cp = portage.cpv_getkey(next(iter(selected)))
            cp_dict = unordered.get(cp)
            if cp_dict is None:
                cp_dict = {}
                unordered[cp] = cp_dict
                for k in d:
                    cp_dict[k] = set()
            for k, v in d.items():
                cp_dict[k].update(v)
        pkgmap = [unordered[cp] for cp in sorted(unordered)]

    for x in range(len(pkgmap)):
        selected = pkgmap[x]["selected"]
        if not selected:
            continue
        for mytype, mylist in pkgmap[x].items():
            if mytype == "selected":
                continue
            mylist.difference_update(all_selected)
        cp = portage.cpv_getkey(next(iter(selected)))
        for y in localtree.dep_match(cp):
            if y not in pkgmap[x]["omitted"] and \
             y not in pkgmap[x]["selected"] and \
             y not in pkgmap[x]["protected"] and \
             y not in all_selected:
                pkgmap[x]["omitted"].add(y)
        if global_unmerge and not pkgmap[x]["selected"]:
            #avoid cluttering the preview printout with stuff that isn't getting unmerged
            continue
        if not (pkgmap[x]["protected"]
                or pkgmap[x]["omitted"]) and cp in syslist:
            writemsg_level(colorize("BAD","\a\n\n!!! " + \
             "'%s' is part of your system profile.\n" % cp),
             level=logging.WARNING, noiselevel=-1)
            writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
             "be damaging to your system.\n\n"),
             level=logging.WARNING, noiselevel=-1)
            if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
                countdown(int(settings["EMERGE_WARNING_DELAY"]),
                          colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
        if not quiet:
            writemsg_level("\n %s\n" % (bold(cp), ), noiselevel=-1)
        else:
            writemsg_level(bold(cp) + ": ", noiselevel=-1)
        for mytype in ["selected", "protected", "omitted"]:
            if not quiet:
                writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
            if pkgmap[x][mytype]:
                sorted_pkgs = [
                    portage.catpkgsplit(mypkg)[1:]
                    for mypkg in pkgmap[x][mytype]
                ]
                sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
                for pn, ver, rev in sorted_pkgs:
                    if rev == "r0":
                        myversion = ver
                    else:
                        myversion = ver + "-" + rev
                    if mytype == "selected":
                        writemsg_level(colorize("UNMERGE_WARN",
                                                myversion + " "),
                                       noiselevel=-1)
                    else:
                        writemsg_level(colorize("GOOD", myversion + " "),
                                       noiselevel=-1)
            else:
                writemsg_level("none ", noiselevel=-1)
            if not quiet:
                writemsg_level("\n", noiselevel=-1)
        if quiet:
            writemsg_level("\n", noiselevel=-1)

    writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
     " packages are slated for removal.\n")
    writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
      " and " + colorize("GOOD", "'omitted'") + \
      " packages will not be removed.\n\n")

    if "--pretend" in myopts:
        #we're done... return
        return 0
    if "--ask" in myopts:
        if userquery("Would you like to unmerge these packages?",
                     enter_invalid) == "No":
            # enter pretend mode for correct formatting of results
            myopts["--pretend"] = True
            print()
            print("Quitting.")
            print()
            return 0
    #the real unmerging begins, after a short delay....
    if clean_delay and not autoclean:
        countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

    for x in range(len(pkgmap)):
        for y in pkgmap[x]["selected"]:
            writemsg_level(">>> Unmerging " + y + "...\n", noiselevel=-1)
            emergelog(xterm_titles, "=== Unmerging... (" + y + ")")
            mysplit = y.split("/")
            #unmerge...
            retval = portage.unmerge(mysplit[0],
                                     mysplit[1],
                                     settings["ROOT"],
                                     mysettings,
                                     unmerge_action not in ["clean", "prune"],
                                     vartree=vartree,
                                     ldpath_mtimes=ldpath_mtimes,
                                     scheduler=scheduler)

            if retval != os.EX_OK:
                emergelog(xterm_titles, " !!! unmerge FAILURE: " + y)
                if raise_on_error:
                    raise UninstallFailure(retval)
                sys.exit(retval)
            else:
                if clean_world and hasattr(sets["selected"], "cleanPackage")\
                  and hasattr(sets["selected"], "lock"):
                    sets["selected"].lock()
                    if hasattr(sets["selected"], "load"):
                        sets["selected"].load()
                    sets["selected"].cleanPackage(vartree.dbapi, y)
                    sets["selected"].unlock()
                emergelog(xterm_titles, " >>> unmerge success: " + y)

    if clean_world and hasattr(sets["selected"], "remove")\
      and hasattr(sets["selected"], "lock"):
        sets["selected"].lock()
        # load is called inside remove()
        for s in root_config.setconfig.active:
            sets["selected"].remove(SETPREFIX + s)
        sets["selected"].unlock()

    return 1
    def _extract_version(package_version):
        # This does not seem to be in the stable portage lib yet
        # return portage.cpv_getversion(package_version)

        cp = portage.cpv_getkey(package_version)
        return package_version[len(cp) + 1:]
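
The helper above strips the "${CATEGORY}/${PN}-" prefix by hand because, as its own comment notes, cpv_getversion() was not yet in the stable portage API. A minimal standalone sketch of the same idea, assuming portage is importable on the host; the extract_version() name is hypothetical, and newer portage releases also expose portage.versions.cpv_getversion() for exactly this:

import portage

def extract_version(cpv):
    # cpv_getkey("app-editors/vim-8.2.5000-r1") -> "app-editors/vim"
    cp = portage.cpv_getkey(cpv)
    # drop the "<category>/<pn>-" prefix, keeping the version-revision part
    return cpv[len(cp) + 1:]

print(extract_version("app-editors/vim-8.2.5000-r1"))  # -> 8.2.5000-r1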
Esempio n. 51
0
	def output(self):
		"""Outputs the results of the search."""
		msg = []
		msg.append("\b\b  \n[ Results for search key : " + \
			bold(self.searchkey) + " ]\n")
		msg.append("[ Applications found : " + \
			bold(str(self.mlen)) + " ]\n\n")
		vardb = self.vartree.dbapi
		for mtype in self.matches:
			for match,masked in self.matches[mtype]:
				full_package = None
				if mtype == "pkg":
					catpack = match
					full_package = self.portdb.xmatch(
						"bestmatch-visible", match)
					if not full_package:
						#no match found; we don't want to query description
						masked=1
						full_package = portage.best(
							self.portdb.xmatch("match-all",match))
				elif mtype == "desc":
					full_package = match
					match        = portage.cpv_getkey(match)
				elif mtype == "set":
					msg.append(green("*") + "  " + bold(match) + "\n")
					if self.verbose:
						msg.append("      " + darkgreen("Description:") + \
							"   " + \
							self.sdict[match].getMetadata("DESCRIPTION") \
							+ "\n\n")
					writemsg_stdout(''.join(msg), noiselevel=-1)
				if full_package:
					try:
						desc, homepage, license = self.portdb.aux_get(
							full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
					except KeyError:
						msg.append("emerge: search: aux_get() failed, skipping\n")
						continue
					if masked:
						msg.append(green("*") + "  " + \
							white(match) + " " + red("[ Masked ]") + "\n")
					else:
						msg.append(green("*") + "  " + bold(match) + "\n")
					myversion = self.getVersion(full_package, search.VERSION_RELEASE)

					mysum = [0,0]
					file_size_str = None
					mycat = match.split("/")[0]
					mypkg = match.split("/")[1]
					mycpv = match + "-" + myversion
					myebuild = self.portdb.findname(mycpv)
					if myebuild:
						pkgdir = os.path.dirname(myebuild)
						from portage import manifest
						mf = manifest.Manifest(
							pkgdir, self.settings["DISTDIR"])
						try:
							uri_map = self.portdb.getFetchMap(mycpv)
						except portage.exception.InvalidDependString as e:
							file_size_str = "Unknown (%s)" % (e,)
							del e
						else:
							try:
								mysum[0] = mf.getDistfilesSize(uri_map)
							except KeyError as e:
								file_size_str = "Unknown (missing " + \
									"digest for %s)" % (e,)
								del e

					available = False
					for db in self._dbs:
						if db is not vardb and \
							db.cpv_exists(mycpv):
							available = True
							if not myebuild and hasattr(db, "bintree"):
								myebuild = db.bintree.getname(mycpv)
								try:
									mysum[0] = os.stat(myebuild).st_size
								except OSError:
									myebuild = None
							break

					if myebuild and file_size_str is None:
						mystr = str(mysum[0] // 1024)
						mycount = len(mystr)
						while (mycount > 3):
							mycount -= 3
							mystr = mystr[:mycount] + "," + mystr[mycount:]
						file_size_str = mystr + " kB"

					if self.verbose:
						if available:
							msg.append("      %s %s\n" % \
								(darkgreen("Latest version available:"),
								myversion))
						msg.append("      %s\n" % \
							self.getInstallationStatus(mycat+'/'+mypkg))
						if myebuild:
							msg.append("      %s %s\n" % \
								(darkgreen("Size of files:"), file_size_str))
						msg.append("      " + darkgreen("Homepage:") + \
							"      " + homepage + "\n")
						msg.append("      " + darkgreen("Description:") \
							+ "   " + desc + "\n")
						msg.append("      " + darkgreen("License:") + \
							"       " + license + "\n\n")
		writemsg_stdout(''.join(msg), noiselevel=-1)
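
The file-size string in the snippet above is built by hand: integer kilobytes with a comma inserted every three digits from the right. Esempio n. 52 below swaps this for localized_size(); here is a standalone sketch of the hand-rolled version, with a hypothetical helper name:

def format_kib(size_bytes):
    # integer kilobytes, then a comma inserted every three digits from the right
    mystr = str(size_bytes // 1024)
    mycount = len(mystr)
    while mycount > 3:
        mycount -= 3
        mystr = mystr[:mycount] + "," + mystr[mycount:]
    return mystr + " kB"

print(format_kib(123456789))  # -> 120,563 kB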
Esempio n. 52
0
    def output(self):
        """Outputs the results of the search."""
        msg = []
        msg.append("\b\b  \n[ Results for search key : " + \
         bold(self.searchkey) + " ]\n")
        msg.append("[ Applications found : " + \
         bold(str(self.mlen)) + " ]\n\n")
        vardb = self.vartree.dbapi
        metadata_keys = set(Package.metadata_keys)
        metadata_keys.update(["DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI"])
        metadata_keys = tuple(metadata_keys)
        for mtype in self.matches:
            for match, masked in self.matches[mtype]:
                full_package = None
                if mtype == "pkg":
                    full_package = self._xmatch("bestmatch-visible", match)
                    if not full_package:
                        #no match found; we don't want to query description
                        masked = 1
                        full_package = portage.best(
                            self._xmatch("match-all", match))
                elif mtype == "desc":
                    full_package = match
                    match = portage.cpv_getkey(match)
                elif mtype == "set":
                    msg.append(green("*") + "  " + bold(match) + "\n")
                    if self.verbose:
                        msg.append("      " + darkgreen("Description:") + \
                         "   " + \
                         self.sdict[match].getMetadata("DESCRIPTION") \
                         + "\n\n")
                if full_package:
                    try:
                        metadata = dict(
                            zip(metadata_keys,
                                self._aux_get(full_package, metadata_keys)))
                    except KeyError:
                        msg.append(
                            "emerge: search: aux_get() failed, skipping\n")
                        continue

                    desc = metadata["DESCRIPTION"]
                    homepage = metadata["HOMEPAGE"]
                    license = metadata["LICENSE"]

                    if masked:
                        msg.append(green("*") + "  " + \
                         white(match) + " " + red("[ Masked ]") + "\n")
                    else:
                        msg.append(green("*") + "  " + bold(match) + "\n")
                    myversion = self.getVersion(full_package,
                                                search.VERSION_RELEASE)

                    mysum = [0, 0]
                    file_size_str = None
                    mycat = match.split("/")[0]
                    mypkg = match.split("/")[1]
                    mycpv = match + "-" + myversion
                    myebuild = self._findname(mycpv)
                    if myebuild:
                        pkg = Package(built=False,
                                      cpv=mycpv,
                                      installed=False,
                                      metadata=metadata,
                                      root_config=self.root_config,
                                      type_name="ebuild")
                        pkgdir = os.path.dirname(myebuild)
                        mf = self.settings.repositories.get_repo_for_location(
                            os.path.dirname(os.path.dirname(pkgdir)))
                        mf = mf.load_manifest(pkgdir, self.settings["DISTDIR"])
                        try:
                            uri_map = _parse_uri_map(mycpv,
                                                     metadata,
                                                     use=pkg.use.enabled)
                        except portage.exception.InvalidDependString as e:
                            file_size_str = "Unknown (%s)" % (e, )
                            del e
                        else:
                            try:
                                mysum[0] = mf.getDistfilesSize(uri_map)
                            except KeyError as e:
                                file_size_str = "Unknown (missing " + \
                                 "digest for %s)" % (e,)
                                del e

                    available = False
                    for db in self._dbs:
                        if db is not vardb and \
                         db.cpv_exists(mycpv):
                            available = True
                            if not myebuild and hasattr(db, "bintree"):
                                myebuild = db.bintree.getname(mycpv)
                                try:
                                    mysum[0] = os.stat(myebuild).st_size
                                except OSError:
                                    myebuild = None
                            break

                    if myebuild and file_size_str is None:
                        file_size_str = localized_size(mysum[0])

                    if self.verbose:
                        if available:
                            msg.append("      %s %s\n" % \
                             (darkgreen("Latest version available:"),
                             myversion))
                        msg.append("      %s\n" % \
                         self.getInstallationStatus(mycat+'/'+mypkg))
                        if myebuild:
                            msg.append("      %s %s\n" % \
                             (darkgreen("Size of files:"), file_size_str))
                        msg.append("      " + darkgreen("Homepage:") + \
                         "      " + homepage + "\n")
                        msg.append("      " + darkgreen("Description:") \
                         + "   " + desc + "\n")
                        msg.append("      " + darkgreen("License:") + \
                         "       " + license + "\n\n")
        writemsg_stdout(''.join(msg), noiselevel=-1)
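
The search output above first asks for the best visible match and only falls back to the highest match overall when everything is masked, so a masked-only package still gets a version printed. A minimal standalone sketch of that fallback, written against the global portage.portdb handle rather than the search class's own database list (names here are illustrative):

import portage

portdb = portage.portdb
atom = "app-editors/vim"  # hypothetical search hit
best_visible = portdb.xmatch("bestmatch-visible", atom)
if best_visible:
    print("visible:", best_visible)
else:
    # everything is masked (or nothing matched at all): fall back to the
    # highest match overall so a version can still be reported
    candidates = portdb.xmatch("match-all", atom)
    print("masked or missing:", portage.best(candidates) if candidates else "none")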
Esempio n. 53
0
def _p_to_cp(p):
    ret = _porttree().dbapi.xmatch("match-all", p)
    if ret:
        return portage.cpv_getkey(ret[0])
    return None
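
_p_to_cp() maps whatever atom or bare name xmatch can resolve to its ${CATEGORY}/${PN} key, using the first match-all hit. A hedged usage sketch, with portage.portdb standing in for the original _porttree().dbapi helper, which is not shown in the snippet:

import portage

def p_to_cp(p):
    ret = portage.portdb.xmatch("match-all", p)
    if ret:
        return portage.cpv_getkey(ret[0])
    return None

print(p_to_cp("app-editors/vim"))  # -> "app-editors/vim" if present, else None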
Esempio n. 54
0
 def __get_path(self, cpv):
     cat, pn = catsplit(cpv_getkey(cpv))
     return os.path.join(self.portdir, cat, pn,
                         os.path.basename(cpv) + ".ebuild")
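
The path built above is simply ${PORTDIR}/<category>/<pn>/<pf>.ebuild. A self-contained sketch of the same construction; the repository location passed in is an assumption, and real code should resolve it from the portage configuration (repos.conf):

import os
from portage.versions import catsplit, cpv_getkey

def ebuild_path(portdir, cpv):
    # ${PORTDIR}/<category>/<pn>/<pf>.ebuild
    cat, pn = catsplit(cpv_getkey(cpv))
    return os.path.join(portdir, cat, pn, os.path.basename(cpv) + ".ebuild")

print(ebuild_path("/var/db/repos/gentoo", "app-editors/vim-8.2.5000-r1"))
# -> /var/db/repos/gentoo/app-editors/vim/vim-8.2.5000-r1.ebuild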
Esempio n. 55
0
def _p_to_cp(p):
    ret = _porttree().dbapi.xmatch("match-all", p)
    if ret:
        return portage.cpv_getkey(ret[0])
    return None
Esempio n. 56
0
def _p_to_cp(p):
    ret = _porttree().dbapi.xmatch("match-all", p)
    if ret:
        return portage.cpv_getkey(ret[0])
    log.error('_p_to_cp: Package {0} not found by xmatch'.format(p))
    return None
Esempio n. 57
0
def _cpv_to_name(cpv):
    if cpv == '':
        return ''
    return str(portage.cpv_getkey(cpv))
Esempio n. 58
0
	def _populate(self, getbinpkgs=0):
		if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
			return 0

		# Clear all caches in case populate is called multiple times
		# as may be the case when _global_updates calls populate()
		# prior to performing package moves since it only wants to
		# operate on local packages (getbinpkgs=0).
		self._remotepkgs = None
		self.dbapi._clear_cache()
		self.dbapi._aux_cache.clear()
		if True:
			pkg_paths = {}
			self._pkg_paths = pkg_paths
			dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
			if "All" in dirs:
				dirs.remove("All")
			dirs.sort()
			dirs.insert(0, "All")
			pkgindex = self._load_pkgindex()
			pf_index = None
			if not self._pkgindex_version_supported(pkgindex):
				pkgindex = self._new_pkgindex()
			header = pkgindex.header
			metadata = {}
			for d in pkgindex.packages:
				metadata[d["CPV"]] = d
			update_pkgindex = False
			for mydir in dirs:
				for myfile in listdir(os.path.join(self.pkgdir, mydir)):
					if not myfile.endswith(".tbz2"):
						continue
					mypath = os.path.join(mydir, myfile)
					full_path = os.path.join(self.pkgdir, mypath)
					s = os.lstat(full_path)
					if stat.S_ISLNK(s.st_mode):
						continue

					# Validate data from the package index and try to avoid
					# reading the xpak if possible.
					if mydir != "All":
						possibilities = None
						d = metadata.get(mydir+"/"+myfile[:-5])
						if d:
							possibilities = [d]
					else:
						if pf_index is None:
							pf_index = {}
							for mycpv in metadata:
								mycat, mypf = catsplit(mycpv)
								pf_index.setdefault(
									mypf, []).append(metadata[mycpv])
						possibilities = pf_index.get(myfile[:-5])
					if possibilities:
						match = None
						for d in possibilities:
							try:
								if long(d["MTIME"]) != s[stat.ST_MTIME]:
									continue
							except (KeyError, ValueError):
								continue
							try:
								if long(d["SIZE"]) != long(s.st_size):
									continue
							except (KeyError, ValueError):
								continue
							if not self._pkgindex_keys.difference(d):
								match = d
								break
						if match:
							mycpv = match["CPV"]
							if mycpv in pkg_paths:
								# discard duplicates (All/ is preferred)
								continue
							pkg_paths[mycpv] = mypath
							# update the path if the package has been moved
							oldpath = d.get("PATH")
							if oldpath and oldpath != mypath:
								update_pkgindex = True
							if mypath != mycpv + ".tbz2":
								d["PATH"] = mypath
								if not oldpath:
									update_pkgindex = True
							else:
								d.pop("PATH", None)
								if oldpath:
									update_pkgindex = True
							self.dbapi.cpv_inject(mycpv)
							if not self.dbapi._aux_cache_keys.difference(d):
								aux_cache = self.dbapi._aux_cache_slot_dict()
								for k in self.dbapi._aux_cache_keys:
									aux_cache[k] = d[k]
								self.dbapi._aux_cache[mycpv] = aux_cache
							continue
					if not os.access(full_path, os.R_OK):
						writemsg(_("!!! Permission denied to read " \
							"binary package: '%s'\n") % full_path,
							noiselevel=-1)
						self.invalids.append(myfile[:-5])
						continue
					metadata_bytes = portage.xpak.tbz2(full_path).get_data()
					mycat = _unicode_decode(metadata_bytes.get(
						_unicode_encode("CATEGORY",
						encoding=_encodings['repo.content']), ""),
						encoding=_encodings['repo.content'], errors='replace')
					mypf = _unicode_decode(metadata_bytes.get(
						_unicode_encode("PF",
						encoding=_encodings['repo.content']), ""),
						encoding=_encodings['repo.content'], errors='replace')
					slot = _unicode_decode(metadata_bytes.get(
						_unicode_encode("SLOT",
						encoding=_encodings['repo.content']), ""),
						encoding=_encodings['repo.content'], errors='replace')
					mypkg = myfile[:-5]
					if not mycat or not mypf or not slot:
						#old-style or corrupt package
						writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
							noiselevel=-1)
						missing_keys = []
						if not mycat:
							missing_keys.append("CATEGORY")
						if not mypf:
							missing_keys.append("PF")
						if not slot:
							missing_keys.append("SLOT")
						msg = []
						if missing_keys:
							missing_keys.sort()
							msg.append(_("Missing metadata key(s): %s.") % \
								", ".join(missing_keys))
						msg.append(_(" This binary package is not " \
							"recoverable and should be deleted."))
						from textwrap import wrap
						for line in wrap("".join(msg), 72):
							writemsg("!!! %s\n" % line, noiselevel=-1)
						self.invalids.append(mypkg)
						continue
					mycat = mycat.strip()
					slot = slot.strip()
					if mycat != mydir and mydir != "All":
						continue
					if mypkg != mypf.strip():
						continue
					mycpv = mycat + "/" + mypkg
					if mycpv in pkg_paths:
						# All is first, so it's preferred.
						continue
					if not self.dbapi._category_re.match(mycat):
						writemsg(_("!!! Binary package has an " \
							"unrecognized category: '%s'\n") % full_path,
							noiselevel=-1)
						writemsg(_("!!! '%s' has a category that is not" \
							" listed in %setc/portage/categories\n") % \
							(mycpv, self.settings["PORTAGE_CONFIGROOT"]),
							noiselevel=-1)
						continue
					pkg_paths[mycpv] = mypath
					self.dbapi.cpv_inject(mycpv)
					update_pkgindex = True
					d = metadata.get(mycpv, {})
					if d:
						try:
							if long(d["MTIME"]) != s[stat.ST_MTIME]:
								d.clear()
						except (KeyError, ValueError):
							d.clear()
					if d:
						try:
							if long(d["SIZE"]) != long(s.st_size):
								d.clear()
						except (KeyError, ValueError):
							d.clear()

					d["CPV"] = mycpv
					d["SLOT"] = slot
					d["MTIME"] = str(s[stat.ST_MTIME])
					d["SIZE"] = str(s.st_size)

					d.update(zip(self._pkgindex_aux_keys,
						self.dbapi.aux_get(mycpv, self._pkgindex_aux_keys)))
					try:
						self._eval_use_flags(mycpv, d)
					except portage.exception.InvalidDependString:
						writemsg(_("!!! Invalid binary package: '%s'\n") % \
							self.getname(mycpv), noiselevel=-1)
						self.dbapi.cpv_remove(mycpv)
						del pkg_paths[mycpv]

					# record location if it's non-default
					if mypath != mycpv + ".tbz2":
						d["PATH"] = mypath
					else:
						d.pop("PATH", None)
					metadata[mycpv] = d
					if not self.dbapi._aux_cache_keys.difference(d):
						aux_cache = self.dbapi._aux_cache_slot_dict()
						for k in self.dbapi._aux_cache_keys:
							aux_cache[k] = d[k]
						self.dbapi._aux_cache[mycpv] = aux_cache

			for cpv in list(metadata):
				if cpv not in pkg_paths:
					del metadata[cpv]

			# Do not bother to write the Packages index if $PKGDIR/All/ exists
			# since it will provide no benefit due to the need to read CATEGORY
			# from xpak.
			if update_pkgindex and os.access(self.pkgdir, os.W_OK):
				del pkgindex.packages[:]
				pkgindex.packages.extend(iter(metadata.values()))
				self._update_pkgindex_header(pkgindex.header)
				from portage.util import atomic_ofstream
				f = atomic_ofstream(self._pkgindex_file)
				try:
					pkgindex.write(f)
				finally:
					f.close()

		if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
			writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
				noiselevel=-1)

		if getbinpkgs and 'PORTAGE_BINHOST' in self.settings:
			base_url = self.settings["PORTAGE_BINHOST"]
			from portage.const import CACHE_PATH
			try:
				from urllib.parse import urlparse
			except ImportError:
				from urlparse import urlparse
			urldata = urlparse(base_url)
			pkgindex_file = os.path.join(self.settings["ROOT"], CACHE_PATH, "binhost",
				urldata[1] + urldata[2], "Packages")
			pkgindex = self._new_pkgindex()
			try:
				f = codecs.open(_unicode_encode(pkgindex_file,
					encoding=_encodings['fs'], errors='strict'),
					mode='r', encoding=_encodings['repo.content'],
					errors='replace')
				try:
					pkgindex.read(f)
				finally:
					f.close()
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
			local_timestamp = pkgindex.header.get("TIMESTAMP", None)
			try:
				from urllib.request import urlopen as urllib_request_urlopen
			except ImportError:
				from urllib import urlopen as urllib_request_urlopen
			rmt_idx = self._new_pkgindex()
			try:
				# urlparse.urljoin() only works correctly with recognized
				# protocols and requires the base url to have a trailing
				# slash, so join manually...
				f = urllib_request_urlopen(base_url.rstrip("/") + "/Packages")
				f_dec = codecs.iterdecode(f,
					_encodings['repo.content'], errors='replace')
				try:
					rmt_idx.readHeader(f_dec)
					remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
					if not remote_timestamp:
						# no timestamp in the header, something's wrong
						pkgindex = None
					else:
						if not self._pkgindex_version_supported(rmt_idx):
							writemsg(_("\n\n!!! Binhost package index version" \
							" is not supported: '%s'\n") % \
							rmt_idx.header.get("VERSION"), noiselevel=-1)
							pkgindex = None
						elif local_timestamp != remote_timestamp:
							rmt_idx.readBody(f_dec)
							pkgindex = rmt_idx
				finally:
					f.close()
			except EnvironmentError as e:
				writemsg(_("\n\n!!! Error fetching binhost package" \
					" info from '%s'\n") % base_url)
				writemsg("!!! %s\n\n" % str(e))
				del e
				pkgindex = None
			if pkgindex is rmt_idx:
				pkgindex.modified = False # don't update the header
				from portage.util import atomic_ofstream, ensure_dirs
				try:
					ensure_dirs(os.path.dirname(pkgindex_file))
					f = atomic_ofstream(pkgindex_file)
					pkgindex.write(f)
					f.close()
				except PortageException:
					if os.access(os.path.join(
						self.settings["ROOT"], CACHE_PATH), os.W_OK):
						raise
					# The current user doesn't have permission to cache the
					# file, but that's alright.
			if pkgindex:
				self._remotepkgs = {}
				for d in pkgindex.packages:
					self._remotepkgs[d["CPV"]] = d
				self._remote_has_index = True
				self._remote_base_uri = pkgindex.header.get("URI", base_url)
				self.__remotepkgs = {}
				for cpv in self._remotepkgs:
					self.dbapi.cpv_inject(cpv)
				self.populated = 1
				if True:
					# Remote package instances override local package
					# if they are not identical.
					hash_names = ["SIZE"] + self._pkgindex_hashes
					for cpv, local_metadata in metadata.items():
						remote_metadata = self._remotepkgs.get(cpv)
						if remote_metadata is None:
							continue
						# Use digests to compare identity.
						identical = True
						for hash_name in hash_names:
							local_value = local_metadata.get(hash_name)
							if local_value is None:
								continue
							remote_value = remote_metadata.get(hash_name)
							if remote_value is None:
								continue
							if local_value != remote_value:
								identical = False
								break
						if identical:
							del self._remotepkgs[cpv]
						else:
							# Override the local package in the aux_get cache.
							self.dbapi._aux_cache[cpv] = remote_metadata
				else:
					# Local package instances override remote instances.
					for cpv in metadata:
						self._remotepkgs.pop(cpv, None)
				return
			self._remotepkgs = {}
			try:
				chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
				if chunk_size < 8:
					chunk_size = 8
			except (ValueError, KeyError):
				chunk_size = 3000
			writemsg_stdout("\n")
			writemsg_stdout(
				colorize("GOOD", _("Fetching bininfo from ")) + \
				re.sub(r'//(.+):.+@(.+)/', r'//\1:*password*@\2/', base_url) + "\n")
			self.__remotepkgs = portage.getbinpkg.dir_get_metadata(
				self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
			#writemsg(green("  -- DONE!\n\n"))

			for mypkg in list(self.__remotepkgs):
				if "CATEGORY" not in self.__remotepkgs[mypkg]:
					#old-style or corrupt package
					writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
						noiselevel=-1)
					del self.__remotepkgs[mypkg]
					continue
				mycat = self.__remotepkgs[mypkg]["CATEGORY"].strip()
				fullpkg = mycat+"/"+mypkg[:-5]

				if fullpkg in metadata:
					# When using this old protocol, comparison with the remote
					# package isn't supported, so the local package is always
					# preferred even if getbinpkgsonly is enabled.
					continue

				if not self.dbapi._category_re.match(mycat):
					writemsg(_("!!! Remote binary package has an " \
						"unrecognized category: '%s'\n") % fullpkg,
						noiselevel=-1)
					writemsg(_("!!! '%s' has a category that is not" \
						" listed in %setc/portage/categories\n") % \
						(fullpkg, self.settings["PORTAGE_CONFIGROOT"]),
						noiselevel=-1)
					continue
				mykey = portage.cpv_getkey(fullpkg)
				try:
					# invalid tbz2's can hurt things.
					self.dbapi.cpv_inject(fullpkg)
					remote_metadata = self.__remotepkgs[mypkg]
					for k, v in remote_metadata.items():
						remote_metadata[k] = v.strip()

					# Eliminate metadata values with names that digestCheck
					# uses, since they are not valid when using the old
					# protocol. Typically this is needed for SIZE metadata
					# which corresponds to the size of the unpacked files
					# rather than the binpkg file size, triggering digest
					# verification failures as reported in bug #303211.
					remote_metadata.pop('SIZE', None)
					for k in portage.checksum.hashfunc_map:
						remote_metadata.pop(k, None)

					self._remotepkgs[fullpkg] = remote_metadata
				except SystemExit as e:
					raise
				except:
					writemsg(_("!!! Failed to inject remote binary package: %s\n") % fullpkg,
						noiselevel=-1)
					del self.__remotepkgs[mypkg]
					continue
		self.populated=1
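
During the local scan, _populate() trusts a Packages index entry only while its recorded MTIME and SIZE still match the .tbz2 on disk, and falls back to reading the xpak otherwise. A minimal sketch of that staleness check, with hypothetical names:

import os
import stat

def index_entry_is_fresh(entry, tbz2_path):
    # 'entry' is one package dict from the Packages index; trust it only if
    # its recorded MTIME and SIZE still match the binary package on disk.
    st = os.lstat(tbz2_path)
    try:
        return (int(entry["MTIME"]) == st[stat.ST_MTIME]
                and int(entry["SIZE"]) == st.st_size)
    except (KeyError, ValueError):
        return False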
Esempio n. 59
0
#!/usr/bin/env python2

import portage

portdb = portage.portdb
portdb.porttrees = [portdb.porttree_root] # exclude overlays
settings = portage.config(clone=portage.settings)
vardb = portage.db[settings['ROOT']]['vartree'].dbapi

for cpv in vardb.cpv_all():
	slot, = vardb.aux_get(cpv, ['SLOT'])
	cp = portage.cpv_getkey(cpv)
	atom = cp
	if slot:
		atom += ":" + slot
	if not portdb.xmatch('match-visible', atom):
		print atom
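
The same orphaned-slot check sketched for Python 3; apart from the print() call, the only adjustment is trimming a possible sub-slot from SLOT so the generated atom stays valid on newer EAPIs. This is an assumption-laden port, not the original script:

#!/usr/bin/env python3
import portage

portdb = portage.portdb
portdb.porttrees = [portdb.porttree_root]  # exclude overlays
settings = portage.config(clone=portage.settings)
vardb = portage.db[settings['ROOT']]['vartree'].dbapi

for cpv in vardb.cpv_all():
    slot, = vardb.aux_get(cpv, ['SLOT'])
    atom = portage.cpv_getkey(cpv)
    if slot:
        # SLOT may be "slot/subslot" on newer EAPIs; keep only the slot part
        atom += ":" + slot.split("/")[0]
    if not portdb.xmatch('match-visible', atom):
        print(atom)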
Esempio n. 60
0
				if dep_pkg in bad_pkgs:
					unsatisfied_dep = True
					bad_pkgs.add(parent)
					break

				if dep_pkg in good_pkgs or dep_pkg in traversed:
					continue

				metadata = dict(zip(metadata_keys,
					portdb.aux_get(dep_pkg, metadata_keys)))

				# If this package isn't the highest visible version
				# in the slot then drop it in order to avoid a slot
				# conflict.
				slot_atom = "%s:%s" % (portage.cpv_getkey(dep_pkg),
					metadata["SLOT"])
				best_visible_slot = portdb.xmatch("bestmatch-visible",
					slot_atom)
				if dep_pkg != best_visible_slot:
					unsatisfied_dep = True
					bad_pkgs.add(dep_pkg)
					bad_pkgs.add(parent)
					break

				dep_str = " ".join(metadata[k] for k in dep_keys)
				settings.setcpv(dep_pkg, mydb=metadata)
				metadata["USE"] = settings["PORTAGE_USE"]
				success, atoms = portage.dep_check(dep_str,
					None, settings, myuse=metadata["USE"].split(),
					trees=portage.db, myroot=settings["ROOT"])