예제 #1
0
def grab_updates(updpath, prev_mtimes=None):
	"""Returns all the updates from the given directory as a sorted list of
	tuples, each containing (file_path, statobj, content).  If prev_mtimes is
	given then only updates with differing mtimes are considered.

	@param updpath: directory containing update files (named like "1Q-2010")
	@param prev_mtimes: optional dict mapping file_path -> previous mtime
	@rtype: list
	@return: list of (file_path, statobj, content) tuples
	@raise DirectoryNotFound: if updpath does not exist
	"""
	try:
		mylist = os.listdir(updpath)
	except OSError as oe:
		if oe.errno == errno.ENOENT:
			raise DirectoryNotFound(updpath)
		raise
	if prev_mtimes is None:
		prev_mtimes = {}
	# validate the file name (filter out CVS directory, etc...)
	mylist = [myfile for myfile in mylist if len(myfile) == 7 and myfile[1:3] == "Q-"]
	if len(mylist) == 0:
		return []

	# update names are mangled to make them sort properly
	mylist = [myfile[3:]+"-"+myfile[:2] for myfile in mylist]
	mylist.sort()
	mylist = [myfile[5:]+"-"+myfile[:4] for myfile in mylist]

	update_data = []
	for myfile in mylist:
		file_path = os.path.join(updpath, myfile)
		mystat = os.stat(file_path)
		# int() instead of the Python-2-only long(); long raises NameError
		# on Python 3.
		if file_path not in prev_mtimes or \
		int(prev_mtimes[file_path]) != mystat[stat.ST_MTIME]:
			# "with" closes the file deterministically, even on read errors.
			with codecs.open(_unicode_encode(file_path,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['repo.content'],
				errors='replace') as f:
				content = f.read()
			update_data.append((file_path, mystat, content))
	return update_data
예제 #2
0
	def _get_all_modules(self):
		"""scans the emaint modules dir for loadable modules

		@rtype: dictionary of module_plugins
		"""
		module_dir = self._module_path
		importables = []
		for entry in os.listdir(module_dir):
			# ignore __init__/__pycache__ files and directories
			if entry.startswith('__'):
				continue
			# a loadable module must contain an __init__.py; lstat()
			# raises when it is missing, in which case we skip the entry
			try:
				os.lstat(os.path.join(module_dir, entry, '__init__.py'))
			except EnvironmentError:
				continue
			importables.append(entry)
		kids = {}
		for entry in importables:
			new_module = Module(entry, self._namepath)
			for kid in new_module.kids.values():
				kid['parent'] = new_module
				kids[kid['name']] = kid
			self.parents.append(entry)
		return kids
예제 #3
0
def hardlock_cleanup(path, remove_all_locks=False):
    """Clean up stale hardlink-based lock files in the given directory.

    Scans 'path' for files named ".<name>.hardlock-<host>-<pid>", groups
    them by locked file and host, and unlinks locks belonging to this
    host (or to every host when remove_all_locks is True).

    @param path: directory to scan for hardlock files
    @param remove_all_locks: when True, remove locks from every host
    @return: list of human-readable result messages

    Note: the unused local ``mypid = str(os.getpid())`` was removed; the
    sibling implementation of this function never had it either.
    """
    myhost = os.uname()[1]
    mydl = os.listdir(path)

    results = []
    mycount = 0

    # mylist maps: locked filename -> host -> list of pids holding a lock
    mylist = {}
    for x in mydl:
        if os.path.isfile(path + "/" + x):
            parts = x.split(".hardlock-")
            if len(parts) == 2:
                # strip the leading "." from the locked file's name
                filename = parts[0][1:]
                hostpid = parts[1].split("-")
                # hostnames may themselves contain "-"; pid is the last part
                host = "-".join(hostpid[:-1])
                pid = hostpid[-1]

                if filename not in mylist:
                    mylist[filename] = {}
                if host not in mylist[filename]:
                    mylist[filename][host] = []
                mylist[filename][host].append(pid)

                mycount += 1

    results.append(_("Found %(count)s locks") % {"count": mycount})

    for x in mylist:
        if myhost in mylist[x] or remove_all_locks:
            mylockname = hardlock_name(path + "/" + x)
            if hardlink_is_mine(mylockname, path+"/"+x) or \
               not os.path.exists(path+"/"+x) or \
              remove_all_locks:
                for y in mylist[x]:
                    for z in mylist[x][y]:
                        filename = path + "/." + x + ".hardlock-" + y + "-" + z
                        if filename == mylockname:
                            continue
                        try:
                            # We're sweeping through, unlinking everyone's locks.
                            os.unlink(filename)
                            results.append(_("Unlinked: ") + filename)
                        except OSError:
                            pass
                try:
                    os.unlink(path + "/" + x)
                    results.append(_("Unlinked: ") + path + "/" + x)
                    os.unlink(mylockname)
                    results.append(_("Unlinked: ") + mylockname)
                except OSError:
                    pass
            else:
                try:
                    os.unlink(mylockname)
                    results.append(_("Unlinked: ") + mylockname)
                except OSError:
                    pass

    return results
예제 #4
0
def grablines(myfilename, recursive=0, remember_source_file=False):
	"""Read all lines from a file, or recursively from a directory tree.

	@param myfilename: file path, or directory path when recursive
	@param recursive: when true, descend into directories (skipping
		hidden files, editor backups and _ignorecvs_dirs entries)
	@param remember_source_file: when True, return (line, filename) tuples
	@return: list of lines, or of (line, filename) tuples
	@raise PermissionDenied: when the file is unreadable for that reason
	"""
	mylines = []
	if recursive and os.path.isdir(myfilename):
		if os.path.basename(myfilename) in _ignorecvs_dirs:
			return mylines
		dirlist = os.listdir(myfilename)
		dirlist.sort()
		for f in dirlist:
			# skip hidden files and editor backups
			if not f.startswith(".") and not f.endswith("~"):
				mylines.extend(grablines(
					os.path.join(myfilename, f), recursive, remember_source_file))
	else:
		try:
			# "with" guarantees the file is closed even if readlines() fails
			with io.open(_unicode_encode(myfilename,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'],
				errors='replace') as myfile:
				if remember_source_file:
					mylines = [(line, myfilename) for line in myfile.readlines()]
				else:
					mylines = myfile.readlines()
		except IOError as e:
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(myfilename)
			# other IO errors are deliberately ignored (best-effort read)
	return mylines
예제 #5
0
def have_ebuild_dir(path, maxdepth=3):
	"""
	Try to figure out if 'path' or a subdirectory contains one or more
	ebuild files named appropriately for their parent directory.
	"""
	search = [(normalize_path(path), 1)]
	while search:
		cur_path, cur_depth = search.pop()
		dir_basename = os.path.basename(cur_path)
		try:
			entries = os.listdir(cur_path)
		except OSError:
			# unreadable or vanished directory: skip it
			continue
		for entry in entries:
			entry_path = os.path.join(cur_path, entry)
			try:
				mode = os.stat(entry_path).st_mode
			except OSError:
				continue
			if stat.S_ISDIR(mode):
				if cur_depth < maxdepth:
					search.append((entry_path, cur_depth + 1))
			elif stat.S_ISREG(mode):
				# a match is named "<dirname>-<something>.ebuild"
				if entry.startswith(dir_basename + "-") and \
					entry.endswith(".ebuild"):
					return os.path.dirname(os.path.dirname(cur_path))
예제 #6
0
def get_glsa_list(myconfig):
    """
    Returns a list of all available GLSAs in the given repository
    by comparing the filelist there with the pattern described in
    the config.

    @type	myconfig: portage.config
    @param	myconfig: Portage settings instance

    @rtype:		List of Strings
    @return:	a list of GLSA IDs in this repository
    """
    rValue = []

    if "GLSA_DIR" in myconfig:
        repository = myconfig["GLSA_DIR"]
    else:
        repository = os.path.join(myconfig["PORTDIR"], "metadata", "glsa")

    if not os.access(repository, os.R_OK):
        return []

    prefix = "glsa-"
    suffix = ".xml"
    # str.startswith/endswith replace the old slice comparisons; slicing
    # can never raise IndexError, so the previous try/except was dead code.
    for f in os.listdir(repository):
        if f.startswith(prefix) and f.endswith(suffix):
            rValue.append(f[len(prefix):-len(suffix)])
    return rValue
예제 #7
0
 def __iter__(self):
     """Generator for walking the dir struct.

     Yields cache keys: paths relative to self.location, skipping
     ".cpickle" files and descending at most one directory level.
     """
     dirs = [(0, self.location)]
     len_base = len(self.location)
     while dirs:
         depth, dir_path = dirs.pop()
         try:
             dir_list = os.listdir(dir_path)
         except OSError as e:
             # a missing directory is tolerated; anything else propagates
             if e.errno != errno.ENOENT:
                 raise
             del e
             continue
         for l in dir_list:
             if l.endswith(".cpickle"):
                 continue
             p = os.path.join(dir_path, l)
             try:
                 st = os.lstat(p)
             except OSError:
                 # Cache entry disappeared.
                 continue
             if stat.S_ISDIR(st.st_mode):
                 # Only recurse 1 deep, in order to avoid iteration over
                 # entries from another nested cache instance. This can
                 # happen if the user nests an overlay inside
                 # /usr/portage/local as in bug #302764.
                 if depth < 1:
                     dirs.append((depth + 1, p))
                 continue
             # +1 skips the path separator following the base location
             yield p[len_base + 1:]
예제 #8
0
def cacheddir(my_original_path,
              ignorecvs,
              ignorelist,
              EmptyOnError,
              followSymlinks=True):
    """List a directory and classify each entry.

    @param my_original_path: directory to list (normalized before use)
    @param ignorecvs: when true, drop ".#*" names and VCS directories
    @param ignorelist: collection of names to exclude from the result
    @param EmptyOnError: not referenced by this implementation; kept for
        interface compatibility
    @param followSymlinks: classify via stat() (follow links) vs lstat()
    @return: ([name, ...], [type, ...]) where type is 0=regular file,
        1=directory, 2=symlink, 3=other or unstatable
    @raise DirectoryNotFound: if the path is not a directory
    @raise PermissionDenied: on EACCES from stat()/listdir()
    """
    mypath = normalize_path(my_original_path)
    try:
        pathstat = os.stat(mypath)
        if not stat.S_ISDIR(pathstat.st_mode):
            raise DirectoryNotFound(mypath)
    except EnvironmentError as e:
        if e.errno == PermissionDenied.errno:
            raise PermissionDenied(mypath)
        del e
        return [], []
    except PortageException:
        return [], []
    else:
        try:
            fpaths = os.listdir(mypath)
        except EnvironmentError as e:
            if e.errno != errno.EACCES:
                raise
            del e
            raise PermissionDenied(mypath)
        ftype = []
        for x in fpaths:
            try:
                if followSymlinks:
                    pathstat = os.stat(mypath + "/" + x)
                else:
                    pathstat = os.lstat(mypath + "/" + x)

                if stat.S_ISREG(pathstat[stat.ST_MODE]):
                    ftype.append(0)
                elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
                    ftype.append(1)
                elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
                    ftype.append(2)
                else:
                    ftype.append(3)
            except (IOError, OSError):
                # entry vanished or is unstatable; classify as "other"
                ftype.append(3)

    if ignorelist or ignorecvs:
        ret_list = []
        ret_ftype = []
        for file_path, file_type in zip(fpaths, ftype):
            if file_path in ignorelist:
                pass
            elif ignorecvs:
                if file_path[:2] != ".#" and not (file_type == 1
                                                  and file_path in VCS_DIRS):
                    ret_list.append(file_path)
                    ret_ftype.append(file_type)
            # NOTE(review): when ignorelist is non-empty but ignorecvs is
            # falsy, entries outside ignorelist are never appended, so the
            # result is empty -- confirm with callers whether intended.
    else:
        ret_list = fpaths
        ret_ftype = ftype

    return ret_list, ret_ftype
예제 #9
0
파일: flat_hash.py 프로젝트: amadio/portage
	def __iter__(self):
		"""Generator for walking the dir struct.

		Yields _pkg_str keys (paths relative to self.location),
		silently skipping entries that fail _pkg_str validation and
		descending at most one directory level.
		"""
		dirs = [(0, self.location)]
		len_base = len(self.location)
		while dirs:
			depth, dir_path = dirs.pop()
			try:
				dir_list = os.listdir(dir_path)
			except OSError as e:
				# missing directories are tolerated; other errors propagate
				if e.errno != errno.ENOENT:
					raise
				del e
				continue
			for l in dir_list:
				p = os.path.join(dir_path, l)
				try:
					st = os.lstat(p)
				except OSError:
					# Cache entry disappeared.
					continue
				if stat.S_ISDIR(st.st_mode):
					# Only recurse 1 deep, in order to avoid iteration over
					# entries from another nested cache instance. This can
					# happen if the user nests an overlay inside
					# /usr/portage/local as in bug #302764.
					if depth < 1:
						dirs.append((depth+1, p))
					continue

				try:
					# +1 skips the separator following the base location
					yield _pkg_str(p[len_base+1:])
				except InvalidData:
					continue
예제 #10
0
def hardlock_cleanup(path, remove_all_locks=False):
	"""Remove stale hardlink lock files found under 'path'.

	Lock files are named ".<file>.hardlock-<host>-<pid>".  Locks held by
	this host (or by any host when remove_all_locks is True) are swept.
	Returns a list of status messages describing what was done.
	"""
	hostname = os.uname()[1]
	messages = []
	count = 0

	# locks maps: locked file name -> host name -> [pid, ...]
	locks = {}
	for entry in os.listdir(path):
		if not os.path.isfile(path + "/" + entry):
			continue
		pieces = entry.split(".hardlock-")
		if len(pieces) != 2:
			continue
		# strip the leading "." from the locked file's name
		target = pieces[0][1:]
		host_and_pid = pieces[1].split("-")
		# hostnames may themselves contain "-"; the pid is the last part
		lock_host = "-".join(host_and_pid[:-1])
		lock_pid = host_and_pid[-1]
		locks.setdefault(target, {}).setdefault(lock_host, []).append(lock_pid)
		count += 1

	messages.append(_("Found %(count)s locks") % {"count": count})

	for target in locks:
		if hostname not in locks[target] and not remove_all_locks:
			continue
		my_lock = hardlock_name(path + "/" + target)
		if hardlink_is_mine(my_lock, path + "/" + target) or \
			not os.path.exists(path + "/" + target) or \
			remove_all_locks:
			for lock_host in locks[target]:
				for lock_pid in locks[target][lock_host]:
					stale = (path + "/." + target + ".hardlock-" +
						lock_host + "-" + lock_pid)
					if stale == my_lock:
						continue
					try:
						# We're sweeping through, unlinking everyone's locks.
						os.unlink(stale)
						messages.append(_("Unlinked: ") + stale)
					except OSError:
						pass
			try:
				os.unlink(path + "/" + target)
				messages.append(_("Unlinked: ") + path + "/" + target)
				os.unlink(my_lock)
				messages.append(_("Unlinked: ") + my_lock)
			except OSError:
				pass
		else:
			try:
				os.unlink(my_lock)
				messages.append(_("Unlinked: ") + my_lock)
			except OSError:
				pass

	return messages
예제 #11
0
파일: __init__.py 프로젝트: sifive/portage
def getTestNames(path):
    """Return the sorted, extension-stripped names of test modules in *path*.

    A test module is any directory entry whose name starts with "test"
    and ends with ".py"; the trailing ".py" is removed from each name.
    """
    candidates = (entry for entry in os.listdir(path)
                  if entry.startswith("test") and entry.endswith(".py"))
    return sorted(name[:-3] for name in candidates)
예제 #12
0
def _prepare_fake_distdir(settings, alist):
	"""Populate ${PORTAGE_BUILDDIR}/distdir with symlinks into DISTDIR.

	Creates the fake distdir if needed, removes anything that should not
	be there, and (re)creates one symlink per name in alist pointing at
	the corresponding file in the real DISTDIR.

	@param settings: mapping providing "DISTDIR" and "PORTAGE_BUILDDIR"
	@param alist: iterable of distfile names to expose
	"""
	orig_distdir = settings["DISTDIR"]
	edpath = os.path.join(settings["PORTAGE_BUILDDIR"], "distdir")
	portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0o755)

	# Remove any unexpected files or directories.
	for x in os.listdir(edpath):
		symlink_path = os.path.join(edpath, x)
		st = os.lstat(symlink_path)
		if x in alist and stat.S_ISLNK(st.st_mode):
			continue
		if stat.S_ISDIR(st.st_mode):
			shutil.rmtree(symlink_path)
		else:
			os.unlink(symlink_path)

	# Check for existing symlinks and recreate if necessary.
	for x in alist:
		symlink_path = os.path.join(edpath, x)
		target = os.path.join(orig_distdir, x)
		try:
			link_target = os.readlink(symlink_path)
		except OSError:
			# missing (or not a symlink): create it
			os.symlink(target, symlink_path)
		else:
			# symlink exists but points elsewhere: repoint it
			if link_target != target:
				os.unlink(symlink_path)
				os.symlink(target, symlink_path)
예제 #13
0
    def _get_all_modules(self):
        """scans the _module_path dir for loadable modules

        @rtype: dictionary of module_plugins
        """
        module_dir = self._module_path
        names = os.listdir(module_dir)

        def _a_real_module(entry):
            # A loadable module is a package directory containing
            # __init__.py; lstat() raises when it is missing.
            try:
                os.lstat(os.path.join(module_dir, entry, "__init__.py"))
            except EnvironmentError:
                return False
            return True

        # The importables list cannot be a generator.
        # If it was a generator, it would be consumed by self.parents.extend()
        # and the following for loop wouldn't have anything to iterate with.
        importables = [
            entry for entry in names
            if not entry.startswith("__") and _a_real_module(entry)
        ]
        self.parents.extend(importables)

        kids = {}
        for entry in importables:
            new_module = Module(entry, self._namepath)
            self._check_compat(new_module)
            for module_name in new_module.kids:
                kid = new_module.kids[module_name]
                # each plugin records its owning module
                kid["parent"] = new_module
                kids[kid["name"]] = kid
        return kids
예제 #14
0
파일: module.py 프로젝트: vsgroyper/portage
    def _get_all_modules(self):
        """scans the _module_path dir for loadable modules

        @rtype: dictionary of module_plugins
        """
        module_dir = self._module_path
        importables = []
        names = os.listdir(module_dir)
        for entry in names:
            # skip any __init__ or __pycache__ files or directories
            if entry.startswith('__'):
                continue
            try:
                # a loadable module is a directory containing __init__.py;
                # lstat() raises and the entry is skipped when it is missing
                os.lstat(os.path.join(module_dir, entry, '__init__.py'))
                importables.append(entry)
            except EnvironmentError:
                pass
        kids = {}
        for entry in importables:
            new_module = Module(entry, self._namepath)
            self._check_compat(new_module)
            for module_name in new_module.kids:
                kid = new_module.kids[module_name]
                # each plugin records its owning module
                kid['parent'] = new_module
                kids[kid['name']] = kid
            self.parents.append(entry)
        return kids
예제 #15
0
def collect_ebuild_messages(path):
	"""Collect elog messages generated by the bash logging function stored
	at 'path'.

	Each file in 'path' is named after the ebuild phase that produced it
	and contains "msgtype msg" lines; consecutive lines with the same
	type are grouped into runs.  The files are removed afterwards.

	@return: dict mapping phase name -> [(msgtype, [msg, ...]), ...]
	"""
	mylogfiles = None
	try:
		mylogfiles = os.listdir(path)
	except OSError:
		pass
	# shortcut for packages without any messages
	if not mylogfiles:
		return {}
	# exploit listdir() file order so we process log entries in chronological order
	mylogfiles.reverse()
	logentries = {}
	for msgfunction in mylogfiles:
		filename = os.path.join(path, msgfunction)
		if msgfunction not in EBUILD_PHASES:
			writemsg(_("!!! can't process invalid log file: %s\n") % filename,
				noiselevel=-1)
			continue
		if msgfunction not in logentries:
			logentries[msgfunction] = []
		lastmsgtype = None
		msgcontent = []
		# "with" ensures the log file is closed even if parsing raises
		# (the old code only closed it on the success path)
		with io.open(_unicode_encode(filename,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'],
			errors='replace') as f:
			for l in f:
				l = l.rstrip('\n')
				if not l:
					continue
				try:
					msgtype, msg = l.split(" ", 1)
				except ValueError:
					writemsg(_("!!! malformed entry in "
						"log file: '%s'\n") % filename, noiselevel=-1)
					continue

				if lastmsgtype is None:
					lastmsgtype = msgtype

				if msgtype == lastmsgtype:
					msgcontent.append(msg)
				else:
					# message type changed: flush the accumulated run
					if msgcontent:
						logentries[msgfunction].append((lastmsgtype, msgcontent))
					msgcontent = [msg]
				lastmsgtype = msgtype
		if msgcontent:
			logentries[msgfunction].append((lastmsgtype, msgcontent))

	# clean logfiles to avoid repetitions
	for f in mylogfiles:
		try:
			os.unlink(os.path.join(path, f))
		except OSError:
			pass
	return logentries
예제 #16
0
파일: glsa.py 프로젝트: entoo/portage-src
def get_glsa_list(myconfig):
	"""
	Returns a list of all available GLSAs in the given repository
	by comparing the filelist there with the pattern described in
	the config.

	@type	myconfig: portage.config
	@param	myconfig: Portage settings instance

	@rtype:		List of Strings
	@return:	a list of GLSA IDs in this repository
	"""
	rValue = []

	if "GLSA_DIR" in myconfig:
		repository = myconfig["GLSA_DIR"]
	else:
		repository = os.path.join(myconfig["PORTDIR"], "metadata", "glsa")

	if not os.access(repository, os.R_OK):
		return []

	prefix = "glsa-"
	suffix = ".xml"
	# str.startswith/endswith replace the old slice comparisons; slicing
	# can never raise IndexError, so the previous try/except was dead code.
	for f in os.listdir(repository):
		if f.startswith(prefix) and f.endswith(suffix):
			rValue.append(f[len(prefix):-len(suffix)])
	return rValue
예제 #17
0
def have_ebuild_dir(path, maxdepth=3):
	"""
	Try to figure out if 'path' or a subdirectory contains one or more
	ebuild files named appropriately for their parent directory.

	Performs an iterative depth-limited search; returns the grandparent
	directory of the first matching package dir, or None if none found.
	"""
	stack = [(normalize_path(path), 1)]
	while stack:
		path, depth = stack.pop()
		basename = os.path.basename(path)
		try:
			listdir = os.listdir(path)
		except OSError:
			# unreadable or vanished directory: skip it
			continue
		for filename in listdir:
			abs_filename = os.path.join(path, filename)
			try:
				st = os.stat(abs_filename)
			except OSError:
				# entry disappeared; ignore
				continue
			if stat.S_ISDIR(st.st_mode):
				if depth < maxdepth:
					stack.append((abs_filename, depth + 1))
			elif stat.S_ISREG(st.st_mode):
				# a match is named "<dirname>-<something>.ebuild"
				if filename.endswith(".ebuild") and \
					filename.startswith(basename + "-"):
					return os.path.dirname(os.path.dirname(path))
예제 #18
0
def fixdbentries(update_iter, dbdir, eapi=None, parent=None):
    """Performs update commands which result in search and replace operations
    for each of the files in dbdir (excluding CONTENTS and environment.bz2).
    Returns True when actual modifications are necessary and False otherwise.

    @param update_iter: update commands to apply
    @param dbdir: package database directory to rewrite
    @param eapi: optional EAPI passed through to update_dbentries()
    @param parent: optional parent atom passed through to update_dbentries()
    """

    # Deprecated entry point; retained only for backward compatibility.
    warnings.warn("portage.update.fixdbentries() is deprecated",
                  DeprecationWarning,
                  stacklevel=2)

    # Read every db file (minus ignored entries) into memory...
    mydata = {}
    for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
        file_path = os.path.join(dbdir, myfile)
        with io.open(_unicode_encode(file_path,
                                     encoding=_encodings['fs'],
                                     errors='strict'),
                     mode='r',
                     encoding=_encodings['repo.content'],
                     errors='replace') as f:
            mydata[myfile] = f.read()
    # ...apply the update commands in memory...
    updated_items = update_dbentries(update_iter,
                                     mydata,
                                     eapi=eapi,
                                     parent=parent)
    # ...and atomically write back only the files that changed.
    for myfile, mycontent in updated_items.items():
        file_path = os.path.join(dbdir, myfile)
        write_atomic(file_path, mycontent, encoding=_encodings['repo.content'])
    return len(updated_items) > 0
예제 #19
0
def FindPackagesToScan(settings, startdir, reposplit):
    """ Try to find packages that need to be scanned

    Args:
        settings - portage.config instance, preferably repoman_settings
        startdir - directory that repoman was run in
        reposplit - root of the repository
    Returns:
        A list of directories to scan
    """

    def AddPackagesInDir(path):
        """ Given a directory, return the package dirs found inside it """
        ret = []
        for d in os.listdir(path):
            # skip VCS bookkeeping and hidden entries
            if d == "CVS" or d.startswith("."):
                continue
            p = os.path.join(path, d)

            if os.path.isdir(p):
                # keep only the trailing category/package path segments
                cat_pkg_dir = os.path.join(*p.split(os.path.sep)[-2:])
                logging.debug("adding %s to scanlist" % cat_pkg_dir)
                ret.append(cat_pkg_dir)
        return ret

    scanlist = []
    repolevel = len(reposplit)
    if repolevel == 1:  # root of the tree, startdir = repodir
        for cat in settings.categories:
            path = os.path.join(startdir, cat)
            if not os.path.isdir(path):
                continue
            # dead "pkgdirs = os.listdir(path)" removed: its result was
            # never used and AddPackagesInDir() lists the directory itself
            scanlist.extend(AddPackagesInDir(path))
    elif repolevel == 2:  # category level, startdir = catdir
        # we only want 1 segment of the directory, is why we use catdir instead of startdir
        catdir = reposplit[-2]
        if catdir not in settings.categories:
            # logging.warning(): warn() is a deprecated alias
            logging.warning(
                "%s is not a valid category according to profiles/categories, "
                "skipping checks in %s" % (catdir, catdir)
            )
        else:
            scanlist = AddPackagesInDir(catdir)
    elif repolevel == 3:  # pkgdir level, startdir = pkgdir
        catdir = reposplit[-2]
        pkgdir = reposplit[-1]
        if catdir not in settings.categories:
            logging.warning(
                "%s is not a valid category according to profiles/categories, "
                "skipping checks in %s" % (catdir, catdir)
            )
        else:
            path = os.path.join(catdir, pkgdir)
            logging.debug("adding %s to scanlist" % path)
            scanlist.append(path)
    return scanlist
예제 #20
0
def collect_ebuild_messages(path):
	"""Collect elog messages generated by the bash logging function stored
	at 'path'.

	Each file in 'path' is named after the ebuild phase that produced it
	and contains "msgtype msg" lines; consecutive lines with the same
	type are grouped into runs.  The files are removed afterwards.

	@return: dict mapping phase name -> [(msgtype, [msg, ...]), ...]
	"""
	mylogfiles = None
	try:
		mylogfiles = os.listdir(path)
	except OSError:
		pass
	# shortcut for packages without any messages
	if not mylogfiles:
		return {}
	# exploit listdir() file order so we process log entries in chronological order
	mylogfiles.reverse()
	logentries = {}
	for msgfunction in mylogfiles:
		filename = os.path.join(path, msgfunction)
		if msgfunction not in EBUILD_PHASES:
			writemsg(_("!!! can't process invalid log file: %s\n") % filename,
				noiselevel=-1)
			continue
		if msgfunction not in logentries:
			logentries[msgfunction] = []
		lastmsgtype = None
		msgcontent = []
		# "with" closes the stream deterministically (the old code iterated
		# the codecs stream directly and never closed it)
		with codecs.open(_unicode_encode(filename,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'],
			errors='replace') as f:
			for l in f:
				# strip the newline: previously lines kept their trailing
				# "\n", so blank lines were never skipped ("\n" is truthy,
				# so they hit the ValueError branch and produced a bogus
				# "malformed entry" warning) and every stored message
				# carried a newline, unlike the io.open-based variant
				l = l.rstrip('\n')
				if not l:
					continue
				try:
					msgtype, msg = l.split(" ", 1)
				except ValueError:
					writemsg(_("!!! malformed entry in "
						"log file: '%s'\n") % filename, noiselevel=-1)
					continue

				if lastmsgtype is None:
					lastmsgtype = msgtype

				if msgtype == lastmsgtype:
					msgcontent.append(msg)
				else:
					# message type changed: flush the accumulated run
					if msgcontent:
						logentries[msgfunction].append((lastmsgtype, msgcontent))
					msgcontent = [msg]
				lastmsgtype = msgtype
		if msgcontent:
			logentries[msgfunction].append((lastmsgtype, msgcontent))

	# clean logfiles to avoid repetitions
	for f in mylogfiles:
		try:
			os.unlink(os.path.join(path, f))
		except OSError:
			pass
	return logentries
예제 #21
0
파일: listdir.py 프로젝트: Spencerx/portage
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
	"""List a directory and classify each entry.

	@param my_original_path: directory to list (normalized before use)
	@param ignorecvs: when true, drop ".#*" names and VCS directories
	@param ignorelist: collection of names to exclude from the result
	@param EmptyOnError: not referenced by this implementation; kept for
		interface compatibility
	@param followSymlinks: classify via stat() (follow links) vs lstat()
	@return: ([name, ...], [type, ...]) where type is 0=regular file,
		1=directory, 2=symlink, 3=other or unstatable
	@raise DirectoryNotFound: if the path is not a directory
	@raise PermissionDenied: on EACCES from stat()/listdir()
	"""
	mypath = normalize_path(my_original_path)
	try:
		pathstat = os.stat(mypath)
		if not stat.S_ISDIR(pathstat.st_mode):
			raise DirectoryNotFound(mypath)
	except EnvironmentError as e:
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied(mypath)
		del e
		return [], []
	except PortageException:
		return [], []
	else:
		try:
			fpaths = os.listdir(mypath)
		except EnvironmentError as e:
			if e.errno != errno.EACCES:
				raise
			del e
			raise PermissionDenied(mypath)
		ftype = []
		for x in fpaths:
			try:
				if followSymlinks:
					pathstat = os.stat(mypath+"/"+x)
				else:
					pathstat = os.lstat(mypath+"/"+x)

				if stat.S_ISREG(pathstat[stat.ST_MODE]):
					ftype.append(0)
				elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
					ftype.append(1)
				elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
					ftype.append(2)
				else:
					ftype.append(3)
			except (IOError, OSError):
				# entry vanished or is unstatable; classify as "other"
				ftype.append(3)

	if ignorelist or ignorecvs:
		ret_list = []
		ret_ftype = []
		for file_path, file_type in zip(fpaths, ftype):
			if file_path in ignorelist:
				pass
			elif ignorecvs:
				if file_path[:2] != ".#" and \
					not (file_type == 1 and file_path in VCS_DIRS):
					ret_list.append(file_path)
					ret_ftype.append(file_type)
			# NOTE(review): when ignorelist is non-empty but ignorecvs is
			# falsy, entries outside ignorelist are never appended, leaving
			# the result empty -- confirm with callers whether intended.
	else:
		ret_list = fpaths
		ret_ftype = ftype

	return ret_list, ret_ftype
예제 #22
0
파일: utilities.py 프로젝트: lmtwga/portage
def FindPackagesToScan(settings, startdir, reposplit):
    """ Try to find packages that need to be scanned

    Args:
        settings - portage.config instance, preferably repoman_settings
        startdir - directory that repoman was run in
        reposplit - root of the repository
    Returns:
        A list of directories to scan
    """
    def AddPackagesInDir(path):
        """ Given a directory, return the package dirs found inside it """
        ret = []
        for d in os.listdir(path):
            # skip VCS bookkeeping and hidden entries
            if d == 'CVS' or d.startswith('.'):
                continue
            p = os.path.join(path, d)

            if os.path.isdir(p):
                # keep only the trailing category/package path segments
                cat_pkg_dir = os.path.join(*p.split(os.path.sep)[-2:])
                logging.debug('adding %s to scanlist' % cat_pkg_dir)
                ret.append(cat_pkg_dir)
        return ret

    scanlist = []
    repolevel = len(reposplit)
    if repolevel == 1:  # root of the tree, startdir = repodir
        for cat in settings.categories:
            path = os.path.join(startdir, cat)
            if not os.path.isdir(path):
                continue
            # dead "pkgdirs = os.listdir(path)" removed: its result was
            # never used and AddPackagesInDir() lists the directory itself
            scanlist.extend(AddPackagesInDir(path))
    elif repolevel == 2:  # category level, startdir = catdir
        # we only want 1 segment of the directory, is why we use catdir instead of startdir
        catdir = reposplit[-2]
        if catdir not in settings.categories:
            logging.warning('%s is not a valid category according to '
                            'profiles/categories, skipping checks in %s' %
                            (catdir, catdir))
        else:
            scanlist = AddPackagesInDir(catdir)
    elif repolevel == 3:  # pkgdir level, startdir = pkgdir
        catdir = reposplit[-2]
        pkgdir = reposplit[-1]
        if catdir not in settings.categories:
            logging.warning('%s is not a valid category according to '
                            'profiles/categories, skipping checks in %s' %
                            (catdir, catdir))
        else:
            path = os.path.join(catdir, pkgdir)
            logging.debug('adding %s to scanlist' % path)
            scanlist.append(path)
    return scanlist
예제 #23
0
    def commit_update(self):
        """Atomically publish the current update as a new snapshot.

        Moves the in-progress update directory into the snapshots dir
        under the next numeric id, then atomically repoints the "latest"
        symlink (and the user-location symlink, if necessary) at it.

        NOTE(review): implemented as a generator-based coroutine
        (coroutine_return()/yield); callers presumably drive it through
        the project's coroutine machinery -- confirm before converting.
        """
        update_location = self.current_update
        self._update_location = None
        try:
            # existing snapshots are entries with integer names
            snapshots = [int(name) for name in os.listdir(self._snapshots_dir)]
        except OSError:
            # first snapshot: create the dir, copying base permissions
            snapshots = []
            portage.util.ensure_dirs(self._snapshots_dir)
            portage.util.apply_stat_permissions(
                self._snapshots_dir, os.stat(self._storage_location))
        if snapshots:
            new_id = max(snapshots) + 1
        else:
            new_id = 1
        os.rename(update_location,
                  os.path.join(self._snapshots_dir, str(new_id)))
        # replace the "latest" symlink atomically via a .new temp name
        new_symlink = self._latest_symlink + '.new'
        try:
            os.unlink(new_symlink)
        except OSError:
            pass
        os.symlink('snapshots/{}'.format(new_id), new_symlink)

        # If SyncManager.pre_sync creates an empty directory where
        # self._latest_symlink is suppose to be (which is normal if
        # sync-rcu-store-dir has been removed), then we need to remove
        # the directory or else rename will raise IsADirectoryError
        # when we try to replace the directory with a symlink.
        try:
            os.rmdir(self._latest_symlink)
        except OSError:
            pass

        os.rename(new_symlink, self._latest_symlink)

        try:
            user_location_correct = os.path.samefile(self._user_location,
                                                     self._latest_symlink)
        except OSError:
            user_location_correct = False

        if not user_location_correct:
            # repoint the user-visible location at the latest symlink,
            # again via an atomic rename of a temporary .new symlink
            new_symlink = self._user_location + '.new'
            try:
                os.unlink(new_symlink)
            except OSError:
                pass
            os.symlink(self._latest_symlink, new_symlink)
            os.rename(new_symlink, self._user_location)

        coroutine_return()
        yield None
예제 #24
0
	def _scan_cat(self, cat):
		"""Populate self._items with the repos providing each package of
		category 'cat', then mark the category as scanned."""
		for repo in self._repo_list:
			cat_dir = "%s/%s" % (repo.location, cat)
			try:
				entries = os.listdir(cat_dir)
			except OSError as e:
				# category absent from this repo (or stale NFS): skip it
				if e.errno not in (errno.ENOTDIR, errno.ENOENT, errno.ESTALE):
					raise
				continue
			for entry in entries:
				# only directories count as packages
				if os.path.isdir(cat_dir + "/" + entry):
					self._items[cat + "/" + entry].append(repo)
		self._scanned_cats.add(cat)
예제 #25
0
파일: porttree.py 프로젝트: gentoo/portage
	def _scan_cat(self, cat):
		"""Record, in self._items, which repos contain each package of
		category 'cat', then mark the category as scanned."""
		for repo in self._repo_list:
			cat_dir = repo.location + "/" + cat
			try:
				pkg_list = os.listdir(cat_dir)
			except OSError as e:
				# category absent from this repo (or stale NFS): skip it
				if e.errno not in (errno.ENOTDIR, errno.ENOENT, errno.ESTALE):
					raise
				continue
			for p in pkg_list:
				# only directories count as packages
				if os.path.isdir(cat_dir + "/" + p):
					self._items[cat + "/" + p].append(repo)
		self._scanned_cats.add(cat)
예제 #26
0
		def modify_files(dir_path):
			"""Append a timestamp marker to each regular file in dir_path
			and recreate each symlink with the marker appended to its
			target string."""
			for entry in os.listdir(dir_path):
				entry_path = os.path.join(dir_path, entry)
				mode = os.lstat(entry_path).st_mode
				if stat.S_ISREG(mode):
					with io.open(entry_path, mode='a',
						encoding=_encodings["stdio"]) as handle:
						handle.write("modified at %d\n" % time.time())
				elif stat.S_ISLNK(mode):
					# symlinks cannot be edited in place: remove + recreate
					target = os.readlink(entry_path)
					os.unlink(entry_path)
					os.symlink(target +
						" modified at %d" % time.time(), entry_path)
예제 #27
0
 def modify_files(dir_path):
     """Append a timestamp marker to every regular file in dir_path and
     rewrite every symlink with the marker appended to its target."""
     for name in os.listdir(dir_path):
         path = os.path.join(dir_path, name)
         st = os.lstat(path)
         if stat.S_ISREG(st.st_mode):
             with io.open(path, mode='a',
                          encoding=_encodings["stdio"]) as f:
                 f.write("modified at %d\n" % time.time())
         elif stat.S_ISLNK(st.st_mode):
             # symlinks cannot be edited in place: remove and recreate
             old_dest = os.readlink(path)
             os.unlink(path)
             os.symlink(old_dest + " modified at %d" % time.time(),
                        path)
예제 #28
0
def new_protect_filename(mydest, newmd5=None, force=False):
	"""Resolves a config-protect filename for merging, optionally
	using the last filename if the md5 matches. If force is True,
	then a new filename will be generated even if mydest does not
	exist yet.
	(dest,md5) ==> 'string'            --- path_to_target_filename
	(dest)     ==> ('next', 'highest') --- next_target and most-recent_target
	"""

	# config protection filename format:
	# ._cfg0000_foo
	# 0123456789012

	os = _os_merge

	if not force and not os.path.exists(mydest):
		return mydest

	real_filename = os.path.basename(mydest)
	real_dirname = os.path.dirname(mydest)

	# Find the highest-numbered existing ._cfgNNNN_<file> entry.
	prot_num = -1
	last_pfile = ""
	for candidate in os.listdir(real_dirname):
		if candidate[0:5] != "._cfg":
			continue
		if candidate[10:] != real_filename:
			continue
		try:
			num = int(candidate[5:9])
		except ValueError:
			continue
		if num > prot_num:
			prot_num = num
			last_pfile = candidate
	prot_num += 1

	new_pfile = normalize_path(os.path.join(real_dirname,
		"._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
	old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
	if last_pfile and newmd5:
		# Reuse the newest existing protect file when its md5 already
		# matches the content about to be merged.
		try:
			if portage.checksum._perform_md5_merge(old_pfile) == newmd5:
				return old_pfile
		except FileNotFound:
			# The file suddenly disappeared or it's a broken symlink.
			pass
	return new_pfile
예제 #29
0
	def AddPackagesInDir(path):
		"""Return the category/package identifiers for every package
		directory found directly under *path*."""
		found = []
		for entry in os.listdir(path):
			# Skip VCS bookkeeping and hidden entries.
			if entry == 'CVS' or entry.startswith('.'):
				continue
			full_path = os.path.join(path, entry)
			if not os.path.isdir(full_path):
				continue
			# The identifier is the last two path components: cat/pkg.
			cat_pkg_dir = os.path.join(*full_path.split(os.path.sep)[-2:])
			logging.debug('adding %s to scanlist' % cat_pkg_dir)
			found.append(cat_pkg_dir)
		return found
예제 #30
0
	def AddPackagesInDir(path):
		"""Return category/package names for package dirs inside *path*."""
		ret = []
		pkgdirs = os.listdir(path)
		for d in pkgdirs:
			# Skip VCS bookkeeping and hidden entries.
			if d == 'CVS' or d.startswith('.'):
				continue
			p = os.path.join(path, d)

			if os.path.isdir(p):
				# Identify the package by its last two path
				# components, i.e. category/package.
				cat_pkg_dir = os.path.join(*p.split(os.path.sep)[-2:])
				logging.debug('adding %s to scanlist' % cat_pkg_dir)
				ret.append(cat_pkg_dir)
		return ret
예제 #31
0
def _calc_changelog(ebuildpath, current, next):
    """Return the ChangeLog nodes between *current* and *next*.

    @param ebuildpath: path to the ebuild whose ChangeLog is consulted
    @param current: cpv of the installed version
    @param next: cpv of the version about to be merged ("next" shadows
        the builtin, but the name is kept for backward compatibility)
    @return: list of ChangeLog nodes newer than *current*, truncated to
        start at *next* when present, or [] when nothing can be found
    """
    # "is None" is the correct identity test (was "== None").
    if ebuildpath is None or not os.path.exists(ebuildpath):
        return []
    # Reduce cpvs to "version[-rN]" form, dropping a redundant -r0.
    current = "-".join(catpkgsplit(current)[1:])
    if current.endswith("-r0"):
        current = current[:-3]
    next = "-".join(catpkgsplit(next)[1:])
    if next.endswith("-r0"):
        next = next[:-3]

    changelogdir = os.path.dirname(ebuildpath)
    changelogs = ["ChangeLog"]
    # ChangeLog-YYYY (see bug #389611)
    changelogs.extend(sorted((fn for fn in os.listdir(changelogdir) if fn.startswith("ChangeLog-")), reverse=True))

    divisions = []
    found_current = False
    for fn in changelogs:
        changelogpath = os.path.join(changelogdir, fn)
        try:
            with io.open(
                _unicode_encode(changelogpath, encoding=_encodings["fs"], errors="strict"),
                mode="r",
                encoding=_encodings["repo.content"],
                errors="replace",
            ) as f:
                changelog = f.read()
        except EnvironmentError:
            return []
        # Collect nodes until the currently-installed version is seen.
        for node in _find_changelog_tags(changelog):
            if node[0] == current:
                found_current = True
                break
            else:
                divisions.append(node)
        if found_current:
            break

    if not found_current:
        return []

    # skip entries for all revisions above the one we are about to emerge
    for i in range(len(divisions)):
        if divisions[i][0] == next:
            divisions = divisions[i:]
            break

    return divisions
예제 #32
0
def _calc_changelog(ebuildpath, current, next):
	"""Return the ChangeLog nodes between *current* and *next*.

	@param ebuildpath: path to the ebuild whose ChangeLog is consulted
	@param current: cpv of the installed version
	@param next: cpv of the version about to be merged ("next" shadows
		the builtin, but the name is kept for backward compatibility)
	@return: list of ChangeLog nodes newer than *current*, truncated to
		start at *next* when present, or [] when nothing can be found
	"""
	# "is None" is the correct identity test (was "== None").
	if ebuildpath is None or not os.path.exists(ebuildpath):
		return []
	# Reduce cpvs to "version[-rN]" form, dropping a redundant -r0.
	current = '-'.join(catpkgsplit(current)[1:])
	if current.endswith('-r0'):
		current = current[:-3]
	next = '-'.join(catpkgsplit(next)[1:])
	if next.endswith('-r0'):
		next = next[:-3]

	changelogdir = os.path.dirname(ebuildpath)
	changelogs = ['ChangeLog']
	# ChangeLog-YYYY (see bug #389611)
	changelogs.extend(sorted((fn for fn in os.listdir(changelogdir)
		if fn.startswith('ChangeLog-')), reverse=True))

	divisions = []
	found_current = False
	for fn in changelogs:
		changelogpath = os.path.join(changelogdir, fn)
		try:
			with io.open(_unicode_encode(changelogpath,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['repo.content'],
				errors='replace') as f:
				changelog = f.read()
		except EnvironmentError:
			return []
		# Collect nodes until the currently-installed version is seen.
		for node in _find_changelog_tags(changelog):
			if node[0] == current:
				found_current = True
				break
			else:
				divisions.append(node)
		if found_current:
			break

	if not found_current:
		return []

	# skip entries for all revisions above the one we are about to emerge
	for i in range(len(divisions)):
		if divisions[i][0] == next:
			divisions = divisions[i:]
			break

	return divisions
예제 #33
0
	def __iter__(self):
		"""Walk the directory tree rooted at self.location, yielding
		each file's path relative to that root (".cpickle" files are
		skipped)."""
		pending = [self.location]
		prefix_len = len(self.location)
		while pending:
			current = pending[0]
			for entry in os.listdir(current):
				if entry.endswith(".cpickle"):
					continue
				full = os.path.join(current, entry)
				# Directories are queued for a later pass (breadth-first).
				if stat.S_ISDIR(os.lstat(full).st_mode):
					pending.append(full)
				else:
					yield full[prefix_len + 1:]
			pending.pop(0)
예제 #34
0
	def assertExists(self, path):
		"""Make sure |path| exists"""
		if os.path.exists(path):
			return
		# Walk up the tree, recording which ancestor first exists, so
		# the failure message shows where the path breaks.
		trail = ['path is missing: %s' % (path,)]
		while path != '/':
			path = os.path.dirname(path)
			if not path:
				# If we're given something like "foo", abort once we get to "".
				break
			present = os.path.exists(path)
			trail.append('\tos.path.exists(%s): %s' % (path, present))
			if present:
				trail.append('\tcontents: %r' % os.listdir(path))
				break
		raise self.failureException('\n'.join(trail))
예제 #35
0
파일: __init__.py 프로젝트: sifive/portage
	def assertExists(self, path):
		"""Make sure |path| exists"""
		if not os.path.exists(path):
			# Walk up the tree recording which ancestor first exists,
			# to make the failure message actionable.
			msg = ['path is missing: %s' % (path,)]
			while path != '/':
				path = os.path.dirname(path)
				if not path:
					# If we're given something like "foo", abort once we get to "".
					break
				result = os.path.exists(path)
				msg.append('\tos.path.exists(%s): %s' % (path, result))
				if result:
					# First existing ancestor: show its contents and stop.
					msg.append('\tcontents: %r' % os.listdir(path))
					break
			raise self.failureException('\n'.join(msg))
예제 #36
0
파일: merges.py 프로젝트: steeznson/portage
	def _scan(self, onProgress=None):
		"""
		Scan the file system for failed merges and return any found.

		@param onProgress: function to call for updating progress
		@type onProgress: Function
		@rtype: dict
		@return: dictionary mapping cat/pkg names to their mtimes
		"""
		failed = {}
		for cat in os.listdir(self._vardb_path):
			cat_path = os.path.join(self._vardb_path, cat)
			# Stray files directly under the vardb root are ignored.
			if not os.path.isdir(cat_path):
				continue
			entries = os.listdir(cat_path)
			total = len(entries)
			for index, entry in enumerate(entries, start=1):
				if onProgress:
					onProgress(total, index)
				if MERGING_IDENTIFIER not in entry:
					continue
				# MERGING_IDENTIFIER in the name flags a merge that
				# never completed; record it with its mtime.
				mtime = int(os.stat(os.path.join(cat_path, entry)).st_mtime)
				failed[os.path.join(cat, entry)] = mtime
		return failed
예제 #37
0
 def __iter__(self):
     """generator for walking the dir struct"""
     # Breadth-first walk starting at the cache root; ".cpickle"
     # serialization artifacts are not part of the tree.
     dirs = [self.location]
     len_base = len(self.location)
     while len(dirs):
         for l in os.listdir(dirs[0]):
             if l.endswith(".cpickle"):
                 continue
             p = os.path.join(dirs[0], l)
             st = os.lstat(p)
             if stat.S_ISDIR(st.st_mode):
                 # Queue subdirectories for a later pass.
                 dirs.append(p)
                 continue
             # Yield the path relative to the cache root.
             yield p[len_base + 1:]
         dirs.pop(0)
예제 #38
0
	def _scan(self, onProgress=None):
		"""
		Scan the file system for failed merges and return any found.

		@param onProgress: function to call for updating progress
		@type onProgress: Function
		@rtype: dict
		@return: dictionary of packages that failed to merges
		"""
		failed_pkgs = {}
		for cat in os.listdir(self._vardb_path):
			pkgs_path = os.path.join(self._vardb_path, cat)
			# Stray files directly under the vardb root are ignored.
			if not os.path.isdir(pkgs_path):
				continue
			pkgs = os.listdir(pkgs_path)
			maxval = len(pkgs)
			for i, pkg in enumerate(pkgs):
				if onProgress:
					onProgress(maxval, i+1)
				# MERGING_IDENTIFIER in the name flags a merge that
				# never completed (see docstring).
				if MERGING_IDENTIFIER in pkg:
					mtime = int(os.stat(os.path.join(pkgs_path, pkg)).st_mtime)
					pkg = os.path.join(cat, pkg)
					failed_pkgs[pkg] = mtime
		return failed_pkgs
예제 #39
0
def grab_updates(updpath, prev_mtimes=None):
    """Returns all the updates from the given directory as a sorted list of
    tuples, each containing (file_path, statobj, content).  If prev_mtimes is
    given then updates are only returned if one or more files have different
    mtimes. When a change is detected for a given file, updates will be
    returned for that file and any files that come after it in the entire
    sequence. This ensures that all relevant updates are returned for cases
    in which the destination package of an earlier move corresponds to
    the source package of a move that comes somewhere later in the entire
    sequence of files.
    """
    try:
        mylist = os.listdir(updpath)
    except OSError as oe:
        if oe.errno == errno.ENOENT:
            raise DirectoryNotFound(updpath)
        raise
    if prev_mtimes is None:
        prev_mtimes = {}
    # validate the file name (filter out CVS directory, etc...)
    mylist = [
        myfile for myfile in mylist if len(myfile) == 7 and myfile[1:3] == "Q-"
    ]
    if len(mylist) == 0:
        return []

    # update names are mangled to make them sort properly
    mylist = [myfile[3:] + "-" + myfile[:2] for myfile in mylist]
    mylist.sort()
    mylist = [myfile[5:] + "-" + myfile[:4] for myfile in mylist]

    update_data = []
    for myfile in mylist:
        file_path = os.path.join(updpath, myfile)
        mystat = os.stat(file_path)
        # Once one file has changed, include it and every later file,
        # since later moves may chain off earlier ones.
        if update_data or \
           file_path not in prev_mtimes or \
           int(prev_mtimes[file_path]) != mystat[stat.ST_MTIME]:
            # int() replaces the Python 2 long() builtin, and the context
            # manager closes the handle even if read() raises.
            with io.open(_unicode_encode(file_path,
                encoding=_encodings['fs'], errors='strict'),
                mode='r', encoding=_encodings['repo.content'],
                errors='replace') as f:
                content = f.read()
            update_data.append((file_path, mystat, content))
    return update_data
예제 #40
0
def collect_binaries_from_dir(dirs, mask, logger):
    """Collect executable regular files beneath the given directories.

    *mask* lists paths (single files or whole directories) excluded from
    scanning.  Symlinked directories are not descended into, and
    symlinked files are never reported.  Returns a set of binary paths.
    """
    # Directories discovered at this level; recursed into once at the
    # end to keep the number of function calls down.
    subdirs = set()
    binaries = set()

    exec_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH

    for directory in dirs:
        if directory in mask:
            continue
        try:
            for name in os.listdir(directory):
                full_path = os.path.join(directory, name)
                if full_path in mask or name in mask:
                    continue
                if os.path.isdir(full_path):
                    # Never descend into symlinked directories.
                    if not os.path.islink(full_path):
                        subdirs.add(full_path)
                elif os.path.isfile(full_path):
                    # Links to binaries are redundant, so only plain
                    # files are checked for an executable bit.
                    if os.path.islink(full_path):
                        continue
                    mode = os.stat(full_path)[stat.ST_MODE]
                    if mode & exec_bits:
                        binaries.add(full_path)
        except Exception as ex:
            logger.debug('\t' +
                         yellow('Exception during binaries collecting: ' +
                                blue('%s') % str(ex)))

    if subdirs:
        binaries.update(collect_binaries_from_dir(subdirs, mask, logger))

    return binaries
예제 #41
0
def fixdbentries(update_iter, dbdir):
	"""Performs update commands which result in search and replace operations
	for each of the files in dbdir (excluding CONTENTS and environment.bz2).
	Returns True when actual modifications are necessary and False otherwise."""
	mydata = {}
	# Read every dbentry file; ignored_dbentries excludes CONTENTS etc.
	for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
		file_path = os.path.join(dbdir, myfile)
		# Context manager closes the handle promptly instead of
		# relying on garbage collection (was an unclosed open()).
		with codecs.open(_unicode_encode(file_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'],
			errors='replace') as f:
			mydata[myfile] = f.read()
	updated_items = update_dbentries(update_iter, mydata)
	# Write back only the files whose content actually changed.
	for myfile, mycontent in updated_items.items():
		file_path = os.path.join(dbdir, myfile)
		write_atomic(file_path, mycontent, encoding=_encodings['repo.content'])
	return len(updated_items) > 0
예제 #42
0
def collect_binaries_from_dir(dirs, mask, logger):
	''' Collects all binaries from the specified list of directories.
		mask is a list of paths omitted from scanning; each entry may
		be either a single file or an entire directory.
		Returns the set of binaries found.
	'''

	# contains list of directories found
	# allows us to reduce number of fnc calls
	found_directories = set()
	found_files = set()

	for _dir in dirs:
		if _dir in mask:
			continue

		try:
			for _listing in os.listdir(_dir):
				listing = os.path.join(_dir, _listing)
				if listing in mask or _listing in mask:
					continue

				if os.path.isdir(listing):
					if os.path.islink(listing):
						#we do not want scan symlink-directories
						pass
					else:
						found_directories.add(listing)
				elif os.path.isfile(listing):
					# we're looking for binaries
					# and with binaries we do not need links
					# thus we can optimize a bit
					if not os.path.islink(listing):
						prv = os.stat(listing)[stat.ST_MODE]
						# any executable bit (user/group/other) qualifies
						if prv & stat.S_IXUSR == stat.S_IXUSR or \
								prv & stat.S_IXGRP == stat.S_IXGRP or \
								prv & stat.S_IXOTH == stat.S_IXOTH:
							found_files.add(listing)
		except Exception as ex:
			# best-effort: log and keep scanning the remaining dirs
			logger.debug('\t' +
				yellow('Exception during binaries collecting: '+
				blue('%s') %str(ex)))

	if found_directories:
		# recurse once for the whole batch of subdirectories
		found_files.update(collect_binaries_from_dir(found_directories, mask, logger))

	return found_files
예제 #43
0
	def garbage_collection(self):
		"""Remove expired snapshots, yielding one _check_call result
		per removal.  The newest snapshot plus self._spare_snapshots
		more are always kept; the rest are removed once older than
		self._ttl_days days."""
		ttl = datetime.timedelta(days=self._ttl_days)
		ids = sorted(int(name) for name in os.listdir(self._snapshots_dir))
		# always preserve the latest snapshot (plus any spares)
		keep = self._spare_snapshots + 1
		ids = ids[:max(len(ids) - keep, 0)]
		for snap_id in ids:
			snap_path = os.path.join(self._snapshots_dir, str(snap_id))
			try:
				mtime = os.stat(snap_path).st_mtime
			except OSError:
				# Already gone; nothing to clean up.
				continue
			age = datetime.datetime.utcnow() \
				- datetime.datetime.utcfromtimestamp(mtime)
			if age < ttl:
				continue
			yield self._check_call(['rm', '-rf', snap_path])
예제 #44
0
def fixdbentries(update_iter, dbdir):
    """Performs update commands which result in search and replace operations
	for each of the files in dbdir (excluding CONTENTS and environment.bz2).
	Returns True when actual modifications are necessary and False otherwise."""
    mydata = {}
    # Read every dbentry file; ignored_dbentries excludes CONTENTS etc.
    for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
        file_path = os.path.join(dbdir, myfile)
        # NOTE(review): the handle returned by codecs.open() is never
        # explicitly closed; CPython refcounting closes it, but a
        # "with" block would be more robust.
        mydata[myfile] = codecs.open(_unicode_encode(file_path,
                                                     encoding=_encodings['fs'],
                                                     errors='strict'),
                                     mode='r',
                                     encoding=_encodings['repo.content'],
                                     errors='replace').read()
    updated_items = update_dbentries(update_iter, mydata)
    # Write back only the files whose content actually changed.
    for myfile, mycontent in updated_items.items():
        file_path = os.path.join(dbdir, myfile)
        write_atomic(file_path, mycontent, encoding=_encodings['repo.content'])
    return len(updated_items) > 0
예제 #45
0
	def _scan_cat(self, cat):
		"""Index, per valid cat/pkg atom, the repositories providing it,
		then mark category *cat* as scanned."""
		for repo in self._repo_list:
			cat_dir = repo.location + "/" + cat
			try:
				entries = os.listdir(cat_dir)
			except OSError as e:
				# Missing or non-directory categories are skipped.
				if e.errno in (errno.ENOTDIR, errno.ENOENT, errno.ESTALE):
					continue
				raise
			for entry in entries:
				try:
					atom = Atom("%s/%s" % (cat, entry))
				except InvalidAtom:
					continue
				# Only plain cat/pkg names (atom equals its cp) qualify.
				if atom == atom.cp:
					self._items[atom.cp].append(repo)
		self._scanned_cats.add(cat)
예제 #46
0
    def update_eclasses(self):
        """Rebuild the eclass name -> hashed_path maps from disk.

        Scans each porttree's "eclass" directory; an overlay eclass that
        is mtime-identical to the master copy is skipped in favor of the
        master entry.
        """
        self.eclasses = {}
        self._eclass_locations = {}
        master_eclasses = {}
        eclass_len = len(".eclass")
        ignored_listdir_errnos = (errno.ENOENT, errno.ENOTDIR)
        for x in [
                normalize_path(os.path.join(y, "eclass"))
                for y in self.porttrees
        ]:
            try:
                eclass_filenames = os.listdir(x)
            except OSError as e:
                # A tree without an eclass dir is fine; permission
                # problems are reported; anything else propagates.
                if e.errno in ignored_listdir_errnos:
                    del e
                    continue
                elif e.errno == PermissionDenied.errno:
                    raise PermissionDenied(x)
                raise
            for y in eclass_filenames:
                if not y.endswith(".eclass"):
                    continue
                obj = hashed_path(os.path.join(x, y))
                obj.eclass_dir = x
                try:
                    mtime = obj.mtime
                except FileNotFound:
                    # Raced against removal; skip this eclass.
                    continue
                # Strip the ".eclass" suffix to get the eclass name.
                ys = y[:-eclass_len]
                if x == self._master_eclass_root:
                    master_eclasses[ys] = mtime
                    self.eclasses[ys] = obj
                    self._eclass_locations[ys] = x
                    continue

                master_mtime = master_eclasses.get(ys)
                if master_mtime is not None:
                    if master_mtime == mtime:
                        # It appears to be identical to the master,
                        # so prefer the master entry.
                        continue

                self.eclasses[ys] = obj
                self._eclass_locations[ys] = x
예제 #47
0
파일: update.py 프로젝트: entoo/portage-src
def grab_updates(updpath, prev_mtimes=None):
	"""Returns all the updates from the given directory as a sorted list of
	tuples, each containing (file_path, statobj, content).  If prev_mtimes is
	given then updates are only returned if one or more files have different
	mtimes. When a change is detected for a given file, updates will be
	returned for that file and any files that come after it in the entire
	sequence. This ensures that all relevant updates are returned for cases
	in which the destination package of an earlier move corresponds to
	the source package of a move that comes somewhere later in the entire
	sequence of files.
	"""
	try:
		mylist = os.listdir(updpath)
	except OSError as oe:
		if oe.errno == errno.ENOENT:
			raise DirectoryNotFound(updpath)
		raise
	if prev_mtimes is None:
		prev_mtimes = {}
	# validate the file name (filter out CVS directory, etc...)
	mylist = [myfile for myfile in mylist if len(myfile) == 7 and myfile[1:3] == "Q-"]
	if len(mylist) == 0:
		return []

	# update names are mangled to make them sort properly
	mylist = [myfile[3:]+"-"+myfile[:2] for myfile in mylist]
	mylist.sort()
	mylist = [myfile[5:]+"-"+myfile[:4] for myfile in mylist]

	update_data = []
	for myfile in mylist:
		file_path = os.path.join(updpath, myfile)
		mystat = os.stat(file_path)
		# Once any file has changed, include it and every later file,
		# since later moves may chain off earlier ones.
		if update_data or \
			file_path not in prev_mtimes or \
			int(prev_mtimes[file_path]) != mystat[stat.ST_MTIME]:
			# int() replaces the Python 2 long() builtin, and the context
			# manager closes the handle even if read() raises.
			with io.open(_unicode_encode(file_path,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['repo.content'],
				errors='replace') as f:
				content = f.read()
			update_data.append((file_path, mystat, content))
	return update_data
예제 #48
0
 def garbage_collection(self):
     """Generator: remove snapshots older than the TTL, yielding the
     result of self._check_call for each removal.  The newest snapshot
     plus self._spare_snapshots more are always preserved."""
     snap_ttl = datetime.timedelta(days=self._ttl_days)
     snapshots = sorted(
         int(name) for name in os.listdir(self._snapshots_dir))
     # always preserve the latest snapshot
     protect_count = self._spare_snapshots + 1
     while snapshots and protect_count:
         protect_count -= 1
         snapshots.pop()
     for snap_id in snapshots:
         snap_path = os.path.join(self._snapshots_dir, str(snap_id))
         try:
             st = os.stat(snap_path)
         except OSError:
             # Snapshot vanished; nothing to remove.
             continue
         snap_timestamp = datetime.datetime.utcfromtimestamp(st.st_mtime)
         if (datetime.datetime.utcnow() - snap_timestamp) < snap_ttl:
             continue
         yield self._check_call(['rm', '-rf', snap_path])
예제 #49
0
파일: fetch.py 프로젝트: gentoo/portage
def _checksum_failure_temp_file(settings, distdir, basename):
	"""
	First try to find a duplicate temp file with the same checksum and return
	that filename if available. Otherwise, use mkstemp to create a new unique
	filename._checksum_failure_.$RANDOM, rename the given file, and return the
	new filename. In any case, filename will be renamed or removed before this
	function returns a temp filename.
	"""

	filename = os.path.join(distdir, basename)
	# Strip the in-progress download suffix, if any, to recover the
	# distfile's canonical name.
	normal_basename = basename
	if basename.endswith(_download_suffix):
		normal_basename = basename[:-len(_download_suffix)]
	size = os.stat(filename).st_size
	checksum = None
	tempfile_re = re.compile(re.escape(normal_basename) + r'\._checksum_failure_\..*')
	for candidate in os.listdir(distdir):
		if not tempfile_re.match(candidate):
			continue
		candidate = os.path.join(distdir, candidate)
		# A size mismatch rules the candidate out cheaply, before
		# paying for an md5 computation.
		try:
			candidate_size = os.stat(candidate).st_size
		except OSError:
			continue
		if candidate_size != size:
			continue
		try:
			candidate_md5 = perform_md5(candidate)
		except FileNotFound:
			# Apparently the temp file disappeared. Let it go.
			continue
		if checksum is None:
			# Hash the original lazily, only once a plausible match exists.
			checksum = perform_md5(filename)
		if candidate_md5 == checksum:
			os.unlink(filename)
			return candidate

	fd, temp_filename = tempfile.mkstemp(
		"", normal_basename + "._checksum_failure_.", distdir)
	os.close(fd)
	_movefile(filename, temp_filename, mysettings=settings)
	return temp_filename
예제 #50
0
def _checksum_failure_temp_file(settings, distdir, basename):
	"""
	First try to find a duplicate temp file with the same checksum and return
	that filename if available. Otherwise, use mkstemp to create a new unique
	filename._checksum_failure_.$RANDOM, rename the given file, and return the
	new filename. In any case, filename will be renamed or removed before this
	function returns a temp filename.
	"""

	filename = os.path.join(distdir, basename)
	# Strip the in-progress download suffix (if any) to recover the
	# distfile's canonical name.
	if basename.endswith(_download_suffix):
		normal_basename = basename[:-len(_download_suffix)]
	else:
		normal_basename = basename
	size = os.stat(filename).st_size
	# md5 of the failed file, computed lazily on the first size match.
	checksum = None
	tempfile_re = re.compile(re.escape(normal_basename) + r'\._checksum_failure_\..*')
	for temp_filename in os.listdir(distdir):
		if not tempfile_re.match(temp_filename):
			continue
		temp_filename = os.path.join(distdir, temp_filename)
		# Compare sizes first: much cheaper than hashing.
		try:
			if size != os.stat(temp_filename).st_size:
				continue
		except OSError:
			continue
		try:
			temp_checksum = perform_md5(temp_filename)
		except FileNotFound:
			# Apparently the temp file disappeared. Let it go.
			continue
		if checksum is None:
			checksum = perform_md5(filename)
		if checksum == temp_checksum:
			# Duplicate found: drop the new failure, reuse the old file.
			os.unlink(filename)
			return temp_filename

	fd, temp_filename = \
		tempfile.mkstemp("", normal_basename + "._checksum_failure_.", distdir)
	os.close(fd)
	_movefile(filename, temp_filename, mysettings=settings)
	return temp_filename
예제 #51
0
def getTests(path, base_path):
	"""Return TestSuites loaded from every test*.py module under *path*.

	path is the path to a given subdir ( 'portage/' for example );
	files named test*.py there are imported relative to the
	portage.tests package and handed to unittest's loader.
	"""
	module_names = [name[:-3] for name in os.listdir(path)
		if name.startswith("test") and name.endswith(".py")]
	# Path of *path* relative to *base_path* becomes the subpackage.
	parent_path = path[len(base_path) + 1:]
	parent_module = ".".join(("portage", "tests", parent_path))
	parent_module = parent_module.replace('/', '.')
	suites = []
	for name in module_names:
		# Make the trailing / a . for module importing
		mod = my_import(".".join((parent_module, name)))
		suites.append(unittest.TestLoader().loadTestsFromModule(mod))
	return suites
예제 #52
0
def getTests(path, base_path):
	"""

	path is the path to a given subdir ( 'portage/' for example)
	This does a simple filter on files in that dir to give us modules
	to import

	"""
	# Candidate modules: files named test*.py, with ".py" stripped.
	files = os.listdir(path)
	files = [ f[:-3] for f in files if f.startswith("test") and f.endswith(".py") ]
	# Path of *path* relative to *base_path* becomes the subpackage.
	parent_path = path[len(base_path)+1:]
	parent_module = ".".join(("portage", "tests", parent_path))
	parent_module = parent_module.replace('/', '.')
	result = []
	for mymodule in files:
		# Make the trailing / a . for module importing
		modname = ".".join((parent_module, mymodule))
		mod = my_import(modname)
		result.append(unittest.TestLoader().loadTestsFromModule(mod))
	return result
예제 #53
0
def grab_updates(updpath, prev_mtimes=None):
    """Returns all the updates from the given directory as a sorted list of
    tuples, each containing (file_path, statobj, content).  If prev_mtimes is
    given then updates are only returned if one or more files have different
    mtimes. When a change is detected for a given file, updates will be
    returned for that file and any files that come after it in the entire
    sequence. This ensures that all relevant updates are returned for cases
    in which the destination package of an earlier move corresponds to
    the source package of a move that comes somewhere later in the entire
    sequence of files.
    """
    try:
        mylist = os.listdir(updpath)
    except OSError as oe:
        if oe.errno == errno.ENOENT:
            raise DirectoryNotFound(updpath)
        raise
    if prev_mtimes is None:
        prev_mtimes = {}

    update_data = []
    # NOTE(review): os.listdir order is arbitrary, yet the docstring
    # promises a sorted result -- confirm whether callers rely on order.
    for myfile in mylist:
        # Skip hidden files and anything that is not a regular file.
        if myfile.startswith("."):
            continue
        file_path = os.path.join(updpath, myfile)
        mystat = os.stat(file_path)
        if not stat.S_ISREG(mystat.st_mode):
            continue
        # The -1 default forces files unseen in prev_mtimes to be
        # treated as changed.
        if int(prev_mtimes.get(file_path, -1)) != mystat[stat.ST_MTIME]:
            # Context manager closes the handle even if read() raises.
            with io.open(
                _unicode_encode(file_path,
                                encoding=_encodings["fs"],
                                errors="strict"),
                mode="r",
                encoding=_encodings["repo.content"],
                errors="replace",
            ) as f:
                content = f.read()
            update_data.append((file_path, mystat, content))
    return update_data
예제 #54
0
	def update_eclasses(self):
		"""Rebuild self.eclasses and self._eclass_locations from disk.

		Every porttree's "eclass" directory is scanned; an overlay
		eclass whose mtime matches the master copy is ignored in favor
		of the master entry.
		"""
		self.eclasses = {}
		self._eclass_locations = {}
		master_eclasses = {}
		suffix_len = len(".eclass")
		ignorable_errnos = (errno.ENOENT, errno.ENOTDIR)
		eclass_dirs = [normalize_path(os.path.join(tree, "eclass"))
			for tree in self.porttrees]
		for eclass_dir in eclass_dirs:
			try:
				filenames = os.listdir(eclass_dir)
			except OSError as e:
				# A tree without an eclass dir is fine; permission
				# problems are reported; anything else propagates.
				if e.errno in ignorable_errnos:
					continue
				if e.errno == PermissionDenied.errno:
					raise PermissionDenied(eclass_dir)
				raise
			for filename in filenames:
				if not filename.endswith(".eclass"):
					continue
				entry = hashed_path(os.path.join(eclass_dir, filename))
				entry.eclass_dir = eclass_dir
				try:
					mtime = entry.mtime
				except FileNotFound:
					# Raced against removal; skip this eclass.
					continue
				name = filename[:-suffix_len]
				if eclass_dir == self._master_eclass_root:
					master_eclasses[name] = mtime
					self.eclasses[name] = entry
					self._eclass_locations[name] = eclass_dir
					continue
				# Identical mtime to the master copy: keep the master.
				if master_eclasses.get(name) == mtime:
					continue
				self.eclasses[name] = entry
				self._eclass_locations[name] = eclass_dir
예제 #55
0
	def commit_update(self):
		"""Promote the current update to a new numbered snapshot and
		repoint the "latest" and user-location symlinks at it.

		Runs as a generator-based coroutine; coroutine_return() ends it.
		"""
		update_location = self.current_update
		self._update_location = None
		try:
			snapshots = [int(name) for name in os.listdir(self._snapshots_dir)]
		except OSError:
			# First commit: create the snapshots dir with permissions
			# copied from the storage root.
			snapshots = []
			portage.util.ensure_dirs(self._snapshots_dir)
			portage.util.apply_stat_permissions(self._snapshots_dir,
				os.stat(self._storage_location))
		if snapshots:
			new_id = max(snapshots) + 1
		else:
			new_id = 1
		os.rename(update_location, os.path.join(self._snapshots_dir, str(new_id)))
		# Atomically repoint "latest" by renaming a freshly made link.
		new_symlink = self._latest_symlink + '.new'
		try:
			os.unlink(new_symlink)
		except OSError:
			pass
		os.symlink('snapshots/{}'.format(new_id), new_symlink)
		os.rename(new_symlink, self._latest_symlink)

		try:
			user_location_correct = os.path.samefile(self._user_location, self._latest_symlink)
		except OSError:
			user_location_correct = False

		if not user_location_correct:
			# Repoint the user-visible location the same atomic way.
			new_symlink = self._user_location + '.new'
			try:
				os.unlink(new_symlink)
			except OSError:
				pass
			os.symlink(self._latest_symlink, new_symlink)
			os.rename(new_symlink, self._user_location)

		coroutine_return()
		yield None
예제 #56
0
파일: update.py 프로젝트: entoo/portage-src
def fixdbentries(update_iter, dbdir, eapi=None, parent=None):
	"""Performs update commands which result in search and replace operations
	for each of the files in dbdir (excluding CONTENTS and environment.bz2).
	Returns True when actual modifications are necessary and False otherwise."""

	warnings.warn("portage.update.fixdbentries() is deprecated",
		DeprecationWarning, stacklevel=2)

	mydata = {}
	for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
		file_path = os.path.join(dbdir, myfile)
		with io.open(_unicode_encode(file_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'],
			errors='replace') as f:
			mydata[myfile] = f.read()
	updated_items = update_dbentries(update_iter, mydata,
		eapi=eapi, parent=parent)
	for myfile, mycontent in updated_items.items():
		file_path = os.path.join(dbdir, myfile)
		write_atomic(file_path, mycontent, encoding=_encodings['repo.content'])
	return len(updated_items) > 0
예제 #57
0
def grablines(myfilename,recursive=0):
	"""Return the lines of myfilename as a list.  With recursive enabled,
	a directory argument is walked (sorted, skipping hidden and backup
	files) and the lines of every contained file are concatenated."""
	result = []
	if recursive and os.path.isdir(myfilename):
		# NOTE(review): this compares the full path, not the basename,
		# against VCS directory names — presumably only matters when the
		# top-level argument is itself one of these; confirm with callers.
		if myfilename in ["RCS", "CVS", "SCCS"]:
			return result
		for entry in sorted(os.listdir(myfilename)):
			if entry.startswith(".") or entry.endswith("~"):
				continue
			result.extend(grablines(
				os.path.join(myfilename, entry), recursive))
	else:
		try:
			handle = codecs.open(_unicode_encode(myfilename,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace')
			result = handle.readlines()
			handle.close()
		except IOError as e:
			# Permission problems are promoted to a typed exception;
			# any other read error yields an empty result.
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(myfilename)
	return result
예제 #58
0
	def cp_list(self, mycp, use_cache=1, mytree=None):
		"""Return a sorted list of all cpvs available for the given
		category/package atom, scanning either mytree or all porttrees.
		When the dbapi is frozen, results are served from and stored into
		self.xcache."""
		# Frozen-cache fast path: hand back a copy so callers cannot
		# mutate the cached list.
		if self.frozen and mytree is None:
			cached = self.xcache["cp-list"].get(mycp)
			if cached is not None:
				# Try to propagate this to the match-all cache here for
				# repoman since he uses separate match-all caches for each
				# profile (due to old-style virtuals). Do not propagate
				# old-style virtuals since cp_list() doesn't expand them.
				if cached or not mycp.startswith("virtual/"):
					self.xcache["match-all"][mycp] = cached
				return cached[:]
		parts = mycp.split("/")
		invalid_category = parts[0] not in self._categories
		glep55 = 'parse-eapi-glep-55' in self.doebuild_settings.features
		# Use dict keys as an ordered, de-duplicating set of cpvs.
		seen = {}
		trees = [mytree] if mytree else self.porttrees
		for oroot in trees:
			try:
				entries = os.listdir(os.path.join(oroot, mycp))
			except OSError:
				continue
			for entry in entries:
				pf = None
				if glep55:
					pf, eapi = portage._split_ebuild_name_glep55(entry)
				elif entry.endswith('.ebuild'):
					pf = entry[:-7]
				if pf is None:
					continue
				ps = pkgsplit(pf)
				# Reject names that don't split or whose package part
				# disagrees with the directory name.
				if not ps or ps[0] != parts[1]:
					writemsg(_("\nInvalid ebuild name: %s\n") % \
						os.path.join(oroot, mycp, entry), noiselevel=-1)
					continue
				ver_match = ver_regexp.match("-".join(ps[1:]))
				if ver_match is None or not ver_match.groups():
					writemsg(_("\nInvalid ebuild version: %s\n") % \
						os.path.join(oroot, mycp, entry), noiselevel=-1)
					continue
				seen[parts[0] + "/" + pf] = None
		if invalid_category and seen:
			writemsg(_("\n!!! '%s' has a category that is not listed in " \
				"%setc/portage/categories\n") % \
				(mycp, self.settings["PORTAGE_CONFIGROOT"]), noiselevel=-1)
			mylist = []
		else:
			mylist = list(seen)
		# Always sort in ascending order here since it's handy
		# and the result can be easily cached and reused.
		self._cpv_sort_ascending(mylist)
		if self.frozen and mytree is None:
			cachelist = mylist[:]
			self.xcache["cp-list"][mycp] = cachelist
			# Do not propagate old-style virtuals since
			# cp_list() doesn't expand them.
			if cachelist or not mycp.startswith("virtual/"):
				self.xcache["match-all"][mycp] = cachelist
		return mylist