Example 1
    def testEbuildFetch(self):

        user_config = {
            "make.conf": ('GENTOO_MIRRORS="{scheme}://{host}:{port}"', ),
        }

        distfiles = {
            'bar': b'bar\n',
            'foo': b'foo\n',
        }

        ebuilds = {
            'dev-libs/A-1': {
                'EAPI': '7',
                'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
					{scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
            },
        }

        loop = SchedulerInterface(global_event_loop())

        scheme = 'http'
        host = '127.0.0.1'
        content = {}

        with AsyncHTTPServer(host, content, loop) as server:
            ebuilds_subst = {}
            for cpv, metadata in ebuilds.items():
                metadata = metadata.copy()
                metadata['SRC_URI'] = metadata['SRC_URI'].format(
                    scheme=scheme, host=host, port=server.server_port)
                ebuilds_subst[cpv] = metadata

            user_config_subst = user_config.copy()
            for configname, configdata in user_config.items():

                configdata_sub = []
                for line in configdata:
                    configdata_sub.append(
                        line.format(scheme=scheme,
                                    host=host,
                                    port=server.server_port))
                user_config_subst[configname] = tuple(configdata_sub)

            playground = ResolverPlayground(ebuilds=ebuilds_subst,
                                            distfiles=distfiles,
                                            user_config=user_config_subst)
            ro_distdir = tempfile.mkdtemp()
            try:
                self._testEbuildFetch(loop, scheme, host, distfiles, ebuilds,
                                      content, server, playground, ro_distdir)
            finally:
                shutil.rmtree(ro_distdir)
                playground.cleanup()
Example 2
def spawn_nofetch(portdb, ebuild_path, settings=None, fd_pipes=None):
    """
	Create a SpawnNofetchWithoutBuilddir instance and execute it synchronously.
	This function must not be called from asynchronous code, since it will
	trigger event loop recursion which is incompatible with asyncio.
	"""
    nofetch = SpawnNofetchWithoutBuilddir(background=False,
                                          portdb=portdb,
                                          ebuild_path=ebuild_path,
                                          scheduler=SchedulerInterface(
                                              asyncio._safe_loop()),
                                          fd_pipes=fd_pipes,
                                          settings=settings)

    nofetch.start()
    return nofetch.wait()
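
Because this wrapper blocks on nofetch.wait(), it is only safe from synchronous call sites. A minimal usage sketch follows; the portdb setup and the package atom are illustrative assumptions, not part of the example above:

import portage

# Hypothetical synchronous call site: look up an ebuild through a
# portdbapi instance, then run its pkg_nofetch phase via the wrapper.
portdb = portage.portdbapi()
ebuild_path = portdb.findname('dev-libs/A-1')  # hypothetical atom
if ebuild_path is not None:
    rval = spawn_nofetch(portdb, ebuild_path)
    print('pkg_nofetch exited with %s' % rval)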
Example 3
    def __init__(self, main=False, event_loop=None):
        """
		@param main: If True then use global_event_loop(), otherwise use
			a local EventLoop instance (default is False, for safe use in
			a non-main thread)
		@type main: bool
		"""
        self._terminated = threading.Event()
        self._terminated_tasks = False
        self._max_jobs = 1
        self._max_load = None
        self._scheduling = False
        self._background = False
        if event_loop is not None:
            self._event_loop = event_loop
        elif main:
            self._event_loop = global_event_loop()
        else:
            self._event_loop = EventLoop(main=False)
        self._sched_iface = SchedulerInterface(
            self._event_loop, is_background=self._is_background)
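
The precedence here is: an explicitly passed loop wins, then the global loop when the caller declares itself main-thread safe, and finally a private EventLoop. A standalone sketch of the same selection pattern using only the standard library (all names below are illustrative, not Portage APIs):

import asyncio
import threading

def select_event_loop(main=False, event_loop=None):
    # Mirrors the precedence above: explicit loop first, then a shared
    # loop for the main thread, then a private loop for worker threads.
    if event_loop is not None:
        return event_loop
    if main and threading.current_thread() is threading.main_thread():
        return asyncio.get_event_loop_policy().get_event_loop()
    return asyncio.new_event_loop()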
Example 4
def emirrordist_main(args):

	# The calling environment is ignored, so the program is
	# completely controlled by commandline arguments.
	env = {}

	if not sys.stdout.isatty():
		portage.output.nocolor()
		env['NOCOLOR'] = 'true'

	parser, options, args = parse_args(args)

	if options.version:
		sys.stdout.write("Portage %s\n" % portage.VERSION)
		return os.EX_OK

	config_root = options.config_root

	if options.repositories_configuration is not None:
		env['PORTAGE_REPOSITORIES'] = options.repositories_configuration

	settings = portage.config(config_root=config_root,
		local_config=False, env=env)

	default_opts = None
	if not options.ignore_default_opts:
		default_opts = settings.get('EMIRRORDIST_DEFAULT_OPTS', '').split()

	if default_opts:
		parser, options, args = parse_args(default_opts + args)

		settings = portage.config(config_root=config_root,
			local_config=False, env=env)

	if options.repo is None:
		if len(settings.repositories.prepos) == 2:
			for repo in settings.repositories:
				if repo.name != "DEFAULT":
					options.repo = repo.name
					break

		if options.repo is None:
			parser.error("--repo option is required")

	repo_path = settings.repositories.treemap.get(options.repo)
	if repo_path is None:
		parser.error("Unable to locate repository named '%s'" % (options.repo,))

	if options.jobs is not None:
		options.jobs = int(options.jobs)

	if options.load_average is not None:
		options.load_average = float(options.load_average)

	if options.failure_log is not None:
		options.failure_log = normalize_path(
			os.path.abspath(options.failure_log))

		parent_dir = os.path.dirname(options.failure_log)
		if not (os.path.isdir(parent_dir) and
			os.access(parent_dir, os.W_OK|os.X_OK)):
			parser.error(("--failure-log '%s' parent is not a "
				"writable directory") % options.failure_log)

	if options.success_log is not None:
		options.success_log = normalize_path(
			os.path.abspath(options.success_log))

		parent_dir = os.path.dirname(options.success_log)
		if not (os.path.isdir(parent_dir) and
			os.access(parent_dir, os.W_OK|os.X_OK)):
			parser.error(("--success-log '%s' parent is not a "
				"writable directory") % options.success_log)

	if options.scheduled_deletion_log is not None:
		options.scheduled_deletion_log = normalize_path(
			os.path.abspath(options.scheduled_deletion_log))

		parent_dir = os.path.dirname(options.scheduled_deletion_log)
		if not (os.path.isdir(parent_dir) and
			os.access(parent_dir, os.W_OK|os.X_OK)):
			parser.error(("--scheduled-deletion-log '%s' parent is not a "
				"writable directory") % options.scheduled_deletion_log)

		if options.deletion_db is None:
			parser.error("--scheduled-deletion-log requires --deletion-db")

	if options.deletion_delay is not None:
		options.deletion_delay = int(options.deletion_delay)
		if options.deletion_db is None:
			parser.error("--deletion-delay requires --deletion-db")

	if options.deletion_db is not None:
		if options.deletion_delay is None:
			parser.error("--deletion-db requires --deletion-delay")
		options.deletion_db = normalize_path(
			os.path.abspath(options.deletion_db))

	if options.temp_dir is not None:
		options.temp_dir = normalize_path(
			os.path.abspath(options.temp_dir))

		if not (os.path.isdir(options.temp_dir) and
			os.access(options.temp_dir, os.W_OK|os.X_OK)):
			parser.error(("--temp-dir '%s' is not a "
				"writable directory") % options.temp_dir)

	if options.distfiles is not None:
		options.distfiles = normalize_path(
			os.path.abspath(options.distfiles))

		if not (os.path.isdir(options.distfiles) and
			os.access(options.distfiles, os.W_OK|os.X_OK)):
			parser.error(("--distfiles '%s' is not a "
				"writable directory") % options.distfiles)
	else:
		parser.error("missing required --distfiles parameter")

	if options.mirror_overrides is not None:
		options.mirror_overrides = normalize_path(
			os.path.abspath(options.mirror_overrides))

		if not (os.access(options.mirror_overrides, os.R_OK) and
			os.path.isfile(options.mirror_overrides)):
			parser.error(
				"--mirror-overrides-file '%s' is not a readable file" %
				options.mirror_overrides)

	if options.distfiles_local is not None:
		options.distfiles_local = normalize_path(
			os.path.abspath(options.distfiles_local))

		if not (os.path.isdir(options.distfiles_local) and
			os.access(options.distfiles_local, os.W_OK|os.X_OK)):
			parser.error(("--distfiles-local '%s' is not a "
				"writable directory") % options.distfiles_local)

	if options.distfiles_db is not None:
		options.distfiles_db = normalize_path(
			os.path.abspath(options.distfiles_db))

	if options.tries is not None:
		options.tries = int(options.tries)

	if options.recycle_dir is not None:
		options.recycle_dir = normalize_path(
			os.path.abspath(options.recycle_dir))
		if not (os.path.isdir(options.recycle_dir) and
			os.access(options.recycle_dir, os.W_OK|os.X_OK)):
			parser.error(("--recycle-dir '%s' is not a "
				"writable directory") % options.recycle_dir)

	if options.recycle_db is not None:
		if options.recycle_dir is None:
			parser.error("--recycle-db requires "
				"--recycle-dir to be specified")
		options.recycle_db = normalize_path(
			os.path.abspath(options.recycle_db))

	if options.recycle_deletion_delay is not None:
		options.recycle_deletion_delay = int(options.recycle_deletion_delay)

	if options.fetch_log_dir is not None:
		options.fetch_log_dir = normalize_path(
			os.path.abspath(options.fetch_log_dir))

		if not (os.path.isdir(options.fetch_log_dir) and
			os.access(options.fetch_log_dir, os.W_OK|os.X_OK)):
			parser.error(("--fetch-log-dir '%s' is not a "
				"writable directory") % options.fetch_log_dir)

	if options.whitelist_from:
		normalized_paths = []
		for x in options.whitelist_from:
			path = normalize_path(os.path.abspath(x))
			if not os.access(path, os.R_OK):
				parser.error("--whitelist-from '%s' is not readable" % x)
			if os.path.isfile(path):
				normalized_paths.append(path)
			elif os.path.isdir(path):
				for file in _recursive_file_list(path):
					if not os.access(file, os.R_OK):
						parser.error("--whitelist-from '%s' directory contains not readable file '%s'" % (x, file))
					normalized_paths.append(file)
			else:
				parser.error("--whitelist-from '%s' is not a regular file or a directory" % x)
		options.whitelist_from = normalized_paths

	if options.strict_manifests is not None:
		if options.strict_manifests == "y":
			settings.features.add("strict")
		else:
			settings.features.discard("strict")

	settings.lock()

	portdb = portage.portdbapi(mysettings=settings)

	# Limit ebuilds to the specified repo.
	portdb.porttrees = [repo_path]

	portage.util.initialize_logger()

	if options.verbose > 0:
		l = logging.getLogger()
		l.setLevel(l.getEffectiveLevel() - 10 * options.verbose)

	with Config(options, portdb,
		SchedulerInterface(global_event_loop())) as config:

		if not options.mirror:
			parser.error('No action specified')

		returncode = os.EX_OK

		if options.mirror:
			signum = run_main_scheduler(MirrorDistTask(config))
			if signum is not None:
				sys.exit(128 + signum)

	return returncode
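
emirrordist_main is written as a console entry point that takes its argv tail directly. A typical wrapper script would look like the sketch below; the import path is an assumption based on Portage's usual layout and may differ between releases:

#!/usr/bin/env python
import sys

# Assumed import location for the function defined above; adjust to
# wherever emirrordist_main actually lives in your Portage checkout.
from portage._emirrordist.main import emirrordist_main

if __name__ == '__main__':
    sys.exit(emirrordist_main(sys.argv[1:]))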
Example 5
    def testDoebuildSpawn(self):

        ebuild_body = textwrap.dedent("""
			pkg_nofetch() { : ; }
		""")

        ebuilds = {
            "sys-apps/portage-2.1": {
                "EAPI": "2",
                "IUSE": "build doc epydoc python3 selinux",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "RDEPEND": ">=app-shells/bash-3.2_p17 >=dev-lang/python-2.6",
                "SLOT": "0",
                "MISC_CONTENT": ebuild_body,
            }
        }

        playground = ResolverPlayground(ebuilds=ebuilds)
        try:
            root_config = playground.trees[playground.eroot]["root_config"]
            portdb = root_config.trees["porttree"].dbapi
            settings = config(clone=playground.settings)
            if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
                settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[
                    "__PORTAGE_TEST_HARDLINK_LOCKS"]
                settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")

            cpv = "sys-apps/portage-2.1"
            metadata = dict(
                zip(Package.metadata_keys,
                    portdb.aux_get(cpv, Package.metadata_keys)))

            pkg = Package(
                built=False,
                cpv=cpv,
                installed=False,
                metadata=metadata,
                root_config=root_config,
                type_name="ebuild",
            )
            settings.setcpv(pkg)
            settings["PORTAGE_PYTHON"] = _python_interpreter
            settings["PORTAGE_BUILDDIR"] = os.path.join(
                settings["PORTAGE_TMPDIR"], cpv)
            settings["PYTHONDONTWRITEBYTECODE"] = os.environ.get(
                "PYTHONDONTWRITEBYTECODE", "")
            settings["HOME"] = os.path.join(settings["PORTAGE_BUILDDIR"],
                                            "homedir")
            settings["T"] = os.path.join(settings["PORTAGE_BUILDDIR"], "temp")
            for x in ("PORTAGE_BUILDDIR", "HOME", "T"):
                os.makedirs(settings[x])
            # Create a fake environment, to pretend as if the ebuild
            # has been sourced already.
            open(os.path.join(settings["T"], "environment"), "wb").close()

            scheduler = SchedulerInterface(global_event_loop())
            for phase in ("_internal_test", ):

                # Test EbuildSpawnProcess by calling doebuild.spawn() with
                # returnpid=False. This case is no longer used by portage
                # internals since EbuildPhase is used instead and that passes
                # returnpid=True to doebuild.spawn().
                rval = doebuild_spawn(
                    "%s %s" % (
                        _shell_quote(
                            os.path.join(
                                settings["PORTAGE_BIN_PATH"],
                                os.path.basename(EBUILD_SH_BINARY),
                            )),
                        phase,
                    ),
                    settings,
                    free=1,
                )
                self.assertEqual(rval, os.EX_OK)

                ebuild_phase = EbuildPhase(
                    background=False,
                    phase=phase,
                    scheduler=scheduler,
                    settings=settings,
                )
                ebuild_phase.start()
                ebuild_phase.wait()
                self.assertEqual(ebuild_phase.returncode, os.EX_OK)

            ebuild_phase = MiscFunctionsProcess(
                background=False,
                commands=["success_hooks"],
                scheduler=scheduler,
                settings=settings,
            )
            ebuild_phase.start()
            ebuild_phase.wait()
            self.assertEqual(ebuild_phase.returncode, os.EX_OK)

            spawn_nofetch(portdb, portdb.findname(cpv), settings=settings)
        finally:
            playground.cleanup()
Example 6
	def testEbuildFetch(self):

		user_config = {
			"make.conf":
				(
					'GENTOO_MIRRORS="{scheme}://{host}:{port}"',
				),
		}

		distfiles = {
			'bar': b'bar\n',
			'foo': b'foo\n',
		}

		ebuilds = {
			'dev-libs/A-1': {
				'EAPI': '7',
				'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
					{scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
			},
		}

		loop = SchedulerInterface(global_event_loop())

		def run_async(func, *args, **kwargs):
			with ForkExecutor(loop=loop) as executor:
				return loop.run_until_complete(loop.run_in_executor(executor,
					functools.partial(func, *args, **kwargs)))

		scheme = 'http'
		host = '127.0.0.1'
		content = {}

		content['/distfiles/layout.conf'] = b'[structure]\n0=flat\n'

		for k, v in distfiles.items():
			# mirror path
			content['/distfiles/{}'.format(k)] = v
			# upstream path
			content['/distfiles/{}.txt'.format(k)] = v

		with AsyncHTTPServer(host, content, loop) as server:
			ebuilds_subst = {}
			for cpv, metadata in ebuilds.items():
				metadata = metadata.copy()
				metadata['SRC_URI'] = metadata['SRC_URI'].format(
					scheme=scheme, host=host, port=server.server_port)
				ebuilds_subst[cpv] = metadata

			user_config_subst = user_config.copy()
			for configname, configdata in user_config.items():

				configdata_sub = []
				for line in configdata:
					configdata_sub.append(line.format(
						scheme=scheme, host=host, port=server.server_port))
				user_config_subst[configname] = tuple(configdata_sub)

			playground = ResolverPlayground(ebuilds=ebuilds_subst, distfiles=distfiles, user_config=user_config_subst)
			ro_distdir = tempfile.mkdtemp()
			eubin = os.path.join(playground.eprefix, "usr", "bin")
			try:
				fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND'])
				fetch_bin = portage.process.find_binary(fetchcommand[0])
				if fetch_bin is None:
					self.skipTest('FETCHCOMMAND not found: {}'.format(playground.settings['FETCHCOMMAND']))
				os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin)))
				resumecommand = portage.util.shlex_split(playground.settings['RESUMECOMMAND'])
				resume_bin = portage.process.find_binary(resumecommand[0])
				if resume_bin is None:
					self.skipTest('RESUMECOMMAND not found: {}'.format(playground.settings['RESUMECOMMAND']))
				if resume_bin != fetch_bin:
					os.symlink(resume_bin, os.path.join(eubin, os.path.basename(resume_bin)))
				root_config = playground.trees[playground.eroot]['root_config']
				portdb = root_config.trees["porttree"].dbapi
				settings = config(clone=playground.settings)

				# Demonstrate that fetch preserves a stale file in DISTDIR when no digests are given.
				foo_uri = {'foo': ('{scheme}://{host}:{port}/distfiles/foo'.format(scheme=scheme, host=host, port=server.server_port),)}
				foo_path = os.path.join(settings['DISTDIR'], 'foo')
				foo_stale_content = b'stale content\n'
				with open(foo_path, 'wb') as f:
					f.write(foo_stale_content)

				self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False)))

				with open(foo_path, 'rb') as f:
					self.assertEqual(f.read(), foo_stale_content)
				with open(foo_path, 'rb') as f:
					self.assertNotEqual(f.read(), distfiles['foo'])

				# Use force=True to update the stale file.
				self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False, force=True)))

				with open(foo_path, 'rb') as f:
					self.assertEqual(f.read(), distfiles['foo'])

				# Test force=True with FEATURES=skiprocheck, using read-only DISTDIR.
				# FETCHCOMMAND is set to temporarily chmod +w DISTDIR. Note that
				# FETCHCOMMAND must perform atomic rename itself due to read-only
				# DISTDIR.
				with open(foo_path, 'wb') as f:
					f.write(b'stale content\n')
				orig_fetchcommand = settings['FETCHCOMMAND']
				orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
				temp_fetchcommand = os.path.join(eubin, 'fetchcommand')
				with open(temp_fetchcommand, 'w') as f:
					f.write("""
					set -e
					URI=$1
					DISTDIR=$2
					FILE=$3
					trap 'chmod a-w "${DISTDIR}"' EXIT
					chmod ug+w "${DISTDIR}"
					%s
					mv -f "${DISTDIR}/${FILE}.__download__" "${DISTDIR}/${FILE}"
				""" % orig_fetchcommand.replace('${FILE}', '${FILE}.__download__'))
				settings['FETCHCOMMAND'] = '"%s" "%s" "${URI}" "${DISTDIR}" "${FILE}"' % (BASH_BINARY, temp_fetchcommand)
				settings.features.add('skiprocheck')
				settings.features.remove('distlocks')
				os.chmod(settings['DISTDIR'], 0o555)
				try:
					self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False, force=True)))
				finally:
					settings['FETCHCOMMAND'] = orig_fetchcommand
					os.chmod(settings['DISTDIR'], orig_distdir_mode)
					settings.features.remove('skiprocheck')
					settings.features.add('distlocks')
					os.unlink(temp_fetchcommand)

				with open(foo_path, 'rb') as f:
					self.assertEqual(f.read(), distfiles['foo'])

				# Test emirrordist invocation.
				emirrordist_cmd = (portage._python_interpreter, '-b', '-Wd',
					os.path.join(self.bindir, 'emirrordist'),
					'--distfiles', settings['DISTDIR'],
					'--config-root', settings['EPREFIX'],
					'--repositories-configuration', settings.repositories.config_string(),
					'--repo', 'test_repo', '--mirror')

				env = settings.environ()
				env['PYTHONPATH'] = ':'.join(
					filter(None, [PORTAGE_PYM_PATH] + os.environ.get('PYTHONPATH', '').split(':')))

				for k in distfiles:
					os.unlink(os.path.join(settings['DISTDIR'], k))

				proc = loop.run_until_complete(asyncio.create_subprocess_exec(*emirrordist_cmd, env=env))
				self.assertEqual(loop.run_until_complete(proc.wait()), 0)

				for k in distfiles:
					with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
						self.assertEqual(f.read(), distfiles[k])

				# Tests only work with one ebuild at a time, so the config
				# pool only needs a single config instance.
				class config_pool:
					@staticmethod
					def allocate():
						return settings
					@staticmethod
					def deallocate(settings):
						pass

				def async_fetch(pkg, ebuild_path):
					fetcher = EbuildFetcher(config_pool=config_pool, ebuild_path=ebuild_path,
						fetchonly=False, fetchall=True, pkg=pkg, scheduler=loop)
					fetcher.start()
					return fetcher.async_wait()

				for cpv in ebuilds:
					metadata = dict(zip(Package.metadata_keys,
						portdb.aux_get(cpv, Package.metadata_keys)))

					pkg = Package(built=False, cpv=cpv, installed=False,
						metadata=metadata, root_config=root_config,
						type_name='ebuild')

					settings.setcpv(pkg)
					ebuild_path = portdb.findname(pkg.cpv)
					portage.doebuild_environment(ebuild_path, 'fetch', settings=settings, db=portdb)

					# Test good files in DISTDIR
					for k in settings['AA'].split():
						os.stat(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test digestgen with fetch
					os.unlink(os.path.join(os.path.dirname(ebuild_path), 'Manifest'))
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					with ForkExecutor(loop=loop) as executor:
						self.assertTrue(bool(loop.run_until_complete(
							loop.run_in_executor(executor, functools.partial(
								digestgen, mysettings=settings, myportdb=portdb)))))
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test missing files in DISTDIR
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test empty files in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							pass
						self.assertEqual(os.stat(file_path).st_size, 0)
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test non-empty files containing null bytes in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							f.write(len(distfiles[k]) * b'\0')
						self.assertEqual(os.stat(file_path).st_size, len(distfiles[k]))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test PORTAGE_RO_DISTDIRS
					settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir)
					orig_fetchcommand = settings['FETCHCOMMAND']
					orig_resumecommand = settings['RESUMECOMMAND']
					try:
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.rename(file_path, os.path.join(ro_distdir, k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							self.assertTrue(os.path.islink(file_path))
							with open(file_path, 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
							os.unlink(file_path)
					finally:
						settings.pop('PORTAGE_RO_DISTDIRS')
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test local filesystem in GENTOO_MIRRORS
					orig_mirrors = settings['GENTOO_MIRRORS']
					orig_fetchcommand = settings['FETCHCOMMAND']
					try:
						settings['GENTOO_MIRRORS'] = ro_distdir
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['GENTOO_MIRRORS'] = orig_mirrors
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test readonly DISTDIR
					orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
					try:
						os.chmod(settings['DISTDIR'], 0o555)
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						os.chmod(settings['DISTDIR'], orig_distdir_mode)

					# Test parallel-fetch mode
					settings['PORTAGE_PARALLEL_FETCHONLY'] = '1'
					try:
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
						for k in settings['AA'].split():
							os.unlink(os.path.join(settings['DISTDIR'], k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings.pop('PORTAGE_PARALLEL_FETCHONLY')

					# Test RESUMECOMMAND
					orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE']
					try:
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2'
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.unlink(file_path)
							with open(file_path + _download_suffix, 'wb') as f:
								f.write(distfiles[k][:2])
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size

					# Test readonly DISTDIR + skiprocheck, with FETCHCOMMAND set to temporarily chmod DISTDIR
					orig_fetchcommand = settings['FETCHCOMMAND']
					orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					try:
						os.chmod(settings['DISTDIR'], 0o555)
						settings['FETCHCOMMAND'] = '"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?; chmod a-w \\"${DISTDIR}\\"; exit \\$status"' % (BASH_BINARY, orig_fetchcommand.replace('"', '\\"'))
						settings.features.add('skiprocheck')
						settings.features.remove('distlocks')
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					finally:
						settings['FETCHCOMMAND'] = orig_fetchcommand
						os.chmod(settings['DISTDIR'], orig_distdir_mode)
						settings.features.remove('skiprocheck')
						settings.features.add('distlocks')
			finally:
				shutil.rmtree(ro_distdir)
				playground.cleanup()
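
The run_async helper defined near the top of this example is the general pattern these tests use to call blocking functions such as fetch() from event-loop-driven code: submit the call to a fork-based executor and block on the resulting future. A standard-library-only analogue, for reference (ProcessPoolExecutor stands in for Portage's ForkExecutor; the callable and its arguments must be picklable):

import asyncio
import functools
from concurrent.futures import ProcessPoolExecutor

def run_blocking(func, *args, **kwargs):
    # Run a blocking callable in a child process and wait for the
    # result on a private event loop, mirroring run_async above.
    loop = asyncio.new_event_loop()
    try:
        with ProcessPoolExecutor() as executor:
            return loop.run_until_complete(loop.run_in_executor(
                executor, functools.partial(func, *args, **kwargs)))
    finally:
        loop.close()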
Example 7
def spawn_nofetch(portdb, ebuild_path, settings=None, fd_pipes=None):
    """
	This spawns pkg_nofetch if appropriate. The settings parameter
	is useful only if setcpv has already been called in order
	to cache metadata. It will be cloned internally, in order to
	prevent any changes from interfering with the calling code.
	If settings is None then a suitable config instance will be
	acquired from the given portdbapi instance. Do not use the
	settings parameter unless setcpv has been called on the given
	instance, since otherwise it's possible to trigger issues like
	bug #408817 due to fragile assumptions involving the config
	state inside doebuild_environment().

	A private PORTAGE_BUILDDIR will be created and cleaned up, in
	order to avoid any interference with any other processes.
	If PORTAGE_TMPDIR is writable, that will be used, otherwise
	the default directory for the tempfile module will be used.

	We only call the pkg_nofetch phase if either RESTRICT=fetch
	is set or the package has explicitly overridden the default
	pkg_nofetch implementation. This allows specialized messages
	to be displayed for problematic packages even though they do
	not set RESTRICT=fetch (bug #336499).

	This function does nothing if the PORTAGE_PARALLEL_FETCHONLY
	variable is set in the config instance.
	"""

    if settings is None:
        settings = config(clone=portdb.settings)
    else:
        settings = config(clone=settings)

    if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
        return os.EX_OK

    # We must create our private PORTAGE_TMPDIR before calling
    # doebuild_environment(), since lots of variables such
    # as PORTAGE_BUILDDIR refer to paths inside PORTAGE_TMPDIR.
    portage_tmpdir = settings.get('PORTAGE_TMPDIR')
    if not portage_tmpdir or not os.access(portage_tmpdir, os.W_OK):
        portage_tmpdir = None
    private_tmpdir = tempfile.mkdtemp(dir=portage_tmpdir)
    settings['PORTAGE_TMPDIR'] = private_tmpdir
    settings.backup_changes('PORTAGE_TMPDIR')
    # private temp dir was just created, so it's not locked yet
    settings.pop('PORTAGE_BUILDDIR_LOCKED', None)

    try:
        doebuild_environment(ebuild_path,
                             'nofetch',
                             settings=settings,
                             db=portdb)
        restrict = settings['PORTAGE_RESTRICT'].split()
        defined_phases = settings['DEFINED_PHASES'].split()
        if not defined_phases:
            # When DEFINED_PHASES is undefined, assume all
            # phases are defined.
            defined_phases = EBUILD_PHASES

        if 'fetch' not in restrict and \
                'nofetch' not in defined_phases:
            return os.EX_OK

        prepare_build_dirs(settings=settings)
        ebuild_phase = EbuildPhase(
            background=False,
            phase='nofetch',
            scheduler=SchedulerInterface(
                portage._internal_caller and global_event_loop()
                or EventLoop(main=False)),
            fd_pipes=fd_pipes,
            settings=settings)
        ebuild_phase.start()
        ebuild_phase.wait()
        elog_process(settings.mycpv, settings)
    finally:
        shutil.rmtree(private_tmpdir)

    return ebuild_phase.returncode
Example 8
    def testDoebuildSpawn(self):

        ebuild_body = textwrap.dedent("""
			pkg_nofetch() { : ; }
		""")

        ebuilds = {
            'sys-apps/portage-2.1': {
                'EAPI': '2',
                'IUSE': 'build doc epydoc python3 selinux',
                'KEYWORDS': 'x86',
                'LICENSE': 'GPL-2',
                'RDEPEND': '>=app-shells/bash-3.2_p17 >=dev-lang/python-2.6',
                'SLOT': '0',
                "MISC_CONTENT": ebuild_body,
            }
        }

        playground = ResolverPlayground(ebuilds=ebuilds)
        try:
            root_config = playground.trees[playground.eroot]['root_config']
            portdb = root_config.trees["porttree"].dbapi
            settings = config(clone=playground.settings)
            if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
                settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
                 os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
                settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")

            cpv = 'sys-apps/portage-2.1'
            metadata = dict(
                zip(Package.metadata_keys,
                    portdb.aux_get(cpv, Package.metadata_keys)))

            pkg = Package(built=False,
                          cpv=cpv,
                          installed=False,
                          metadata=metadata,
                          root_config=root_config,
                          type_name='ebuild')
            settings.setcpv(pkg)
            settings['PORTAGE_PYTHON'] = _python_interpreter
            settings['PORTAGE_BUILDDIR'] = os.path.join(
                settings['PORTAGE_TMPDIR'], cpv)
            settings['T'] = os.path.join(settings['PORTAGE_BUILDDIR'], 'temp')
            for x in ('PORTAGE_BUILDDIR', 'T'):
                os.makedirs(settings[x])
            # Create a fake environment, to pretend as if the ebuild
            # has been sourced already.
            open(os.path.join(settings['T'], 'environment'), 'wb').close()

            scheduler = SchedulerInterface(global_event_loop())
            for phase in ('_internal_test', ):

                # Test EbuildSpawnProcess by calling doebuild.spawn() with
                # returnpid=False. This case is no longer used by portage
                # internals since EbuildPhase is used instead and that passes
                # returnpid=True to doebuild.spawn().
                rval = doebuild_spawn("%s %s" % (_shell_quote(
                    os.path.join(settings["PORTAGE_BIN_PATH"],
                                 os.path.basename(EBUILD_SH_BINARY))), phase),
                                      settings,
                                      free=1)
                self.assertEqual(rval, os.EX_OK)

                ebuild_phase = EbuildPhase(background=False,
                                           phase=phase,
                                           scheduler=scheduler,
                                           settings=settings)
                ebuild_phase.start()
                ebuild_phase.wait()
                self.assertEqual(ebuild_phase.returncode, os.EX_OK)

            ebuild_phase = MiscFunctionsProcess(background=False,
                                                commands=['success_hooks'],
                                                scheduler=scheduler,
                                                settings=settings)
            ebuild_phase.start()
            ebuild_phase.wait()
            self.assertEqual(ebuild_phase.returncode, os.EX_OK)

            spawn_nofetch(portdb, portdb.findname(cpv), settings=settings)
        finally:
            playground.cleanup()
Example 9
	def testEbuildFetch(self):

		distfiles = {
			'bar': b'bar\n',
			'foo': b'foo\n',
		}

		ebuilds = {
			'dev-libs/A-1': {
				'EAPI': '7',
				'RESTRICT': 'primaryuri',
				'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
					{scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
			},
		}

		loop = SchedulerInterface(global_event_loop())
		scheme = 'http'
		host = '127.0.0.1'
		content = {}
		for k, v in distfiles.items():
			content['/distfiles/{}.txt'.format(k)] = v

		with AsyncHTTPServer(host, content, loop) as server:
			ebuilds_subst = {}
			for cpv, metadata in ebuilds.items():
				metadata = metadata.copy()
				metadata['SRC_URI'] = metadata['SRC_URI'].format(
					scheme=scheme, host=host, port=server.server_port)
				ebuilds_subst[cpv] = metadata

			playground = ResolverPlayground(ebuilds=ebuilds_subst, distfiles=distfiles)
			ro_distdir = tempfile.mkdtemp()
			try:
				fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND'])
				fetch_bin = portage.process.find_binary(fetchcommand[0])
				if fetch_bin is None:
					self.skipTest('FETCHCOMMAND not found: {}'.format(playground.settings['FETCHCOMMAND']))
				resumecommand = portage.util.shlex_split(playground.settings['RESUMECOMMAND'])
				resume_bin = portage.process.find_binary(resumecommand[0])
				if resume_bin is None:
					self.skipTest('RESUMECOMMAND not found: {}'.format(playground.settings['RESUMECOMMAND']))
				root_config = playground.trees[playground.eroot]['root_config']
				portdb = root_config.trees["porttree"].dbapi
				settings = config(clone=playground.settings)

				# Tests only work with one ebuild at a time, so the config
				# pool only needs a single config instance.
				class config_pool:
					@staticmethod
					def allocate():
						return settings
					@staticmethod
					def deallocate(settings):
						pass

				def async_fetch(pkg, ebuild_path):
					fetcher = EbuildFetcher(config_pool=config_pool, ebuild_path=ebuild_path,
						fetchonly=False, fetchall=True, pkg=pkg, scheduler=loop)
					fetcher.start()
					return fetcher.async_wait()

				for cpv in ebuilds:
					metadata = dict(zip(Package.metadata_keys,
						portdb.aux_get(cpv, Package.metadata_keys)))

					pkg = Package(built=False, cpv=cpv, installed=False,
						metadata=metadata, root_config=root_config,
						type_name='ebuild')

					settings.setcpv(pkg)
					ebuild_path = portdb.findname(pkg.cpv)
					portage.doebuild_environment(ebuild_path, 'fetch', settings=settings, db=portdb)

					# Test good files in DISTDIR
					for k in settings['AA'].split():
						os.stat(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test digestgen with fetch
					os.unlink(os.path.join(os.path.dirname(ebuild_path), 'Manifest'))
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					with ForkExecutor(loop=loop) as executor:
						self.assertTrue(bool(loop.run_until_complete(
							loop.run_in_executor(executor, functools.partial(
								digestgen, mysettings=settings, myportdb=portdb)))))
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test missing files in DISTDIR
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test empty files in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							pass
						self.assertEqual(os.stat(file_path).st_size, 0)
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test non-empty files containing null bytes in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							f.write(len(distfiles[k]) * b'\0')
						self.assertEqual(os.stat(file_path).st_size, len(distfiles[k]))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test PORTAGE_RO_DISTDIRS
					settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir)
					orig_fetchcommand = settings['FETCHCOMMAND']
					orig_resumecommand = settings['RESUMECOMMAND']
					try:
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.rename(file_path, os.path.join(ro_distdir, k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							self.assertTrue(os.path.islink(file_path))
							with open(file_path, 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
							os.unlink(file_path)
					finally:
						settings.pop('PORTAGE_RO_DISTDIRS')
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test local filesystem in GENTOO_MIRRORS
					orig_mirrors = settings['GENTOO_MIRRORS']
					orig_fetchcommand = settings['FETCHCOMMAND']
					try:
						settings['GENTOO_MIRRORS'] = ro_distdir
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['GENTOO_MIRRORS'] = orig_mirrors
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test readonly DISTDIR
					orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
					try:
						os.chmod(settings['DISTDIR'], 0o555)
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						os.chmod(settings['DISTDIR'], orig_distdir_mode)

					# Test parallel-fetch mode
					settings['PORTAGE_PARALLEL_FETCHONLY'] = '1'
					try:
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
						for k in settings['AA'].split():
							os.unlink(os.path.join(settings['DISTDIR'], k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings.pop('PORTAGE_PARALLEL_FETCHONLY')

					# Test RESUMECOMMAND
					orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE']
					try:
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2'
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.unlink(file_path)
							with open(file_path + _download_suffix, 'wb') as f:
								f.write(distfiles[k][:2])
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size
			finally:
				shutil.rmtree(ro_distdir)
				playground.cleanup()
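
The throwaway config_pool used in these tests satisfies the small allocate()/deallocate() interface that EbuildFetcher expects. A sketch of a pool that hands out real clones instead of one shared instance (purely illustrative; Portage's own pool is more involved):

class CloningConfigPool:
    # Illustrative pool: give every fetcher its own cloned config so
    # concurrent consumers cannot mutate shared state.
    def __init__(self, template_settings):
        self._template = template_settings

    def allocate(self):
        return config(clone=self._template)

    def deallocate(self, settings):
        pass  # clones are simply garbage-collected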