Beispiel #1
0
        def test(loop):
            """Feed stdin_data through a `cat` subprocess and assert that it
            is echoed back unchanged on stdout.

            Uses the pipe file objects created by the enclosing test
            (stdin_pr/stdin_pw, stdout_pr/stdout_pw) and the given event loop.
            """
            output = None
            try:
                # Child reads from the input pipe; stdout and stderr both go
                # to the write end of the output pipe.
                proc = loop.run_until_complete(
                    asyncio.create_subprocess_exec(cat_binary,
                                                   stdin=stdin_pr,
                                                   stdout=stdout_pw,
                                                   stderr=stdout_pw))

                # These belong exclusively to the subprocess now. Closing our
                # copies lets the reader observe EOF when the child exits.
                stdout_pw.close()
                stdin_pr.close()

                # Drain the child's output concurrently, so a full pipe
                # buffer cannot deadlock the writer below.
                output = asyncio.ensure_future(reader(stdout_pr, loop=loop),
                                               loop=loop)

                with ForkExecutor(loop=loop) as executor:
                    # Perform the (potentially blocking) write in a forked
                    # process so it cannot stall this event loop.
                    writer = asyncio.ensure_future(loop.run_in_executor(
                        executor, stdin_pw.write, stdin_data),
                                                   loop=loop)

                    # This belongs exclusively to the writer now.
                    stdin_pw.close()
                    loop.run_until_complete(writer)

                self.assertEqual(loop.run_until_complete(proc.wait()),
                                 os.EX_OK)
                self.assertEqual(loop.run_until_complete(output), stdin_data)
            finally:
                # Cancel the reader if it never finished, then release every
                # pipe file object tracked by the enclosing test.
                if output is not None and not output.done():
                    output.cancel()
                for f in files:
                    f.close()
Beispiel #2
0
 def test_soname_atom_pickle(self):
     """Collect all soname provides in a forked child process and check
     the result against the expected set (exercises pickling of the
     returned atoms across the process boundary)."""
     event_loop = asyncio._wrap_loop()
     with ForkExecutor(loop=event_loop) as pool:
         provides = event_loop.run_until_complete(
             event_loop.run_in_executor(pool, self._get_all_provides))
     self.assertEqual(self._ALL_PROVIDES, provides)
Beispiel #3
0
	async def _async_check(self, loop, **kwargs):
		'''Perform async profile dependent dependency checks

		@param loop: event loop used to schedule the check tasks
		@param arches:
		@param pkg: Package in which we check (object).
		@param ebuild: Ebuild which we check (object).
		@param baddepsyntax: boolean
		@param unknown_pkgs: set of tuples (type, atom.unevaluated_atom)
		@returns: dictionary
		'''
		ebuild = kwargs.get('ebuild').get()
		pkg = kwargs.get('pkg').get()
		unknown_pkgs = ebuild.unknown_pkgs
		baddepsyntax = ebuild.baddepsyntax

		# The fork performed by ForkExecutor gives _iter_tasks a snapshot
		# of the current state. NOTE(review): an earlier comment claimed
		# max_workers=True was passed here, but the executor is created
		# with max_workers=self.options.jobs, and it is never explicitly
		# shut down -- confirm whether both are intentional.
		executor = ForkExecutor(max_workers=self.options.jobs)

		if self.options.jobs > 1:
			# Consume results as each batch of futures completes, bounded
			# by the configured job count and load average.
			for future_done_set in async_iter_completed(self._iter_tasks(loop, executor, ebuild, pkg),
				max_jobs=self.options.jobs, max_load=self.options.load_average):
				for task in await future_done_set:
					task, results = task.result()
					for result in results:
						self._check_result(task, result)

		if not baddepsyntax and unknown_pkgs:
			# Group unknown atoms by dependency type so that each type
			# yields a single error with a sorted atom list.
			type_map = {}
			for mytype, atom in unknown_pkgs:
				type_map.setdefault(mytype, set()).add(atom)
			for mytype, atoms in type_map.items():
				self.qatracker.add_error(
					"dependency.unknown", "%s: %s: %s"
					% (ebuild.relative_path, mytype, ", ".join(sorted(atoms))))
Beispiel #4
0
    def _refresh_keys(self, openpgp_env):
        """
        Refresh keys stored in openpgp_env: first via WKD, and on failure
        via the configured keyserver, optionally retried under a timeout.
        Raises gemato.exceptions.GematoException or asyncio.TimeoutError
        on failure.

        @param openpgp_env: openpgp environment
        @type openpgp_env: gemato.openpgp.OpenPGPEnvironment
        """
        out = portage.output.EOutput(
            quiet=('--quiet' in self.options['emerge_config'].opts))
        out.ebegin('Refreshing keys via WKD')
        if openpgp_env.refresh_keys_wkd():
            out.eend(0)
            return
        out.eend(1)

        out.ebegin('Refreshing keys from keyserver{}'.format(
            ('' if self.repo.sync_openpgp_keyserver is None else ' ' +
             self.repo.sync_openpgp_keyserver)))
        retry_decorator = self._key_refresh_retry_decorator()
        if retry_decorator is None:
            # No retry policy configured: make a single blocking attempt.
            openpgp_env.refresh_keys_keyserver(
                keyserver=self.repo.sync_openpgp_keyserver)
        else:

            def noisy_refresh_keys():
                """
                Since retry does not help for some types of
                errors, display errors as soon as they occur.
                """
                try:
                    openpgp_env.refresh_keys_keyserver(
                        keyserver=self.repo.sync_openpgp_keyserver)
                except Exception as e:
                    writemsg_level("%s\n" % (e, ),
                                   level=logging.ERROR,
                                   noiselevel=-1)
                    raise  # retry

            # The ThreadPoolExecutor that asyncio uses by default
            # does not support cancellation of tasks, therefore
            # use ForkExecutor for task cancellation support, in
            # order to enforce timeouts.
            loop = global_event_loop()
            with ForkExecutor(loop=loop) as executor:
                func_coroutine = functools.partial(loop.run_in_executor,
                                                   executor,
                                                   noisy_refresh_keys)
                decorated_func = retry_decorator(func_coroutine, loop=loop)
                loop.run_until_complete(decorated_func())
        out.eend(0)
Beispiel #5
0
	def _soname_deps_qa(self):
		"""
		Generator-style coroutine that emits a QA warning for any soname
		dependencies recorded under PORTAGE_BUILDDIR/build-info that
		cannot be resolved against the providers found in the vardb.
		"""
		vardb = QueryCommand.get_db()[self.settings['EROOT']]['vartree'].dbapi

		# Gather providers in a forked process so the (potentially slow)
		# vardb scan does not block the scheduler's event loop.
		all_provides = (yield self.scheduler.run_in_executor(ForkExecutor(loop=self.scheduler), _get_all_provides, vardb))

		unresolved = _get_unresolved_soname_deps(os.path.join(self.settings['PORTAGE_BUILDDIR'], 'build-info'), all_provides)

		if unresolved:
			unresolved.sort()
			qa_msg = ["QA Notice: Unresolved soname dependencies:"]
			qa_msg.append("")
			qa_msg.extend("\t%s: %s" % (filename, " ".join(sorted(soname_deps)))
				for filename, soname_deps in unresolved)
			qa_msg.append("")
			yield self.elog("eqawarn", qa_msg)
Beispiel #6
0
	def testEbuildFetch(self):
		"""
		End-to-end test of distfile fetching against a local HTTP server:
		covers stale-file preservation, force=True, read-only DISTDIR with
		FEATURES=skiprocheck, emirrordist, digestgen, missing/empty/corrupt
		distfiles, PORTAGE_RO_DISTDIRS, local GENTOO_MIRRORS, parallel-fetch
		and RESUMECOMMAND.
		"""
		user_config = {
			"make.conf":
				(
					'GENTOO_MIRRORS="{scheme}://{host}:{port}"',
				),
		}

		distfiles = {
			'bar': b'bar\n',
			'foo': b'foo\n',
		}

		ebuilds = {
			'dev-libs/A-1': {
				'EAPI': '7',
				'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
					{scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
			},
		}

		loop = SchedulerInterface(global_event_loop())

		# Run a blocking function in a forked process, driving it to
		# completion on the local event loop.
		def run_async(func, *args, **kwargs):
			with ForkExecutor(loop=loop) as executor:
				return loop.run_until_complete(loop.run_in_executor(executor,
					functools.partial(func, *args, **kwargs)))

		scheme = 'http'
		host = '127.0.0.1'
		content = {}

		content['/distfiles/layout.conf'] = b'[structure]\n0=flat\n'

		for k, v in distfiles.items():
			# mirror path
			content['/distfiles/{}'.format(k)] = v
			# upstream path
			content['/distfiles/{}.txt'.format(k)] = v

		with AsyncHTTPServer(host, content, loop) as server:
			# Substitute the server's actual address/port into SRC_URI and
			# the make.conf GENTOO_MIRRORS template.
			ebuilds_subst = {}
			for cpv, metadata in ebuilds.items():
				metadata = metadata.copy()
				metadata['SRC_URI'] = metadata['SRC_URI'].format(
					scheme=scheme, host=host, port=server.server_port)
				ebuilds_subst[cpv] = metadata

			user_config_subst = user_config.copy()
			for configname, configdata in user_config.items():

				configdata_sub = []
				for line in configdata:
					configdata_sub.append(line.format(
					scheme=scheme, host=host, port=server.server_port))
				user_config_subst[configname] = tuple(configdata_sub)

			playground = ResolverPlayground(ebuilds=ebuilds_subst, distfiles=distfiles, user_config=user_config_subst)
			ro_distdir = tempfile.mkdtemp()
			eubin = os.path.join(playground.eprefix, "usr", "bin")
			try:
				# Make the system fetch/resume binaries reachable from the
				# playground's sandboxed PATH via symlinks.
				fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND'])
				fetch_bin = portage.process.find_binary(fetchcommand[0])
				if fetch_bin is None:
					self.skipTest('FETCHCOMMAND not found: {}'.format(playground.settings['FETCHCOMMAND']))
				os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin)))
				resumecommand = portage.util.shlex_split(playground.settings['RESUMECOMMAND'])
				resume_bin = portage.process.find_binary(resumecommand[0])
				if resume_bin is None:
					self.skipTest('RESUMECOMMAND not found: {}'.format(playground.settings['RESUMECOMMAND']))
				if resume_bin != fetch_bin:
					os.symlink(resume_bin, os.path.join(eubin, os.path.basename(resume_bin)))
				root_config = playground.trees[playground.eroot]['root_config']
				portdb = root_config.trees["porttree"].dbapi
				settings = config(clone=playground.settings)

				# Demonstrate that fetch preserves a stale file in DISTDIR when no digests are given.
				foo_uri = {'foo': ('{scheme}://{host}:{port}/distfiles/foo'.format(scheme=scheme, host=host, port=server.server_port),)}
				foo_path = os.path.join(settings['DISTDIR'], 'foo')
				foo_stale_content = b'stale content\n'
				with open(foo_path, 'wb') as f:
					f.write(b'stale content\n')

				self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False)))

				with open(foo_path, 'rb') as f:
					self.assertEqual(f.read(), foo_stale_content)
				with open(foo_path, 'rb') as f:
					self.assertNotEqual(f.read(), distfiles['foo'])

				# Use force=True to update the stale file.
				self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False, force=True)))

				with open(foo_path, 'rb') as f:
					self.assertEqual(f.read(), distfiles['foo'])

				# Test force=True with FEATURES=skiprocheck, using read-only DISTDIR.
				# FETCHCOMMAND is set to temporarily chmod +w DISTDIR. Note that
				# FETCHCOMMAND must perform atomic rename itself due to read-only
				# DISTDIR.
				with open(foo_path, 'wb') as f:
					f.write(b'stale content\n')
				orig_fetchcommand = settings['FETCHCOMMAND']
				orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
				temp_fetchcommand = os.path.join(eubin, 'fetchcommand')
				with open(temp_fetchcommand, 'w') as f:
					f.write("""
					set -e
					URI=$1
					DISTDIR=$2
					FILE=$3
					trap 'chmod a-w "${DISTDIR}"' EXIT
					chmod ug+w "${DISTDIR}"
					%s
					mv -f "${DISTDIR}/${FILE}.__download__" "${DISTDIR}/${FILE}"
				""" % orig_fetchcommand.replace('${FILE}', '${FILE}.__download__'))
				settings['FETCHCOMMAND'] = '"%s" "%s" "${URI}" "${DISTDIR}" "${FILE}"' % (BASH_BINARY, temp_fetchcommand)
				settings.features.add('skiprocheck')
				settings.features.remove('distlocks')
				os.chmod(settings['DISTDIR'], 0o555)
				try:
					self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False, force=True)))
				finally:
					# Restore the original command, mode and FEATURES.
					settings['FETCHCOMMAND'] = orig_fetchcommand
					os.chmod(settings['DISTDIR'], orig_distdir_mode)
					settings.features.remove('skiprocheck')
					settings.features.add('distlocks')
					os.unlink(temp_fetchcommand)

				with open(foo_path, 'rb') as f:
					self.assertEqual(f.read(), distfiles['foo'])

				# Test emirrordist invocation.
				emirrordist_cmd = (portage._python_interpreter, '-b', '-Wd',
					os.path.join(self.bindir, 'emirrordist'),
					'--distfiles', settings['DISTDIR'],
					'--config-root', settings['EPREFIX'],
					'--repositories-configuration', settings.repositories.config_string(),
					'--repo', 'test_repo', '--mirror')

				env = settings.environ()
				env['PYTHONPATH'] = ':'.join(
					filter(None, [PORTAGE_PYM_PATH] + os.environ.get('PYTHONPATH', '').split(':')))

				for k in distfiles:
					os.unlink(os.path.join(settings['DISTDIR'], k))

				proc = loop.run_until_complete(asyncio.create_subprocess_exec(*emirrordist_cmd, env=env))
				self.assertEqual(loop.run_until_complete(proc.wait()), 0)

				for k in distfiles:
					with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
						self.assertEqual(f.read(), distfiles[k])

				# Tests only work with one ebuild at a time, so the config
				# pool only needs a single config instance.
				class config_pool:
					@staticmethod
					def allocate():
						return settings
					@staticmethod
					def deallocate(settings):
						pass

				def async_fetch(pkg, ebuild_path):
					# Returns a future for the fetcher's exit status.
					fetcher = EbuildFetcher(config_pool=config_pool, ebuild_path=ebuild_path,
						fetchonly=False, fetchall=True, pkg=pkg, scheduler=loop)
					fetcher.start()
					return fetcher.async_wait()

				for cpv in ebuilds:
					metadata = dict(zip(Package.metadata_keys,
						portdb.aux_get(cpv, Package.metadata_keys)))

					pkg = Package(built=False, cpv=cpv, installed=False,
						metadata=metadata, root_config=root_config,
						type_name='ebuild')

					settings.setcpv(pkg)
					ebuild_path = portdb.findname(pkg.cpv)
					portage.doebuild_environment(ebuild_path, 'fetch', settings=settings, db=portdb)

					# Test good files in DISTDIR
					for k in settings['AA'].split():
						os.stat(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test digestgen with fetch
					os.unlink(os.path.join(os.path.dirname(ebuild_path), 'Manifest'))
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					with ForkExecutor(loop=loop) as executor:
						self.assertTrue(bool(loop.run_until_complete(
							loop.run_in_executor(executor, functools.partial(
								digestgen, mysettings=settings, myportdb=portdb)))))
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test missing files in DISTDIR
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test empty files in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							pass
						self.assertEqual(os.stat(file_path).st_size, 0)
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test non-empty files containing null bytes in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							f.write(len(distfiles[k]) * b'\0')
						self.assertEqual(os.stat(file_path).st_size, len(distfiles[k]))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test PORTAGE_RO_DISTDIRS
					settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir)
					orig_fetchcommand = settings['FETCHCOMMAND']
					orig_resumecommand = settings['RESUMECOMMAND']
					try:
						# Disable network fetching so files must come from
						# the read-only distdir (as symlinks).
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.rename(file_path, os.path.join(ro_distdir, k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							self.assertTrue(os.path.islink(file_path))
							with open(file_path, 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
							os.unlink(file_path)
					finally:
						settings.pop('PORTAGE_RO_DISTDIRS')
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test local filesystem in GENTOO_MIRRORS
					orig_mirrors = settings['GENTOO_MIRRORS']
					orig_fetchcommand = settings['FETCHCOMMAND']
					try:
						settings['GENTOO_MIRRORS'] = ro_distdir
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['GENTOO_MIRRORS'] = orig_mirrors
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test readonly DISTDIR
					orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
					try:
						os.chmod(settings['DISTDIR'], 0o555)
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						os.chmod(settings['DISTDIR'], orig_distdir_mode)

					# Test parallel-fetch mode
					settings['PORTAGE_PARALLEL_FETCHONLY'] = '1'
					try:
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
						for k in settings['AA'].split():
							os.unlink(os.path.join(settings['DISTDIR'], k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings.pop('PORTAGE_PARALLEL_FETCHONLY')

					# Test RESUMECOMMAND
					orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE']
					try:
						# Plant truncated __download__ files so the resume
						# path is exercised.
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2'
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.unlink(file_path)
							with open(file_path + _download_suffix, 'wb') as f:
								f.write(distfiles[k][:2])
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size

					# Test readonly DISTDIR + skiprocheck, with FETCHCOMMAND set to temporarily chmod DISTDIR
					orig_fetchcommand = settings['FETCHCOMMAND']
					orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					try:
						os.chmod(settings['DISTDIR'], 0o555)
						settings['FETCHCOMMAND'] = '"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?; chmod a-w \\"${DISTDIR}\\"; exit \\$status"' % (BASH_BINARY, orig_fetchcommand.replace('"', '\\"'))
						settings.features.add('skiprocheck')
						settings.features.remove('distlocks')
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					finally:
						settings['FETCHCOMMAND'] = orig_fetchcommand
						os.chmod(settings['DISTDIR'], orig_distdir_mode)
						settings.features.remove('skiprocheck')
						settings.features.add('distlocks')
			finally:
				shutil.rmtree(ro_distdir)
				playground.cleanup()
Beispiel #7
0
 def _setUpExecutor(self):
     """Create the ForkExecutor under test (overridden by subclasses)."""
     self._executor = ForkExecutor()
Beispiel #8
0
class RetryForkExecutorTestCase(RetryTestCase):
    """
    Wrap each coroutine function with AbstractEventLoop.run_in_executor,
    in order to test the event loop's default executor. The executor
    may use either a thread or a subprocess, and either case is
    automatically detected and handled.
    """
    def __init__(self, *pargs, **kwargs):
        super(RetryForkExecutorTestCase, self).__init__(*pargs, **kwargs)
        # Created per-test in setUp and released in tearDown.
        self._executor = None

    def _setUpExecutor(self):
        """Create the executor under test (overridden by subclasses)."""
        self._executor = ForkExecutor()

    def _tearDownExecutor(self):
        """Shut down the executor, waiting for pending work to finish."""
        if self._executor is not None:
            self._executor.shutdown(wait=True)
            self._executor = None

    def setUp(self):
        self._setUpExecutor()

    def tearDown(self):
        self._tearDownExecutor()

    def _wrap_coroutine_func(self, coroutine_func):
        """
        Return a callable that schedules coroutine_func via the executor,
        handling both thread-based and fork-based execution.
        """
        parent_loop = global_event_loop()

        # Since ThreadPoolExecutor does not propagate cancellation of a
        # parent_future to the underlying coroutine, use kill_switch to
        # propagate task cancellation to wrapper, so that HangForever's
        # thread returns when retry eventually cancels parent_future.
        def wrapper(kill_switch):
            loop = global_event_loop()
            if loop is parent_loop:
                # thread in main process: block until the coroutine
                # finishes or the kill_switch fires, whichever is first
                result = coroutine_func()
                event = threading.Event()
                loop.call_soon_threadsafe(result.add_done_callback,
                                          lambda result: event.set())
                loop.call_soon_threadsafe(kill_switch.add_done_callback,
                                          lambda kill_switch: event.set())
                event.wait()
                return result.result()
            else:
                # child process: safe to drive its own event loop here
                try:
                    return loop.run_until_complete(coroutine_func())
                finally:
                    loop.close()

        def execute_wrapper():
            kill_switch = parent_loop.create_future()
            parent_future = asyncio.ensure_future(parent_loop.run_in_executor(
                self._executor, wrapper, kill_switch),
                                                  loop=parent_loop)
            # When parent_future completes (or is cancelled), trip the
            # kill_switch so a thread-based wrapper can return.
            parent_future.add_done_callback(
                lambda parent_future: None
                if kill_switch.done() else kill_switch.set_result(None))
            return parent_future

        return execute_wrapper
Beispiel #9
0
    def _test_mod(self, auxdbmodule, multiproc=True):
        """Exercise the given auxdb cache module: verify metadata both
        in-process and in a forked child, then confirm that cache entry
        modifications (optionally performed in a subprocess) are visible
        through the cache afterwards."""
        ebuilds = {
            "cat/A-1": {
                "EAPI": "7",
                "MISC_CONTENT": "inherit foo",
            },
            "cat/B-1": {
                "EAPI": "7",
                "MISC_CONTENT": "inherit foo",
            },
        }

        ebuild_inherited = frozenset(["bar", "foo"])
        eclass_defined_phases = "prepare"
        eclass_depend = "bar/foo"

        eclasses = {
            "foo": ("inherit bar", ),
            "bar": (
                "EXPORT_FUNCTIONS src_prepare",
                "DEPEND=\"{}\"".format(eclass_depend),
                "bar_src_prepare() { default; }",
            ),
        }

        playground = ResolverPlayground(
            ebuilds=ebuilds,
            eclasses=eclasses,
            user_config={
                'modules': ('portdbapi.auxdbmodule = %s' % auxdbmodule, )
            })

        portdb = playground.trees[playground.eroot]["porttree"].dbapi

        def run_checks():
            # May execute in this process or in a forked child.
            inner_loop = asyncio._wrap_loop()
            coroutine = self._test_mod_async(
                ebuilds,
                ebuild_inherited,
                eclass_defined_phases,
                eclass_depend,
                portdb,
                loop=inner_loop)
            return inner_loop.run_until_complete(coroutine)

        # First check metadata directly in this process...
        self.assertTrue(run_checks())

        # ...then repeat the same checks inside a forked subprocess.
        loop = asyncio._wrap_loop()
        self.assertTrue(
            loop.run_until_complete(
                loop.run_in_executor(ForkExecutor(), run_checks)))

        auxdb = portdb.auxdb[portdb.getRepositoryPath('test_repo')]
        cpv = next(iter(ebuilds))

        def mutate_cache_entry():
            entry = auxdb[cpv]
            entry['RESTRICT'] = 'test'
            try:
                del entry['_eclasses_']
            except KeyError:
                pass
            auxdb[cpv] = entry

        if multiproc:
            loop.run_until_complete(
                loop.run_in_executor(ForkExecutor(), mutate_cache_entry))
        else:
            mutate_cache_entry()

        self.assertEqual(auxdb[cpv]['RESTRICT'], 'test')
Beispiel #10
0
class RetryForkExecutorTestCase(RetryTestCase):
	"""
	Wrap each coroutine function with AbstractEventLoop.run_in_executor,
	in order to test the event loop's default executor. The executor
	may use either a thread or a subprocess, and either case is
	automatically detected and handled.
	"""
	def __init__(self, *pargs, **kwargs):
		super(RetryForkExecutorTestCase, self).__init__(*pargs, **kwargs)
		# Created per-test in setUp and released in tearDown.
		self._executor = None

	def _setUpExecutor(self):
		"""Create the executor under test (overridden by subclasses)."""
		self._executor = ForkExecutor()

	def _tearDownExecutor(self):
		"""Shut down the executor, waiting for pending work to finish."""
		if self._executor is not None:
			self._executor.shutdown(wait=True)
			self._executor = None

	def setUp(self):
		self._setUpExecutor()

	def tearDown(self):
		self._tearDownExecutor()

	def _wrap_coroutine_func(self, coroutine_func):
		"""
		Return a callable that schedules coroutine_func via the executor,
		handling both thread-based and fork-based execution.
		"""
		parent_loop = global_event_loop()

		# Since ThreadPoolExecutor does not propagate cancellation of a
		# parent_future to the underlying coroutine, use kill_switch to
		# propagate task cancellation to wrapper, so that HangForever's
		# thread returns when retry eventually cancels parent_future.
		def wrapper(kill_switch):
			loop = global_event_loop()
			if loop is parent_loop:
				# thread in main process: block until the coroutine
				# finishes or the kill_switch fires, whichever is first
				result = coroutine_func()
				event = threading.Event()
				loop.call_soon_threadsafe(result.add_done_callback,
					lambda result: event.set())
				loop.call_soon_threadsafe(kill_switch.add_done_callback,
					lambda kill_switch: event.set())
				event.wait()
				return result.result()
			else:
				# child process: safe to drive its own event loop here
				try:
					return loop.run_until_complete(coroutine_func())
				finally:
					loop.close()

		def execute_wrapper():
			kill_switch = parent_loop.create_future()
			parent_future = asyncio.ensure_future(
				parent_loop.run_in_executor(self._executor, wrapper, kill_switch),
				loop=parent_loop)
			# When parent_future completes (or is cancelled), trip the
			# kill_switch so a thread-based wrapper can return.
			parent_future.add_done_callback(
				lambda parent_future: None if kill_switch.done()
				else kill_switch.set_result(None))
			return parent_future

		return execute_wrapper
Beispiel #11
0
    def _testEbuildFetch(
        self,
        loop,
        scheme,
        host,
        orig_distfiles,
        ebuilds,
        content,
        server,
        playground,
        ro_distdir,
    ):
        mirror_layouts = (
            (
                "[structure]",
                "0=filename-hash BLAKE2B 8",
                "1=flat",
            ),
            (
                "[structure]",
                "1=filename-hash BLAKE2B 8",
                "0=flat",
            ),
            (
                "[structure]",
                "0=content-hash SHA512 8:8:8",
                "1=flat",
            ),
        )

        fetchcommand = portage.util.shlex_split(
            playground.settings["FETCHCOMMAND"])
        fetch_bin = portage.process.find_binary(fetchcommand[0])
        if fetch_bin is None:
            self.skipTest("FETCHCOMMAND not found: {}".format(
                playground.settings["FETCHCOMMAND"]))
        eubin = os.path.join(playground.eprefix, "usr", "bin")
        os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin)))
        resumecommand = portage.util.shlex_split(
            playground.settings["RESUMECOMMAND"])
        resume_bin = portage.process.find_binary(resumecommand[0])
        if resume_bin is None:
            self.skipTest("RESUMECOMMAND not found: {}".format(
                playground.settings["RESUMECOMMAND"]))
        if resume_bin != fetch_bin:
            os.symlink(resume_bin,
                       os.path.join(eubin, os.path.basename(resume_bin)))
        root_config = playground.trees[playground.eroot]["root_config"]
        portdb = root_config.trees["porttree"].dbapi

        def run_async(func, *args, **kwargs):
            with ForkExecutor(loop=loop) as executor:
                return loop.run_until_complete(
                    loop.run_in_executor(
                        executor, functools.partial(func, *args, **kwargs)))

        for layout_lines in mirror_layouts:
            settings = config(clone=playground.settings)
            layout_data = "".join("{}\n".format(line) for line in layout_lines)
            mirror_conf = MirrorLayoutConfig()
            mirror_conf.read_from_file(io.StringIO(layout_data))
            layouts = mirror_conf.get_all_layouts()
            content["/distfiles/layout.conf"] = layout_data.encode("utf8")
            distfiles = {}
            for k, v in orig_distfiles.items():
                filename = DistfileName(
                    k,
                    digests=dict((algo, checksum_str(v, hashname=algo))
                                 for algo in MANIFEST2_HASH_DEFAULTS),
                )
                distfiles[filename] = v

                # mirror path
                for layout in layouts:
                    content["/distfiles/" + layout.get_path(filename)] = v
                # upstream path
                content["/distfiles/{}.txt".format(k)] = v

            shutil.rmtree(settings["DISTDIR"])
            os.makedirs(settings["DISTDIR"])
            with open(os.path.join(settings["DISTDIR"], "layout.conf"),
                      "wt") as f:
                f.write(layout_data)

            if any(
                    isinstance(layout, ContentHashLayout)
                    for layout in layouts):
                content_db = os.path.join(playground.eprefix,
                                          "var/db/emirrordist/content.db")
                os.makedirs(os.path.dirname(content_db), exist_ok=True)
                try:
                    os.unlink(content_db)
                except OSError:
                    pass
            else:
                content_db = None

            # Demonstrate that fetch preserves a stale file in DISTDIR when no digests are given.
            foo_uri = {
                "foo": ("{scheme}://{host}:{port}/distfiles/foo".format(
                    scheme=scheme, host=host, port=server.server_port), )
            }
            foo_path = os.path.join(settings["DISTDIR"], "foo")
            foo_stale_content = b"stale content\n"
            with open(foo_path, "wb") as f:
                f.write(b"stale content\n")

            self.assertTrue(
                bool(run_async(fetch, foo_uri, settings, try_mirrors=False)))

            with open(foo_path, "rb") as f:
                self.assertEqual(f.read(), foo_stale_content)
            with open(foo_path, "rb") as f:
                self.assertNotEqual(f.read(), distfiles["foo"])

            # Use force=True to update the stale file.
            self.assertTrue(
                bool(
                    run_async(fetch,
                              foo_uri,
                              settings,
                              try_mirrors=False,
                              force=True)))

            with open(foo_path, "rb") as f:
                self.assertEqual(f.read(), distfiles["foo"])

            # Test force=True with FEATURES=skiprocheck, using read-only DISTDIR.
            # FETCHCOMMAND is set to temporarily chmod +w DISTDIR. Note that
            # FETCHCOMMAND must perform atomic rename itself due to read-only
            # DISTDIR.
            with open(foo_path, "wb") as f:
                f.write(b"stale content\n")
            orig_fetchcommand = settings["FETCHCOMMAND"]
            orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode
            temp_fetchcommand = os.path.join(eubin, "fetchcommand")
            with open(temp_fetchcommand, "w") as f:
                f.write("""
					set -e
					URI=$1
					DISTDIR=$2
					FILE=$3
					trap 'chmod a-w "${DISTDIR}"' EXIT
					chmod ug+w "${DISTDIR}"
					%s
					mv -f "${DISTDIR}/${FILE}.__download__" "${DISTDIR}/${FILE}"
				""" % orig_fetchcommand.replace("${FILE}", "${FILE}.__download__"))
            settings[
                "FETCHCOMMAND"] = '"%s" "%s" "${URI}" "${DISTDIR}" "${FILE}"' % (
                    BASH_BINARY,
                    temp_fetchcommand,
                )
            settings.features.add("skiprocheck")
            settings.features.remove("distlocks")
            os.chmod(settings["DISTDIR"], 0o555)
            try:
                self.assertTrue(
                    bool(
                        run_async(fetch,
                                  foo_uri,
                                  settings,
                                  try_mirrors=False,
                                  force=True)))
            finally:
                settings["FETCHCOMMAND"] = orig_fetchcommand
                os.chmod(settings["DISTDIR"], orig_distdir_mode)
                settings.features.remove("skiprocheck")
                settings.features.add("distlocks")
                os.unlink(temp_fetchcommand)

            with open(foo_path, "rb") as f:
                self.assertEqual(f.read(), distfiles["foo"])

            # Test emirrordist invocation.
            emirrordist_cmd = (
                portage._python_interpreter,
                "-b",
                "-Wd",
                os.path.join(self.bindir, "emirrordist"),
                "--distfiles",
                settings["DISTDIR"],
                "--config-root",
                settings["EPREFIX"],
                "--delete",
                "--repositories-configuration",
                settings.repositories.config_string(),
                "--repo",
                "test_repo",
                "--mirror",
            )

            if content_db is not None:
                emirrordist_cmd = emirrordist_cmd + (
                    "--content-db",
                    content_db,
                )

            env = settings.environ()
            env["PYTHONPATH"] = ":".join(
                filter(
                    None,
                    [PORTAGE_PYM_PATH] +
                    os.environ.get("PYTHONPATH", "").split(":"),
                ))

            for k in distfiles:
                try:
                    os.unlink(os.path.join(settings["DISTDIR"], k))
                except OSError:
                    pass

            proc = loop.run_until_complete(
                asyncio.create_subprocess_exec(*emirrordist_cmd, env=env))
            self.assertEqual(loop.run_until_complete(proc.wait()), 0)

            for k in distfiles:
                with open(
                        os.path.join(settings["DISTDIR"],
                                     layouts[0].get_path(k)), "rb") as f:
                    self.assertEqual(f.read(), distfiles[k])

            if content_db is not None:
                loop.run_until_complete(
                    self._test_content_db(
                        emirrordist_cmd,
                        env,
                        layouts,
                        content_db,
                        distfiles,
                        settings,
                        portdb,
                    ))

            # Tests only work with one ebuild at a time, so the config
            # pool only needs a single config instance.
            class config_pool:
                @staticmethod
                def allocate():
                    return settings

                @staticmethod
                def deallocate(settings):
                    pass

            def async_fetch(pkg, ebuild_path):
                fetcher = EbuildFetcher(
                    config_pool=config_pool,
                    ebuild_path=ebuild_path,
                    fetchonly=False,
                    fetchall=True,
                    pkg=pkg,
                    scheduler=loop,
                )
                fetcher.start()
                return fetcher.async_wait()

            for cpv in ebuilds:
                metadata = dict(
                    zip(
                        Package.metadata_keys,
                        portdb.aux_get(cpv, Package.metadata_keys),
                    ))

                pkg = Package(
                    built=False,
                    cpv=cpv,
                    installed=False,
                    metadata=metadata,
                    root_config=root_config,
                    type_name="ebuild",
                )

                settings.setcpv(pkg)
                ebuild_path = portdb.findname(pkg.cpv)
                portage.doebuild_environment(ebuild_path,
                                             "fetch",
                                             settings=settings,
                                             db=portdb)

                # Test good files in DISTDIR
                for k in settings["AA"].split():
                    os.stat(os.path.join(settings["DISTDIR"], k))
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test digestgen with fetch
                os.unlink(
                    os.path.join(os.path.dirname(ebuild_path), "Manifest"))
                for k in settings["AA"].split():
                    os.unlink(os.path.join(settings["DISTDIR"], k))
                with ForkExecutor(loop=loop) as executor:
                    self.assertTrue(
                        bool(
                            loop.run_until_complete(
                                loop.run_in_executor(
                                    executor,
                                    functools.partial(digestgen,
                                                      mysettings=settings,
                                                      myportdb=portdb),
                                ))))
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test missing files in DISTDIR
                for k in settings["AA"].split():
                    os.unlink(os.path.join(settings["DISTDIR"], k))
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test empty files in DISTDIR
                for k in settings["AA"].split():
                    file_path = os.path.join(settings["DISTDIR"], k)
                    with open(file_path, "wb") as f:
                        pass
                    self.assertEqual(os.stat(file_path).st_size, 0)
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test non-empty files containing null bytes in DISTDIR
                for k in settings["AA"].split():
                    file_path = os.path.join(settings["DISTDIR"], k)
                    with open(file_path, "wb") as f:
                        f.write(len(distfiles[k]) * b"\0")
                    self.assertEqual(
                        os.stat(file_path).st_size, len(distfiles[k]))
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test PORTAGE_RO_DISTDIRS
                settings["PORTAGE_RO_DISTDIRS"] = '"{}"'.format(ro_distdir)
                orig_fetchcommand = settings["FETCHCOMMAND"]
                orig_resumecommand = settings["RESUMECOMMAND"]
                try:
                    settings["FETCHCOMMAND"] = settings["RESUMECOMMAND"] = ""
                    for k in settings["AA"].split():
                        file_path = os.path.join(settings["DISTDIR"], k)
                        os.rename(file_path, os.path.join(ro_distdir, k))
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings["AA"].split():
                        file_path = os.path.join(settings["DISTDIR"], k)
                        self.assertTrue(os.path.islink(file_path))
                        with open(file_path, "rb") as f:
                            self.assertEqual(f.read(), distfiles[k])
                        os.unlink(file_path)
                finally:
                    settings.pop("PORTAGE_RO_DISTDIRS")
                    settings["FETCHCOMMAND"] = orig_fetchcommand
                    settings["RESUMECOMMAND"] = orig_resumecommand

                # Test local filesystem in GENTOO_MIRRORS
                orig_mirrors = settings["GENTOO_MIRRORS"]
                orig_fetchcommand = settings["FETCHCOMMAND"]
                try:
                    settings["GENTOO_MIRRORS"] = ro_distdir
                    settings["FETCHCOMMAND"] = settings["RESUMECOMMAND"] = ""
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings["AA"].split():
                        with open(os.path.join(settings["DISTDIR"], k),
                                  "rb") as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings["GENTOO_MIRRORS"] = orig_mirrors
                    settings["FETCHCOMMAND"] = orig_fetchcommand
                    settings["RESUMECOMMAND"] = orig_resumecommand

                # Test readonly DISTDIR
                orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode
                try:
                    os.chmod(settings["DISTDIR"], 0o555)
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings["AA"].split():
                        with open(os.path.join(settings["DISTDIR"], k),
                                  "rb") as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    os.chmod(settings["DISTDIR"], orig_distdir_mode)

                # Test parallel-fetch mode
                settings["PORTAGE_PARALLEL_FETCHONLY"] = "1"
                try:
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings["AA"].split():
                        with open(os.path.join(settings["DISTDIR"], k),
                                  "rb") as f:
                            self.assertEqual(f.read(), distfiles[k])
                    for k in settings["AA"].split():
                        os.unlink(os.path.join(settings["DISTDIR"], k))
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings["AA"].split():
                        with open(os.path.join(settings["DISTDIR"], k),
                                  "rb") as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings.pop("PORTAGE_PARALLEL_FETCHONLY")

                # Test RESUMECOMMAND
                orig_resume_min_size = settings[
                    "PORTAGE_FETCH_RESUME_MIN_SIZE"]
                try:
                    settings["PORTAGE_FETCH_RESUME_MIN_SIZE"] = "2"
                    for k in settings["AA"].split():
                        file_path = os.path.join(settings["DISTDIR"], k)
                        os.unlink(file_path)
                        with open(file_path + _download_suffix, "wb") as f:
                            f.write(distfiles[k][:2])
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings["AA"].split():
                        with open(os.path.join(settings["DISTDIR"], k),
                                  "rb") as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings[
                        "PORTAGE_FETCH_RESUME_MIN_SIZE"] = orig_resume_min_size

                # Test readonly DISTDIR + skiprocheck, with FETCHCOMMAND set to temporarily chmod DISTDIR
                orig_fetchcommand = settings["FETCHCOMMAND"]
                orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode
                for k in settings["AA"].split():
                    os.unlink(os.path.join(settings["DISTDIR"], k))
                try:
                    os.chmod(settings["DISTDIR"], 0o555)
                    settings["FETCHCOMMAND"] = (
                        '"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?; chmod a-w \\"${DISTDIR}\\"; exit \\$status"'
                        % (BASH_BINARY, orig_fetchcommand.replace('"', '\\"')))
                    settings.features.add("skiprocheck")
                    settings.features.remove("distlocks")
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                finally:
                    settings["FETCHCOMMAND"] = orig_fetchcommand
                    os.chmod(settings["DISTDIR"], orig_distdir_mode)
                    settings.features.remove("skiprocheck")
                    settings.features.add("distlocks")
Beispiel #12
0
class RetryForkExecutorTestCase(RetryTestCase):
    """
    Wrap each coroutine function with AbstractEventLoop.run_in_executor,
    in order to test the event loop's default executor. The executor
    may use either a thread or a subprocess, and either case is
    automatically detected and handled.
    """
    def __init__(self, *pargs, **kwargs):
        super(RetryForkExecutorTestCase, self).__init__(*pargs, **kwargs)
        # Executor instance created per-test in setUp and torn down in
        # tearDown; None whenever no executor is active.
        self._executor = None

    def _setUpExecutor(self):
        # Hook for subclasses: create the executor under test. This base
        # class uses a fork-based executor (coroutines run in a child
        # process); a subclass may override to use a thread-based one.
        self._executor = ForkExecutor()

    def _tearDownExecutor(self):
        # Shut down the executor, blocking until workers exit, and drop
        # the reference so a stale executor is never reused across tests.
        if self._executor is not None:
            self._executor.shutdown(wait=True)
            self._executor = None

    def setUp(self):
        self._setUpExecutor()

    def tearDown(self):
        self._tearDownExecutor()

    @contextlib.contextmanager
    def _wrap_coroutine_func(self, coroutine_func):
        """
        Context manager yielding a zero-argument callable which, when
        invoked, schedules coroutine_func via the executor and returns a
        future for its result. On exit, any coroutines still pending in
        the parent loop are run to completion (or cancelled) so that no
        task outlives the test.
        """
        parent_loop = global_event_loop()
        parent_pid = portage.getpid()
        # Weak mapping of in-flight coroutine futures; entries vanish
        # automatically once a future is garbage collected, and the
        # finally block below drains whatever remains.
        pending = weakref.WeakValueDictionary()

        # Since ThreadPoolExecutor does not propagate cancellation of a
        # parent_future to the underlying coroutine, use kill_switch to
        # propagate task cancellation to wrapper, so that HangForever's
        # thread returns when retry eventually cancels parent_future.
        def wrapper(kill_switch):
            # Runs inside the executor worker. Comparing pids detects
            # whether the worker is a thread in this process or a forked
            # child process, since the two cases need different handling.
            if portage.getpid() == parent_pid:
                # thread in main process
                def done_callback(result):
                    # Consume any exception so it is not reported as
                    # unretrieved, then release the waiting worker thread.
                    result.cancelled() or result.exception() or result.result()
                    kill_switch.set()

                def start_coroutine(future):
                    # Executed on the parent loop's thread: schedule the
                    # coroutine there and hand its task back to the worker.
                    result = asyncio.ensure_future(coroutine_func(),
                                                   loop=parent_loop)
                    pending[id(result)] = result
                    result.add_done_callback(done_callback)
                    future.set_result(result)

                future = Future()
                parent_loop.call_soon_threadsafe(start_coroutine, future)
                # Block until either the coroutine finishes or the parent
                # future is cancelled (kill_callback sets the switch).
                kill_switch.wait()
                if not future.done():
                    # Cancelled before start_coroutine even ran.
                    future.cancel()
                    raise asyncio.CancelledError
                elif not future.result().done():
                    # Coroutine started but was cancelled mid-flight;
                    # propagate cancellation to it and to our caller.
                    future.result().cancel()
                    raise asyncio.CancelledError
                else:
                    return future.result().result()

            # child process
            # The fork inherited no running loop state we can reuse
            # safely, so run the coroutine on this process's own loop and
            # close it before the worker returns.
            loop = global_event_loop()
            try:
                return loop.run_until_complete(coroutine_func())
            finally:
                loop.close()

        def execute_wrapper():
            # One kill_switch per invocation links the parent-side future
            # to the worker blocked in wrapper().
            kill_switch = threading.Event()
            parent_future = asyncio.ensure_future(parent_loop.run_in_executor(
                self._executor, wrapper, kill_switch),
                                                  loop=parent_loop)

            def kill_callback(parent_future):
                # Fires on completion OR cancellation of parent_future;
                # either way, unblock the worker's kill_switch.wait().
                if not kill_switch.is_set():
                    kill_switch.set()

            parent_future.add_done_callback(kill_callback)
            return parent_future

        try:
            yield execute_wrapper
        finally:
            # Drain every coroutine future that is still alive so no task
            # is left pending on the parent loop after the test body.
            while True:
                try:
                    _, future = pending.popitem()
                except KeyError:
                    break
                try:
                    parent_loop.run_until_complete(future)
                except (Exception, asyncio.CancelledError):
                    pass
                # Retrieve the result/exception so it is not reported as
                # unconsumed when the future is garbage collected.
                future.cancelled() or future.exception() or future.result()
Beispiel #13
0
    def update(self):
        '''Internal update function which performs the transfer'''
        opts = self.options.get('emerge_config').opts
        self.usersync_uid = self.options.get('usersync_uid', None)
        enter_invalid = '--ask-enter-invalid' in opts
        quiet = '--quiet' in opts
        out = portage.output.EOutput(quiet=quiet)
        syncuri = self.repo.sync_uri
        if self.repo.module_specific_options.get('sync-rsync-vcs-ignore',
                                                 'false').lower() == 'true':
            vcs_dirs = ()
        else:
            vcs_dirs = frozenset(VCS_DIRS)
            vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
             "control (contains %s).\n!!! Aborting rsync sync "
             "(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
             (self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
            return (1, False)
        self.timeout = 180

        rsync_opts = []
        if self.settings["PORTAGE_RSYNC_OPTS"] == "":
            rsync_opts = self._set_rsync_defaults()
        else:
            rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
        self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

        self.extra_rsync_opts = list()
        if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
            self.extra_rsync_opts.extend(
                portage.util.shlex_split(
                    self.repo.module_specific_options['sync-rsync-extra-opts'])
            )

        # Process GLEP74 verification options.
        # Default verification to 'no'; it's enabled for ::gentoo
        # via default repos.conf though.
        self.verify_metamanifest = (self.repo.module_specific_options.get(
            'sync-rsync-verify-metamanifest', 'no') in ('yes', 'true'))
        # Support overriding job count.
        self.verify_jobs = self.repo.module_specific_options.get(
            'sync-rsync-verify-jobs', None)
        if self.verify_jobs is not None:
            try:
                self.verify_jobs = int(self.verify_jobs)
                if self.verify_jobs < 0:
                    raise ValueError(self.verify_jobs)
            except ValueError:
                writemsg_level(
                    "!!! sync-rsync-verify-jobs not a positive integer: %s\n" %
                    (self.verify_jobs, ),
                    level=logging.WARNING,
                    noiselevel=-1)
                self.verify_jobs = None
            else:
                if self.verify_jobs == 0:
                    # Use the apparent number of processors if gemato
                    # supports it.
                    self.verify_jobs = None
        # Support overriding max age.
        self.max_age = self.repo.module_specific_options.get(
            'sync-rsync-verify-max-age', '')
        if self.max_age:
            try:
                self.max_age = int(self.max_age)
                if self.max_age < 0:
                    raise ValueError(self.max_age)
            except ValueError:
                writemsg_level(
                    "!!! sync-rsync-max-age must be a non-negative integer: %s\n"
                    % (self.max_age, ),
                    level=logging.WARNING,
                    noiselevel=-1)
                self.max_age = 0
        else:
            self.max_age = 0

        openpgp_env = None
        if self.verify_metamanifest and gemato is not None:
            # Use isolated environment if key is specified,
            # system environment otherwise
            if self.repo.sync_openpgp_key_path is not None:
                openpgp_env = gemato.openpgp.OpenPGPEnvironment()
            else:
                openpgp_env = gemato.openpgp.OpenPGPSystemEnvironment()

        try:
            # Load and update the keyring early. If it fails, then verification
            # will not be performed and the user will have to fix it and try again,
            # so we may as well bail out before actual rsync happens.
            if openpgp_env is not None and self.repo.sync_openpgp_key_path is not None:

                try:
                    out.einfo('Using keys from %s' %
                              (self.repo.sync_openpgp_key_path, ))
                    with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
                        openpgp_env.import_key(f)
                    out.ebegin('Refreshing keys from keyserver')
                    retry_decorator = self._key_refresh_retry_decorator()
                    if retry_decorator is None:
                        openpgp_env.refresh_keys()
                    else:

                        def noisy_refresh_keys():
                            """
							Since retry does not help for some types of
							errors, display errors as soon as they occur.
							"""
                            try:
                                openpgp_env.refresh_keys()
                            except Exception as e:
                                writemsg_level("%s\n" % (e, ),
                                               level=logging.ERROR,
                                               noiselevel=-1)
                                raise  # retry

                        # The ThreadPoolExecutor that asyncio uses by default
                        # does not support cancellation of tasks, therefore
                        # use ForkExecutor for task cancellation support, in
                        # order to enforce timeouts.
                        loop = global_event_loop()
                        with ForkExecutor(loop=loop) as executor:
                            func_coroutine = functools.partial(
                                loop.run_in_executor, executor,
                                noisy_refresh_keys)
                            decorated_func = retry_decorator(func_coroutine,
                                                             loop=loop)
                            loop.run_until_complete(decorated_func())
                    out.eend(0)
                except (GematoException, asyncio.TimeoutError) as e:
                    writemsg_level(
                        "!!! Manifest verification impossible due to keyring problem:\n%s\n"
                        % (e, ),
                        level=logging.ERROR,
                        noiselevel=-1)
                    return (1, False)

            # Real local timestamp file.
            self.servertimestampfile = os.path.join(self.repo.location,
                                                    "metadata",
                                                    "timestamp.chk")

            content = portage.util.grabfile(self.servertimestampfile)
            timestamp = 0
            if content:
                try:
                    timestamp = time.mktime(
                        time.strptime(content[0], TIMESTAMP_FORMAT))
                except (OverflowError, ValueError):
                    pass
            del content

            try:
                self.rsync_initial_timeout = \
                 int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
            except ValueError:
                self.rsync_initial_timeout = 15

            try:
                maxretries = int(self.settings["PORTAGE_RSYNC_RETRIES"])
            except SystemExit as e:
                raise  # Needed else can't exit
            except:
                maxretries = -1  #default number of retries

            if syncuri.startswith("file://"):
                self.proto = "file"
                dosyncuri = syncuri[7:]
                unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
                    dosyncuri, timestamp, opts)
                self._process_exitcode(exitcode, dosyncuri, out, 1)
                return (exitcode, updatecache_flg)

            retries = 0
            try:
                self.proto, user_name, hostname, port = re.split(
                    r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
                    syncuri,
                    maxsplit=4)[1:5]
            except ValueError:
                writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
                               noiselevel=-1,
                               level=logging.ERROR)
                return (1, False)

            self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

            if port is None:
                port = ""
            if user_name is None:
                user_name = ""
            if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
                getaddrinfo_host = hostname
            else:
                # getaddrinfo needs the brackets stripped
                getaddrinfo_host = hostname[1:-1]
            updatecache_flg = False
            all_rsync_opts = set(self.rsync_opts)
            all_rsync_opts.update(self.extra_rsync_opts)

            family = socket.AF_UNSPEC
            if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
                family = socket.AF_INET
            elif socket.has_ipv6 and \
             ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
                family = socket.AF_INET6

            addrinfos = None
            uris = []

            try:
                addrinfos = getaddrinfo_validate(
                    socket.getaddrinfo(getaddrinfo_host, None, family,
                                       socket.SOCK_STREAM))
            except socket.error as e:
                writemsg_level("!!! getaddrinfo failed for '%s': %s\n" %
                               (_unicode_decode(hostname), _unicode(e)),
                               noiselevel=-1,
                               level=logging.ERROR)

            if addrinfos:

                AF_INET = socket.AF_INET
                AF_INET6 = None
                if socket.has_ipv6:
                    AF_INET6 = socket.AF_INET6

                ips_v4 = []
                ips_v6 = []

                for addrinfo in addrinfos:
                    if addrinfo[0] == AF_INET:
                        ips_v4.append("%s" % addrinfo[4][0])
                    elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
                        # IPv6 addresses need to be enclosed in square brackets
                        ips_v6.append("[%s]" % addrinfo[4][0])

                random.shuffle(ips_v4)
                random.shuffle(ips_v6)

                # Give priority to the address family that
                # getaddrinfo() returned first.
                if AF_INET6 is not None and addrinfos and \
                 addrinfos[0][0] == AF_INET6:
                    ips = ips_v6 + ips_v4
                else:
                    ips = ips_v4 + ips_v6

                for ip in ips:
                    uris.append(
                        syncuri.replace(
                            "//" + user_name + hostname + port + "/",
                            "//" + user_name + ip + port + "/", 1))

            if not uris:
                # With some configurations we need to use the plain hostname
                # rather than try to resolve the ip addresses (bug #340817).
                uris.append(syncuri)

            # reverse, for use with pop()
            uris.reverse()
            uris_orig = uris[:]

            effective_maxretries = maxretries
            if effective_maxretries < 0:
                effective_maxretries = len(uris) - 1

            local_state_unchanged = True
            while (1):
                if uris:
                    dosyncuri = uris.pop()
                elif maxretries < 0 or retries > maxretries:
                    writemsg("!!! Exhausted addresses for %s\n" %
                             _unicode_decode(hostname),
                             noiselevel=-1)
                    return (1, False)
                else:
                    uris.extend(uris_orig)
                    dosyncuri = uris.pop()

                if (retries == 0):
                    if "--ask" in opts:
                        uq = UserQuery(opts)
                        if uq.query("Do you want to sync your Portage tree " + \
                         "with the mirror at\n" + blue(dosyncuri) + bold("?"),
                         enter_invalid) == "No":
                            print()
                            print("Quitting.")
                            print()
                            sys.exit(128 + signal.SIGINT)
                    self.logger(self.xterm_titles,
                                ">>> Starting rsync with " + dosyncuri)
                    if "--quiet" not in opts:
                        print(">>> Starting rsync with " + dosyncuri + "...")
                else:
                    self.logger(self.xterm_titles,
                     ">>> Starting retry %d of %d with %s" % \
                      (retries, effective_maxretries, dosyncuri))
                    writemsg_stdout(
                     "\n\n>>> Starting retry %d of %d with %s\n" % \
                     (retries, effective_maxretries, dosyncuri), noiselevel=-1)

                if dosyncuri.startswith('ssh://'):
                    dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

                unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
                    dosyncuri, timestamp, opts)
                if not unchanged:
                    local_state_unchanged = False
                if is_synced:
                    break

                retries = retries + 1

                if maxretries < 0 or retries <= maxretries:
                    print(">>> Retrying...")
                else:
                    # over retries
                    # exit loop
                    exitcode = EXCEEDED_MAX_RETRIES
                    break
            self._process_exitcode(exitcode, dosyncuri, out, maxretries)

            # if synced successfully, verify now
            if exitcode == 0 and self.verify_metamanifest:
                if gemato is None:
                    writemsg_level(
                        "!!! Unable to verify: gemato-11.0+ is required\n",
                        level=logging.ERROR,
                        noiselevel=-1)
                    exitcode = 127
                else:
                    try:
                        # we always verify the Manifest signature, in case
                        # we had to deal with key revocation case
                        m = gemato.recursiveloader.ManifestRecursiveLoader(
                            os.path.join(self.repo.location, 'Manifest'),
                            verify_openpgp=True,
                            openpgp_env=openpgp_env,
                            max_jobs=self.verify_jobs)
                        if not m.openpgp_signed:
                            raise RuntimeError(
                                'OpenPGP signature not found on Manifest')

                        ts = m.find_timestamp()
                        if ts is None:
                            raise RuntimeError(
                                'Timestamp not found in Manifest')
                        if (self.max_age != 0
                                and (datetime.datetime.utcnow() - ts.ts).days >
                                self.max_age):
                            out.quiet = False
                            out.ewarn(
                                'Manifest is over %d days old, this is suspicious!'
                                % (self.max_age, ))
                            out.ewarn(
                                'You may want to try using another mirror and/or reporting this one:'
                            )
                            out.ewarn('  %s' % (dosyncuri, ))
                            out.ewarn('')
                            out.quiet = quiet

                        out.einfo('Manifest timestamp: %s UTC' % (ts.ts, ))
                        out.einfo('Valid OpenPGP signature found:')
                        out.einfo(
                            '- primary key: %s' %
                            (m.openpgp_signature.primary_key_fingerprint))
                        out.einfo('- subkey: %s' %
                                  (m.openpgp_signature.fingerprint))
                        out.einfo('- timestamp: %s UTC' %
                                  (m.openpgp_signature.timestamp))

                        # if nothing has changed, skip the actual Manifest
                        # verification
                        if not local_state_unchanged:
                            out.ebegin('Verifying %s' % (self.repo.location, ))
                            m.assert_directory_verifies()
                            out.eend(0)
                    except GematoException as e:
                        writemsg_level(
                            "!!! Manifest verification failed:\n%s\n" % (e, ),
                            level=logging.ERROR,
                            noiselevel=-1)
                        exitcode = 1

            return (exitcode, updatecache_flg)
        finally:
            if openpgp_env is not None:
                openpgp_env.close()
Beispiel #14
0
		def run_async(func, *args, **kwargs):
			"""Run func(*args, **kwargs) in a forked child process and
			block on the event loop until its result is available."""
			call = functools.partial(func, *args, **kwargs)
			with ForkExecutor(loop=loop) as executor:
				future = loop.run_in_executor(executor, call)
				result = loop.run_until_complete(future)
			return result
Beispiel #15
0
	def _setUpExecutor(self):
		"""Instantiate the ForkExecutor this object submits work to."""
		executor = ForkExecutor()
		self._executor = executor
Beispiel #16
0
 def _start(self):
     """Dispatch self._run to a forked child process via the scheduler
     and keep the resulting future so callers can await completion."""
     executor = ForkExecutor(loop=self.scheduler)
     copy_future = self.scheduler.run_in_executor(executor, self._run)
     self.future = asyncio.ensure_future(copy_future)
     super(FileCopier, self)._start()
Beispiel #17
0
	def testEbuildFetch(self):
		"""Exercise EbuildFetcher and digestgen against a local HTTP server.

		Two distfiles are served via AsyncHTTPServer, and a single ebuild
		referencing them is fetched repeatedly while DISTDIR is put into
		different states: good files, missing files, empty files, files of
		the right size but filled with null bytes, files moved into
		PORTAGE_RO_DISTDIRS, a local-filesystem GENTOO_MIRRORS, a
		read-only DISTDIR, parallel-fetch mode, and partial downloads
		resumed via RESUMECOMMAND. After every fetch the on-disk content
		is compared byte-for-byte with the expected distfiles.
		"""

		distfiles = {
			'bar': b'bar\n',
			'foo': b'foo\n',
		}

		ebuilds = {
			'dev-libs/A-1': {
				'EAPI': '7',
				'RESTRICT': 'primaryuri',
				'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
					{scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
			},
		}

		loop = SchedulerInterface(global_event_loop())
		scheme = 'http'
		host = '127.0.0.1'
		content = {}
		for k, v in distfiles.items():
			content['/distfiles/{}.txt'.format(k)] = v

		with AsyncHTTPServer(host, content, loop) as server:
			# Substitute the ephemeral server port into SRC_URI now that
			# the server has been bound.
			ebuilds_subst = {}
			for cpv, metadata in ebuilds.items():
				metadata = metadata.copy()
				metadata['SRC_URI'] = metadata['SRC_URI'].format(
					scheme=scheme, host=host, port=server.server_port)
				ebuilds_subst[cpv] = metadata

			playground = ResolverPlayground(ebuilds=ebuilds_subst, distfiles=distfiles)
			ro_distdir = tempfile.mkdtemp()
			try:
				fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND'])
				fetch_bin = portage.process.find_binary(fetchcommand[0])
				if fetch_bin is None:
					self.skipTest('FETCHCOMMAND not found: {}'.format(playground.settings['FETCHCOMMAND']))
				resumecommand = portage.util.shlex_split(playground.settings['RESUMECOMMAND'])
				resume_bin = portage.process.find_binary(resumecommand[0])
				if resume_bin is None:
					self.skipTest('RESUMECOMMAND not found: {}'.format(playground.settings['RESUMECOMMAND']))
				root_config = playground.trees[playground.eroot]['root_config']
				portdb = root_config.trees["porttree"].dbapi
				settings = config(clone=playground.settings)

				# Tests only work with one ebuild at a time, so the config
				# pool only needs a single config instance.
				class config_pool:
					@staticmethod
					def allocate():
						return settings
					@staticmethod
					def deallocate(settings):
						pass

				def async_fetch(pkg, ebuild_path):
					# Start an EbuildFetcher and return a future that
					# resolves to its exit status.
					fetcher = EbuildFetcher(config_pool=config_pool, ebuild_path=ebuild_path,
						fetchonly=False, fetchall=True, pkg=pkg, scheduler=loop)
					fetcher.start()
					return fetcher.async_wait()

				for cpv in ebuilds:
					metadata = dict(zip(Package.metadata_keys,
						portdb.aux_get(cpv, Package.metadata_keys)))

					pkg = Package(built=False, cpv=cpv, installed=False,
						metadata=metadata, root_config=root_config,
						type_name='ebuild')

					settings.setcpv(pkg)
					ebuild_path = portdb.findname(pkg.cpv)
					portage.doebuild_environment(ebuild_path, 'fetch', settings=settings, db=portdb)

					# Test good files in DISTDIR
					# NOTE(review): settings['AA'] appears to list all of the
					# ebuild's distfile names — confirm against doebuild docs.
					for k in settings['AA'].split():
						os.stat(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test digestgen with fetch
					os.unlink(os.path.join(os.path.dirname(ebuild_path), 'Manifest'))
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					with ForkExecutor(loop=loop) as executor:
						self.assertTrue(bool(loop.run_until_complete(
							loop.run_in_executor(executor, functools.partial(
								digestgen, mysettings=settings, myportdb=portdb)))))
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test missing files in DISTDIR
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test empty files in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							pass
						self.assertEqual(os.stat(file_path).st_size, 0)
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test non-empty files containing null bytes in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							f.write(len(distfiles[k]) * b'\0')
						self.assertEqual(os.stat(file_path).st_size, len(distfiles[k]))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test PORTAGE_RO_DISTDIRS
					# NOTE(review): the value is wrapped in double quotes,
					# presumably because the setting is shell-style split —
					# verify against the PORTAGE_RO_DISTDIRS consumer.
					settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir)
					orig_fetchcommand = settings['FETCHCOMMAND']
					orig_resumecommand = settings['RESUMECOMMAND']
					try:
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.rename(file_path, os.path.join(ro_distdir, k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							self.assertTrue(os.path.islink(file_path))
							with open(file_path, 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
							os.unlink(file_path)
					finally:
						settings.pop('PORTAGE_RO_DISTDIRS')
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test local filesystem in GENTOO_MIRRORS
					orig_mirrors = settings['GENTOO_MIRRORS']
					orig_fetchcommand = settings['FETCHCOMMAND']
					try:
						settings['GENTOO_MIRRORS'] = ro_distdir
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['GENTOO_MIRRORS'] = orig_mirrors
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test readonly DISTDIR
					orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
					try:
						os.chmod(settings['DISTDIR'], 0o555)
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						os.chmod(settings['DISTDIR'], orig_distdir_mode)

					# Test parallel-fetch mode
					settings['PORTAGE_PARALLEL_FETCHONLY'] = '1'
					try:
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
						for k in settings['AA'].split():
							os.unlink(os.path.join(settings['DISTDIR'], k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings.pop('PORTAGE_PARALLEL_FETCHONLY')

					# Test RESUMECOMMAND
					orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE']
					try:
						# Lower the resume threshold so the 2-byte partial
						# files below qualify for resuming.
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2'
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.unlink(file_path)
							with open(file_path + _download_suffix, 'wb') as f:
								f.write(distfiles[k][:2])
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size
			finally:
				shutil.rmtree(ro_distdir)
				playground.cleanup()