def testLogfile(self):
    """
    Spawn a bash child that echoes a known string, with its stdout and
    stderr sent to /dev/null and a logfile attached, then verify that the
    logfile captured exactly the echoed data.
    """
    logfile = None
    try:
        fd, logfile = tempfile.mkstemp()
        os.close(fd)
        null_fd = os.open('/dev/null', os.O_RDWR)
        test_string = "blah blah blah\n" * 2
        task_scheduler = TaskScheduler()
        spawn_kwargs = {
            'args': [BASH_BINARY, "-c", "echo -n '%s'" % test_string],
            'env': {},
            'fd_pipes': {
                0: sys.stdin.fileno(),
                1: null_fd,
                2: null_fd,
            },
            'scheduler': task_scheduler.sched_iface,
            'logfile': logfile,
        }
        proc = SpawnProcess(**spawn_kwargs)
        task_scheduler.add(proc)
        task_scheduler.run()
        os.close(null_fd)
        log_file = codecs.open(
            _unicode_encode(logfile,
                encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['content'], errors='strict')
        log_content = log_file.read()
        log_file.close()
        # When logging passes through a pty, this comparison will fail
        # unless the oflag terminal attributes have the termios.OPOST
        # bit disabled. Otherwise, transformations such as \n -> \r\n
        # may occur.
        self.assertEqual(test_string, log_content)
    finally:
        if logfile:
            try:
                os.unlink(logfile)
            except EnvironmentError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
def testDoebuildSpawn(self):
    """
    Exercise doebuild.spawn() (returnpid=False path), EbuildPhase and
    MiscFunctionsProcess against a fabricated, pre-sourced ebuild
    environment inside a ResolverPlayground sandbox.
    """
    playground = ResolverPlayground()
    try:
        # Clone the playground's config so mutations below stay local.
        settings = config(clone=playground.settings)
        cpv = 'sys-apps/portage-2.1'
        metadata = {
            'EAPI' : '2',
            'INHERITED' : 'python eutils',
            'IUSE' : 'build doc epydoc python3 selinux',
            'LICENSE' : 'GPL-2',
            'PROVIDE' : 'virtual/portage',
            'RDEPEND' : '>=app-shells/bash-3.2_p17 >=dev-lang/python-2.6',
            'SLOT' : '0',
        }
        root_config = playground.trees[playground.root]['root_config']
        pkg = Package(built=False, cpv=cpv, installed=False,
            metadata=metadata, root_config=root_config,
            type_name='ebuild')
        settings.setcpv(pkg)
        settings['PORTAGE_PYTHON'] = _python_interpreter
        settings['PORTAGE_BUILDDIR'] = os.path.join(
            settings['PORTAGE_TMPDIR'], cpv)
        settings['T'] = os.path.join(
            settings['PORTAGE_BUILDDIR'], 'temp')
        for x in ('PORTAGE_BUILDDIR', 'T'):
            os.makedirs(settings[x])
        # Create a fake environment, to pretend as if the ebuild
        # has been sourced already.  NOTE(review): the file object is
        # not explicitly closed here; only the empty file is needed.
        open(os.path.join(settings['T'], 'environment'), 'wb')
        task_scheduler = TaskScheduler()
        for phase in ('_internal_test',):
            # Test EbuildSpawnProcess by calling doebuild.spawn() with
            # returnpid=False. This case is no longer used by portage
            # internals since EbuildPhase is used instead and that passes
            # returnpid=True to doebuild.spawn().
            rval = doebuild_spawn("%s %s" % (_shell_quote(
                os.path.join(settings["PORTAGE_BIN_PATH"],
                os.path.basename(EBUILD_SH_BINARY))), phase),
                settings, free=1)
            self.assertEqual(rval, os.EX_OK)

            # Run the same phase through the normal EbuildPhase path.
            ebuild_phase = EbuildPhase(background=False,
                phase=phase, scheduler=task_scheduler.sched_iface,
                settings=settings)
            task_scheduler.add(ebuild_phase)
            task_scheduler.run()
            self.assertEqual(ebuild_phase.returncode, os.EX_OK)

            # Also exercise the misc-functions path ('success_hooks').
            ebuild_phase = MiscFunctionsProcess(background=False,
                commands=['success_hooks'],
                scheduler=task_scheduler.sched_iface, settings=settings)
            task_scheduler.add(ebuild_phase)
            task_scheduler.run()
            self.assertEqual(ebuild_phase.returncode, os.EX_OK)
    finally:
        playground.cleanup()
def testIpcDaemon(self):
    """
    For each exit code 0/1/2, run an EbuildIpcDaemon alongside an
    ebuild-ipc client subprocess and verify that the client's requested
    exit code is relayed back through the daemon.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        env = {}
        # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
        # need to be inherited by ebuild subprocesses.
        for var in ('PORTAGE_USERNAME', 'PORTAGE_GRPNAME'):
            if var in os.environ:
                env[var] = os.environ[var]
        env['PORTAGE_PYTHON'] = _python_interpreter
        env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
        env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
        env['PORTAGE_BUILDDIR'] = tmpdir

        input_fifo = os.path.join(tmpdir, '.ipc_in')
        output_fifo = os.path.join(tmpdir, '.ipc_out')
        os.mkfifo(input_fifo)
        os.mkfifo(output_fifo)

        for exitcode in (0, 1, 2):
            task_scheduler = TaskScheduler(max_jobs=2)
            exit_command = ExitCommand()
            commands = {'exit': exit_command}
            daemon = EbuildIpcDaemon(
                commands=commands,
                input_fifo=input_fifo,
                output_fifo=output_fifo,
                scheduler=task_scheduler.sched_iface)
            proc = SpawnProcess(
                args=[BASH_BINARY, "-c",
                    '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
                env=env,
                scheduler=task_scheduler.sched_iface)

            def exit_command_callback():
                # Cancel both tasks once the reply has arrived.
                proc.cancel()
                daemon.cancel()

            exit_command.reply_hook = exit_command_callback
            task_scheduler.add(daemon)
            task_scheduler.add(proc)
            task_scheduler.run()
            self.assertEqual(exit_command.exitcode, exitcode)
    finally:
        shutil.rmtree(tmpdir)
def _testPipeReader(self, test_string):
    """
    Use a poll loop to read data from a pipe and assert that
    the data written to the pipe is identical to the data
    read from the pipe.
    """
    if not self._use_pty:
        master_fd, slave_fd = os.pipe()
    else:
        got_pty, master_fd, slave_fd = _create_pty_or_pipe()
        if not got_pty:
            os.close(slave_fd)
            os.close(master_fd)
            skip_reason = "pty not acquired"
            self.portage_skip = skip_reason
            self.fail(skip_reason)
            return

    # WARNING: It is very important to use unbuffered mode here,
    # in order to avoid issue 5380 with python3.
    master_file = os.fdopen(master_fd, 'rb', 0)
    slave_file = os.fdopen(slave_fd, 'wb', 0)

    task_scheduler = TaskScheduler(max_jobs=2)
    producer = SpawnProcess(
        args=["bash", "-c", self._echo_cmd % test_string],
        env=os.environ,
        fd_pipes={1: slave_fd},
        scheduler=task_scheduler.sched_iface)
    task_scheduler.add(producer)
    slave_file.close()

    consumer = PipeReader(
        input_files={"producer": master_file},
        scheduler=task_scheduler.sched_iface,
        _use_array=self._use_array)
    task_scheduler.add(consumer)

    # Run both tasks to completion, which is necessary to avoid
    # "ResourceWarning: unclosed file" warnings since Python 3.2
    # (and also ensures that no zombie child processes remain).
    task_scheduler.run()

    self.assertEqual(producer.returncode, os.EX_OK)
    self.assertEqual(consumer.returncode, os.EX_OK)

    return consumer.getvalue().decode('ascii', 'replace')
def _testPipeReader(self, test_string):
    """
    Use a poll loop to read data from a pipe and assert that
    the data written to the pipe is identical to the data
    read from the pipe.
    """
    if self._use_pty:
        got_pty, master_fd, slave_fd = _create_pty_or_pipe()
        if not got_pty:
            # Release both descriptors before reporting the skip.
            os.close(slave_fd)
            os.close(master_fd)
            skip_reason = "pty not acquired"
            self.portage_skip = skip_reason
            self.fail(skip_reason)
            return
    else:
        master_fd, slave_fd = os.pipe()

    # WARNING: It is very important to use unbuffered mode here,
    # in order to avoid issue 5380 with python3.
    read_end = os.fdopen(master_fd, 'rb', 0)
    write_end = os.fdopen(slave_fd, 'wb', 0)

    scheduler = TaskScheduler(max_jobs=2)

    producer = SpawnProcess(
        args=["bash", "-c", self._echo_cmd % test_string],
        env=os.environ, fd_pipes={1: slave_fd},
        scheduler=scheduler.sched_iface)
    scheduler.add(producer)
    write_end.close()

    consumer = PipeReader(
        input_files={"producer": read_end},
        scheduler=scheduler.sched_iface,
        _use_array=self._use_array)
    scheduler.add(consumer)

    # This will ensure that both tasks have exited, which
    # is necessary to avoid "ResourceWarning: unclosed file"
    # warnings since Python 3.2 (and also ensures that we
    # don't leave any zombie child processes).
    scheduler.run()

    self.assertEqual(producer.returncode, os.EX_OK)
    self.assertEqual(consumer.returncode, os.EX_OK)

    return consumer.getvalue().decode('ascii', 'replace')
def testIpcDaemon(self):
    """
    For each exit code 0/1/2, run an EbuildIpcDaemon alongside an
    ebuild-ipc client subprocess and verify that the client's requested
    exit code is relayed back through the daemon's 'exit' command.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        env = {}

        # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
        # need to be inherited by ebuild subprocesses.
        if 'PORTAGE_USERNAME' in os.environ:
            env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
        if 'PORTAGE_GRPNAME' in os.environ:
            env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

        env['PORTAGE_PYTHON'] = _python_interpreter
        env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
        env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
        env['PORTAGE_BUILDDIR'] = tmpdir

        # The daemon and the ebuild-ipc client communicate over a pair
        # of named pipes inside the build dir.
        input_fifo = os.path.join(tmpdir, '.ipc_in')
        output_fifo = os.path.join(tmpdir, '.ipc_out')
        os.mkfifo(input_fifo)
        os.mkfifo(output_fifo)

        for exitcode in (0, 1, 2):
            task_scheduler = TaskScheduler(max_jobs=2)
            exit_command = ExitCommand()
            commands = {'exit' : exit_command}
            daemon = EbuildIpcDaemon(commands=commands,
                input_fifo=input_fifo,
                output_fifo=output_fifo,
                scheduler=task_scheduler.sched_iface)
            proc = SpawnProcess(
                args=[BASH_BINARY, "-c",
                '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
                env=env, scheduler=task_scheduler.sched_iface)

            # Once the daemon has replied, cancel both tasks so that
            # run() returns instead of waiting on the fifos.
            def exit_command_callback():
                proc.cancel()
                daemon.cancel()

            exit_command.reply_hook = exit_command_callback
            task_scheduler.add(daemon)
            task_scheduler.add(proc)
            task_scheduler.run()
            self.assertEqual(exit_command.exitcode, exitcode)
    finally:
        shutil.rmtree(tmpdir)
def testLogfile(self):
    """
    Spawn a bash child that echoes a known string with stdout/stderr
    redirected to /dev/null, and verify that the SpawnProcess logfile
    captured exactly the echoed data.
    """
    logfile = None
    try:
        # Temp file to receive the log; only the path is needed here.
        fd, logfile = tempfile.mkstemp()
        os.close(fd)
        null_fd = os.open('/dev/null', os.O_RDWR)
        test_string = 2 * "blah blah blah\n"
        task_scheduler = TaskScheduler()
        proc = SpawnProcess(
            args=[BASH_BINARY, "-c",
                "echo -n '%s'" % test_string],
            env={},
            fd_pipes={
                0: sys.stdin.fileno(),
                1: null_fd,
                2: null_fd
            },
            scheduler=task_scheduler.sched_iface,
            logfile=logfile)
        task_scheduler.add(proc)
        task_scheduler.run()
        os.close(null_fd)
        # Read the log back with the filesystem/content encodings used
        # elsewhere in portage.
        f = codecs.open(_unicode_encode(logfile,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['content'], errors='strict')
        log_content = f.read()
        f.close()
        # When logging passes through a pty, this comparison will fail
        # unless the oflag terminal attributes have the termios.OPOST
        # bit disabled. Otherwise, transformations such as \n -> \r\n
        # may occur.
        self.assertEqual(test_string, log_content)
    finally:
        if logfile:
            try:
                os.unlink(logfile)
            except EnvironmentError as e:
                # A missing logfile is fine; anything else is a real error.
                if e.errno != errno.ENOENT:
                    raise
                del e
def testPipeReader(self):
    """
    Use a poll loop to read data from a pipe and assert that
    the data written to the pipe is identical to the data
    read from the pipe.
    """
    test_string = 2 * "blah blah blah\n"

    master_fd, slave_fd = self._create_pipe()
    master_file = os.fdopen(master_fd, 'rb')

    task_scheduler = TaskScheduler(max_jobs=2)
    scheduler = task_scheduler.sched_iface

    class Producer(SpawnProcess):
        # After spawning, close the parent's copy of the write-end fd
        # (fd_pipes[1]) — presumably so the reader observes EOF when
        # the child exits.
        def _spawn(self, args, **kwargs):
            rval = SpawnProcess._spawn(self, args, **kwargs)
            os.close(kwargs['fd_pipes'][1])
            return rval

    producer = Producer(
        args=["bash", "-c", "echo -n '%s'" % test_string],
        fd_pipes={1: slave_fd}, scheduler=scheduler)

    consumer = PipeReader(
        input_files={"producer": master_file},
        scheduler=scheduler)

    task_scheduler.add(producer)
    task_scheduler.add(consumer)
    task_scheduler.run()

    if sys.hexversion >= 0x3000000:
        test_string = test_string.encode()

    # Bug fix: the original called self._assertEqual(), which is not a
    # unittest.TestCase method and raises AttributeError; assertEqual()
    # is the intended assertion.
    self.assertEqual(test_string, consumer.getvalue())
def testIpcDaemon(self):
    """
    Exercise EbuildIpcDaemon round trips through the ebuild-ipc client,
    then verify that an intentionally short scheduler timeout aborts a
    long-sleeping client without receiving any command.
    """
    tmpdir = tempfile.mkdtemp()
    build_dir = None
    try:
        env = {}

        # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
        # need to be inherited by ebuild subprocesses.
        if 'PORTAGE_USERNAME' in os.environ:
            env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
        if 'PORTAGE_GRPNAME' in os.environ:
            env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

        env['PORTAGE_PYTHON'] = _python_interpreter
        env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
        env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
        env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1')

        task_scheduler = TaskScheduler(max_jobs=2)
        build_dir = EbuildBuildDir(
            scheduler=task_scheduler.sched_iface,
            settings=env)
        build_dir.lock()
        ensure_dirs(env['PORTAGE_BUILDDIR'])

        input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in')
        output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out')
        os.mkfifo(input_fifo)
        os.mkfifo(output_fifo)

        for exitcode in (0, 1, 2):
            exit_command = ExitCommand()
            commands = {'exit' : exit_command}
            daemon = EbuildIpcDaemon(commands=commands,
                input_fifo=input_fifo,
                output_fifo=output_fifo,
                scheduler=task_scheduler.sched_iface)
            proc = SpawnProcess(
                args=[BASH_BINARY, "-c",
                '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
                env=env, scheduler=task_scheduler.sched_iface)

            self.received_command = False
            # NOTE: the closure binds proc/daemon late, but the hook is
            # presumably fired during run() within this iteration, before
            # the next iteration rebinds them.
            def exit_command_callback():
                self.received_command = True
                proc.cancel()
                daemon.cancel()

            exit_command.reply_hook = exit_command_callback
            task_scheduler.add(daemon)
            task_scheduler.add(proc)
            start_time = time.time()
            task_scheduler.run(timeout=self._SCHEDULE_TIMEOUT)
            task_scheduler.clear()

            self.assertEqual(self.received_command, True,
                "command not received after %d seconds" % \
                (time.time() - start_time,))
            self.assertEqual(proc.isAlive(), False)
            self.assertEqual(daemon.isAlive(), False)
            self.assertEqual(exit_command.exitcode, exitcode)

        # Intentionally short timeout test for QueueScheduler.run()
        sleep_time_s = 10      # 10.000 seconds
        short_timeout_ms = 10  # 0.010 seconds
        for i in range(3):
            exit_command = ExitCommand()
            commands = {'exit' : exit_command}
            daemon = EbuildIpcDaemon(commands=commands,
                input_fifo=input_fifo,
                output_fifo=output_fifo,
                scheduler=task_scheduler.sched_iface)
            # The client just sleeps, so no IPC command should ever
            # reach the daemon before the timeout expires.
            proc = SpawnProcess(
                args=[BASH_BINARY, "-c", 'exec sleep %d' % sleep_time_s],
                env=env, scheduler=task_scheduler.sched_iface)

            self.received_command = False
            def exit_command_callback():
                self.received_command = True
                proc.cancel()
                daemon.cancel()

            exit_command.reply_hook = exit_command_callback
            task_scheduler.add(daemon)
            task_scheduler.add(proc)
            start_time = time.time()
            task_scheduler.run(timeout=short_timeout_ms)
            task_scheduler.clear()

            self.assertEqual(self.received_command, False,
                "command received after %d seconds" % \
                (time.time() - start_time,))
            self.assertEqual(proc.isAlive(), False)
            self.assertEqual(daemon.isAlive(), False)
            self.assertEqual(proc.returncode == os.EX_OK, False)
    finally:
        if build_dir is not None:
            build_dir.unlock()
        shutil.rmtree(tmpdir)
def aux_get(self, mycpv, mylist, mytree=None):
    "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
    'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
    'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
    cache_me = False
    if not mytree:
        cache_me = True
    # Fast path: with no tree restriction and only cached keys requested,
    # answer entirely from the in-memory aux cache.
    if not mytree and not self._known_keys.intersection(
        mylist).difference(self._aux_cache_keys):
        aux_cache = self._aux_cache.get(mycpv)
        if aux_cache is not None:
            return [aux_cache.get(x, "") for x in mylist]
        cache_me = True
    global auxdbkeys, auxdbkeylen
    try:
        cat, pkg = mycpv.split("/", 1)
    except ValueError:
        # Missing slash. Can't find ebuild so raise KeyError.
        raise KeyError(mycpv)

    myebuild, mylocation = self.findname2(mycpv, mytree)

    if not myebuild:
        writemsg("!!! aux_get(): %s\n" % \
            _("ebuild not found for '%s'") % mycpv, noiselevel=1)
        raise KeyError(mycpv)

    mydata, st, emtime = self._pull_valid_cache(mycpv, myebuild, mylocation)
    # Regenerate metadata only when no valid cache entry was found.
    doregen = mydata is None

    if doregen:
        if myebuild in self._broken_ebuilds:
            raise KeyError(mycpv)
        if not self._have_root_eclass_dir:
            raise KeyError(mycpv)

        self.doebuild_settings.setcpv(mycpv)
        eapi = None

        if eapi is None and \
            'parse-eapi-ebuild-head' in self.doebuild_settings.features:
            # Optionally sniff EAPI from the head of the ebuild file.
            eapi = portage._parse_eapi_ebuild_head(codecs.open(
                _unicode_encode(myebuild,
                encoding=_encodings['fs'], errors='strict'),
                mode='r', encoding=_encodings['repo.content'],
                errors='replace'))

        if eapi is not None:
            self.doebuild_settings.configdict['pkg']['EAPI'] = eapi

        if eapi is not None and not portage.eapi_is_supported(eapi):
            # Unsupported EAPI: record minimal metadata via the callback
            # instead of sourcing the ebuild.
            mydata = self._metadata_callback(
                mycpv, myebuild, mylocation, {'EAPI':eapi}, emtime)
        else:
            # Source the ebuild in a child process to extract metadata.
            sched = TaskScheduler()
            proc = EbuildMetadataPhase(cpv=mycpv, ebuild_path=myebuild,
                ebuild_mtime=emtime,
                metadata_callback=self._metadata_callback, portdb=self,
                repo_path=mylocation,
                scheduler=sched.sched_iface,
                settings=self.doebuild_settings)

            sched.add(proc)
            sched.run()

            if proc.returncode != os.EX_OK:
                self._broken_ebuilds.add(myebuild)
                raise KeyError(mycpv)

            mydata = proc.metadata

    # do we have a origin repository name for the current package
    mydata["repository"] = self._repository_map.get(mylocation, "")
    mydata["INHERITED"] = ' '.join(mydata.get("_eclasses_", []))
    mydata["_mtime_"] = st[stat.ST_MTIME]

    eapi = mydata.get("EAPI")
    if not eapi:
        eapi = "0"
        mydata["EAPI"] = eapi
    if not eapi_is_supported(eapi):
        # Blank out all metadata for unsupported EAPIs and mark the
        # EAPI value with a leading "-".
        for k in set(mydata).difference(("_mtime_", "_eclasses_")):
            mydata[k] = ""
        mydata["EAPI"] = "-" + eapi.lstrip("-")

    #finally, we look at our internal cache entry and return the requested data.
    returnme = [mydata.get(x, "") for x in mylist]

    if cache_me:
        aux_cache = {}
        for x in self._aux_cache_keys:
            aux_cache[x] = mydata.get(x, "")
        self._aux_cache[mycpv] = aux_cache

    return returnme
def testIpcDaemon(self):
    """
    Exercise EbuildIpcDaemon round trips through the ebuild-ipc client,
    including hardlink-lock cleanup and an intentionally short scheduler
    timeout against a long-sleeping client.
    """
    tmpdir = tempfile.mkdtemp()
    build_dir = None
    try:
        env = {}

        # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
        # need to be inherited by ebuild subprocesses.
        if 'PORTAGE_USERNAME' in os.environ:
            env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
        if 'PORTAGE_GRPNAME' in os.environ:
            env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

        env['PORTAGE_PYTHON'] = _python_interpreter
        env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
        env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
        env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1')

        # Propagate the hardlink-locks test toggle if set.
        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
                os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

        task_scheduler = TaskScheduler(max_jobs=2)
        build_dir = EbuildBuildDir(scheduler=task_scheduler.sched_iface,
            settings=env)
        build_dir.lock()
        ensure_dirs(env['PORTAGE_BUILDDIR'])

        input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in')
        output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out')
        os.mkfifo(input_fifo)
        os.mkfifo(output_fifo)

        for exitcode in (0, 1, 2):
            exit_command = ExitCommand()
            commands = {'exit': exit_command}
            daemon = EbuildIpcDaemon(commands=commands,
                input_fifo=input_fifo,
                output_fifo=output_fifo,
                scheduler=task_scheduler.sched_iface)
            proc = SpawnProcess(args=[
                BASH_BINARY, "-c",
                '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode
                ], env=env, scheduler=task_scheduler.sched_iface)

            self.received_command = False
            # Once the reply arrives, drain the scheduler so run()
            # can return.
            def exit_command_callback():
                self.received_command = True
                task_scheduler.clear()
                task_scheduler.wait()

            exit_command.reply_hook = exit_command_callback
            start_time = time.time()
            task_scheduler.add(daemon)
            task_scheduler.add(proc)
            task_scheduler.run(timeout=self._SCHEDULE_TIMEOUT)
            task_scheduler.clear()
            task_scheduler.wait()
            hardlock_cleanup(env['PORTAGE_BUILDDIR'],
                remove_all_locks=True)

            self.assertEqual(self.received_command, True,
                "command not received after %d seconds" % \
                (time.time() - start_time,))
            self.assertEqual(proc.isAlive(), False)
            self.assertEqual(daemon.isAlive(), False)
            self.assertEqual(exit_command.exitcode, exitcode)

        # Intentionally short timeout test for QueueScheduler.run()
        sleep_time_s = 10      # 10.000 seconds
        short_timeout_ms = 10  # 0.010 seconds
        for i in range(3):
            exit_command = ExitCommand()
            commands = {'exit': exit_command}
            daemon = EbuildIpcDaemon(commands=commands,
                input_fifo=input_fifo,
                output_fifo=output_fifo,
                scheduler=task_scheduler.sched_iface)
            # The client only sleeps, so no command should arrive
            # before the short timeout expires.
            proc = SpawnProcess(
                args=[BASH_BINARY, "-c", 'exec sleep %d' % sleep_time_s],
                env=env, scheduler=task_scheduler.sched_iface)

            self.received_command = False
            def exit_command_callback():
                self.received_command = True
                task_scheduler.clear()
                task_scheduler.wait()

            exit_command.reply_hook = exit_command_callback
            start_time = time.time()
            task_scheduler.add(daemon)
            task_scheduler.add(proc)
            task_scheduler.run(timeout=short_timeout_ms)
            task_scheduler.clear()
            task_scheduler.wait()
            hardlock_cleanup(env['PORTAGE_BUILDDIR'],
                remove_all_locks=True)

            self.assertEqual(self.received_command, False,
                "command received after %d seconds" % \
                (time.time() - start_time,))
            self.assertEqual(proc.isAlive(), False)
            self.assertEqual(daemon.isAlive(), False)
            self.assertEqual(proc.returncode == os.EX_OK, False)
    finally:
        if build_dir is not None:
            build_dir.unlock()
        shutil.rmtree(tmpdir)
def aux_get(self, mycpv, mylist, mytree=None):
    "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
    'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
    'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
    cache_me = False
    if not mytree:
        cache_me = True
    # Serve entirely from the in-memory aux cache when possible (no tree
    # restriction and every requested key is a cached key).
    if not mytree and not self._known_keys.intersection(mylist).difference(
        self._aux_cache_keys):
        aux_cache = self._aux_cache.get(mycpv)
        if aux_cache is not None:
            return [aux_cache.get(x, "") for x in mylist]
        cache_me = True
    global auxdbkeys, auxdbkeylen
    try:
        cat, pkg = mycpv.split("/", 1)
    except ValueError:
        # Missing slash. Can't find ebuild so raise KeyError.
        raise KeyError(mycpv)

    myebuild, mylocation = self.findname2(mycpv, mytree)

    if not myebuild:
        writemsg("!!! aux_get(): %s\n" % \
            _("ebuild not found for '%s'") % mycpv, noiselevel=1)
        raise KeyError(mycpv)

    mydata, st, emtime = self._pull_valid_cache(mycpv, myebuild, mylocation)
    # A cache miss means the metadata must be regenerated below.
    doregen = mydata is None

    if doregen:
        if myebuild in self._broken_ebuilds:
            raise KeyError(mycpv)
        if not self._have_root_eclass_dir:
            raise KeyError(mycpv)

        self.doebuild_settings.setcpv(mycpv)
        eapi = None

        if eapi is None and \
            'parse-eapi-ebuild-head' in self.doebuild_settings.features:
            # Optionally parse EAPI from the head of the ebuild file.
            eapi = portage._parse_eapi_ebuild_head(
                codecs.open(_unicode_encode(myebuild,
                encoding=_encodings['fs'], errors='strict'),
                mode='r', encoding=_encodings['repo.content'],
                errors='replace'))

        if eapi is not None:
            self.doebuild_settings.configdict['pkg']['EAPI'] = eapi

        if eapi is not None and not portage.eapi_is_supported(eapi):
            # Unsupported EAPI: store minimal metadata without sourcing.
            mydata = self._metadata_callback(mycpv, myebuild, mylocation,
                {'EAPI': eapi}, emtime)
        else:
            # Source the ebuild in a child process to regenerate metadata.
            sched = TaskScheduler()
            proc = EbuildMetadataPhase(
                cpv=mycpv, ebuild_path=myebuild,
                ebuild_mtime=emtime,
                metadata_callback=self._metadata_callback, portdb=self,
                repo_path=mylocation,
                scheduler=sched.sched_iface,
                settings=self.doebuild_settings)

            sched.add(proc)
            sched.run()

            if proc.returncode != os.EX_OK:
                self._broken_ebuilds.add(myebuild)
                raise KeyError(mycpv)

            mydata = proc.metadata

    # do we have a origin repository name for the current package
    mydata["repository"] = self._repository_map.get(mylocation, "")
    mydata["INHERITED"] = ' '.join(mydata.get("_eclasses_", []))
    mydata["_mtime_"] = st[stat.ST_MTIME]

    eapi = mydata.get("EAPI")
    if not eapi:
        eapi = "0"
        mydata["EAPI"] = eapi
    if not eapi_is_supported(eapi):
        # Blank out metadata for unsupported EAPIs, keeping only the
        # bookkeeping keys, and prefix the EAPI value with "-".
        for k in set(mydata).difference(("_mtime_", "_eclasses_")):
            mydata[k] = ""
        mydata["EAPI"] = "-" + eapi.lstrip("-")

    #finally, we look at our internal cache entry and return the requested data.
    returnme = [mydata.get(x, "") for x in mylist]

    if cache_me:
        aux_cache = {}
        for x in self._aux_cache_keys:
            aux_cache[x] = mydata.get(x, "")
        self._aux_cache[mycpv] = aux_cache

    return returnme