def finalize(self, metadata=None):
    """Assemble the per-test XML snippets into a single JUnit file.

    Reads every snippet under ``<dest>/tests``, grafts the parseable ones
    onto a single ``<testsuite name="piglit">`` element, writes the combined
    document to ``<dest>/results.xml``, and removes the snippet directory.

    Raises:
        exceptions.PiglitUserError: if no snippet parsed successfully
            (i.e. no tests were actually run).
    """
    snippet_dir = os.path.join(self._dest, 'tests')

    root = etree.Element('testsuites')
    suite = etree.Element('testsuite', name='piglit')
    root.append(suite)

    for snippet in os.listdir(snippet_dir):
        with open(os.path.join(snippet_dir, snippet), 'r') as handle:
            # Each snippet was written as an independent transaction; one
            # that fails to parse is treated as an aborted write and
            # silently dropped rather than poisoning the whole report.
            try:
                suite.append(etree.parse(handle).getroot())
            except etree.ParseError:
                continue

    count = len(suite)
    if not count:
        raise exceptions.PiglitUserError(
            'No tests were run, not writing a result file', exitcode=2)

    # JUnit consumers expect the suite's test count as an attribute; it
    # must be text (unicode / py3 str), not an int.
    suite.attrib['tests'] = str(count)

    with open(os.path.join(self._dest, 'results.xml'), 'w') as handle:
        handle.write("<?xml version='1.0' encoding='utf-8'?>\n")
        # Only lxml understands pretty_print; the stdlib serializer takes
        # no such keyword, so build the kwargs conditionally.
        kwargs = {}
        if etree.__name__ == 'lxml.etree':
            kwargs['pretty_print'] = True
        serialized = etree.tostring(root, **kwargs)
        handle.write(serialized.decode('utf-8'))

    shutil.rmtree(snippet_dir)
def finalize(self, metadata=None):
    """ End json serialization and cleanup

    This method is called after all of tests are written, it closes any
    containers that are still open and closes the file

    Combines <dest>/metadata.json, the optional ``metadata`` argument, and
    every per-test snippet under <dest>/tests into a single
    <dest>/results.json, then deletes the temporary files.

    Arguments:
    metadata -- an optional mapping merged over the on-disk metadata
                (it is applied after metadata.json is loaded, so its keys
                win on conflict).

    Raises:
        exceptions.PiglitUserError: if no test snippet could be loaded.
    """
    tests_dir = os.path.join(self._dest, 'tests')
    # Snippets are named <integer>.json; sort numerically (not
    # lexically) so tests appear in the order they were written.
    file_list = sorted(
        (f for f in os.listdir(tests_dir) if f.endswith('.json')),
        key=lambda p: int(os.path.splitext(p)[0]))

    # If jsonstreams is not present then build a complete tree of all of
    # the data and write it with json.dump
    if not _STREAMS:
        # Create a dictionary that is full of data to be written to a
        # single file
        data = collections.OrderedDict()

        # Load the metadata and put it into a dictionary
        with open(os.path.join(self._dest, 'metadata.json'), 'r') as f:
            data.update(json.load(f))

        # If there is more metadata add it the dictionary
        if metadata:
            data.update(metadata)

        # Add the tests to the dictionary
        data['tests'] = collections.OrderedDict()

        for test in file_list:
            test = os.path.join(tests_dir, test)
            if os.path.isfile(test):
                # Try to open the json snippets. If we fail to open a test
                # then throw the whole thing out. This gives us atomic
                # writes, the writing worked and is valid or it didn't
                # work.
                try:
                    with open(test, 'r') as f:
                        data['tests'].update(json.load(f))
                except ValueError:
                    pass
        if not data['tests']:
            raise exceptions.PiglitUserError(
                'No tests were run, not writing a result file', exitcode=2)
        data = results.TestrunResult.from_dict(data)

        # write out the combined file. Use the compression writer from the
        # FileBackend
        with self._write_final(os.path.join(self._dest, 'results.json')) as f:
            json.dump(data, f, default=piglit_encoder, indent=INDENT)

    # Otherwise use jsonstreams to write the final dictionary. This uses an
    # external library, but is slightly faster and uses considerably less
    # memory than building a complete tree.
    else:
        encoder = functools.partial(json.JSONEncoder, default=piglit_encoder)

        with self._write_final(os.path.join(self._dest, 'results.json')) as f:
            with jsonstreams.Stream(jsonstreams.Type.object, fd=f, indent=4,
                                    encoder=encoder, pretty=True) as s:
                s.write('__type__', 'TestrunResult')
                # Stream the metadata first, preserving key order so the
                # output matches the non-streaming branch.
                with open(os.path.join(self._dest, 'metadata.json'), 'r') as n:
                    s.iterwrite(
                        json.load(n,
                                  object_pairs_hook=collections.OrderedDict
                                  ).items())

                if metadata:
                    s.iterwrite(metadata.items())

                with s.subobject('tests') as t:
                    # Track whether any snippet actually loaded, so an
                    # empty run can be reported as an error below.
                    wrote = False
                    for test in file_list:
                        test = os.path.join(tests_dir, test)
                        if os.path.isfile(test):
                            # Same atomic-write policy as above: an
                            # unparseable snippet is discarded wholesale.
                            try:
                                with open(test, 'r') as f:
                                    a = json.load(f)
                            except ValueError:
                                continue

                            t.iterwrite(a.items())
                            wrote = True

                    if not wrote:
                        raise exceptions.PiglitUserError(
                            'No tests were run.', exitcode=2)

    # Delete the temporary files
    os.unlink(os.path.join(self._dest, 'metadata.json'))
    shutil.rmtree(os.path.join(self._dest, 'tests'))
def run(profiles, logger, backend, concurrency):
    """Runs all tests using Thread pool.

    When called this method will flatten out self.tests into self.test_list,
    then will prepare a logger, and begin executing tests through it's Thread
    pools.

    Based on the value of options.OPTIONS.concurrent it will either run all
    the tests concurrently, all serially, or first the thread safe tests then
    the serial tests.

    Finally it will print a final summary of the tests.

    Arguments:
    profiles -- a list of Profile instances.
    logger   -- a log.LogManager instance.
    backend  -- a results.Backend derived instance.
    concurrency -- one of "all", "none", or "some" (see below).
    """
    # chunksize of 1 keeps the thread pools handing out one test at a
    # time; test runtimes vary wildly, so larger chunks would serialize
    # slow tests behind each other.
    chunksize = 1

    # The logger needs to know how many tests are running. Because of filters
    # there's no way to do that without making a concrete list out of the
    # filters profiles.
    profiles = [(p, list(p.itertests())) for p in profiles]
    log = LogManager(logger, sum(len(l) for _, l in profiles))

    # check that after the filters are run there are actually tests to run.
    if not any(l for _, l in profiles):
        raise exceptions.PiglitUserError('no matching tests')

    def test(name, test, profile, this_pool=None):
        """Function to call test.execute from map"""
        # write_test gives us a context-managed writer; the result is
        # committed via w() while the transaction is still open.
        with backend.write_test(name) as w:
            test.execute(name, log.get(), profile.options)
            w(test.result)
        # A monitored failure condition aborts the whole pool, not just
        # this one test.
        if profile.options['monitor'].abort_needed:
            this_pool.terminate()

    def run_threads(pool, profile, test_list, filterby=None):
        """ Open a pool, close it, and join it """
        if filterby:
            # Although filterby could be attached to TestProfile as a filter,
            # it would have to be removed when run_threads exits, requiring
            # more code, and adding side-effects
            test_list = (x for x in test_list if filterby(x))

        pool.imap(lambda pair: test(pair[0], pair[1], profile, pool),
                  test_list, chunksize)

    def run_profile(profile, test_list):
        """Run an individual profile."""
        profile.setup()
        if concurrency == "all":
            run_threads(multi, profile, test_list)
        elif concurrency == "none":
            run_threads(single, profile, test_list)
        else:
            assert concurrency == "some"
            # Filter and return only thread safe tests to the threaded pool
            run_threads(multi, profile, test_list,
                        lambda x: x[1].run_concurrent)
            # Filter and return the non thread safe tests to the single
            # pool
            run_threads(single, profile, test_list,
                        lambda x: not x[1].run_concurrent)
        profile.teardown()

    # Multiprocessing.dummy is a wrapper around Threading that provides a
    # multiprocessing compatible API
    #
    # The default value of pool is the number of virtual processor cores
    single = multiprocessing.dummy.Pool(1)
    multi = multiprocessing.dummy.Pool()

    try:
        for p in profiles:
            run_profile(*p)

        for pool in [single, multi]:
            pool.close()
            pool.join()
    finally:
        # Always print the summary, even when a profile raised or a
        # monitor terminated a pool mid-run.
        log.get().summary()

    # Surface any monitor-detected abort as an error after the summary
    # has been printed.
    for p, _ in profiles:
        if p.options['monitor'].abort_needed:
            raise exceptions.PiglitAbort(p.options['monitor'].error_message)