def main():
    """CLI entry point: parse subunit v2 streams and store results in the DB.

    Reads the streams named in ``CONF.subunit_files``, or stdin when no
    files were given, and hands each parsed result set to
    ``process_results()``.

    :return: 3 when a run id is combined with more than one input stream
        (an invalid combination); otherwise None.
    """
    cli_opts()
    extensions = get_extensions()
    parse_args(sys.argv)
    targets = get_targets(extensions)
    # Track every file handle we open so it is always closed; the
    # previous implementation leaked the handles of CONF.subunit_files.
    opened_files = []
    try:
        if CONF.subunit_files:
            # A run id identifies exactly one run, so it cannot apply to
            # several input streams at once.
            if len(CONF.subunit_files) > 1 and CONF.run_id:
                print("You can not specify a run id for adding more than 1 stream")
                return 3
            # Open every file up front (as before) so a bad path fails
            # before any stream has been processed.
            streams = []
            for subunit_file in CONF.subunit_files:
                fp = open(subunit_file, 'r')
                opened_files.append(fp)
                streams.append(subunit.ReadSubunit(
                    fp,
                    attachments=CONF.store_attachments,
                    attr_regex=CONF.attr_regex,
                    targets=targets,
                    use_wall_time=CONF.use_run_wall_time,
                    non_subunit_name=CONF.non_subunit_name))
        else:
            streams = [subunit.ReadSubunit(
                sys.stdin,
                attachments=CONF.store_attachments,
                attr_regex=CONF.attr_regex,
                targets=targets,
                use_wall_time=CONF.use_run_wall_time,
                non_subunit_name=CONF.non_subunit_name)]
        for stream in streams:
            process_results(stream.get_results())
    finally:
        for fp in opened_files:
            fp.close()
def test_targets_not_modified(self, ttc_mock):
    """Regression check: ReadSubunit must not mutate the caller's targets.

    Constructing two readers from the same list must pass the same number
    of targets both times, and the caller's list must stay untouched.
    """
    caller_targets = ['foo']
    observed_counts = []
    for _ in range(2):
        subunit.ReadSubunit(mock.MagicMock(), targets=caller_targets)
        observed_counts.append(len(ttc_mock.call_args[0][0]))
    self.assertEqual(observed_counts[0], observed_counts[1])
    self.assertEqual(caller_targets, ['foo'])
def test_cleanup_test_name_no_attr_matches(self):
    """A test id whose attrs don't match the default regex is untouched."""
    backtick_id = 'test_fake.TestThing.testB`good_test,fun_test,legit`'
    reader = subunit.ReadSubunit(io.BytesIO())
    cleaned = reader.cleanup_test_name(
        backtick_id, strip_tags=True, strip_scenarios=False)
    self.assertEqual(backtick_id, cleaned)
def test_cleanup_test_name_default_attr_regex(self):
    """The default attr regex strips bracketed attrs from the test id."""
    bracket_id = 'test_fake.TestThing.testA[good_test,fun_test,legit]'
    reader = subunit.ReadSubunit(io.BytesIO())
    cleaned = reader.cleanup_test_name(
        bracket_id, strip_tags=True, strip_scenarios=False)
    self.assertEqual('test_fake.TestThing.testA', cleaned)
def create_run():
    """Flask view: import a Jenkins build's subunit stream into the DB.

    Expects ``build_url``, ``user`` and ``password`` query parameters,
    fetches the build's json metadata and subunit artifact, parses the
    stream and stores the results.

    :return: a status string describing success or the specific failure.
    """
    # NOTE(review): request.args.get() returns None for a missing key and
    # never raises KeyError, so the old try/except KeyError was dead code
    # and a missing build_url crashed later on None.endswith(). Validate
    # explicitly instead.
    url = request.args.get('build_url')
    user = request.args.get('user')
    password = request.args.get('password')
    if url is None:
        return 'Wrong url parameters! \nExpecting %s from jenkins.' % 'build_url'
    # Normalize a trailing slash so the artifact paths compose cleanly.
    if url.endswith('/'):
        url = url[:-1]
    # make configurable
    try:
        json_resp = requests_get('%s/api/json' % url, auth=(user, password))
        subunit_data = requests_get('%s/artifact/subunit.stream' % url,
                                    auth=(user, password))
    except ValueError as err:
        return 'Failed getting build info... %s' % err
    try:
        url = get_url(json_resp)
    except ValueError as err:
        return 'Looks like your test json data is bad: %s' % err
    try:
        subunit_file = io.BytesIO(subunit_data.content)
    except ValueError as err:
        return 'Looks like your subunit stream is bad: %s' % err
    set_artifacts_link(url)
    set_metadata(url)
    stream = read_subunit.ReadSubunit(subunit_file)
    shell.process_results(stream.get_results())
    return "success for %s" % url
def _write_to_db(self, subunit): subunit_v2 = subunit.pop('subunit') # Set run metadata from gearman log_url = subunit.pop('log_url', None) subunit = self._uniquify_name(subunit) if log_url: log_dir = os.path.dirname(log_url) # log_dir should be the top-level directory containing a job run, # but the subunit file may be nested in 0 - 2 subdirectories (top, # logs/, or logs/old/), so we need to safely correct the path here log_base = os.path.basename(log_dir) if log_base == 'logs': log_dir = os.path.dirname(log_dir) elif log_base == 'old': log_dir = os.path.dirname(os.path.dirname(log_dir)) shell.CONF.set_override('artifacts', log_dir) shell.CONF.set_override('run_meta', subunit) # Parse subunit stream and store in DB if subunit_v2.closed: logging.debug('Trying to convert closed subunit v2 stream: %s to ' 'SQL' % subunit_v2) else: logging.debug('Converting Subunit V2 stream: %s to SQL' % subunit_v2) stream = read_subunit.ReadSubunit(subunit_v2, targets=self.extra_targets, use_wall_time=True) results = stream.get_results() start_time = sorted( [results[x]['start_time'] for x in results if x != 'run_time'])[0] shell.CONF.set_override('run_at', start_time.isoformat()) shell.process_results(results) subunit_v2.close()
def test_cleanup_test_name_with_attrs_leave_scenarios(self):
    """Attrs are stripped by default while scenario suffixes survive."""
    reader = subunit.ReadSubunit(mock.MagicMock())
    raw_name = ('fake_dir.fake_module.fakeClass.test_fake[attr1,attr2]'
                '(scenario)')
    expected = 'fake_dir.fake_module.fakeClass.test_fake(scenario)'
    self.assertEqual(reader.cleanup_test_name(raw_name), expected)
def test_attrs_default_regex(self):
    """get_attrs() extracts bracketed attrs using the default regex."""
    bracket_id = 'test_fake.TestThing.testA[good_test,fun_test,legit]'
    reader = subunit.ReadSubunit(io.BytesIO())
    parsed = reader.get_attrs(bracket_id).split(',')
    for expected_attr in ('legit', 'fun_test', 'good_test'):
        self.assertIn(expected_attr, parsed)
def test_attrs_non_default_regex(self):
    """A custom attr_regex parses attrs delimited by backticks."""
    fake_id = 'test_fake.TestThing.testB`good_test,fun_test,legit`'
    # Use a raw string so the backslashes reach the regex engine intact;
    # the non-raw '\`' escape is invalid and raises a DeprecationWarning
    # (a SyntaxError in future Python versions).
    regex = r'\`(.*)\`'
    read = subunit.ReadSubunit(io.BytesIO(), attr_regex=regex)
    attrs = read.get_attrs(fake_id)
    attrs_list = attrs.split(',')
    self.assertIn('legit', attrs_list)
    self.assertIn('fun_test', attrs_list)
    self.assertIn('good_test', attrs_list)
def test_run_time(self):
    """run_time() sums individual test durations (100 x 50s = 5000s)."""
    reader = subunit.ReadSubunit(mock.MagicMock())
    fifty_seconds = {
        'start_time': datetime.datetime(1914, 6, 28, 10, 45, 0),
        'end_time': datetime.datetime(1914, 6, 28, 10, 45, 50),
    }
    reader.results = {
        'test_fake_%d' % idx: fifty_seconds for idx in range(100)
    }
    self.assertEqual(reader.run_time(), 5000.0)
def handle_subunit_event(self):
    """Pull one subunit event off the queue, parse it and store it.

    The queued dict carries the subunit v2 stream plus run metadata
    (log_url, build_uuid, ...) collected from gearman.
    """
    event = self.subunitq.get()
    stream_v2 = event.pop('subunit')
    # Set run metadata from gearman
    log_url = event.pop('log_url', None)
    if log_url:
        artifacts_dir = os.path.dirname(os.path.dirname(log_url))
        shell.CONF.set_override('artifacts', artifacts_dir)
    build_uuid = event.get('build_uuid', None)
    if build_uuid:
        shell.CONF.set_override('run_id', build_uuid)
    shell.CONF.set_override('run_meta', event)
    # Parse subunit stream and store in DB
    logging.debug('Converting Subunit V2 stream to SQL')
    parsed_stream = read_subunit.ReadSubunit(stream_v2)
    shell.process_results(parsed_stream.get_results())
def test_not_subunit_no_subunit_name_set(self):
    """Trailing non-subunit bytes raise when no non_subunit_name is set."""
    buf = io.BytesIO()
    writer = subunit_lib.StreamResultToBytes(buf)
    writer.status(test_id='test_a', test_status='inprogress')
    writer.status(test_id='test_a', test_status='success')
    buf.write(b'I AM NOT SUBUNIT')
    buf.seek(0)
    reader = subunit.ReadSubunit(buf)
    raised = False
    try:
        reader.get_results()
    # NOTE(mtreinish): Subunit raises the generic Exception class
    # so manually inspect the Exception object to check the error
    # message
    except Exception as exc:
        self.assertIsInstance(exc, Exception)
        self.assertEqual(exc.args, ('Non subunit content', b'I'))
        raised = True
    self.assertTrue(raised,
                    'subunit exception not raised on invalid content')
def test_parse_outcome(self):
    """parse_outcome() keys results by stripped name and maps all fields."""
    reader = subunit.ReadSubunit(mock.MagicMock())
    test_id = 'fake_dir.fake_module.fakeClass.test_fake[attr1,attr2]'
    started = datetime.datetime(1914, 8, 26, 20, 00, 00)
    finished = datetime.datetime(2014, 8, 26, 20, 00, 00)
    worker_tags = set(['worker-0'])
    reader.parse_outcome({
        'status': 'skip',
        'details': {'reason': 'fake reason'},
        'id': test_id,
        'timestamps': [started, finished],
        'tags': worker_tags,
    })
    parsed = reader.results
    # assert that the dict root key is the test name - the id stripped
    # of the tags
    stripped_name = test_id[:test_id.find('[')]
    self.assertEqual(list(parsed.keys()), [stripped_name])
    entry = parsed[stripped_name]
    self.assertEqual(entry['status'], 'skip')
    self.assertEqual(entry['start_time'], started)
    self.assertEqual(entry['end_time'], finished)
    expected_attrs = test_id[test_id.find('[') + 1:test_id.find(']')]
    self.assertEqual(entry['metadata']['attrs'], expected_attrs)
    self.assertEqual(entry['metadata']['tags'], ','.join(worker_tags))
def test_wall_run_time(self):
    """With use_wall_time the run time spans first start to last stop."""
    reader = subunit.ReadSubunit(mock.MagicMock(), use_wall_time=True)
    begin = datetime.datetime(1914, 6, 28, 10, 45, 0)
    end = datetime.datetime(1914, 6, 28, 10, 45, 50)
    results = {'first': {'start_time': begin, 'end_time': end}}
    one_minute = datetime.timedelta(minutes=1)
    for idx in range(100):
        begin = begin + one_minute
        end = end + one_minute
        results['test_fake_' + str(idx)] = {
            'start_time': begin,
            'end_time': end,
        }
    reader.results = results
    # Wall time should be (60 * 100) + 50
    self.assertEqual(reader.run_time(), 6050.0)
def test_targets_added_to_result(self, ttc_mock):
    """A caller-supplied target is included in the composed target list."""
    subunit.ReadSubunit(mock.MagicMock(), targets=['foo'])
    composed_targets = ttc_mock.call_args[0][0]
    self.assertIn('foo', composed_targets)
def test_attrs_no_matches(self):
    """get_attrs() returns None when the default regex finds no attrs."""
    backtick_id = 'test_fake.TestThing.testB`good_test,fun_test,legit`'
    reader = subunit.ReadSubunit(io.BytesIO())
    self.assertIsNone(reader.get_attrs(backtick_id))
def test_cleanup_test_name_strip_nothing(self):
    """With strip_tags=False the test id is returned unchanged."""
    reader = subunit.ReadSubunit(mock.MagicMock())
    raw_name = ('fake_dir.fake_module.fakeClass.test_fake[attr1,attr2]'
                '(scenario)')
    self.assertEqual(reader.cleanup_test_name(raw_name, strip_tags=False),
                     raw_name)
def test_non_subunit_name(self):
    """The non_subunit_name kwarg is forwarded to the wrapped stream."""
    marker = 'fake_non_subunit'
    reader = subunit.ReadSubunit(mock.MagicMock(),
                                 non_subunit_name=marker)
    self.assertEqual(reader.stream.non_subunit_name, marker)
def test_get_attrs(self):
    """get_attrs() returns the comma-joined attrs embedded in a test id."""
    reader = subunit.ReadSubunit(mock.MagicMock())
    test_id = 'fake_dir.fake_module.fakeClass.test_fake[attr1,attr2]'
    self.assertEqual(reader.get_attrs(test_id), 'attr1,attr2')