def testInvalidDescription(self):
    """Description strings are validated for length and character set."""
    # Exactly 256 characters is still within the accepted limit.
    perf_uploader.OutputPerfValue(self.file_name, 'a' * 256, 0, 'ignored')
    # One character past the limit must be rejected.
    self.assertRaises(ValueError, perf_uploader.OutputPerfValue,
                      'ignored', 'a' * 257, 0, 'ignored')
    # Alphanumerics plus '-', '_', and '.' are all legal characters.
    perf_uploader.OutputPerfValue(self.file_name, 'abcXYZ09-_.', 0, 'ignored')
    # An embedded NUL byte must be rejected.
    self.assertRaises(ValueError, perf_uploader.OutputPerfValue,
                      'ignored', 'a\x00c', 0, 'ignored')
def testLoadTwoValues(self):
    """Two recorded perf values round-trip in order through LoadPerfValues."""
    perf_uploader.OutputPerfValue(self.file_name, 'desc', 41, 'units')
    perf_uploader.OutputPerfValue(self.file_name, 'desc2', 42, 'units2')
    entries = perf_uploader.LoadPerfValues(self.file_name)
    self.assertEqual(2, len(entries))
    # Entries come back in write order.
    first, second = entries[0], entries[1]
    self.assertEqual(41, first.value)
    self.assertEqual(42, second.value)
    self.assertEqual('desc2', second.description)
    # No graph was supplied, so the field stays unset.
    self.assertEqual(None, second.graph)
def OutputPerfValue(self, description, value, units,
                    higher_is_better=True, graph=None):
    """Record a perf value for later upload.

    Args:
      description: String describing the value, e.g. "partition-0". The
        special description "ref" is taken as the reference.
      value: A float measurement.
      units: String naming the unit of measurement, e.g. "KB" or "meter".
      higher_is_better: Boolean; True means a larger value indicates better
        performance.
      graph: String name of the graph to plot this value on. When omitted,
        the current test method name is used.
    """
    # Without a result directory there is nowhere to write; skip silently.
    if not self._result_dir:
        logging.warning(
            'Result directory is not set. Ignore OutputPerfValue.')
        return
    if graph is None:
        graph = self._testMethodName
    perf_file = self._GeneratePerfFileName()
    perf_uploader.OutputPerfValue(perf_file, description, value, units,
                                  higher_is_better, graph)
def testLoadOneValue(self):
    """A single recorded perf value round-trips through LoadPerfValues."""
    perf_uploader.OutputPerfValue(self.file_name, 'desc', 41, 'units')
    loaded = perf_uploader.LoadPerfValues(self.file_name)
    self.assertEqual(1, len(loaded))
    entry = loaded[0]
    self.assertEqual(41, entry.value)
    self.assertEqual('desc', entry.description)
    # higher_is_better defaults to True when not specified.
    self.assertEqual(True, entry.higher_is_better)
def _SendPerfValues(buildroot, sdk_tarball, buildbot_uri_log, version,
                    platform_name):
    """Record SDK size perf data and upload it to the dashboard.

    Args:
      buildroot: Root of the build tree.
      sdk_tarball: Path to the built SDK tarball.
      buildbot_uri_log: URI of the buildbot log, attached as stdio_uri.
      version: Dotted version string; digits are concatenated to form the
        dashboard revision.
      platform_name: Platform name reported to the dashboard.
    """
    perf_path = SdkPerfPath(buildroot)
    test_name = 'sdk'
    units = 'bytes'

    # Make sure the file doesn't contain previous data.
    osutils.SafeUnlink(perf_path)

    shared_kwargs = {
        'higher_is_better': False,
        'graph': 'cros-sdk-size',
        'stdio_uri': buildbot_uri_log,
    }

    # Record the bare SDK size first.
    sdk_size = os.path.getsize(sdk_tarball)
    perf_uploader.OutputPerfValue(perf_path, 'base', sdk_size, units,
                                  **shared_kwargs)

    # Then each toolchain tarball, both alone and combined with the base SDK.
    toolchain_pattern = os.path.join(buildroot, constants.DEFAULT_CHROOT_DIR,
                                     constants.SDK_TOOLCHAINS_OUTPUT,
                                     '*.tar.*')
    for toolchain_tarball in glob.glob(toolchain_pattern):
        name = os.path.basename(toolchain_tarball).rsplit('.', 2)[0]
        size = os.path.getsize(toolchain_tarball)
        perf_uploader.OutputPerfValue(perf_path, name, size, units,
                                      **shared_kwargs)
        perf_uploader.OutputPerfValue(perf_path, 'base_plus_%s' % name,
                                      sdk_size + size, units, **shared_kwargs)

    # Due to limitations in the perf dashboard, we have to create an integer
    # based on the current timestamp. This field only accepts integers, and
    # the perf dashboard accepts this or CrOS+Chrome official versions.
    revision = int(version.replace('.', ''))

    perf_values = perf_uploader.LoadPerfValues(perf_path)
    # Uploads are flaky; retry a few times before giving up.
    retry_util.RetryException(perf_uploader.PerfUploadingError, 3,
                              perf_uploader.UploadPerfValues,
                              perf_values, platform_name, test_name,
                              revision=revision)
def testDryRun(self):
    """Make sure dryrun mode doesn't upload."""
    # If the upload path is ever reached, this side effect fails the test.
    self.send_func.side_effect = AssertionError('dryrun should not upload')
    perf_uploader.OutputPerfValue(self.file_name, 'desc1', 40, 'unit')
    loaded_values = perf_uploader.LoadPerfValues(self.file_name)
    perf_uploader.UploadPerfValues(loaded_values, 'platform', 'TestName',
                                   cros_version='cros',
                                   chrome_version='chrome',
                                   dry_run=True)
def testTwoEntriesOfSameTest(self):
    """Upload one test, two perf values."""
    perf_uploader.OutputPerfValue(self.file_name, 'desc1', 40, 'unit')
    perf_uploader.OutputPerfValue(self.file_name, 'desc1', 42, 'unit')
    loaded_values = perf_uploader.LoadPerfValues(self.file_name)
    perf_uploader.UploadPerfValues(loaded_values, 'platform', 'TestName',
                                   cros_version='cros',
                                   chrome_version='chrome')
    args, _kwargs = self.send_func.call_args
    payload = json.loads(args[0]['data'])
    # Both samples share a description, so they collapse to one entry.
    self.assertEqual(1, len(payload))
    entry = payload[0]
    self.assertEqual('unit', entry['units'])
    # Average of 40 and 42
    self.assertEqual(41, entry['value'])
    # Standard deviation sqrt(2)
    self.assertEqual(1.4142, entry['error'])
def testTwoTests(self):
    """Upload two tests, one perf value each."""
    perf_uploader.OutputPerfValue(self.file_name, 'desc1', 40, 'unit')
    perf_uploader.OutputPerfValue(self.file_name, 'desc2', 42, 'unit')
    loaded_values = perf_uploader.LoadPerfValues(self.file_name)
    perf_uploader.UploadPerfValues(loaded_values, 'platform', 'TestName',
                                   cros_version='cros',
                                   chrome_version='chrome')
    args, _kwargs = self.send_func.call_args
    payload = json.loads(args[0]['data'])
    self.assertEqual(2, len(payload))
    # Sort by test name so the assertions below see a deterministic order.
    payload = sorted(payload, key=lambda entry: entry['test'])
    for entry, expected_value in zip(payload, (40, 42)):
        self.assertEqual(expected_value, entry['value'])
        # A single sample has no spread.
        self.assertEqual(0, entry['error'])
def testCustomDashboard(self):
    """Verify we can set data to different dashboards."""
    perf_uploader.OutputPerfValue(self.file_name, 'desc1', 42, 'unit')
    loaded_values = perf_uploader.LoadPerfValues(self.file_name)
    perf_uploader.UploadPerfValues(loaded_values, 'platform', 'TestName',
                                   cros_version='cros',
                                   chrome_version='chrome',
                                   dashboard='http://localhost')
    # The request must target the custom dashboard's add_point endpoint.
    request = self.urlopen.call_args[0][0]
    self.assertEqual('http://localhost/add_point', request.get_full_url())
def testRevision(self):
    """Verify revision is accepted over cros/chrome version."""
    perf_uploader.OutputPerfValue(self.file_name, 'desc1', 42, 'unit')
    loaded_values = perf_uploader.LoadPerfValues(self.file_name)
    perf_uploader.UploadPerfValues(loaded_values, 'platform', 'TestName',
                                   revision=12345)
    args, _kwargs = self.send_func.call_args
    payload = json.loads(args[0]['data'])
    # The explicit revision is carried through to the uploaded entry.
    self.assertEqual(12345, payload[0]['revision'])
def main(argv):
    """Parse arguments, write one perf value to a temp file, and upload it."""
    parser = GetParser()
    opts = parser.parse_args(argv)
    opts.Freeze()

    logging.info('Uploading results to %s', opts.url)
    logging.info('Master name: %s', opts.master)
    logging.info('Test name: %s', opts.test)

    # Stage the single value in a throwaway file, then load it back in the
    # format UploadPerfValues expects.
    with tempfile.NamedTemporaryFile() as output:
        perf_uploader.OutputPerfValue(
            output.name, opts.description, float(opts.data), opts.units,
            graph=opts.graph, stdio_uri=opts.stdio_uri)
        perf_values = perf_uploader.LoadPerfValues(output.name)

    logging.debug('Uploading:')
    for value in perf_values:
        logging.debug(' %s', value)

    perf_uploader.UploadPerfValues(
        perf_values, opts.bot, opts.test, revision=opts.revision,
        cros_version=opts.cros_version, chrome_version=opts.chrome_version,
        dashboard=opts.url, master_name=opts.master, test_prefix='',
        platform_prefix='', dry_run=opts.dry_run)

    # Point the user at the dashboard views for the data just uploaded.
    data_name = opts.graph if opts.graph else opts.description
    query = {
        'masters': opts.master,
        'tests': '%s/%s' % (opts.test, data_name),
        'bots': opts.bot,
    }
    view_url = os.path.join(opts.url,
                            'report?%s' % urllib.parse.urlencode(query))
    logging.info('View results at %s', view_url)
    logging.info('Note: To make tests public, visit %s',
                 os.path.join(opts.url, 'change_internal_only'))
    logging.info('Note: To update the test list, visit %s',
                 os.path.join(opts.url, 'update_test_suites'))
def testOneEntry(self):
    """One perf value is posted to add_point with the expected payload."""
    perf_uploader.OutputPerfValue(self.file_name, 'desc1', 42, 'unit')
    loaded_values = perf_uploader.LoadPerfValues(self.file_name)
    perf_uploader.UploadPerfValues(loaded_values, 'platform', 'TestName',
                                   cros_version='cros',
                                   chrome_version='chrome')
    request = self.urlopen.call_args[0][0]
    # The request must target the default dashboard's add_point endpoint.
    self.assertEqual(
        os.path.join(perf_uploader.DASHBOARD_URL, 'add_point'),
        request.get_full_url())
    # The POST body is form-encoded; decode the 'data' field back to JSON.
    encoded = urllib.parse.parse_qs(request.data)[b'data']
    entries = [json.loads(item) for item in encoded]
    entry = entries[0][0]
    self.assertEqual('cros',
                     entry['supplemental_columns']['r_cros_version'])
    self.assertEqual(42, entry['value'])
    self.assertEqual('cbuildbot.TestName/desc1', entry['test'])
    self.assertEqual('unit', entry['units'])
def testOneEntry(self):
    """Upload one perf value."""
    perf_uploader.OutputPerfValue(self.file_name, 'desc1', 42, 'unit')
    loaded_values = perf_uploader.LoadPerfValues(self.file_name)
    perf_uploader.UploadPerfValues(loaded_values, 'platform', 'TestName',
                                   cros_version='cros',
                                   chrome_version='chrome')
    args, _kwargs = self.send_func.call_args
    payload = json.loads(args[0]['data'])
    self.assertEqual(1, len(payload))
    entry = payload[0]
    self.assertEqual('unit', entry['units'])
    # Both version strings land in the supplemental columns.
    supplemental = entry['supplemental_columns']
    self.assertEqual('cros', supplemental['r_cros_version'])
    self.assertEqual('chrome', supplemental['r_chrome_version'])
    self.assertEqual('cros-platform', entry['bot'])
    self.assertEqual(42, entry['value'])
    # A single sample has no spread.
    self.assertEqual(0, entry['error'])
def testValidJson(self):
    """The on-disk perf record is a parseable JSON object."""
    perf_uploader.OutputPerfValue(self.file_name, 'desc', 42, 'units')
    raw = osutils.ReadFile(self.file_name)
    parsed = json.loads(raw)
    self.assertTrue(isinstance(parsed, dict))