def test_loader_uses_decimal_to_parse_numbers(self):
    text = resource_string(
        'linaro_dashboard_bundle',
        'test_documents/dummy_doc_with_numbers.json')
    fmt, doc = DocumentIO.loads(text)
    measurement = doc["test_runs"][0]["test_results"][0]["measurement"]
    self.assertEqual(measurement, Decimal("1.5"))
    self.assertTrue(isinstance(measurement, Decimal))
def generate_bundle(serial=None, result_id=None, test=None,
                    test_id=None, attachments=None):
    if result_id is None:
        return {}
    if attachments is None:
        attachments = []
    config = get_config()
    adb = ADB(serial)
    resultdir = os.path.join(config.resultsdir_android, result_id)
    if not adb.exists(resultdir):
        raise Exception("The result (%s) does not exist." % result_id)
    bundle_text = adb.read_file(os.path.join(resultdir, "testdata.json"))
    bundle = DocumentIO.loads(bundle_text)[1]
    if test:
        test_tmp = test
    else:
        test_tmp = TestProvider().load_test(
            bundle['test_runs'][0]['test_id'], serial)
    if test_id:
        bundle['test_runs'][0]['test_id'] = test_id
    else:
        attrs = bundle['test_runs'][0].get('attributes')
        if attrs:
            run_options = attrs.get('run_options')
            if run_options:
                test_id = '%s(%s)' % (bundle['test_runs'][0]['test_id'],
                                      run_options)
                bundle['test_runs'][0]['test_id'] = test_id
    test_tmp.parse(result_id)
    stdout_text = adb.read_file(
        os.path.join(resultdir, os.path.basename(test_tmp.org_ouput_file)))
    if stdout_text is None:
        stdout_text = ''
    stderr_text = adb.read_file(os.path.join(resultdir, 'stderr.log'))
    if stderr_text is None:
        stderr_text = ''
    bundle['test_runs'][0]["test_results"] = test_tmp.parser.results[
        "test_results"]
    # Generate the attachments for a normal test.
    attachment_bundles = []
    for attachment in test_tmp.attachments:
        data_bundle = attachment.generate_bundle(adb=adb, resultsdir=resultdir)
        if data_bundle:
            attachment_bundles.append(data_bundle)
    bundle['test_runs'][0]["attachments"] = attachment_bundles
    # Append the extra attachments passed in for a monkeyrunner test.
    for attach in attachments:
        if os.path.exists(attach):
            with open(attach, 'rb') as stream:
                data = stream.read()
            if data:
                bundle['test_runs'][0]["attachments"].append({
                    "pathname": os.path.basename(attach),
                    "mime_type": 'image/png',
                    "content": base64.standard_b64encode(data)})
    return bundle
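# Usage sketch for generate_bundle() above, not taken from the original
# sources: the serial number and result id are made-up example values, and
# DocumentIO.dumps() (used elsewhere in this code) is assumed to be how the
# resulting bundle is serialized before it is written out or submitted.
def example_generate_and_dump():
    bundle = generate_bundle(serial="0123456789ABCDEF",
                             result_id="monkeyrunner.example.0")
    return DocumentIO.dumps(bundle)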
def test_load_and_save_does_not_clobber_the_data(self):
    original_text = resource_string(
        'linaro_dashboard_bundle', 'test_documents/' + self.filename)
    fmt, doc = DocumentIO.loads(original_text)
    final_text = DocumentIO.dumps(doc)
    final_text += "\n"  # the original string has a newline at the end
    self.assertEqual(final_text, original_text)
def invoke(self):
    if not os.path.exists(self.args.result_file):
        raise LavaCommandError("The specified result file (%s) "
                               "does not exist." % self.args.result_file)
    msg = "extract attachment files from result bundle file (%s)" % (
        self.args.result_file)
    self.say_begin(msg)

    badchars = r"[^a-zA-Z0-9\._-]"
    with open(self.args.result_file) as stream:
        jobdata = stream.read()
    result_data = DocumentIO.loads(jobdata)[1]
    test_runs = result_data.get('test_runs')

    if not self.args.directory:
        attachment_dir = mkdtemp(prefix='attachments-', dir=os.path.curdir)
    elif not os.path.exists(self.args.directory):
        os.makedirs(self.args.directory)
        attachment_dir = self.args.directory
    elif not os.path.isdir(self.args.directory):
        raise LavaCommandError(
            "The specified path (%s) is not a directory." %
            self.args.directory)
    else:
        attachment_dir = self.args.directory

    for test in test_runs:
        test_id = test.get('test_id').replace(" ", "_")
        test_id = re.sub(badchars, "_", test_id)
        target_dir = mkdtemp(prefix='%s' % test_id, dir=attachment_dir)
        print("The test id is: %s" % test_id)
        attachments = test.get('attachments', [])
        for attach in attachments:
            pathname = attach.get('pathname')
            file_name = os.path.basename(pathname)
            content_decoded = base64.standard_b64decode(
                attach.get("content"))
            # Decoded attachment content may be binary, so write in 'wb' mode.
            with open(os.path.join(target_dir, file_name), 'wb') as fd:
                fd.write(content_decoded)

    self.say("All attachment files were put under directory %s" %
             attachment_dir)
    self.say_end(msg)
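# Hypothetical argument registration for the invoke() command above. The
# attribute names are inferred from the self.args.result_file and
# self.args.directory references; the real command may register them
# differently.
@classmethod
def register_arguments(cls, parser):
    parser.add_argument(
        "result_file",
        help="Result bundle file to extract attachments from")
    parser.add_argument(
        "-d", "--directory", default=None,
        help="Directory where the extracted attachment files are put")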
def _combine_bundles(self, dirname):
    """
    Combine all bundles from a previous test run into one bundle.

    Returns the aggregated bundle object.

    Load, parse and validate each bundle from the specified directory
    and combine them into one larger bundle.

    This is somewhat tricky. Each bundle we coalesce may be generated
    by a different, separate program and may thus use a different
    format. To combine them all correctly we need to take two
    precautions:
        1) All bundles must be updated to a single, common format.
        2) No bundle may be upgraded beyond the latest format known to
           this code, since a hypothetical 2.0 format may be so widely
           different that we cannot reliably interpret anything beyond
           the format field. To prevent this we use the evolution API
           to carefully upgrade only to the "sentinel" format, 1.3 (at
           this time).
    """
    # Use DocumentIO.loads() to preserve the order of entries.
    # This is a very small touch but it makes reading the results
    # far more pleasant.
    aggregated_bundle = DocumentIO.loads(
        '{\n'
        '"format": "' + self._desired_format + '",\n'
        '"test_runs": []\n'
        '}\n')[1]
    # Iterate over all files there
    for name in os.listdir(dirname):
        bundle_pathname = os.path.join(dirname, name)
        # Process bundles one by one
        try:
            format, bundle = self._load_bundle(bundle_pathname)
            self._convert_to_common_format(format, bundle)
            self._combine_with_aggregated(aggregated_bundle, bundle)
        except Exception:
            logging.exception("Unable to process bundle %s", name)
    # Return the aggregated bundle
    return aggregated_bundle
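# Sketch of the helpers referenced by _combine_bundles() above; not the
# original implementation. It assumes DocumentIO.load() returns a
# (format, document) pair, like DocumentIO.loads(), and that
# DocumentEvolution.evolve_document(doc, one_step=True) from
# linaro_dashboard_bundle upgrades a document by exactly one format revision.
def _load_bundle(self, pathname):
    with open(pathname) as stream:
        return DocumentIO.load(stream)

def _convert_to_common_format(self, format, bundle):
    # Upgrade one step at a time until the "sentinel" format is reached,
    # never going past the format this code understands.
    while bundle["format"] != self._desired_format:
        DocumentEvolution.evolve_document(bundle, one_step=True)

def _combine_with_aggregated(self, aggregated_bundle, bundle):
    # Merging is just concatenating the test runs; the aggregated bundle
    # already carries the desired format field.
    aggregated_bundle["test_runs"].extend(bundle["test_runs"])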
def test_loads__with_enabled_retain_order__dict_class(self):
    fmt, doc = DocumentIO.loads(self.text, retain_order=True)
    observed_impl = type(doc)
    # With retain_order=True the loader returns an order-preserving
    # mapping, not a plain dict.
    self.assertNotEqual(observed_impl, dict)
def test_loads__with_disabled_retain_order__dict_class(self):
    fmt, doc = DocumentIO.loads(self.text, retain_order=False)
    observed_impl = type(doc)
    self.assertEqual(observed_impl, dict)
def test_loads__return_value(self):
    fmt, doc = DocumentIO.loads(self.text)
    self.assertEqual(fmt, self.expected_fmt)
    self.assertEqual(doc, self.expected_doc)
def test_loads__with_enabled_retain_order__key_order(self):
    fmt, doc = DocumentIO.loads(self.text, retain_order=True)
    observed_keys = doc.keys()
    self.assertEqual(observed_keys, self.expected_keys)
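# Hypothetical fixture for the DocumentIO.loads() tests above. The concrete
# values are made up for illustration (the real test suite defines its own);
# the point is a small document whose key order is observable when
# retain_order=True.
def setUp(self):
    self.text = '{"format": "Dashboard Bundle Format 1.3", "test_runs": []}'
    self.expected_fmt = "Dashboard Bundle Format 1.3"
    self.expected_doc = {"format": "Dashboard Bundle Format 1.3",
                         "test_runs": []}
    self.expected_keys = ["format", "test_runs"]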