def _write_results_bundle(self, bundle):
    """Serialize *bundle* into a uniquely-named lava-command*.bundle file
    inside the host result directory."""
    result_dir = self.context.host_result_dir
    fd, _path = tempfile.mkstemp(
        prefix='lava-command', suffix='.bundle', dir=result_dir)
    with os.fdopen(fd, 'w') as stream:
        DocumentIO.dump(stream, bundle)
def invoke_sub(self):
    # Build the combined result bundle for the requested device/result and
    # print it to stdout as JSON.  IOError (e.g. a broken pipe when output
    # is piped to `head`) is deliberately swallowed so the command exits
    # quietly instead of tracebacking.
    bundle = generate_combined_bundle(self.args.serial, self.args.result_id)
    try:
        print DocumentIO.dumps(bundle)
    except IOError:
        pass
def test_load_and_save_does_not_clobber_the_data(self):
    """Round-tripping a document through loads()/dumps() must be lossless."""
    source = resource_string(
        'linaro_dashboard_bundle', 'test_documents/' + self.filename)
    fmt, doc = DocumentIO.loads(source)
    # dumps() omits the trailing newline that the fixture file carries.
    round_tripped = DocumentIO.dumps(doc) + "\n"
    self.assertEqual(round_tripped, source)
def invoke_sub(self):
    # Run an already-installed test on the (optionally serial-selected)
    # device; when --output is given, write the generated result bundle
    # there, creating the destination directory on demand.
    tip_msg = self.get_tip_msg("Run test")
    self.say_begin(tip_msg)
    if not self.test_installed(self.args.test_id):
        raise LavaCommandError(
            "The test (%s) has not been installed yet." % self.args.test_id)
    test = TestProvider().load_test(self.args.test_id, self.args.serial)
    # NOTE(review): this repeats the check above using the loaded test's
    # canonical name -- presumably testname can differ from the requested
    # test_id; confirm whether both checks are really needed.
    if not self.test_installed(test.testname):
        raise LavaCommandError(
            "The test (%s) has not been installed yet." % self.args.test_id)
    try:
        result_id = test.run(quiet=self.args.quiet,
                             run_options=self.args.run_option)
        if self.args.output:
            output_dir = os.path.dirname(self.args.output)
            if output_dir and (not os.path.exists(output_dir)):
                os.makedirs(output_dir)
            bundle = generate_bundle(self.args.serial, result_id)
            with open(self.args.output, "wt") as stream:
                DocumentIO.dump(stream, bundle)
    except Exception as strerror:
        # Any failure during run/bundle generation is reported uniformly.
        raise LavaCommandError("Test execution error: %s" % strerror)
    self.say_end(tip_msg)
def test_load_and_save_does_not_clobber_the_data(self):
    """Ensure loads() followed by dumps() reproduces the input exactly."""
    original_text = resource_string(
        'linaro_dashboard_bundle', 'test_documents/' + self.filename)
    _fmt, document = DocumentIO.loads(original_text)
    regenerated = DocumentIO.dumps(document)
    # The fixture ends with a newline that dumps() does not append.
    self.assertEqual(regenerated + "\n", original_text)
def _bundle_results(self, target, signal_director, testdef_objs):
    """ Pulls the results from the target device and builds a bundle """
    # Resolve the partition that holds the test results from the target
    # configuration (the attribute name itself is stored in deployment_data).
    results_part = target.deployment_data['lava_test_results_part_attr']
    results_part = getattr(target.config, results_part)
    rdir = self.context.host_result_dir
    parse_err_msg = None
    # Distinguishes "device filesystem unreachable" (device likely crashed)
    # from errors raised after the filesystem was successfully mounted.
    filesystem_access_failure = True
    try:
        with target.file_system(results_part, target.lava_test_results_dir) as d:
            filesystem_access_failure = False
            err_log = os.path.join(d, 'parse_err.log')
            results_dir = os.path.join(d, 'results')
            bundle = lava_test_shell.get_bundle(results_dir, testdef_objs, err_log)
            parse_err_msg = read_content(err_log, ignore_missing=True)
            if os.path.isfile(err_log):
                os.unlink(err_log)
            # lava/results must be empty, but we keep a copy named
            # lava/results-XXXXXXXXXX for post-mortem analysis
            timestamp = datetime.now().strftime("%s")
            os.rename(results_dir, results_dir + '-' + timestamp)
            utils.ensure_directory(results_dir)
    except Exception as e:
        if filesystem_access_failure:
            # a failure when accessing the filesystem means the device
            # probably crashed. We use the backup bundle then.
            bundle = self._backup_bundle
            logging.warning(
                """Error extracting test results from device: %s""" % e)
            logging.warning(
                """This may mean that the device under test crashed. """
                """We will use test results parsed from the serial """
                """output as a backup, but note that some test """
                """artifacts (such as attachments and """
                """hardware/software contexts) will not be available""")
        else:
            raise e
    signal_director.postprocess_bundle(bundle)
    # Persist the (possibly backup) bundle into the host result directory.
    (fd, name) = tempfile.mkstemp(
        prefix='lava-test-shell', suffix='.bundle', dir=rdir)
    with os.fdopen(fd, 'w') as f:
        DocumentIO.dump(f, bundle)
    printer = PrettyPrinter(self.context)
    printer.print_results(bundle)
    # A recorded parse error still fails the job after results are saved.
    if parse_err_msg:
        raise GeneralError(parse_err_msg)
def _get_results_from_host(self):
    # Collect every result bundle from the host result directory, evolving
    # each document to the current format.  Returns the list of parsed
    # bundles; raises GatherResultsError (carrying the bundles gathered so
    # far) when anything goes wrong.
    bundles = []
    errors = []
    try:
        bundle_list = os.listdir(self.context.host_result_dir)
        for bundle_name in bundle_list:
            bundle = "%s/%s" % (self.context.host_result_dir, bundle_name)
            content = None
            try:
                with open(bundle) as f:
                    doc = DocumentIO.load(f)[1]
                    DocumentEvolution.evolve_document(doc)
                    bundles.append(doc)
            except ValueError:
                # Unparseable bundle: record and keep processing the rest.
                msg = 'Error adding host result bundle %s' % bundle
                errors.append(msg)
                logging.exception(msg)
                # NOTE(review): `content` is never assigned, so this branch
                # is dead -- presumably the raw file text was meant to be
                # captured above and attached for post-mortem; confirm
                # against upstream history.
                if content:
                    logging.info('Adding bundle as attachment')
                    attachment = create_attachment(bundle, content)
                    self.context.test_data.add_attachments([attachment])
    except:
        msg = 'Error getting all results from host'
        logging.exception(msg)
        raise GatherResultsError(msg, bundles)
    if len(errors) > 0:
        msg = ' '.join(errors)
        raise GatherResultsError(msg, bundles)
    return bundles
def generate_bundle(serial=None, result_id=None, test=None, test_id=None,
                    attachments=None):
    """Assemble a dashboard bundle from an on-device result directory.

    :param serial: device serial passed to ADB (None selects the default)
    :param result_id: result directory name under resultsdir_android;
        None short-circuits to an empty bundle
    :param test: pre-constructed test object; when None the test is loaded
        from the test_id recorded in the on-device bundle
    :param test_id: explicit test_id to record; otherwise the recorded one
        is decorated with its run_options, when present
    :param attachments: optional list of host-side file paths attached to
        the bundle as PNG images (monkeyrunner screenshots)
    :return: the assembled bundle dict
    :raises Exception: when the result directory does not exist on device
    """
    # BUG FIX: the default used to be a shared mutable list (attachments=[]),
    # which persists across calls; use None as the sentinel instead.
    if attachments is None:
        attachments = []
    if result_id is None:
        return {}
    config = get_config()
    adb = ADB(serial)
    resultdir = os.path.join(config.resultsdir_android, result_id)
    if not adb.exists(resultdir):
        raise Exception("The result (%s) is not existed." % result_id)
    bundle_text = adb.read_file(os.path.join(resultdir, "testdata.json"))
    bundle = DocumentIO.loads(bundle_text)[1]
    if test:
        test_tmp = test
    else:
        test_tmp = TestProvider().load_test(
            bundle['test_runs'][0]['test_id'], serial)
    if test_id:
        bundle['test_runs'][0]['test_id'] = test_id
    else:
        # Decorate the recorded test_id with its run options, if any.
        attrs = bundle['test_runs'][0].get('attributes')
        if attrs:
            run_options = attrs.get('run_options')
            if run_options:
                test_id = '%s(%s)' % (bundle['test_runs'][0]['test_id'],
                                      run_options)
                bundle['test_runs'][0]['test_id'] = test_id
    test_tmp.parse(result_id)
    # Pull the stdout/stderr logs; missing files are treated as empty.
    # NOTE(review): 'org_ouput_file' is spelled this way on the test object.
    stdout_text = adb.read_file(os.path.join(
        resultdir, os.path.basename(test_tmp.org_ouput_file)))
    if stdout_text is None:
        stdout_text = ''
    stderr_text = adb.read_file(os.path.join(resultdir, 'stderr.log'))
    if stderr_text is None:
        stderr_text = ''
    bundle['test_runs'][0]["test_results"] = test_tmp.parser.results[
        "test_results"]
    # Attachments produced by a normal test run.
    attachment_bundles = []
    for attachment in test_tmp.attachments:
        data_bundle = attachment.generate_bundle(adb=adb, resultsdir=resultdir)
        if data_bundle:
            attachment_bundles.append(data_bundle)
    bundle['test_runs'][0]["attachments"] = attachment_bundles
    # Host-side attachments (monkeyrunner screenshots) as PNG images.
    for attach in attachments:
        if os.path.exists(attach):
            with open(attach, 'rb') as stream:
                data = stream.read()
                if data:
                    bundle['test_runs'][0]["attachments"].append({
                        "pathname": os.path.basename(attach),
                        "mime_type": 'image/png',
                        "content": base64.standard_b64encode(data)})
    return bundle
def test_load_document(self):
    """Loading a known-good document must yield a non-None payload."""
    # resource_stream always takes posix-style paths, whatever the host OS.
    stream = resource_stream(
        'linaro_dashboard_bundle', 'test_documents/' + self.filename)
    fmt, doc = DocumentIO.load(stream)
    self.assertIsNot(doc, None)
def _savetestdata(self, analyzer_assigned_uuid, run_options=""):
    # Compose the initial 'testdata.json' bundle skeleton for this run
    # (empty results/attachments, hardware & software context captured via
    # adb) and push it onto the device's result directory.
    config = get_config()
    TIMEFORMAT = '%Y-%m-%dT%H:%M:%SZ'
    bundle = {
        'format': config.bundle_format,
        'test_runs': [
            {
                'analyzer_assigned_uuid': analyzer_assigned_uuid,
                'analyzer_assigned_date':
                    self.runner.starttime.strftime(TIMEFORMAT),
                'time_check_performed': False,
                'attributes': {},
                'test_id': self.testname,
                'test_results': [],
                'attachments': [],
                'hardware_context': hwprofile.get_hardware_context(self.adb),
                'software_context': swprofile.get_software_context(self.adb)
            }
        ]
    }
    # Record the run options so result consumers can tell runs apart.
    if run_options:
        bundle['test_runs'][0]['attributes']['run_options'] = run_options
    self._add_install_options(bundle, config)
    # Write the bundle to the host first, then push it to the device.
    filename_host = os.path.join(config.tempdir_host, 'testdata.json')
    write_file(DocumentIO.dumps(bundle), filename_host)
    filename_target = os.path.join(self.resultsdir, 'testdata.json')
    self.adb.push(filename_host, filename_target)
def test_evolved_document_is_what_we_expect(self):
    """A single evolution step must produce the reference 1.7.1 document."""
    DocumentEvolution.evolve_document(self.doc, one_step=True)
    reference = resource_stream(
        'linaro_dashboard_bundle', 'test_documents/evolution_1.7.1.json')
    _fmt, expected_doc = DocumentIO.load(reference, retain_order=False)
    self.assertEqual(self.doc, expected_doc)
def _get_bundles(self, files):
    """Load, evolve and collect every '.bundle' document in *files*.

    :param files: iterable of candidate file names; non-.bundle entries
        are skipped
    :return: list of parsed, evolved bundle documents
    :raises GatherResultsError: when any bundle failed to load (carries
        the successfully-gathered bundles)
    """
    bundles = []
    errors = []
    for fname in files:
        if os.path.splitext(fname)[1] != ".bundle":
            continue
        content = None
        try:
            with open(fname, 'r') as f:
                doc = DocumentIO.load(f)[1]
                DocumentEvolution.evolve_document(doc)
                bundles.append(doc)
        except ValueError:
            msg = 'Error adding result bundle %s' % fname
            errors.append(msg)
            logging.exception(msg)
            # NOTE(review): `content` is never assigned, so this branch is
            # dead -- presumably the raw file text was meant to be captured
            # above for post-mortem attachment; confirm against history.
            if content:
                logging.info('Adding bundle as attachment')
                attachment = create_attachment(fname, content)
                self.context.test_data.add_attachments([attachment])
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        except:
            # BUG FIX: the original format string had no %s placeholder, so
            # '...' % fname raised TypeError inside this very handler.
            msg = 'Unknown error processing bundle %s' % fname
            logging.exception(msg)
            errors.append(msg)
    if len(errors) > 0:
        msg = ' '.join(errors)
        raise GatherResultsError(msg, bundles)
    return bundles
def test_loader_uses_decimal_to_parse_numbers(self):
    """JSON numbers must come back as Decimal, never binary floats."""
    raw = resource_string('linaro_dashboard_bundle',
                          'test_documents/dummy_doc_with_numbers.json')
    _fmt, doc = DocumentIO.loads(raw)
    value = doc["test_runs"][0]["test_results"][0]["measurement"]
    self.assertEqual(value, Decimal("1.5"))
    self.assertTrue(isinstance(value, Decimal))
def test_loader_uses_decimal_to_parse_numbers(self):
    """The loader must parse numeric measurements into exact Decimals."""
    document_text = resource_string(
        'linaro_dashboard_bundle',
        'test_documents/dummy_doc_with_numbers.json')
    fmt, parsed = DocumentIO.loads(document_text)
    first_result = parsed["test_runs"][0]["test_results"][0]
    measurement = first_result["measurement"]
    self.assertEqual(measurement, Decimal("1.5"))
    self.assertTrue(isinstance(measurement, Decimal))
def invoke_sub(self):
    # Build a combined bundle for a 'custom' test (optionally parsing the
    # output with a user-supplied regex) and print it to stdout as JSON.
    PATTERN = None
    if self.args.parse_regex:
        PATTERN = self.args.parse_regex
    test_name = 'custom'
    inst = AndroidTestInstaller()
    run = AndroidTestRunner()
    parser = AndroidTestParser(pattern=PATTERN)
    test = AndroidTest(testname=test_name, installer=inst,
                       runner=run, parser=parser)
    # Seed empty results so the parser has somewhere to accumulate.
    test.parser.results = {'test_results': []}
    test.setadb(self.adb)
    bundle = generate_combined_bundle(self.args.serial,
                                      self.args.result_id, test=test)
    try:
        print DocumentIO.dumps(bundle)
    except IOError:
        # e.g. broken pipe when output is piped; exit quietly.
        pass
def _load_bundle(self, local_pathname):
    """ Load the bundle from local_pathname.

    There are various problems that can happen here but they should all be
    treated equally, the bundle not being used. This also transparently
    does schema validation so the chance of getting wrong data is lower.

    Returns a (format, bundle) pair.
    """
    with open(local_pathname, 'rt') as stream:
        return DocumentIO.load(stream)
def submit_bundle(self, main_bundle, server, stream, token):
    # Serialize the aggregated bundle and push it to the given dashboard
    # bundle stream; XML-RPC faults are logged and wrapped in
    # OperationFailed so callers see a single failure type.
    dashboard = _get_dashboard(server, token)
    json_bundle = DocumentIO.dumps(main_bundle)
    job_name = self.context.job_data.get('job_name', "LAVA Results")
    try:
        result = dashboard.put_ex(json_bundle, job_name, stream)
        # Echo the dashboard's result token to the out-of-band file so
        # external consumers can pick it up.
        print >> self.context.oob_file, 'dashboard-put-result:', result
        self.context.output.write_named_data('result-bundle', result)
        logging.info("Dashboard : %s" % result)
    except xmlrpclib.Fault, err:
        logging.warning("xmlrpclib.Fault occurred")
        logging.warning("Fault code: %d" % err.faultCode)
        logging.warning("Fault string: %s" % err.faultString)
        raise OperationFailed("could not push to dashboard")
def submit_bundle(self, main_bundle, server, stream, token):
    # Serialize the aggregated bundle and push it to the given dashboard
    # bundle stream; XML-RPC faults are logged and wrapped in
    # OperationFailed so callers see a single failure type.
    dashboard = _get_dashboard(server, token)
    json_bundle = DocumentIO.dumps(main_bundle)
    job_name = self.context.job_data.get('job_name', "LAVA Results")
    # URL-encode the job name so non-ASCII/special characters survive the
    # XML-RPC transport.
    job_name = urllib2.quote(job_name.encode('utf-8'))
    try:
        result = dashboard.put_ex(json_bundle, job_name, stream)
        self.context.output.write_named_data('result-bundle', result)
        logging.info("Dashboard : %s" % result)
    except xmlrpclib.Fault, err:
        logging.warning("xmlrpclib.Fault occurred")
        logging.warning("Fault code: %d" % err.faultCode)
        logging.warning("Fault string: %s" % err.faultString)
        raise OperationFailed("could not push to dashboard")
def deserialize(self, s_bundle, prefer_evolution):
    """ Deserializes specified Bundle.

    :Discussion:
        This method also handles internal transaction handling.
        All operations performed during bundle deserialization are
        _rolled_back_ if anything fails.

        If prefer_evolution is enabled then the document is first evolved
        to the latest known format and only then imported into the
        database. This operation is currently disabled to ensure that
        all old documents are imported exactly as before. Enabling it
        should be quite safe though as it passes all tests.

    :Exceptions raised:
        json_schema_validator.ValidationError
            When the document does not match the appropriate schema.
        linaro_dashboard_bundle.errors.DocumentFormatError
            When the document format is not in the known set of formats.
        ValueError
            When the text does not represent a correct JSON document.
    """
    assert s_bundle.is_deserialized is False
    s_bundle.content.open('rb')
    logger = logging.getLogger(__name__)
    try:
        logger.debug("Loading document")
        fmt, doc = DocumentIO.load(s_bundle.content)
        logger.debug("Document loaded")
        if prefer_evolution:
            logger.debug("Evolving document")
            DocumentEvolution.evolve_document(doc)
            logger.debug("Document evolution complete")
            # Evolution may have rewritten the format field; re-read it so
            # the importer lookup below matches the evolved document.
            fmt = doc["format"]
    finally:
        # Always release the content file, even when loading fails.
        s_bundle.content.close()
    importer = self.IMPORTERS.get(fmt)
    if importer is None:
        raise DocumentFormatError(fmt)
    try:
        logger.debug("Importing document")
        importer().import_document(s_bundle, doc)
        logger.debug("Document import complete")
    except Exception as exc:
        # Log for diagnosis, then let the caller's transaction roll back.
        logger.debug("Exception while importing document: %r", exc)
        raise
def invoke(self):
    # Extract every attachment embedded in a result bundle file into a
    # per-test-run subdirectory (base64-decoded back to the original bytes).
    if not os.path.exists(self.args.result_file):
        raise LavaCommandError("The specified result file(%s) "
                               "does not exist." % self.args.result_file)
    msg = "extract attachment file from result bundle file(%s)" % (
        self.args.result_file)
    self.say_begin(msg)
    # Characters not allowed in generated directory names.
    badchars = "[^a-zA-Z0-9\._-]"
    with open(self.args.result_file) as stream:
        jobdata = stream.read()
    result_data = DocumentIO.loads(jobdata)[1]
    test_runs = result_data.get('test_runs')
    # Pick the output directory: a fresh temp dir by default, otherwise
    # the user-specified directory (created on demand, must be a dir).
    if not self.args.directory:
        attachment_dir = mkdtemp(prefix='attachments-', dir=os.path.curdir)
    elif not os.path.exists(self.args.directory):
        os.makedirs(self.args.directory)
        attachment_dir = self.args.directory
    elif not os.path.isdir(self.args.directory):
        raise LavaCommandError(
            "The specified path(%s) is not a directory." %
            self.args.directory)
    else:
        attachment_dir = self.args.directory
    for test in test_runs:
        # Sanitize the test_id so it is safe as a directory name prefix.
        test_id = test.get('test_id').replace(" ", "_")
        test_id = re.sub(badchars, "_", test_id)
        target_dir = mkdtemp(prefix='%s' % test_id, dir=attachment_dir)
        print "The test id is: %s" % test_id
        attachments = test.get('attachments', [])
        for attach in attachments:
            pathname = attach.get('pathname')
            file_name = os.path.basename(pathname)
            # Attachment content is stored base64-encoded in the bundle.
            content_decoded = base64.standard_b64decode(
                attach.get("content"))
            with open(os.path.join(target_dir, file_name), 'w') as fd:
                fd.write(content_decoded)
    self.say("All attachment files are put under directory(%s)" % (
        attachment_dir))
    self.say_end(msg)
def test_dumper_can_dump_decimals(self):
    """dumps() must serialize Decimal measurements as plain JSON numbers."""
    measurement = Decimal("1.5")
    doc = {
        "format": "Dashboard Bundle Format 1.0",
        "test_runs": [
            {
                "test_id": "NOT RELEVANT",
                "analyzer_assigned_date": "2010-11-14T01:03:06Z",
                "analyzer_assigned_uuid": "NOT RELEVANT",
                "time_check_performed": False,
                "test_results": [
                    {
                        "test_case_id": "NOT RELEVANT",
                        "result": "unknown",
                        "measurement": measurement,
                    }
                ],
            }
        ],
    }
    self.assertIn("1.5", DocumentIO.dumps(doc))
def _combine_bundles(self, dirname): """ Combine all bundles from a previous test run into one bundle. Returns the aggregated bundle object Load, parse and validate each bundle from the specified directory and combine them into one larger bundle. This is somewhat tricky. Each bundle we coalesce may be generated by a different, separate programs and may, thus, use different formats. To combine them all correctly we need to take two precautions: 1) All bundles must be updated to a single, common format 2) No bundle may be upgraded beyond the latest format known to this code. Since the hypothetical 2.0 format may be widely different that we cannot reliably interpret anything beyond the format field. To prevent this we use the evolution API to carefully upgrade only to the "sentinel" format, 1.3 (at this time) """ # Use DocumentIO.loads() to preserve the order of entries. # This is a very small touch but it makes reading the results # far more pleasant. aggregated_bundle = DocumentIO.loads( '{\n' '"format": "' + self._desired_format + '",\n' '"test_runs": []\n' '}\n')[1] # Iterate over all files there for name in os.listdir(dirname): bundle_pathname = os.path.join(dirname, name) # Process bundle one by one try: format, bundle = self._load_bundle(bundle_pathname) self._convert_to_common_format(format, bundle) self._combine_with_aggregated(aggregated_bundle, bundle) except: logging.exception("Unable to process bundle %s", name) # Return the aggregated bundle return aggregated_bundle
def _combine_bundles(self, dirname): """ Combine all bundles from a previous test run into one bundle. Returns the aggregated bundle object Load, parse and validate each bundle from the specified directory and combine them into one larger bundle. This is somewhat tricky. Each bundle we coalesce may be generated by a different, separate programs and may, thus, use different formats. To combine them all correctly we need to take two precautions: 1) All bundles must be updated to a single, common format 2) No bundle may be upgraded beyond the latest format known to this code. Since the hypothetical 2.0 format may be widely different that we cannot reliably interpret anything beyond the format field. To prevent this we use the evolution API to carefully upgrade only to the "sentinel" format, 1.3 (at this time) """ # Use DocumentIO.loads() to preserve the order of entries. # This is a very small touch but it makes reading the results # far more pleasant. aggregated_bundle = DocumentIO.loads('{\n' '"format": "' + self._desired_format + '",\n' '"test_runs": []\n' '}\n')[1] # Iterate over all files there for name in os.listdir(dirname): bundle_pathname = os.path.join(dirname, name) # Process bundle one by one try: format, bundle = self._load_bundle(bundle_pathname) self._convert_to_common_format(format, bundle) self._combine_with_aggregated(aggregated_bundle, bundle) except: logging.exception("Unable to process bundle %s", name) # Return the aggregated bundle return aggregated_bundle
def test_dump_produces_compact_sorted_output(self):
    """dump(human_readable=False, sort_keys=True) yields the compact form."""
    buffer = StringIO()
    DocumentIO.dump(buffer, self.doc, human_readable=False, sort_keys=True)
    self.assertEqual(buffer.getvalue(), self.expected_compact_sorted_text)
def test_dumps_produces_compact_sorted_ouptut(self):
    """dumps(human_readable=False, sort_keys=True) yields the compact form."""
    actual = DocumentIO.dumps(
        self.doc, human_readable=False, sort_keys=True)
    self.assertEqual(actual, self.expected_compact_sorted_text)
def _write_results_bundle(self, bundle):
    """Dump *bundle* to a fresh lava-command*.bundle file in the host
    result directory."""
    fd, _ = tempfile.mkstemp(
        prefix='lava-command', suffix='.bundle',
        dir=self.context.host_result_dir)
    with os.fdopen(fd, 'w') as out:
        DocumentIO.dump(out, bundle)
def test_loads__with_disabled_retain_order__dict_class(self):
    """retain_order=False must hand back a plain dict document."""
    _fmt, doc = DocumentIO.loads(self.text, retain_order=False)
    self.assertEqual(type(doc), dict)
def test_load__with_enabled_retain_order__key_order(self):
    """retain_order=True must preserve the document's key ordering."""
    _fmt, doc = DocumentIO.load(self.stream, retain_order=True)
    self.assertEqual(doc.keys(), self.expected_keys)
def test_evolved_document_is_valid(self):
    """One evolution step must yield a valid 1.7 format document."""
    DocumentEvolution.evolve_document(self.doc, one_step=True)
    observed_format = DocumentIO.check(self.doc)
    self.assertEqual(observed_format, "Dashboard Bundle Format 1.7")
def test_load__return_value(self):
    """load() must return the (format, document) pair for the fixture."""
    observed_fmt, observed_doc = DocumentIO.load(self.stream)
    self.assertEqual(observed_fmt, self.expected_fmt)
    self.assertEqual(observed_doc, self.expected_doc)
def generate_bundle(serial=None, result_id=None, test=None, test_id=None,
                    attachments=None):
    """Assemble a dashboard bundle from an on-device result directory.

    :param serial: device serial passed to ADB (None selects the default)
    :param result_id: result directory name under resultsdir_android;
        None short-circuits to an empty bundle
    :param test: pre-constructed test object; when None the test is loaded
        from the test_id recorded in the on-device bundle
    :param test_id: explicit test_id to record; otherwise the recorded one
        is decorated with its run_options, when present
    :param attachments: optional list of host-side file paths attached to
        the bundle as PNG images (monkeyrunner screenshots)
    :return: the assembled bundle dict
    :raises Exception: when the result directory does not exist on device
    """
    # BUG FIX: 'attachments=[]' was a shared mutable default that persists
    # across calls; None is now the sentinel for "no attachments".
    if attachments is None:
        attachments = []
    if result_id is None:
        return {}
    config = get_config()
    adb = ADB(serial)
    resultdir = os.path.join(config.resultsdir_android, result_id)
    if not adb.exists(resultdir):
        raise Exception("The result (%s) is not existed." % result_id)
    bundle_text = adb.read_file(os.path.join(resultdir, "testdata.json"))
    bundle = DocumentIO.loads(bundle_text)[1]
    if test:
        test_tmp = test
    else:
        test_tmp = TestProvider().load_test(
            bundle['test_runs'][0]['test_id'], serial)
    if test_id:
        bundle['test_runs'][0]['test_id'] = test_id
    else:
        # No explicit test_id: decorate the recorded one with run options.
        attrs = bundle['test_runs'][0].get('attributes')
        if attrs:
            run_options = attrs.get('run_options')
            if run_options:
                test_id = '%s(%s)' % (bundle['test_runs'][0]['test_id'],
                                      run_options)
                bundle['test_runs'][0]['test_id'] = test_id
    test_tmp.parse(result_id)
    # Pull stdout/stderr logs; missing files are treated as empty content.
    # NOTE(review): 'org_ouput_file' is spelled this way on the test object.
    stdout_text = adb.read_file(
        os.path.join(resultdir, os.path.basename(test_tmp.org_ouput_file)))
    if stdout_text is None:
        stdout_text = ''
    stderr_text = adb.read_file(os.path.join(resultdir, 'stderr.log'))
    if stderr_text is None:
        stderr_text = ''
    bundle['test_runs'][0]["test_results"] = test_tmp.parser.results[
        "test_results"]
    # Attachments produced by a normal test run.
    attachment_bundles = []
    for attachment in test_tmp.attachments:
        data_bundle = attachment.generate_bundle(adb=adb, resultsdir=resultdir)
        if data_bundle:
            attachment_bundles.append(data_bundle)
    bundle['test_runs'][0]["attachments"] = attachment_bundles
    # Host-side attachments (monkeyrunner screenshots) as PNG images.
    for attach in attachments:
        if os.path.exists(attach):
            with open(attach, 'rb') as stream:
                data = stream.read()
                if data:
                    bundle['test_runs'][0]["attachments"].append({
                        "pathname": os.path.basename(attach),
                        "mime_type": 'image/png',
                        "content": base64.standard_b64encode(data)
                    })
    return bundle
def invoke_sub(self):
    """Run an arbitrary shell command (or a downloaded command file) on the
    device as a 'custom' test, optionally writing the result bundle to
    --output."""
    test_name = 'custom'
    ADB_SHELL_STEPS = []
    STEPS_HOST_PRE = []
    STEPS_ADB_PRE = []
    file_name = None
    # BUG FIX: test_name_suffix was previously unbound when neither
    # --android-command nor --command-file was supplied.
    test_name_suffix = ''
    if self.args.android_command:
        ADB_SHELL_STEPS = self.args.android_command
        cmds_str = ','.join(ADB_SHELL_STEPS)
        if len(cmds_str) > 40:
            cmds_str = '%s...' % (cmds_str[:40])
        test_name_suffix = 'command=[%s]' % (cmds_str)
    elif self.args.command_file:
        file_url = self.args.command_file
        urlpath = urlparse.urlsplit(file_url).path
        file_name = os.path.basename(urlpath)
        target_path = os.path.join(
            self.config.installdir_android, test_name, file_name)
        STEPS_HOST_PRE = ["wget %s -O %s" % (file_url, file_name)]
        STEPS_ADB_PRE = ["push %s %s" % (file_name, target_path)]
        ADB_SHELL_STEPS = ["chmod 777 %s" % target_path, target_path]
        file_name_str = file_name
        if len(file_name_str) > 40:
            # BUG FIX: the original truncated cmds_str here, which is not
            # defined in this branch (NameError); truncate the file name.
            file_name_str = '%s...' % (file_name_str[:40])
        test_name_suffix = 'command_file=%s' % (file_name_str)
    PATTERN = None
    if self.args.parse_regex:
        PATTERN = self.args.parse_regex
    if self.args.serial:
        tip_msg = ("Run following custom test(s) on device(%s):"
                   "\n\tcommands=%s"
                   "\n\tcommand-file=%s\n") % (
            self.args.serial, '\n\t\t'.join(ADB_SHELL_STEPS), file_name)
    else:
        tip_msg = ("Run following custom test(s):"
                   "\n\t\tcommands=%s"
                   "\n\tcommand-file=%s\n") % (
            '\n\t\t'.join(ADB_SHELL_STEPS), file_name)
    self.say_begin(tip_msg)
    inst = AndroidTestInstaller()
    run = AndroidTestRunner(steps_host_pre=STEPS_HOST_PRE,
                            steps_adb_pre=STEPS_ADB_PRE,
                            adbshell_steps=ADB_SHELL_STEPS)
    parser = AndroidTestParser(pattern=PATTERN)
    test = AndroidTest(testname=test_name, installer=inst,
                       runner=run, parser=parser)
    # Seed empty results so the parser has somewhere to accumulate.
    test.parser.results = {'test_results': []}
    test.setadb(self.adb)
    if not self.test_installed(test.testname):
        test.install()
    try:
        result_id = test.run(quiet=self.args.quiet)
        if self.args.output:
            output_dir = os.path.dirname(self.args.output)
            if output_dir and (not os.path.exists(output_dir)):
                os.makedirs(output_dir)
            bundle = generate_bundle(
                self.args.serial, result_id, test=test,
                test_id='%s(%s)' % (test_name, test_name_suffix))
            with open(self.args.output, "wt") as stream:
                DocumentIO.dump(stream, bundle)
    except Exception as strerror:
        raise LavaCommandError("Test execution error: %s" % strerror)
    self.say_end(tip_msg)
def invoke_sub(self):
    # Check out a git repository of monkeyrunner scripts, run each script
    # on the device, and merge the per-script result bundles into --output.
    if not utils.check_command_exist('monkeyrunner'):
        raise LavaCommandError('The command monkeyrunner can not be found')
    if self.args.repo_type == 'git':
        target_dir = mkdtemp(prefix='git_repo',
                             dir=self.config.tempdir_host)
        os.chmod(target_dir, 0755)
        GitRepository(self.args.url).checkout(target_dir)
    else:
        raise LavaCommandError("The repository type(%s) is not supported"
                               % self.args.repo_type)
    script_list = utils.find_files(target_dir, '.py')
    # Truncate over-long URLs for a readable test id.
    test_id = self.args.url
    if len(test_id) > 40:
        test_id = '%s...' % (test_id[:40])
    test_id = 'monkeyrunner_%s' % test_id
    tip_msg = ("Run monkeyrunner scripts in following url on device(%s):"
               "\n\turl=%s") % (
        self.serial, self.args.url)
    self.say_begin(tip_msg)
    bundles = []
    for script in script_list:
        # The shared helper module is not itself a test script.
        if "monkeycommon.py" == os.path.basename(script):
            continue
        sub_bundle = {}
        from datetime import datetime
        starttime = datetime.utcnow()
        test_case_id = script.replace('%s/' % target_dir, '')
        if len(test_case_id) > 50:
            test_case_id = '%s...' % (test_case_id[:50])
        try:
            sub_bundle = self.run_monkeyrunner_test(script, self.serial,
                                                    test_case_id)
            test_result = {"test_case_id": test_case_id, "result": 'pass'}
            if sub_bundle:
                sub_bundle['test_runs'][0]['test_results'].append(
                    test_result)
        except Exception as strerror:
            self.say('Failed to run script(%s) with error:\n%s' % (
                script, strerror))
            test_result = {"test_case_id": test_case_id, "result": 'fail'}
            # Synthesize a minimal test run recording the failure.
            TIMEFORMAT = '%Y-%m-%dT%H:%M:%SZ'
            sub_bundle['test_runs'] = [{'test_results': [test_result],
                                        'test_id': 'monkeyrunner(%s)' % test_case_id,
                                        'time_check_performed': False,
                                        'analyzer_assigned_uuid': str(uuid4()),
                                        'analyzer_assigned_date': starttime.strftime(TIMEFORMAT)}]
        if sub_bundle:
            bundles.append(sub_bundle)
    if self.args.output:
        output_dir = os.path.dirname(self.args.output)
        if output_dir and (not os.path.exists(output_dir)):
            os.makedirs(output_dir)
        with open(self.args.output, "wt") as stream:
            DocumentIO.dump(stream, merge_bundles(bundles))
    self.say_end(tip_msg)
def test_load__with_enabled_retain_order__dict_class(self):
    """retain_order=True must NOT return a plain dict (an order-preserving
    mapping is expected instead)."""
    _fmt, doc = DocumentIO.load(self.stream, retain_order=True)
    self.assertNotEqual(type(doc), dict)
def setUp(self):
    """Load the 1.6 evolution fixture before each test."""
    super(DocumentEvolutionTests_1_6_to_1_7, self).setUp()
    fixture = resource_stream(
        'linaro_dashboard_bundle', 'test_documents/evolution_1.6.json')
    self.fmt, self.doc = DocumentIO.load(fixture, retain_order=False)
def setUp(self):
    """Load the 1.7 evolution fixture before each test."""
    super(DocumentEvolutionTests_1_7_to_1_7_1, self).setUp()
    fixture = resource_stream(
        'linaro_dashboard_bundle', 'test_documents/evolution_1.7.json')
    self.fmt, self.doc = DocumentIO.load(fixture, retain_order=False)
def test_load__with_disabled_retain_order__dict_class(self):
    """retain_order=False must produce a plain dict document."""
    _fmt, doc = DocumentIO.load(self.stream, retain_order=False)
    self.assertEqual(type(doc), dict)
def test_dumps_produces_readable_ouptut(self):
    """human_readable=True must produce the pretty-printed reference text."""
    self.assertEqual(
        DocumentIO.dumps(self.doc, human_readable=True),
        self.expected_readable_text)
def invoke_sub(self):
    # Check out a git repository of monkeyrunner scripts, run each script
    # on the device, and merge the per-script result bundles into --output.
    if not utils.check_command_exist('monkeyrunner'):
        raise LavaCommandError('The command monkeyrunner can not be found')
    if self.args.repo_type == 'git':
        target_dir = mkdtemp(prefix='git_repo',
                             dir=self.config.tempdir_host)
        os.chmod(target_dir, 0755)
        GitRepository(self.args.url).checkout(target_dir)
    else:
        raise LavaCommandError("The repository type(%s) is not supported"
                               % self.args.repo_type)
    script_list = utils.find_files(target_dir, '.py')
    # Truncate over-long URLs for a readable test id.
    test_id = self.args.url
    if len(test_id) > 40:
        test_id = '%s...' % (test_id[:40])
    test_id = 'monkeyrunner_%s' % test_id
    tip_msg = ("Run monkeyrunner scripts in following url on device(%s):"
               "\n\turl=%s") % (self.serial, self.args.url)
    self.say_begin(tip_msg)
    bundles = []
    for script in script_list:
        # The shared helper module is not itself a test script.
        if "monkeycommon.py" == os.path.basename(script):
            continue
        sub_bundle = {}
        from datetime import datetime
        starttime = datetime.utcnow()
        test_case_id = script.replace('%s/' % target_dir, '')
        if len(test_case_id) > 50:
            test_case_id = '%s...' % (test_case_id[:50])
        try:
            sub_bundle = self.run_monkeyrunner_test(
                script, self.serial, test_case_id)
            test_result = {"test_case_id": test_case_id, "result": 'pass'}
            if sub_bundle:
                sub_bundle['test_runs'][0]['test_results'].append(
                    test_result)
        except Exception as strerror:
            self.say('Failed to run script(%s) with error:\n%s'
                     % (script, strerror))
            test_result = {"test_case_id": test_case_id, "result": 'fail'}
            # Synthesize a minimal test run recording the failure.
            TIMEFORMAT = '%Y-%m-%dT%H:%M:%SZ'
            sub_bundle['test_runs'] = [{
                'test_results': [test_result],
                'test_id': 'monkeyrunner(%s)' % test_case_id,
                'time_check_performed': False,
                'analyzer_assigned_uuid': str(uuid4()),
                'analyzer_assigned_date': starttime.strftime(TIMEFORMAT)
            }]
        if sub_bundle:
            bundles.append(sub_bundle)
    if self.args.output:
        output_dir = os.path.dirname(self.args.output)
        if output_dir and (not os.path.exists(output_dir)):
            os.makedirs(output_dir)
        with open(self.args.output, "wt") as stream:
            DocumentIO.dump(stream, merge_bundles(bundles))
    self.say_end(tip_msg)
def test_evolved_document_is_valid(self):
    """A single evolution step must produce a valid 1.7.1 document."""
    DocumentEvolution.evolve_document(self.doc, one_step=True)
    self.assertEqual(
        DocumentIO.check(self.doc), "Dashboard Bundle Format 1.7.1")