def testLogSingleSpan(self):
    """Tests that SpanStack.Span logs a span and sends it."""
    span_stack = cloud_trace.SpanStack()
    foo_span = span_stack.Span('foo')
    # Creating the context manager alone must not send anything.
    self.assertEqual(0, self.log_span_mock.call_count)
    with foo_span:
        # Still nothing while the span is open.
        self.assertEqual(0, self.log_span_mock.call_count)
    # Exiting the span triggers exactly one send.
    self.assertEqual(1, self.log_span_mock.call_count)
def testHeaderValue(self):
    """Tests that an explicit traceId/parentSpanId override the context.

    A SpanStack built with both keyword ids and a global_context string
    must stay enabled and report the explicitly-passed ids.
    """
    # Fixed: the original had a stray trailing comma here, turning
    # trace_id into a 1-tuple so the assertions compared tuple-to-tuple
    # and never exercised the real string comparison.
    trace_id = 'deadbeef12345678deadbeef12345678'
    stack = cloud_trace.SpanStack(
            traceId=trace_id,
            parentSpanId='0',
            global_context='deadbeefdeadbeefdeadbeefdeadbeef/0;o=1')
    self.assertTrue(stack.enabled)
    self.assertEqual(stack.traceId, trace_id)
    self.assertEqual(stack.last_span_id, '0')
def testSpannedDecorator(self):
    """Tests that @stack.Spanned() works."""
    span_stack = cloud_trace.SpanStack()

    @span_stack.Spanned('foo')
    def decorated():
        pass

    # Decorating alone must not log; only invocation does.
    self.assertEqual(0, self.log_span_mock.call_count)
    decorated()
    self.assertEqual(1, self.log_span_mock.call_count)
def testCallLogSpanAtCloseOfStack(self):
    """Test that LogSpans is called after each span is popped."""
    span_stack = cloud_trace.SpanStack()
    with span_stack.Span('foo'):
        # Nothing is sent while 'foo' is still open.
        self.assertEqual(0, self.log_span_mock.call_count)
        with span_stack.Span('bar'):
            self.assertEqual(0, self.log_span_mock.call_count)
            with span_stack.Span('zap'):
                self.assertEqual(0, self.log_span_mock.call_count)
            # 'zap' closed: one span sent so far.
            self.assertEqual(1, self.log_span_mock.call_count)
        # 'bar' closed: two spans sent.
        self.assertEqual(2, self.log_span_mock.call_count)
    # 'foo' closed: all three spans sent.
    self.assertEqual(3, self.log_span_mock.call_count)
def testSpanContextDisabled(self):
    """Tests that o=0 in the global_context disables the spanstack.

    Fixed docstring: it previously claimed "o=1 ... disables", but the
    test constructs the stack with ';o=0' and asserts it is disabled.
    """
    trace_id = 'deadbeef12345678deadbeef12345678'
    stack = cloud_trace.SpanStack(
            traceId=trace_id,
            parentSpanId='0',
            global_context='deadbeefdeadbeefdeadbeefdeadbeef/0;o=0')
    self.assertFalse(stack.enabled)
    with stack.Span('foo') as span:
        # The span still carries its ids even when disabled...
        self.assertEqual(span.parentSpanId, '0')
        self.assertEqual(span.traceId, trace_id)
    # ...but nothing is ever sent to the log.
    self.assertEqual(self.log_span_mock.call_count, 0)
def testEnvironmentContextManager(self):
    """Tests that the environment context manager works."""
    trace_id = 'deadbeef12345678deadbeef12345678'
    span_stack = cloud_trace.SpanStack(
            global_context='{trace_id}/0;o=1'.format(trace_id=trace_id))
    env_key = cloud_trace.SpanStack.CLOUD_TRACE_CONTEXT_ENV
    before = self.env.get(env_key)
    with span_stack.EnvironmentContext():
        inside = self.env.get(env_key)
    after = self.env.get(env_key)
    # The prior environment value is restored on exit.
    self.assertEqual(before, after)
    # Note the lack of a /0; the /0 is optional.
    self.assertEqual(inside, "deadbeef12345678deadbeef12345678;o=1")
def testInitReadsEnvironment(self):
    """Tests that SpanStack reads the environment on init.

    Fixes: docstring typo ("enivornment"); and the cleanup now also
    removes the variable when it was originally unset, so the test no
    longer leaks its context into other tests.
    """
    trace_id = 'deadbeef12345678deadbeef12345678'
    global_context = '{trace_id}/0;o=1'.format(trace_id=trace_id)
    env_key = cloud_trace.SpanStack.CLOUD_TRACE_CONTEXT_ENV
    old_env = self.env.get(env_key)
    try:
        self.env[env_key] = global_context
        stack = cloud_trace.SpanStack()
        self.assertEqual(stack.traceId, trace_id)
        self.assertEqual(stack.last_span_id, '0')
    finally:
        if old_env is not None:
            self.env[env_key] = old_env
        else:
            # The variable was unset before the test; unset it again.
            self.env.pop(env_key, None)
def testSpanContextEnabled(self):
    """Tests that the span context manager updates the environment."""
    trace_id = 'deadbeef12345678deadbeef12345678'
    span_stack = cloud_trace.SpanStack(
            traceId=trace_id,
            parentSpanId='0',
            global_context='deadbeefdeadbeefdeadbeefdeadbeef/0;o=1')
    self.assertTrue(span_stack.enabled)
    env_key = cloud_trace.SpanStack.CLOUD_TRACE_CONTEXT_ENV
    before = self.env.get(env_key)
    with span_stack.Span('foo') as span:
        # While the span is open, the environment carries its trace id.
        inside = self.env.get(env_key)
        self.assertTrue(
                inside.startswith("deadbeef12345678deadbeef12345678/"))
        self.assertEqual(span.parentSpanId, '0')
        self.assertEqual(span.traceId, trace_id)
    after = self.env.get(env_key)
    self.assertEqual(self.log_span_mock.call_count, 1)
    # The prior environment value is restored once the span closes.
    self.assertEqual(before, after)
def main():
    """Entry point for autoserv (SSP-via-control variant).

    Parses command-line options, prepares the results directory,
    configures logging, optionally stages a server-side package (SSP),
    then runs the job inside a cloud-trace span. Always terminates via
    sys.exit() with the job's exit code.
    """
    # NOTE(review): start_time is unused in this variant; kept so the
    # duration-recording hook can be reintroduced without reshuffling.
    start_time = datetime.datetime.now()
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we certified that there's no leftover results dir from
        # previous jobs, lets create the result dir since the logging system
        # needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    if parser.options.require_ssp:
        # This is currently only used for skylab (i.e., when --control-name
        # is used).
        use_ssp = _require_ssp_from_control(parser.options.control_name)
    else:
        use_ssp = False

    if use_ssp:
        # SSP logs live in a subdirectory of the results dir (if any).
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
            server_logging_config.ServerLoggingConfig(),
            results_dir=log_dir,
            use_console=not parser.options.no_tee,
            verbose=parser.options.verbose,
            no_console_prefix=parser.options.no_console_prefix)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))
    logging.debug('autoserv parsed options: %s', parser.options)

    if use_ssp:
        ssp_url = _stage_ssp(parser, results)
    else:
        ssp_url = None

    if results:
        # Fixed: pass results lazily instead of eager %-formatting.
        logging.info("Results placed in %s", results)

        # wait until now to perform this check, so it gets properly logged
        if (parser.options.use_existing_results and not resultdir_exists and
                not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(
                parser.options.pidfile_label, results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.Autotest.set_install_in_tmpdir(parser.options.install_in_tmpdir)

    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. Eg,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a
    # test for the autoserv command's -l option.
    testing_exceptions = _CONFIG.get_config_value(
            'AUTOSERV', 'testing_exceptions', type=list, default=[])
    test_mode = _CONFIG.get_config_value(
            'AUTOSERV', 'testing_mode', type=bool, default=False)
    test_mode = (results_mocker and test_mode and not
                 any([ex in parser.options.label
                      for ex in testing_exceptions]))
    is_task = (parser.options.verify or parser.options.repair or
               parser.options.provision or parser.options.reset or
               parser.options.cleanup or parser.options.collect_crashinfo)

    trace_labels = {
            'job_id': job_directories.get_job_id_or_task_id(
                    parser.options.results)
    }
    trace = cloud_trace.SpanStack(
            labels=trace_labels,
            global_context=parser.options.cloud_trace_context)
    trace.enabled = parser.options.cloud_trace_context_enabled == 'True'
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just
                # return happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]
                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    results_mocker.ResultsMocker(
                            'unknown-test', parser.options.results, machine
                            ).mock_results()
                return
            else:
                with trace.Span(get_job_status(parser.options)):
                    run_autoserv(pid_file_manager, results, parser, ssp_url,
                                 use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception('Uncaught SystemExit with code %s',
                                  exit_code)
        except Exception:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception('Uncaught Exception, exit_code = 1.')
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
    sys.exit(exit_code)
def testCanSendLog(self):
    """Tests that Spans are sent to a log."""
    span_stack = cloud_trace.SpanStack()
    with span_stack.Span('foo'):
        pass
    # Closing the span must have written the per-process spans log file.
    self.assertExists(cloud_trace.SPANS_LOG.format(pid=os.getpid()))
def main():
    """Entry point for autoserv (SSP-staging-with-fallback variant).

    Parses command-line options, prepares the results directory, stages a
    server-side package (SSP) when required — falling back gracefully if
    none can be staged — configures logging, then runs the job inside a
    cloud-trace span. Records the autoserv duration and terminates via
    sys.exit() with the job's exit code.

    Fixes: 'erver-side' typo in the staged-package warning message;
    deprecated logging.warn -> logging.warning; lazy logging args.
    """
    start_time = datetime.datetime.now()
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we certified that there's no leftover results dir from
        # previous jobs, lets create the result dir since the logging system
        # needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    # If the job requires to run with server-side package, try to stage
    # server-side package first. If that fails with error that autotest
    # server package does not exist, fall back to run the job without using
    # server-side packaging. If option warn_no_ssp is specified, that means
    # autoserv is running in a drone does not support SSP, thus no need to
    # stage server-side package.
    ssp_url = None
    ssp_url_warning = False
    if (not parser.options.warn_no_ssp and parser.options.require_ssp):
        ssp_url, ssp_error_msg = _stage_ssp(parser, results)
        # The build does not have autotest server package. Fall back to not
        # to use server-side package. Logging is postponed until logging
        # being set up.
        ssp_url_warning = not ssp_url

    # Server-side packaging will only be used if it's required and the
    # package is available. If warn_no_ssp is specified, it means that
    # autoserv is running in a drone does not have SSP supported and a
    # warning will be logged. Therefore, it should not run with SSP.
    use_ssp = (not parser.options.warn_no_ssp and parser.options.require_ssp
               and ssp_url)
    if use_ssp:
        # SSP logs live in a subdirectory of the results dir (if any).
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
            server_logging_config.ServerLoggingConfig(),
            results_dir=log_dir,
            use_console=not parser.options.no_tee,
            verbose=parser.options.verbose,
            no_console_prefix=parser.options.no_console_prefix)

    if ssp_url_warning:
        # Fixed typo ('erver-side' -> 'server-side') and switched from the
        # deprecated logging.warn alias to logging.warning.
        logging.warning(
                'Autoserv is required to run with server-side packaging. '
                'However, no server-side package can be staged based on '
                '`--image`, host attribute job_repo_url or host OS version '
                'label. It could be that the build to test is older than the '
                'minimum version that supports server-side packaging, or no '
                'devserver can be found to stage server-side package. The '
                'test will be executed without using server-side packaging. '
                'Following is the detailed error:\n%s', ssp_error_msg)

    if results:
        # Fixed: pass results lazily instead of eager %-formatting.
        logging.info("Results placed in %s", results)

        # wait until now to perform this check, so it gets properly logged
        if (parser.options.use_existing_results and not resultdir_exists and
                not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(
                parser.options.pidfile_label, results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.Autotest.set_install_in_tmpdir(parser.options.install_in_tmpdir)

    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. Eg,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a
    # test for the autoserv command's -l option.
    testing_exceptions = _CONFIG.get_config_value(
            'AUTOSERV', 'testing_exceptions', type=list, default=[])
    test_mode = _CONFIG.get_config_value(
            'AUTOSERV', 'testing_mode', type=bool, default=False)
    test_mode = (results_mocker and test_mode and not
                 any([ex in parser.options.label
                      for ex in testing_exceptions]))
    is_task = (parser.options.verify or parser.options.repair or
               parser.options.provision or parser.options.reset or
               parser.options.cleanup or parser.options.collect_crashinfo)

    trace_labels = {
            'job_id': job_directories.get_job_id_or_task_id(
                    parser.options.results)
    }
    trace = cloud_trace.SpanStack(
            labels=trace_labels,
            global_context=parser.options.cloud_trace_context)
    trace.enabled = parser.options.cloud_trace_context_enabled == 'True'
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just
                # return happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]
                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    results_mocker.ResultsMocker(
                            'unknown-test', parser.options.results, machine
                            ).mock_results()
                return
            else:
                with trace.Span(get_job_status(parser.options)):
                    run_autoserv(pid_file_manager, results, parser, ssp_url,
                                 use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception('Uncaught SystemExit with code %s',
                                  exit_code)
        except Exception:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception('Uncaught Exception, exit_code = 1.')
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
        # Record the autoserv duration time. Must be called
        # just before the system exits to ensure accuracy.
        record_autoserv(parser.options, start_time)
    sys.exit(exit_code)