def setup_class(cls):
    """Point disk logging at a fresh scratch directory and make sure the
    thermos_runner pants target has been built once for this test class.

    Respects the SKIP_PANTS_BUILD environment variable to skip the build.
    """
    scratch_dir = tempfile.mkdtemp()
    cls.LOG_DIR = scratch_dir
    LogOptions.set_log_dir(scratch_dir)
    LogOptions.set_disk_log_level("DEBUG")
    log.init("executor_logger")

    # Guard clause: nothing to do if already built or explicitly skipped.
    if cls.PANTS_BUILT or "SKIP_PANTS_BUILD" in os.environ:
        return

    build_command = ["./pants", "src/main/python/apache/aurora/executor/bin:thermos_runner"]
    # A non-zero exit from pants aborts the test class up front.
    assert subprocess.call(build_command) == 0
    cls.PANTS_BUILT = True
def run(self, lock):
    """Run the requested goal phases against the configured targets.

    Sets up optional logging, applies the deprecated --all/--all-recursive
    target flags, builds the execution Context, and dispatches to Phase.

    :param lock: a lock handed through to the Context to serialize builds.
    :returns: the Phase result code; when unknown goals were requested,
      returns the result of executing the 'goals' help phase instead.
    """
    if self.options.dry_run:
        # Fixed: the original used the Python-2-only bare print statement;
        # print() with a single argument behaves identically on 2 and 3.
        print("****** Dry Run ******")

    logger = None
    if self.options.log or self.options.log_level:
        from twitter.common.log import init
        from twitter.common.log.options import LogOptions
        LogOptions.set_stderr_log_level((self.options.log_level or "info").upper())
        logdir = self.options.logdir or self.config.get("goals", "logdir", default=None)
        if logdir:
            safe_mkdir(logdir)
            LogOptions.set_log_dir(logdir)
            init("goals")
        else:
            init()
        logger = log

    if self.options.recursive_directory:
        log.warn("--all-recursive is deprecated, use a target spec with the form [dir]:: instead")
        for dir in self.options.recursive_directory:
            self.add_target_recursive(dir)

    if self.options.target_directory:
        log.warn("--all is deprecated, use a target spec with the form [dir]: instead")
        for dir in self.options.target_directory:
            self.add_target_directory(dir)

    context = Context(
        self.config,
        self.options,
        self.targets,
        requested_goals=self.requested_goals,
        lock=lock,
        log=logger,
        timer=self.timer if self.options.time else None,
    )

    unknown = []
    for phase in self.phases:
        if not phase.goals():
            unknown.append(phase)

    if unknown:
        print("Unknown goal(s): %s" % " ".join(phase.name for phase in unknown))
        print("")
        # Fall back to listing the available goals.
        return Phase.execute(context, "goals")

    if logger:
        logger.debug("Operating on targets: %s", self.targets)

    ret = Phase.attempt(context, self.phases)
    if self.options.time:
        print("Timing report")
        print("=============")
        self.timer.print_timings()
    return ret
def run(self, lock):
    """Execute the requested goal phases and return the engine result.

    Configures standard python logging from the options, updates reporting,
    applies the deprecated --all/--all-recursive flags, and returns 1 when
    unknown goals were requested.
    """
    # TODO(John Sirois): Consider moving to straight python logging. The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.options.log_level:
        LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
        logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
        if logdir:
            safe_mkdir(logdir)
            LogOptions.set_log_dir(logdir)
            log.init('goals')
        else:
            log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_console_task():
        return any(
            issubclass(goal.task_type, ConsoleTask)
            for phase in self.phases
            for goal in phase.goals())

    update_reporting(self.options, is_console_task() or self.options.explain, self.run_tracker)

    if self.options.dry_run:
        print('****** Dry Run ******')

    context = Context(
        self.config,
        self.options,
        self.run_tracker,
        self.targets,
        requested_goals=self.requested_goals,
        lock=lock)

    if self.options.recursive_directory:
        context.log.warn(
            '--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
        for directory in self.options.recursive_directory:
            self.add_target_recursive(directory)

    if self.options.target_directory:
        context.log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
        for directory in self.options.target_directory:
            self.add_target_directory(directory)

    undefined = [phase for phase in self.phases if not phase.goals()]
    if undefined:
        _list_goals(context, 'Unknown goal(s): %s' % ' '.join(phase.name for phase in undefined))
        return 1

    return Goal._execute(context, self.phases, print_timing=self.options.time)
def setup_class(cls):
    """Route disk logging to a scratch directory and build the thermos_runner
    pants binary once per test class (skippable via SKIP_PANTS_BUILD)."""
    scratch_dir = tempfile.mkdtemp()
    cls.LOG_DIR = scratch_dir
    LogOptions.set_log_dir(scratch_dir)
    LogOptions.set_disk_log_level('DEBUG')
    log.init('executor_logger')

    # Guard clause: skip the (slow) pants build when possible.
    if cls.PANTS_BUILT or 'SKIP_PANTS_BUILD' in os.environ:
        return

    build_command = ["./pants", "binary", "src/main/python/apache/thermos/bin:thermos_runner"]
    assert subprocess.call(build_command) == 0
    cls.PANTS_BUILT = True
def run(self, lock):
    """Check targets for dependency cycles, then run the requested phases.

    :param lock: a lock handed through to the Context to serialize builds.
    :returns: the Phase.attempt result; when unknown goals were requested,
      returns the result of executing the 'goals' help phase instead.
    """
    with self.check_errors("Target contains a dependency cycle") as error:
        for target in self.targets:
            try:
                InternalTarget.check_cycles(target)
            except InternalTarget.CycleException:
                # Fixed: the exception object was bound (as e) but never used;
                # only the offending target id is recorded.
                error(target.id)

    timer = None
    if self.options.time:
        # Minimal timer protocol expected by Phase.attempt.
        class Timer(object):
            def now(self):
                return time.time()

            def log(self, message):
                print(message)
        timer = Timer()

    logger = None
    if self.options.log or self.options.log_level:
        from twitter.common.log import init
        from twitter.common.log.options import LogOptions
        LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
        logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
        if logdir:
            safe_mkdir(logdir)
            LogOptions.set_log_dir(logdir)
            init('goals')
        else:
            init()
        logger = log

    if self.options.recursive_directory:
        log.warn('--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
        for dir in self.options.recursive_directory:
            self.add_target_recursive(dir)

    if self.options.target_directory:
        log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
        for dir in self.options.target_directory:
            self.add_target_directory(dir)

    context = Context(self.config, self.options, self.targets, lock=lock, log=logger)

    unknown = []
    for phase in self.phases:
        if not phase.goals():
            unknown.append(phase)

    if unknown:
        print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
        print('')
        # Fall back to listing the available goals.
        return Phase.execute(context, 'goals')

    if logger:
        logger.debug('Operating on targets: %s', self.targets)

    return Phase.attempt(context, self.phases, timer=timer)
def execute(self):
    """Resolve targets from the directory options, then run the goal phases.

    Returns the Phase.attempt result, or the result of the 'goals' help phase
    when unknown goals were requested.
    """

    def collect(dir, buildfile):
        # NOTE: 'error' is resolved late from the enclosing scope; each
        # check_errors block below rebinds it before this closure is called.
        try:
            self.targets.extend(Target.get(addr) for addr in Target.get_all_addresses(buildfile))
        except (TypeError, ImportError):
            error(dir, include_traceback=True)
        except (IOError, SyntaxError):
            error(dir)

    if self.options.recursive_directory:
        with self.check_errors('There was a problem scanning the '
                               'following directories for targets:') as error:
            for dir in self.options.recursive_directory:
                for buildfile in BuildFile.scan_buildfiles(self.root_dir, dir):
                    collect(dir, buildfile)

    if self.options.target_directory:
        with self.check_errors("There was a problem loading targets "
                               "from the following directory's BUILD files") as error:
            for dir in self.options.target_directory:
                collect(dir, BuildFile(self.root_dir, dir))

    timer = None
    if self.options.time:
        # Minimal timer protocol expected by Phase.attempt.
        class Timer(object):
            def now(self):
                return time.time()

            def log(self, message):
                print(message)
        timer = Timer()

    logger = None
    if self.options.log or self.options.log_level:
        from twitter.common.log import init
        from twitter.common.log.options import LogOptions
        LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
        logdir = self.config.get('goals', 'logdir')
        if logdir:
            safe_mkdir(logdir)
            LogOptions.set_log_dir(logdir)
            init('goals')
        logger = log

    context = Context(self.config, self.options, self.targets, log=logger)

    undefined = [phase for phase in self.phases if not phase.goals()]
    if undefined:
        print('Unknown goal(s): %s' % ' '.join(phase.name for phase in undefined))
        print()
        return Phase.execute(context, 'goals')

    return Phase.attempt(context, self.phases, timer=timer)
def setup_class(cls):
    """Route disk logging to a scratch directory and build the thermos_runner
    pex once per test class, recording its location in cls.PEX_PATH."""
    cls.LOG_DIR = tempfile.mkdtemp()
    LogOptions.set_log_dir(cls.LOG_DIR)
    LogOptions.set_disk_log_level('DEBUG')
    log.init('executor_logger')

    # Guard clause: the pex only needs to be built once.
    if cls.PEX_PATH:
        return

    pex_dir = tempfile.mkdtemp()
    build_command = [
        "./pants",
        "--pants-distdir=%s" % pex_dir,
        "binary",
        "src/main/python/apache/thermos/runner:thermos_runner",
    ]
    assert subprocess.call(build_command) == 0
    cls.PEX_PATH = os.path.join(pex_dir, 'thermos_runner.pex')
def run(self, lock):
    """Check targets for dependency cycles (timed), then run the requested
    phases, printing a timing report when --time is set."""
    with self.check_errors("Target contains a dependency cycle") as error:
        with self.timer.timing("parse:check_cycles"):
            for target in self.targets:
                try:
                    InternalTarget.check_cycles(target)
                except InternalTarget.CycleException:
                    # Only the offending target id is recorded.
                    error(target.id)

    logger = None
    if self.options.log or self.options.log_level:
        from twitter.common.log import init
        from twitter.common.log.options import LogOptions
        LogOptions.set_stderr_log_level((self.options.log_level or "info").upper())
        logdir = self.options.logdir or self.config.get("goals", "logdir", default=None)
        if logdir:
            safe_mkdir(logdir)
            LogOptions.set_log_dir(logdir)
            init("goals")
        else:
            init()
        logger = log

    if self.options.recursive_directory:
        log.warn("--all-recursive is deprecated, use a target spec with the form [dir]:: instead")
        for directory in self.options.recursive_directory:
            self.add_target_recursive(directory)

    if self.options.target_directory:
        log.warn("--all is deprecated, use a target spec with the form [dir]: instead")
        for directory in self.options.target_directory:
            self.add_target_directory(directory)

    context = Context(self.config, self.options, self.targets, lock=lock, log=logger)

    undefined = [phase for phase in self.phases if not phase.goals()]
    if undefined:
        print("Unknown goal(s): %s" % " ".join(phase.name for phase in undefined))
        print("")
        return Phase.execute(context, "goals")

    if logger:
        logger.debug("Operating on targets: %s", self.targets)

    ret = Phase.attempt(context, self.phases, timer=self.timer if self.options.time else None)
    if self.options.time:
        print("Timing report")
        print("=============")
        self.timer.print_timings()
    return ret
def run(self, lock):
    """Run the requested goal phases against the configured targets.

    Sets up optional logging, applies the deprecated --all/--all-recursive
    target flags, builds the execution Context, and dispatches to Phase.

    :param lock: a lock handed through to the Context to serialize builds.
    :returns: the Phase result code; when unknown goals were requested,
      returns the result of executing the 'goals' help phase instead.
    """
    if self.options.dry_run:
        # Fixed: the original used the Python-2-only bare print statement;
        # print() with a single argument behaves identically on 2 and 3.
        print('****** Dry Run ******')

    logger = None
    if self.options.log or self.options.log_level:
        from twitter.common.log import init
        from twitter.common.log.options import LogOptions
        LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
        logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
        if logdir:
            safe_mkdir(logdir)
            LogOptions.set_log_dir(logdir)
            init('goals')
        else:
            init()
        logger = log

    if self.options.recursive_directory:
        log.warn('--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
        for dir in self.options.recursive_directory:
            self.add_target_recursive(dir)

    if self.options.target_directory:
        log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
        for dir in self.options.target_directory:
            self.add_target_directory(dir)

    context = Context(
        self.config,
        self.options,
        self.targets,
        lock=lock,
        log=logger,
        timer=self.timer if self.options.time else None)

    unknown = []
    for phase in self.phases:
        if not phase.goals():
            unknown.append(phase)

    if unknown:
        print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
        print('')
        # Fall back to listing the available goals.
        return Phase.execute(context, 'goals')

    if logger:
        logger.debug('Operating on targets: %s', self.targets)

    ret = Phase.attempt(context, self.phases)
    if self.options.time:
        print('Timing report')
        print('=============')
        self.timer.print_timings()
    return ret
def run(self, lock):
    """Execute the requested phases through a GroupEngine, returning 1 when
    unknown goals were requested."""
    # TODO(John Sirois): Consider moving to straight python logging. The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.options.log_level:
        LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
        logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
        if logdir:
            safe_mkdir(logdir)
            LogOptions.set_log_dir(logdir)
            log.init('goals')
        else:
            log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_console_task():
        return any(
            issubclass(goal.task_type, ConsoleTask)
            for phase in self.phases
            for goal in phase.goals())

    update_reporting(self.options, is_console_task() or self.options.explain, self.run_tracker)

    context = Context(
        self.config,
        self.options,
        self.run_tracker,
        self.targets,
        requested_goals=self.requested_goals,
        build_graph=self.build_graph,
        build_file_parser=self.build_file_parser,
        lock=lock)

    undefined = [phase for phase in self.phases if not phase.goals()]
    if undefined:
        context.log.error('Unknown goal(s): %s\n' % ' '.join(phase.name for phase in undefined))
        return 1

    return GroupEngine().execute(context, self.phases)
def _do_run(self):
    """Configure logging and reporting from the global options, build the goal
    Context, and execute all goals through a RoundEngine.

    Returns the engine's exit status, or 1 when unknown goals were requested.
    """
    # TODO(John Sirois): Consider moving to straight python logging. The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.
    # TODO(Eric Ayers) We are missing log messages. Set the log level earlier
    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.global_options.level:
        LogOptions.set_stderr_log_level((self.global_options.level or 'info').upper())
        logdir = self.global_options.logdir or self.config.get('goals', 'logdir', default=None)
        if logdir:
            safe_mkdir(logdir)
            LogOptions.set_log_dir(logdir)
            prev_log_level = None
            # If quiet, temporarily change stderr log level to kill init's output.
            if self.global_options.quiet:
                prev_log_level = LogOptions.loglevel_name(LogOptions.stderr_log_level())
                # loglevel_name can fail, so only change level if we were able to get the current one.
                if prev_log_level is not None:
                    LogOptions.set_stderr_log_level(LogOptions._LOG_LEVEL_NONE_KEY)
            log.init('goals')
            # Restore the stderr level suppressed above, if any.
            if prev_log_level is not None:
                LogOptions.set_stderr_log_level(prev_log_level)
        else:
            log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_quiet_task():
        for goal in self.goals:
            if goal.has_task_of_type(QuietTaskMixin):
                return True
        return False

    is_explain = self.global_options.explain
    update_reporting(self.global_options, is_quiet_task() or is_explain, self.run_tracker)

    context = Context(
        config=self.config,
        options=self.options,
        run_tracker=self.run_tracker,
        target_roots=self.targets,
        requested_goals=self.requested_goals,
        build_graph=self.build_graph,
        build_file_parser=self.build_file_parser,
        address_mapper=self.address_mapper,
        spec_excludes=self.get_spec_excludes()
    )

    # A goal with no ordered tasks was never registered: report and bail.
    unknown = []
    for goal in self.goals:
        if not goal.ordered_task_names():
            unknown.append(goal)

    if unknown:
        context.log.error('Unknown goal(s): %s\n' % ' '.join(goal.name for goal in unknown))
        return 1

    engine = RoundEngine()
    return engine.execute(context, self.goals)
# Executor entry-point wiring: runner providers, sandbox, and plumbing imports.
from apache.aurora.executor.common.executor_timeout import ExecutorTimeout
from apache.aurora.executor.common.health_checker import HealthCheckerProvider
from apache.aurora.executor.common.resource_manager import ResourceManagerProvider
from apache.aurora.executor.common.sandbox import DefaultSandboxProvider
from apache.aurora.executor.thermos_task_runner import (
    DefaultThermosTaskRunnerProvider,
    UserOverrideThermosTaskRunnerProvider
)
from apache.thermos.common.path import TaskPath

# Run out of the Mesos sandbox when launched by the agent; fall back to cwd.
CWD = os.environ.get('MESOS_SANDBOX', '.')

app.configure(debug=True)

# Write executor logs into the sandbox at DEBUG verbosity.
LogOptions.set_simple(True)
LogOptions.set_disk_log_level('DEBUG')
LogOptions.set_log_dir(CWD)

app.add_option(
    '--announcer-enable',
    dest='announcer_enable',
    action='store_true',
    default=False,
    help='Enable the ServerSet announcer for this executor. Jobs must still activate using '
         'the Announcer configuration.')

# NOTE(review): this option definition is truncated in the visible source;
# its remaining keyword arguments continue beyond this chunk.
app.add_option(
    '--announcer-ensemble',
    dest='announcer_ensemble',
    type=str,
import threading
import time
import unittest

import zookeeper

from twitter.common.zookeeper.client import ZooKeeper, ZooDefs
from twitter.common.zookeeper.test_server import ZookeeperServer
from twitter.common.zookeeper.group.group import ActiveGroup, Group, Membership

# Opt-in debug logging for these tests, controlled by the environment.
if os.getenv('ZOOKEEPER_TEST_DEBUG'):
    from twitter.common import log
    from twitter.common.log.options import LogOptions
    LogOptions.set_stderr_log_level('DEBUG')
    LogOptions.set_disk_log_level('NONE')
    LogOptions.set_log_dir('/tmp')
    log.init('client_test')


class AlternateGroup(Group):
    # Distinct member prefix so AlternateGroup nodes don't collide with Group's.
    MEMBER_PREFIX = 'herpderp_'


class TestGroup(unittest.TestCase):
    # Group implementation under test; subclasses may override.
    GroupImpl = Group
    # Test-wide timing knobs.
    MAX_EVENT_WAIT_SECS = 30.0
    CONNECT_TIMEOUT_SECS = 10.0
    CONNECT_RETRIES = 6

    # NOTE(review): make_zk's body continues beyond the visible source.
    @classmethod
    def make_zk(cls, ensemble, **kw):
from twitter.aurora.executor.gc_executor import ThermosGCExecutor
from twitter.common import app, log
from twitter.common.log.options import LogOptions
from twitter.common.metrics.sampler import DiskMetricWriter
from twitter.thermos.common.path import TaskPath

import mesos

app.configure(debug=True)

# locate logs locally in executor sandbox
LogOptions.set_simple(True)
LogOptions.set_disk_log_level('DEBUG')
LogOptions.set_log_dir(ExecutorDetector.LOG_PATH)


# NOTE(review): proxy_main/main continue beyond the visible source.
def proxy_main():
    def main():
        # Create executor stub
        thermos_gc_executor = ThermosGCExecutor(checkpoint_root=TaskPath.DEFAULT_CHECKPOINT_ROOT)
        thermos_gc_executor.start()

        # Start metrics collection
        metric_writer = DiskMetricWriter(thermos_gc_executor.metrics, ExecutorDetector.VARS_PATH)
        metric_writer.start()

        # Create driver stub
        driver = mesos.MesosExecutorDriver(thermos_gc_executor)
def setup_class(cls):
    """Point disk logging at a fresh temporary directory for this test class."""
    scratch_dir = tempfile.mkdtemp()
    cls.LOG_DIR = scratch_dir
    LogOptions.set_log_dir(scratch_dir)
    LogOptions.set_disk_log_level('DEBUG')
    log.init('executor_logger')
from twitter.common import app, log
from twitter.common.log.options import LogOptions

from apache.aurora.executor.common.executor_timeout import ExecutorTimeout
from apache.aurora.executor.common.health_checker import HealthCheckerProvider
from apache.aurora.executor.thermos_executor import ThermosExecutor
from apache.aurora.executor.thermos_task_runner import DefaultThermosTaskRunnerProvider

import mesos

app.configure(debug=True)

# Keep executor logs in the current (sandbox) directory at DEBUG verbosity.
LogOptions.set_simple(True)
LogOptions.set_disk_log_level('DEBUG')
LogOptions.set_log_dir('.')


# TODO(wickman) Consider just having the OSS version require pip installed
# thermos_runner binaries on every machine and instead of embedding the pex
# as a resource, shell out to one on the PATH.
def dump_runner_pex():
    """Extract the embedded thermos_runner.pex resource into the current
    directory and return its absolute path."""
    import pkg_resources
    import apache.aurora.executor.resources
    pex_name = 'thermos_runner.pex'
    runner_pex = os.path.join(os.path.realpath('.'), pex_name)
    with open(runner_pex, 'w') as fp:
        # TODO(wickman) Use shutil.copyfileobj to reduce memory footprint here.
        fp.write(pkg_resources.resource_stream(
            apache.aurora.executor.resources.__name__, pex_name).read())
    return runner_pex
def run(self, lock):
    """Execute the requested goals, honoring --target-excludes filtering, and
    return the RoundEngine's exit status (1 when unknown goals were requested).

    :param lock: a lock handed through to the Context to serialize builds.
    """
    # TODO(John Sirois): Consider moving to straight python logging. The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.
    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.options.log_level:
        LogOptions.set_stderr_log_level((self.options.log_level or "info").upper())
        logdir = self.options.logdir or self.config.get("goals", "logdir", default=None)
        if logdir:
            safe_mkdir(logdir)
            LogOptions.set_log_dir(logdir)
            log.init("goals")
        else:
            log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_quiet_task():
        for goal in self.goals:
            if goal.has_task_of_type(QuietTaskMixin):
                return True
        return False

    # Target specs are mapped to the patterns which match them, if any. This variable is a key for
    # specs which don't match any exclusion regexes. We know it won't already be in the list of
    # patterns, because the asterisks in its name make it an invalid regex.
    _UNMATCHED_KEY = "** unmatched **"

    def targets_by_pattern(targets, patterns):
        # Bucket each target under the first exclusion pattern matching its spec.
        mapping = defaultdict(list)
        for target in targets:
            matched_pattern = None
            for pattern in patterns:
                if re.search(pattern, target.address.spec) is not None:
                    matched_pattern = pattern
                    break
            if matched_pattern is None:
                mapping[_UNMATCHED_KEY].append(target)
            else:
                mapping[matched_pattern].append(target)
        return mapping

    is_explain = self.options.explain
    update_reporting(self.options, is_quiet_task() or is_explain, self.run_tracker)

    if self.options.target_excludes:
        excludes = self.options.target_excludes
        log.debug("excludes:\n {excludes}".format(excludes="\n ".join(excludes)))
        by_pattern = targets_by_pattern(self.targets, excludes)
        self.targets = by_pattern[_UNMATCHED_KEY]
        # The rest of this if-statement is just for debug logging.
        log.debug(
            "Targets after excludes: {targets}".format(targets=", ".join(t.address.spec for t in self.targets))
        )
        excluded_count = sum(len(by_pattern[p]) for p in excludes)
        log.debug(
            "Excluded {count} target{plural}.".format(
                count=excluded_count, plural=("s" if excluded_count != 1 else "")
            )
        )
        for pattern in excludes:
            log.debug(
                "Targets excluded by pattern {pattern}\n {targets}".format(
                    pattern=pattern, targets="\n ".join(t.address.spec for t in by_pattern[pattern])
                )
            )

    context = Context(
        config=self.config,
        options=self.options,
        run_tracker=self.run_tracker,
        target_roots=self.targets,
        requested_goals=self.requested_goals,
        build_graph=self.build_graph,
        build_file_parser=self.build_file_parser,
        address_mapper=self.address_mapper,
        lock=lock,
    )

    # A goal with no ordered tasks was never registered: report and bail.
    unknown = []
    for goal in self.goals:
        if not goal.ordered_task_names():
            unknown.append(goal)

    if unknown:
        context.log.error("Unknown goal(s): %s\n" % " ".join(goal.name for goal in unknown))
        return 1

    engine = RoundEngine()
    return engine.execute(context, self.goals)
def setup_class(cls):
    """Send disk logs for this test class to a newly created scratch dir."""
    cls.LOG_DIR = tempfile.mkdtemp()
    LogOptions.set_log_dir(cls.LOG_DIR)
    LogOptions.set_disk_log_level('DEBUG')
    # Initialize the shared logger used by the executor under test.
    log.init('executor_logger')
from mesos.native import MesosExecutorDriver
from twitter.common import app, log
from twitter.common.log.options import LogOptions
from twitter.common.metrics.sampler import DiskMetricWriter

from apache.aurora.executor.executor_detector import ExecutorDetector
from apache.aurora.executor.gc_executor import ThermosGCExecutor
from apache.thermos.common.constants import DEFAULT_CHECKPOINT_ROOT

app.configure(debug=True)

# locate logs locally in executor sandbox
LogOptions.set_simple(True)
LogOptions.set_disk_log_level('DEBUG')
LogOptions.set_log_dir(ExecutorDetector.LOG_PATH)


# NOTE(review): proxy_main/main continue beyond the visible source.
def proxy_main():
    def main():
        # Create executor stub
        thermos_gc_executor = ThermosGCExecutor(checkpoint_root=DEFAULT_CHECKPOINT_ROOT)
        thermos_gc_executor.start()

        # Start metrics collection
        metric_writer = DiskMetricWriter(thermos_gc_executor.metrics, ExecutorDetector.VARS_PATH)
        metric_writer.start()

        # Create driver stub
        driver = MesosExecutorDriver(thermos_gc_executor)
from apache.aurora.executor.common.sandbox import DefaultSandboxProvider
from apache.aurora.executor.thermos_task_runner import (
    DefaultThermosTaskRunnerProvider,
    UserOverrideThermosTaskRunnerProvider)

# The native Mesos bindings may be unavailable; degrade gracefully so the
# module still imports (the driver is checked for None at run time).
try:
    from mesos.executor import MesosExecutorDriver
except ImportError:
    print(traceback.format_exc(), file=sys.stderr)
    MesosExecutorDriver = None

# Run out of the Mesos sandbox when launched by the agent; fall back to cwd.
CWD = os.environ.get('MESOS_SANDBOX', '.')

app.configure(debug=True)

# Write executor logs into the sandbox at DEBUG verbosity.
LogOptions.set_simple(True)
LogOptions.set_disk_log_level('DEBUG')
LogOptions.set_log_dir(CWD)

app.add_option(
    '--announcer-ensemble',
    dest='announcer_ensemble',
    type=str,
    default=None,
    help='The ensemble to which the Announcer should register ServerSets.')

# NOTE(review): this option definition is truncated in the visible source;
# its help string continues beyond this chunk.
app.add_option(
    '--announcer-serverset-path',
    dest='announcer_serverset_path',
    type=str,
    default='/aurora',
    help=
    'The root of the tree into which ServerSets should be announced. The paths will '
from twitter.common import log
from twitter.common.log.options import LogOptions
from twitter.common.zookeeper.client import ZooKeeper, ZooDefs
from twitter.common.zookeeper.test_server import ZookeeperServer

# Test-wide timing knobs.
MAX_EVENT_WAIT_SECS = 30.0
MAX_EXPIRE_WAIT_SECS = 60.0
CONNECT_TIMEOUT_SECS = 10.0
CONNECT_RETRIES = 6

# Opt-in debug logging for these tests, controlled by the environment.
if os.getenv('ZOOKEEPER_TEST_DEBUG'):
    LogOptions.set_stderr_log_level('NONE')
    LogOptions.set_disk_log_level('DEBUG')
    LogOptions.set_log_dir('/tmp')
    log.init('client_test')


def make_zk(server, **kw):
    """Build a ZooKeeper client connected to the given test server."""
    return ZooKeeper('localhost:%d' % server.zookeeper_port,
                     timeout_secs=CONNECT_TIMEOUT_SECS,
                     max_reconnects=CONNECT_RETRIES,
                     **kw)


def test_client_connect():
    """A fresh server should expose only the default /zookeeper node."""
    with ZookeeperServer() as server:
        zk = make_zk(server)
        assert zk.get_children('/') == ['zookeeper']
def run(self):
    """Configure logging/reporting, apply --exclude-target-regexp filtering,
    and execute the goals through a RoundEngine.

    Returns the engine's exit status, or 1 when unknown goals were requested.
    """
    # TODO(John Sirois): Consider moving to straight python logging. The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.
    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.global_options.level:
        LogOptions.set_stderr_log_level((self.global_options.level or 'info').upper())
        logdir = self.global_options.logdir or self.config.get('goals', 'logdir', default=None)
        if logdir:
            safe_mkdir(logdir)
            LogOptions.set_log_dir(logdir)
            prev_log_level = None
            # If quiet, temporarily change stderr log level to kill init's output.
            if self.global_options.quiet:
                prev_log_level = LogOptions.loglevel_name(LogOptions.stderr_log_level())
                # loglevel_name can fail, so only change level if we were able to get the current one.
                if prev_log_level is not None:
                    LogOptions.set_stderr_log_level(LogOptions._LOG_LEVEL_NONE_KEY)
            log.init('goals')
            # Restore the stderr level suppressed above, if any.
            if prev_log_level is not None:
                LogOptions.set_stderr_log_level(prev_log_level)
        else:
            log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_quiet_task():
        for goal in self.goals:
            if goal.has_task_of_type(QuietTaskMixin):
                return True
        return False

    # Target specs are mapped to the patterns which match them, if any. This variable is a key for
    # specs which don't match any exclusion regexes. We know it won't already be in the list of
    # patterns, because the asterisks in its name make it an invalid regex.
    _UNMATCHED_KEY = '** unmatched **'

    def targets_by_pattern(targets, patterns):
        # Bucket each target under the first exclusion pattern matching its spec.
        mapping = defaultdict(list)
        for target in targets:
            matched_pattern = None
            for pattern in patterns:
                if re.search(pattern, target.address.spec) is not None:
                    matched_pattern = pattern
                    break
            if matched_pattern is None:
                mapping[_UNMATCHED_KEY].append(target)
            else:
                mapping[matched_pattern].append(target)
        return mapping

    is_explain = self.global_options.explain
    update_reporting(self.global_options, is_quiet_task() or is_explain, self.run_tracker)

    if self.global_options.exclude_target_regexp:
        excludes = self.global_options.exclude_target_regexp
        log.debug('excludes:\n {excludes}'.format(excludes='\n '.join(excludes)))
        by_pattern = targets_by_pattern(self.targets, excludes)
        self.targets = by_pattern[_UNMATCHED_KEY]
        # The rest of this if-statement is just for debug logging.
        log.debug('Targets after excludes: {targets}'.format(
            targets=', '.join(t.address.spec for t in self.targets)))
        excluded_count = sum(len(by_pattern[p]) for p in excludes)
        log.debug('Excluded {count} target{plural}.'.format(count=excluded_count,
            plural=('s' if excluded_count != 1 else '')))
        for pattern in excludes:
            log.debug('Targets excluded by pattern {pattern}\n {targets}'.format(pattern=pattern,
                targets='\n '.join(t.address.spec for t in by_pattern[pattern])))

    context = Context(
        config=self.config,
        new_options=self.new_options,
        run_tracker=self.run_tracker,
        target_roots=self.targets,
        requested_goals=self.requested_goals,
        build_graph=self.build_graph,
        build_file_parser=self.build_file_parser,
        address_mapper=self.address_mapper,
        spec_excludes=self.get_spec_excludes()
    )

    # A goal with no ordered tasks was never registered: report and bail.
    unknown = []
    for goal in self.goals:
        if not goal.ordered_task_names():
            unknown.append(goal)

    if unknown:
        context.log.error('Unknown goal(s): %s\n' % ' '.join(goal.name for goal in unknown))
        return 1

    engine = RoundEngine()
    return engine.execute(context, self.goals)
def _do_run(self):
    """Configure logging and reporting from the global options, build the goal
    Context, and execute all goals through a RoundEngine.

    Returns the engine's exit status, or 1 when unknown goals were requested.
    """
    # TODO(John Sirois): Consider moving to straight python logging. The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.
    # TODO(Eric Ayers) We are missing log messages. Set the log level earlier
    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.global_options.level:
        LogOptions.set_stderr_log_level((self.global_options.level or 'info').upper())
        logdir = self.global_options.logdir or self.config.get(
            'goals', 'logdir', default=None)
        if logdir:
            safe_mkdir(logdir)
            LogOptions.set_log_dir(logdir)
            prev_log_level = None
            # If quiet, temporarily change stderr log level to kill init's output.
            if self.global_options.quiet:
                prev_log_level = LogOptions.loglevel_name(
                    LogOptions.stderr_log_level())
                # loglevel_name can fail, so only change level if we were able to get the current one.
                if prev_log_level is not None:
                    LogOptions.set_stderr_log_level(
                        LogOptions._LOG_LEVEL_NONE_KEY)
            log.init('goals')
            # Restore the stderr level suppressed above, if any.
            if prev_log_level is not None:
                LogOptions.set_stderr_log_level(prev_log_level)
        else:
            log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_quiet_task():
        for goal in self.goals:
            if goal.has_task_of_type(QuietTaskMixin):
                return True
        return False

    is_explain = self.global_options.explain
    update_reporting(self.global_options, is_quiet_task() or is_explain, self.run_tracker)

    context = Context(config=self.config,
                      options=self.options,
                      run_tracker=self.run_tracker,
                      target_roots=self.targets,
                      requested_goals=self.requested_goals,
                      build_graph=self.build_graph,
                      build_file_parser=self.build_file_parser,
                      address_mapper=self.address_mapper,
                      spec_excludes=self.get_spec_excludes())

    # A goal with no ordered tasks was never registered: report and bail.
    unknown = []
    for goal in self.goals:
        if not goal.ordered_task_names():
            unknown.append(goal)

    if unknown:
        context.log.error('Unknown goal(s): %s\n' % ' '.join(goal.name for goal in unknown))
        return 1

    engine = RoundEngine()
    return engine.execute(context, self.goals)