def set_up_mocks(self, su=None):
  """Record the mox expectations shared by the stats-uploader tests.

  Mox verifies calls in recording order during replay, so the sequence
  below must match the order in which the code under test makes these
  calls.  `su` is accepted for caller compatibility but not used here.
  """
  # Fake temp-dir creation and log initialization.
  self.mox.StubOutWithMock(dirutil, 'safe_mkdtemp')
  dirutil.safe_mkdtemp().AndReturn('/tmp/test')
  self.mox.StubOutWithMock(log, 'init')
  log.init('/tmp/test/current_run').AndReturn(0)
  # Canned git metadata: remote listing, then current branch name.
  self.mox.StubOutWithMock(CommandUtil, 'execute_and_get_output')
  stub = CommandUtil.execute_and_get_output(['git','remote', '-v'])
  stub.AndReturn((0, dedent("""origin https://git.twitter.biz/science (fetch) origin https://git.twitter.biz/science (push)""")))
  stub2 = CommandUtil.execute_and_get_output(['git','rev-parse', '--abbrev-ref', 'HEAD'])
  stub2.AndReturn((0,"test_br"))
  # Canned machine stats from psutil.
  self.mox.StubOutWithMock(psutil, 'cpu_percent')
  psutil.cpu_percent(interval=1).AndReturn(1.0)
  self.mox.StubOutWithMock(psutil, 'network_io_counters')
  psutil.network_io_counters().AndReturn("1000,10000,1000")
  self.mox.StubOutWithMock(psutil, 'NUM_CPUS')
  # NUM_CPUS is a module attribute, not a callable, so it is assigned directly.
  psutil.NUM_CPUS = 5
  # Canned host identity.
  self.mox.StubOutWithMock(socket, 'gethostname')
  socket.gethostname().AndReturn("localhost")
  self.mox.StubOutWithMock(socket, 'gethostbyname')
  socket.gethostbyname("localhost").AndReturn("localhost")
  # The code under test is expected to finish with a clean exit.
  self.mox.StubOutWithMock(sys, 'exit')
  sys.exit(0).AndReturn(0)
  self.mox.ReplayAll()
def set_up_mocks(self, su=None):
  """Record the mox expectations shared by the stats-uploader tests.

  Mox verifies calls in recording order during replay, so the sequence
  below must match the order in which the code under test makes these
  calls.  `su` is accepted for caller compatibility but not used here.
  """
  # Fake temp-dir creation and log initialization.
  self.mox.StubOutWithMock(dirutil, 'safe_mkdtemp')
  dirutil.safe_mkdtemp().AndReturn('/tmp/test')
  self.mox.StubOutWithMock(log, 'init')
  log.init('/tmp/test/current_run').AndReturn(0)
  # Canned git metadata: remote listing, then current branch name.
  self.mox.StubOutWithMock(CommandUtil, 'execute_and_get_output')
  stub = CommandUtil.execute_and_get_output(['git', 'remote', '-v'])
  stub.AndReturn(
      (0, dedent("""origin https://git.twitter.biz/science (fetch) origin https://git.twitter.biz/science (push)""")))
  stub2 = CommandUtil.execute_and_get_output(
      ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
  stub2.AndReturn((0, "test_br"))
  # Canned machine stats from psutil.
  self.mox.StubOutWithMock(psutil, 'cpu_percent')
  psutil.cpu_percent(interval=1).AndReturn(1.0)
  self.mox.StubOutWithMock(psutil, 'network_io_counters')
  psutil.network_io_counters().AndReturn("1000,10000,1000")
  self.mox.StubOutWithMock(psutil, 'NUM_CPUS')
  # NUM_CPUS is a module attribute, not a callable, so it is assigned directly.
  psutil.NUM_CPUS = 5
  # Canned host identity.
  self.mox.StubOutWithMock(socket, 'gethostname')
  socket.gethostname().AndReturn("localhost")
  self.mox.StubOutWithMock(socket, 'gethostbyname')
  socket.gethostbyname("localhost").AndReturn("localhost")
  # The code under test is expected to finish with a clean exit.
  self.mox.StubOutWithMock(sys, 'exit')
  sys.exit(0).AndReturn(0)
  self.mox.ReplayAll()
def setup_class(cls):
  """Route disk logging to a scratch directory and build the runner pex once.

  The pants build is skipped when it has already run in this process or
  when the SKIP_PANTS_BUILD environment variable is set.
  """
  cls.LOG_DIR = tempfile.mkdtemp()
  LogOptions.set_log_dir(cls.LOG_DIR)
  LogOptions.set_disk_log_level("DEBUG")
  log.init("executor_logger")
  already_built = cls.PANTS_BUILT
  skip_requested = "SKIP_PANTS_BUILD" in os.environ
  if not (already_built or skip_requested):
    build_cmd = ["./pants", "src/main/python/apache/aurora/executor/bin:thermos_runner"]
    assert subprocess.call(build_cmd) == 0
    cls.PANTS_BUILT = True
def run(self, lock):
  """Execute the requested goal phases and return a process exit code.

  Sets up stderr/disk logging, configures reporting, builds the run
  Context, expands deprecated directory options, and dispatches to the
  goal engine.  Returns 1 on unknown goals, otherwise the engine result.
  """
  # TODO(John Sirois): Consider moving to straight python logging. The divide between the
  # context/work-unit logging and standard python logging doesn't buy us anything.

  # Enable standard python logging for code with no handle to a context/work-unit.
  if self.options.log_level:
    LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
    logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      log.init('goals')
    else:
      log.init()

  # Update the reporting settings, now that we have flags etc.
  def is_console_task():
    # True when any requested goal is backed by a ConsoleTask, which
    # changes how reporting output is routed.
    for phase in self.phases:
      for goal in phase.goals():
        if issubclass(goal.task_type, ConsoleTask):
          return True
    return False

  is_explain = self.options.explain
  update_reporting(self.options, is_console_task() or is_explain, self.run_tracker)

  if self.options.dry_run:
    print('****** Dry Run ******')

  context = Context(
      self.config,
      self.options,
      self.run_tracker,
      self.targets,
      requested_goals=self.requested_goals,
      lock=lock)

  # Expand deprecated recursive/flat directory options into targets.
  if self.options.recursive_directory:
    context.log.warn(
        '--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
    for dir in self.options.recursive_directory:
      self.add_target_recursive(dir)

  if self.options.target_directory:
    context.log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
    for dir in self.options.target_directory:
      self.add_target_directory(dir)

  # Collect requested phases that resolve to no goals at all.
  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    _list_goals(context, 'Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
    return 1

  return Goal._execute(context, self.phases, print_timing=self.options.time)
def run(self, lock):
  """Execute the requested goal phases and return the phase result.

  Optionally initializes logging (lazily importing the log modules),
  expands deprecated directory options, builds the run Context, and
  attempts the requested phases, printing a timing report if asked.
  """
  if self.options.dry_run:
    # Fixed: `print "..."` is Python-2-only statement syntax; the
    # single-argument parenthesized form behaves identically on 2 and 3.
    print("****** Dry Run ******")

  logger = None
  if self.options.log or self.options.log_level:
    # Imported lazily so logging machinery is only loaded when enabled.
    from twitter.common.log import init
    from twitter.common.log.options import LogOptions
    LogOptions.set_stderr_log_level((self.options.log_level or "info").upper())
    logdir = self.options.logdir or self.config.get("goals", "logdir", default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      init("goals")
    else:
      init()
    logger = log

  # Expand deprecated recursive/flat directory options into targets.
  if self.options.recursive_directory:
    log.warn("--all-recursive is deprecated, use a target spec with the form [dir]:: instead")
    for dir in self.options.recursive_directory:
      self.add_target_recursive(dir)

  if self.options.target_directory:
    log.warn("--all is deprecated, use a target spec with the form [dir]: instead")
    for dir in self.options.target_directory:
      self.add_target_directory(dir)

  context = Context(
      self.config,
      self.options,
      self.targets,
      requested_goals=self.requested_goals,
      lock=lock,
      log=logger,
      timer=self.timer if self.options.time else None,
  )

  # Collect requested phases that resolve to no goals at all.
  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    print("Unknown goal(s): %s" % " ".join(phase.name for phase in unknown))
    print("")
    # Fall back to listing the available goals.
    return Phase.execute(context, "goals")

  if logger:
    logger.debug("Operating on targets: %s", self.targets)

  ret = Phase.attempt(context, self.phases)

  if self.options.time:
    print("Timing report")
    print("=============")
    self.timer.print_timings()
  return ret
def run(self, lock):
  """Execute the requested goal phases and return the phase result.

  Checks targets for dependency cycles, optionally sets up timing and
  logging, expands deprecated directory options, and attempts the
  requested phases.
  """
  # Report every target involved in a dependency cycle before aborting.
  with self.check_errors("Target contains a dependency cycle") as error:
    for target in self.targets:
      try:
        InternalTarget.check_cycles(target)
      except InternalTarget.CycleException as e:
        # Exception details are not reported; only the offending target id.
        error(target.id)

  timer = None
  if self.options.time:
    # Minimal timer adapter expected by Phase.attempt.
    class Timer(object):
      def now(self):
        return time.time()
      def log(self, message):
        print(message)
    timer = Timer()

  logger = None
  if self.options.log or self.options.log_level:
    # Imported lazily so logging machinery is only loaded when enabled.
    from twitter.common.log import init
    from twitter.common.log.options import LogOptions
    LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
    logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      init('goals')
    else:
      init()
    logger = log

  # Expand deprecated recursive/flat directory options into targets.
  if self.options.recursive_directory:
    log.warn('--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
    for dir in self.options.recursive_directory:
      self.add_target_recursive(dir)

  if self.options.target_directory:
    log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
    for dir in self.options.target_directory:
      self.add_target_directory(dir)

  context = Context(self.config, self.options, self.targets, lock=lock, log=logger)

  # Collect requested phases that resolve to no goals at all.
  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
    print('')
    # Fall back to listing the available goals.
    return Phase.execute(context, 'goals')

  if logger:
    logger.debug('Operating on targets: %s', self.targets)

  return Phase.attempt(context, self.phases, timer=timer)
def setup_class(cls):
  """Route disk logging to a scratch directory and build the runner pex once.

  The pants build is skipped when it has already run in this process or
  when the SKIP_PANTS_BUILD environment variable is set.
  """
  cls.LOG_DIR = tempfile.mkdtemp()
  LogOptions.set_log_dir(cls.LOG_DIR)
  LogOptions.set_disk_log_level('DEBUG')
  log.init('executor_logger')
  if cls.PANTS_BUILT or 'SKIP_PANTS_BUILD' in os.environ:
    return
  build_cmd = ["./pants", "binary", "src/main/python/apache/thermos/bin:thermos_runner"]
  assert subprocess.call(build_cmd) == 0
  cls.PANTS_BUILT = True
def setup_class(cls):
  """Route disk logging to a scratch directory and build the runner pex once.

  The pants build is skipped when it has already run in this process or
  when the SKIP_PANTS_BUILD environment variable is set.
  """
  cls.LOG_DIR = tempfile.mkdtemp()
  LogOptions.set_log_dir(cls.LOG_DIR)
  LogOptions.set_disk_log_level('DEBUG')
  log.init('executor_logger')
  needs_build = not cls.PANTS_BUILT and 'SKIP_PANTS_BUILD' not in os.environ
  if needs_build:
    exit_code = subprocess.call(
        ["./pants", "src/main/python/apache/aurora/executor/bin:thermos_runner"])
    assert exit_code == 0
    cls.PANTS_BUILT = True
def execute(self):
  """Load targets from the requested directories and run the phases.

  Scans BUILD files for --all-recursive / --all directories, reporting
  load failures through check_errors, then builds the Context and
  attempts the requested phases.
  """
  # NOTE: add_targets reads `error` from the enclosing scope; it is only
  # called inside the `with ... as error` blocks below, which bind it.
  def add_targets(dir, buildfile):
    try:
      self.targets.extend(Target.get(addr) for addr in Target.get_all_addresses(buildfile))
    except (TypeError, ImportError):
      error(dir, include_traceback=True)
    except (IOError, SyntaxError):
      error(dir)

  if self.options.recursive_directory:
    with self.check_errors('There was a problem scanning the '
                           'following directories for targets:') as error:
      for dir in self.options.recursive_directory:
        for buildfile in BuildFile.scan_buildfiles(self.root_dir, dir):
          add_targets(dir, buildfile)

  if self.options.target_directory:
    with self.check_errors("There was a problem loading targets "
                           "from the following directory's BUILD files") as error:
      for dir in self.options.target_directory:
        add_targets(dir, BuildFile(self.root_dir, dir))

  timer = None
  if self.options.time:
    # Minimal timer adapter expected by Phase.attempt.
    class Timer(object):
      def now(self):
        return time.time()
      def log(self, message):
        print(message)
    timer = Timer()

  logger = None
  if self.options.log or self.options.log_level:
    # Imported lazily so logging machinery is only loaded when enabled.
    from twitter.common.log import init
    from twitter.common.log.options import LogOptions
    LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
    logdir = self.config.get('goals', 'logdir')
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      init('goals')
    logger = log

  context = Context(self.config, self.options, self.targets, log=logger)

  # Collect requested phases that resolve to no goals at all.
  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
    print()
    # Fall back to listing the available goals.
    return Phase.execute(context, 'goals')

  return Phase.attempt(context, self.phases, timer=timer)
def setup_class(cls):
  """Route disk logging to a scratch directory and build the runner pex once.

  The pex is built into a fresh dist dir and its path cached on the
  class so later test classes reuse it.
  """
  cls.LOG_DIR = tempfile.mkdtemp()
  LogOptions.set_log_dir(cls.LOG_DIR)
  LogOptions.set_disk_log_level('DEBUG')
  log.init('executor_logger')
  if cls.PEX_PATH:
    return
  pex_dir = tempfile.mkdtemp()
  pants_cmd = [
      "./pants",
      "--pants-distdir=%s" % pex_dir,
      "binary",
      "src/main/python/apache/thermos/runner:thermos_runner",
  ]
  assert subprocess.call(pants_cmd) == 0
  cls.PEX_PATH = os.path.join(pex_dir, 'thermos_runner.pex')
def __init__(self, host, port, endpoint, max_delay, stats_file, user, force_stats_upload=False):
  """Set up a stats uploader that posts `stats_file` contents over HTTP.

  host/port/endpoint identify the stats service; max_delay throttles
  uploads; user namespaces the local staging directory.  When
  force_stats_upload is True, callers presumably bypass the delay —
  TODO(review): confirm against the upload method, not visible here.
  """
  self.force_stats_upload = force_stats_upload
  # Per-run scratch dir; this uploader's own log goes to <tmpdir>/current_run.
  self._stats_log_dir = dirutil.safe_mkdtemp()
  self._stats_log_file = os.path.join(self._stats_log_dir, "current_run")
  log.init(self._stats_log_file)
  # Stats are staged under /tmp/<user>/stats_uploader_dir before upload.
  self._stats_dir = os.path.join("/tmp", user, "stats_uploader_dir")
  self._stats_http_client = StatsHttpClient(host, port, endpoint, self._stats_dir)
  self._max_delay = max_delay
  self._pants_stat_file = stats_file
  self._user = user
def run(self, lock):
  """Execute the requested goal phases and return the phase result.

  Checks targets for dependency cycles (timed), optionally sets up
  logging, expands deprecated directory options, attempts the phases,
  and prints a timing report when requested.
  """
  # Report every target involved in a dependency cycle before aborting;
  # the cycle check itself is measured under "parse:check_cycles".
  with self.check_errors("Target contains a dependency cycle") as error:
    with self.timer.timing("parse:check_cycles"):
      for target in self.targets:
        try:
          InternalTarget.check_cycles(target)
        except InternalTarget.CycleException as e:
          # Exception details are not reported; only the offending target id.
          error(target.id)

  logger = None
  if self.options.log or self.options.log_level:
    # Imported lazily so logging machinery is only loaded when enabled.
    from twitter.common.log import init
    from twitter.common.log.options import LogOptions
    LogOptions.set_stderr_log_level((self.options.log_level or "info").upper())
    logdir = self.options.logdir or self.config.get("goals", "logdir", default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      init("goals")
    else:
      init()
    logger = log

  # Expand deprecated recursive/flat directory options into targets.
  if self.options.recursive_directory:
    log.warn("--all-recursive is deprecated, use a target spec with the form [dir]:: instead")
    for dir in self.options.recursive_directory:
      self.add_target_recursive(dir)

  if self.options.target_directory:
    log.warn("--all is deprecated, use a target spec with the form [dir]: instead")
    for dir in self.options.target_directory:
      self.add_target_directory(dir)

  context = Context(self.config, self.options, self.targets, lock=lock, log=logger)

  # Collect requested phases that resolve to no goals at all.
  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    print("Unknown goal(s): %s" % " ".join(phase.name for phase in unknown))
    print("")
    # Fall back to listing the available goals.
    return Phase.execute(context, "goals")

  if logger:
    logger.debug("Operating on targets: %s", self.targets)

  ret = Phase.attempt(context, self.phases, timer=self.timer if self.options.time else None)

  if self.options.time:
    print("Timing report")
    print("=============")
    self.timer.print_timings()
  return ret
def run(self, lock):
  """Execute the requested goal phases and return the phase result.

  Optionally initializes logging (lazily importing the log modules),
  expands deprecated directory options, builds the run Context, and
  attempts the requested phases, printing a timing report if asked.
  """
  if self.options.dry_run:
    # Fixed: `print '...'` is Python-2-only statement syntax; the
    # single-argument parenthesized form behaves identically on 2 and 3.
    print('****** Dry Run ******')

  logger = None
  if self.options.log or self.options.log_level:
    # Imported lazily so logging machinery is only loaded when enabled.
    from twitter.common.log import init
    from twitter.common.log.options import LogOptions
    LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
    logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      init('goals')
    else:
      init()
    logger = log

  # Expand deprecated recursive/flat directory options into targets.
  if self.options.recursive_directory:
    log.warn('--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
    for dir in self.options.recursive_directory:
      self.add_target_recursive(dir)

  if self.options.target_directory:
    log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
    for dir in self.options.target_directory:
      self.add_target_directory(dir)

  context = Context(
      self.config,
      self.options,
      self.targets,
      lock=lock,
      log=logger,
      timer=self.timer if self.options.time else None)

  # Collect requested phases that resolve to no goals at all.
  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
    print('')
    # Fall back to listing the available goals.
    return Phase.execute(context, 'goals')

  if logger:
    logger.debug('Operating on targets: %s', self.targets)

  ret = Phase.attempt(context, self.phases)

  if self.options.time:
    print('Timing report')
    print('=============')
    self.timer.print_timings()
  return ret
def run(self, lock):
  """Execute the requested goal phases and return the phase result.

  Checks targets for dependency cycles (timed), optionally sets up
  logging, expands deprecated directory options, attempts the phases,
  and prints a timing report when requested.
  """
  # Report every target involved in a dependency cycle before aborting;
  # the cycle check itself is measured under "parse:check_cycles".
  with self.check_errors("Target contains a dependency cycle") as error:
    with self.timer.timing('parse:check_cycles'):
      for target in self.targets:
        try:
          InternalTarget.check_cycles(target)
        except InternalTarget.CycleException as e:
          # Exception details are not reported; only the offending target id.
          error(target.id)

  logger = None
  if self.options.log or self.options.log_level:
    # Imported lazily so logging machinery is only loaded when enabled.
    from twitter.common.log import init
    from twitter.common.log.options import LogOptions
    LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
    logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      init('goals')
    else:
      init()
    logger = log

  # Expand deprecated recursive/flat directory options into targets.
  if self.options.recursive_directory:
    log.warn('--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
    for dir in self.options.recursive_directory:
      self.add_target_recursive(dir)

  if self.options.target_directory:
    log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
    for dir in self.options.target_directory:
      self.add_target_directory(dir)

  context = Context(self.config, self.options, self.targets, lock=lock, log=logger)

  # Collect requested phases that resolve to no goals at all.
  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
    print('')
    # Fall back to listing the available goals.
    return Phase.execute(context, 'goals')

  if logger:
    logger.debug('Operating on targets: %s', self.targets)

  ret = Phase.attempt(context, self.phases, timer=self.timer if self.options.time else None)

  if self.options.time:
    print('Timing report')
    print('=============')
    self.timer.print_timings()
  return ret
def setup_class(cls):
  """Route disk logging to a scratch directory and build the runner pex once.

  The pex is built into a fresh dist dir and its path cached on the
  class so later test classes reuse it.
  """
  cls.LOG_DIR = tempfile.mkdtemp()
  LogOptions.set_log_dir(cls.LOG_DIR)
  LogOptions.set_disk_log_level('DEBUG')
  log.init('executor_logger')
  if not cls.PEX_PATH:
    dist_dir = tempfile.mkdtemp()
    exit_code = subprocess.call([
        "./pants",
        "--pants-distdir=%s" % dist_dir,
        "binary",
        "src/main/python/apache/thermos/runner:thermos_runner"
    ])
    assert exit_code == 0
    cls.PEX_PATH = os.path.join(dist_dir, 'thermos_runner.pex')
def run(self, lock):
  """Execute the requested goal phases via the GroupEngine.

  Sets up logging and reporting, builds the Context (including the
  build graph and BUILD-file parser), and returns 1 on unknown goals,
  otherwise the engine result.
  """
  # TODO(John Sirois): Consider moving to straight python logging. The divide between the
  # context/work-unit logging and standard python logging doesn't buy us anything.

  # Enable standard python logging for code with no handle to a context/work-unit.
  if self.options.log_level:
    LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
    logdir = self.options.logdir or self.config.get(
        'goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      log.init('goals')
    else:
      log.init()

  # Update the reporting settings, now that we have flags etc.
  def is_console_task():
    # True when any requested goal is backed by a ConsoleTask, which
    # changes how reporting output is routed.
    for phase in self.phases:
      for goal in phase.goals():
        if issubclass(goal.task_type, ConsoleTask):
          return True
    return False

  is_explain = self.options.explain
  update_reporting(self.options, is_console_task() or is_explain, self.run_tracker)

  context = Context(self.config,
                    self.options,
                    self.run_tracker,
                    self.targets,
                    requested_goals=self.requested_goals,
                    build_graph=self.build_graph,
                    build_file_parser=self.build_file_parser,
                    lock=lock)

  # Collect requested phases that resolve to no goals at all.
  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    context.log.error('Unknown goal(s): %s\n' % ' '.join(phase.name for phase in unknown))
    return 1

  engine = GroupEngine()
  return engine.execute(context, self.phases)
def run(self, lock):
  """Execute the requested goal phases via the GroupEngine.

  Sets up logging and reporting, builds the Context (including the
  build graph and BUILD-file parser), and returns 1 on unknown goals,
  otherwise the engine result.
  """
  # TODO(John Sirois): Consider moving to straight python logging. The divide between the
  # context/work-unit logging and standard python logging doesn't buy us anything.

  # Enable standard python logging for code with no handle to a context/work-unit.
  if self.options.log_level:
    LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
    logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      log.init('goals')
    else:
      log.init()

  # Update the reporting settings, now that we have flags etc.
  def is_console_task():
    # True when any requested goal is backed by a ConsoleTask, which
    # changes how reporting output is routed.
    for phase in self.phases:
      for goal in phase.goals():
        if issubclass(goal.task_type, ConsoleTask):
          return True
    return False

  is_explain = self.options.explain
  update_reporting(self.options, is_console_task() or is_explain, self.run_tracker)

  context = Context(
      self.config,
      self.options,
      self.run_tracker,
      self.targets,
      requested_goals=self.requested_goals,
      build_graph=self.build_graph,
      build_file_parser=self.build_file_parser,
      lock=lock)

  # Collect requested phases that resolve to no goals at all.
  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    context.log.error('Unknown goal(s): %s\n' % ' '.join(phase.name for phase in unknown))
    return 1

  engine = GroupEngine()
  return engine.execute(context, self.phases)
# TODO(wickman) These should be constant sets in the Thermos thrift THERMOS_LIVES = (TaskState.ACTIVE, TaskState.CLEANING, TaskState.FINALIZING) THERMOS_TERMINALS = (TaskState.SUCCESS, TaskState.FAILED, TaskState.KILLED, TaskState.LOST) STARTING_STATES = (ScheduleStatus.STARTING, ScheduleStatus.ASSIGNED) TASK_ID = 'gc_executor_task_id' if 'THERMOS_DEBUG' in os.environ: from twitter.common import log from twitter.common.log.options import LogOptions LogOptions.set_disk_log_level('NONE') LogOptions.set_stderr_log_level('DEBUG') log.init('test_gc_executor') def thread_yield(): time.sleep(0.1) def setup_tree(td, lose=False): safe_rmtree(td) # TODO(wickman) These should be referred as resources= in the python_target instead. shutil.copytree('src/resources/org/apache/thermos/root', td) if lose: lost_age = time.time() - ( 2 * ThinTestThermosGCExecutor.MAX_CHECKPOINT_TIME_DRIFT.as_(
import tempfile
import unittest

from mysos.scheduler.scheduler import DEFAULT_TASK_CPUS, DEFAULT_TASK_MEM, DEFAULT_TASK_DISK
from mysos.scheduler.state import (LocalStateProvider, MySQLCluster, MySQLTask, Scheduler)
from mysos.scheduler.password import gen_encryption_key, PasswordBox

from mesos.interface.mesos_pb2 import FrameworkInfo

# Opt-in verbose logging for local debugging of these tests.
if 'MYSOS_DEBUG' in os.environ:
  from twitter.common import log
  from twitter.common.log.options import LogOptions
  LogOptions.set_stderr_log_level('google:DEBUG')
  LogOptions.set_simple(True)
  log.init('mysos_tests')

class TestState(unittest.TestCase):
  """Tests for LocalStateProvider persistence of scheduler state."""

  def setUp(self):
    # Fresh scratch directory per test; the provider persists under it.
    self._tmpdir = tempfile.mkdtemp()
    self._state_provider = LocalStateProvider(self._tmpdir)

  def tearDown(self):
    # ignore_errors=True: best-effort cleanup of the scratch directory.
    shutil.rmtree(self._tmpdir, True)

  # NOTE(review): this chunk of the file is truncated below — the body of
  # test_scheduler_state continues past the visible source.
  def test_scheduler_state(self):
    expected = Scheduler(
        FrameworkInfo(user='******', name='test_fw_name', checkpoint=True))
def setup_class(cls):
  """Route disk logging for the whole test class to a scratch directory.

  Order matters: the log dir and level must be configured before
  log.init() is called.
  """
  cls.LOG_DIR = tempfile.mkdtemp()
  LogOptions.set_log_dir(cls.LOG_DIR)
  LogOptions.set_disk_log_level('DEBUG')
  log.init('executor_logger')
def run(self):
  """Execute the requested goals via the RoundEngine.

  Sets up logging (suppressing init output when --quiet), configures
  reporting, applies --exclude-target-regexp filtering to the target
  roots, builds the Context, and returns 1 on unknown goals, otherwise
  the engine result.
  """
  # TODO(John Sirois): Consider moving to straight python logging. The divide between the
  # context/work-unit logging and standard python logging doesn't buy us anything.

  # Enable standard python logging for code with no handle to a context/work-unit.
  if self.global_options.level:
    LogOptions.set_stderr_log_level((self.global_options.level or 'info').upper())
    logdir = self.global_options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      prev_log_level = None
      # If quiet, temporarily change stderr log level to kill init's output.
      if self.global_options.quiet:
        prev_log_level = LogOptions.loglevel_name(LogOptions.stderr_log_level())
        # loglevel_name can fail, so only change level if we were able to get the current one.
        if prev_log_level is not None:
          LogOptions.set_stderr_log_level(LogOptions._LOG_LEVEL_NONE_KEY)
      log.init('goals')
      if prev_log_level is not None:
        LogOptions.set_stderr_log_level(prev_log_level)
    else:
      log.init()

  # Update the reporting settings, now that we have flags etc.
  def is_quiet_task():
    for goal in self.goals:
      if goal.has_task_of_type(QuietTaskMixin):
        return True
    return False

  # Target specs are mapped to the patterns which match them, if any. This variable is a key for
  # specs which don't match any exclusion regexes. We know it won't already be in the list of
  # patterns, because the asterisks in its name make it an invalid regex.
  _UNMATCHED_KEY = '** unmatched **'

  def targets_by_pattern(targets, patterns):
    # Group targets under the first pattern that matches their spec,
    # or under _UNMATCHED_KEY when no pattern matches.
    mapping = defaultdict(list)
    for target in targets:
      matched_pattern = None
      for pattern in patterns:
        if re.search(pattern, target.address.spec) is not None:
          matched_pattern = pattern
          break
      if matched_pattern is None:
        mapping[_UNMATCHED_KEY].append(target)
      else:
        mapping[matched_pattern].append(target)
    return mapping

  is_explain = self.global_options.explain
  update_reporting(self.global_options, is_quiet_task() or is_explain, self.run_tracker)

  if self.global_options.exclude_target_regexp:
    excludes = self.global_options.exclude_target_regexp
    log.debug('excludes:\n {excludes}'.format(excludes='\n '.join(excludes)))
    by_pattern = targets_by_pattern(self.targets, excludes)
    # Only the unmatched targets survive the exclusion filter.
    self.targets = by_pattern[_UNMATCHED_KEY]
    # The rest of this if-statement is just for debug logging.
    log.debug('Targets after excludes: {targets}'.format(
        targets=', '.join(t.address.spec for t in self.targets)))
    excluded_count = sum(len(by_pattern[p]) for p in excludes)
    log.debug('Excluded {count} target{plural}.'.format(count=excluded_count,
        plural=('s' if excluded_count != 1 else '')))
    for pattern in excludes:
      log.debug('Targets excluded by pattern {pattern}\n {targets}'.format(pattern=pattern,
          targets='\n '.join(t.address.spec for t in by_pattern[pattern])))

  context = Context(
      config=self.config,
      new_options=self.new_options,
      run_tracker=self.run_tracker,
      target_roots=self.targets,
      requested_goals=self.requested_goals,
      build_graph=self.build_graph,
      build_file_parser=self.build_file_parser,
      address_mapper=self.address_mapper,
      spec_excludes=self.get_spec_excludes()
  )

  # Collect requested goals that have no tasks registered.
  unknown = []
  for goal in self.goals:
    if not goal.ordered_task_names():
      unknown.append(goal)

  if unknown:
    context.log.error('Unknown goal(s): %s\n' % ' '.join(goal.name for goal in unknown))
    return 1

  engine = RoundEngine()
  return engine.execute(context, self.goals)
import time
import unittest

import zookeeper

from twitter.common.zookeeper.client import ZooKeeper, ZooDefs
from twitter.common.zookeeper.test_server import ZookeeperServer
from twitter.common.zookeeper.group.group import ActiveGroup, Group, Membership

# Opt-in verbose logging for local debugging of these tests.
if os.getenv('ZOOKEEPER_TEST_DEBUG'):
  from twitter.common import log
  from twitter.common.log.options import LogOptions
  LogOptions.set_stderr_log_level('DEBUG')
  LogOptions.set_disk_log_level('NONE')
  LogOptions.set_log_dir('/tmp')
  log.init('client_test')

class AlternateGroup(Group):
  # Non-default prefix used to exercise prefix handling in group tests.
  MEMBER_PREFIX = 'herpderp_'

class TestGroup(unittest.TestCase):
  """ZooKeeper Group integration tests."""

  # Group implementation under test; subclasses may override.
  GroupImpl = Group
  # Generous timeouts so slow CI machines don't flake.
  MAX_EVENT_WAIT_SECS = 30.0
  CONNECT_TIMEOUT_SECS = 10.0
  CONNECT_RETRIES = 6

  # NOTE(review): this chunk of the file is truncated below — make_zk's
  # body continues past the visible source.
  @classmethod
  def make_zk(cls, ensemble, **kw):
    return ZooKeeper(ensemble,
from apache.aurora.executor.common.sandbox import DirectorySandbox, SandboxProvider
from apache.aurora.executor.common.status_checker import ChainedStatusChecker
from apache.aurora.executor.common.task_runner import TaskError
from apache.aurora.executor.status_manager import StatusManager
from apache.aurora.executor.thermos_task_runner import (
    DefaultThermosTaskRunnerProvider,
    ThermosTaskRunner)
from apache.thermos.core.runner import TaskRunner
from apache.thermos.monitoring.monitor import TaskMonitor
from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME
from gen.apache.aurora.api.ttypes import AssignedTask, ExecutorConfig, JobKey, TaskConfig

# Opt-in verbose logging for local debugging of these tests.
if 'THERMOS_DEBUG' in os.environ:
  LogOptions.set_stderr_log_level('google:DEBUG')
  LogOptions.set_simple(True)
  log.init('executor_logger')

class FastThermosExecutor(AuroraExecutor):
  # Don't wait around on stop — keeps the tests fast.
  STOP_WAIT = Amount(0, Time.SECONDS)

class FastStatusManager(StatusManager):
  # Poll aggressively so state transitions are observed quickly in tests.
  POLL_WAIT = Amount(10, Time.MILLISECONDS)

class DefaultTestSandboxProvider(SandboxProvider):
  """Sandbox provider that hands out throwaway temp-dir sandboxes."""

  def from_assigned_task(self, assigned_task, **kwargs):
    # The assigned task is ignored; every task gets a fresh temp dir.
    return DirectorySandbox(safe_mkdtemp(), **kwargs)
def run(self, lock):
  """Execute the requested goal phases and return the phase result.

  Optionally sets up timing and logging, expands deprecated directory
  options, builds the Context, and attempts the requested phases.
  """
  timer = None
  if self.options.time:
    # Minimal timer adapter expected by Phase.attempt.
    class Timer(object):
      def now(self):
        return time.time()
      def log(self, message):
        print(message)
    timer = Timer()

  logger = None
  if self.options.log or self.options.log_level:
    # Imported lazily so logging machinery is only loaded when enabled.
    from twitter.common.log import init
    from twitter.common.log.options import LogOptions
    LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
    logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      init('goals')
    else:
      init()
    logger = log

  # Expand deprecated recursive/flat directory options into targets.
  if self.options.recursive_directory:
    log.warn(
        '--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
    for dir in self.options.recursive_directory:
      self.add_target_recursive(dir)

  if self.options.target_directory:
    log.warn(
        '--all is deprecated, use a target spec with the form [dir]: instead')
    for dir in self.options.target_directory:
      self.add_target_directory(dir)

  context = Context(self.config, self.options, self.targets, lock=lock, log=logger)

  # Collect requested phases that resolve to no goals at all.
  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
    print('')
    # Fall back to listing the available goals.
    return Phase.execute(context, 'goals')

  if logger:
    logger.debug('Operating on targets: %s', self.targets)

  return Phase.attempt(context, self.phases, timer=timer)
def run(self, lock):
  """Execute the requested goal phases and return the phase result.

  Optionally initializes logging, expands deprecated directory options,
  builds the Context, attempts the phases, optionally kills lingering
  nailgun processes, and prints a timing report when requested.
  """
  if self.options.dry_run:
    # Fixed: `print '...'` is Python-2-only statement syntax; the
    # single-argument parenthesized form behaves identically on 2 and 3.
    print('****** Dry Run ******')

  logger = None
  if self.options.log or self.options.log_level:
    # Imported lazily so logging machinery is only loaded when enabled.
    from twitter.common.log import init
    from twitter.common.log.options import LogOptions
    LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
    logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      init('goals')
    else:
      init()
    logger = log

  # Expand deprecated recursive/flat directory options into targets.
  if self.options.recursive_directory:
    log.warn(
        '--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
    for dir in self.options.recursive_directory:
      self.add_target_recursive(dir)

  if self.options.target_directory:
    log.warn(
        '--all is deprecated, use a target spec with the form [dir]: instead')
    for dir in self.options.target_directory:
      self.add_target_directory(dir)

  context = Context(
      self.config,
      self.options,
      self.targets,
      requested_goals=self.requested_goals,
      lock=lock,
      log=logger,
      timer=self.timer if self.options.time else None)

  # Collect requested phases that resolve to no goals at all.
  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
    print('')
    # Fall back to listing the available goals.
    return Phase.execute(context, 'goals')

  if logger:
    logger.debug('Operating on targets: %s', self.targets)

  ret = Phase.attempt(context, self.phases)

  # Best-effort cleanup of background nailgun JVMs when requested
  # explicitly or via [nailgun] autokill config.
  if self.options.cleanup_nailguns or self.config.get('nailgun', 'autokill', default=False):
    if log:
      log.debug('auto-killing nailguns')
    if NailgunTask.killall:
      NailgunTask.killall(log)

  if self.options.time:
    print('Timing report')
    print('=============')
    self.timer.print_timings()
  return ret
def _do_run(self):
  """Execute the requested goals via the RoundEngine.

  Sets up logging (suppressing init output when --quiet), configures
  reporting, builds the Context, and returns 1 on unknown goals,
  otherwise the engine result.
  """
  # TODO(John Sirois): Consider moving to straight python logging. The divide between the
  # context/work-unit logging and standard python logging doesn't buy us anything.
  # TODO(Eric Ayers) We are missing log messages. Set the log level earlier

  # Enable standard python logging for code with no handle to a context/work-unit.
  if self.global_options.level:
    LogOptions.set_stderr_log_level((self.global_options.level or 'info').upper())
    logdir = self.global_options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)
      prev_log_level = None
      # If quiet, temporarily change stderr log level to kill init's output.
      if self.global_options.quiet:
        prev_log_level = LogOptions.loglevel_name(LogOptions.stderr_log_level())
        # loglevel_name can fail, so only change level if we were able to get the current one.
        if prev_log_level is not None:
          LogOptions.set_stderr_log_level(LogOptions._LOG_LEVEL_NONE_KEY)
      log.init('goals')
      if prev_log_level is not None:
        LogOptions.set_stderr_log_level(prev_log_level)
    else:
      log.init()

  # Update the reporting settings, now that we have flags etc.
  def is_quiet_task():
    for goal in self.goals:
      if goal.has_task_of_type(QuietTaskMixin):
        return True
    return False

  is_explain = self.global_options.explain
  update_reporting(self.global_options, is_quiet_task() or is_explain, self.run_tracker)

  context = Context(
      config=self.config,
      options=self.options,
      run_tracker=self.run_tracker,
      target_roots=self.targets,
      requested_goals=self.requested_goals,
      build_graph=self.build_graph,
      build_file_parser=self.build_file_parser,
      address_mapper=self.address_mapper,
      spec_excludes=self.get_spec_excludes()
  )

  # Collect requested goals that have no tasks registered.
  unknown = []
  for goal in self.goals:
    if not goal.ordered_task_names():
      unknown.append(goal)

  if unknown:
    context.log.error('Unknown goal(s): %s\n' % ' '.join(goal.name for goal in unknown))
    return 1

  engine = RoundEngine()
  return engine.execute(context, self.goals)
ServerInfo,
SessionKey,
TaskConfig,
TaskConstraint,
TaskQuery,
ValueConstraint,
)
# NOTE(review): the lines above are the tail of an import list whose
# start is outside this visible chunk.

# Debug output helper -> enables log.* in source.
if "UPDATER_DEBUG" in environ:
  from twitter.common import log
  from twitter.common.log.options import LogOptions
  LogOptions.set_disk_log_level("NONE")
  LogOptions.set_stderr_log_level("DEBUG")
  log.init("test_updater")

# Shared server info stamped onto every fake response.
SERVER_INFO = ServerInfo(thriftAPIVersion=THRIFT_API_VERSION)

def make_response(code, msg="test"):
  # Build a canned thrift Response with a single detail message.
  return Response(responseCode=code, serverInfo=SERVER_INFO, details=[ResponseDetail(message=msg)])

class FakeConfig(object):
  """Minimal stand-in for a job config used by the updater tests."""

  def __init__(self, role, name, env, update_config):
    self._role = role
    self._env = env
    self._name = name
    self._update_config = update_config
    # Populated later by the test; starts unset.
    self.job_config = None
def run(self):
  """Configure logging/reporting, filter targets via exclusion regexes, then execute the goals.

  Returns the engine's exit code; returns 1 early if any requested goal has no
  registered tasks.
  """
  # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
  # context/work-unit logging and standard python logging doesn't buy us anything.

  # Enable standard python logging for code with no handle to a context/work-unit.
  if self.global_options.level:
    LogOptions.set_stderr_log_level((self.global_options.level or 'info').upper())
  logdir = self.global_options.logdir or self.config.get('goals', 'logdir', default=None)
  if logdir:
    safe_mkdir(logdir)
    LogOptions.set_log_dir(logdir)

    prev_log_level = None
    # If quiet, temporarily change stderr log level to kill init's output.
    if self.global_options.quiet:
      prev_log_level = LogOptions.loglevel_name(LogOptions.stderr_log_level())
      # loglevel_name can fail, so only change level if we were able to get the current one.
      if prev_log_level is not None:
        LogOptions.set_stderr_log_level(LogOptions._LOG_LEVEL_NONE_KEY)
    log.init('goals')
    # Restore the caller-visible stderr level now that init's banner has been suppressed.
    if prev_log_level is not None:
      LogOptions.set_stderr_log_level(prev_log_level)
  else:
    log.init()

  # Update the reporting settings, now that we have flags etc.
  def is_quiet_task():
    # True when any requested goal carries a task type marked with QuietTaskMixin.
    for goal in self.goals:
      if goal.has_task_of_type(QuietTaskMixin):
        return True
    return False

  # Target specs are mapped to the patterns which match them, if any. This variable is a key for
  # specs which don't match any exclusion regexes. We know it won't already be in the list of
  # patterns, because the asterisks in its name make it an invalid regex.
  _UNMATCHED_KEY = '** unmatched **'

  def targets_by_pattern(targets, patterns):
    # Bucket each target under the first pattern that matches its spec, or _UNMATCHED_KEY.
    mapping = defaultdict(list)
    for target in targets:
      matched_pattern = None
      for pattern in patterns:
        if re.search(pattern, target.address.spec) is not None:
          matched_pattern = pattern
          break
      if matched_pattern is None:
        mapping[_UNMATCHED_KEY].append(target)
      else:
        mapping[matched_pattern].append(target)
    return mapping

  is_explain = self.global_options.explain
  update_reporting(self.global_options, is_quiet_task() or is_explain, self.run_tracker)

  if self.global_options.exclude_target_regexp:
    excludes = self.global_options.exclude_target_regexp
    log.debug('excludes:\n {excludes}'.format(excludes='\n '.join(excludes)))
    by_pattern = targets_by_pattern(self.targets, excludes)
    # Only targets matched by no exclusion pattern survive.
    self.targets = by_pattern[_UNMATCHED_KEY]

    # The rest of this if-statement is just for debug logging.
    log.debug('Targets after excludes: {targets}'.format(
        targets=', '.join(t.address.spec for t in self.targets)))
    excluded_count = sum(len(by_pattern[p]) for p in excludes)
    log.debug('Excluded {count} target{plural}.'.format(
        count=excluded_count, plural=('s' if excluded_count != 1 else '')))
    for pattern in excludes:
      log.debug('Targets excluded by pattern {pattern}\n {targets}'.format(
          pattern=pattern, targets='\n '.join(t.address.spec for t in by_pattern[pattern])))

  context = Context(
    config=self.config,
    new_options=self.new_options,
    run_tracker=self.run_tracker,
    target_roots=self.targets,
    requested_goals=self.requested_goals,
    build_graph=self.build_graph,
    build_file_parser=self.build_file_parser,
    address_mapper=self.address_mapper,
    spec_excludes=self.get_spec_excludes())

  # Reject the run outright if any requested goal has no tasks registered.
  unknown = []
  for goal in self.goals:
    if not goal.ordered_task_names():
      unknown.append(goal)

  if unknown:
    context.log.error('Unknown goal(s): %s\n' % ' '.join(goal.name for goal in unknown))
    return 1

  engine = RoundEngine()
  return engine.execute(context, self.goals)
ScheduleStatusResult,
  ScheduledTask,
  TaskConfig,
  TaskQuery,
)

from mox import MockObject, Replay, Verify
from pytest import raises

# Debug output helper -> enables log.* in source.
if 'UPDATER_DEBUG' in environ:
  from twitter.common import log
  from twitter.common.log.options import LogOptions
  LogOptions.set_disk_log_level('NONE')
  LogOptions.set_stderr_log_level('DEBUG')
  log.init('test_updater')


class FakeConfig(object):
  # Minimal stand-in for a job config object; exposes only the fields the tests read.
  def __init__(self, role, name, env, update_config):
    self._role = role
    self._env = env
    self._name = name
    self._update_config = update_config
    # Populated later by the test; presumably a JobConfiguration -- verify against callers.
    self.job_config = None

  def role(self):
    return self._role

  def name(self):
    return self._name
from rainman.bounded_map import BoundedDecayingMap

import pytest
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.testing import AsyncTestCase, gen_test
from twitter.common.quantity import Amount, Time
from twitter.common import log
from twitter.common.log.options import LogOptions

# Route all log output to stderr at DEBUG for test runs; no disk logging.
LogOptions.set_disk_log_level("NONE")
LogOptions.set_stderr_log_level("google:DEBUG")
log.init("derp")


class TestClock(object):
  """Manually-advanced clock: time only moves when a test calls advance()."""

  def __init__(self, initial=0):
    self._time = initial

  def advance(self, ticks):
    # Move the clock forward (or backward, if ticks is negative) by the given amount.
    self._time += ticks

  def time(self):
    # Mirrors time.time()'s float return type.
    return float(self._time)


# Shared clock instance used by the tests in this module.
CLOCK = TestClock()
# Concrete IOLoop implementation class in use; presumably used to parametrize or
# restore tornado's loop -- verify against callers.
io_loop_impl = IOLoop.current().__class__
# TODO(wickman) These should be constant sets in the Thermos thrift THERMOS_LIVES = (TaskState.ACTIVE, TaskState.CLEANING, TaskState.FINALIZING) THERMOS_TERMINALS = (TaskState.SUCCESS, TaskState.FAILED, TaskState.KILLED, TaskState.LOST) STARTING_STATES = (ScheduleStatus.STARTING, ScheduleStatus.ASSIGNED) TASK_ID = 'gc_executor_task_id' if 'THERMOS_DEBUG' in os.environ: from twitter.common import log from twitter.common.log.options import LogOptions LogOptions.set_disk_log_level('NONE') LogOptions.set_stderr_log_level('DEBUG') log.init('test_gc_executor') def thread_yield(): time.sleep(0.1) def setup_tree(td, lose=False): safe_rmtree(td) # TODO(wickman) These should be referred as resources= in the python_target instead. shutil.copytree('src/resources/org/apache/thermos/root', td) if lose: lost_age = time.time() - ( 2 * ThinTestThermosGCExecutor.MAX_CHECKPOINT_TIME_DRIFT.as_(Time.SECONDS))
from rainman.testing import make_ensemble

import mock
from tornado import gen
from tornado.testing import (
  AsyncTestCase,
  gen_test,
)
from twitter.common.quantity import Amount, Time
from twitter.common import log
from twitter.common.log.options import LogOptions

# Route all log output to stderr at DEBUG for test runs; no disk logging.
LogOptions.set_disk_log_level('NONE')
LogOptions.set_stderr_log_level('google:DEBUG')
log.init('derp')


class FastScheduler(Scheduler):
  # Shrink the scheduler's timing intervals so integration tests finish quickly.
  CONNECTION_REFRESH_INTERVAL = Amount(100, Time.MILLISECONDS)
  CHOKE_INTERVAL = Amount(250, Time.MILLISECONDS)


class TestIntegration(AsyncTestCase):
  @gen_test
  def test_single_seeder_single_leecher(self):
    # Build an in-memory swarm with exactly one seeder and one leecher.
    torrent, seeders, leechers = make_ensemble(
      self.io_loop, num_seeders=1, num_leechers=1, fs=MemoryFilesystem())
    seeder = seeders[0].client
    leecher = leechers[0].client
def _do_run(self):
  """Set up logging/reporting per the global options and run the requested goals.

  Returns the engine's exit code; returns 1 early for unknown goals.
  """
  # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
  # context/work-unit logging and standard python logging doesn't buy us anything.

  # TODO(Eric Ayers) We are missing log messages. Set the log level earlier
  # Enable standard python logging for code with no handle to a context/work-unit.
  if self.global_options.level:
    LogOptions.set_stderr_log_level((self.global_options.level or 'info').upper())
  logdir = self.global_options.logdir or self.config.get('goals', 'logdir', default=None)
  if logdir:
    safe_mkdir(logdir)
    LogOptions.set_log_dir(logdir)

    prev_log_level = None
    # If quiet, temporarily change stderr log level to kill init's output.
    if self.global_options.quiet:
      prev_log_level = LogOptions.loglevel_name(LogOptions.stderr_log_level())
      # loglevel_name can fail, so only change level if we were able to get the current one.
      if prev_log_level is not None:
        LogOptions.set_stderr_log_level(LogOptions._LOG_LEVEL_NONE_KEY)
    log.init('goals')
    # Restore the previous stderr level once init's banner has been suppressed.
    if prev_log_level is not None:
      LogOptions.set_stderr_log_level(prev_log_level)
  else:
    log.init()

  # Update the reporting settings, now that we have flags etc.
  def is_quiet_task():
    # True when any requested goal carries a task type marked with QuietTaskMixin.
    for goal in self.goals:
      if goal.has_task_of_type(QuietTaskMixin):
        return True
    return False

  is_explain = self.global_options.explain
  update_reporting(self.global_options, is_quiet_task() or is_explain, self.run_tracker)

  context = Context(
    config=self.config,
    options=self.options,
    run_tracker=self.run_tracker,
    target_roots=self.targets,
    requested_goals=self.requested_goals,
    build_graph=self.build_graph,
    build_file_parser=self.build_file_parser,
    address_mapper=self.address_mapper,
    spec_excludes=self.get_spec_excludes())

  # Reject the run outright if any requested goal has no tasks registered.
  unknown = []
  for goal in self.goals:
    if not goal.ordered_task_names():
      unknown.append(goal)

  if unknown:
    context.log.error('Unknown goal(s): %s\n' % ' '.join(goal.name for goal in unknown))
    return 1

  engine = RoundEngine()
  return engine.execute(context, self.goals)
def run(self, lock):
    """Configure logging/reporting, filter targets via exclusion regexes, then execute the goals.

    Args:
      lock: build lock handed through to the Context.

    Returns the engine's exit code; returns 1 early if any requested goal has no
    registered tasks.
    """
    # TODO(John Sirois): Consider moving to straight python logging. The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.options.log_level:
        LogOptions.set_stderr_log_level((self.options.log_level or "info").upper())
    logdir = self.options.logdir or self.config.get("goals", "logdir", default=None)
    if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
        log.init("goals")
    else:
        log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_quiet_task():
        # True when any requested goal carries a task type marked with QuietTaskMixin.
        for goal in self.goals:
            if goal.has_task_of_type(QuietTaskMixin):
                return True
        return False

    # Target specs are mapped to the patterns which match them, if any. This variable is a key for
    # specs which don't match any exclusion regexes. We know it won't already be in the list of
    # patterns, because the asterisks in its name make it an invalid regex.
    _UNMATCHED_KEY = "** unmatched **"

    def targets_by_pattern(targets, patterns):
        # Bucket each target under the first pattern that matches its spec, or _UNMATCHED_KEY.
        mapping = defaultdict(list)
        for target in targets:
            matched_pattern = None
            for pattern in patterns:
                if re.search(pattern, target.address.spec) is not None:
                    matched_pattern = pattern
                    break
            if matched_pattern is None:
                mapping[_UNMATCHED_KEY].append(target)
            else:
                mapping[matched_pattern].append(target)
        return mapping

    is_explain = self.options.explain
    update_reporting(self.options, is_quiet_task() or is_explain, self.run_tracker)

    if self.options.target_excludes:
        excludes = self.options.target_excludes
        log.debug("excludes:\n {excludes}".format(excludes="\n ".join(excludes)))
        by_pattern = targets_by_pattern(self.targets, excludes)
        # Only targets matched by no exclusion pattern survive.
        self.targets = by_pattern[_UNMATCHED_KEY]

        # The rest of this if-statement is just for debug logging.
        log.debug(
            "Targets after excludes: {targets}".format(
                targets=", ".join(t.address.spec for t in self.targets)
            )
        )
        excluded_count = sum(len(by_pattern[p]) for p in excludes)
        log.debug(
            "Excluded {count} target{plural}.".format(
                count=excluded_count, plural=("s" if excluded_count != 1 else "")
            )
        )
        for pattern in excludes:
            log.debug(
                "Targets excluded by pattern {pattern}\n {targets}".format(
                    pattern=pattern, targets="\n ".join(t.address.spec for t in by_pattern[pattern])
                )
            )

    context = Context(
        config=self.config,
        options=self.options,
        run_tracker=self.run_tracker,
        target_roots=self.targets,
        requested_goals=self.requested_goals,
        build_graph=self.build_graph,
        build_file_parser=self.build_file_parser,
        address_mapper=self.address_mapper,
        lock=lock,
    )

    # Reject the run outright if any requested goal has no tasks registered.
    unknown = []
    for goal in self.goals:
        if not goal.ordered_task_names():
            unknown.append(goal)

    if unknown:
        context.log.error("Unknown goal(s): %s\n" % " ".join(goal.name for goal in unknown))
        return 1

    engine = RoundEngine()
    return engine.execute(context, self.goals)
from apache.aurora.executor.common.task_runner import TaskError
from apache.aurora.executor.status_manager import StatusManager
from apache.aurora.executor.thermos_task_runner import (
  DefaultThermosTaskRunnerProvider,
  ThermosTaskRunner
)
from apache.thermos.core.runner import TaskRunner
from apache.thermos.monitoring.monitor import TaskMonitor
from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME
from gen.apache.aurora.api.ttypes import AssignedTask, ExecutorConfig, JobKey, TaskConfig

# Debug output helper -> enables log.* when THERMOS_DEBUG is set in the environment.
if 'THERMOS_DEBUG' in os.environ:
  LogOptions.set_stderr_log_level('google:DEBUG')
  LogOptions.set_simple(True)
  log.init('executor_logger')


class FastThermosExecutor(AuroraExecutor):
  # Don't wait around on stop; keeps executor shutdown instant in tests.
  STOP_WAIT = Amount(0, Time.SECONDS)


class FastStatusManager(StatusManager):
  # Poll far more aggressively than production so tests observe status changes quickly.
  POLL_WAIT = Amount(10, Time.MILLISECONDS)


class DefaultTestSandboxProvider(SandboxProvider):
  # Hands each task a throwaway temp-dir sandbox.
  def from_assigned_task(self, assigned_task, **kwargs):
    return DirectorySandbox(safe_mkdtemp(), **kwargs)
from kazoo.handlers.threading import SequentialThreadingHandler
from mesos.interface.mesos_pb2 import DRIVER_STOPPED, FrameworkInfo
from twitter.common import log
from twitter.common.concurrent import deadline
from twitter.common.dirutil import safe_mkdtemp
from twitter.common.metrics import RootMetrics
from twitter.common.quantity import Amount, Time
from zake.fake_client import FakeClient
from zake.fake_storage import FakeStorage

# Debug output helper -> enables log.* when MYSOS_DEBUG is set in the environment.
if 'MYSOS_DEBUG' in os.environ:
  from twitter.common.log.options import LogOptions
  LogOptions.set_stderr_log_level('google:DEBUG')
  LogOptions.set_simple(True)
  log.init('mysos_tests')


def test_scheduler_runs():
  """
  Verifies that the scheduler successfully launches 3 "no-op" MySQL tasks.

  NOTE: Due to the limitation of zake the scheduler's ZK operations are not propagated to
  executors in separate processes but they are unit-tested separately.
  """
  # Imported lazily so the module can at least be collected without the native bindings.
  import mesos.native

  # Make sure fake_mysos_executor.pex is available to be fetched by Mesos slave.
  assert os.path.isfile('dist/fake_mysos_executor.pex')

  # In-memory fake ZooKeeper backing; shared storage lets multiple fake clients cooperate.
  storage = FakeStorage(SequentialThreadingHandler())
  zk_client = FakeClient(storage=storage)
def run(self, lock):
  """Configure logging/coloring, expand deprecated directory options, then attempt the phases.

  Args:
    lock: build lock handed through to the Context.

  Returns the result of Phase.attempt (or of Phase.execute(context, 'goals') when an
  unknown goal was requested).
  """
  if self.options.dry_run:
    print('****** Dry Run ******')

  logger = None
  if self.options.log or self.options.log_level:
    log.LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
    logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      log.LogOptions.set_log_dir(logdir)
      log.init('goals')
    else:
      log.init()
    logger = log

  if not self.options.no_color:
    def colorwrapper(func, clrname):
      # Wrap a log function so its message is colorized before emission.
      @wraps(func)
      def wrapper(msg, *args, **kwargs):
        return func(clrname(msg), *args, **kwargs)
      return wrapper

    # NOTE(review): monkey-patches the shared log module for the remainder of the process.
    log.info = colorwrapper(log.info, green)
    log.warn = colorwrapper(log.warn, yellow)
    log.debug = colorwrapper(log.debug, cyan)

  # Expand deprecated directory flags into target specs.
  if self.options.recursive_directory:
    log.warn('--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
    for dir in self.options.recursive_directory:
      self.add_target_recursive(dir)

  if self.options.target_directory:
    log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
    for dir in self.options.target_directory:
      self.add_target_directory(dir)

  context = Context(
    self.config,
    self.options,
    self.targets,
    requested_goals=self.requested_goals,
    lock=lock,
    log=logger,
    timer=self.timer if self.options.time else None)

  unknown = []
  for phase in self.phases:
    if not phase.goals():
      unknown.append(phase)

  if unknown:
    # Unknown goal requested: print the list and fall back to running the 'goals' phase.
    print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
    print('')
    return Phase.execute(context, 'goals')

  if logger:
    logger.debug('Operating on targets: %s' % self.targets)

  ret = Phase.attempt(context, self.phases)

  # Optionally reap background nailgun processes once the build is done.
  if self.options.cleanup_nailguns or self.config.get('nailgun', 'autokill', default=False):
    if log:
      log.debug('auto-killing nailguns')
    if NailgunTask.killall:
      NailgunTask.killall(log)

  if self.options.time:
    print('Timing report')
    print('=============')
    self.timer.print_timings()

  return ret
ScheduledTask,
  TaskConfig,
  TaskQuery,
)

from mox import MockObject, Replay, Verify
from pytest import raises

# Debug output helper -> enables log.* in source.
if 'UPDATER_DEBUG' in environ:
  from twitter.common import log
  from twitter.common.log.options import LogOptions
  LogOptions.set_disk_log_level('NONE')
  LogOptions.set_stderr_log_level('DEBUG')
  log.init('test_updater')


class FakeConfig(object):
  # Minimal stand-in for a job config object; exposes only the fields the tests read.
  def __init__(self, role, name, env, update_config):
    self._role = role
    self._env = env
    self._name = name
    self._update_config = update_config
    # Populated later by the test; presumably a JobConfiguration -- verify against callers.
    self.job_config = None

  def role(self):
    return self._role

  def name(self):
    return self._name
from apache.aurora.executor.common.health_checker import HealthCheckerProvider
from apache.aurora.executor.common.sandbox import DirectorySandbox, SandboxProvider
from apache.aurora.executor.common.status_checker import ChainedStatusChecker
from apache.aurora.executor.common.task_runner import TaskError
from apache.aurora.executor.status_manager import StatusManager
from apache.aurora.executor.thermos_task_runner import DefaultThermosTaskRunnerProvider, ThermosTaskRunner
from apache.thermos.core.runner import TaskRunner
from apache.thermos.monitoring.monitor import TaskMonitor
from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME
from gen.apache.aurora.api.ttypes import AssignedTask, ExecutorConfig, Identity, JobKey, TaskConfig

# Debug output helper -> enables log.* when THERMOS_DEBUG is set in the environment.
if "THERMOS_DEBUG" in os.environ:
    LogOptions.set_stderr_log_level("google:DEBUG")
    LogOptions.set_simple(True)
    log.init("executor_logger")


class FastThermosExecutor(AuroraExecutor):
    # Don't wait around on stop; keeps executor shutdown instant in tests.
    STOP_WAIT = Amount(0, Time.SECONDS)


class FastStatusManager(StatusManager):
    # Poll far more aggressively than production so tests observe status changes quickly.
    POLL_WAIT = Amount(10, Time.MILLISECONDS)


class DefaultTestSandboxProvider(SandboxProvider):
    # Hands each task a throwaway temp-dir sandbox.
    def from_assigned_task(self, assigned_task):
        return DirectorySandbox(safe_mkdtemp())