Example #1
def setup():
    LogOptions.set_stderr_log_level('NONE')

    app.add_option('--iface',
                   default='eth0',
                   type=str,
                   help='The interface to sniff on')
    app.add_option('--port',
                   default=2889,
                   type=int,
                   help='The ZAB port used by the leader')
    app.add_option('-c',
                   '--colors',
                   default=False,
                   action='store_true',
                   help='Color each learner/leader stream differently')
    app.add_option('--dump-bad-packet',
                   default=False,
                   action='store_true',
                   help='Dump packets that cannot be deserialized')
    app.add_option(
        '--include-pings',
        default=False,
        action='store_true',
        help='Whether to include pings sent from learners to the leader')
    app.add_option('--version', default=False, action='store_true')
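A note on how these setup() functions are wired: the sketch below shows a plausible entry point using twitter.common.app, reusing one option from Example #1. The main() body and the __main__ guard are illustrative assumptions, not part of the snippet above.

from twitter.common import app
from twitter.common.log.options import LogOptions


def setup():
    LogOptions.set_stderr_log_level('NONE')
    app.add_option('--iface', default='eth0', type=str,
                   help='The interface to sniff on')


def main(args, options):
    # app.main() parses the registered options and hands them to main().
    print('sniffing on %s' % options.iface)


if __name__ == '__main__':
    setup()
    app.main()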
Example #2
def setup():
    LogOptions.set_stderr_log_level('NONE')

    app.add_option('--iface', default='eth0', type=str)
    app.add_option('--port', default=3888, type=int)
    app.add_option('-c', '--colors', default=False, action='store_true')
    app.add_option('--dump-bad-packet', default=False, action='store_true')
    app.add_option('--version', default=False, action='store_true')
Example #3
def setup():
  LogOptions.set_stderr_log_level('NONE')

  app.add_option('--iface', default='eth0', type=str)
  app.add_option('--port', default=3888, type=int)
  app.add_option('-c', '--colors', default=False, action='store_true')
  app.add_option('--dump-bad-packet', default=False, action='store_true')
  app.add_option('--version', default=False, action='store_true')
Example #4
    def run(self, lock):
        if self.options.dry_run:
            print "****** Dry Run ******"

        logger = None
        if self.options.log or self.options.log_level:
            from twitter.common.log import init
            from twitter.common.log.options import LogOptions

            LogOptions.set_stderr_log_level((self.options.log_level or "info").upper())
            logdir = self.options.logdir or self.config.get("goals", "logdir", default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                init("goals")
            else:
                init()
            logger = log

        if self.options.recursive_directory:
            log.warn("--all-recursive is deprecated, use a target spec with the form [dir]:: instead")
            for dir in self.options.recursive_directory:
                self.add_target_recursive(dir)

        if self.options.target_directory:
            log.warn("--all is deprecated, use a target spec with the form [dir]: instead")
            for dir in self.options.target_directory:
                self.add_target_directory(dir)

        context = Context(
            self.config,
            self.options,
            self.targets,
            requested_goals=self.requested_goals,
            lock=lock,
            log=logger,
            timer=self.timer if self.options.time else None,
        )

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            print ("Unknown goal(s): %s" % " ".join(phase.name for phase in unknown))
            print ("")
            return Phase.execute(context, "goals")

        if logger:
            logger.debug("Operating on targets: %s", self.targets)

        ret = Phase.attempt(context, self.phases)
        if self.options.time:
            print ("Timing report")
            print ("=============")
            self.timer.print_timings()
        return ret
Example #5
  def run(self, lock):
    # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.options.log_level:
      LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
      logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
        log.init('goals')
      else:
        log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_console_task():
      for phase in self.phases:
        for goal in phase.goals():
          if issubclass(goal.task_type, ConsoleTask):
            return True
      return False

    is_explain = self.options.explain
    update_reporting(self.options, is_console_task() or is_explain, self.run_tracker)

    if self.options.dry_run:
      print('****** Dry Run ******')

    context = Context(
      self.config,
      self.options,
      self.run_tracker,
      self.targets,
      requested_goals=self.requested_goals,
      lock=lock)

    if self.options.recursive_directory:
      context.log.warn(
        '--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
      for dir in self.options.recursive_directory:
        self.add_target_recursive(dir)

    if self.options.target_directory:
      context.log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
      for dir in self.options.target_directory:
        self.add_target_directory(dir)

    unknown = []
    for phase in self.phases:
      if not phase.goals():
        unknown.append(phase)

    if unknown:
      _list_goals(context, 'Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
      return 1

    return Goal._execute(context, self.phases, print_timing=self.options.time)
Example #6
  def run(self, lock):
    # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.options.log_level:
      LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
      logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
        log.init('goals')
      else:
        log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_console_task():
      for phase in self.phases:
        for goal in phase.goals():
          if issubclass(goal.task_type, ConsoleTask):
            return True
      return False

    is_explain = self.options.explain
    update_reporting(self.options, is_console_task() or is_explain, self.run_tracker)

    if self.options.dry_run:
      print('****** Dry Run ******')

    context = Context(
      self.config,
      self.options,
      self.run_tracker,
      self.targets,
      requested_goals=self.requested_goals,
      lock=lock)

    if self.options.recursive_directory:
      context.log.warn(
        '--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
      for dir in self.options.recursive_directory:
        self.add_target_recursive(dir)

    if self.options.target_directory:
      context.log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
      for dir in self.options.target_directory:
        self.add_target_directory(dir)

    unknown = []
    for phase in self.phases:
      if not phase.goals():
        unknown.append(phase)

    if unknown:
      _list_goals(context, 'Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
      return 1

    return Goal._execute(context, self.phases, print_timing=self.options.time)
Example #7
  def run(self, lock):
    with self.check_errors("Target contains a dependency cycle") as error:
      for target in self.targets:
        try:
          InternalTarget.check_cycles(target)
        except InternalTarget.CycleException as e:
          error(target.id)

    timer = None
    if self.options.time:
      class Timer(object):
        def now(self):
          return time.time()
        def log(self, message):
          print(message)
      timer = Timer()

    logger = None
    if self.options.log or self.options.log_level:
      from twitter.common.log import init
      from twitter.common.log.options import LogOptions
      LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
      logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
        init('goals')
      else:
        init()
      logger = log

    if self.options.recursive_directory:
      log.warn('--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
      for dir in self.options.recursive_directory:
        self.add_target_recursive(dir)

    if self.options.target_directory:
      log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
      for dir in self.options.target_directory:
        self.add_target_directory(dir)

    context = Context(self.config, self.options, self.targets, lock=lock, log=logger)

    unknown = []
    for phase in self.phases:
      if not phase.goals():
        unknown.append(phase)

    if unknown:
      print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
      print('')
      return Phase.execute(context, 'goals')

    if logger:
      logger.debug('Operating on targets: %s', self.targets)

    return Phase.attempt(context, self.phases, timer=timer)
Example #8
  def execute(self):
    def add_targets(dir, buildfile):
      try:
        self.targets.extend(Target.get(addr) for addr in Target.get_all_addresses(buildfile))
      except (TypeError, ImportError):
        error(dir, include_traceback=True)
      except (IOError, SyntaxError):
        error(dir)

    if self.options.recursive_directory:
      with self.check_errors('There was a problem scanning the '
                             'following directories for targets:') as error:
        for dir in self.options.recursive_directory:
          for buildfile in BuildFile.scan_buildfiles(self.root_dir, dir):
            add_targets(dir, buildfile)

    if self.options.target_directory:
      with self.check_errors("There was a problem loading targets "
                             "from the following directory's BUILD files") as error:
        for dir in self.options.target_directory:
          add_targets(dir, BuildFile(self.root_dir, dir))

    timer = None
    if self.options.time:
      class Timer(object):
        def now(self):
          return time.time()
        def log(self, message):
          print(message)
      timer = Timer()

    logger = None
    if self.options.log or self.options.log_level:
      from twitter.common.log import init
      from twitter.common.log.options import LogOptions
      LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
      logdir = self.config.get('goals', 'logdir')
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
      init('goals')
      logger = log

    context = Context(self.config, self.options, self.targets, log=logger)

    unknown = []
    for phase in self.phases:
      if not phase.goals():
        unknown.append(phase)

    if unknown:
      print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
      print()
      return Phase.execute(context, 'goals')

    return Phase.attempt(context, self.phases, timer=timer)
Example #9
    def run(self, lock):
        with self.check_errors("Target contains a dependency cycle") as error:
            with self.timer.timing("parse:check_cycles"):
                for target in self.targets:
                    try:
                        InternalTarget.check_cycles(target)
                    except InternalTarget.CycleException as e:
                        error(target.id)

        logger = None
        if self.options.log or self.options.log_level:
            from twitter.common.log import init
            from twitter.common.log.options import LogOptions

            LogOptions.set_stderr_log_level((self.options.log_level or "info").upper())
            logdir = self.options.logdir or self.config.get("goals", "logdir", default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                init("goals")
            else:
                init()
            logger = log

        if self.options.recursive_directory:
            log.warn("--all-recursive is deprecated, use a target spec with the form [dir]:: instead")
            for dir in self.options.recursive_directory:
                self.add_target_recursive(dir)

        if self.options.target_directory:
            log.warn("--all is deprecated, use a target spec with the form [dir]: instead")
            for dir in self.options.target_directory:
                self.add_target_directory(dir)

        context = Context(self.config, self.options, self.targets, lock=lock, log=logger)

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            print("Unknown goal(s): %s" % " ".join(phase.name for phase in unknown))
            print("")
            return Phase.execute(context, "goals")

        if logger:
            logger.debug("Operating on targets: %s", self.targets)

        ret = Phase.attempt(context, self.phases, timer=self.timer if self.options.time else None)
        if self.options.time:
            print("Timing report")
            print("=============")
            self.timer.print_timings()
        return ret
Example #10
  def run(self, lock):
    if self.options.dry_run:
      print('****** Dry Run ******')

    logger = None
    if self.options.log or self.options.log_level:
      from twitter.common.log import init
      from twitter.common.log.options import LogOptions
      LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
      logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
        init('goals')
      else:
        init()
      logger = log

    if self.options.recursive_directory:
      log.warn('--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
      for dir in self.options.recursive_directory:
        self.add_target_recursive(dir)

    if self.options.target_directory:
      log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
      for dir in self.options.target_directory:
        self.add_target_directory(dir)

    context = Context(
      self.config,
      self.options,
      self.targets,
      lock=lock,
      log=logger,
      timer=self.timer if self.options.time else None)

    unknown = []
    for phase in self.phases:
      if not phase.goals():
        unknown.append(phase)

    if unknown:
      print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
      print('')
      return Phase.execute(context, 'goals')

    if logger:
      logger.debug('Operating on targets: %s', self.targets)

    ret = Phase.attempt(context, self.phases)
    if self.options.time:
      print('Timing report')
      print('=============')
      self.timer.print_timings()
    return ret
Example #11
  def run(self, lock):
    with self.check_errors("Target contains a dependency cycle") as error:
      with self.timer.timing('parse:check_cycles'):
        for target in self.targets:
          try:
            InternalTarget.check_cycles(target)
          except InternalTarget.CycleException as e:
            error(target.id)

    logger = None
    if self.options.log or self.options.log_level:
      from twitter.common.log import init
      from twitter.common.log.options import LogOptions
      LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
      logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
        init('goals')
      else:
        init()
      logger = log

    if self.options.recursive_directory:
      log.warn('--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
      for dir in self.options.recursive_directory:
        self.add_target_recursive(dir)

    if self.options.target_directory:
      log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
      for dir in self.options.target_directory:
        self.add_target_directory(dir)

    context = Context(self.config, self.options, self.targets, lock=lock, log=logger)

    unknown = []
    for phase in self.phases:
      if not phase.goals():
        unknown.append(phase)

    if unknown:
      print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
      print('')
      return Phase.execute(context, 'goals')

    if logger:
      logger.debug('Operating on targets: %s', self.targets)

    ret = Phase.attempt(context, self.phases, timer=self.timer if self.options.time else None)
    if self.options.time:
      print('Timing report')
      print('=============')
      self.timer.print_timings()
    return ret
Example #12
def setup():
  LogOptions.set_stderr_log_level('NONE')

  app.add_option('--packet-filter', default='tcp', type=str,
                 help='pcap filter string. e.g. "tcp portrange 11221-32767" for JUnit tests')
  app.add_option('-c', '--colors', default=False, action='store_true')
  app.add_option('--dump-bad-packet', default=False, action='store_true')
  app.add_option('--include-pings', default=False, action='store_true',
                 help='Whether to include ZAB/ZK pings')
  app.add_option('--offline', default=None, type=str,
                 help='offline mode with a pcap file')
  app.add_option('--version', default=False, action='store_true')
Example #13
def setup():
  LogOptions.set_stderr_log_level('NONE')

  app.add_option('--iface', default='eth0', type=str,
                 help='The interface to sniff on')
  app.add_option('--port', default=2889, type=int,
                 help='The ZAB port used by the leader')
  app.add_option('-c', '--colors', default=False, action='store_true',
                 help='Color each learner/leader stream differently')
  app.add_option('--dump-bad-packet', default=False, action='store_true',
                 help='Dump packets that cannot be deserialized')
  app.add_option('--include-pings', default=False, action='store_true',
                 help='Whether to include pings sent from learners to the leader')
  app.add_option('--version', default=False, action='store_true')
Example #14
    def run(self, lock):
        # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
        # context/work-unit logging and standard python logging doesn't buy us anything.

        # Enable standard python logging for code with no handle to a context/work-unit.
        if self.options.log_level:
            LogOptions.set_stderr_log_level((self.options.log_level
                                             or 'info').upper())
            logdir = self.options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                log.init('goals')
            else:
                log.init()

        # Update the reporting settings, now that we have flags etc.
        def is_console_task():
            for phase in self.phases:
                for goal in phase.goals():
                    if issubclass(goal.task_type, ConsoleTask):
                        return True
            return False

        is_explain = self.options.explain
        update_reporting(self.options,
                         is_console_task() or is_explain, self.run_tracker)

        context = Context(self.config,
                          self.options,
                          self.run_tracker,
                          self.targets,
                          requested_goals=self.requested_goals,
                          build_graph=self.build_graph,
                          build_file_parser=self.build_file_parser,
                          lock=lock)

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            context.log.error('Unknown goal(s): %s\n' %
                              ' '.join(phase.name for phase in unknown))
            return 1

        engine = GroupEngine()
        return engine.execute(context, self.phases)
Example #15
  def run(self, lock):
    # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.options.log_level:
      LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
      logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
        log.init('goals')
      else:
        log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_console_task():
      for phase in self.phases:
        for goal in phase.goals():
          if issubclass(goal.task_type, ConsoleTask):
            return True
      return False

    is_explain = self.options.explain
    update_reporting(self.options, is_console_task() or is_explain, self.run_tracker)

    context = Context(
      self.config,
      self.options,
      self.run_tracker,
      self.targets,
      requested_goals=self.requested_goals,
      build_graph=self.build_graph,
      build_file_parser=self.build_file_parser,
      lock=lock)

    unknown = []
    for phase in self.phases:
      if not phase.goals():
        unknown.append(phase)

    if unknown:
      context.log.error('Unknown goal(s): %s\n' % ' '.join(phase.name for phase in unknown))
      return 1

    engine = GroupEngine()
    return engine.execute(context, self.phases)
Example #16
def setup():
  LogOptions.set_stderr_log_level('NONE')

  app.add_option('--iface', default='eth0', type=str, metavar='<iface>',
                 help='The interface to sniff on')
  app.add_option('--client-port', default=0, type=int, metavar='<client_port>',
                 help='The client port to filter by')
  app.add_option('--zookeeper-port', default=2181, type=int, metavar='<server_port>',
                 help='The ZooKeeper server port to filter by')
  app.add_option('--max-queued-requests', default=10000, type=int, metavar='<max>',
                 help='The maximum number of requests queued to be deserialized')
  app.add_option('--exclude-host',
                 dest='excluded_hosts',
                 metavar='<host>',
                 default=[],
                 action='append',
                 help='Host that should be excluded (you can use this multiple times)')
  app.add_option('--include-host',
                 dest='included_hosts',
                 metavar='<host>',
                 default=[],
                 action='append',
                 help='Host that should be included (you can use this multiple times)')
  app.add_option('--count-requests', default=0, type=int, metavar='<nreqs>',
                 help='Count N requests and report a summary (default: group by path)')
  app.add_option('--measure-latency', default=0, type=int, metavar='<nreqs>',
                 help='Measure latency of N pairs of requests and replies (default: group by path)')
  app.add_option('--group-by', default='path', type=str, metavar='<group>',
                 help='Used with --count-requests or --measure-latency. Possible values: path, type or client')
  app.add_option('--sort-by', default='avg', type=str, metavar='<sort>',
                 help='Used with --measure-latency. Possible values: avg, p95 and p99')
  app.add_option("--aggregation-depth", default=0, type=int, metavar='<depth>',
                 help="Aggregate paths up to a certain depth. Used with --count-requests or --measure-latency")
  app.add_option('--unpaired', default=False, action='store_true',
                 help='Don\'t pair reqs/reps')
  app.add_option('-p', '--include-pings', default=False, action='store_true',
                 help='Whether to include ping requests and replies')
  app.add_option('-c', '--colors', default=False, action='store_true',
                 help='Color each client/server stream differently')
  app.add_option('--dump-bad-packet', default=False, action='store_true',
                 help='If unable to deserialize a packet, print it out')
  app.add_option('--version', default=False, action='store_true')
Example #17
def setup():
    LogOptions.set_stderr_log_level('NONE')

    app.add_option(
        '--packet-filter',
        default='tcp',
        type=str,
        help=
        'pcap filter string. e.g. "tcp portrange 11221-32767" for JUnit tests')
    app.add_option('-c', '--colors', default=False, action='store_true')
    app.add_option('--dump-bad-packet', default=False, action='store_true')
    app.add_option('--include-pings',
                   default=False,
                   action='store_true',
                   help='Whether to include ZAB/ZK pings')
    app.add_option('--offline',
                   default=None,
                   type=str,
                   help='offline mode with a pcap file')
    app.add_option('--version', default=False, action='store_true')
Example #18
# These are side-effecting imports in that they register commands via
# app.command.  This is a poor code practice and should be fixed long-term
# with the creation of twitter.common.cli that allows for argparse-style CLI
# composition.
from twitter.aurora.client.commands import (
    core,
    help,
    run,
    ssh,
)
from twitter.aurora.client.options import add_verbosity_options

app.register_commands_from(core, run, ssh)
app.register_commands_from(help)
add_verbosity_options()


def main():
    app.help()


LogOptions.set_stderr_log_level('INFO')
LogOptions.disable_disk_logging()
app.set_name('aurora-client')
app.set_usage(generate_terse_usage())


def proxy_main():
    app.main()
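The comment at the top of this example refers to modules that register subcommands as an import side effect via app.command. A minimal sketch of what such a module might contain (the command name and body are illustrative assumptions, not taken from the Aurora client):

from twitter.common import app


@app.command
def version(args, options):
    """Prints a placeholder client version."""
    print('aurora-client (version unknown)')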
Example #19
import zookeeper

from twitter.common import log
from twitter.common.log.options import LogOptions

from twitter.common.zookeeper.client import ZooKeeper, ZooDefs
from twitter.common.zookeeper.test_server import ZookeeperServer

MAX_EVENT_WAIT_SECS = 30.0
MAX_EXPIRE_WAIT_SECS = 60.0
CONNECT_TIMEOUT_SECS = 10.0
CONNECT_RETRIES = 6


if os.getenv('ZOOKEEPER_TEST_DEBUG'):
  LogOptions.set_stderr_log_level('NONE')
  LogOptions.set_disk_log_level('DEBUG')
  LogOptions.set_log_dir('/tmp')
  log.init('client_test')


def make_zk(server, **kw):
  return ZooKeeper('localhost:%d' % server.zookeeper_port,
                   timeout_secs=CONNECT_TIMEOUT_SECS,
                   max_reconnects=CONNECT_RETRIES,
                   **kw)


def test_client_connect():
  with ZookeeperServer() as server:
    zk = make_zk(server)
Example #20
  def main(args, options):
    log.info("Options in use: %s", options)

    if not options.api_port:
      app.error('Must specify --port')

    if not options.mesos_master:
      app.error('Must specify --mesos_master')

    if not options.framework_user:
      app.error('Must specify --framework_user')

    if not options.executor_uri:
      app.error('Must specify --executor_uri')

    if not options.executor_cmd:
      app.error('Must specify --executor_cmd')

    if not options.zk_url:
      app.error('Must specify --zk_url')

    if not options.admin_keypath:
      app.error('Must specify --admin_keypath')

    if not options.scheduler_keypath:
      app.error('Must specify --scheduler_keypath')

    if options.verbose:
      LogOptions.set_stderr_log_level('google:DEBUG')

    try:
      election_timeout = parse_time(options.election_timeout)
      framework_failover_timeout = parse_time(options.framework_failover_timeout)
    except InvalidTime as e:
      app.error(e.message)

    try:
      _, zk_servers, zk_root = zookeeper.parse(options.zk_url)
    except Exception as e:
      app.error("Invalid --zk_url: %s" % e.message)

    web_assets_dir = os.path.join(options.work_dir, "web")
    pkgutil.unpack_assets(web_assets_dir, MYSOS_MODULE, ASSET_RELPATH)
    log.info("Extracted web assets into %s" % options.work_dir)

    fw_principal = None
    fw_secret = None
    if options.framework_authentication_file:
      try:
        with open(options.framework_authentication_file, "r") as f:
          cred = yaml.load(f)
        fw_principal = cred["principal"]
        fw_secret = cred["secret"]
        log.info("Loaded credential (principal=%s) for framework authentication" % fw_principal)
      except IOError as e:
        app.error("Unable to read the framework authentication key file: %s" % e)
      except (KeyError, yaml.YAMLError) as e:
        app.error("Invalid framework authentication key file format %s" % e)

    scheduler_key = None
    try:
      with open(options.scheduler_keypath, 'rb') as f:
        scheduler_key = f.read().strip()
        if not scheduler_key:
          raise ValueError("The key file is empty")
    except Exception as e:
      app.error("Cannot read --scheduler_keypath: %s" % e)

    log.info("Starting Mysos scheduler")

    kazoo = KazooClient(zk_servers)
    kazoo.start()

    if options.state_storage == 'zk':
      log.info("Using ZooKeeper (path: %s) for state storage" % zk_root)
      state_provider = ZooKeeperStateProvider(kazoo, zk_root)
    else:
      log.info("Using local disk for state storage")
      state_provider = LocalStateProvider(options.work_dir)

    try:
      state = state_provider.load_scheduler_state()
    except StateProvider.Error as e:
      app.error(e.message)

    if state:
      log.info("Successfully restored scheduler state")
      framework_info = state.framework_info
      if framework_info.HasField('id'):
        log.info("Recovered scheduler's FrameworkID is %s" % framework_info.id.value)
    else:
      log.info("No scheduler state to restore")
      framework_info = FrameworkInfo(
          user=options.framework_user,
          name=FRAMEWORK_NAME,
          checkpoint=True,
          failover_timeout=framework_failover_timeout.as_(Time.SECONDS),
          role=options.framework_role,
          hostname=options.hostname,
          webui_url="http://%s:%s/" % (options.hostname, options.api_port))
      if fw_principal:
        framework_info.principal = fw_principal
      state = Scheduler(framework_info)
      state_provider.dump_scheduler_state(state)

    scheduler = MysosScheduler(
        state,
        state_provider,
        options.framework_user,
        options.executor_uri,
        options.executor_cmd,
        kazoo,
        options.zk_url,
        election_timeout,
        options.admin_keypath,
        scheduler_key,
        installer_args=options.installer_args,
        backup_store_args=options.backup_store_args,
        executor_environ=options.executor_environ,
        executor_source_prefix=options.executor_source_prefix,
        docker_image=options.docker_image,
        framework_role=options.framework_role)

    RootMetrics().register_observable('scheduler', scheduler)

    if fw_principal and fw_secret:
      cred = Credential(principal=fw_principal, secret=fw_secret)
      scheduler_driver = mesos.native.MesosSchedulerDriver(
          scheduler,
          framework_info,
          options.mesos_master,
          cred)
    else:
      scheduler_driver = mesos.native.MesosSchedulerDriver(
          scheduler,
          framework_info,
          options.mesos_master)

    scheduler_driver.start()

    metric_sampler = MetricSampler(RootMetrics())
    metric_sampler.start()

    server = HttpServer()
    server.mount_routes(MysosServer(scheduler, web_assets_dir, metric_sampler))

    et = ExceptionalThread(
        target=server.run, args=('0.0.0.0', options.api_port, 'cherrypy'))
    et.daemon = True
    et.start()

    try:
      # Wait for the scheduler to stop.
      # The use of 'stopped' event instead of scheduler_driver.join() is necessary to stop the
      # process with SIGINT.
      while not scheduler.stopped.wait(timeout=0.5):
        pass
    except KeyboardInterrupt:
      log.info('Interrupted, exiting.')
    else:
      log.info('Scheduler exited.')

    app.shutdown(1)  # Mysos scheduler is supposed to be long-running thus the use of exit status 1.
Example #21
    def run(self, lock):
        if self.options.dry_run:
            print('****** Dry Run ******')

        logger = None
        if self.options.log or self.options.log_level:
            from twitter.common.log import init
            from twitter.common.log.options import LogOptions
            LogOptions.set_stderr_log_level((self.options.log_level
                                             or 'info').upper())
            logdir = self.options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                init('goals')
            else:
                init()
            logger = log

        if self.options.recursive_directory:
            log.warn(
                '--all-recursive is deprecated, use a target spec with the form [dir]:: instead'
            )
            for dir in self.options.recursive_directory:
                self.add_target_recursive(dir)

        if self.options.target_directory:
            log.warn(
                '--all is deprecated, use a target spec with the form [dir]: instead'
            )
            for dir in self.options.target_directory:
                self.add_target_directory(dir)

        context = Context(self.config,
                          self.options,
                          self.targets,
                          requested_goals=self.requested_goals,
                          lock=lock,
                          log=logger,
                          timer=self.timer if self.options.time else None)

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            print('Unknown goal(s): %s' % ' '.join(phase.name
                                                   for phase in unknown))
            print('')
            return Phase.execute(context, 'goals')

        if logger:
            logger.debug('Operating on targets: %s', self.targets)

        ret = Phase.attempt(context, self.phases)

        if self.options.cleanup_nailguns or self.config.get(
                'nailgun', 'autokill', default=False):
            if log:
                log.debug('auto-killing nailguns')
            if NailgunTask.killall:
                NailgunTask.killall(log)

        if self.options.time:
            print('Timing report')
            print('=============')
            self.timer.print_timings()

        return ret
Example #22
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from twitter.common import app
from twitter.common.log.options import LogOptions

from apache.aurora.admin import help as help_commands
from apache.aurora.admin import admin, maintenance

from .help import add_verbosity_options, generate_terse_usage

app.register_commands_from(admin, help_commands, maintenance)
add_verbosity_options()


def main():
  app.help()


LogOptions.set_stderr_log_level('INFO')
LogOptions.disable_disk_logging()
app.set_name('aurora-admin')
app.set_usage(generate_terse_usage())


def proxy_main():
  app.main()
Example #23
import os
import pytest
import threading
import time
import unittest
import zookeeper

from twitter.common.zookeeper.client import ZooKeeper, ZooDefs
from twitter.common.zookeeper.test_server import ZookeeperServer
from twitter.common.zookeeper.group.group import ActiveGroup, Group, Membership


if os.getenv('ZOOKEEPER_TEST_DEBUG'):
  from twitter.common import log
  from twitter.common.log.options import LogOptions
  LogOptions.set_stderr_log_level('DEBUG')
  LogOptions.set_disk_log_level('NONE')
  LogOptions.set_log_dir('/tmp')
  log.init('client_test')


class AlternateGroup(Group):
  MEMBER_PREFIX = 'herpderp_'


class TestGroup(unittest.TestCase):
  GroupImpl = Group
  MAX_EVENT_WAIT_SECS = 30.0
  CONNECT_TIMEOUT_SECS = 10.0
  CONNECT_RETRIES = 6
Example #24
    def _do_run(self):
        # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
        # context/work-unit logging and standard python logging doesn't buy us anything.

        # TODO(Eric Ayers) We are missing log messages. Set the log level earlier
        # Enable standard python logging for code with no handle to a context/work-unit.
        if self.global_options.level:
            LogOptions.set_stderr_log_level((self.global_options.level
                                             or 'info').upper())
            logdir = self.global_options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)

                prev_log_level = None
                # If quiet, temporarily change stderr log level to kill init's output.
                if self.global_options.quiet:
                    prev_log_level = LogOptions.loglevel_name(
                        LogOptions.stderr_log_level())
                    # loglevel_name can fail, so only change level if we were able to get the current one.
                    if prev_log_level is not None:
                        LogOptions.set_stderr_log_level(
                            LogOptions._LOG_LEVEL_NONE_KEY)

                log.init('goals')

                if prev_log_level is not None:
                    LogOptions.set_stderr_log_level(prev_log_level)
            else:
                log.init()

        # Update the reporting settings, now that we have flags etc.
        def is_quiet_task():
            for goal in self.goals:
                if goal.has_task_of_type(QuietTaskMixin):
                    return True
            return False

        is_explain = self.global_options.explain
        update_reporting(self.global_options,
                         is_quiet_task() or is_explain, self.run_tracker)

        context = Context(config=self.config,
                          options=self.options,
                          run_tracker=self.run_tracker,
                          target_roots=self.targets,
                          requested_goals=self.requested_goals,
                          build_graph=self.build_graph,
                          build_file_parser=self.build_file_parser,
                          address_mapper=self.address_mapper,
                          spec_excludes=self.get_spec_excludes())

        unknown = []
        for goal in self.goals:
            if not goal.ordered_task_names():
                unknown.append(goal)

        if unknown:
            context.log.error('Unknown goal(s): %s\n' %
                              ' '.join(goal.name for goal in unknown))
            return 1

        engine = RoundEngine()
        return engine.execute(context, self.goals)
Example #25
def set_verbose(option, _1, _2, parser):
  setattr(parser.values, option.dest, 'verbose')
  LogOptions.set_stderr_log_level('DEBUG')
Example #26
def set_quiet(option, _1, _2, parser):
    setattr(parser.values, option.dest, "quiet")
    LogOptions.set_stderr_log_level("NONE")
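set_verbose and set_quiet have the optparse callback signature (option, opt_str, value, parser), so they are presumably attached with action='callback'. A hedged registration sketch (the flag names, dest, and help strings are assumptions for illustration):

app.add_option('-v', '--verbose', dest='verbosity', default='normal',
               action='callback', callback=set_verbose,
               help='Verbose logging.')
app.add_option('-q', '--quiet', dest='verbosity', default='normal',
               action='callback', callback=set_quiet,
               help='Minimal logging.')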
Example #27
    def run(self, lock):
        # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
        # context/work-unit logging and standard python logging doesn't buy us anything.

        # Enable standard python logging for code with no handle to a context/work-unit.
        if self.options.log_level:
            LogOptions.set_stderr_log_level((self.options.log_level or "info").upper())
            logdir = self.options.logdir or self.config.get("goals", "logdir", default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                log.init("goals")
            else:
                log.init()

        # Update the reporting settings, now that we have flags etc.
        def is_quiet_task():
            for goal in self.goals:
                if goal.has_task_of_type(QuietTaskMixin):
                    return True
            return False

        # Target specs are mapped to the patterns which match them, if any. This variable is a key for
        # specs which don't match any exclusion regexes. We know it won't already be in the list of
        # patterns, because the asterisks in its name make it an invalid regex.
        _UNMATCHED_KEY = "** unmatched **"

        def targets_by_pattern(targets, patterns):
            mapping = defaultdict(list)
            for target in targets:
                matched_pattern = None
                for pattern in patterns:
                    if re.search(pattern, target.address.spec) is not None:
                        matched_pattern = pattern
                        break
                if matched_pattern is None:
                    mapping[_UNMATCHED_KEY].append(target)
                else:
                    mapping[matched_pattern].append(target)
            return mapping

        is_explain = self.options.explain
        update_reporting(self.options, is_quiet_task() or is_explain, self.run_tracker)

        if self.options.target_excludes:
            excludes = self.options.target_excludes
            log.debug("excludes:\n  {excludes}".format(excludes="\n  ".join(excludes)))
            by_pattern = targets_by_pattern(self.targets, excludes)
            self.targets = by_pattern[_UNMATCHED_KEY]
            # The rest of this if-statement is just for debug logging.
            log.debug(
                "Targets after excludes: {targets}".format(targets=", ".join(t.address.spec for t in self.targets))
            )
            excluded_count = sum(len(by_pattern[p]) for p in excludes)
            log.debug(
                "Excluded {count} target{plural}.".format(
                    count=excluded_count, plural=("s" if excluded_count != 1 else "")
                )
            )
            for pattern in excludes:
                log.debug(
                    "Targets excluded by pattern {pattern}\n  {targets}".format(
                        pattern=pattern, targets="\n  ".join(t.address.spec for t in by_pattern[pattern])
                    )
                )

        context = Context(
            config=self.config,
            options=self.options,
            run_tracker=self.run_tracker,
            target_roots=self.targets,
            requested_goals=self.requested_goals,
            build_graph=self.build_graph,
            build_file_parser=self.build_file_parser,
            address_mapper=self.address_mapper,
            lock=lock,
        )

        unknown = []
        for goal in self.goals:
            if not goal.ordered_task_names():
                unknown.append(goal)

        if unknown:
            context.log.error("Unknown goal(s): %s\n" % " ".join(goal.name for goal in unknown))
            return 1

        engine = RoundEngine()
        return engine.execute(context, self.goals)
Example #28
    def run(self, lock):
        timer = None
        if self.options.time:

            class Timer(object):
                def now(self):
                    return time.time()

                def log(self, message):
                    print(message)

            timer = Timer()

        logger = None
        if self.options.log or self.options.log_level:
            from twitter.common.log import init
            from twitter.common.log.options import LogOptions
            LogOptions.set_stderr_log_level((self.options.log_level
                                             or 'info').upper())
            logdir = self.options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                init('goals')
            else:
                init()
            logger = log

        if self.options.recursive_directory:
            log.warn(
                '--all-recursive is deprecated, use a target spec with the form [dir]:: instead'
            )
            for dir in self.options.recursive_directory:
                self.add_target_recursive(dir)

        if self.options.target_directory:
            log.warn(
                '--all is deprecated, use a target spec with the form [dir]: instead'
            )
            for dir in self.options.target_directory:
                self.add_target_directory(dir)

        context = Context(self.config,
                          self.options,
                          self.targets,
                          lock=lock,
                          log=logger)

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            print('Unknown goal(s): %s' % ' '.join(phase.name
                                                   for phase in unknown))
            print('')
            return Phase.execute(context, 'goals')

        if logger:
            logger.debug('Operating on targets: %s', self.targets)

        return Phase.attempt(context, self.phases, timer=timer)
Example #29
from twitter.common import log
from twitter.common.log.options import LogOptions

from twitter.common.zookeeper.client import ZooKeeper, ZooDefs
from twitter.common.zookeeper.test_server import ZookeeperServer

import mox

MAX_EVENT_WAIT_SECS = 30.0
MAX_EXPIRE_WAIT_SECS = 60.0
CONNECT_TIMEOUT_SECS = 10.0
CONNECT_RETRIES = 6

if os.getenv('ZOOKEEPER_TEST_DEBUG'):
    LogOptions.set_stderr_log_level('NONE')
    LogOptions.set_disk_log_level('DEBUG')
    LogOptions.set_log_dir('/tmp')
    log.init('client_test')


def make_zk(server, **kw):
    return ZooKeeper('localhost:%d' % server.zookeeper_port,
                     timeout_secs=CONNECT_TIMEOUT_SECS,
                     max_reconnects=CONNECT_RETRIES,
                     **kw)


def test_client_connect():
    with ZookeeperServer() as server:
        zk = make_zk(server)
Example #30
    return TaskObserver(path_detector, interval=polling_interval)


def handle_error(exc_type, value, traceback):
    """ Tear down the observer in case of unhandled errors.

  By using ExceptionalThread throughout the observer we have ensured that sys.excepthook will
  be called for every unhandled exception, even for those not originating in the main thread.
  """
    log.error("An unhandled error occured. Tearing down.", exc_info=(exc_type, value, traceback))
    # TODO: In Python 3.4 we will be able to use threading.main_thread()
    if not isinstance(threading.current_thread(), threading._MainThread):
        thread.interrupt_main()


def main(_, options):
    observer = initialize(options)
    observer.start()
    root_server = configure_server(observer)

    server = ExceptionalThread(target=lambda: root_server.run(options.ip, options.port, "cherrypy"))
    server.daemon = True
    server.start()

    sleep_forever()


sys.excepthook = handle_error
LogOptions.set_stderr_log_level("google:INFO")
app.main()
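The docstring in handle_error() explains why ExceptionalThread is used here: unlike a plain threading.Thread, it routes uncaught worker-thread exceptions through sys.excepthook, so handle_error() fires for them too. A small hedged sketch of that behavior (the failing function is purely illustrative):

from twitter.common.exceptions import ExceptionalThread


def _boom():
    raise RuntimeError('simulated worker failure')


t = ExceptionalThread(target=_boom)
t.daemon = True
t.start()  # when _boom() raises, the exception is routed to sys.excepthook, i.e. handle_error()
t.join()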
Example #31
    def run(self):
        # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
        # context/work-unit logging and standard python logging doesn't buy us anything.

        # Enable standard python logging for code with no handle to a context/work-unit.
        if self.global_options.level:
            LogOptions.set_stderr_log_level((self.global_options.level
                                             or 'info').upper())
            logdir = self.global_options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)

                prev_log_level = None
                # If quiet, temporarily change stderr log level to kill init's output.
                if self.global_options.quiet:
                    prev_log_level = LogOptions.loglevel_name(
                        LogOptions.stderr_log_level())
                    # loglevel_name can fail, so only change level if we were able to get the current one.
                    if prev_log_level is not None:
                        LogOptions.set_stderr_log_level(
                            LogOptions._LOG_LEVEL_NONE_KEY)

                log.init('goals')

                if prev_log_level is not None:
                    LogOptions.set_stderr_log_level(prev_log_level)
            else:
                log.init()

        # Update the reporting settings, now that we have flags etc.
        def is_quiet_task():
            for goal in self.goals:
                if goal.has_task_of_type(QuietTaskMixin):
                    return True
            return False

        # Target specs are mapped to the patterns which match them, if any. This variable is a key for
        # specs which don't match any exclusion regexes. We know it won't already be in the list of
        # patterns, because the asterisks in its name make it an invalid regex.
        _UNMATCHED_KEY = '** unmatched **'

        def targets_by_pattern(targets, patterns):
            mapping = defaultdict(list)
            for target in targets:
                matched_pattern = None
                for pattern in patterns:
                    if re.search(pattern, target.address.spec) is not None:
                        matched_pattern = pattern
                        break
                if matched_pattern is None:
                    mapping[_UNMATCHED_KEY].append(target)
                else:
                    mapping[matched_pattern].append(target)
            return mapping

        is_explain = self.global_options.explain
        update_reporting(self.global_options,
                         is_quiet_task() or is_explain, self.run_tracker)

        if self.global_options.exclude_target_regexp:
            excludes = self.global_options.exclude_target_regexp
            log.debug('excludes:\n  {excludes}'.format(
                excludes='\n  '.join(excludes)))
            by_pattern = targets_by_pattern(self.targets, excludes)
            self.targets = by_pattern[_UNMATCHED_KEY]
            # The rest of this if-statement is just for debug logging.
            log.debug('Targets after excludes: {targets}'.format(
                targets=', '.join(t.address.spec for t in self.targets)))
            excluded_count = sum(len(by_pattern[p]) for p in excludes)
            log.debug('Excluded {count} target{plural}.'.format(
                count=excluded_count,
                plural=('s' if excluded_count != 1 else '')))
            for pattern in excludes:
                log.debug('Targets excluded by pattern {pattern}\n  {targets}'.
                          format(pattern=pattern,
                                 targets='\n  '.join(
                                     t.address.spec
                                     for t in by_pattern[pattern])))

        context = Context(config=self.config,
                          new_options=self.new_options,
                          run_tracker=self.run_tracker,
                          target_roots=self.targets,
                          requested_goals=self.requested_goals,
                          build_graph=self.build_graph,
                          build_file_parser=self.build_file_parser,
                          address_mapper=self.address_mapper,
                          spec_excludes=self.get_spec_excludes())

        unknown = []
        for goal in self.goals:
            if not goal.ordered_task_names():
                unknown.append(goal)

        if unknown:
            context.log.error('Unknown goal(s): %s\n' %
                              ' '.join(goal.name for goal in unknown))
            return 1

        engine = RoundEngine()
        return engine.execute(context, self.goals)
Example #32
from twitter.common import app, log
from twitter.common.exceptions import ExceptionalThread
from twitter.common.http import HttpServer
from twitter.common.log.options import LogOptions
from twitter.common.quantity import Time
from twitter.common.quantity.parse_simple import InvalidTime, parse_time
import yaml


FRAMEWORK_NAME = 'mysos'
MYSOS_MODULE = 'mysos.scheduler'
ASSET_RELPATH = 'assets'


LogOptions.disable_disk_logging()
LogOptions.set_stderr_log_level('google:INFO')


def proxy_main():
  app.add_option(
      '--port',
      dest='api_port',
      type='int',
      default=None,
      help='Port for the HTTP API server')

  app.add_option(
      '--mesos_master',
      dest='mesos_master',
      default=None,
      help='Mesos master address. It can be a ZooKeeper URL through which the master can be '
Example #33
from mesos.interface.mesos_pb2 import Credential, FrameworkInfo
import mesos.native
from twitter.common import app, log
from twitter.common.exceptions import ExceptionalThread
from twitter.common.http import HttpServer
from twitter.common.log.options import LogOptions
from twitter.common.quantity import Time
from twitter.common.quantity.parse_simple import InvalidTime, parse_time
import yaml

FRAMEWORK_NAME = 'mysos'
MYSOS_MODULE = 'mysos.scheduler'
ASSET_RELPATH = 'assets'

LogOptions.disable_disk_logging()
LogOptions.set_stderr_log_level('google:INFO')


def proxy_main():
    app.add_option('--port',
                   dest='api_port',
                   type='int',
                   default=None,
                   help='Port for the HTTP API server')

    app.add_option(
        '--mesos_master',
        dest='mesos_master',
        default=None,
        help=
        'Mesos master address. It can be a ZooKeeper URL through which the master can be '
Example #34
def set_quiet(option, _1, _2, parser):
  setattr(parser.values, option.dest, 'quiet')
  LogOptions.set_stderr_log_level('NONE')
Example #35
File: zk.py Project: XXXu/zktraffic
def setup():
    from twitter.common import app

    LogOptions.set_stderr_log_level('NONE')

    app.add_option('--iface',
                   default='eth0',
                   type=str,
                   metavar='<iface>',
                   help='The interface to sniff on')
    app.add_option('--client-port',
                   default=0,
                   type=int,
                   metavar='<client_port>',
                   help='The client port to filter by')
    app.add_option('--zookeeper-port',
                   default=2181,
                   type=int,
                   metavar='<server_port>',
                   help='The ZooKeeper server port to filter by')
    app.add_option(
        '--max-queued-requests',
        default=10000,
        type=int,
        metavar='<max>',
        help='The maximum number of requests queued to be deserialized')
    app.add_option(
        '--exclude-host',
        dest='excluded_hosts',
        metavar='<host>',
        default=[],
        action='append',
        help='Host that should be excluded (you can use this multiple times)')
    app.add_option(
        '--include-host',
        dest='included_hosts',
        metavar='<host>',
        default=[],
        action='append',
        help='Host that should be included (you can use this multiple times)')
    app.add_option(
        '--count-requests',
        default=0,
        type=int,
        metavar='<nreqs>',
        help='Count N requests and report a summary (default: group by path)')
    app.add_option(
        '--measure-latency',
        default=0,
        type=int,
        metavar='<nreqs>',
        help=
        'Measure latency of N pairs of requests and replies (default: group by path)'
    )
    app.add_option(
        '--group-by',
        default='path',
        type=str,
        metavar='<group>',
        help=
        'Used with --count-requests or --measure-latency. Possible values: path, type or client'
    )
    app.add_option(
        '--sort-by',
        default='avg',
        type=str,
        metavar='<sort>',
        help='Used with --measure-latency. Possible values: avg, p95 and p99')
    app.add_option(
        "--aggregation-depth",
        default=0,
        type=int,
        metavar='<depth>',
        help=
        "Aggregate paths up to a certain depth. Used with --count-requests or --measure-latency"
    )
    app.add_option('--unpaired',
                   default=False,
                   action='store_true',
                   help='Don\'t pair reqs/reps')
    app.add_option('-p',
                   '--include-pings',
                   default=False,
                   action='store_true',
                   help='Whether to include ping requests and replies')
    app.add_option('-c',
                   '--colors',
                   default=False,
                   action='store_true',
                   help='Color each client/server stream differently')
    app.add_option('--dump-bad-packet',
                   default=False,
                   action='store_true',
                   help='If unable to deserialize a packet, print it out')
    app.add_option('--version', default=False, action='store_true')
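
For context on how options registered in setup() are typically consumed: once app.main() has parsed the command line, the registered values are handed to the application's main function as an options object. The following is a minimal, hypothetical sketch; the main body below is illustrative and not part of zktraffic:

from twitter.common import app


def main(args, options):
  # Option values registered in setup() arrive here after app.main() parses argv.
  print('sniffing on %s, ZooKeeper port %d' % (options.iface, options.zookeeper_port))


setup()
app.main()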
Example #36
def set_quiet(option, _1, _2, parser):
  setattr(parser.values, option.dest, 'quiet')
  LogOptions.set_stderr_log_level('NONE')
Example #37
def set_verbose(option, _1, _2, parser):
  setattr(parser.values, option.dest, 'verbose')
  LogOptions.set_stderr_log_level('DEBUG')
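
Both callbacks above follow the standard optparse callback signature, so they are meant to be attached to options registered with action='callback'. Below is a minimal sketch of that wiring; the option names, dest, and help strings are assumptions, not taken from the original project:

from twitter.common import app
from twitter.common.log.options import LogOptions


def set_quiet(option, _1, _2, parser):
  setattr(parser.values, option.dest, 'quiet')
  LogOptions.set_stderr_log_level('NONE')


def set_verbose(option, _1, _2, parser):
  setattr(parser.values, option.dest, 'verbose')
  LogOptions.set_stderr_log_level('DEBUG')


# Hypothetical registration: both flags write to the same dest and adjust stderr
# logging as a side effect when parsed.
app.add_option('-q', '--quiet', action='callback', callback=set_quiet, dest='verbosity',
               default='normal', help='Minimize stderr output.')
app.add_option('-v', '--verbose', action='callback', callback=set_verbose, dest='verbosity',
               help='Maximize stderr output.')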
Example #38
import os

from twitter.common import log
from twitter.common.dirutil import safe_mkdtemp
from twitter.common.log.options import LogOptions
from twitter.common.quantity import Amount, Time

from apache.aurora.executor.aurora_executor import AuroraExecutor
from apache.aurora.executor.common.executor_timeout import ExecutorTimeout
from apache.aurora.executor.common.health_checker import HealthCheckerProvider
from apache.aurora.executor.common.sandbox import DirectorySandbox, SandboxProvider
from apache.aurora.executor.common.status_checker import ChainedStatusChecker
from apache.aurora.executor.common.task_runner import TaskError
from apache.aurora.executor.status_manager import StatusManager
from apache.aurora.executor.thermos_task_runner import (
    DefaultThermosTaskRunnerProvider, ThermosTaskRunner)
from apache.thermos.core.runner import TaskRunner
from apache.thermos.monitoring.monitor import TaskMonitor

from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME
from gen.apache.aurora.api.ttypes import AssignedTask, ExecutorConfig, JobKey, TaskConfig

if 'THERMOS_DEBUG' in os.environ:
    LogOptions.set_stderr_log_level('google:DEBUG')
    LogOptions.set_simple(True)
    log.init('executor_logger')


class FastThermosExecutor(AuroraExecutor):
    STOP_WAIT = Amount(0, Time.SECONDS)


class FastStatusManager(StatusManager):
    POLL_WAIT = Amount(10, Time.MILLISECONDS)


class DefaultTestSandboxProvider(SandboxProvider):
    def from_assigned_task(self, assigned_task, **kwargs):
        return DirectorySandbox(safe_mkdtemp(), **kwargs)
Example #39
import os

from twitter.common import log
from twitter.common.dirutil import safe_mkdtemp
from twitter.common.log.options import LogOptions
from twitter.common.quantity import Amount, Time

from apache.aurora.executor.aurora_executor import AuroraExecutor
from apache.aurora.executor.common.executor_timeout import ExecutorTimeout
from apache.aurora.executor.common.health_checker import HealthCheckerProvider
from apache.aurora.executor.common.sandbox import DirectorySandbox, SandboxProvider
from apache.aurora.executor.common.status_checker import ChainedStatusChecker
from apache.aurora.executor.common.task_runner import TaskError
from apache.aurora.executor.status_manager import StatusManager
from apache.aurora.executor.thermos_task_runner import DefaultThermosTaskRunnerProvider, ThermosTaskRunner
from apache.thermos.core.runner import TaskRunner
from apache.thermos.monitoring.monitor import TaskMonitor

from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME
from gen.apache.aurora.api.ttypes import AssignedTask, ExecutorConfig, Identity, JobKey, TaskConfig

if "THERMOS_DEBUG" in os.environ:
    LogOptions.set_stderr_log_level("google:DEBUG")
    LogOptions.set_simple(True)
    log.init("executor_logger")


class FastThermosExecutor(AuroraExecutor):
    STOP_WAIT = Amount(0, Time.SECONDS)


class FastStatusManager(StatusManager):
    POLL_WAIT = Amount(10, Time.MILLISECONDS)


class DefaultTestSandboxProvider(SandboxProvider):
    def from_assigned_task(self, assigned_task):
        return DirectorySandbox(safe_mkdtemp())
Example #40
import os

from mysos.scheduler.state import LocalStateProvider, Scheduler

from kazoo.handlers.threading import SequentialThreadingHandler
from mesos.interface.mesos_pb2 import DRIVER_STOPPED, FrameworkInfo
from twitter.common import log
from twitter.common.concurrent import deadline
from twitter.common.dirutil import safe_mkdtemp
from twitter.common.metrics import RootMetrics
from twitter.common.quantity import Amount, Time
from zake.fake_client import FakeClient
from zake.fake_storage import FakeStorage


if 'MYSOS_DEBUG' in os.environ:
  from twitter.common.log.options import LogOptions
  LogOptions.set_stderr_log_level('google:DEBUG')
  LogOptions.set_simple(True)
  log.init('mysos_tests')


def test_scheduler_runs():
  """
    Verifies that the scheduler successfully launches 3 "no-op" MySQL tasks.
    NOTE: Due to the limitation of zake the scheduler's ZK operations are not propagated to
    executors in separate processes but they are unit-tested separately.
  """
  import mesos.native

  # Make sure fake_mysos_executor.pex is available to be fetched by Mesos slave.
  assert os.path.isfile('dist/fake_mysos_executor.pex')
Example #41
  def _do_run(self):
    # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # TODO(Eric Ayers) We are missing log messages. Set the log level earlier
    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.global_options.level:
      LogOptions.set_stderr_log_level((self.global_options.level or 'info').upper())
      logdir = self.global_options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)

        prev_log_level = None
        # If quiet, temporarily change stderr log level to kill init's output.
        if self.global_options.quiet:
          prev_log_level = LogOptions.loglevel_name(LogOptions.stderr_log_level())
          # loglevel_name can fail, so only change level if we were able to get the current one.
          if prev_log_level is not None:
            LogOptions.set_stderr_log_level(LogOptions._LOG_LEVEL_NONE_KEY)

        log.init('goals')

        if prev_log_level is not None:
          LogOptions.set_stderr_log_level(prev_log_level)
      else:
        log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_quiet_task():
      for goal in self.goals:
        if goal.has_task_of_type(QuietTaskMixin):
          return True
      return False

    is_explain = self.global_options.explain
    update_reporting(self.global_options, is_quiet_task() or is_explain, self.run_tracker)

    context = Context(
      config=self.config,
      options=self.options,
      run_tracker=self.run_tracker,
      target_roots=self.targets,
      requested_goals=self.requested_goals,
      build_graph=self.build_graph,
      build_file_parser=self.build_file_parser,
      address_mapper=self.address_mapper,
      spec_excludes=self.get_spec_excludes()
    )

    unknown = []
    for goal in self.goals:
      if not goal.ordered_task_names():
        unknown.append(goal)

    if unknown:
      context.log.error('Unknown goal(s): %s\n' % ' '.join(goal.name for goal in unknown))
      return 1

    engine = RoundEngine()
    return engine.execute(context, self.goals)
Example #42

from apache.aurora.client.base import generate_terse_usage
from apache.aurora.client.commands import admin, help
from apache.aurora.client.options import add_verbosity_options

from twitter.common import app
from twitter.common.log.options import LogOptions


app.register_commands_from(admin, help)
add_verbosity_options()


def main():
    app.help()


LogOptions.set_stderr_log_level("INFO")
LogOptions.disable_disk_logging()
app.set_name("aurora-admin")
app.set_usage(generate_terse_usage())


def proxy_main():
    app.main()
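
The module-level calls above run at import time; proxy_main() then defers to app.main(), which parses flags, applies the LogOptions settings, and dispatches to whichever registered command was named on the command line. Below is a stripped-down, hypothetical sketch of the same twitter.common.app lifecycle for a plain (single-entry-point) application; the name 'demo-app' and the --greeting flag are invented for illustration:

from twitter.common import app, log
from twitter.common.log.options import LogOptions

LogOptions.set_stderr_log_level('INFO')
LogOptions.disable_disk_logging()
app.set_name('demo-app')
app.add_option('--greeting', default='hello', type=str, help='Greeting to log at startup')


def main(args, options):
  # app.main() parses argv, initializes logging per LogOptions, then invokes this entry point.
  log.info('%s from demo-app (args=%s)' % (options.greeting, args))


app.main()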
Example #43
    ScheduleStatusResult,
    ServerInfo,
    SessionKey,
    TaskConfig,
    TaskConstraint,
    TaskQuery,
    ValueConstraint,
)

# Debug output helper -> enables log.* in source.
if "UPDATER_DEBUG" in environ:
    from twitter.common import log
    from twitter.common.log.options import LogOptions

    LogOptions.set_disk_log_level("NONE")
    LogOptions.set_stderr_log_level("DEBUG")
    log.init("test_updater")

SERVER_INFO = ServerInfo(thriftAPIVersion=THRIFT_API_VERSION)


def make_response(code, msg="test"):
    return Response(responseCode=code, serverInfo=SERVER_INFO, details=[ResponseDetail(message=msg)])


class FakeConfig(object):
    def __init__(self, role, name, env, update_config):
        self._role = role
        self._env = env
        self._name = name
        self._update_config = update_config
Example #44
  ScheduleStatusResult,
  ScheduledTask,
  TaskConfig,
  TaskQuery,
)

from mox import MockObject, Replay, Verify
from pytest import raises


# Debug output helper -> enables log.* in source.
if 'UPDATER_DEBUG' in environ:
  from twitter.common import log
  from twitter.common.log.options import LogOptions
  LogOptions.set_disk_log_level('NONE')
  LogOptions.set_stderr_log_level('DEBUG')
  log.init('test_updater')

class FakeConfig(object):
  def __init__(self, role, name, env, update_config):
    self._role = role
    self._env = env
    self._name = name
    self._update_config = update_config
    self.job_config = None

  def role(self):
    return self._role

  def name(self):
    return self._name
Example #45
  def run(self):
    # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.global_options.level:
      LogOptions.set_stderr_log_level((self.global_options.level or 'info').upper())
      logdir = self.global_options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)

        prev_log_level = None
        # If quiet, temporarily change stderr log level to kill init's output.
        if self.global_options.quiet:
          prev_log_level = LogOptions.loglevel_name(LogOptions.stderr_log_level())
          # loglevel_name can fail, so only change level if we were able to get the current one.
          if prev_log_level is not None:
            LogOptions.set_stderr_log_level(LogOptions._LOG_LEVEL_NONE_KEY)

        log.init('goals')

        if prev_log_level is not None:
          LogOptions.set_stderr_log_level(prev_log_level)
      else:
        log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_quiet_task():
      for goal in self.goals:
        if goal.has_task_of_type(QuietTaskMixin):
          return True
      return False

    # Target specs are mapped to the patterns which match them, if any. This variable is a key for
    # specs which don't match any exclusion regexes. We know it won't already be in the list of
    # patterns, because the asterisks in its name make it an invalid regex.
    _UNMATCHED_KEY = '** unmatched **'

    def targets_by_pattern(targets, patterns):
      mapping = defaultdict(list)
      for target in targets:
        matched_pattern = None
        for pattern in patterns:
          if re.search(pattern, target.address.spec) is not None:
            matched_pattern = pattern
            break
        if matched_pattern is None:
          mapping[_UNMATCHED_KEY].append(target)
        else:
          mapping[matched_pattern].append(target)
      return mapping

    is_explain = self.global_options.explain
    update_reporting(self.global_options, is_quiet_task() or is_explain, self.run_tracker)

    if self.global_options.exclude_target_regexp:
      excludes = self.global_options.exclude_target_regexp
      log.debug('excludes:\n  {excludes}'.format(excludes='\n  '.join(excludes)))
      by_pattern = targets_by_pattern(self.targets, excludes)
      self.targets = by_pattern[_UNMATCHED_KEY]
      # The rest of this if-statement is just for debug logging.
      log.debug('Targets after excludes: {targets}'.format(
          targets=', '.join(t.address.spec for t in self.targets)))
      excluded_count = sum(len(by_pattern[p]) for p in excludes)
      log.debug('Excluded {count} target{plural}.'.format(count=excluded_count,
          plural=('s' if excluded_count != 1 else '')))
      for pattern in excludes:
        log.debug('Targets excluded by pattern {pattern}\n  {targets}'.format(pattern=pattern,
            targets='\n  '.join(t.address.spec for t in by_pattern[pattern])))

    context = Context(
      config=self.config,
      new_options=self.new_options,
      run_tracker=self.run_tracker,
      target_roots=self.targets,
      requested_goals=self.requested_goals,
      build_graph=self.build_graph,
      build_file_parser=self.build_file_parser,
      address_mapper=self.address_mapper,
      spec_excludes=self.get_spec_excludes()
    )

    unknown = []
    for goal in self.goals:
      if not goal.ordered_task_names():
        unknown.append(goal)

    if unknown:
      context.log.error('Unknown goal(s): %s\n' % ' '.join(goal.name for goal in unknown))
      return 1

    engine = RoundEngine()
    return engine.execute(context, self.goals)