Example #1
def _setup_aggregated_disk_logging(filebase):
  filename = os.path.join(LogOptions.log_dir(), filebase + '.log')
  formatter = ProxyFormatter(LogOptions.disk_log_scheme)
  file_handler = PreambleFileHandler(filename, formatter.preamble())
  file_handler.setFormatter(formatter)
  file_handler.addFilter(GenericFilter(lambda level: level >= LogOptions.disk_log_level()))
  return [file_handler]
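The helper above routes every record at or above the disk log level into a single aggregated file. Below is a rough standard-library sketch of the same pattern; GenericFilter, ProxyFormatter and PreambleFileHandler are twitter.common.log internals, so plain logging.FileHandler, logging.Formatter and a logging.Filter subclass stand in for them and the format string is only illustrative.

import logging
import os


class MinLevelFilter(logging.Filter):
  """Pass records at or above a threshold, mirroring the GenericFilter lambda above."""

  def __init__(self, min_level):
    logging.Filter.__init__(self)
    self._min_level = min_level

  def filter(self, record):
    return record.levelno >= self._min_level


def setup_aggregated_file_logging(log_dir, filebase, min_level=logging.INFO):
  # One aggregated file: <log_dir>/<filebase>.log
  handler = logging.FileHandler(os.path.join(log_dir, filebase + '.log'))
  handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
  handler.addFilter(MinLevelFilter(min_level))
  return [handler]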
Example #2
def init(filebase):
  """
    Set up default logging using:
      {--log_dir}/filebase.{INFO,WARNING,...}
  """
  logging._acquireLock()

  # set up permissive logger
  root_logger = logging.getLogger()
  root_logger.setLevel(logging.DEBUG)

  # clear existing handlers
  teardown_stderr_logging()
  teardown_disk_logging()

  # setup INFO...FATAL handlers
  num_disk_handlers = 0
  for handler in _setup_disk_logging(filebase):
    root_logger.addHandler(handler)
    _DISK_LOGGERS.append(handler)
  for handler in _setup_stderr_logging():
    root_logger.addHandler(handler)
    _STDERR_LOGGERS.append(handler)

  logging._releaseLock()

  if len(_DISK_LOGGERS) > 0 and LogOptions.stderr_log_level() != LogOptions.LOG_LEVEL_NONE:
    print('Writing log files to disk in %s' % LogOptions.log_dir(), file=sys.stderr)

  return root_logger
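A minimal usage sketch for init(), assuming only the LogOptions setters that appear in the other examples on this page; the directory and filebase names are made up.

from twitter.common import log
from twitter.common.log.options import LogOptions

LogOptions.set_log_dir('/tmp/myapp-logs')   # hypothetical directory
LogOptions.set_disk_log_level('DEBUG')
LogOptions.set_stderr_log_level('INFO')
log.init('myapp')                           # writes /tmp/myapp-logs/myapp.{INFO,WARNING,...}
log.info('logging initialized')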
Example #3
 def setup_class(cls):
     cls.LOG_DIR = tempfile.mkdtemp()
     LogOptions.set_log_dir(cls.LOG_DIR)
     LogOptions.set_disk_log_level("DEBUG")
     log.init("executor_logger")
     if not cls.PANTS_BUILT and "SKIP_PANTS_BUILD" not in os.environ:
         assert subprocess.call(["./pants", "src/main/python/apache/aurora/executor/bin:thermos_runner"]) == 0
         cls.PANTS_BUILT = True
Example #4
def setup():
  LogOptions.set_stderr_log_level('NONE')

  app.add_option('--iface', default='eth0', type=str)
  app.add_option('--port', default=3888, type=int)
  app.add_option('-c', '--colors', default=False, action='store_true')
  app.add_option('--dump-bad-packet', default=False, action='store_true')
  app.add_option('--version', default=False, action='store_true')
Example #5
    def run(self, lock):
        if self.options.dry_run:
            print "****** Dry Run ******"

        logger = None
        if self.options.log or self.options.log_level:
            from twitter.common.log import init
            from twitter.common.log.options import LogOptions

            LogOptions.set_stderr_log_level((self.options.log_level or "info").upper())
            logdir = self.options.logdir or self.config.get("goals", "logdir", default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                init("goals")
            else:
                init()
            logger = log

        if self.options.recursive_directory:
            log.warn("--all-recursive is deprecated, use a target spec with the form [dir]:: instead")
            for dir in self.options.recursive_directory:
                self.add_target_recursive(dir)

        if self.options.target_directory:
            log.warn("--all is deprecated, use a target spec with the form [dir]: instead")
            for dir in self.options.target_directory:
                self.add_target_directory(dir)

        context = Context(
            self.config,
            self.options,
            self.targets,
            requested_goals=self.requested_goals,
            lock=lock,
            log=logger,
            timer=self.timer if self.options.time else None,
        )

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            print ("Unknown goal(s): %s" % " ".join(phase.name for phase in unknown))
            print ("")
            return Phase.execute(context, "goals")

        if logger:
            logger.debug("Operating on targets: %s", self.targets)

        ret = Phase.attempt(context, self.phases)
        if self.options.time:
            print ("Timing report")
            print ("=============")
            self.timer.print_timings()
        return ret
Example #6
  def run(self, lock):
    # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.options.log_level:
      LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
      logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
        log.init('goals')
      else:
        log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_console_task():
      for phase in self.phases:
        for goal in phase.goals():
          if issubclass(goal.task_type, ConsoleTask):
            return True
      return False

    is_explain = self.options.explain
    update_reporting(self.options, is_console_task() or is_explain, self.run_tracker)

    if self.options.dry_run:
      print('****** Dry Run ******')

    context = Context(
      self.config,
      self.options,
      self.run_tracker,
      self.targets,
      requested_goals=self.requested_goals,
      lock=lock)

    if self.options.recursive_directory:
      context.log.warn(
        '--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
      for dir in self.options.recursive_directory:
        self.add_target_recursive(dir)

    if self.options.target_directory:
      context.log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
      for dir in self.options.target_directory:
        self.add_target_directory(dir)

    unknown = []
    for phase in self.phases:
      if not phase.goals():
        unknown.append(phase)

    if unknown:
      _list_goals(context, 'Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
      return 1

    return Goal._execute(context, self.phases, print_timing=self.options.time)
Example #7
 def setup_class(cls):
   cls.LOG_DIR = tempfile.mkdtemp()
   LogOptions.set_log_dir(cls.LOG_DIR)
   LogOptions.set_disk_log_level('DEBUG')
   log.init('executor_logger')
   if not cls.PANTS_BUILT and 'SKIP_PANTS_BUILD' not in os.environ:
     assert subprocess.call(["./pants", "binary",
         "src/main/python/apache/thermos/bin:thermos_runner"]) == 0
     cls.PANTS_BUILT = True
Example #8
  def run(self, lock):
    with self.check_errors("Target contains a dependency cycle") as error:
      for target in self.targets:
        try:
          InternalTarget.check_cycles(target)
        except InternalTarget.CycleException as e:
          error(target.id)

    timer = None
    if self.options.time:
      class Timer(object):
        def now(self):
          return time.time()
        def log(self, message):
          print(message)
      timer = Timer()

    logger = None
    if self.options.log or self.options.log_level:
      from twitter.common.log import init
      from twitter.common.log.options import LogOptions
      LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
      logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
        init('goals')
      else:
        init()
      logger = log

    if self.options.recursive_directory:
      log.warn('--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
      for dir in self.options.recursive_directory:
        self.add_target_recursive(dir)

    if self.options.target_directory:
      log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
      for dir in self.options.target_directory:
        self.add_target_directory(dir)

    context = Context(self.config, self.options, self.targets, lock=lock, log=logger)

    unknown = []
    for phase in self.phases:
      if not phase.goals():
        unknown.append(phase)

    if unknown:
        print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
        print('')
        return Phase.execute(context, 'goals')

    if logger:
      logger.debug('Operating on targets: %s', self.targets)

    return Phase.attempt(context, self.phases, timer=timer)
Example #9
  def execute(self):
    def add_targets(dir, buildfile):
      try:
        self.targets.extend(Target.get(addr) for addr in Target.get_all_addresses(buildfile))
      except (TypeError, ImportError):
        error(dir, include_traceback=True)
      except (IOError, SyntaxError):
        error(dir)

    if self.options.recursive_directory:
      with self.check_errors('There was a problem scanning the '
                             'following directories for targets:') as error:
        for dir in self.options.recursive_directory:
          for buildfile in BuildFile.scan_buildfiles(self.root_dir, dir):
            add_targets(dir, buildfile)

    if self.options.target_directory:
      with self.check_errors("There was a problem loading targets "
                             "from the following directory's BUILD files") as error:
        for dir in self.options.target_directory:
          add_targets(dir, BuildFile(self.root_dir, dir))

    timer = None
    if self.options.time:
      class Timer(object):
        def now(self):
          return time.time()
        def log(self, message):
          print(message)
      timer = Timer()

    logger = None
    if self.options.log or self.options.log_level:
      from twitter.common.log import init
      from twitter.common.log.options import LogOptions
      LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
      logdir = self.config.get('goals', 'logdir')
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
      init('goals')
      logger = log

    context = Context(self.config, self.options, self.targets, log=logger)

    unknown = []
    for phase in self.phases:
      if not phase.goals():
        unknown.append(phase)

    if unknown:
        print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
        print()
        return Phase.execute(context, 'goals')

    return Phase.attempt(context, self.phases, timer=timer)
Example #10
 def setup_class(cls):
   cls.LOG_DIR = tempfile.mkdtemp()
   LogOptions.set_log_dir(cls.LOG_DIR)
   LogOptions.set_disk_log_level('DEBUG')
   log.init('executor_logger')
   if not cls.PEX_PATH:
     pex_dir = tempfile.mkdtemp()
     assert subprocess.call(["./pants", "--pants-distdir=%s" % pex_dir, "binary",
         "src/main/python/apache/thermos/runner:thermos_runner"]) == 0
     cls.PEX_PATH = os.path.join(pex_dir, 'thermos_runner.pex')
Example #11
def _setup_scribe_logging():
  filter = GenericFilter(lambda r_l: r_l >= LogOptions.scribe_log_level())
  formatter = ProxyFormatter(LogOptions.scribe_log_scheme)
  scribe_handler = ScribeHandler(buffer=LogOptions.scribe_buffer(),
                                 category=LogOptions.scribe_category(),
                                 host=LogOptions.scribe_host(),
                                 port=LogOptions.scribe_port())
  scribe_handler.setFormatter(formatter)
  scribe_handler.addFilter(filter)
  return [scribe_handler]
Example #12
  def run(self, lock):
    if self.options.dry_run:
      print('****** Dry Run ******')

    logger = None
    if self.options.log or self.options.log_level:
      from twitter.common.log import init
      from twitter.common.log.options import LogOptions
      LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
      logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
        init('goals')
      else:
        init()
      logger = log

    if self.options.recursive_directory:
      log.warn('--all-recursive is deprecated, use a target spec with the form [dir]:: instead')
      for dir in self.options.recursive_directory:
        self.add_target_recursive(dir)

    if self.options.target_directory:
      log.warn('--all is deprecated, use a target spec with the form [dir]: instead')
      for dir in self.options.target_directory:
        self.add_target_directory(dir)

    context = Context(
      self.config,
      self.options,
      self.targets,
      lock=lock,
      log=logger,
      timer=self.timer if self.options.time else None)

    unknown = []
    for phase in self.phases:
      if not phase.goals():
        unknown.append(phase)

    if unknown:
        print('Unknown goal(s): %s' % ' '.join(phase.name for phase in unknown))
        print('')
        return Phase.execute(context, 'goals')

    if logger:
      logger.debug('Operating on targets: %s', self.targets)

    ret = Phase.attempt(context, self.phases)
    if self.options.time:
      print('Timing report')
      print('=============')
      self.timer.print_timings()
    return ret
Example #13
    def run(self, lock):
        with self.check_errors("Target contains a dependency cycle") as error:
            with self.timer.timing("parse:check_cycles"):
                for target in self.targets:
                    try:
                        InternalTarget.check_cycles(target)
                    except InternalTarget.CycleException as e:
                        error(target.id)

        logger = None
        if self.options.log or self.options.log_level:
            from twitter.common.log import init
            from twitter.common.log.options import LogOptions

            LogOptions.set_stderr_log_level((self.options.log_level or "info").upper())
            logdir = self.options.logdir or self.config.get("goals", "logdir", default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                init("goals")
            else:
                init()
            logger = log

        if self.options.recursive_directory:
            log.warn("--all-recursive is deprecated, use a target spec with the form [dir]:: instead")
            for dir in self.options.recursive_directory:
                self.add_target_recursive(dir)

        if self.options.target_directory:
            log.warn("--all is deprecated, use a target spec with the form [dir]: instead")
            for dir in self.options.target_directory:
                self.add_target_directory(dir)

        context = Context(self.config, self.options, self.targets, lock=lock, log=logger)

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            print("Unknown goal(s): %s" % " ".join(phase.name for phase in unknown))
            print("")
            return Phase.execute(context, "goals")

        if logger:
            logger.debug("Operating on targets: %s", self.targets)

        ret = Phase.attempt(context, self.phases, timer=self.timer if self.options.time else None)
        if self.options.time:
            print("Timing report")
            print("=============")
            self.timer.print_timings()
        return ret
Example #14
def setup():
  LogOptions.set_stderr_log_level('NONE')

  app.add_option('--packet-filter', default='tcp', type=str,
                 help='pcap filter string. e.g. "tcp portrange 11221-32767" for JUnit tests')
  app.add_option('-c', '--colors', default=False, action='store_true')
  app.add_option('--dump-bad-packet', default=False, action='store_true')
  app.add_option('--include-pings', default=False, action='store_true',
                 help='Whether to include ZAB/ZK pings')
  app.add_option('--offline', default=None, type=str,
                 help='offline mode with a pcap file')
  app.add_option('--version', default=False, action='store_true')
Example #15
def setup():
  LogOptions.set_stderr_log_level('NONE')

  app.add_option('--iface', default='eth0', type=str,
                 help='The interface to sniff on')
  app.add_option('--port', default=2889, type=int,
                 help='The ZAB port used by the leader')
  app.add_option('-c', '--colors', default=False, action='store_true',
                 help='Color each learner/leader stream differently')
  app.add_option('--dump-bad-packet', default=False, action='store_true',
                 help='Dump packets that cannot be deserialized')
  app.add_option('--include-pings', default=False, action='store_true',
                 help='Whether to include pings sent from learners to the leader')
  app.add_option('--version', default=False, action='store_true')
Example #16
def _setup_stderr_logging():
  filter = GenericFilter(lambda r_l: r_l >= LogOptions.stderr_log_level())
  formatter = ProxyFormatter(LogOptions.stderr_log_scheme)
  stderr_handler = logging.StreamHandler(sys.stderr)
  stderr_handler.setFormatter(formatter)
  stderr_handler.addFilter(filter)
  return [stderr_handler]
Example #17
  def _cmdline(self):
    host_sandbox = None
    if os.environ.get('MESOS_DIRECTORY'):
      host_sandbox = os.path.join(os.environ.get('MESOS_DIRECTORY'), 'sandbox')

    params = dict(log_dir=LogOptions.log_dir(),
                  log_to_disk='DEBUG',
                  checkpoint_root=self._checkpoint_root,
                  sandbox=host_sandbox or self._root,
                  task_id=self._task_id,
                  thermos_json=self._task_filename,
                  hostname=self._hostname,
                  process_logger_destination=self._process_logger_destination,
                  process_logger_mode=self._process_logger_mode,
                  rotate_log_size_mb=self._rotate_log_size_mb,
                  rotate_log_backups=self._rotate_log_backups)

    if getpass.getuser() == 'root' and self._role:
      params.update(setuid=self._role)

    cmdline_args = [sys.executable, self._runner_pex]
    cmdline_args.extend(
        '--%s=%s' % (flag, value) for flag, value in params.items() if value is not None)
    if self._enable_chroot:
      cmdline_args.extend(['--enable_chroot'])
    if self._preserve_env:
      cmdline_args.extend(['--preserve_env'])
    if self._sandbox.is_filesystem_image:
      cmdline_args.extend(
          ['--mesos_containerizer_path=%s' % self._mesos_containerizer_path])
    for name, port in self._ports.items():
      cmdline_args.extend(['--port=%s:%s' % (name, port)])
    return cmdline_args
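The flag-building idiom in _cmdline() above turns a params dict into '--key=value' arguments and drops unset entries. A self-contained sketch with made-up values and a hypothetical pex path:

import sys

params = dict(log_dir='/var/run/thermos/log',   # illustrative values only
              log_to_disk='DEBUG',
              task_id='task-1234',
              hostname=None)                    # None entries are skipped

runner_pex = '/path/to/thermos_runner.pex'      # hypothetical path
cmdline_args = [sys.executable, runner_pex]
cmdline_args.extend(
    '--%s=%s' % (flag, value) for flag, value in params.items() if value is not None)
# e.g. [sys.executable, runner_pex, '--log_dir=/var/run/thermos/log',
#       '--log_to_disk=DEBUG', '--task_id=task-1234']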
Example #18
    def _cmdline(self):
        params = dict(
            log_dir=LogOptions.log_dir(),
            log_to_disk="DEBUG",
            checkpoint_root=self._checkpoint_root,
            sandbox=self._sandbox.root,
            container_sandbox=self._sandbox.container_root,
            task_id=self._task_id,
            thermos_json=self._task_filename,
            hostname=self._hostname,
            process_logger_destination=self._process_logger_destination,
            process_logger_mode=self._process_logger_mode,
            rotate_log_size_mb=self._rotate_log_size_mb,
            rotate_log_backups=self._rotate_log_backups,
        )

        if getpass.getuser() == "root" and self._role:
            params.update(setuid=self._role)

        cmdline_args = [sys.executable, self._runner_pex]
        cmdline_args.extend("--%s=%s" % (flag, value) for flag, value in params.items() if value is not None)
        if self._enable_chroot:
            cmdline_args.extend(["--enable_chroot"])
        if self._preserve_env:
            cmdline_args.extend(["--preserve_env"])
        if self._sandbox.is_filesystem_image:
            cmdline_args.extend(["--mesos_containerizer_path=%s" % self._mesos_containerizer_path])
        for name, port in self._ports.items():
            cmdline_args.extend(["--port=%s:%s" % (name, port)])
        return cmdline_args
Example #19
  def publish(self, ivyxml_path, jar, entry, repo, published):
    """Run ivy to publish a jar.  ivyxml_path is the path to the ivy file; published
    is a list of jars published so far (including this one). entry is a pushdb entry."""
    jvm_args = self._ivy_jvm_args(repo)
    resolver = repo['resolver']
    path = repo.get('path')

    try:
      ivy = Bootstrapper.default_ivy()
    except Bootstrapper.Error as e:
      raise TaskError('Failed to push {0}! {1}'.format(pushdb_coordinate(jar, entry), e))

    ivysettings = self.generate_ivysettings(ivy, published, publish_local=path)
    args = [
      '-settings', ivysettings,
      '-ivy', ivyxml_path,
      '-deliverto', '%s/[organisation]/[module]/ivy-[revision].xml' % self.workdir,
      '-publish', resolver,
      '-publishpattern', '%s/[organisation]/[module]/'
                         '[artifact]-[revision](-[classifier]).[ext]' % self.workdir,
      '-revision', entry.version().version(),
      '-m2compatible',
    ]

    if LogOptions.stderr_log_level() == logging.DEBUG:
      args.append('-verbose')

    if self.local_snapshot:
      args.append('-overwrite')

    try:
      ivy.execute(jvm_options=jvm_args, args=args,
                  workunit_factory=self.context.new_workunit, workunit_name='jar-publish')
    except Ivy.Error as e:
      raise TaskError('Failed to push {0}! {1}'.format(pushdb_coordinate(jar, entry), e))
Example #20
    def _cmdline(self):
        host_sandbox = None
        if os.environ.get("MESOS_DIRECTORY"):
            host_sandbox = os.path.join(os.environ.get("MESOS_DIRECTORY"), "sandbox")

        params = dict(
            log_dir=LogOptions.log_dir(),
            log_to_disk="DEBUG",
            checkpoint_root=self._checkpoint_root,
            sandbox=host_sandbox or self._root,
            task_id=self._task_id,
            thermos_json=self._task_filename,
            hostname=self._hostname,
            process_logger_mode=self._process_logger_mode,
            rotate_log_size_mb=self._rotate_log_size_mb,
            rotate_log_backups=self._rotate_log_backups,
        )

        if getpass.getuser() == "root" and self._role:
            params.update(setuid=self._role)

        cmdline_args = [sys.executable, self._runner_pex]
        cmdline_args.extend("--%s=%s" % (flag, value) for flag, value in params.items() if value is not None)
        if self._enable_chroot:
            cmdline_args.extend(["--enable_chroot"])
        for name, port in self._ports.items():
            cmdline_args.extend(["--port=%s:%s" % (name, port)])
        return cmdline_args
Example #21
          def publish(ivyxml_path):
            ivysettings = self.generate_ivysettings(published, publish_local=path)
            args = [
              '-settings', ivysettings,
              '-ivy', ivyxml_path,
              '-deliverto', '%s/[organisation]/[module]/ivy-[revision].xml' % self.outdir,
              '-publish', resolver,
              '-publishpattern', '%s/[organisation]/[module]/'
                                 '[artifact]-[revision](-[classifier]).[ext]' % self.outdir,
              '-revision', newver.version(),
              '-m2compatible',
            ]

            if LogOptions.stderr_log_level() == logging.DEBUG:
              args.append('-verbose')

            if self.snapshot:
              args.append('-overwrite')

            try:
              ivy = Bootstrapper.default_ivy()
              ivy.execute(jvm_options=jvm_args, args=args,
                          workunit_factory=self.context.new_workunit, workunit_name='jar-publish')
            except (Bootstrapper.Error, Ivy.Error) as e:
              raise TaskError('Failed to push %s! %s' % (jar_coordinate(jar, newver.version()), e))
Example #22
  def run(self, lock):
    # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.options.log_level:
      LogOptions.set_stderr_log_level((self.options.log_level or 'info').upper())
      logdir = self.options.logdir or self.config.get('goals', 'logdir', default=None)
      if logdir:
        safe_mkdir(logdir)
        LogOptions.set_log_dir(logdir)
        log.init('goals')
      else:
        log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_console_task():
      for phase in self.phases:
        for goal in phase.goals():
          if issubclass(goal.task_type, ConsoleTask):
            return True
      return False

    is_explain = self.options.explain
    update_reporting(self.options, is_console_task() or is_explain, self.run_tracker)

    context = Context(
      self.config,
      self.options,
      self.run_tracker,
      self.targets,
      requested_goals=self.requested_goals,
      build_graph=self.build_graph,
      build_file_parser=self.build_file_parser,
      lock=lock)

    unknown = []
    for phase in self.phases:
      if not phase.goals():
        unknown.append(phase)

    if unknown:
      context.log.error('Unknown goal(s): %s\n' % ' '.join(phase.name for phase in unknown))
      return 1

    engine = GroupEngine()
    return engine.execute(context, self.phases)
Example #23
def setup():
  LogOptions.set_stderr_log_level('NONE')

  app.add_option('--iface', default='eth0', type=str, metavar='<iface>',
                 help='The interface to sniff on')
  app.add_option('--client-port', default=0, type=int, metavar='<client_port>',
                 help='The client port to filter by')
  app.add_option('--zookeeper-port', default=2181, type=int, metavar='<server_port>',
                 help='The ZooKeeper server port to filter by')
  app.add_option('--max-queued-requests', default=10000, type=int, metavar='<max>',
                 help='The maximum number of requests queued to be deserialized')
  app.add_option('--exclude-host',
                 dest='excluded_hosts',
                 metavar='<host>',
                 default=[],
                 action='append',
                 help='Host that should be excluded (you can use this multiple times)')
  app.add_option('--include-host',
                 dest='included_hosts',
                 metavar='<host>',
                 default=[],
                 action='append',
                 help='Host that should be included (you can use this multiple times)')
  app.add_option('--count-requests', default=0, type=int, metavar='<nreqs>',
                 help='Count N requests and report a summary (default: group by path)')
  app.add_option('--measure-latency', default=0, type=int, metavar='<nreqs>',
                 help='Measure latency of N pairs of requests and replies (default: group by path)')
  app.add_option('--group-by', default='path', type=str, metavar='<group>',
                 help='Used with --count-requests or --measure-latency. Possible values: path, type or client')
  app.add_option('--sort-by', default='avg', type=str, metavar='<sort>',
                 help='Used with --measure-latency. Possible values: avg, p95 and p99')
  app.add_option("--aggregation-depth", default=0, type=int, metavar='<depth>',
                 help="Aggregate paths up to a certain depth. Used with --count-requests or --measure-latency")
  app.add_option('--unpaired', default=False, action='store_true',
                 help='Don\'t pair reqs/reps')
  app.add_option('-p', '--include-pings', default=False, action='store_true',
                 help='Whether to include ping requests and replies')
  app.add_option('-c', '--colors', default=False, action='store_true',
                 help='Color each client/server stream differently')
  app.add_option('--dump-bad-packet', default=False, action='store_true',
                 help='If unable to deserialize a packet, print it out')
  app.add_option('--version', default=False, action='store_true')
Example #24
 def _set_log_level(self, log_level_override=''):
   stderr_log_level = LogOptions.stderr_log_level()
   # set default level to FATAL.
   # we do this here (instead of add_option) to distinguish when an override is set.
   if stderr_log_level == log.INFO and log_level_override != 'INFO':
     stderr_log_level = log.FATAL
   # default to using stderr logging level, setting override if applicable
   log_level = getattr(log, log_level_override, stderr_log_level)
   # set the logger
   zk_log_level = ZookeeperLoggingSubsystem._ZK_LOG_LEVEL_MAP.get(
       log_level, zookeeper.LOG_LEVEL_ERROR)
   zookeeper.set_debug_level(zk_log_level)
Example #25
  def _cmdline(self):
    params = dict(log_dir=LogOptions.log_dir(),
                  log_to_disk='DEBUG',
                  checkpoint_root=self._checkpoint_root,
                  sandbox=self._root,
                  task_id=self._task_id,
                  thermos_json=self._task_filename)

    if getpass.getuser() == 'root':
      params.update(setuid=self._role)

    cmdline_args = [self._runner_pex]
    cmdline_args.extend('--%s=%s' % (flag, value) for flag, value in params.items())
    if self._enable_chroot:
      cmdline_args.extend(['--enable_chroot'])
    for name, port in self._ports.items():
      cmdline_args.extend(['--port=%s:%s' % (name, port)])
    return cmdline_args
Example #26
def init(filebase=None):
  """
    Sets up default stderr logging and, if filebase is supplied, sets up disk logging using:
      {--log_dir}/filebase.{INFO,WARNING,...}

    If '--log_simple' is specified, logs are written into a single file:
      {--log_dir}/filebase.log
  """
  logging._acquireLock()

  # set up permissive logger
  root_logger = logging.getLogger()
  root_logger.setLevel(logging.DEBUG)

  # clear existing handlers
  teardown_scribe_logging()
  teardown_stderr_logging()
  teardown_disk_logging()
  for handler in root_logger.handlers:
    root_logger.removeHandler(handler)

  # setup INFO...FATAL handlers
  if filebase:
    _initialize_disk_logging()
    initializer = _setup_aggregated_disk_logging if LogOptions.simple() else _setup_disk_logging
    for handler in initializer(filebase):
      root_logger.addHandler(handler)
      _DISK_LOGGERS.append(handler)

  if LogOptions._is_scribe_logging_required():
    try:
      for handler in _setup_scribe_logging():
        root_logger.addHandler(handler)
        _SCRIBE_LOGGERS.append(handler)
    except ScribeHandler.ScribeHandlerException as err:
      print_stderr(err)

  for handler in _setup_stderr_logging():
    root_logger.addHandler(handler)
    _STDERR_LOGGERS.append(handler)

  logging._releaseLock()

  if len(_DISK_LOGGERS) > 0:
    print_stderr('Writing log files to disk in %s' % LogOptions.log_dir())
  if len(_SCRIBE_LOGGERS) > 0:
    print_stderr('Sending log messages to scribe host=%s:%d category=%s'
          % (LogOptions.scribe_host(), LogOptions.scribe_port(), LogOptions.scribe_category()))

  return root_logger
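A short sketch of the two disk-logging modes described in the docstring above, using only calls that appear elsewhere on this page; the directory and filebase are illustrative.

from twitter.common import log
from twitter.common.log.options import LogOptions

LogOptions.set_log_dir('/tmp/myapp-logs')
LogOptions.set_simple(True)     # aggregated mode: a single /tmp/myapp-logs/myapp.log
# LogOptions.set_simple(False)  # default mode: per-level files myapp.{INFO,WARNING,...}
log.init('myapp')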
Example #27
def _setup_disk_logging(filebase):
  handlers = []
  logroot = LogOptions.log_dir()
  safe_mkdir(logroot)
  now = time.localtime()

  def gen_filter(level):
    return GenericFilter(
      lambda record_level: record_level == level and level >= LogOptions.disk_log_level())

  def gen_link_filename(filebase, level):
    return '%(filebase)s.%(level)s' % {
      'filebase': filebase,
      'level': level,
    }

  hostname = gethostname()
  username = getpass.getuser()
  pid = os.getpid()
  datestring = time.strftime('%Y%m%d-%H%M%S', time.localtime())
  def gen_verbose_filename(filebase, level):
    return '%(filebase)s.%(hostname)s.%(user)s.log.%(level)s.%(date)s.%(pid)s' % {
      'filebase': filebase,
      'hostname': hostname,
      'user': username,
      'level': level,
      'date': datestring,
      'pid': pid
    }

  for filter_type, filter_name in _FILTER_TYPES.items():
    formatter = ProxyFormatter(LogOptions.disk_log_scheme)
    filter = gen_filter(filter_type)
    full_filebase = os.path.join(logroot, filebase)
    logfile_link = gen_link_filename(full_filebase, filter_name)
    logfile_full = gen_verbose_filename(full_filebase, filter_name)
    file_handler = logging.FileHandler(logfile_full)
    file_handler.setFormatter(formatter)
    file_handler.addFilter(filter)
    handlers.append(file_handler)
    _safe_setup_link(logfile_link, logfile_full)
  return handlers
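For reference, the naming helpers above pair a stable per-level link with a timestamped file that _safe_setup_link points it at. A standalone sketch of the same naming scheme with made-up host, user, date and pid values:

import getpass
import os
import time
from socket import gethostname

full_filebase, level = '/tmp/myapp-logs/myapp', 'INFO'
link_name = '%s.%s' % (full_filebase, level)        # myapp.INFO (stable link)
real_name = '%s.%s.%s.log.%s.%s.%s' % (
    full_filebase, gethostname(), getpass.getuser(), level,
    time.strftime('%Y%m%d-%H%M%S'), os.getpid())    # timestamped file the link targets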
Example #28
  def _cmdline(self):
    host_sandbox = None
    if os.environ.get('MESOS_DIRECTORY'):
      host_sandbox = os.path.join(os.environ.get('MESOS_DIRECTORY'), 'sandbox')

    params = dict(log_dir=LogOptions.log_dir(),
                  log_to_disk='DEBUG',
                  checkpoint_root=self._checkpoint_root,
                  sandbox=host_sandbox or self._root,
                  task_id=self._task_id,
                  thermos_json=self._task_filename,
                  hostname=self._hostname)

    if getpass.getuser() == 'root' and self._role:
      params.update(setuid=self._role)

    cmdline_args = [sys.executable, self._runner_pex]
    cmdline_args.extend('--%s=%s' % (flag, value) for flag, value in params.items())
    if self._enable_chroot:
      cmdline_args.extend(['--enable_chroot'])
    for name, port in self._ports.items():
      cmdline_args.extend(['--port=%s:%s' % (name, port)])
    return cmdline_args
Example #29
 def set_quiet(option, _1, _2, parser):
     setattr(parser.values, option.dest, 'quiet')
     LogOptions.set_stderr_log_level('NONE')
Example #30
 def set_verbose(option, _1, _2, parser):
     setattr(parser.values, option.dest, 'verbose')
     LogOptions.set_stderr_log_level('DEBUG')
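These two callbacks are typically wired to optparse-style flags. One plausible registration is sketched below; the flag names, dest and defaults are assumptions chosen for illustration, not taken from the source, and it relies on the set_quiet/set_verbose callbacks defined in the two examples above.

from twitter.common import app

app.add_option('-v', '--verbose', dest='verbosity', default='normal',
               action='callback', callback=set_verbose,
               help='Verbose logging.')
app.add_option('-q', '--quiet', dest='verbosity', default='normal',
               action='callback', callback=set_quiet,
               help='Quiet logging.')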
Example #31
import os
import shutil
import tempfile
import unittest

from mysos.scheduler.state import (LocalStateProvider, MySQLCluster, MySQLTask,
                                   Scheduler)

from mesos.interface.mesos_pb2 import FrameworkInfo

if 'MYSOS_DEBUG' in os.environ:
    from twitter.common import log
    from twitter.common.log.options import LogOptions
    LogOptions.set_stderr_log_level('google:DEBUG')
    LogOptions.set_simple(True)
    log.init('mysos_tests')


class TestState(unittest.TestCase):
    def setUp(self):
        self._tmpdir = tempfile.mkdtemp()
        self._state_provider = LocalStateProvider(self._tmpdir)

    def tearDown(self):
        shutil.rmtree(self._tmpdir, True)

    def test_scheduler_state(self):
        expected = Scheduler(
            FrameworkInfo(user='******',
                          name='test_fw_name',
                          checkpoint=True))
Example #32
from twitter.common import app
from twitter.common.log.options import LogOptions
from twitter.aurora.executor.thermos_runner import proxy_main as runner_proxy_main


LogOptions.set_simple(True)


def proxy_main():
  main = runner_proxy_main

  app.main()
Example #33
from apache.aurora.executor.thermos_task_runner import (
    DefaultThermosTaskRunnerProvider,
    UserOverrideThermosTaskRunnerProvider
)

try:
  from mesos.executor import MesosExecutorDriver
except ImportError:
  print(traceback.format_exc(), file=sys.stderr)
  MesosExecutorDriver = None


CWD = os.environ.get('MESOS_SANDBOX', '.')

app.configure(debug=True)
LogOptions.set_simple(True)
LogOptions.set_disk_log_level('DEBUG')
LogOptions.set_log_dir(CWD)


app.add_option(
    '--announcer-ensemble',
    dest='announcer_ensemble',
    type=str,
    default=None,
    help='The ensemble to which the Announcer should register ServerSets.')


app.add_option(
    '--announcer-serverset-path',
    dest='announcer_serverset_path',
Example #34
        _, servers, path = zookeeper.parse(zk_url)

        zk_client = FakeClient()
        zk_client.start()
        self_instance = ServiceInstance(
            Endpoint(socket.gethostbyname(socket.gethostname()), port))
        task_control = self._task_control_provider.from_task(task, sandbox)

        return MysosTaskRunner(self_instance, zk_client,
                               posixpath.join(path, cluster_name),
                               NoopPackageInstaller(), task_control, Fake())


# This is a testing executor. We log more verbosely.
LogOptions.disable_disk_logging()
LogOptions.set_stderr_log_level('google:DEBUG')


def proxy_main():
    def main(args, options):
        log.info('Starting testing mysos executor')

        executor = MysosExecutor(
            FakeTaskRunnerProvider(FakeTaskControlProvider()),
            Sandbox(SANDBOX_ROOT))

        driver = mesos.native.MesosExecutorDriver(executor)
        driver.run()

        log.info('Exiting executor main')
Example #35
def log_function(msg):
    if _LOG_MODULE:
        log.error(msg)
    # ensure that at least one message goes to stdout/stderr
    if not _LOG_MODULE or LogOptions.stderr_log_level() > logging.ERROR:
        sys.stderr.write(msg)
Example #36
from twitter.common import app
from twitter.common.log.options import LogOptions

from apache.aurora.client.base import generate_terse_usage
from apache.aurora.client.commands import help as help_commands
from apache.aurora.client.commands import core, run, ssh
from apache.aurora.client.options import add_verbosity_options

# These are side-effecting imports in that they register commands via
# app.command.  This is a poor code practice and should be fixed long-term
# with the creation of twitter.common.cli that allows for argparse-style CLI
# composition.

app.register_commands_from(core, run, ssh)
app.register_commands_from(help_commands)
add_verbosity_options()


def main():
    app.help()


LogOptions.set_stderr_log_level('INFO')
LogOptions.disable_disk_logging()
app.set_name('aurora-client')
app.set_usage(generate_terse_usage())


def proxy_main():
    app.main()
Example #37
    def run(self, lock):
        if self.options.dry_run:
            print('****** Dry Run ******')

        logger = None
        if self.options.log or self.options.log_level:
            from twitter.common.log import init
            from twitter.common.log.options import LogOptions
            LogOptions.set_stderr_log_level((self.options.log_level
                                             or 'info').upper())
            logdir = self.options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                init('goals')
            else:
                init()
            logger = log

        if self.options.recursive_directory:
            log.warn(
                '--all-recursive is deprecated, use a target spec with the form [dir]:: instead'
            )
            for dir in self.options.recursive_directory:
                self.add_target_recursive(dir)

        if self.options.target_directory:
            log.warn(
                '--all is deprecated, use a target spec with the form [dir]: instead'
            )
            for dir in self.options.target_directory:
                self.add_target_directory(dir)

        context = Context(self.config,
                          self.options,
                          self.targets,
                          requested_goals=self.requested_goals,
                          lock=lock,
                          log=logger,
                          timer=self.timer if self.options.time else None)

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            print('Unknown goal(s): %s' % ' '.join(phase.name
                                                   for phase in unknown))
            print('')
            return Phase.execute(context, 'goals')

        if logger:
            logger.debug('Operating on targets: %s', self.targets)

        ret = Phase.attempt(context, self.phases)

        if self.options.cleanup_nailguns or self.config.get(
                'nailgun', 'autokill', default=False):
            if log:
                log.debug('auto-killing nailguns')
            if NailgunTask.killall:
                NailgunTask.killall(log)

        if self.options.time:
            print('Timing report')
            print('=============')
            self.timer.print_timings()

        return ret
Example #38
 def gen_filter(level):
     return GenericFilter(lambda record_level: record_level == level and
                          level >= LogOptions.disk_log_level())
Example #39
    def run(self, lock):
        timer = None
        if self.options.time:

            class Timer(object):
                def now(self):
                    return time.time()

                def log(self, message):
                    print(message)

            timer = Timer()

        logger = None
        if self.options.log or self.options.log_level:
            from twitter.common.log import init
            from twitter.common.log.options import LogOptions
            LogOptions.set_stderr_log_level((self.options.log_level
                                             or 'info').upper())
            logdir = self.options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                init('goals')
            else:
                init()
            logger = log

        if self.options.recursive_directory:
            log.warn(
                '--all-recursive is deprecated, use a target spec with the form [dir]:: instead'
            )
            for dir in self.options.recursive_directory:
                self.add_target_recursive(dir)

        if self.options.target_directory:
            log.warn(
                '--all is deprecated, use a target spec with the form [dir]: instead'
            )
            for dir in self.options.target_directory:
                self.add_target_directory(dir)

        context = Context(self.config,
                          self.options,
                          self.targets,
                          lock=lock,
                          log=logger)

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            print('Unknown goal(s): %s' % ' '.join(phase.name
                                                   for phase in unknown))
            print('')
            return Phase.execute(context, 'goals')

        if logger:
            logger.debug('Operating on targets: %s', self.targets)

        return Phase.attempt(context, self.phases, timer=timer)
"""

from mesos.native import MesosExecutorDriver
from twitter.common import app, log
from twitter.common.log.options import LogOptions
from twitter.common.metrics.sampler import DiskMetricWriter

from apache.aurora.executor.executor_detector import ExecutorDetector
from apache.aurora.executor.gc_executor import ThermosGCExecutor
from apache.thermos.common.path import TaskPath

app.configure(debug=True)

# locate logs locally in executor sandbox
LogOptions.set_simple(True)
LogOptions.set_disk_log_level('DEBUG')
LogOptions.set_log_dir(ExecutorDetector.LOG_PATH)


def proxy_main():
    def main():
        # Create executor stub
        thermos_gc_executor = ThermosGCExecutor(
            checkpoint_root=TaskPath.DEFAULT_CHECKPOINT_ROOT)
        thermos_gc_executor.start()

        # Start metrics collection
        metric_writer = DiskMetricWriter(thermos_gc_executor.metrics,
                                         ExecutorDetector.VARS_PATH)
        metric_writer.start()
Example #41
 def _set_default_log_level(self):
     log_level = LogOptions.stderr_log_level()
     zk_log_level = ZookeeperLoggingSubsystem._ZK_LOG_LEVEL_MAP.get(
         log_level, zookeeper.LOG_LEVEL_ERROR)
     zookeeper.set_debug_level(zk_log_level)
Example #42
    if len(args) == 0:
        app.help()
    for (command, doc) in app.get_commands_and_docstrings():
        if args[0] == command:
            print('command %s:' % command)
            print(doc)
            app.quit(0)
    print('unknown command: %s' % args[0], file=sys.stderr)


def generate_usage():
    usage = """
thermos

commands:
"""

    for (command, doc) in app.get_commands_and_docstrings():
        usage += '    ' + '%-10s' % command + '\t' + doc.split(
            '\n')[0].strip() + '\n'
    app.set_usage(usage)


LogOptions.set_disk_log_level('NONE')
LogOptions.set_stdout_log_level('INFO')
generate_usage()

proxy_main = app.main

proxy_main()
Example #43
    def _do_run(self):
        # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
        # context/work-unit logging and standard python logging doesn't buy us anything.

        # TODO(Eric Ayers) We are missing log messages. Set the log level earlier
        # Enable standard python logging for code with no handle to a context/work-unit.
        if self.global_options.level:
            LogOptions.set_stderr_log_level((self.global_options.level
                                             or 'info').upper())
            logdir = self.global_options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)

                prev_log_level = None
                # If quiet, temporarily change stderr log level to kill init's output.
                if self.global_options.quiet:
                    prev_log_level = LogOptions.loglevel_name(
                        LogOptions.stderr_log_level())
                    # loglevel_name can fail, so only change level if we were able to get the current one.
                    if prev_log_level is not None:
                        LogOptions.set_stderr_log_level(
                            LogOptions._LOG_LEVEL_NONE_KEY)

                log.init('goals')

                if prev_log_level is not None:
                    LogOptions.set_stderr_log_level(prev_log_level)
            else:
                log.init()

        # Update the reporting settings, now that we have flags etc.
        def is_quiet_task():
            for goal in self.goals:
                if goal.has_task_of_type(QuietTaskMixin):
                    return True
            return False

        is_explain = self.global_options.explain
        update_reporting(self.global_options,
                         is_quiet_task() or is_explain, self.run_tracker)

        context = Context(config=self.config,
                          options=self.options,
                          run_tracker=self.run_tracker,
                          target_roots=self.targets,
                          requested_goals=self.requested_goals,
                          build_graph=self.build_graph,
                          build_file_parser=self.build_file_parser,
                          address_mapper=self.address_mapper,
                          spec_excludes=self.get_spec_excludes())

        unknown = []
        for goal in self.goals:
            if not goal.ordered_task_names():
                unknown.append(goal)

        if unknown:
            context.log.error('Unknown goal(s): %s\n' %
                              ' '.join(goal.name for goal in unknown))
            return 1

        engine = RoundEngine()
        return engine.execute(context, self.goals)
Example #44
        options.agent_api_url, options.executor_id_json_path,
        options.disk_usage_json_path,
        Amount(options.task_disk_collection_interval_secs, Time.SECONDS))

    return TaskObserver(
        path_detector,
        Amount(options.polling_interval_secs, Time.SECONDS),
        Amount(options.task_process_collection_interval_secs, Time.SECONDS),
        disable_task_resource_collection=options.disable_task_resource_collection,
        enable_mesos_disk_collector=options.enable_mesos_disk_collector,
        disk_collector_settings=disk_collector_settings)


def main(_, options):
    observer = initialize(options)
    observer.start()
    root_server = configure_server(observer)

    server = ExceptionalThread(
        target=lambda: root_server.run(options.ip, options.port, 'cherrypy'))
    server.daemon = True
    server.start()

    sleep_forever()


LogOptions.set_stderr_log_level('google:INFO')
app.register_module(ExceptionTerminationHandler())
app.main()
Example #45
    def run(self, lock):
        with self.check_errors("Target contains a dependency cycle") as error:
            with self.timer.timing('parse:check_cycles'):
                for target in self.targets:
                    try:
                        InternalTarget.check_cycles(target)
                    except InternalTarget.CycleException as e:
                        error(target.id)

        logger = None
        if self.options.log or self.options.log_level:
            from twitter.common.log import init
            from twitter.common.log.options import LogOptions
            LogOptions.set_stderr_log_level((self.options.log_level
                                             or 'info').upper())
            logdir = self.options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                init('goals')
            else:
                init()
            logger = log

        if self.options.recursive_directory:
            log.warn(
                '--all-recursive is deprecated, use a target spec with the form [dir]:: instead'
            )
            for dir in self.options.recursive_directory:
                self.add_target_recursive(dir)

        if self.options.target_directory:
            log.warn(
                '--all is deprecated, use a target spec with the form [dir]: instead'
            )
            for dir in self.options.target_directory:
                self.add_target_directory(dir)

        context = Context(self.config,
                          self.options,
                          self.targets,
                          lock=lock,
                          log=logger)

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            print('Unknown goal(s): %s' % ' '.join(phase.name
                                                   for phase in unknown))
            print('')
            return Phase.execute(context, 'goals')

        if logger:
            logger.debug('Operating on targets: %s', self.targets)

        ret = Phase.attempt(context,
                            self.phases,
                            timer=self.timer if self.options.time else None)
        if self.options.time:
            print('Timing report')
            print('=============')
            self.timer.print_timings()
        return ret
Example #46
    def run(self):
        # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
        # context/work-unit logging and standard python logging doesn't buy us anything.

        # Enable standard python logging for code with no handle to a context/work-unit.
        if self.global_options.level:
            LogOptions.set_stderr_log_level((self.global_options.level
                                             or 'info').upper())
            logdir = self.global_options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)

                prev_log_level = None
                # If quiet, temporarily change stderr log level to kill init's output.
                if self.global_options.quiet:
                    prev_log_level = LogOptions.loglevel_name(
                        LogOptions.stderr_log_level())
                    # loglevel_name can fail, so only change level if we were able to get the current one.
                    if prev_log_level is not None:
                        LogOptions.set_stderr_log_level(
                            LogOptions._LOG_LEVEL_NONE_KEY)

                log.init('goals')

                if prev_log_level is not None:
                    LogOptions.set_stderr_log_level(prev_log_level)
            else:
                log.init()

        # Update the reporting settings, now that we have flags etc.
        def is_quiet_task():
            for goal in self.goals:
                if goal.has_task_of_type(QuietTaskMixin):
                    return True
            return False

        # Target specs are mapped to the patterns which match them, if any. This variable is a key for
        # specs which don't match any exclusion regexes. We know it won't already be in the list of
        # patterns, because the asterisks in its name make it an invalid regex.
        _UNMATCHED_KEY = '** unmatched **'

        def targets_by_pattern(targets, patterns):
            mapping = defaultdict(list)
            for target in targets:
                matched_pattern = None
                for pattern in patterns:
                    if re.search(pattern, target.address.spec) is not None:
                        matched_pattern = pattern
                        break
                if matched_pattern is None:
                    mapping[_UNMATCHED_KEY].append(target)
                else:
                    mapping[matched_pattern].append(target)
            return mapping

        is_explain = self.global_options.explain
        update_reporting(self.global_options,
                         is_quiet_task() or is_explain, self.run_tracker)

        if self.global_options.exclude_target_regexp:
            excludes = self.global_options.exclude_target_regexp
            log.debug('excludes:\n  {excludes}'.format(
                excludes='\n  '.join(excludes)))
            by_pattern = targets_by_pattern(self.targets, excludes)
            self.targets = by_pattern[_UNMATCHED_KEY]
            # The rest of this if-statement is just for debug logging.
            log.debug('Targets after excludes: {targets}'.format(
                targets=', '.join(t.address.spec for t in self.targets)))
            excluded_count = sum(len(by_pattern[p]) for p in excludes)
            log.debug('Excluded {count} target{plural}.'.format(
                count=excluded_count,
                plural=('s' if excluded_count != 1 else '')))
            for pattern in excludes:
                log.debug('Targets excluded by pattern {pattern}\n  {targets}'.
                          format(pattern=pattern,
                                 targets='\n  '.join(
                                     t.address.spec
                                     for t in by_pattern[pattern])))

        context = Context(config=self.config,
                          new_options=self.new_options,
                          run_tracker=self.run_tracker,
                          target_roots=self.targets,
                          requested_goals=self.requested_goals,
                          build_graph=self.build_graph,
                          build_file_parser=self.build_file_parser,
                          address_mapper=self.address_mapper,
                          spec_excludes=self.get_spec_excludes())

        unknown = []
        for goal in self.goals:
            if not goal.ordered_task_names():
                unknown.append(goal)

        if unknown:
            context.log.error('Unknown goal(s): %s\n' %
                              ' '.join(goal.name for goal in unknown))
            return 1

        engine = RoundEngine()
        return engine.execute(context, self.goals)
Example #47
from gen.apache.aurora.api.AuroraSchedulerManager import Client as scheduler_client
from gen.apache.aurora.api.constants import ACTIVE_STATES, THRIFT_API_VERSION
from gen.apache.aurora.api.ttypes import (
    AcquireLockResult, AddInstancesConfig, AssignedTask, Constraint,
    ExecutorConfig, Identity, JobConfiguration, JobKey, LimitConstraint,
    LockKey, LockValidation, Metadata, PopulateJobResult, ResourceAggregate,
    Response, ResponseCode, ResponseDetail, Result, ScheduledTask,
    ScheduleStatusResult, ServerInfo, TaskConfig, TaskConstraint, TaskQuery,
    ValueConstraint)

# Debug output helper -> enables log.* in source.
if 'UPDATER_DEBUG' in environ:
    from twitter.common import log
    from twitter.common.log.options import LogOptions
    LogOptions.set_disk_log_level('NONE')
    LogOptions.set_stderr_log_level('DEBUG')
    log.init('test_updater')

SERVER_INFO = ServerInfo(thriftAPIVersion=THRIFT_API_VERSION)


def make_response(code, msg='test'):
    return Response(responseCode=code,
                    serverInfo=SERVER_INFO,
                    details=[ResponseDetail(message=msg)])


class FakeConfig(object):
    def __init__(self, role, name, env, update_config):
        self._role = role
Example #48
0
    def run(self, lock):
        # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
        # context/work-unit logging and standard python logging doesn't buy us anything.

        # Enable standard python logging for code with no handle to a context/work-unit.
        if self.options.log_level:
            LogOptions.set_stderr_log_level((self.options.log_level
                                             or 'info').upper())
            logdir = self.options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                log.init('goals')
            else:
                log.init()

        # Update the reporting settings, now that we have flags etc.
        def is_console_task():
            for phase in self.phases:
                for goal in phase.goals():
                    if issubclass(goal.task_type, ConsoleTask):
                        return True
            return False

        is_explain = self.options.explain
        update_reporting(self.options,
                         is_console_task() or is_explain, self.run_tracker)

        if self.options.dry_run:
            print('****** Dry Run ******')

        context = Context(self.config,
                          self.options,
                          self.run_tracker,
                          self.targets,
                          requested_goals=self.requested_goals,
                          lock=lock)

        if self.options.recursive_directory:
            context.log.warn(
                '--all-recursive is deprecated, use a target spec with the form [dir]:: instead'
            )
            for dir in self.options.recursive_directory:
                self.add_target_recursive(dir)

        if self.options.target_directory:
            context.log.warn(
                '--all is deprecated, use a target spec with the form [dir]: instead'
            )
            for dir in self.options.target_directory:
                self.add_target_directory(dir)

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            _list_goals(
                context,
                'Unknown goal(s): %s' % ' '.join(phase.name
                                                 for phase in unknown))
            return 1

        return Goal._execute(context,
                             self.phases,
                             print_timing=self.options.time)
Example #49
0
def print_stderr(message):
    """Emit a message on standard error if logging to stderr is permitted."""
    if LogOptions.stderr_log_level() != LogOptions.LOG_LEVEL_NONE:
        print(message, file=sys.stderr)
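A small hedged usage sketch (not from the source) showing that print_stderr is suppressed once stderr logging is set to NONE:

from twitter.common.log.options import LogOptions

LogOptions.set_stderr_log_level('NONE')  # stderr logging disabled: message is suppressed
print_stderr('not shown')
LogOptions.set_stderr_log_level('INFO')  # stderr logging enabled: message is emitted
print_stderr('shown on stderr')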
Example #50
0
def _initialize_disk_logging():
    safe_mkdir(LogOptions.log_dir())
Example #51
0

from twitter.common import app
from twitter.common.log.options import LogOptions

from apache.aurora.admin import help as help_commands
from apache.aurora.admin import admin, maintenance

from .help import add_verbosity_options, generate_terse_usage

app.register_commands_from(admin, help_commands, maintenance)
add_verbosity_options()


def main():
  app.help()


LogOptions.set_stderr_log_level('INFO')
LogOptions.disable_disk_logging()
app.set_name('aurora-admin')
app.set_usage(generate_terse_usage())


def proxy_main():
  app.main()
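A hedged sketch of how proxy_main would typically be exposed; the setup.py module path below is an assumption, not taken from this module:

# setup.py (module path is hypothetical):
#   entry_points={'console_scripts': ['aurora_admin = apache.aurora.admin.aurora_admin:proxy_main']}

# Equivalent direct invocation when the module is run as a script:
if __name__ == '__main__':
  proxy_main()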
Example #52
0
File: zk.py Project: XXXu/zktraffic
def setup():
    from twitter.common import app

    LogOptions.set_stderr_log_level('NONE')

    app.add_option('--iface',
                   default='eth0',
                   type=str,
                   metavar='<iface>',
                   help='The interface to sniff on')
    app.add_option('--client-port',
                   default=0,
                   type=int,
                   metavar='<client_port>',
                   help='The client port to filter by')
    app.add_option('--zookeeper-port',
                   default=2181,
                   type=int,
                   metavar='<server_port>',
                   help='The ZooKeeper server port to filter by')
    app.add_option(
        '--max-queued-requests',
        default=10000,
        type=int,
        metavar='<max>',
        help='The maximum number of requests queued to be deserialized')
    app.add_option(
        '--exclude-host',
        dest='excluded_hosts',
        metavar='<host>',
        default=[],
        action='append',
        help='Host that should be excluded (you can use this multiple times)')
    app.add_option(
        '--include-host',
        dest='included_hosts',
        metavar='<host>',
        default=[],
        action='append',
        help='Host that should be included (you can use this multiple times)')
    app.add_option(
        '--count-requests',
        default=0,
        type=int,
        metavar='<nreqs>',
        help='Count N requests and report a summary (default: group by path)')
    app.add_option(
        '--measure-latency',
        default=0,
        type=int,
        metavar='<nreqs>',
        help=
        'Measure latency of N pairs of requests and replies (default: group by path)'
    )
    app.add_option(
        '--group-by',
        default='path',
        type=str,
        metavar='<group>',
        help=
        'Used with --count-requests or --measure-latency. Possible values: path, type or client'
    )
    app.add_option(
        '--sort-by',
        default='avg',
        type=str,
        metavar='<sort>',
        help='Used with --measure-latency. Possible values: avg, p95 and p99')
    app.add_option(
        "--aggregation-depth",
        default=0,
        type=int,
        metavar='<depth>',
        help=
        "Aggregate paths up to a certain depth. Used with --count-requests or --measure-latency"
    )
    app.add_option('--unpaired',
                   default=False,
                   action='store_true',
                   help='Don\'t pair reqs/reps')
    app.add_option('-p',
                   '--include-pings',
                   default=False,
                   action='store_true',
                   help='Whether to include ping requests and replies')
    app.add_option('-c',
                   '--colors',
                   default=False,
                   action='store_true',
                   help='Color each client/server stream differently')
    app.add_option('--dump-bad-packet',
                   default=False,
                   action='store_true',
                   help='If unable to deserialize a packet, print it out')
    app.add_option('--version', default=False, action='store_true')
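A minimal hedged sketch (the main body is illustrative, not from zk.py) of how options registered in setup() are consumed with twitter.common.app:

from twitter.common import app


def main(args, options):
    # 'options' carries the values registered via app.add_option() in setup().
    print('sniffing on %s, ZooKeeper port %d' % (options.iface, options.zookeeper_port))


setup()
app.main()  # parses flags, then dispatches to main(args, options)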
Example #53
0
from os import environ

from gen.apache.aurora.api.ttypes import (
  Result,
  ScheduleStatusResult,
  ScheduledTask,
  TaskConfig,
  TaskQuery,
)

from mox import MockObject, Replay, Verify
from pytest import raises


# Debug output helper -> enables log.* in source.
if 'UPDATER_DEBUG' in environ:
  from twitter.common import log
  from twitter.common.log.options import LogOptions
  LogOptions.set_disk_log_level('NONE')
  LogOptions.set_stderr_log_level('DEBUG')
  log.init('test_updater')

class FakeConfig(object):
  def __init__(self, role, name, env, update_config):
    self._role = role
    self._env = env
    self._name = name
    self._update_config = update_config
    self.job_config = None

  def role(self):
    return self._role

  def name(self):
Example #54
0
 def setup_class(cls):
     cls.LOG_DIR = tempfile.mkdtemp()
     LogOptions.set_log_dir(cls.LOG_DIR)
     LogOptions.set_disk_log_level('DEBUG')
     log.init('executor_logger')
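A hedged companion sketch (not part of the example) for removing the temporary log directory created by setup_class:

import shutil

def teardown_class(cls):
    # Clean up the directory allocated with tempfile.mkdtemp() in setup_class.
    shutil.rmtree(cls.LOG_DIR, ignore_errors=True)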