Example #1
def main():  # pragma: no cover
  args = parse_args()
  matchlist = buildbot_state.construct_pattern_matcher()
  logger = logging.getLogger(__name__)
  logs.add_handler(logger)

  if args.list_all_states:
    matchlist.print_all_states()
    return 0

  abs_master_directory = os.path.abspath(args.directory)

  state_machine = partial(run_state_machine_pass, logger, matchlist,
                          abs_master_directory, args.emergency_file,
                          args.desired_state, args.transition_time_utc,
                          args.enable_gclient_sync, args.prod,
                          args.connection_timeout, args.hostname)

  if args.loop:
    loop_opts = outer_loop.process_argparse_options(args)
    outer_loop.loop(
        state_machine, lambda: args.loop_sleep_secs, **loop_opts)
  else:
    return state_machine()

  return 0
Example #2
def main():  # pragma: no cover
    args = parse_args()
    matchlist = buildbot_state.construct_pattern_matcher()
    logger = logging.getLogger(__name__)
    logs.add_handler(logger)

    if args.list_all_states:
        matchlist.print_all_states()
        return 0

    abs_master_directory = os.path.abspath(args.directory)

    state_machine = partial(run_state_machine_pass, logger, matchlist,
                            abs_master_directory, args.emergency_file,
                            args.desired_state, args.transition_time_utc,
                            args.enable_gclient_sync, args.prod,
                            args.connection_timeout, args.hostname)

    if args.loop:
        loop_opts = outer_loop.process_argparse_options(args)
        outer_loop.loop(state_machine, lambda: args.loop_sleep_secs,
                        **loop_opts)
    else:
        return state_machine()

    return 0
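
Examples 1 and 2 are the same entry point formatted two ways. Both lean on functools.partial to turn a many-argument function into the zero-argument task that outer_loop.loop expects: the arguments are frozen once at startup, and the loop then calls the resulting callable with no arguments on each pass. A minimal, self-contained sketch of that adaptation (run_pass is a hypothetical stand-in, not the real run_state_machine_pass):

from functools import partial

def run_pass(logger, directory, timeout):  # hypothetical stand-in
  print('one pass over %s (timeout=%s)' % (directory, timeout))
  return 0

task = partial(run_pass, 'logger', '/abs/master/dir', 30)
task()  # equivalent to run_pass('logger', '/abs/master/dir', 30)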
Example #3
def main(argv):
    opts, loop_opts = parse_args(argv)

    if opts.root_setup:
        return root_setup.root_setup()

    def single_iteration():
        try:
            get_cpu_info()
            get_disk_info()
            get_mem_info()
            get_net_info()
            get_proc_info()
        finally:
            ts_mon.flush()
        return True

    # This returns a 0 value the first time it's called.  Call it now and discard
    # the return value.
    psutil.cpu_times_percent()

    # Wait a random amount of time before starting the loop in case sysmon is
    # started at exactly the same time on all machines.
    time.sleep(random.uniform(0, opts.interval))

    loop_results = outer_loop.loop(task=single_iteration,
                                   sleep_timeout=lambda: opts.interval,
                                   **loop_opts)

    return 0 if loop_results.success else 1
Example #4
def main(argv):
  opts, loop_opts = parse_args(argv)

  if opts.root_setup:
    return root_setup.root_setup()

  def single_iteration():
    try:
      system_metrics.get_cpu_info()
      system_metrics.get_disk_info()
      system_metrics.get_mem_info()
      system_metrics.get_net_info()
      system_metrics.get_proc_info()
      puppet_metrics.get_puppet_summary()
    finally:
      ts_mon.flush()
    return True

  # This returns a 0 value the first time it's called.  Call it now and discard
  # the return value.
  psutil.cpu_times_percent()

  # Wait a random amount of time before starting the loop in case sysmon is
  # started at exactly the same time on all machines.
  time.sleep(random.uniform(0, opts.interval))

  loop_results = outer_loop.loop(
      task=single_iteration,
      sleep_timeout=lambda: opts.interval,
      **loop_opts)

  return 0 if loop_results.success else 1
Example #5
def main(args):  # pragma: no cover
    opts, loop_opts = parse_args(args)

    if not os.path.isdir(opts.datadir):
        DEFAULT_LOGGER.info('Creating data directory.')
        os.makedirs(opts.datadir)

    with open(opts.credentials_db) as data_file:
        creds_data = json.load(data_file)

    # Use local json file
    if not opts.configfile:
        if not get_data(_create_http(creds_data)):
            DEFAULT_LOGGER.error('Failed to get data files.')
            return 1

    def outer_loop_iteration():
        return bugdroid.inner_loop(opts)

    loop_results = outer_loop.loop(task=outer_loop_iteration,
                                   sleep_timeout=lambda: 60.0,
                                   **loop_opts)

    # In case local json file is used, do not upload
    if not opts.configfile:
        if not update_data(_create_http(creds_data)):
            DEFAULT_LOGGER.error('Failed to update data files.')
            return 1

    DEFAULT_LOGGER.info('Outer loop finished with result %r',
                        loop_results.success)

    return 0 if loop_results.success else 1
Example #6
def main(args):  # pragma: no cover
  opts = parse_args(args)
  commits_counter = ts_mon.CounterMetric('gsubtreed/commit_count')
  cref = gsubtreed.GsubtreedConfigRef(opts.repo)
  opts.repo.reify()

  summary = collections.defaultdict(int)
  def outer_loop_iteration():
    success, paths_counts = gsubtreed.inner_loop(opts.repo, cref)
    for path, count in paths_counts.iteritems():
      summary[path] += count
      commits_counter.increment_by(count, fields={'path': path})
    return success

  loop_results = outer_loop.loop(
      task=outer_loop_iteration,
      sleep_timeout=lambda: cref['interval'],
      **opts.loop_opts)

  if opts.json_output:
    with open(opts.json_output, 'w') as f:
      json.dump({
        'error_count': loop_results.error_count,
        'summary': summary,
      }, f)

  return 0 if loop_results.success else 1
Example #7
def main(args):  # pragma: no cover
    opts = parse_args(args)
    commits_counter = ts_mon.CounterMetric('gsubtreed/commit_count')
    cref = gsubtreed.GsubtreedConfigRef(opts.repo)
    opts.repo.reify()

    summary = collections.defaultdict(int)

    def outer_loop_iteration():
        success, paths_counts = gsubtreed.inner_loop(opts.repo, cref)
        for path, count in paths_counts.iteritems():
            summary[path] += count
            commits_counter.increment_by(count, fields={'path': path})
        return success

    loop_results = outer_loop.loop(task=outer_loop_iteration,
                                   sleep_timeout=lambda: cref['interval'],
                                   **opts.loop_opts)

    if opts.json_output:
        with open(opts.json_output, 'w') as f:
            json.dump(
                {
                    'error_count': loop_results.error_count,
                    'summary': summary,
                }, f)

    return 0 if loop_results.success else 1
Example #8
def testLongUnsuccessfulJobStillFails(self):
    ret = outer_loop.loop(lambda: self.time_mod.sleep(100),
                          sleep_timeout=lambda: 1,
                          duration=1,
                          max_errors=5,
                          time_mod=self.time_mod)
    self.assertEqual(outer_loop.LoopResults(False, 1), ret)
    self.assertEqual([100], self.time_mod.sleeps)
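
The tests in this group drive the loop with an injected time_mod rather than the real time module, so nothing actually blocks and the clock can be advanced instantly. The assertions only require an object that exposes time() and sleep(), records each requested sleep, and moves its clock forward. A minimal stand-in consistent with that behavior (a hypothetical sketch, not necessarily the project's actual fixture):

class FakeTime(object):
  """Test double for the time module: never blocks, records sleeps."""

  def __init__(self):
    self.now = 0
    self.sleeps = []  # Durations requested via sleep(); asserted on above.

  def time(self):
    return self.now

  def sleep(self, duration):
    self.sleeps.append(duration)
    self.now += duration  # Advance the fake clock instead of blocking.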
Example #9
def testUntilDeadlineSlowTask(self):
  # This test exists mostly to satisfy 100% code coverage requirement.
  def task():
    self.time_mod.sleep(6)
    return True
  ret = outer_loop.loop(task, sleep_timeout=lambda: 1, duration=5,
                        time_mod=self.time_mod)
  self.assertEqual(outer_loop.LoopResults(True, 0), ret)
  self.assertEqual([6], self.time_mod.sleeps)
Example #10
def main(args):
    parser = argparse.ArgumentParser(prog='run.py %s' % __package__)
    parser.add_argument('data_url', action='store', nargs='*')
    parser.add_argument('--use-cache', action='store_true')
    parser.add_argument('--master-filter', action='store')
    parser.add_argument('--builder-filter', action='store')
    parser.add_argument('--processes',
                        default=PARALLEL_TASKS,
                        action='store',
                        type=int)
    parser.add_argument('--jobs',
                        default=CONCURRENT_TASKS,
                        action='store',
                        type=int)
    logs.add_argparse_options(parser)
    outer_loop.add_argparse_options(parser)

    gatekeeper_json = os.path.join(build_scripts_dir, 'slave',
                                   'gatekeeper.json')
    parser.add_argument('--gatekeeper',
                        action='store',
                        default=gatekeeper_json)
    gatekeeper_trees_json = os.path.join(build_scripts_dir, 'slave',
                                         'gatekeeper_trees.json')
    parser.add_argument('--gatekeeper-trees',
                        action='store',
                        default=gatekeeper_trees_json)

    parser.add_argument('--findit-api-url',
                        help='Query findit results from this url.')

    args = parser.parse_args(args)
    logs.process_argparse_options(args)
    loop_args = outer_loop.process_argparse_options(args)

    # Suppress all logging from connectionpool; it is too verbose at info level.
    if args.log_level != logging.DEBUG:

        class _ConnectionpoolFilter(object):
            @staticmethod
            def filter(record):
                if record.levelno == logging.INFO:
                    return False
                return True

        logging.getLogger(
            'requests.packages.urllib3.connectionpool').addFilter(
                _ConnectionpoolFilter())

    def outer_loop_iteration():
        return inner_loop(args)

    loop_results = outer_loop.loop(task=outer_loop_iteration,
                                   sleep_timeout=lambda: 5,
                                   **loop_args)

    return 0 if loop_results.success else 1
Example #11
def testUntilDeadlineFastTask(self):
  calls = []
  def task():
    calls.append(1)
    return True
  ret = outer_loop.loop(task, sleep_timeout=lambda: 3, duration=10,
                        time_mod=self.time_mod)
  self.assertEqual(outer_loop.LoopResults(True, 0), ret)
  self.assertEqual(4, len(calls))
  self.assertEqual([3, 3, 3], self.time_mod.sleeps)
Example #12
def testUntilCtrlCWithErrors(self):
  tasks = [None, None, None]
  def task():
    if not tasks:
      raise KeyboardInterrupt()
    tasks.pop(0)
    raise Exception('Error')
  ret = outer_loop.loop(task, sleep_timeout=lambda: 1, time_mod=self.time_mod)
  self.assertEqual(outer_loop.LoopResults(True, 3), ret)
  self.assertEqual([1, 1, 1], self.time_mod.sleeps)
Example #13
    def testUntilDeadlineSlowTask(self):
        # This test exists mostly to satisfy 100% code coverage requirement.
        def task():
            self.time_mod.sleep(6)
            return True

        ret = outer_loop.loop(task,
                              sleep_timeout=lambda: 1,
                              duration=5,
                              time_mod=self.time_mod)
        self.assertEqual(outer_loop.LoopResults(True, 0), ret)
        self.assertEqual([6], self.time_mod.sleeps)
Example #14
def testMaxErrorCount(self):
  tasks = ['ok', 'err', 'false', 'ok', 'err', 'false', 'err', 'skipped']
  def task():
    t = tasks.pop(0)
    if t == 'err':
      raise Exception('Horrible error')
    if t == 'false':
      return False
    return True
  ret = outer_loop.loop(task, sleep_timeout=lambda: 1, max_errors=3,
                        time_mod=self.time_mod)
  self.assertEqual(outer_loop.LoopResults(False, 5), ret)
  self.assertEqual(['skipped'], tasks)
  self.assertEqual([1, 1, 1, 1, 1, 1], self.time_mod.sleeps)
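
Taken together, the tests pin down the loop's contract: a task that raises (other than KeyboardInterrupt) or returns a falsy value counts as one error; max_errors bounds consecutive errors, with any success resetting the budget, while error_count reports the running total (hence LoopResults(False, 5) above despite max_errors=3); Ctrl-C is a clean shutdown (success=True); and a deadline exit reports the last task result as the overall success. A compact reconstruction of that behavior, for reading purposes only (a sketch inferred from these tests, not the actual outer_loop source):

import collections
import time

LoopResults = collections.namedtuple('LoopResults', ['success', 'error_count'])

def loop_sketch(task, sleep_timeout, duration=None, max_errors=None,
                time_mod=time):
  deadline = None if duration is None else time_mod.time() + duration
  errors_left = max_errors
  error_count = 0
  result = False
  try:
    while True:
      try:
        result = task()
      except KeyboardInterrupt:
        raise
      except Exception:
        result = False
      if result:
        errors_left = max_errors  # A success refills the error budget.
      else:
        error_count += 1
        if errors_left is not None:
          errors_left -= 1
          if errors_left <= 0:
            return LoopResults(False, error_count)  # Too many in a row.
      timeout = sleep_timeout()
      if deadline is not None and time_mod.time() + timeout >= deadline:
        return LoopResults(bool(result), error_count)  # Deadline reached.
      time_mod.sleep(timeout)
  except KeyboardInterrupt:
    return LoopResults(True, error_count)  # Ctrl-C is a clean shutdown.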
Example #15
    def testUntilCtrlCWithErrors(self):
        tasks = [None, None, None]

        def task():
            if not tasks:
                raise KeyboardInterrupt()
            tasks.pop(0)
            raise Exception('Error')

        ret = outer_loop.loop(task,
                              sleep_timeout=lambda: 1,
                              time_mod=self.time_mod)
        self.assertEqual(outer_loop.LoopResults(True, 3), ret)
        self.assertEqual([1, 1, 1], self.time_mod.sleeps)
Example #16
    def testUntilDeadlineFastTask(self):
        calls = []

        def task():
            calls.append(1)
            return True

        ret = outer_loop.loop(task,
                              sleep_timeout=lambda: 3,
                              duration=10,
                              time_mod=self.time_mod)
        self.assertEqual(outer_loop.LoopResults(True, 0), ret)
        self.assertEqual(4, len(calls))
        self.assertEqual([3, 3, 3], self.time_mod.sleeps)
Example #17
def main(args):
  parser = argparse.ArgumentParser(prog='run.py %s' % __package__)
  parser.add_argument('data_url', action='store', nargs='*')
  parser.add_argument('--use-cache', action='store_true')
  parser.add_argument('--master-filter', action='store')
  parser.add_argument('--builder-filter', action='store')
  parser.add_argument('--processes', default=PARALLEL_TASKS, action='store',
                      type=int)
  parser.add_argument('--jobs', default=CONCURRENT_TASKS, action='store',
                      type=int)
  logs.add_argparse_options(parser)
  outer_loop.add_argparse_options(parser)

  gatekeeper_json = os.path.join(build_scripts_dir, 'slave', 'gatekeeper.json')
  parser.add_argument('--gatekeeper', action='store', default=gatekeeper_json)
  gatekeeper_trees_json = os.path.join(build_scripts_dir, 'slave',
                                       'gatekeeper_trees.json')
  parser.add_argument('--gatekeeper-trees', action='store',
                      default=gatekeeper_trees_json)

  parser.add_argument('--findit-api-url',
                      help='Query findit results from this url.')

  args = parser.parse_args(args)
  logs.process_argparse_options(args)
  loop_args = outer_loop.process_argparse_options(args)

  # Suppress all logging from connectionpool; it is too verbose at info level.
  if args.log_level != logging.DEBUG:
    class _ConnectionpoolFilter(object):

      @staticmethod
      def filter(record):
        if record.levelno == logging.INFO:
          return False
        return True
    logging.getLogger('requests.packages.urllib3.connectionpool').addFilter(
        _ConnectionpoolFilter())

  def outer_loop_iteration():
    return inner_loop(args)

  loop_results = outer_loop.loop(
      task=outer_loop_iteration,
      sleep_timeout=lambda: 5,
      **loop_args)

  return 0 if loop_results.success else 1
Example #18
    def testMaxErrorCount(self):
        tasks = ['ok', 'err', 'false', 'ok', 'err', 'false', 'err', 'skipped']

        def task():
            t = tasks.pop(0)
            if t == 'err':
                raise Exception('Horrible error')
            if t == 'false':
                return False
            return True

        ret = outer_loop.loop(task,
                              sleep_timeout=lambda: 1,
                              max_errors=3,
                              time_mod=self.time_mod)
        self.assertEqual(outer_loop.LoopResults(False, 5), ret)
        self.assertEqual(['skipped'], tasks)
        self.assertEqual([1, 1, 1, 1, 1, 1], self.time_mod.sleeps)
Example #19
def main(args):  # pragma: no cover
    opts = parse_args(args)
    commits_counter = ts_mon.CounterMetric('gnumbd/commit_count')
    cref = gnumbd.GnumbdConfigRef(opts.repo)
    opts.repo.reify()

    all_commits = []

    def outer_loop_iteration():
        success, commits = gnumbd.inner_loop(opts.repo, cref)
        all_commits.extend(commits)
        commits_counter.increment_by(len(commits))
        return success

    # TODO(iannucci): sleep_timeout should be an exponential backon/off.
    #   Whenever we push, we should decrease the interval at 'backon_rate'
    #   until we hit 'min_interval'.
    #   Whenever we fail/NOP, we should back off at 'backoff_rate' until we
    #   hit 'max_interval'.
    #
    #   When all is going well, this should be looping at < 1 sec. If things
    #   start going sideways, we should automatically back off.
    loop_results = outer_loop.loop(task=outer_loop_iteration,
                                   sleep_timeout=lambda: cref['interval'],
                                   **opts.loop_opts)

    if opts.json_output:
        with open(opts.json_output, 'w') as f:
            json.dump({
                'error_count': loop_results.error_count,
                'synthesized_commits': [{
                    'commit': c.hsh,
                    'footers': infra_types.thaw(c.data.footers),
                } for c in all_commits],
            }, f)

    return 0 if loop_results.success else 1
Example #20
def main(args):  # pragma: no cover
  opts = parse_args(args)
  commits_counter = ts_mon.CounterMetric('gnumbd/commit_count')
  cref = gnumbd.GnumbdConfigRef(opts.repo)
  opts.repo.reify()

  all_commits = []
  def outer_loop_iteration():
    success, commits = gnumbd.inner_loop(opts.repo, cref)
    all_commits.extend(commits)
    commits_counter.increment_by(len(commits))
    return success

  # TODO(iannucci): sleep_timeout should be an exponential backon/off.
  #   Whenever we push, we should decrease the interval at 'backon_rate'
  #   until we hit 'min_interval'.
  #   Whenever we fail/NOP, we should back off at 'backoff_rate' until we
  #   hit 'max_interval'.
  #
  #   When all is going well, this should be looping at < 1 sec. If things
  #   start going sideways, we should automatically back off.
  loop_results = outer_loop.loop(
      task=outer_loop_iteration,
      sleep_timeout=lambda: cref['interval'],
      **opts.loop_opts)

  if opts.json_output:
    with open(opts.json_output, 'w') as f:
      json.dump({
        'error_count': loop_results.error_count,
        'synthesized_commits': [
          {
            'commit': c.hsh,
            'footers': infra_types.thaw(c.data.footers),
          } for c in all_commits
        ],
      }, f)

  return 0 if loop_results.success else 1
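
The TODO in the two examples above describes an adaptive sleep_timeout: speed back up ("back on") after successful pushes, back off after failures or no-ops. Since sleep_timeout is just a callable, one way to express that is a small stateful object passed as sleep_timeout=interval, with the iteration body calling interval.success() or interval.failure() based on what inner_loop reported. The parameter names below come from the TODO; everything else is hypothetical glue, not gnumbd code:

class AdaptiveInterval(object):
  """Exponential back-on/back-off sleep interval (sketch of the TODO)."""

  def __init__(self, min_interval=0.5, max_interval=60.0,
               backon_rate=0.5, backoff_rate=2.0):
    self.min_interval = min_interval
    self.max_interval = max_interval
    self.backon_rate = backon_rate
    self.backoff_rate = backoff_rate
    self.current = min_interval

  def success(self):
    # Pushed something: tighten the interval toward min_interval.
    self.current = max(self.min_interval, self.current * self.backon_rate)

  def failure(self):
    # Failed or did nothing: back off toward max_interval.
    self.current = min(self.max_interval, self.current * self.backoff_rate)

  def __call__(self):
    # outer_loop.loop calls sleep_timeout() once per iteration.
    return self.current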
Example #21
def main(args):  # pragma: no cover
  opts = parse_args(args)

  all_pushes = []
  def outer_loop_iteration():
    success = True
    for repo, push_list in opts.specs.iteritems():
      ok, pushed_refs = process_repo(repo, push_list)
      success = success and ok
      all_pushes.extend([
        {
          'repo': repo.url,
          'ref': ref.ref,
          'commit': commit.hsh,
        } for ref, commit in pushed_refs.iteritems()
      ])
    return success

  loop_results = outer_loop.loop(
      task=outer_loop_iteration,
      sleep_timeout=lambda: 10,
      **opts.loop_opts)

  if opts.json_output:
    # (repo_url, ref) -> last pushed hash.
    last_pushes = {(p['repo'], p['ref']): p['commit'] for p in all_pushes}
    with open(opts.json_output, 'w') as f:
      json.dump({
        'all_pushes': all_pushes,
        'last_pushes': [
          {
            'repo': repo,
            'ref': ref,
            'commit': commit,
          } for (repo, ref), commit in last_pushes.iteritems()
        ]
      }, f)

  return 0 if loop_results.success else 1
Example #22
def main(argv):
  opts, loop_opts = parse_args(argv)

  if opts.url:
    # Monitor a single master specified on the commandline.
    monitors = [monitor.MasterMonitor(opts.url)]
  else:
    # Query the mastermap and monitor all the masters on a host.
    monitors = monitor.create_from_mastermap(opts.build_dir, opts.hostname)

  def single_iteration():
    try:
      for mon in monitors:
        mon.poll()
    finally:
      ts_mon.flush()
    return True

  loop_results = outer_loop.loop(
      task=single_iteration,
      sleep_timeout=lambda: opts.interval,
      **loop_opts)

  return 0 if loop_results.success else 1
Example #23
def main(args):  # pragma: no cover
    opts, loop_opts = parse_args(args)

    if not os.path.isdir(opts.datadir):
        logging.info('Creating data directory.')
        os.makedirs(opts.datadir)

    # Use local json file
    if not opts.configfile:
        get_data(_create_http(opts.credentials_db))

    def outer_loop_iteration():
        return bugdroid.inner_loop(opts)

    loop_results = outer_loop.loop(task=outer_loop_iteration,
                                   sleep_timeout=lambda: 60.0,
                                   **loop_opts)

    # In case local json file is used, do not upload
    if not opts.configfile and not opts.dryrun:
        update_data(_create_http(opts.credentials_db))

    logging.info('Outer loop finished with result %r', loop_results.success)
    return 0 if loop_results.success else 1
Example #24
def main(args):
  parser = argparse.ArgumentParser(prog='run.py %s' % __package__)
  parser.add_argument('data_url', action='store', nargs='*')  # Deprecated
  parser.add_argument('--use-cache', action='store_true')
  parser.add_argument('--master-filter', action='store')
  parser.add_argument('--builder-filter', action='store')
  parser.add_argument('--processes', default=PARALLEL_TASKS, action='store',
                      type=int)
  parser.add_argument('--jobs', default=CONCURRENT_TASKS, action='store',
                      type=int)
  logs.add_argparse_options(parser)
  outer_loop.add_argparse_options(parser)

  gatekeeper_json = os.path.join(build_scripts_dir, 'slave', 'gatekeeper.json')
  parser.add_argument('--gatekeeper', action='store', default=gatekeeper_json)
  gatekeeper_trees_json = os.path.join(build_scripts_dir, 'slave',
                                       'gatekeeper_trees.json')
  parser.add_argument('--gatekeeper-trees', action='store',
                      default=gatekeeper_trees_json)

  parser.add_argument('--findit-api-url',
                      help='Query findit results from this url.')
  parser.add_argument('--crbug-service-account',
                      help='Path to a service account JSON file to be used to '
                           'search for relevant issues on crbug.com.')
  parser.add_argument('--use-monorail', default=False, action='store_true',
                      help='When specified, Monorail API is used to search for '
                           'issues on crbug')
  parser.add_argument('--api-endpoint-prefix',
                      help='Endpoint prefix for posting alerts. Old API '
                           'endpoint will be formed by adding value specified '
                           'in --old-api-path to the prefix, new API endpoints '
                           'will be formed by adding '
                           '/api/v1/alerts/<tree_name>.')
  parser.add_argument('--old-api-path',
                      help='Path to be appended to --api-endpoint-prefix to '
                           'form old API endpoint.')

  args = parser.parse_args(args)
  logs.process_argparse_options(args)
  loop_args = outer_loop.process_argparse_options(args)

  # TODO(sergiyb): Remove support for data_url when builder_alerts recipes are
  # updated and using new syntax to call this script.
  if args.data_url:
    if (len(args.data_url) == 1 and args.data_url[0].endswith('alerts') and
        not args.api_endpoint_prefix and not args.old_api_path):
      logging.warn(
          'You are using positional argument to specify URL to post updates '
          'to. Please use --api-endpoint-prefix and --old-api-path instead.')
      slash_index = args.data_url[0].rindex('/')
      args.api_endpoint_prefix = args.data_url[0][:slash_index]
      args.old_api_path = args.data_url[0][slash_index+1:]
    else:
      logging.error(
          'Unsupported positional argument(s) or used together with '
          '--api-endpoint-prefix/--old-api-path. Please use only '
          '--api-endpoint-prefix and --old-api-path to specify URL to post new '
          'alerts to.')
      return

  # Suppress all logging from connectionpool; it is too verbose at info level.
  if args.log_level != logging.DEBUG:
    class _ConnectionpoolFilter(object):

      @staticmethod
      def filter(record):
        if record.levelno == logging.INFO:
          return False
        return True
    logging.getLogger('requests.packages.urllib3.connectionpool').addFilter(
        _ConnectionpoolFilter())

  def outer_loop_iteration():
    return inner_loop(args)

  loop_results = outer_loop.loop(
      task=outer_loop_iteration,
      sleep_timeout=lambda: 5,
      **loop_args)

  return 0 if loop_results.success else 1
Example #25
def testLongUnsuccessfulJobStillFails(self):
  ret = outer_loop.loop(lambda: self.time_mod.sleep(100),
                        sleep_timeout=lambda: 1, duration=1, max_errors=5,
                        time_mod=self.time_mod)
  self.assertEqual(outer_loop.LoopResults(False, 1), ret)
  self.assertEqual([100], self.time_mod.sleeps)