Example No. 1
def _RunTests(tests: List[_Test], parallelism: int) -> bool:
    """Run tests. Returns True if all tests succeeded."""
    running_tests = []
    for t in tests:
        while len(running_tests) >= parallelism:
            time.sleep(0.2)  # 200ms
            running_tests = [rt for rt in running_tests if not rt.Finished()]
        t.Run()
        running_tests.append(t)

    while running_tests:
        time.sleep(0.2)  # 200ms
        running_tests = [t for t in running_tests if not t.Finished()]

    failed_tests = [t for t in tests if not t.Succeeded()]
    logging.info("Ran %d tests. %d failed.", len(tests), len(failed_tests))
    for t in tests:
        logging.info("%s\t%s\t%.1fs", t,
                     "PASSED" if t.Succeeded() else "FAILED",
                     t.finish_time - t.begin_time)
    logging.flush()

    for ft in failed_tests:
        ft.PrintLogs()

    return not failed_tests
Example No. 2
def _test_flush():
    """Test flush in various difficult cases."""
    # Flush, but one of the logfiles is closed
    log_filename = os.path.join(FLAGS.log_dir, 'a_thread_with_logfile.txt')
    with open(log_filename, 'w') as log_file:
        logging.get_absl_handler().python_handler.stream = log_file
    logging.flush()
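
Note: these examples are excerpts from larger programs. A minimal, self-contained sketch of the basic pattern they all share (assuming only the absl-py package; the names and messages are illustrative):

from absl import app
from absl import logging


def main(argv):
    del argv  # Unused.
    logging.set_verbosity(logging.INFO)
    logging.info('work started')
    # flush() pushes any buffered records to the underlying stream or file,
    # which matters before forking, exiting, or switching log files.
    logging.flush()


if __name__ == '__main__':
    app.run(main)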
Example No. 3
def _RunTests(tests: List[_Test], parallelism: int) -> bool:
    """Run tests. Returns True if all tests succeeded."""
    running_tests = set()
    finished_tests = set()
    tests_to_run = sorted(tests, reverse=True)
    while tests_to_run or running_tests:
        time.sleep(0.2)  # 200ms
        updated_finished = set(t for t in running_tests if t.Finished())
        running_tests = running_tests - updated_finished
        while tests_to_run and len(running_tests) < parallelism:
            t = tests_to_run.pop()
            t.Run()
            running_tests.add(t)

        newly_finished = updated_finished - finished_tests
        finished_tests.update(updated_finished)
        for test in newly_finished:
            logging.info("%s\t%s\t%.1fs", test,
                         "PASSED" if test.Succeeded() else "FAILED",
                         test.finish_time - test.begin_time)
        if newly_finished:
            logging.flush()

    failed_tests = sorted([t for t in tests if not t.Succeeded()])
    logging.info("Ran %d tests. %d failed.", len(tests), len(failed_tests))
    logging.flush()

    for ft in failed_tests:
        ft.PrintLogs()

    return not failed_tests
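
The _Test interface used by both _RunTests variants above is not shown. The following is a hypothetical stub, not the original class, covering only what the loops touch: Run, Finished, Succeeded, PrintLogs, begin_time, finish_time, string conversion, and ordering for sorted():

import subprocess
import threading
import time


class _Test:
    """Hypothetical stand-in for the test objects consumed by _RunTests."""

    def __init__(self, name, argv):
        self.name = name
        self.argv = argv  # Command line to execute, e.g. ['pytest', 'foo_test.py'].
        self.begin_time = None
        self.finish_time = None
        self._returncode = None
        self._thread = None

    def Run(self):
        self.begin_time = time.time()
        self._thread = threading.Thread(target=self._RunBlocking, daemon=True)
        self._thread.start()

    def _RunBlocking(self):
        self._returncode = subprocess.call(self.argv)
        self.finish_time = time.time()

    def Finished(self):
        return self._thread is not None and not self._thread.is_alive()

    def Succeeded(self):
        return self._returncode == 0

    def PrintLogs(self):
        print('(no logs captured for %s in this sketch)' % self.name)

    def __lt__(self, other):  # _RunTests sorts tests, so ordering is needed.
        return self.name < other.name

    def __str__(self):
        return self.name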
Example No. 4
    def set_test_logging(self):
        self.log_path = os.path.join(self.workspace, 'test_log')
        if not os.path.exists(self.log_path):
            os.mkdir(self.log_path)

        logging.flush()
        logging.get_absl_handler().use_absl_log_file(self.exp_name + '.log',
                                                     self.log_path)
Example No. 5
def _test_do_logging():
    """Do some log operations."""
    logging.vlog(3, 'This line is VLOG level 3')
    logging.vlog(2, 'This line is VLOG level 2')
    logging.log(2, 'This line is log level 2')

    logging.vlog(1, 'This line is VLOG level 1')
    logging.log(1, 'This line is log level 1')
    logging.debug('This line is DEBUG')

    logging.vlog(0, 'This line is VLOG level 0')
    logging.log(0, 'This line is log level 0')
    logging.info('Interesting Stuff\0')
    logging.info('Interesting Stuff with Arguments: %d', 42)
    logging.info('%(a)s Stuff with %(b)s', {
        'a': 'Interesting',
        'b': 'Dictionary'
    })

    for i in range(1, 5):
        logging.log_first_n(logging.INFO, 'Info first %d of %d', 2, i, 2)
        logging.log_every_n(logging.INFO, 'Info %d (every %d)', 3, i, 3)

    logging.vlog(-1, 'This line is VLOG level -1')
    logging.log(-1, 'This line is log level -1')
    logging.warning('Worrying Stuff')
    for i in range(1, 5):
        logging.log_first_n(logging.WARNING, 'Warn first %d of %d', 2, i, 2)
        logging.log_every_n(logging.WARNING, 'Warn %d (every %d)', 3, i, 3)

    logging.vlog(-2, 'This line is VLOG level -2')
    logging.log(-2, 'This line is log level -2')
    try:
        raise OSError('Fake Error')
    except OSError:
        saved_exc_info = sys.exc_info()
        logging.exception('An Exception %s')
        logging.exception('Once more, %(reason)s', {'reason': 'just because'})
        logging.error('Exception 2 %s', exc_info=True)
        logging.error('Non-exception', exc_info=False)

    try:
        sys.exc_clear()
    except AttributeError:
        # No sys.exc_clear() in Python 3, but this will clear sys.exc_info() too.
        pass

    logging.error('Exception %s', '3', exc_info=saved_exc_info)
    logging.error('No traceback', exc_info=saved_exc_info[:2] + (None, ))

    logging.error('Alarming Stuff')
    for i in range(1, 5):
        logging.log_first_n(logging.ERROR, 'Error first %d of %d', 2, i, 2)
        logging.log_every_n(logging.ERROR, 'Error %d (every %d)', 3, i, 3)
    logging.flush()
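
Whether the vlog lines above actually appear depends on the process-wide verbosity, which absl exposes both as the --verbosity/-v flag and programmatically. A short sketch (the level values are illustrative):

from absl import logging

logging.set_verbosity(1)           # vlog(1) and below are now emitted.
assert logging.vlog_is_on(1)
assert not logging.vlog_is_on(2)
logging.vlog(1, 'visible at verbosity 1')
logging.flush()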
Example No. 6
  def __init__(self, config, name=None):
    super().__init__(name=name)
    self._config = config

    logging.info("--- dummy Task to get meta data ---")
    logging.info("--- do not care the Task mode here ---")
    task = utils.task(config, mode=utils.TRAIN)
    logging.info("--- dummy Task to get meta data ---")
    logging.flush()

    self._feat_shape = task.feat_shape
    self._vocab_size = task.vocab_size

    self.build()
Example No. 7
def main(argv):
  del argv  # Unused.

  test_name = os.environ.get('TEST_NAME', None)
  test_fn = globals().get('_test_%s' % test_name)
  if test_fn is None:
    raise AssertionError('TEST_NAME must be set to a valid value')
  # Flush so previous messages are written to file before we switch to a new
  # file with use_absl_log_file.
  logging.flush()
  if os.environ.get('USE_ABSL_LOG_FILE') == '1':
    logging.get_absl_handler().use_absl_log_file('absl_log_file', FLAGS.log_dir)

  test_fn()
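
A self-contained variant of this flush-then-redirect pattern, using only absl's built-in --log_dir flag (the file name and messages are placeholders):

from absl import app
from absl import flags
from absl import logging

FLAGS = flags.FLAGS  # absl already defines --log_dir; it is not redeclared here.


def main(argv):
    del argv  # Unused.
    logging.info('written to stderr before the switch')
    # Flush so earlier records reach their current destination before the
    # handler starts writing to the new file.
    logging.flush()
    logging.get_absl_handler().use_absl_log_file('absl_log_file', FLAGS.log_dir)
    logging.info('written to the log file after the switch')
    logging.flush()


if __name__ == '__main__':
    app.run(main)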
Example No. 8
def TestingLoop(min_interesting_results: int,
                max_testing_time_seconds: int,
                batch_size: int,
                generator: base_generator.GeneratorServiceBase,
                dut_harness: base_harness.HarnessBase,
                gs_harness: base_harness.HarnessBase,
                filters: difftests.FiltersBase,
                interesting_results_dir: pathlib.Path,
                start_time: float = None) -> None:
    """The main fuzzing loop.

    Args:
      min_interesting_results: The minimum number of interesting results to find.
      max_testing_time_seconds: The maximum time allowed to find interesting
        results.
      batch_size: The number of testcases to generate and execute in each batch.
      generator: A testcase generator.
      dut_harness: The device under test.
      gs_harness: The device to compare outputs against.
      filters: A filters instance for testcases.
      interesting_results_dir: The directory to write interesting results to.
      start_time: The starting time, as returned by time.time(). If not provided,
        the starting time will be the moment that this function is called. Set
        this value if you would like to include initialization overhead in the
        calculated testing time.
    """
    start_time = start_time or time.time()
    interesting_results_dir.mkdir(parents=True, exist_ok=True)
    num_interesting_results = 0
    batch_num = 0
    while (num_interesting_results < min_interesting_results
           and time.time() < start_time + max_testing_time_seconds):
        batch_num += 1
        logging.info('Starting generate / test / eval batch %d ...', batch_num)
        interesting_results = RunBatch(generator, dut_harness, gs_harness,
                                       filters, batch_size)
        num_interesting_results += len(interesting_results)
        for result in interesting_results:
            pbutil.ToFile(
                result, interesting_results_dir /
                (str(labdate.MillisecondsTimestamp()) + '.pbtxt'))

    logging.info(
        'Stopping after %.2f seconds and %s batches (%.0fms / testcase).\n'
        'Found %s interesting results.',
        time.time() - start_time, humanize.intcomma(batch_num),
        (((time.time() - start_time) / (batch_num * batch_size)) * 1000),
        num_interesting_results)
    logging.flush()
Example No. 9
def train(model_param, train_test_data, predict_f, loss_f, loss_adv_f,
          linearize_f, normalize_f, loss_and_prox_op, summary, config, rng_key
          ):
  """Train a model and log risks."""
  dloss_dw = jax.grad(loss_f, argnums=0)
  dloss_adv_dx = jax.grad(loss_adv_f, argnums=1)

  train_data = train_test_data[0]
  xtrain, ytrain = train_data

  # Precompute min-norm solutions
  if config.enable_cvxpy:
    min_norm_w = {}
    for norm_type in config.available_norm_types:
      min_norm_w[norm_type] = compute_min_norm_solution(xtrain, ytrain,
                                                        norm_type)
    if config.adv.eps_from_cvxpy:
      dual_norm = norm_type_dual(config.adv.norm_type)
      wcomp = min_norm_w[dual_norm]
      wnorm = norm_f(wcomp, dual_norm)
      margin = 1. / wnorm
      # The loop over the range of epsilons will cover 0.5 * (2 * margin)
      config.adv.eps_tot = config.adv.eps_iter = float(2 * margin)

    if config.optim.adv_train.eps_from_cvxpy:
      dual_norm = norm_type_dual(config.optim.adv_train.norm_type)
      wcomp = min_norm_w[dual_norm]
      wnorm = norm_f(wcomp, dual_norm)
      margin = 1. / wnorm
      # Use the optimal margin as epsilon. Subtracting 0.1 is needed, otherwise
      # convergence is slow; the value is sensitive (1e-2 would not work).
      config.optim.adv_train.eps_tot = float(margin) - .1
      config.optim.adv_train.eps_iter = float(margin) - .1

  if config['optim']['name'] == 'cvxpy':
    norm_type = config['optim']['norm']
    cvxpy_sol = compute_min_norm_solution(xtrain, ytrain, norm_type)
    model_param = jnp.array(cvxpy_sol)

  # Train loop
  optim_step, optim_options = optim.get_optimizer_step(config['optim'])
  niters = optim_options['niters']
  for step in range(1, niters):
    # Take one optimization step
    if config['optim']['name'] != 'cvxpy':
      if config['optim']['adv_train']['enable']:
        # Adversarial training
        rng_key, rng_subkey = jax.random.split(rng_key)
        x_adv = adversarial.find_adversarial_samples(train_data, loss_adv_f,
                                                     dloss_adv_dx,
                                                     linearize_f,
                                                     model_param, normalize_f,
                                                     config.optim.adv_train,
                                                     rng_key)
        train_data_new = x_adv, ytrain
      else:
        # Standard training
        train_data_new = train_data
      if config['optim']['name'] == 'fista':
        model_param, optim_options = optim_step(train_data_new,
                                                loss_and_prox_op,
                                                model_param,
                                                optim_options)
      else:
        model_param, optim_options = optim_step(train_data_new,
                                                loss_f,
                                                model_param, optim_options)

    # Log risks and other statistics
    if (step + 1) % config.log_interval == 0:
      # Evaluate risk on train/test sets
      for do_train in [True, False]:
        data = train_test_data[0] if do_train else train_test_data[1]
        prefix = 'risk/train' if do_train else 'risk/test'
        risk = evaluate_risks(data, predict_f, loss_f, model_param)
        for rname, rvalue in risk.items():
          summary.scalar('%s/%s' % (prefix, rname), rvalue, step=step)
        rng_key, rng_subkey = jax.random.split(rng_key)
        risk = evaluate_adversarial_risk(data, predict_f, loss_adv_f,
                                         dloss_adv_dx,
                                         linearize_f,
                                         model_param, normalize_f, config,
                                         rng_subkey)
        for rname, rvalue in risk.items():
          summary.array('%s/%s' % (prefix, rname), rvalue, step=step)

      grad = dloss_dw(model_param, xtrain, ytrain)
      grad_ravel, _ = ravel_pytree(grad)
      model_param_ravel, _ = ravel_pytree(model_param)
      for norm_type in config.available_norm_types:
        # Log the norm of the gradient w.r.t. various norms
        if not norm_type.startswith('dft'):
          summary.scalar(
              'grad/norm/' + norm_type,
              norm_f(grad_ravel, norm_type),
              step=step)

        # Log weight norm
        if not norm_type.startswith('dft'):
          wnorm = norm_f(model_param_ravel, norm_type)
          summary.scalar('weight/norm/' + norm_type, wnorm, step=step)

        # Log margin for the equivalent linearized single layer model
        linear_param = linearize_f(model_param)
        min_loss = jnp.min(ytrain * (linear_param.T @ xtrain))
        wcomp = linear_param / min_loss
        wnorm = norm_f(wcomp, norm_type)
        margin = jnp.sign(min_loss) * 1 / wnorm
        summary.scalar('margin/' + norm_type, margin, step=step)
        summary.scalar('weight/linear/norm/' + norm_type, wnorm, step=step)

        # Cosine similarity between the current params and min-norm solution
        if config.enable_cvxpy:

          def cos_sim(a, b):
            return jnp.dot(a, b) / (jnp.linalg.norm(a) * jnp.linalg.norm(b))

          min_norm_w_ravel, _ = ravel_pytree(min_norm_w[norm_type])
          cs = cos_sim(linear_param.flatten(), min_norm_w_ravel)
          summary.scalar('csim_to_wmin/' + norm_type, cs, step=step)

      if 'step_size' in optim_options:
        summary.scalar('optim/step_size', optim_options['step_size'], step=step)

      logging.info('Epoch: [%d/%d]\t%s', step + 1, niters,
                   summary.last_scalars_to_str(config.log_keys))
      logging.flush()

  summary.flush()
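
Stripped of the model-specific pieces, the logging pattern in this training loop reduces to: log metrics every log_interval steps, then flush so progress is visible while the job is still running. A minimal sketch with hypothetical values:

from absl import logging

log_interval = 100   # Hypothetical; the real value comes from the config.
niters = 1000

for step in range(1, niters):
    # ... one optimization step would go here ...
    if (step + 1) % log_interval == 0:
        logging.info('Epoch: [%d/%d]', step + 1, niters)
        logging.flush()  # Keep the log file current even for long-running jobs.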
Example No. 10
def FlushLogs():
  """Flushes all log files."""
  absl_logging.flush()
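
The absl_logging name in this wrapper presumably comes from an aliased import; a self-contained version, under that assumption:

from absl import logging as absl_logging


def FlushLogs():
  """Flushes all log files."""
  absl_logging.flush()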
Example No. 11
def Flush():
    """Flush logging and stout/stderr outputs."""
    logging.flush()
    sys.stdout.flush()
    sys.stderr.flush()
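
One common way to use a combined flush helper like this is to register it to run at interpreter exit as well; a small sketch using only the standard library:

import atexit
import sys

from absl import logging


def Flush():
    """Flush logging and stdout/stderr outputs."""
    logging.flush()
    sys.stdout.flush()
    sys.stderr.flush()


# Hypothetical usage: flush everything on normal interpreter exit.
atexit.register(Flush)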
Example No. 12
def _log(self, msg, *args, **kwargs):
  if self._message and self._verbose:
    logging.info(msg, *args, **kwargs)
    logging.flush()
Example No. 13
def flush(self):
    logging.flush()
Example No. 14
    assert len(dut_harness.testbeds) == 1

    logging.info('Preparing gold standard testbed.')
    config = GetBaseHarnessConfig(config_class)
    config.opencl_env.extend(
        [gpu.cldrive.env.OclgrindOpenCLEnvironment().name])
    config.opencl_opt.extend([True])
    gs_harness = harness_class(config)
    assert len(gs_harness.testbeds) >= 1

    TestingLoop(FLAGS.min_interesting_results,
                FLAGS.max_testing_time_seconds,
                FLAGS.batch_size,
                generator,
                dut_harness,
                gs_harness,
                filters,
                interesting_results_dir,
                start_time=start_time)


if __name__ == '__main__':
    try:
        app.run(main)
    except KeyboardInterrupt:
        logging.flush()
        sys.stdout.flush()
        sys.stderr.flush()
        print('keyboard interrupt')
        sys.exit(1)
Example No. 15
def _test_do_logging():
    """Do some log operations."""
    logging.vlog(3, 'This line is VLOG level 3')
    logging.vlog(2, 'This line is VLOG level 2')
    logging.log(2, 'This line is log level 2')
    if logging.vlog_is_on(2):
        logging.log(1, 'VLOG level 1, but only if VLOG level 2 is active')

    logging.vlog(1, 'This line is VLOG level 1')
    logging.log(1, 'This line is log level 1')
    logging.debug('This line is DEBUG')

    logging.vlog(0, 'This line is VLOG level 0')
    logging.log(0, 'This line is log level 0')
    logging.info('Interesting Stuff\0')
    logging.info('Interesting Stuff with Arguments: %d', 42)
    logging.info('%(a)s Stuff with %(b)s', {
        'a': 'Interesting',
        'b': 'Dictionary'
    })

    with mock.patch.object(timeit, 'default_timer') as mock_timer:
        mock_timer.return_value = 0
        while timeit.default_timer() < 9:
            logging.log_every_n_seconds(logging.INFO,
                                        'This should appear 5 times.', 2)
            mock_timer.return_value = mock_timer() + .2

    for i in range(1, 5):
        logging.log_first_n(logging.INFO, 'Info first %d of %d', 2, i, 2)
        logging.log_every_n(logging.INFO, 'Info %d (every %d)', 3, i, 3)

    logging.vlog(-1, 'This line is VLOG level -1')
    logging.log(-1, 'This line is log level -1')
    logging.warning('Worrying Stuff')
    for i in range(1, 5):
        logging.log_first_n(logging.WARNING, 'Warn first %d of %d', 2, i, 2)
        logging.log_every_n(logging.WARNING, 'Warn %d (every %d)', 3, i, 3)

    logging.vlog(-2, 'This line is VLOG level -2')
    logging.log(-2, 'This line is log level -2')
    try:
        raise OSError('Fake Error')
    except OSError:
        saved_exc_info = sys.exc_info()
        logging.exception('An Exception %s')
        logging.exception('Once more, %(reason)s', {'reason': 'just because'})
        logging.error('Exception 2 %s', exc_info=True)
        logging.error('Non-exception', exc_info=False)

    try:
        sys.exc_clear()
    except AttributeError:
        # No sys.exc_clear() in Python 3, but this will clear sys.exc_info() too.
        pass

    logging.error('Exception %s', '3', exc_info=saved_exc_info)
    logging.error('No traceback', exc_info=saved_exc_info[:2] + (None, ))

    logging.error('Alarming Stuff')
    for i in range(1, 5):
        logging.log_first_n(logging.ERROR, 'Error first %d of %d', 2, i, 2)
        logging.log_every_n(logging.ERROR, 'Error %d (every %d)', 3, i, 3)
    logging.flush()
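
Outside of tests, log_every_n_seconds does not need the mocked timer shown above; it rate-limits against real wall-clock time. A short sketch with illustrative values:

import time

from absl import logging

for _ in range(50):
    # Emitted at most once every 2 seconds, regardless of loop speed.
    logging.log_every_n_seconds(logging.INFO, 'still working', 2)
    time.sleep(0.1)
logging.flush()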