Example #1
0
  def __init__(self, spec, config, model_dir):
    """Set up the input pipelines and estimator used for evaluation.

    See the train_and_evaluate docstring for a description of the
    arguments (spec, config, model_dir).
    """
    # Keep the raw constructor arguments around for later use.
    self.spec = spec
    self.config = config
    self.model_dir = model_dir

    # One CIFAR input pipeline per dataset split.
    self.input_train = cifar.CIFARInput('train', config)
    self.input_train_eval = cifar.CIFARInput('train_eval', config)
    self.input_valid = cifar.CIFARInput('valid', config)
    self.input_test = cifar.CIFARInput('test', config)
    self.input_sample = cifar.CIFARInput('sample', config)

    # The estimator is given the train/sample image counts in addition to
    # the model spec and config.
    self.estimator = _create_estimator(spec, config, model_dir,
                                       self.input_train.num_images,
                                       self.input_sample.num_images)
Example #2
0
    def __init__(self, spec, config, model_dir):
        """Initialize evaluator. See train_and_evaluate docstring.

        Builds one CIFAR input pipeline per dataset split, creates the
        estimator, and derives clamped `max_samples` / `number_of_steps`
        values from the config.
        """
        self.input_train = cifar.CIFARInput('train', config)
        self.input_train_eval = cifar.CIFARInput('train_eval', config)
        self.input_valid = cifar.CIFARInput('valid', config)
        self.input_test = cifar.CIFARInput('test', config)
        self.input_sample = cifar.CIFARInput('sample', config)
        self.estimator = _create_estimator(spec, config, model_dir,
                                           self.input_train.num_images,
                                           self.input_sample.num_images)

        self.spec = spec
        self.config = config
        self.model_dir = model_dir

        # Enforce a floor of 30 samples; max(x, 30) is exactly equivalent to
        # the original "x if x > 30 else 30" branch.
        self.max_samples = max(self.config['max_samples'], 30)

        # Use the configured step count only when positive; otherwise fall
        # back to a single step. (Kept as a conditional rather than max(x, 1)
        # so a fractional value in (0, 1) would behave identically.)
        number_of_steps = self.config['number_of_steps']
        self.number_of_steps = number_of_steps if number_of_steps > 0 else 1
Example #3
0
def _augment_and_evaluate_impl(spec, config, model_dir, epochs_per_eval=5):
  """Augment and evaluate implementation, see augment_and_evaluate docstring.

  Trains on the 'augment' split in chunks of `epochs_per_eval` epochs until
  `config['train_epochs']` worth of steps have run (resuming from the latest
  checkpoint in `model_dir` if present), evaluating test accuracy after each
  chunk.

  Returns:
    dict with 'trainable_params' and 'test_accuracy'.
  """
  input_augment, input_test = [
      cifar.CIFARInput(m, config)
      for m in ['augment', 'test']]
  estimator = _create_estimator(spec, config, model_dir,
                                input_augment.num_images)

  # Optional wall-clock training budget; None means unlimited.
  if config['train_seconds'] > 0.0:
    timing = training_time.limit(config['train_seconds'])
  else:
    timing = training_time.limit(None)

  steps_per_epoch = input_augment.num_images / config['batch_size']   # float
  # Resume from the latest checkpoint if one exists; checkpoint filenames
  # encode the global step after the final '-'.
  ckpt = tf.train.latest_checkpoint(model_dir)
  if not ckpt:
    current_step = 0
  else:
    current_step = int(ckpt.split('-')[-1])
  max_steps = int(config['train_epochs'] * steps_per_epoch)

  # BUG FIX: previously `test_accuracy` was only assigned inside the loop, so
  # resuming an already-finished run (current_step >= max_steps) raised a
  # NameError when building `metadata`. Initialize to None and evaluate once
  # after the loop if no training chunk ran.
  test_accuracy = None
  while current_step < max_steps:
    next_step = current_step + int(epochs_per_eval * steps_per_epoch)
    next_step = min(next_step, max_steps)
    estimator.train(
        input_fn=input_augment.input_fn,
        max_steps=next_steps if False else next_step,
        hooks=[timing.train_hook],
        saving_listeners=[timing.saving_listener])
    current_step = next_step

    test_accuracy = _evaluate(estimator, input_test, config)

  if test_accuracy is None:
    # Training was already complete; still report the accuracy of the
    # existing checkpoint.
    test_accuracy = _evaluate(estimator, input_test, config)

  metadata = {
      'trainable_params': _get_param_count(model_dir),
      'test_accuracy': test_accuracy,
  }

  return metadata
Example #4
0
def keytotuple(key):
    """Look up a network by NASBench hash and return summary metrics.

    Builds the model to profile its FLOP count, then averages the recorded
    final training time and test accuracy over the repeats stored under the
    108-epoch budget.  Relies on module-level `nb`, `cfg`, `model_spec`,
    `model_builder` and `cifar`.

    Args:
      key: hash string identifying a network in the NASBench dataset.

    Returns:
      Tuple (trainable_params, avg_training_time, avg_test_accuracy, n_flops).
    """
    cur_network_data = nb.get_metrics_from_hash(key)
    model = model_spec.ModelSpec(cur_network_data[0]['module_adjacency'],
                                 cur_network_data[0]['module_operations'])
    model_fn = model_builder.build_model_fn(model, cfg, 60000)

    # Start from a clean model dir so the estimator does not resume state.
    if os.path.exists('empty'):
        shutil.rmtree('empty')
    run_cfg = tf.contrib.tpu.RunConfig(
        model_dir='empty',
        keep_checkpoint_max=3,  # Keeps ckpt at start, halfway, and end
        save_checkpoints_secs=2**30)
    estimator = tf.estimator.Estimator(model_fn, config=run_cfg, params=cfg)
    print(estimator)
    # Constructed for its side effects / debugging parity with the original
    # flow; the estimator itself is not trained here.
    input_train = cifar.CIFARInput('train', cfg)
    print(cfg['batch_size'])

    # Build the graph once with placeholder inputs and profile its FLOPs.
    with tf.Graph().as_default() as g:
        features = tf.placeholder(tf.float32, [cfg['batch_size'], 32, 32, 3])
        labels = tf.placeholder(tf.int32, [cfg['batch_size']])
        _ = model_fn(features,
                     labels,
                     mode=tf.estimator.ModeKeys.TRAIN,
                     params=cfg)
        with tf.Session() as sess:
            run_meta = tf.RunMetadata()
            opts = tf.profiler.ProfileOptionBuilder.float_operation()
            flops = tf.profiler.profile(g,
                                        run_meta=run_meta,
                                        cmd='op',
                                        options=opts)
            n_flops = flops.total_float_ops
            print(n_flops)

    # Average the final metrics over all recorded repeats at 108 epochs.
    params = cur_network_data[0]['trainable_parameters']
    runs = cur_network_data[1][108]
    training_time = sum(r['final_training_time'] for r in runs) / len(runs)
    acc = sum(r['final_test_accuracy'] for r in runs) / len(runs)

    return (params, training_time, acc, n_flops)