Example #1
  def test_prune_islands(self):
    """Tests isolated components are pruned."""
    model1 = model_spec.ModelSpec(
        np.array([[0, 1, 0, 0],
                  [0, 0, 0, 1],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0]]),
        [1, 2, 3, 4])
    assert model1.valid_spec
    assert np.array_equal(model1.matrix,
                          np.array([[0, 1, 0],
                                    [0, 0, 1],
                                    [0, 0, 0]]))
    assert model1.ops == [1, 2, 4]

    model2 = model_spec.ModelSpec(
        np.array([[0, 1, 0, 0, 0],
                  [0, 0, 0, 0, 1],
                  [0, 0, 0, 1, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0]]),
        [1, 2, 3, 4, 5])
    assert model2.valid_spec
    assert np.array_equal(model2.matrix,
                          np.array([[0, 1, 0],
                                    [0, 0, 1],
                                    [0, 0, 0]]))
    assert model2.ops == [1, 2, 5]
Example #2
  def test_prune_dangling(self):
    """Tests dangling vertices are pruned."""
    model1 = model_spec.ModelSpec(
        np.array([[0, 1, 1, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 1],
                  [0, 0, 0, 0]]),
        [1, 2, 3, 4])
    assert model1.valid_spec
    assert np.array_equal(model1.matrix,
                          np.array([[0, 1, 0],
                                    [0, 0, 1],
                                    [0, 0, 0]]))
    assert model1.ops == [1, 3, 4]

    model2 = model_spec.ModelSpec(
        np.array([[0, 0, 1, 0],
                  [0, 0, 0, 1],
                  [0, 0, 0, 1],
                  [0, 0, 0, 0]]),
        [1, 2, 3, 4])
    assert model2.valid_spec
    assert np.array_equal(model2.matrix,
                          np.array([[0, 1, 0],
                                    [0, 0, 1],
                                    [0, 0, 0]]))
    assert model2.ops == [1, 3, 4]
Example #3
  def test_prune_noop(self):
    """Tests graphs which require no pruning."""
    model1 = model_spec.ModelSpec(
        np.array([[0, 1, 0],
                  [0, 0, 1],
                  [0, 0, 0]]),
        [0, 0, 0])
    assert model1.valid_spec
    assert np.array_equal(model1.original_matrix, model1.matrix)
    assert model1.original_ops == model1.ops

    model2 = model_spec.ModelSpec(
        np.array([[0, 1, 1],
                  [0, 0, 1],
                  [0, 0, 0]]),
        [0, 0, 0])
    assert model2.valid_spec
    assert np.array_equal(model2.original_matrix, model2.matrix)
    assert model2.original_ops == model2.ops

    model3 = model_spec.ModelSpec(
        np.array([[0, 1, 1, 0],
                  [0, 0, 0, 1],
                  [0, 0, 0, 1],
                  [0, 0, 0, 0]]),
        [0, 0, 0, 0])
    assert model3.valid_spec
    assert np.array_equal(model3.original_matrix, model3.matrix)
    assert model3.original_ops == model3.ops
Example #4
    def test_prune_disconnected(self):
        """Tests graphs where with no input to output path are marked invalid."""
        model1 = model_spec.ModelSpec(np.array([[0, 0], [0, 0]]), [0, 0])
        assert not model1.valid_spec

        model2 = model_spec.ModelSpec(
            np.array([[0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]]),
            [1, 2, 3, 4])
        assert not model2.valid_spec

        model3 = model_spec.ModelSpec(
            np.array([[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0]]),
            [1, 2, 3, 4])
        assert not model3.valid_spec
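
Taken together, Examples #1-#4 pin down the pruning behaviour of model_spec.ModelSpec: any vertex not lying on some input-to-output path is removed, and a graph with no input-to-output path at all is marked invalid (valid_spec is False). A minimal sketch of that rule, assuming vertex 0 is the input and the last vertex is the output (an illustration only, not the library's actual implementation):

import numpy as np

def prune(matrix, ops):
    """Sketch: keep only vertices on some input->output path."""
    num_vertices = matrix.shape[0]

    # Vertices reachable from the input (forward traversal).
    forward, frontier = {0}, [0]
    while frontier:
        v = frontier.pop()
        for w in np.nonzero(matrix[v])[0]:
            if w not in forward:
                forward.add(w)
                frontier.append(w)

    # Vertices that can reach the output (backward traversal).
    backward, frontier = {num_vertices - 1}, [num_vertices - 1]
    while frontier:
        v = frontier.pop()
        for w in np.nonzero(matrix[:, v])[0]:
            if w not in backward:
                backward.add(w)
                frontier.append(w)

    keep = sorted(forward & backward)
    if len(keep) < 2:  # no input->output path: the spec is invalid
        return None, None
    return matrix[np.ix_(keep, keep)], [ops[i] for i in keep]

Running this sketch on the matrices in Example #1 reproduces the expected 3x3 chain and the pruned ops lists asserted above.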
Example #5
def create_inception_resnet_spec(config):
    """Construct an Inception-ResNet like spec.

  This spec is very similar to the InceptionV2 module, with an added residual
  connection, except that there is an extra projection in front of the max
  pool. The overall network filter counts and module counts do not match the
  actual source model.

  Args:
    config: config dict created by config.py.

  Returns:
    ModelSpec object.
  """
    spec = model_spec.ModelSpec(
        np.array([[0, 1, 1, 1, 0, 1, 1], [0, 0, 0, 0, 0, 0, 1],
                  [0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0, 0],
                  [0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1],
                  [0, 0, 0, 0, 0, 0, 0]]),
        [
            'input', 'conv1x1-bn-relu', 'conv3x3-bn-relu', 'conv3x3-bn-relu',
            'conv3x3-bn-relu', 'maxpool3x3', 'output'
        ])
    config['num_stacks'] = 3
    config['num_modules_per_stack'] = 3
    config['stem_filter_size'] = 128
    return spec
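
These create_*_spec helpers only return a module-level ModelSpec (and tweak the stack and filter settings in config); turning one into a trainable network goes through the model builder, as Example #12 does further down. A minimal sketch reusing the call shape shown there (the nasbench_config name and the 60000 CIFAR-10 training-set size are taken from the neighbouring examples; treat the exact signature as an assumption):

config = nasbench_config.build_config()
spec = create_inception_resnet_spec(config)                    # also updates config in place
model_fn = model_builder.build_model_fn(spec, config, 60000)   # 60000 = CIFAR-10 train images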
Example #6
def main(*args, **kwargs):
    nasbench = nasbench_api.NASBench(FLAGS.path_to_nasbench)
    module = nasbench.fixed_statistics[FLAGS.hash_key]
    spec = model_spec.ModelSpec(module['module_adjacency'],
                                module['module_operations'])

    config = nasbench_config.build_config()
    for flag in FLAGS.flags_by_module_dict()[args[0][0]]:
        config[flag.name] = flag.value
    config['use_tpu'] = False
    config['use_KD'] = False
    config['intermediate_evaluations'] = ['1.0']

    trainset_multiplier = FLAGS.trainset_part_percentage / 100.0
    config['num_train'] = int(config['num_train'] * trainset_multiplier)
    config['num_train_eval'] = int(config['num_train_eval'] *
                                   trainset_multiplier)
    config['num_augment'] = int(config['num_augment'] * trainset_multiplier)

    logging.info("Prepare KD dataset")
    dataset_files = FLAGS.train_data_files + [
        FLAGS.valid_data_file, FLAGS.test_data_file, FLAGS.sample_data_file
    ]
    prepare_kd_dataset(spec, config, FLAGS.save_path, dataset_files,
                       FLAGS.new_dataset_path, FLAGS.trainset_part_percentage)
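
The three int(... * trainset_multiplier) lines above simply shrink each data split proportionally; a small worked sketch with hypothetical split sizes (the real defaults come from nasbench_config.build_config()):

trainset_part_percentage = 25
trainset_multiplier = trainset_part_percentage / 100.0  # 0.25

config = {'num_train': 40000, 'num_train_eval': 10000, 'num_augment': 50000}  # hypothetical sizes
for key in ('num_train', 'num_train_eval', 'num_augment'):
    config[key] = int(config[key] * trainset_multiplier)
# config is now {'num_train': 10000, 'num_train_eval': 2500, 'num_augment': 12500}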
Example #7
def main():
    config = _config.build_config()
    config['use_tpu'] = False
    config['train_data_files'] = [
        'train_1.tfrecords', 'train_2.tfrecords', 'train_3.tfrecords',
        'train_4.tfrecords'
    ]
    config['valid_data_file'] = ['validation.tfrecords']
    config['test_data_file'] = ['test.tfrecords']
    config['sample_data_file'] = ['sample.tfrecords']
    config['train_epochs'] = 1
    config['use_KD'] = False
    
    matrix = [[0, 1, 1, 1, 0, 1, 0],    # input layer
              [0, 0, 0, 0, 0, 0, 1],    # 1x1 conv
              [0, 0, 0, 0, 0, 0, 1],    # 3x3 conv
              [0, 0, 0, 0, 1, 0, 0],    # 5x5 conv (replaced by two 3x3's)
              [0, 0, 0, 0, 0, 0, 1],    # 5x5 conv (replaced by two 3x3's)
              [0, 0, 0, 0, 0, 0, 1],    # 3x3 max-pool
              [0, 0, 0, 0, 0, 0, 0]]    # output layer
    # Operations at the vertices of the module, matches order of matrix
    labels = ['input', 'conv1x1-bn-relu', 'conv3x3-bn-relu', 'conv3x3-bn-relu',
              'conv3x3-bn-relu', 'maxpool3x3', 'output']
    

    matrix = np.array(matrix)
    labels = np.array(labels)

    spec = model_spec.ModelSpec(matrix, labels)
    model_dir = '../data/tmp'

    meta = evaluate.train_and_evaluate(spec, config, model_dir)

    output_file = os.path.join(model_dir, RESULTS_FILE)
    with tf.gfile.Open(output_file, 'w') as f:
        json.dump(meta, f, cls=NumpyEncoder)

    print('OK')
    print(spec.__dict__)
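
Before handing a hand-written module like this to evaluate.train_and_evaluate, it can be worth sanity-checking it the way Example #8 does below. A minimal sketch; the limits of 7 vertices and 9 edges are the usual NASBench-101 constraints, stated here as an assumption rather than read from this config:

m = np.array(matrix)
assert np.array_equal(m, np.triu(m, k=1)), 'module must be an upper-triangular DAG'
assert m.shape[0] <= 7, 'too many vertices for NASBench-101'
assert int(m.sum()) <= 9, 'too many edges for NASBench-101'
assert spec.valid_spec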
Example #8
    def _evaluate_work_unit(self, index):
        """Runs the evaluation of the model at the specified index.

    The index records the current index of the work unit being evaluated. Each
    worker will only compute the work units with index modulo total_workers
    equal to the worker_id.

    Args:
      index: int index into total work units.
    """
        if self.remainders:
            assert self.ordered_keys is None
            model_id = self.remainders[index][0]
            model_repeat = self.remainders[index][1]
        else:
            model_id = self.ordered_keys[index % self.num_models]
            model_repeat = index // self.num_models + 1

        matrix, labels = self.models[model_id]
        matrix = np.array(matrix)

        # Re-label to config['available_ops']
        labels = (['input'] +
                  [self.config['available_ops'][lab]
                   for lab in labels[1:-1]] + ['output'])
        spec = model_spec.ModelSpec(matrix, labels)
        assert spec.valid_spec
        assert np.sum(spec.matrix) <= self.config['max_edges']

        # Split the directory into 16^2 roughly equal subdirectories
        model_dir = os.path.join(self.output_dir, model_id[:2], model_id,
                                 'repeat_%d' % model_repeat)

        # evaluate the model
        try:
            meta = evaluate.train_and_evaluate(spec, self.config, model_dir)
        except evaluate.AbortError:
            # After hitting the retry limit, the job will continue to the next work
            # unit. These failed jobs may need to be re-run at a later point.
            return

        # Write data to model_dir
        output_file = os.path.join(model_dir, RESULTS_FILE)
        with tf.io.gfile.GFile(output_file, 'w') as f:
            json.dump(meta, f, cls=NumpyEncoder)

        # Delete some files to reclaim space
        self._clean_model_dir(model_dir)
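
The docstring above describes a simple round-robin sharding of work units across workers; a minimal sketch of the driver loop that scheme implies, covering only the ordered_keys branch (worker_id, total_workers and the model/repeat counts are hypothetical values, not fields read from the code above):

# Hypothetical numbers for illustration.
num_models, num_repeats = 1000, 3
total_workers, worker_id = 8, 2

for index in range(num_models * num_repeats):
    if index % total_workers != worker_id:
        continue                                 # another worker owns this unit
    model_position = index % num_models          # position in ordered_keys
    model_repeat = index // num_models + 1       # 1-based repeat, as above
    # evaluator._evaluate_work_unit(index) would then train/evaluate this pair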
Example #9
def create_resnet50_spec(config):
    """Construct a ResNet-50-like spec.

  The main difference is that there is an extra projection layer before the
  conv1x1, whereas the original ResNet doesn't have this. This increases the
  parameter count of this version slightly.

  Args:
    config: config dict created by config.py.

  Returns:
    ModelSpec object.
  """
    spec = model_spec.ModelSpec(np.array([[0, 1, 1], [0, 0, 1], [0, 0, 0]]),
                                ['input', 'bottleneck3x3', 'output'])
    config['num_stacks'] = 3
    config['num_modules_per_stack'] = 6
    config['stem_filter_size'] = 128
    return spec
Example #10
def main(*args, **kwargs):
    nasbench = nasbench_api.NASBench(FLAGS.path_to_nasbench)
    module = nasbench.fixed_statistics[FLAGS.hash_key]
    spec = model_spec.ModelSpec(module['module_adjacency'], module['module_operations'])

    config = nasbench_config.build_config()
    for flag in FLAGS.flags_by_module_dict()[args[0][0]]:
        config[flag.name] = flag.value
    config['use_tpu'] = False
    config['use_KD'] = False
    config['intermediate_evaluations'] = ['1.0']

    trainset_multiplier = FLAGS.trainset_part_percentage / 100.0
    config['num_train'] = int(config['num_train'] * trainset_multiplier)
    config['num_train_eval'] = int(config['num_train_eval'] * trainset_multiplier)
    config['num_augment'] = int(config['num_augment'] * trainset_multiplier)

    logging.info("Train and evaluate with config\n{}\n and spec\n{}".format(config, spec))
    train(spec, config, FLAGS.save_path)
Example #11
def create_best_nasbench_spec(config):
    """Construct the best spec in the NASBench dataset w.r.t. mean test accuracy.

  Args:
    config: config dict created by config.py.

  Returns:
    ModelSpec object.
  """
    spec = model_spec.ModelSpec(
        np.array([[0, 1, 1, 0, 0, 1, 1], [0, 0, 0, 0, 0, 1, 0],
                  [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0],
                  [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1],
                  [0, 0, 0, 0, 0, 0, 0]]),
        [
            'input', 'conv1x1-bn-relu', 'conv3x3-bn-relu', 'maxpool3x3',
            'conv3x3-bn-relu', 'conv3x3-bn-relu', 'output'
        ])
    config['num_stacks'] = 3
    config['num_modules_per_stack'] = 3
    config['stem_filter_size'] = 128
    return spec
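
A usage sketch for this helper: the spec it returns can be looked up directly in the dataset. This assumes `nasbench` has been loaded as in the main() examples above (nasbench_api.NASBench(FLAGS.path_to_nasbench)); the query call follows the public NASBench API, but treat the exact result key names as an assumption:

config = nasbench_config.build_config()
spec = create_best_nasbench_spec(config)
data = nasbench.query(spec)  # samples one recorded 108-epoch run for this spec
print(data['test_accuracy'], data['training_time'])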
Example #12
def keytotuple(key):
    cur_network_data = nb.get_metrics_from_hash(key)
    #print(cur_network_data[0])
    #print(cur_network_data[0].keys())
    model = model_spec.ModelSpec(cur_network_data[0]['module_adjacency'],
                                 cur_network_data[0]['module_operations'])
    model_fn = model_builder.build_model_fn(model, cfg, 60000)
    if os.path.exists('empty'):
        shutil.rmtree('empty')
    run_cfg = tf.contrib.tpu.RunConfig(
        model_dir='empty',
        keep_checkpoint_max=3,  # Keeps ckpt at start, halfway, and end
        save_checkpoints_secs=2**30)
    #tpu_config=tf.contrib.tpu.TPUConfig(
    #    iterations_per_loop=cfg['tpu_iterations_per_loop'],
    #    num_shards=cfg['tpu_num_shards']))
    #estimator = tf.contrib.tpu.TPUEstimator(model_fn, config=run_cfg,
    #                                       train_batch_size=cfg['batch_size'],
    #                                       eval_batch_size=cfg['batch_size'],
    #                                       predict_batch_size=cfg['batch_size'],
    #                                       use_tpu=False)#, params=cfg)
    estimator = tf.estimator.Estimator(model_fn, config=run_cfg, params=cfg)
    print(estimator)
    #dummy_input = np.zeros((1, 224, 224, 3))
    #dummy_label = np.zeros((1, 100))
    #dummy_label[0] = 1
    input_train = cifar.CIFARInput('train', cfg)
    print(cfg['batch_size'])

    #input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": dummy_input}, y=dummy_label, shuffle=True)
    #estimator.train(input_fn)
    #estimator.train(input_fn=input_train.input_fn,
    #                max_steps=1)
    #print(tf.get_default_graph().as_graph_def())

    with tf.Graph().as_default() as g:
        features = tf.placeholder(tf.float32, [cfg['batch_size'], 32, 32, 3])
        labels = tf.placeholder(tf.int32, [cfg['batch_size']])
        _ = model_fn(features,
                     labels,
                     mode=tf.estimator.ModeKeys.TRAIN,
                     params=cfg)
        with tf.Session() as sess:
            run_meta = tf.RunMetadata()
            opts = tf.profiler.ProfileOptionBuilder.float_operation()
            flops = tf.profiler.profile(g,
                                        run_meta=run_meta,
                                        cmd='op',
                                        options=opts)
            n_flops = flops.total_float_ops
            print(n_flops)
            #print(sess.graph.as_graph_def())

    training_time_sum = 0.0
    acc_sum = 0.0
    params = cur_network_data[0]['trainable_parameters']
    count = 0
    for item in cur_network_data[1][108]:
        count += 1
        training_time_sum += item['final_training_time']
        acc_sum += item['final_test_accuracy']
    training_time = training_time_sum / count
    acc = acc_sum / count

    return (params, training_time, acc, n_flops)
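
A usage sketch for keytotuple, assuming `nb` is the loaded NASBench object the function body already relies on; hash_iterator() is the dataset's iterator over model hashes, and only a few hashes are taken here because each call builds and profiles a full TF graph:

import itertools

for key in itertools.islice(nb.hash_iterator(), 3):
    params, training_time, acc, n_flops = keytotuple(key)
    print(key, params, training_time, acc, n_flops)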