Example #1
def main():
    config = _config.build_config()
    config['use_tpu'] = False
    config['train_data_files'] = ['train_1.tfrecords', 'train_2.tfrecords', 'train_3.tfrecords', 'train_4.tfrecords']
    config['valid_data_file'] = ['validation.tfrecords']
    config['test_data_file'] = ['test.tfrecords']
    config['sample_data_file'] = ['sample.tfrecords']
    config['train_epochs'] = 1
    config['use_KD'] = False
    
    matrix = [[0, 1, 1, 1, 0, 1, 0],    # input layer
              [0, 0, 0, 0, 0, 0, 1],    # 1x1 conv
              [0, 0, 0, 0, 0, 0, 1],    # 3x3 conv
              [0, 0, 0, 0, 1, 0, 0],    # 5x5 conv (replaced by two 3x3's)
              [0, 0, 0, 0, 0, 0, 1],    # 5x5 conv (replaced by two 3x3's)
              [0, 0, 0, 0, 0, 0, 1],    # 3x3 max-pool
              [0, 0, 0, 0, 0, 0, 0]]    # output layer
    # Operations at the vertices of the module, matches order of matrix
    labels = ['input', 'conv1x1-bn-relu', 'conv3x3-bn-relu', 'conv3x3-bn-relu',
              'conv3x3-bn-relu', 'maxpool3x3', 'output']
    

    matrix = np.array(matrix)
    labels = np.array(labels)

    spec = model_spec.ModelSpec(matrix, labels)
    model_dir = '../data/tmp'

    meta = evaluate.train_and_evaluate(spec, config, model_dir)

    output_file = os.path.join(model_dir, RESULTS_FILE)
    with tf.gfile.Open(output_file, 'w') as f:
        json.dump(meta, f, cls=NumpyEncoder)

    print('OK')
    print(spec.__dict__)
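
Example #1 hard-codes a single cell. Before spending a training run on a hand-written matrix, the same validity checks used in Example #2 below (`valid_spec` and an edge budget) can be run up front. A minimal sketch, assuming the import paths above and a `max_edges` limit of 9; `check_spec` is a hypothetical helper, not part of the repository:

import numpy as np
from nasbench.lib import model_spec  # assumed import path, as above


def check_spec(matrix, labels, max_edges=9):
    # Build the spec exactly as main() does.
    spec = model_spec.ModelSpec(np.array(matrix), list(labels))
    # valid_spec is False when the pruned graph is not a usable cell.
    if not spec.valid_spec:
        raise ValueError('invalid module spec')
    # Same edge-budget assertion as Example #2.
    if np.sum(spec.matrix) > max_edges:
        raise ValueError('too many edges: %d' % np.sum(spec.matrix))
    return spec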
Example #2
    def _evaluate_work_unit(self, index):
        """Runs the evaluation of the model at the specified index.

    The index records the current index of the work unit being evaluated. Each
    worker will only compute the work units with index modulo total_workers
    equal to the worker_id.

    Args:
      index: int index into total work units.
    """
        if self.remainders:
            assert self.ordered_keys is None
            model_id = self.remainders[index][0]
            model_repeat = self.remainders[index][1]
        else:
            model_id = self.ordered_keys[index % self.num_models]
            model_repeat = index // self.num_models + 1

        matrix, labels = self.models[model_id]
        matrix = np.array(matrix)

        # Re-label to config['available_ops']
        labels = (['input'] +
                  [self.config['available_ops'][lab]
                   for lab in labels[1:-1]] + ['output'])
        spec = model_spec.ModelSpec(matrix, labels)
        assert spec.valid_spec
        assert np.sum(spec.matrix) <= self.config['max_edges']

        # Split the directory into 16^2 roughly equal subdirectories
        model_dir = os.path.join(self.output_dir, model_id[:2], model_id,
                                 'repeat_%d' % model_repeat)

        # evaluate the model
        try:
            meta = evaluate.train_and_evaluate(spec, self.config, model_dir)
        except evaluate.AbortError:
            # After hitting the retry limit, the job will continue to the next work
            # unit. These failed jobs may need to be re-run at a later point.
            return

        # Write data to model_dir
        output_file = os.path.join(model_dir, RESULTS_FILE)
        with tf.io.gfile.GFile(output_file, 'w') as f:
            json.dump(meta, f, cls=NumpyEncoder)

        # Delete some files to reclaim space
        self._clean_model_dir(model_dir)
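
The docstring of `_evaluate_work_unit` describes the sharding rule: a worker only evaluates work units whose index modulo the total number of workers equals its own worker id. A hedged sketch of the dispatch loop that rule implies, with `total_work_units`, `total_workers`, and `worker_id` as assumed attribute names (the actual driver code is not shown in this example):

    def run_worker(self):
        # Walk every work unit, but only evaluate the ones this worker owns
        # under the modulo sharding rule described in the docstring above.
        for index in range(self.total_work_units):
            if index % self.total_workers == self.worker_id:
                self._evaluate_work_unit(index)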
Example #3
    def evaluate(self, model_spec, model_dir):
        """Trains and evaluates a model spec from scratch (does not query dataset).

    This function runs the same procedure that was used to generate each
    evaluation in the dataset.  Because we are not querying the generated
    dataset of trained models, there are no limitations on number of vertices,
    edges, operations, or epochs. Note that the results will not exactly match
    the dataset due to randomness. By default, this uses TPUs for evaluation but
    CPU/GPU can be used by setting --use_tpu=false (GPU will require installing
    tensorflow-gpu).

    Args:
      model_spec: ModelSpec object.
      model_dir: directory to store the checkpoints, summaries, and logs.

    Returns:
      dict contained the evaluated data for this object, same structure as
      returned by query().
    """
        # Metadata contains additional metrics that aren't reported normally.
        # However, these are stored in the JSON file at the model_dir.
        metadata = evaluate.train_and_evaluate(model_spec, self.config,
                                               model_dir)
        metadata_file = os.path.join(model_dir, 'metadata.json')
        with tf.gfile.Open(metadata_file, 'w') as f:
            json.dump(metadata, f, cls=_NumpyEncoder)

        data_point = {}
        data_point['module_adjacency'] = model_spec.matrix
        data_point['module_operations'] = model_spec.ops
        data_point['trainable_parameters'] = metadata['trainable_params']

        final_evaluation = metadata['evaluation_results'][-1]
        data_point['training_time'] = final_evaluation['training_time']
        data_point['train_accuracy'] = final_evaluation['train_accuracy']
        data_point['validation_accuracy'] = final_evaluation[
            'validation_accuracy']
        data_point['test_accuracy'] = final_evaluation['test_accuracy']

        return data_point
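
A hedged usage sketch for the `evaluate` method above, assuming an already constructed API object (called `api` here purely for illustration) plus the `matrix` and `labels` encoding from Example #1:

spec = model_spec.ModelSpec(np.array(matrix), labels)  # cell from Example #1
data_point = api.evaluate(spec, model_dir='/tmp/nasbench_eval')  # path is illustrative

# Same structure as query(): adjacency, ops, parameter count, and final metrics.
print(data_point['trainable_parameters'])
print(data_point['training_time'])
print(data_point['train_accuracy'],
      data_point['validation_accuracy'],
      data_point['test_accuracy'])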