Code example #1
    def save(self, model_dir):
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)

        models_file = os.path.join(model_dir, 'models')
        utils.pickle(self.models, models_file)
        print('REGRESSION: models saved into %s' % models_file)
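Both this snippet and the test below go through utils.pickle / utils.depickle instead of calling the pickle module directly. The project's utils module is not shown on this page; the sketch below is one way those two helpers could be written so the calls above work, assuming they are thin wrappers around the standard-library pickle module (everything beyond the two names and call signatures is an assumption).

import pickle as pickle_lib


def pickle(obj, path):
    # Hypothetical wrapper: serialize `obj` to the file at `path`.
    with open(path, 'wb') as f:
        pickle_lib.dump(obj, f)


def depickle(path):
    # Hypothetical wrapper: load and return the object stored at `path`.
    with open(path, 'rb') as f:
        return pickle_lib.load(f)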
Code example #2
  def test_object(self, obj, equality_fn=None):
    """Pickles `obj`, loads it back, and checks the round trip preserves it."""
    obj_path = self.tmp_dir + '/obj'
    utils.pickle(obj, obj_path)
    loaded = utils.depickle(obj_path)
    if equality_fn is None:
      self.assertEqual(obj, loaded)
    else:
      self.assertTrue(equality_fn(obj, loaded))
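test_object is written as a method of a unittest.TestCase that provides self.tmp_dir. The sketch below shows how such a test class could drive it; the class name, the fixture handling, and the utils import path are assumptions, and the helper is renamed _check_round_trip here so unittest's test discovery does not try to run it directly (it would otherwise be collected because its name starts with "test").

import shutil
import tempfile
import unittest

import numpy as np

import utils  # the project's pickle/depickle helpers (import path assumed)


class PickleRoundTripTest(unittest.TestCase):
    """Hypothetical TestCase hosting the round-trip helper from example #2."""

    def setUp(self):
        # Scratch directory used as self.tmp_dir by the helper.
        self.tmp_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def _check_round_trip(self, obj, equality_fn=None):
        # Same body as example #2: pickle `obj`, load it back, and compare.
        obj_path = self.tmp_dir + '/obj'
        utils.pickle(obj, obj_path)
        loaded = utils.depickle(obj_path)
        if equality_fn is None:
            self.assertEqual(obj, loaded)
        else:
            self.assertTrue(equality_fn(obj, loaded))

    def test_round_trips(self):
        # Plain containers can rely on the default == comparison...
        self._check_round_trip({'a': [1, 2, 3]})
        # ...while numpy arrays need an explicit equality function.
        self._check_round_trip(np.arange(5), equality_fn=np.array_equal)


if __name__ == '__main__':
    unittest.main()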
Code example #3
    def save(self, model_dir):
        """Saves all of the models in self.models into `model_dir`.

    The models are saved as serialized pickle objects.
    See: https://docs.python.org/3/library/pickle.html

    Args:
      model_dir: string, the directory to save into.
    """
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)

        models_file = os.path.join(model_dir, 'models')
        utils.pickle(self.models, models_file)
        print('REGRESSION: models saved into %s' % models_file)
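run_experiment and test_model below both call model.load(d, model_dir) to restore what save() wrote, but the loading counterpart is not among the examples on this page. Below is a minimal sketch of what it could look like, assumed to live on the same model class as save() and to reuse the same module-level os and utils imports; only the call signature is taken from the examples below, while the body and print message are assumptions.

    def load(self, dataset, model_dir):
        # Hypothetical counterpart to save(): restore self.models from `model_dir`.
        # `dataset` is accepted to match the model.load(d, model_dir) calls below,
        # but this sketch does not need it.
        models_file = os.path.join(model_dir, 'models')
        self.models = utils.depickle(models_file)
        print('REGRESSION: models loaded from %s' % models_file)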
Code example #4
File: main.py  Project: dashmn210/flipadelphia
def run_experiment(config, args, expt_id):
    # if train, switch the dataset to train, then
    #  train and save each model in the config spec
    if not os.path.exists(config.working_dir):
        os.makedirs(config.working_dir)
    utils.write_config(config, os.path.join(config.working_dir, 'config.yaml'))

    print('MAIN: parsing dataset')
    start = time.time()
    d = Dataset(config, config.base_dir)
    print('MAIN: dataset done. took %.2fs' % (time.time() - start))

    if args.train:
        d.set_active_split(config.train_suffix)

        for model_description in config.model_spec:
            if model_description.get('skip', False):
                continue
            if (args.model is not None
                    and args.model != model_description['type']):
                continue

            print('MAIN: training %s' % model_description['name'])
            start_time = time.time()
            model_dir = os.path.join(config.working_dir,
                                     model_description['name'])
            if not os.path.exists(model_dir):
                os.makedirs(model_dir)

            model = constants.MODEL_CLASSES[model_description['type']](
                config=config, params=model_description['params'])

            model.train(d, model_dir)
            model.save(model_dir)
            print('MAIN: training %s done, time %.2fs' % (
                model_description['name'], time.time() - start_time))

    # if test, switch the dataset to test,
    #  and run inference + evaluation for each model
    #  in the config spec
    if args.test:
        d.set_active_split(config.test_suffix)
        results = []  # items to be written in executive summary
        for model_description in config.model_spec:
            if model_description.get('skip', False):
                continue
            if (args.model is not None
                    and args.model != model_description['type']):
                continue

            print('MAIN: inference with %s' % model_description['name'])
            start_time = time.time()

            model = constants.MODEL_CLASSES[model_description['type']](
                config=config, params=model_description['params'])

            model_dir = os.path.join(config.working_dir,
                                     model_description['name'])
            model.load(d, model_dir)

            predictions = model.inference(d, model_dir)
            utils.pickle(predictions, os.path.join(model_dir, 'predictions'))

            evaluation = evaluator.evaluate(config, d, predictions, model_dir)
            utils.pickle(evaluation, os.path.join(model_dir, 'evaluation'))
            evaluator.write_summary(evaluation, model_dir)
            # store info for executive summary
            results.append({
                'model-name': model_description['name'],
                'model-type': model_description['type'],
                'params': str(model_description['params']),
                'correlation': evaluation['mu_corr'],
                'regression_performance': evaluation['mu_reg_perf'],
                'fixed_performance': evaluation['mu_fixed_perf'],
                'model_dir': model_dir,
                'expt_id': expt_id
            })

            print('MAIN: evaluation %s done, time %.2fs' % (
                model_description['name'], time.time() - start_time))

        return results
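For context, run_experiment only consumes args.train, args.test, and args.model, plus a parsed config and an experiment id. A minimal sketch of the command-line front-end that would produce those inputs follows; the flag names are taken from the attribute accesses above, while utils.load_config is a hypothetical helper named to mirror the utils.write_config call in run_experiment.

import argparse

import utils  # assumed to offer a YAML loader alongside utils.write_config


def parse_args():
    # Hypothetical CLI: the three flags map directly onto the args.* accesses above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='config.yaml',
                        help='Path to the experiment config file.')
    parser.add_argument('--train', action='store_true',
                        help='Train and save every model in config.model_spec.')
    parser.add_argument('--test', action='store_true',
                        help='Run inference and evaluation for every model in config.model_spec.')
    parser.add_argument('--model', default=None,
                        help='If set, only run models whose type matches this value.')
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    config = utils.load_config(args.config)  # hypothetical loader; see lead-in
    run_experiment(config, args, expt_id=0)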
Code example #5
def test_model(model_description, config, dataset):
    """Loads a model, do inference with it, and compute evaluation metrics.

  Args:
    model_description: dict, A dictionary corresponding to the part of a config
      which is about the current model which is to be run.
      Note that this dictionary is a subset of the `config`
      argument, but we keep it separate here for readability.
    config: NamedTuple, a config.yaml file which has been parsed into an object.
    dataset: src.data.dataset.Dataset, a convenience class for managing data.

  Returns:
    result: dict, a dictionary containing metrics and information about the run.
  """
    def make_summary_dict(evaluation, target_variable):
        """Finalize an evaluation before it is written to the summary file."""
        return {
            'target_variable': target_variable,
            'model_name': model_description['name'],
            'model_type': model_description['type'],
            'params': str(model_description),
            'target_log_odds': evaluation['mu_target_log_odds'],
            'confound_correlation': evaluation['mu_confound_corr'],
            'target_correlation': evaluation['mu_target_corr'],
            'informativeness_coef':
                evaluation['mu_confound_perf'] - evaluation['mu_fixed_perf'],
            'regression_performance': evaluation['mu_reg_perf'],
            'fixed_performance': evaluation['mu_fixed_perf'],
            'confound_performance': evaluation['mu_confound_perf'],
            'model_dir': model_dir,
        }

    print('MAIN: inference with %s' % model_description['name'])

    start_time = time.time()

    # Point the dataset towards its proper "test" data.
    if ('CNN' in model_description['type']
            or 'ATTN' in model_description['type']):
        # Attention and CNN models perform inference over the training data.
        dataset.set_active_split(config.train_suffix)
    else:
        # All other models perform inference over the test set.
        dataset.set_active_split(config.test_suffix)

    # Load the model.
    model = _MODEL_CLASSES[model_description['type']](config=config,
                                                      params=model_description)
    model_dir = os.path.join(config.working_dir, model_description['name'])
    model.load(dataset, model_dir)

    # Run inference with the loaded model.
    predictions = model.inference(dataset, model_dir)
    utils.pickle(predictions, os.path.join(model_dir, 'predictions'))

    # Evaluate these predictions and write the results.
    evaluation_reports = []
    eval_variable = dataset.get_variable(config.eval_variable_name)

    # Make sure the user asked us to evaluate a target variable.
    assert eval_variable['control'] is False

    if eval_variable['type'] == utils.CONTINUOUS:
        # If the eval variable is continuous, then create a new Prediction
        # object with (1) the original predictions from the trained model, and
        # (2) the feature importance values for the eval variable.
        eval_predictions = Prediction(
            scores=predictions.scores,
            feature_importance=predictions.feature_importance[
                config.eval_variable_name])
        # Now that we have a Prediction object whose feature_importance field
        # is a {token => score} mapping for the eval_variable, we run an evaluation.
        evaluation = evaluator.evaluate(config, dataset, eval_predictions,
                                        model_dir, config.eval_variable_name)
        with open(
                os.path.join(model_dir,
                             '%s_summary.txt' % (config.eval_variable_name)),
                'w') as f:
            f.write(str(evaluation))
        # Add the eval for this variable to the outgoing report.
        evaluation_reports.append(
            make_summary_dict(evaluation, config.eval_variable_name))

    # This is the branch we take if the eval variable is categorical.
    else:
        # Perform an evaluation for each level of this categorical variable
        # because the model may have generated different feature_importance
        # values for each level.
        for level in predictions.feature_importance[config.eval_variable_name]:
            variable_full_name = '%s|%s' % (config.eval_variable_name, level)

            # Create a "flat" Prediction object whose feature_importance field
            # is a {token => score} mapping and the scores are specific to this
            # variable and level.
            eval_predictions = Prediction(
                scores=predictions.scores,
                feature_importance=predictions.feature_importance[
                    config.eval_variable_name][level])
            # Run an evaluation with this variable/level specific Predictions object.
            evaluation = evaluator.evaluate(config,
                                            dataset,
                                            eval_predictions,
                                            model_dir,
                                            config.eval_variable_name,
                                            eval_level_name=level)
            with open(
                    os.path.join(model_dir,
                                 '%s_summary.txt' % (variable_full_name)),
                    'w') as f:
                f.write(str(evaluation))
            # Add the eval for this variable and level to the outgoing report.
            evaluation_reports.append(
                make_summary_dict(evaluation, variable_full_name))

    print('MAIN: evaluation %s done, time %.2fs' %
          (model_description['name'], time.time() - start_time))

    return evaluation_reports
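test_model builds Prediction objects and only ever reads their scores and feature_importance fields. The class itself is not shown in these examples; a namedtuple like the sketch below would satisfy every use above (this is an assumption about its shape, not the project's actual definition).

import collections

# Hypothetical shape: `scores` holds the model's raw predictions, and
# `feature_importance` maps a variable name to either a {token: score} dict
# (continuous variables) or a {level: {token: score}} dict (categorical ones);
# the "flat" eval_predictions built above carry a plain {token: score} dict.
Prediction = collections.namedtuple('Prediction', ['scores', 'feature_importance'])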
Code example #6
    def __init__(self, config, base_dir):
        """Initializes a Dataset object.

    Upon initialization, the object will
      -create new datafiles, one per outcome variable
      -generate a vocab
      -parse the data into a series of np arrays (one per variable)

    Args:
      config: NamedTuple, a config.yaml file that's been parsed into an object.
      base_dir: string, a directory where the system will write outputs.
    """
        self.config = config
        self.base_dir = base_dir

        # The train set is the default split.
        self.current_split = config.train_suffix

        assert self.config.data_spec[0]['type'] == utils.TEXT, (
            'Text input must be the first column of the input '
            'data!')

        # Create individual files for each variable.
        # Create dictionaries for mapping {split: {variable name: filepath} }.
        # We have to have these per-variable files to make tf.Iterators
        # in dataset.Dataset.make_tf_iterators()
        print('DATASET: making splits...')
        self.data_files, self.split_sizes, self.whole_data_files = (
            self._cut_data())

        # Create IDs for each level of a categorical variable, and map
        # variable names to their level.
        # class_to_id_map: {variable name: {'class': index}  }
        self.class_to_id_map, self.id_to_class_map = (
            self._get_categorical_tables())

        # Generate a vocabulary.
        # self.vocab = filepath to vocab file
        if self.config.vocab['vocab_file'] is None:
            start = time.time()
            input_seqs = self.whole_data_files[self.input_varname()]
            self.vocab = self._gen_vocab(input_seqs)
        else:
            self.vocab = self.config.vocab['vocab_file']

        # Create vocab maps that represent the vocabulary in 3 ways:
        # features: vocab --> index
        # ids_to_feat: index --> vocab
        # ordered_feat: list of v1, v2... ordered by index
        self.vocab_size = self._check_vocab(self.vocab)
        self.features = {v.strip(): i for i, v in enumerate(open(self.vocab))}
        self.ids_to_features = {i: f for f, i in self.features.items()}
        self.ordered_features = [
            self.ids_to_features[i] for i in range(self.vocab_size)
        ]

        # Parse the data into the form we need for training
        start = time.time()
        np_path = os.path.join(config.working_dir, 'np_data.pkl')
        if not os.path.exists(np_path):
            print('DATASET: parsing data into np arrays...')
            self.np_data = self._get_np_data()
            utils.pickle(self.np_data, np_path)
        else:
            print('DATASET: restoring np_arrays from %s' % np_path)
            self.np_data = utils.depickle(np_path)
        print('\tdone, took %.2fs' % (time.time() - start))
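Tying this constructor back to example #4: the dataset is built once per experiment and then pointed at a split before training or evaluation. A minimal usage sketch, assuming `config` is the parsed config.yaml object used throughout these examples:

# Parse and cache the data, then select the split the caller wants to work on.
d = Dataset(config, config.base_dir)
d.set_active_split(config.train_suffix)  # or config.test_suffix for evaluation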