示例#1
0
    def test_experiment_settings(self, project, ctx, monkeypatch):
        """Options written to the config must surface in the experiment settings payload."""
        cfg = ctx.config
        cfg.set('config', 'target', 'species')
        cfg.set('config', 'experiment/cross_validation_folds', 55)
        cfg.set('config', 'experiment/max_total_time', 606)
        cfg.set('config', 'experiment/max_eval_time', 55)
        cfg.set('config', 'experiment/max_n_trials', 101)
        cfg.set('config', 'experiment/use_ensemble', False)

        # Stub out the remote API responses.
        payload = {
            'get_experiment': EXPERIMENT,
            'get_project_file': PROJECT_FILE
        }
        interceptor(payload, monkeypatch)

        api = AugerExperimentApi(
            ctx, 'project-api', 'iris-1.csv-experiment', '1234')
        settings, model_type = api.get_experiment_settings()

        options = settings['evaluation_options']
        # experiment-level options taken from config
        assert options['crossValidationFolds'] == 55
        assert options['max_total_time_mins'] == 606
        assert options['max_eval_time_mins'] == 55
        assert options['max_n_trials'] == 101
        assert options['use_ensemble'] == False
        # dataset
        assert options['targetFeature'] == 'species'
        assert options['featureColumns'] == \
            ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
        assert options['categoricalFeatures'] == \
            ['species']
        assert options['timeSeriesFeatures'] == []
        assert options['binaryClassification'] == False
        assert options['labelEncodingFeatures'] == []
        assert options['classification'] == True
        assert options['scoring'] == 'f1_macro'
示例#2
0
    def train(self):
        """Create and start an Auger experiment for the configured dataset.

        Verifies credentials, ensures the project is started, creates an
        experiment for the dataset named in auger.yaml, kicks off a training
        run, and persists the experiment name and session id back into the
        config so later commands can reference them.

        Raises:
            AugerException: if no dataset name is configured under
                auger.yaml/dataset.
        """
        # verify availability of auger credentials
        self.credentials.verify()

        self.start_project()

        data_set_name = self.ctx.config['auger'].get('dataset')
        if data_set_name is None:
            # Fixed typo in the user-facing message ('Plese' -> 'Please').
            raise AugerException(
                'Please specify DataSet name in auger.yaml/dataset')

        experiment_api = AugerExperimentApi(self.ctx, self.project_api)
        experiment_api.create(data_set_name)
        self.ctx.log('Created Experiment %s ' % experiment_api.object_name)

        experiment_session_id = experiment_api.run()
        self.ctx.log('Started Experiment %s training.' %
                     experiment_api.object_name)

        # Persist the experiment name/session id so subsequent commands
        # (e.g. evaluate/deploy) can locate this training run.
        AugerConfig(self.ctx).set_experiment(experiment_api.object_name,
                                             experiment_session_id)
示例#3
0
    def test_exclude_setting(self, project, ctx, monkeypatch):
        """Excluded columns must be dropped from featureColumns in the settings."""
        conf = ctx.get_config('config')
        conf.exclude = ['sepal_length']

        # Stub out the remote API responses.
        payload = {
            'get_experiment': EXPERIMENT,
            'get_project_file': PROJECT_FILE
        }
        interceptor(payload, monkeypatch)

        api = AugerExperimentApi(
            ctx, 'project-api', 'iris-1.csv-experiment', '1234')
        settings, model_type = api.get_experiment_settings()

        options = settings['evaluation_options']
        assert options['targetFeature'] == 'species'
        # 'sepal_length' was excluded, so only the remaining columns appear.
        assert options['featureColumns'] == \
            ['sepal_width', 'petal_length', 'petal_width']
        assert options['categoricalFeatures'] == \
            ['species']
示例#4
0
    def test_model_type_setting(self, project, ctx, monkeypatch):
        """Setting model_type to regression must disable classification and pick 'r2' scoring."""
        cfg = ctx.config
        cfg.set('config', 'target', 'species')
        cfg.set('config', 'model_type', 'regression')
        cfg.set('auger', 'experiment/metric', None)

        # Stub out the remote API responses.
        payload = {
            'get_experiment': EXPERIMENT,
            'get_project_file': PROJECT_FILE
        }
        interceptor(payload, monkeypatch)

        api = AugerExperimentApi(
            ctx, 'project-api', 'iris-1.csv-experiment', '1234')
        settings, model_type = api.get_experiment_settings()

        options = settings['evaluation_options']
        assert options['timeSeriesFeatures'] == []
        assert options['binaryClassification'] == False
        assert options['labelEncodingFeatures'] == []
        # regression -> classification off, default regression metric chosen
        assert options['classification'] == False
        assert options['scoring'] == 'r2'