def test_scenario3(self):
        """
            Scenario: Successfully building evaluations from start:
                Given I create BigML resources uploading train "<data>" file to create model and log in "<output>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the model has been created
                And I evaluate "<test>" with proportional missing strategy
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the evaluation has been created
                Then the evaluation file is like "<json_evaluation_file>"

                Examples:
                | data             | test                          | output                      | json_evaluation_file    |
                | ../data/iris.csv | ../data/iris_nulls.csv   | ./scenario_mis_3/evaluation | ./check_files/evaluation_iris_nulls.json |

        """
        print self.test_scenario3.__doc__
        examples = [
            ['data/iris.csv', 'data/iris_nulls.csv', 'scenario_mis_3/evaluation', 'check_files/evaluation_iris_nulls.json']]
        for example in examples:
            print "\nTesting with:\n", example
            test_pred.i_create_all_resources_to_model(self, data=example[0], output=example[2])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            test_pred.i_check_create_model(self)
            evaluation.i_create_proportional_to_evaluate(self, test=example[1])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self)
            test_pred.i_check_create_evaluation(self)
            evaluation.then_the_evaluation_file_is_like(self, example[3])
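The step helpers above (test_pred, evaluation) ultimately drive the bigmler command line. A minimal sketch of the kind of call this proportional-missing-strategy scenario maps to, assuming the public --evaluate and --missing-strategy options; the exact command built by the helpers is not shown here:

import subprocess

# Hypothetical end-to-end run for this scenario: build a model from the
# training file and evaluate it on a test file that contains missing
# values, using the proportional missing strategy.
subprocess.check_call([
    "bigmler",
    "--train", "data/iris.csv",
    "--test", "data/iris_nulls.csv",
    "--evaluate",
    "--missing-strategy", "proportional",
    "--output", "scenario_mis_3/evaluation",
])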
Example #2
    def test_scenario1(self):
        """
            Scenario: Successfully building k-fold cross-validation from dataset:
                Given I create BigML dataset uploading train "<data>" file in "<output>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I create BigML <kfold>-fold cross-validation
                And I check that the <kfold>-datasets have been created
                And I check that the <kfold>-models have been created
                And I check that the <kfold>-fold cross-validation has been created
                Then the evaluation file is like "<json_evaluation_file>"

                Examples:
                | data             | output                    | kfold | json_evaluation_file               |
                | ../data/iris.csv | ./scenario_a_1/evaluation | 2     | ./check_files/evaluation_kfold.json |
        """
        print self.test_scenario1.__doc__
        examples = [
            ['data/iris.csv', 'scenario_a_1/evaluation', '2', 'check_files/evaluation_kfold.json']]
        for example in examples:
            print "\nTesting with:\n", example
            test_pred.i_create_dataset(self, data=example[0], output=example[1])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self)
            test_pred.i_create_kfold_cross_validation(self, k_folds=example[2])
            test_pred.i_check_create_kfold_datasets(self, example[2])
            test_pred.i_check_create_kfold_models(self, example[2])
            test_pred.i_check_create_kfold_cross_validation(self, example[2])
            evaluation.then_the_evaluation_file_is_like(self, example[3])
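This scenario exercises k-fold cross-validation through the bigmler analyze subcommand. A rough sketch of the assumed invocation, using the documented --cross-validation and --k-folds options; the dataset id below is a placeholder, not a real resource:

import subprocess

# Hypothetical 2-fold cross-validation over an already created dataset.
subprocess.check_call([
    "bigmler", "analyze",
    "--dataset", "dataset/000000000000000000000000",
    "--cross-validation",
    "--k-folds", "2",
])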
Example #3
    def setup_scenario1(self):
        """
            Scenario: Successfully building evaluations from start:
                Given I create BigML resources uploading train "<data>" file to evaluate and log evaluation in "<output>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the model has been created
                And I check that the evaluation has been created
                Then the evaluation file is like "<json_evaluation_file>"

                Examples:
                | data             | output                   | json_evaluation_file    |
                | ../data/iris.csv | ./scenario_e1/evaluation | ./check_files/evaluation_iris.json |
        """
        print self.setup_scenario1.__doc__
        examples = [[
            'data/iris.csv', 'scenario_e1/evaluation',
            'check_files/evaluation_iris.json'
        ]]
        for example in examples:
            print "\nTesting with:\n", example
            test_pred.i_create_all_resources_to_evaluate(self,
                                                         data=example[0],
                                                         output=example[1])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            test_pred.i_check_create_model(self)
            test_pred.i_check_create_evaluation(self)
            evaluation.then_the_evaluation_file_is_like(self, example[2])
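setup_scenario1 is the baseline flow: a single run builds source, dataset, model and evaluation, and the final step compares the generated evaluation JSON with a reference file. A hypothetical version of that comparison; the real then_the_evaluation_file_is_like step may normalize fields before comparing, and the "<output>.json" naming is an assumption:

import json

def evaluation_file_matches(evaluation_json, check_file):
    # Hypothetical helper: load the evaluation written by bigmler
    # (assumed to live at "<output>.json") and the stored reference,
    # and compare their contents.
    with open(evaluation_json) as generated_handle:
        generated = json.load(generated_handle)
    with open(check_file) as expected_handle:
        expected = json.load(expected_handle)
    return generated == expected

# Example, with the paths from the scenario above:
# evaluation_file_matches("scenario_e1/evaluation.json",
#                         "check_files/evaluation_iris.json")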
Example #4
    def test_scenario2(self):
        """
            Scenario: Successfully building evaluations from source
                Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
                And I create BigML resources using source to evaluate and log evaluation in "<output>"
                And I check that the dataset has been created
                And I check that the model has been created
                And I check that the evaluation has been created
                Then the evaluation file is like "<json_evaluation_file>"

                Examples:
                |scenario    | kwargs                                                  | output                   | json_evaluation_file    |
                | scenario_e1| {"data": "../data/iris.csv", "output": "./scenario_e1/predictions.csv"}   |./scenario_e2/evaluation | ./check_files/evaluation_iris.json |
        """
        print self.test_scenario2.__doc__
        examples = [[
            'scenario_e1',
            '{"data": "data/iris.csv", "output": "scenario_e1/predictions.csv"}',
            'scenario_e2/evaluation', 'check_files/evaluation_iris.json'
        ]]
        for example in examples:
            print "\nTesting with:\n", example
            test_pred.i_have_previous_scenario_or_reproduce_it(
                self, example[0], example[1])
            evaluation.given_i_create_bigml_resources_using_source_to_evaluate(
                self, output=example[2])
            test_pred.i_check_create_dataset(self, suffix=None)
            test_pred.i_check_create_model(self)
            test_pred.i_check_create_evaluation(self)
            evaluation.then_the_evaluation_file_is_like(self, example[3])
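test_scenario2 reuses the source created by scenario_e1 rather than uploading the data again; the kwargs column holds the JSON needed to reproduce that scenario when it has not run yet. A sketch under those assumptions, with a placeholder source id and the documented --source option:

import json
import subprocess

# The kwargs string in the examples table is plain JSON describing how to
# rebuild scenario_e1 if its output is missing.
kwargs = json.loads(
    '{"data": "data/iris.csv", "output": "scenario_e1/predictions.csv"}')

# Hypothetical follow-up run: start from the existing source instead of
# re-uploading the training file, and evaluate the resulting model.
subprocess.check_call([
    "bigmler",
    "--source", "source/000000000000000000000000",
    "--evaluate",
    "--output", "scenario_e2/evaluation",
])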
Example #6
    def test_scenario5(self):
        """
            Scenario: Successfully building evaluation from model and test file with data map
                Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
                And I create BigML resources using test file "<test>" and a fields map "<fields_map>" to evaluate a model and log evaluation in "<output>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the evaluation has been created
                Then the evaluation file is like "<json_evaluation_file>"

                Examples:
                |scenario    | kwargs                                                                   | test             | fields_map | output                   | json_evaluation_file     |
                | scenario_e1| {"data": "../data/iris.csv", "output": "./scenario_e1/predictions.csv"}  | ../data/iris_permuted.csv | ../data/fields_map.csv | ./scenario_e7/evaluation | ./check_files/evaluation_iris2.json |
        """
        print self.test_scenario5.__doc__
        examples = [[
            'scenario_e1',
            '{"data": "data/iris.csv", "output": "scenario_e1/predictions.csv"}',
            'data/iris_permuted.csv', 'data/fields_map.csv',
            'scenario_e7/evaluation', 'check_files/evaluation_iris2.json'
        ]]
        for example in examples:
            print "\nTesting with:\n", example
            test_pred.i_have_previous_scenario_or_reproduce_it(
                self, example[0], example[1])
            evaluation.i_create_all_resources_to_evaluate_with_model_and_map(
                self,
                data=example[2],
                fields_map=example[3],
                output=example[4])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            test_pred.i_check_create_evaluation(self)
            evaluation.then_the_evaluation_file_is_like(self, example[5])
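test_scenario5 evaluates an existing model on a test file whose columns are permuted, so a fields map is supplied. A sketch of the assumed command, using the documented --fields-map option and a placeholder model id:

import subprocess

# Hypothetical evaluation of a previously built model against a permuted
# test file, mapping the test columns onto the model's fields.
subprocess.check_call([
    "bigmler",
    "--model", "model/000000000000000000000000",
    "--test", "data/iris_permuted.csv",
    "--fields-map", "data/fields_map.csv",
    "--evaluate",
    "--output", "scenario_e7/evaluation",
])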
    def test_scenario2(self):
        """
            Scenario: Successfully building multi-label evaluations from source
                Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
                And I create BigML multi-label resources using source to evaluate and log evaluation in "<output>"
                And I check that the dataset has been created
                And I check that the models have been created
                And I check that the <number_of_labels> evaluations have been created
                And I check that the evaluation is ready
                Then the evaluation file is like "<json_evaluation_file>"

                Examples:
                |scenario    | kwargs                                                  | number_of_labels                    | output                        |json_evaluation_file          |
                | scenario_ml_e1| {"tag": "my_multilabel_e_1", "data": "../data/multilabel.csv", "label_separator": ":", "number_of_labels": 7, "training_separator": ",", "output": "./scenario_ml_e1/evaluation"}   | 7 | ./scenario_ml_e2/evaluation   | ./check_files/evaluation_ml.json   |
        """
        print self.test_scenario2.__doc__
        examples = [
            ['scenario_ml_e1', '{"tag": "my_multilabel_e_1", "data": "data/multilabel.csv", "label_separator": ":", "number_of_labels": 7, "training_separator": ",", "output": "scenario_ml_e1/evaluation"}', '7', 'scenario_ml_e2/evaluation', 'check_files/evaluation_ml.json']]
        for example in examples:
            print "\nTesting with:\n", example
            test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
            ml_eval.i_create_ml_evaluations_from_source(self, output=example[3])
            test_pred.i_check_create_dataset(self)
            test_pred.i_check_create_models(self)
            test_pred.i_check_create_evaluations(self, number_of_evaluations=example[2])
            ml_eval.i_check_evaluation_ready(self)
            evaluation.then_the_evaluation_file_is_like(self, example[4])
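The distinctive check in this multi-label scenario is that one evaluation per label ends up being created. A hypothetical version of such a count check; the evaluation*.json naming scheme is an assumption, not bigmler's documented output layout:

import os

def enough_evaluations_created(output_dir, number_of_labels):
    # Hypothetical check: count evaluation JSON files written under the
    # scenario's output directory and compare with the expected number
    # of labels (7 in the examples table above).
    found = [name for name in os.listdir(output_dir)
             if name.startswith("evaluation") and name.endswith(".json")]
    return len(found) >= int(number_of_labels)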
Example #9
    def setup_scenario1(self):
        """
            Scenario: Successfully building multi-label evaluations from scratch
                Given I create BigML multi-label resources tagged as "<tag>" with "<label_separator>" label separator and "<number_of_labels>" labels uploading train "<data>" file with "<training_separator>" field separator to evaluate and log evaluation in "<output>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the models have been created
                And I check that the <number_of_labels> evaluations have been created
                And I check that the evaluation is ready
                Then the evaluation file is like "<json_evaluation_file>"

                Examples:
                | tag               | label_separator | number_of_labels | data                   | training_separator | output                      | json_evaluation_file             |
                | my_multilabel_e_1 | :               | 7                | ../data/multilabel.csv | ,                  | ./scenario_ml_e1/evaluation | ./check_files/evaluation_ml.json |
        """
        print self.setup_scenario1.__doc__
        examples = [
            ['my_multilabel_e_1', ':', '7', 'data/multilabel.csv', ',', 'scenario_ml_e1/evaluation', 'check_files/evaluation_ml.json']]
        for example in examples:
            print "\nTesting with:\n", example
            ml_eval.i_create_all_ml_evaluations(self, tag=example[0], label_separator=example[1], number_of_labels=example[2], data=example[3], training_separator=example[4], output=example[5])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self)
            test_pred.i_check_create_models(self)
            test_pred.i_check_create_evaluations(self, number_of_evaluations=example[2])
            ml_eval.i_check_evaluation_ready(self)
            evaluation.then_the_evaluation_file_is_like(self, example[6])
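setup_scenario1 builds the whole multi-label pipeline in one run. A sketch of the kind of command it maps to, assuming the documented --multi-label, --label-separator, --training-separator and --tag options:

import subprocess

# Hypothetical multi-label training plus evaluation: the objective field
# packs 7 labels separated by ":" and the CSV uses "," as field separator;
# one model and one evaluation per label are expected.
subprocess.check_call([
    "bigmler",
    "--train", "data/multilabel.csv",
    "--multi-label",
    "--label-separator", ":",
    "--training-separator", ",",
    "--tag", "my_multilabel_e_1",
    "--evaluate",
    "--output", "scenario_ml_e1/evaluation",
])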
    def test_scenario11(self):
        """
            Scenario: Successfully building evaluations for deepnets from start:
                Given I create BigML deepnet resources uploading train "<data>" file to evaluate and log evaluation in "<output>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the deepnet has been created
                And I check that the evaluation has been created
                Then the evaluation file is like "<json_evaluation_file>"

                Examples:
                | data             | output                   | json_evaluation_file    |
                | ../data/iris.csv | ./scenario_e11/evaluation | ./check_files/evaluation_iris_dn.json |
        """
        print self.test_scenario11.__doc__
        examples = [
            ['data/iris.csv', 'scenario_e11/evaluation', 'check_files/evaluation_iris_dn.json']]
        for example in examples:
            print "\nTesting with:\n", example
            dn_pred.i_create_all_dn_resources_to_evaluate(self, data=example[0], output=example[1])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            dn_pred.i_check_create_dn_model(self)
            test_pred.i_check_create_evaluation(self)
            evaluation.then_the_evaluation_file_is_like(self, example[2])
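test_scenario11 is the same start-to-evaluation flow for a deepnet. A sketch of the assumed bigmler deepnet call; the exact options used by i_create_all_dn_resources_to_evaluate are not shown here:

import subprocess

# Hypothetical deepnet training and evaluation in one command.
subprocess.check_call([
    "bigmler", "deepnet",
    "--train", "data/iris.csv",
    "--evaluate",
    "--output", "scenario_e11/evaluation",
])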
Example #13
    def test_scenario04(self):
        """
        Scenario: Successfully building evaluation from fusion
            Given I create BigML fusion resources from the previously created models to evaluate and log evaluation in "<output>"
            And I check that the fusion has been created
            And I check that the evaluation has been created
            Then the evaluation file is like "<json_evaluation_file>"

                Examples:
                | output                      | json_evaluation_file                  |
                | ./scenario_fs_e1/evaluation | ./check_files/evaluation_iris_fs.json |
        """
        print self.test_scenario04.__doc__
        examples = [
            ['scenario_fs_e1/evaluation', 'check_files/evaluation_iris_fs.json']]
        for example in examples:
            print "\nTesting with:\n", example
            fs_pred.i_create_fs_resources_from_mode_and_evaluate(self, output=example[0])
            fs_pred.i_check_create_fusion(self)
            test_pred.i_check_create_evaluation(self)
            evaluation.then_the_evaluation_file_is_like(self, example[1])
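test_scenario04 evaluates a fusion built on top of previously created models. A rough sketch of the assumed bigmler fusion call; the --fusion-models option and the placeholder ids are assumptions, and i_create_fs_resources_from_mode_and_evaluate may assemble the fusion differently:

import subprocess

# Hypothetical fusion evaluation over two placeholder component models.
subprocess.check_call([
    "bigmler", "fusion",
    "--fusion-models",
    "model/000000000000000000000000,deepnet/000000000000000000000000",
    "--test", "data/iris.csv",
    "--evaluate",
    "--output", "scenario_fs_e1/evaluation",
])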