# Assumed step-module imports for the BigMLer test methods below (paths
# follow the bigmler test-suite layout; `lr_pred` and `test_logistic` are
# both aliases of the logistic regression steps used in different methods):
import bigmler.tests.basic_tst_prediction_steps as test_pred
import bigmler.tests.basic_logistic_r_steps as lr_pred
import bigmler.tests.basic_logistic_r_steps as test_logistic
import bigmler.tests.dataset_advanced_steps as dataset_adv
import bigmler.tests.evaluation_steps as evaluation


    def setup_scenario02(self):
        """
        Scenario: Successfully building test predictions from start:
            Given I create BigML logistic regression resources uploading train "<data>" file to test "<test>" and log predictions in "<output>"
            And I check that the source has been created
            And I check that the dataset has been created
            And I check that the model has been created
            And I check that the predictions are ready
            Then the local prediction file is like "<predictions_file>"

            Examples:
            | data             | test                  | output                         | predictions_file                      |
            | ../data/iris.csv | ../data/test_iris.csv | ./scenario1_lr/predictions.csv | ./check_files/predictions_iris_lr.csv |
        """
        print(self.setup_scenario02.__doc__)
        examples = [[
            'data/iris.csv', 'data/test_iris.csv',
            'scenario1_lr/predictions.csv',
            'check_files/predictions_iris_lr.csv'
        ]]
        for example in examples:
            print("\nTesting with:\n", example)
            lr_pred.i_create_all_lr_resources(self, example[0], example[1],
                                              example[2])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            lr_pred.i_check_create_lr_model(self)
            test_pred.i_check_create_predictions(self)
            test_pred.i_check_predictions(self, example[3])
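    # The step helpers above wrap an end-to-end run of the bigmler CLI. A
    # rough sketch of the command this scenario exercises (the exact flags
    # live in the step definitions, so treat this as an assumption):
    #
    #   bigmler logistic-regression --train data/iris.csv \
    #       --test data/test_iris.csv \
    #       --output scenario1_lr/predictions.csv
    #
    # The checks then poll until source, dataset, model, and predictions
    # exist, and diff the prediction file against the check_files reference.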
    def test_scenario10(self):
        """
            Scenario: Successfully building logistic regression from a sampled dataset
                Given I create a BigML dataset from "<data>" and store logs in "<output_dir>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I create a BigML logistic regression with params "<params>" from dataset in "<output_dir>"
                And I check that the logistic regression has been created
                And the logistic regression params are "<params_json>"

                Examples:
                | data             | output_dir      | params                            | params_json                               |
                | ../data/iris.csv | ./scenario_d_10 | "--sample-rate 0.2 --replacement" | {"sample_rate": 0.2, "replacement": true} |
        """
        print(self.test_scenario10.__doc__)
        examples = [[
            'data/iris.csv', 'scenario_d_10',
            '--sample-rate 0.2 --replacement',
            '{"sample_rate": 0.2, "replacement": true}'
        ]]
        for example in examples:
            print("\nTesting with:\n", example)
            dataset_adv.i_create_dataset(self,
                                         data=example[0],
                                         output_dir=example[1])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            dataset_adv.i_create_logistic_with_params_from_dataset(
                self, params=example[2], output_dir=example[1])
            test_logistic.i_check_create_lr_model(self)
            dataset_adv.i_check_logistic_params(self, params_json=example[3])
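    # Rough sketch of what this scenario drives (hedged: the flag spelling
    # comes from the params string above, the rest is assumed):
    #
    #   bigmler --train data/iris.csv --no-model --output-dir scenario_d_10
    #   bigmler logistic-regression --dataset <id from scenario_d_10> \
    #       --sample-rate 0.2 --replacement --output-dir scenario_d_10
    #
    # The final check reads the created logistic regression back and
    # verifies its sample_rate/replacement settings against params_json.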
    def test_scenario04(self):
        """
        Scenario: Successfully building test predictions from dataset
            Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
            And I create BigML logistic regression resources using dataset to test "<test>" and log predictions in "<output>"
            And I check that the model has been created
            And I check that the predictions are ready
            Then the local prediction file is like "<predictions_file>"

            Examples:
            | scenario     | kwargs                                                                                                     | test                  | output                         | predictions_file                      |
            | scenario1_lr | {"data": "../data/iris.csv", "output": "./scenario1_lr/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ./scenario3_lr/predictions.csv | ./check_files/predictions_iris_lr.csv |

        """
        print(self.test_scenario04.__doc__)
        examples = [[
            'scenario1_lr',
            '{"data": "data/iris.csv", "output": "scenario1_lr/predictions.csv", "test": "data/test_iris.csv"}',
            'data/test_iris.csv', 'scenario3_lr/predictions.csv',
            'check_files/predictions_iris_lr.csv'
        ]]
        for example in examples:
            print("\nTesting with:\n", example)
            test_pred.i_have_previous_scenario_or_reproduce_it(
                self, example[0], example[1])
            lr_pred.i_create_lr_resources_from_dataset(self,
                                                       None,
                                                       test=example[2],
                                                       output=example[3])
            lr_pred.i_check_create_lr_model(self)
            test_pred.i_check_create_predictions(self)
            test_pred.i_check_predictions(self, example[4])
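    # This scenario reuses the dataset built by scenario1_lr (reproducing
    # that scenario first if its output is missing), so only the model and
    # the predictions are created. Roughly (an assumed sketch, not the
    # exact step internals):
    #
    #   bigmler logistic-regression --dataset <id from scenario1_lr> \
    #       --test data/test_iris.csv --output scenario3_lr/predictions.csv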
    def test_scenario03(self):
        """
        Scenario: Successfully building test predictions from source
            Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
            And I create BigML logistic regression resources using source to test "<test>" and log predictions in "<output>"
            And I check that the dataset has been created
            And I check that the model has been created
            And I check that the predictions are ready
            Then the local prediction file is like "<predictions_file>"

            Examples:
            | scenario     | kwargs                                                                                                     | test                  | output                         | predictions_file                      |
            | scenario1_lr | {"data": "../data/iris.csv", "output": "./scenario1_lr/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ./scenario2_lr/predictions.csv | ./check_files/predictions_iris_lr.csv |
        """
        print(self.test_scenario03.__doc__)
        examples = [[
            'scenario1_lr',
            '{"data": "data/iris.csv", "output": "scenario1_lr/predictions.csv", "test": "data/test_iris.csv"}',
            'data/test_iris.csv', 'scenario2_lr/predictions.csv',
            'check_files/predictions_iris_lr.csv'
        ]]
        for example in examples:
            print("\nTesting with:\n", example)
            test_pred.i_have_previous_scenario_or_reproduce_it(
                self, example[0], example[1])
            lr_pred.i_create_lr_resources_from_source(
                self, None, test=example[2], output=example[3])
            test_pred.i_check_create_dataset(self, suffix=None)
            lr_pred.i_check_create_lr_model(self)
            test_pred.i_check_create_predictions(self)
            test_pred.i_check_predictions(self, example[4])
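    # Same reuse pattern one step earlier in the chain: start from the
    # source created by scenario1_lr and rebuild dataset, model, and
    # predictions. Roughly (assumed sketch):
    #
    #   bigmler logistic-regression --source <id from scenario1_lr> \
    #       --test data/test_iris.csv --output scenario2_lr/predictions.csv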
    def test_scenario8(self):
        """
            Scenario: Successfully building evaluations for logistic regression from start:
                Given I create BigML logistic regression resources uploading train "<data>" file to evaluate and log evaluation in "<output>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the logistic regression has been created
                And I check that the evaluation has been created
                Then the evaluation file is like "<json_evaluation_file>"

                Examples:
                | data             | output                   | json_evaluation_file    |
                | ../data/iris.csv | ./scenario_e8/evaluation | ./check_files/evaluation_iris_lr.json |
        """
        print(self.test_scenario8.__doc__)
        examples = [[
            'data/iris.csv', 'scenario_e8/evaluation',
            'check_files/evaluation_iris_lr.json'
        ]]
        for example in examples:
            print("\nTesting with:\n", example)
            lr_pred.i_create_all_lr_resources_to_evaluate(self,
                                                          data=example[0],
                                                          output=example[1])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            lr_pred.i_check_create_lr_model(self)
            test_pred.i_check_create_evaluation(self)
            evaluation.then_the_evaluation_file_is_like(self, example[2])
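    # Evaluation variant: the uploaded data is used both to train and to
    # evaluate the model. Roughly (assuming the --evaluate flag applies to
    # the logistic-regression subcommand as it does to the main command):
    #
    #   bigmler logistic-regression --train data/iris.csv --evaluate \
    #       --output scenario_e8/evaluation
    #
    # The resulting evaluation JSON is then compared against the
    # check_files reference file.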