    def setup_scenario2(self):
        """
            Scenario 2: Successfully building test predictions from scratch:
                Given I create BigML resources uploading train "<data>" file to test "<test>" remotely and log predictions in "<output>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the model has been created
                And I check that the source has been created from the test file
                And I check that the dataset has been created from the test file
                And I check that the batch prediction has been created
                And I check that the predictions are ready
                Then the local prediction file is like "<predictions_file>"

                Examples:
                | data             | test                  | output                        | predictions_file                    |
                | ../data/iris.csv | ../data/test_iris.csv | ./scenario_r1/predictions.csv | ./check_files/predictions_iris.csv  |
        """
        print(self.setup_scenario2.__doc__)
        examples = [
            ['data/iris.csv', 'data/test_iris.csv', 'scenario_r1/predictions.csv', 'check_files/predictions_iris.csv']]
        for example in examples:
            print "\nTesting with:\n", example
            test_pred.i_create_all_resources_batch(self, data=example[0], test=example[1], output=example[2])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            test_pred.i_check_create_model(self)
            test_batch_pred.i_check_create_test_source(self)
            test_batch_pred.i_check_create_test_dataset(self)
            test_batch_pred.i_check_create_batch_prediction(self)
            test_pred.i_check_create_predictions(self)
            test_pred.i_check_predictions(self, example[3])
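
    # Minimal, illustrative sketch (not part of the original steps): the kind of
    # bigmler call that test_pred.i_create_all_resources_batch presumably wraps
    # for this scenario: train on "<data>", score "<test>" remotely as a batch
    # prediction, and download the predictions to "<output>". The helper name and
    # the subprocess invocation are assumptions; the flags shown are standard
    # bigmler options.
    @staticmethod
    def _sketch_remote_batch_prediction(train_csv, test_csv, output_csv):
        import subprocess
        subprocess.check_call([
            "bigmler",
            "--train", train_csv,      # source -> dataset -> model from the train file
            "--test", test_csv,        # test source and dataset built from the test file
            "--remote",                # predictions are created as a BigML batch prediction
            "--output", output_csv])   # local CSV where the predictions are downloaded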
    def test_scenario6(self):
        """
            Scenario 6: Successfully building remote test predictions from scratch to a dataset:
                Given I create BigML resources uploading train "<data>" file to test "<test>" remotely to a dataset with no CSV output and log resources in "<output_dir>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the model has been created
                And I check that the source has been created from the test file
                And I check that the dataset has been created from the test file
                And I check that the batch prediction has been created
                Then I check that the batch predictions dataset exists
                And no local CSV file is created

                Examples:
                | data               | test                    | output_dir      |
                | ../data/iris.csv   | ../data/test_iris.csv   | ./scenario_r5   |
        """

        print(self.test_scenario6.__doc__)
        examples = [['data/iris.csv', 'data/test_iris.csv', 'scenario_r5']]
        for example in examples:
            print "\nTesting with:\n", example
            test_pred.i_create_all_resources_batch_to_dataset(
                self, data=example[0], test=example[1], output_dir=example[2])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            test_pred.i_check_create_model(self)
            test_batch_pred.i_check_create_test_source(self)
            test_batch_pred.i_check_create_test_dataset(self)
            test_batch_pred.i_check_create_batch_prediction(self)
            test_batch_pred.i_check_create_batch_predictions_dataset(self)
            anomaly_pred.i_check_no_local_CSV(self)
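
    # Minimal, illustrative sketch (not part of the original steps): the kind of
    # bigmler call that test_pred.i_create_all_resources_batch_to_dataset
    # presumably corresponds to, where the remote batch prediction is kept as a
    # new BigML dataset and no local CSV is written. The helper name and the
    # subprocess invocation are assumptions; the flags follow the documented
    # bigmler batch prediction options.
    @staticmethod
    def _sketch_remote_batch_prediction_to_dataset(train_csv, test_csv, output_dir):
        import subprocess
        subprocess.check_call([
            "bigmler",
            "--train", train_csv,         # source -> dataset -> model from the train file
            "--test", test_csv,           # test source and dataset built from the test file
            "--remote",                   # run the prediction as a BigML batch prediction
            "--to-dataset",               # store the batch prediction output as a dataset
            "--no-csv",                   # skip downloading a local predictions CSV
            "--output-dir", output_dir])  # directory where resource ids and logs are stored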