def test_scenario13(self):
        """
            Scenario: Successfully comparing predictions:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model
                And I wait until the model is ready less than <time_3> secs
                And I create a local model
                When I create a prediction for "<data_input>"
                Then the prediction for "<objective>" is "<prediction>"
                And I create a local prediction for "<data_input>"
                Then the local prediction is "<prediction>"

                Examples:
                | data             | time_1  | time_2 | time_3 | data_input                             | objective | prediction  | exported_file | tag |

        """
        examples = [
            ['data/iris.csv', '10', '10', '10', '{"petal width": 0.5}', '000004', 'Iris-setosa', "tmp/my_model.json", "my_test"],
            ['data/iris.csv', '10', '10', '10', '{"petal length": 6, "petal width": 2}', '000004', 'Iris-virginica', "tmp/my_model.json", "my_test"],
            ['data/iris.csv', '10', '10', '10', '{"petal length": 4, "petal width": 1.5}', '000004', 'Iris-versicolor', "tmp/my_model.json", "my_test"],
            ['data/iris_sp_chars.csv', '10', '10', '10', '{"pétal.length": 4, "pétal&width\u0000": 1.5}', '000004', 'Iris-versicolor', "tmp/my_model.json", "my_test"]]
        show_doc(self.test_scenario13, examples)
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            args = '{"tags": ["%s"]}' % example[8]
            model_create.i_create_a_model_with(self, data=args)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_export_model(self, False, example[7]) # no pmml
            prediction_compare.i_create_a_local_model_from_file(self, example[7])
            prediction_create.i_create_a_prediction(self, example[4])
            prediction_create.the_prediction_is(self, example[5], example[6])
            prediction_compare.i_create_a_local_prediction(self, example[4])
            prediction_compare.the_local_prediction_is(self, example[6])
            model_create.i_export_tags_model(self, example[7], example[8])
            prediction_compare.i_create_a_local_model_from_file(self, example[7])
            prediction_compare.i_create_a_local_prediction(self, example[4])
            prediction_compare.the_local_prediction_is(self, example[6])
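# The steps above drive the bindings through Gherkin-style helpers. As a
# standalone reference, a minimal sketch of the same round trip (train a
# tagged model, export it to JSON, rebuild it locally from the file) could
# look as follows; it assumes BigML credentials in the environment and that
# `api.export` and the file-based `Model(...)` constructor behave as in the
# bindings these tests exercise.
from bigml.api import BigML
from bigml.model import Model

api = BigML()  # reads BIGML_USERNAME / BIGML_API_KEY from the environment
source = api.create_source("data/iris.csv")
api.ok(source)                        # wait until the source is ready
dataset = api.create_dataset(source)
api.ok(dataset)
model = api.create_model(dataset, {"tags": ["my_test"]})
api.ok(model)
api.export(model["resource"], filename="tmp/my_model.json")  # JSON, not PMML
local_model = Model("tmp/my_model.json")   # local model rebuilt from the file
print local_model.predict({"petal width": 0.5})  # expected: Iris-setosa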
    def test_scenario10(self):
        """
            Scenario: Successfully comparing predictions for fusions:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a fusion from a list of models
                And I wait until the fusion is ready less than <time_4> secs
                And I create a local fusion
                When I create a prediction for "<data_input>"
                Then the prediction for "<objective>" is "<prediction>"
                And I create a local prediction for "<data_input>"
                Then the local prediction is "<prediction>"

                Examples:
                | data             | time_1  | time_2 | time_3 | time_4 | params | tag | data_input                             | objective | prediction  |

        """
        examples = [[
            'data/iris_unbalanced.csv', '30', '30', '120', '120',
            '{"tags":["my_fusion_tag"]}', 'my_fusion_tag',
            '{"petal width": 4}', '000004', 'Iris-virginica'
        ],
                    [
                        'data/grades.csv', '30', '30', '120', '120',
                        '{"tags":["my_fusion_tag_reg"]}', 'my_fusion_tag_reg',
                        '{"Midterm": 20}', '000005', 43.65286
                    ]]
        show_doc(self.test_scenario10, examples)

        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(
                self, example[2])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_retrieve_a_list_of_remote_models(
                self, example[6])
            model_create.i_create_a_fusion(self)
            model_create.the_fusion_is_finished_in_less_than(self, example[4])
            prediction_compare.i_create_a_local_fusion(self)
            prediction_create.i_create_a_fusion_prediction(self, example[7])
            prediction_create.the_prediction_is(self, example[8], example[9])
            prediction_compare.i_create_a_local_prediction(self, example[7])
            prediction_compare.the_local_prediction_is(self, example[9])
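# For reference, a condensed sketch of the fusion flow exercised above,
# written directly against the bindings API; the tag and input values mirror
# the first example row, everything else is an assumption about defaults.
from bigml.api import BigML
from bigml.fusion import Fusion

api = BigML()
source = api.create_source("data/iris_unbalanced.csv")
api.ok(source)
dataset = api.create_dataset(source)
api.ok(dataset)
model_ids = []
for _ in range(3):
    model = api.create_model(dataset, {"tags": ["my_fusion_tag"]})
    api.ok(model)
    model_ids.append(model["resource"])
fusion = api.create_fusion(model_ids)     # fusion built from the model list
api.ok(fusion)
remote = api.create_prediction(fusion, {"petal width": 4})
local_fusion = Fusion(fusion, api=api)    # local counterpart of the fusion
print remote["object"]["output"]                 # remote prediction
print local_fusion.predict({"petal width": 4})   # should agree with it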
    def test_scenario2(self):
        """
            Scenario 2: Successfully creating a fusion:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a fusion from a list of models
                And I wait until the fusion is ready less than <time_4> secs
                And I update the fusion name to "<fusion_name>"
                When I wait until the fusion is ready less than <time_5> secs
                And I create a prediction for "<data_input>"
                Then the fusion name is "<fusion_name>"
                And the prediction for "<objective>" is "<prediction>"
                And I create an evaluation for the fusion with the dataset
                And I wait until the evaluation is ready less than <time_4> secs
                Then the measured "<measure>" is <value>

                Examples:
                | data             | time_1 | time_2 | time_3 | time_4 | fusion_name        | params                       | tag             | data_input                                  | objective | prediction  | measure     | value |
                | ../data/iris.csv | 10     | 10     | 20     | 20     | my new fusion name | {"tags":["my_fusion_2_tag"]} | my_fusion_2_tag | {"petal width": 1.75, "petal length": 2.45} | 000004    | Iris-setosa | average_phi | 1.0   |
        """
        print self.test_scenario2.__doc__
        examples = [
            ['data/iris.csv', '10', '10', '20', '20', 'my new fusion name',
             '{"tags":["my_fusion_2_tag"]}', 'my_fusion_2_tag',
             '{"petal width": 1.75, "petal length": 2.45}', "000004",
             "Iris-setosa", 'average_phi', '1.0']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model_with(self, example[6])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[6])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[6])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            compare_pred.i_retrieve_a_list_of_remote_models(self, example[7])
            model_create.i_create_a_fusion(self)
            model_create.the_fusion_is_finished_in_less_than(self, example[3])
            model_create.i_update_fusion_name(self, example[5])
            model_create.the_fusion_is_finished_in_less_than(self, example[4])
            model_create.i_check_fusion_name(self, example[5])
            prediction_create.i_create_a_fusion_prediction(self, example[8])
            prediction_create.the_prediction_is(self, example[9], example[10])
            evaluation_create.i_create_an_evaluation_fusion(self)
            evaluation_create.the_evaluation_is_finished_in_less_than(self, example[3])
            evaluation_create.the_measured_measure_is_value(self, example[11], example[12])
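# A short sketch of the rename-and-evaluate half of the scenario above,
# assuming the fusion and its originating dataset already exist (the ids
# below are placeholders) and that the evaluation result keeps average_phi
# under result["model"], as the measure check expects.
from bigml.api import BigML

api = BigML()
fusion_id = "fusion/000000000000000000000000"     # placeholder id
dataset_id = "dataset/000000000000000000000000"   # placeholder id
fusion = api.update_fusion(fusion_id, {"name": "my new fusion name"})
api.ok(fusion)                                    # wait for the update to settle
evaluation = api.create_evaluation(fusion_id, dataset_id)
api.ok(evaluation)
print evaluation["object"]["result"]["model"]["average_phi"]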
    def test_scenario11(self):
        """
            Scenario: Successfully comparing predictions in operating points for fusions:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a fusion from a list of models
                And I wait until the fusion is ready less than <time_4> secs
                And I create a local fusion
                When I create a prediction for "<data_input>" in "<operating_point>"
                Then the prediction for "<objective>" is "<prediction>"
                And I create a local fusion prediction for "<data_input>" in "<operating_point>"
                Then the local ensemble prediction is "<prediction>"

                Examples:
                | data             | time_1  | time_2 | time_3 | time_4 | params | tag | data_input                             | objective | prediction  | operating_point |


        """
        examples = [
            ['data/iris_unbalanced.csv', '30', '30', '120', '120', '{"tags":["my_fusion_tag_11"]}', 'my_fusion_tag_11', '{"petal width": 4}', '000004', 'Iris-virginica',  {"kind": "probability", "threshold": 0.1, "positive_class": "Iris-setosa"}],
           ['data/iris_unbalanced.csv', '30', '30', '120', '120', '{"tags":["my_fusion_tag_11_b"]}', 'my_fusion_tag_11_b', '{"petal width": 4}', '000004', 'Iris-virginica',  {"kind": "probability", "threshold": 0.9, "positive_class": "Iris-setosa"}]]
        show_doc(self.test_scenario11, examples)

        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_retrieve_a_list_of_remote_models(self, example[6])
            model_create.i_create_a_fusion(self)
            model_create.the_fusion_is_finished_in_less_than(self, example[4])
            prediction_compare.i_create_a_local_fusion(self)
            prediction_create.i_create_a_fusion_prediction_op(self, example[7], example[10])
            prediction_create.the_prediction_is(self, example[8], example[9])
            prediction_compare.i_create_a_local_prediction_op(self, example[7], example[10])
            prediction_compare.the_local_prediction_is(self, example[9])
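# Sketch of the operating-point comparison above: the operating point is sent
# as a creation argument for the remote prediction and as the
# `operating_point` keyword for the local one. Both usages are assumptions
# based on how the bindings' supervised local models expose thresholds; the
# fusion id is a placeholder.
from bigml.api import BigML
from bigml.fusion import Fusion

api = BigML()
fusion_id = "fusion/000000000000000000000000"   # placeholder id
operating_point = {"kind": "probability",
                   "threshold": 0.1,
                   "positive_class": "Iris-setosa"}
remote = api.create_prediction(fusion_id, {"petal width": 4},
                               {"operating_point": operating_point})
local_fusion = Fusion(fusion_id, api=api)
local = local_fusion.predict({"petal width": 4},
                             operating_point=operating_point)
print remote["object"]["output"], local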
    def test_scenario12(self):
        """
            Scenario: Successfully comparing predictions for fusions:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a fusion from a list of models
                And I wait until the fusion is ready less than <time_4> secs
                And I create a local fusion
                When I create a prediction for "<data_input>"
                Then the prediction for "<objective>" is "<prediction>"
                And I create a local prediction for "<data_input>"
                Then the local prediction is "<prediction>"

                Examples:
                | data             | time_1  | time_2 | time_3 | time_4 | params | tag | data_input                             | objective | prediction  |

        """
        tag = "my_fusion_tag_12_%s" % PY3
        tag_reg = "my_fusion_tag_12_reg_%s" % PY3
        examples = [
            ['data/iris_unbalanced.csv', '30', '30', '120', '120', '{"tags":["%s"], "sample_rate": 0.8, "seed": "bigml"}' % tag, tag, '{"petal width": 4}', '000004', 'Iris-virginica'],
            ['data/grades.csv', '30', '30', '120', '120', '{"tags":["%s"], "sample_rate": 0.8, "seed": "bigml"}' % tag_reg, tag_reg, '{"Midterm": 20}', '000005', 44.37625]]
        show_doc(self.test_scenario12, examples)

        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_retrieve_a_list_of_remote_models(self, example[6])
            model_create.i_create_a_fusion_with_weights(self)
            model_create.the_fusion_is_finished_in_less_than(self, example[4])
            prediction_compare.i_create_a_local_fusion(self)
            prediction_create.i_create_a_fusion_prediction(self, example[7])
            prediction_create.the_prediction_is(self, example[8], example[9])
            prediction_compare.i_create_a_local_prediction(self, example[7])
            prediction_compare.the_local_prediction_is(self, example[9])
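# The weighted variant above builds the fusion through
# i_create_a_fusion_with_weights. A hedged sketch of what that amounts to,
# assuming (as the fusion API documents) that the models list may contain
# {"id": ..., "weight": ...} objects and that the bindings forward the list
# unchanged; the ids and weights are placeholders.
from bigml.api import BigML

api = BigML()
weighted_models = [
    {"id": "model/000000000000000000000000", "weight": 1},   # placeholder ids
    {"id": "model/000000000000000000000001", "weight": 2},
    {"id": "model/000000000000000000000002", "weight": 3}]
fusion = api.create_fusion(weighted_models)
api.ok(fusion)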
    def test_scenario3(self):
        """
            Scenario 3: Successfully creating a fusion from a dataset:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a fusion from a list of models
                And I wait until the fusion is ready less than <time_4> secs
                When I create a batch prediction for the dataset with the fusion
                And I wait until the batch prediction is ready less than <time_4> secs
                And I download the created predictions file to "<local_file>"
                Then the batch prediction file is like "<predictions_file>"

                Examples:
                | data                | time_1  | time_2 | time_3 | time_4 | tag | local_file | predictions_file       |
                | ../data/iris.csv | 10      | 10     | 20     | 20 | mytag | ./tmp/batch_predictions.csv | ./data/batch_predictions_fs.csv |
        """
        print self.test_scenario3.__doc__
        examples = [[
            'data/iris.csv', '10', '10', '20', '20',
            '{"tags":["my_fusion_3_tag"]}', 'my_fusion_3_tag',
            'tmp/batch_predictions.csv', 'data/batch_predictions_fs.csv'
        ]]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(
                self, example[2])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            compare_pred.i_retrieve_a_list_of_remote_models(self, example[6])
            model_create.i_create_a_fusion(self)
            model_create.the_fusion_is_finished_in_less_than(self, example[3])
            batch_pred_create.i_create_a_batch_prediction_fusion(self)
            batch_pred_create.the_batch_prediction_is_finished_in_less_than(
                self, example[4])
            batch_pred_create.i_download_predictions_file(self, example[7])
            batch_pred_create.i_check_predictions(self, example[8])
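# Sketch of the batch-prediction step above, assuming the fusion and the
# dataset already exist (placeholder ids); the downloaded CSV is what the
# test compares against the reference predictions file.
from bigml.api import BigML

api = BigML()
fusion_id = "fusion/000000000000000000000000"     # placeholder id
dataset_id = "dataset/000000000000000000000000"   # placeholder id
batch_prediction = api.create_batch_prediction(fusion_id, dataset_id)
api.ok(batch_prediction)
api.download_batch_prediction(batch_prediction,
                              filename="tmp/batch_predictions.csv")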
    def test_scenario1(self):
        """
            Scenario: Successfully creating a batch prediction from a multi model:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a local multi model
                When I create a batch prediction for "<data_input>" and save it in "<path>"
                And I combine the votes in "<path>"
                Then the plurality combined predictions are "<predictions>"
                And the confidence weighted predictions are "<predictions>"

                Examples:
                | data             | time_1  | time_2 | time_3 | params                         |  tag  |  data_input    | path | predictions  |
                | ../data/iris.csv | 10      | 10     | 10     | {"tags":["mytag"]} | mytag |  [{"petal width": 0.5}, {"petal length": 6, "petal width": 2}, {"petal length": 4, "petal width": 1.5}]  | ./tmp | ["Iris-setosa", "Iris-virginica", "Iris-versicolor"] |

        """
        print self.test_scenario1.__doc__
        examples = [[
            'data/iris.csv', '10', '10', '10', '{"tags":["mytag"]}', 'mytag',
            '[{"petal width": 0.5}, {"petal length": 6, "petal width": 2}, {"petal length": 4, "petal width": 1.5}]',
            './tmp', '["Iris-setosa", "Iris-virginica", "Iris-versicolor"]'
        ]]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(
                self, example[2])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            compare_pred.i_retrieve_a_list_of_remote_models(self, example[5])
            compare_pred.i_create_a_local_multi_model(self)
            compare_pred.i_create_a_batch_prediction(self, example[6],
                                                     example[7])
            compare_pred.i_combine_the_votes(self, example[7])
            compare_pred.the_plurality_combined_prediction(self, example[8])
            compare_pred.the_confidence_weighted_prediction(self, example[8])
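# The test above goes through MultiModel's file-based batch prediction and
# vote combination. A simplified sketch that predicts row by row with a local
# MultiModel instead (skipping the intermediate votes files); the tag query
# mirrors the step helper and is otherwise an assumption.
from bigml.api import BigML
from bigml.multimodel import MultiModel

api = BigML()
model_ids = [m["resource"]
             for m in api.list_models("tags__in=mytag")["objects"]]
multi_model = MultiModel(model_ids, api=api)
inputs = [{"petal width": 0.5},
          {"petal length": 6, "petal width": 2},
          {"petal length": 4, "petal width": 1.5}]
for input_data in inputs:
    print multi_model.predict(input_data)   # plurality-combined prediction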
    def test_scenario10(self):
        """
            Scenario 10: Successfully creating a local fusion from an exported file:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a fusion from a list of models
                And I wait until the fusion is ready less than <time_3> secs
                And I export the fusion to "<exported_file>"
                When I create a local fusion from the file "<exported_file>"
                Then the fusion ID and the local fusion ID match
                Examples:
                | data                | time_1  | time_2 | time_3 | exported_file | params | tag
                | ../data/iris.csv | 10      | 10     | 50 | ./tmp/fusion.json
        """
        print self.test_scenario10.__doc__
        examples = [[
            'data/iris.csv', '10', '10', '50', './tmp/fusion.json',
            'my_fusion_tag'
        ]]
        for example in examples:
            print "\nTesting with:\n", example
            tag = "%s_%s" % (example[5], PY3)
            tag_args = '{"tags":["%s"]}' % tag
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(
                self, example[2])
            model_create.i_create_a_model_with(self, tag_args)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, tag_args)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, tag_args)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_retrieve_a_list_of_remote_models(self, tag)
            model_create.i_create_a_fusion(self)
            model_create.the_fusion_is_finished_in_less_than(self, example[3])
            model_create.i_export_fusion(self, example[4])
            model_create.i_create_local_fusion_from_file(self, example[4])
            model_create.check_fusion_id_local_id(self)
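# Sketch of the export/re-import round trip checked above, assuming the local
# Fusion class accepts a path to the exported JSON file the same way the
# local Model does; the fusion id is a placeholder.
from bigml.api import BigML
from bigml.fusion import Fusion

api = BigML()
fusion_id = "fusion/000000000000000000000000"   # placeholder id
api.export(fusion_id, filename="tmp/fusion.json")
local_fusion = Fusion("tmp/fusion.json")
print local_fusion.resource_id == fusion_id     # the two ids should match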
    def test_scenario3(self):
        """
            Scenario 3: Successfully creating a fusion:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a fusion from a list of models
                And I wait until the fusion is ready less than <time_4> secs
                When I create a batch prediction for the dataset with the fusion
                And I wait until the batch prediction is ready less than <time_4> secs
                And I download the created predictions file to "<local_file>"
                Then the batch prediction file is like "<predictions_file>"

                Examples:
                | data                | time_1  | time_2 | time_3 | time_4 | tag | local_file | predictions_file       |
                | ../data/iris.csv | 10      | 10     | 20     | 20 | mytag | ./tmp/batch_predictions.csv | ./data/batch_predictions_fs.csv |
        """
        print self.test_scenario3.__doc__
        examples = [
            ['data/iris.csv', '10', '10', '20', '20',
             '{"tags":["my_fusion_3_tag"]}', 'my_fusion_3_tag',
              'tmp/batch_predictions.csv', 'data/batch_predictions_fs.csv']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            compare_pred.i_retrieve_a_list_of_remote_models(self, example[6])
            model_create.i_create_a_fusion(self)
            model_create.the_fusion_is_finished_in_less_than(self, example[3])
            batch_pred_create.i_create_a_batch_prediction_fusion(self)
            batch_pred_create.the_batch_prediction_is_finished_in_less_than(self, example[4])
            batch_pred_create.i_download_predictions_file(self, example[7])
            batch_pred_create.i_check_predictions(self, example[8])
    def test_scenario2(self):
        """

            Scenario: Successfully creating a local batch prediction from a multi model:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a local multi model
                When I create a batch multimodel prediction for "<data_inputs>"
                Then the predictions are "<predictions>"

                Examples:
                | data             | time_1  | time_2 | time_3 | params             |  tag  |  data_inputs                                | predictions  |
                | ../data/iris.csv | 10      | 10     | 10     | {"tags":["mytag"]} | mytag |  [{"petal width": 0.5}, {"petal length": 6, "petal width": 2}] | ["Iris-setosa", "Iris-virginica"] |
        """
        print self.test_scenario2.__doc__
        examples = [[
            'data/iris.csv', '10', '10', '10', '{"tags":["mytag"]}', 'mytag',
            '[{"petal width": 0.5}, {"petal length": 6, "petal width": 2}]',
            '["Iris-setosa", "Iris-virginica"]'
        ]]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(
                self, example[2])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            compare_pred.i_retrieve_a_list_of_remote_models(self, example[5])
            compare_pred.i_create_a_local_multi_model(self)
            compare_pred.i_create_a_batch_prediction_from_a_multi_model(
                self, example[6])
            compare_pred.the_batch_mm_predictions_are(self, example[7])
    def test_scenario1(self):
        """
            Scenario: Successfully creating a batch prediction from a multi model:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a local multi model
                When I create a batch prediction for "<data_input>" and save it in "<path>"
                And I combine the votes in "<path>"
                Then the plurality combined predictions are "<predictions>"
                And the confidence weighted predictions are "<predictions>"

                Examples:
                | data             | time_1  | time_2 | time_3 | params                         |  tag  |  data_input    | path | predictions  |
                | ../data/iris.csv | 10      | 10     | 10     | {"tags":["mytag"]} | mytag |  [{"petal width": 0.5}, {"petal length": 6, "petal width": 2}, {"petal length": 4, "petal width": 1.5}]  | ./tmp | ["Iris-setosa", "Iris-virginica", "Iris-versicolor"] |

        """
        print self.test_scenario1.__doc__
        examples = [
            ['data/iris.csv', '10', '10', '10', '{"tags":["mytag"]}', 'mytag', '[{"petal width": 0.5}, {"petal length": 6, "petal width": 2}, {"petal length": 4, "petal width": 1.5}]', './tmp', '["Iris-setosa", "Iris-virginica", "Iris-versicolor"]']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            compare_pred.i_retrieve_a_list_of_remote_models(self, example[5])
            compare_pred.i_create_a_local_multi_model(self)
            compare_pred.i_create_a_batch_prediction(self, example[6], example[7])
            compare_pred.i_combine_the_votes(self, example[7])
            compare_pred.the_plurality_combined_prediction(self, example[8])
            compare_pred.the_confidence_weighted_prediction(self, example[8])
    def test_scenario2(self):
        """

            Scenario: Successfully obtaining field importance from an Ensemble:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<parms1>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<parms2>"
                And I wait until the model is ready less than <time_4> secs
                And I create a model with "<parms3>"
                And I wait until the model is ready less than <time_5> secs
                When I create a local Ensemble with the last <number_of_models> models
                Then the field importance text is <field_importance>

                Examples:
                | data             | time_1  | time_2 |parms1 | time_3 |parms2 | time_4 |parms3| time_5 |number_of_models |field_importance
                | ../data/iris.csv | 10      | 10     |{"input_fields": ["000000", "000001","000003", "000004"]} |20      |{"input_fields": ["000000", "000001","000002", "000004"]} | 20     |{"input_fields": ["000000", "000001","000002", "000003", "000004"]} | 20   | 3 |[["000002", 0.5269933333333333], ["000003", 0.38936], ["000000", 0.04662333333333333], ["000001", 0.037026666666666666]]
        """
        print self.test_scenario2.__doc__
        examples = [[
            'data/iris.csv', '10', '10',
            '{"input_fields": ["000000", "000001","000003", "000004"]}', '20',
            '{"input_fields": ["000000", "000001","000002", "000004"]}', '20',
            '{"input_fields": ["000000", "000001","000002", "000003", "000004"]}',
            '20', '3',
            '[["000002", 0.5269933333333333], ["000003", 0.38936], ["000000", 0.04662333333333333], ["000001", 0.037026666666666666]]'
        ]]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(
                self, example[2])
            model_create.i_create_a_model_with(self, example[3])
            model_create.the_model_is_finished_in_less_than(self, example[4])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[6])
            model_create.i_create_a_model_with(self, example[7])
            model_create.the_model_is_finished_in_less_than(self, example[8])
            ensemble_create.create_local_ensemble_with_list(self, example[9])
            ensemble_create.field_importance_print(self, example[10])
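# Sketch of building a local Ensemble from an explicit list of model ids, as
# the step helper does with the last three models; field_importance_data()
# and its return shape are assumptions about the local Ensemble, and the ids
# are placeholders.
from bigml.api import BigML
from bigml.ensemble import Ensemble

api = BigML()
model_ids = ["model/000000000000000000000000",   # placeholder ids
             "model/000000000000000000000001",
             "model/000000000000000000000002"]
local_ensemble = Ensemble(model_ids, api=api)
print local_ensemble.field_importance_data()     # aggregated per-field importance
print local_ensemble.predict({"petal width": 0.5})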
    def test_scenario10(self):
        """
            Scenario 10: Successfully creating a local fusion from an exported file:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a fusion from a list of models
                And I wait until the fusion is ready less than <time_3> secs
                And I export the fusion to "<exported_file>"
                When I create a local fusion from the file "<exported_file>"
                Then the fusion ID and the local fusion ID match
                Examples:
                | data                | time_1  | time_2 | time_3 | exported_file | params | tag
                | ../data/iris.csv | 10      | 10     | 50 | ./tmp/fusion.json
        """
        print self.test_scenario10.__doc__
        examples = [
            ['data/iris.csv', '10', '10', '50', './tmp/fusion.json', '{"tags":["my_fusion_tag"]}', 'my_fusion_tag']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_retrieve_a_list_of_remote_models(self, example[6])
            model_create.i_create_a_fusion(self)
            model_create.the_fusion_is_finished_in_less_than(self, example[3])
            model_create.i_export_fusion(self, example[4])
            model_create.i_create_local_fusion_from_file(self, example[4])
            model_create.check_fusion_id_local_id(self)
    def test_scenario1(self):
        """
            Scenario: Successfully creating a prediction from a multi model:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a local multi model
                When I create a local prediction for "<data_input>"
                Then the prediction for "<objective>" is "<prediction>"

                Examples:
                | data             | time_1  | time_2 | time_3 | params                         |  tag  |  data_input    | prediction  |
                | ../data/iris.csv | 10      | 10     | 10     | {"tags":["mytag"]} | mytag |  {"petal width": 0.5}     | Iris-setosa |
        """
        print self.test_scenario1.__doc__
        examples = [[
            'data/iris.csv', '10', '10', '10', '{"tags":["mytag"]}', 'mytag',
            '{"petal width": 0.5}', 'Iris-setosa'
        ]]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(
                self, example[2])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            compare_pred.i_retrieve_a_list_of_remote_models(self, example[5])
            compare_pred.i_create_a_local_multi_model(self)
            compare_pred.i_create_a_local_prediction(self, example[6])
            compare_pred.the_local_prediction_is(self, example[7])
    def test_scenario2(self):
        """

            Scenario: Successfully creating a local batch prediction from a multi model:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a local multi model
                When I create a batch multimodel prediction for "<data_inputs>"
                Then the predictions are "<predictions>"

                Examples:
                | data             | time_1  | time_2 | time_3 | params             |  tag  |  data_inputs                                | predictions  |
                | ../data/iris.csv | 10      | 10     | 10     | {"tags":["mytag"]} | mytag |  [{"petal width": 0.5}, {"petal length": 6, "petal width": 2}] | ["Iris-setosa", "Iris-virginica"] |
        """
        print self.test_scenario2.__doc__
        examples = [
            ['data/iris.csv', '10', '10', '10', '{"tags":["mytag"]}', 'mytag', '[{"petal width": 0.5}, {"petal length": 6, "petal width": 2}]', '["Iris-setosa", "Iris-virginica"]']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            compare_pred.i_retrieve_a_list_of_remote_models(self, example[5])
            compare_pred.i_create_a_local_multi_model(self)
            compare_pred.i_create_a_batch_prediction_from_a_multi_model(self, example[6])
            compare_pred.the_batch_mm_predictions_are(self, example[7])
    def test_scenario1(self):
        """
            Scenario: Successfully creating a prediction from a multi model:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a local multi model
                When I create a local prediction for "<data_input>"
                Then the prediction for "<objective>" is "<prediction>"

                Examples:
                | data             | time_1  | time_2 | time_3 | params                         |  tag  |  data_input    | prediction  |
                | ../data/iris.csv | 10      | 10     | 10     | {"tags":["mytag"]} | mytag |  {"petal width": 0.5}     | Iris-setosa |
        """
        print self.test_scenario1.__doc__
        examples = [
            ['data/iris.csv', '10', '10', '10', '{"tags":["mytag"]}', 'mytag', '{"petal width": 0.5}', 'Iris-setosa']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[4])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            compare_pred.i_retrieve_a_list_of_remote_models(self, example[5])
            compare_pred.i_create_a_local_multi_model(self)
            compare_pred.i_create_a_local_prediction(self, example[6])
            compare_pred.the_local_prediction_is(self, example[7])
    def test_scenario2(self):
        """

            Scenario: Successfully obtaining field importance from an Ensemble:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<parms1>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<parms2>"
                And I wait until the model is ready less than <time_4> secs
                And I create a model with "<parms3>"
                And I wait until the model is ready less than <time_5> secs
                When I create a local Ensemble with the last <number_of_models> models
                Then the field importance text is <field_importance>

                Examples:
                | data             | time_1  | time_2 |parms1 | time_3 |parms2 | time_4 |parms3| time_5 |number_of_models |field_importance
                | ../data/iris.csv | 10      | 10     |{"input_fields": ["000000", "000001","000003", "000004"]} |20      |{"input_fields": ["000000", "000001","000002", "000004"]} | 20     |{"input_fields": ["000000", "000001","000002", "000003", "000004"]} | 20   | 3 |[["000002", 0.5269933333333333], ["000003", 0.38936], ["000000", 0.04662333333333333], ["000001", 0.037026666666666666]]
        """
        print self.test_scenario2.__doc__
        examples = [
            ['data/iris.csv', '10', '10', '{"input_fields": ["000000", "000001","000003", "000004"]}', '20', '{"input_fields": ["000000", "000001","000002", "000004"]}', '20', '{"input_fields": ["000000", "000001","000002", "000003", "000004"]}', '20', '3', '[["000002", 0.5269933333333333], ["000003", 0.38936], ["000000", 0.04662333333333333], ["000001", 0.037026666666666666]]']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model_with(self, example[3])
            model_create.the_model_is_finished_in_less_than(self, example[4])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[6])
            model_create.i_create_a_model_with(self, example[7])
            model_create.the_model_is_finished_in_less_than(self, example[8])
            ensemble_create.create_local_ensemble_with_list(self, example[9])
            ensemble_create.field_importance_print(self, example[10])
    def test_scenario11(self):
        """
            Scenario: Successfully comparing predictions in operating points for fusions:
                Given I create a data source uploading a "<data>" file
                And I wait until the source is ready less than <time_1> secs
                And I create a dataset
                And I wait until the dataset is ready less than <time_2> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I create a model with "<params>"
                And I wait until the model is ready less than <time_3> secs
                And I retrieve a list of remote models tagged with "<tag>"
                And I create a fusion from a list of models
                And I wait until the fusion is ready less than <time_4> secs
                And I create a local fusion
                When I create a prediction for "<data_input>" in "<operating_point>"
                Then the prediction for "<objective>" is "<prediction>"
                And I create a local fusion prediction for "<data_input>" in "<operating_point>"
                Then the local ensemble prediction is "<prediction>"

                Examples:
                | data             | time_1  | time_2 | time_3 | time_4 | params | tag | data_input                             | objective | prediction  | operating_point |


        """
        examples = [[
            'data/iris_unbalanced.csv', '30', '30', '120', '120',
            '{"tags":["my_fusion_tag_11"]}', 'my_fusion_tag_11',
            '{"petal width": 4}', '000004', 'Iris-virginica', {
                "kind": "probability",
                "threshold": 0.1,
                "positive_class": "Iris-setosa"
            }
        ],
                    [
                        'data/iris_unbalanced.csv', '30', '30', '120', '120',
                        '{"tags":["my_fusion_tag_11_b"]}',
                        'my_fusion_tag_11_b', '{"petal width": 4}', '000004',
                        'Iris-virginica', {
                            "kind": "probability",
                            "threshold": 0.9,
                            "positive_class": "Iris-setosa"
                        }
                    ]]
        show_doc(self.test_scenario11, examples)

        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(
                self, example[2])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            model_create.i_create_a_model_with(self, example[5])
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_retrieve_a_list_of_remote_models(
                self, example[6])
            model_create.i_create_a_fusion(self)
            model_create.the_fusion_is_finished_in_less_than(self, example[4])
            prediction_compare.i_create_a_local_fusion(self)
            prediction_create.i_create_a_fusion_prediction_op(
                self, example[7], example[10])
            prediction_create.the_prediction_is(self, example[8], example[9])
            prediction_compare.i_create_a_local_prediction_op(
                self, example[7], example[10])
            prediction_compare.the_local_prediction_is(self, example[9])