Example #1
    def test_scenario01(self):
        """
        Scenario: Successfully creating an execution from source code:
            Given I create BigML execution resources from source code "<code>" and log results in "<output_dir>"
            And I check that the script has been created
            And I check that the execution has been created
            And I check that the result is ready
            Then the result file is like "<result_file>"

            Examples:
            | code      | output_dir       | result_file                    |
            | (+ 1 1)   | scenario1_exe    | check_files/results_s1exe.json |

        """
        print self.test_scenario01.__doc__
        examples = [[
            '(+ 1 1)', 'scenario1_exe', 'check_files/results_s1exe.json'
        ]]
        for example in examples:
            print "\nTesting with:\n", example
            execute.i_create_all_execution_resources(self, example[0],
                                                     example[1])
            execute.i_check_create_script(self)
            execute.i_check_create_execution(self)
            execute.i_check_create_result(self)
            execute.i_check_result_is(self, example[2])
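For context, the workflow these steps drive can also be reproduced directly with the BigML Python bindings. The sketch below is a minimal, Python 3 illustration rather than the test helpers themselves; it assumes the `bigml` package is installed, that credentials are available via `BIGML_USERNAME`/`BIGML_API_KEY`, and that the result is read from the execution resource at the key path described in the API documentation.

# Minimal sketch, assuming the `bigml` bindings and API credentials
# in the environment (BIGML_USERNAME / BIGML_API_KEY).
from bigml.api import BigML

api = BigML()

# Create a WhizzML script from the inline source code and wait for it.
script = api.create_script("(+ 1 1)")
api.ok(script)

# Execute the script and wait until the execution finishes.
execution = api.create_execution(script)
api.ok(execution)

# The evaluated result (2 for "(+ 1 1)") is stored inside the execution
# resource; the key path follows the API docs, so verify it against the
# actual response if yours differs.
print(execution["object"]["execution"]["result"])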
Example #2
    def test_scenario1(self):
        """
            Scenario: Successfully retraining a balanced model
                Given I create a BigML balanced model from "<data>" and store logs in "<output_dir>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the model has been created
                And I retrain the model from "<data>" and store logs in "<output_dir_ret>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the model has been created
                Then I check that the model has doubled its rows
                And I check that the model is balanced

                Examples:
                | data             | output_dir      | output_dir_ret   |
                | ../data/iris.csv | ./scenario_rt_1 | ./scenario_rt_1b |
        """
        print self.test_scenario1.__doc__
        examples = [
            ['data/iris.csv', 'scenario_rt_1', 'scenario_rt_1b']]
        for example in examples:
            print "\nTesting with:\n", example
            test_pred.i_create_balanced_model(self, data=example[0], output_dir=example[1])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            test_pred.i_check_create_model(self)
            test_pred.i_retrain_model(self, data=example[0], output_dir=example[2])
            test_pred.i_check_create_source(self)
            execute_steps.i_check_create_execution(self, number_of_executions=2)
            test_pred.i_check_create_model_in_execution(self)
            test_pred.i_check_model_double(self)
            test_pred.i_check_model_is_balanced(self)
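The first Given step above corresponds to bigmler's balanced-model path, while the retrain step is later verified through WhizzML executions (`i_check_create_execution`), i.e. it is script-driven rather than a plain model call. Below is a minimal Python 3 sketch of the initial balanced model only, assuming the documented `--train`, `--balance` and `--output-dir` options of the bigmler CLI.

# Minimal sketch: build a balanced model from a CSV with the bigmler
# CLI, logging resources under the given output directory. The flags
# are the documented bigmler options; the retrain step is left out.
import subprocess

subprocess.run(
    ["bigmler",
     "--train", "data/iris.csv",     # training data, as in the example row
     "--balance",                    # balance the objective field classes
     "--output-dir", "scenario_rt_1"],
    check=True)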
Example #3
    def test_scenario03(self):
        """
        Scenario: Successfully creating an execution with input/outputs from a code file:
            Given I create BigML execution resources from code in file "<code_file>" with inputs "<inputs_dec>", outputs "<outputs_dec>" and inputs "<inputs>" and log results in "<output_dir>"
            And I check that the script has been created
            And I check that the execution has been created
            And I check that the result is ready
            Then the result file is like "<result_file>"

            Examples:
            | code_file      | output_dir       | inputs_dec           | outputs_dec           | inputs           | result_file                    |
            | code.whizzml   | scenario3_exe    | data/inputs_dec.json | data/outputs_dec.json | data/inputs.json | check_files/results_s3exe.json |

        """
        print self.test_scenario03.__doc__
        examples = [[
            'data/whizzml/code.whizzml', 'scenario3_exe',
            'data/inputs_dec.json', 'data/outputs_dec.json',
            'data/inputs.json', 'check_files/results_s3exe.json'
        ]]
        for example in examples:
            print "\nTesting with:\n", example
            execute.i_create_all_execution_with_io_resources(
                self, example[0], example[1], example[2], example[3],
                example[4])
            execute.i_check_create_script(self)
            execute.i_check_create_execution(self)
            execute.i_check_create_result(self)
            execute.i_check_result_is(self, example[5])
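The same input/output wiring can be sketched with the BigML Python bindings: scripts accept `inputs`/`outputs` declarations at creation time, and executions bind concrete values through an `inputs` list. In the sketch the JSON files are assumed to already hold those structures in the format the API expects.

# Minimal sketch, assuming the `bigml` bindings and that the JSON files
# contain input/output declarations and [name, value] input pairs in
# the format the BigML API expects.
import json
from bigml.api import BigML

api = BigML()

with open("data/whizzml/code.whizzml") as handler:
    source_code = handler.read()
with open("data/inputs_dec.json") as handler:
    inputs_dec = json.load(handler)
with open("data/outputs_dec.json") as handler:
    outputs_dec = json.load(handler)
with open("data/inputs.json") as handler:
    inputs = json.load(handler)

# Declare the script's inputs and outputs when creating it.
script = api.create_script(source_code,
                           {"inputs": inputs_dec, "outputs": outputs_dec})
api.ok(script)

# Run it, binding concrete values to the declared inputs.
execution = api.create_execution(script, {"inputs": inputs})
api.ok(execution)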
Example #4
    def test_scenario2(self):
        """
            Scenario: Successfully retraining from a model using a sampled dataset
                Given I create a BigML balanced model from "<data>" sampling 50% of the data and store logs in "<output_dir>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the model has been created
                And I retrain the model from "<data>" and store logs in "<output_dir_ret>"
                And I check that the source has been created
                And I check that the dataset has been created
                And I check that the model has been created
                Then I check that the model has doubled its rows
                And I check that the model is balanced

                Examples:
                | data             | output_dir      | output_dir_ret   |
                | ../data/iris.csv | ./scenario_rt_2 | ./scenario_rt_2b |
        """
        print self.test_scenario2.__doc__
        examples = [['data/iris.csv', 'scenario_rt_2', 'scenario_rt_2b'],
                    [
                        'https://static.bigml.com/csv/iris.csv',
                        'scenario_rt_2c', 'scenario_rt_2d'
                    ]]
        for example in examples:
            print "\nTesting with:\n", example
            test_pred.i_create_balanced_model_from_sample(
                self, data=example[0], output_dir=example[1])
            test_pred.i_check_create_source(self)
            test_pred.i_check_create_dataset(self, suffix=None)
            test_pred.i_check_create_dataset(self, suffix='gen ')
            test_pred.i_check_create_model(self)
            test_pred.i_retrain_model(self,
                                      data=example[0],
                                      output_dir=example[2])
            if not example[0].startswith("https"):
                test_pred.i_check_create_source(self)
            execute_steps.i_check_create_execution(self,
                                                   number_of_executions=2)
            test_pred.i_check_create_model_in_execution(self)
            test_pred.i_check_model_double(self)
            test_pred.i_check_model_is_balanced(self)
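The sampled variant can also be approximated with the bindings: `create_source` accepts a local path or a remote URL (matching the second example row), and `sample_rate`/`balance_objective` are model creation arguments. This sketch covers the first half of the scenario only; the retrain-and-doubled-rows check is left out.

# Minimal sketch, assuming the `bigml` bindings: build a balanced model
# on a 50% sample of the data, for both a local CSV and a remote URL.
from bigml.api import BigML

api = BigML()

for data in ["data/iris.csv", "https://static.bigml.com/csv/iris.csv"]:
    source = api.create_source(data)          # local path or remote URL
    api.ok(source)
    dataset = api.create_dataset(source)
    api.ok(dataset)
    # sample_rate / balance_objective mirror the "sampling 50% of the
    # data" and "balanced model" wording in the scenario above.
    model = api.create_model(dataset, {"sample_rate": 0.5,
                                       "balance_objective": True})
    api.ok(model)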