    def setUp(self):
        # Create a temporary directory holding a copy of the default configuration
        self.config_path = tempfile.mkdtemp()
        self.config_file = 'config.yaml'
        self.config_file_full_path = os.path.join(self.config_path,
                                                  self.config_file)
        with open(get_resource_file_path('default-config.yaml')) as stream:
            self.config_content = stream.read()
        with open(self.config_file_full_path, 'w') as stream:
            stream.write(self.config_content)

        # Creating prescience config
        self.prescience_config = PrescienceConfig(config_path=self.config_path,
                                                  config_file=self.config_file)

        # Loading configuration
        self.prescience_config.load()

        # Setting environment variables
        os.environ['PRESCIENCE_DEFAULT_PROJECT'] = 'project-default'
        os.environ['PRESCIENCE_DEFAULT_TOKEN'] = 'token-default'
        os.environ[
            'PRESCIENCE_DEFAULT_API_URL'] = 'https://default-api.ai.ovh.net'
        os.environ[
            'PRESCIENCE_DEFAULT_WEBSOCKET_URL'] = 'wss://default-websocket.ai.ovh.net'
        os.environ[
            'PRESCIENCE_DEFAULT_SERVING_URL'] = 'https://default-serving.ai.ovh.net'
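
    # Added sketch, not part of the original snippet: since setUp() creates a
    # directory with tempfile.mkdtemp(), a matching tearDown() (requires
    # `import shutil`) would remove it after each test.
    def tearDown(self):
        shutil.rmtree(self.config_path, ignore_errors=True)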
    def test_scenario(self):
        print('01 - Creating local input...')
        input_path = get_resource_file_path('test.csv')
        input_file = prescience.csv_local_file_input(filepath=input_path, headers=True)
        print('02 - Launching parse task...')
        parse_task = input_file.parse(source_id='my-source-id').watch()
        print('03 - Showing sources...')
        prescience.sources().show()
        source = parse_task.source()
        print('04 - Showing source schema...')
        source.schema().show()
        print('05 - Launching preprocess task...')
        preprocess_task = source.preprocess(dataset_id='my-dataset-id').watch()
        print('06 - Showing datasets...')
        prescience.datasets().show()
        dataset = preprocess_task.dataset()
        print('07 - Showing dataset schema...')
        dataset.schema().show()
        print('08 - Launching optimize task...')
        dataset.optimize().watch()
        evaluation_results = dataset.evaluation_results()
        print('09 - Showing evaluation results...')
        evaluation_results.show()
        single_eval_result = evaluation_results.content[0]
        print('10 - Launching train task...')
        single_eval_result.train('my-model').watch()
        prescience.models().show()
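
    # Added note, not part of the original snippet: the integration scenarios above
    # and below call a module-level `prescience` client directly. A minimal sketch of
    # how such a client could be wired up, mirroring the setUp() blocks in this file
    # (the path and file name are placeholders):
    #
    #     config = PrescienceConfig(config_path='/path/to/config/dir',
    #                               config_file='config.yaml')
    #     config.load()
    #     prescience = PrescienceClient(prescience_config=config)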
    def setUp(self):
        # Create a temporary directory holding a copy of the default configuration
        self.config_path = tempfile.mkdtemp()
        self.config_file = 'config.yaml'
        self.config_file_full_path = os.path.join(self.config_path,
                                                  self.config_file)
        with open(get_resource_file_path('default-config.yaml')) as stream:
            self.config_content = stream.read()
        with open(self.config_file_full_path, 'w') as stream:
            stream.write(self.config_content)

        # Creating prescience config
        self.prescience_config = PrescienceConfig(config_path=self.config_path,
                                                  config_file=self.config_file)
        # Loading configuration
        self.prescience_config.load()
        # Creating prescience client
        self.prescience_client = PrescienceClient(
            prescience_config=self.prescience_config)
    def test_parse(self):
        """
        Test the launch of a simple Parse
        """
        # Init
        output = {
            'uuid': 'parse-task-uuid',
            'type': 'parse',
            'status': 'PENDING'
        }
        self.prescience_client.call = MagicMock(return_value=(200, output, {}))

        # Test
        csv_path = get_resource_file_path('test.csv')
        input_file = self.prescience_client.csv_local_file_input(
            filepath=csv_path, headers=True)
        task = input_file.parse('my-source-id')
        self.assertEqual('parse-task-uuid', task.uuid())
        self.assertEqual('parse', task.type())
        self.assertIsInstance(task, ParseTask)
        self.assertEqual(Status.PENDING, task.status())

        expected_parse_payload = {
            'source_id': 'my-source-id',
            'type': 'CSV',
            'headers': True,
            'separator': 'COMMA'
        }
        self.prescience_client.call.assert_called_with(
            method='POST',
            path='/ml/upload/source',
            content_type='multipart/form-data',
            multipart=[('input',
                        (pycurl.FORM_CONTENTS,
                         json.dumps(expected_parse_payload),
                         pycurl.FORM_CONTENTTYPE, 'application/json')),
                       ('input-file', (pycurl.FORM_FILE, csv_path))],
            data=None,
            filepath=None,
            query_parameters=None,
            call_type=PrescienceWebService.API)
    def test_scenario(self):
        print('01 - Creating local input...')
        input_path = get_resource_file_path('test.csv')
        input_file = prescience.csv_local_file_input(filepath=input_path,
                                                     headers=True)
        print('02 - Launching parse task...')
        input_file.parse(source_id='my-source-id').watch()
        print('03 - Showing sources...')
        prescience.sources().show()
        source = prescience.source('my-source-id')
        print('04 - Showing source schema...')
        source.schema().show()
        print('05 - Launching preprocess task...')
        preprocess_task = source.preprocess(dataset_id='my-dataset-id').watch()
        print('06 - Showing datasets...')
        prescience.datasets().show()
        dataset = preprocess_task.dataset()
        print('07 - Showing dataset schema...')
        dataset.schema().show()
        print('08 - Launching optimize task...')
        dataset.optimize().watch()
        evaluation_results = dataset.evaluation_results()
        print('09 - Showing evaluation results...')
        evaluation_results.show()
        single_eval_result = evaluation_results.content[0]
        print('10 - Launching train task...')
        single_eval_result.train('my-model').watch()
        prescience.models().show()

        print('11 - Evaluating model...')
        evaluation_payload_input = prescience.model(
            'my-model').get_model_evaluation_payload(
                evaluation_id='my-evaluation',
                arguments={
                    'hours-per-week': 40,
                    'capital-gain': 2174,
                    'education-num': 13,
                    'random-bool': 'True',
                    'marital-status': 'Never-married',
                    'age': 39,
                    'sex': 'Male',
                    'relationship': 'Not-in-family',
                    'education': 'Bachelors',
                    'race': 'White',
                    'native-country': 'United-States',
                    'fnlwgt': 77516,
                    'workclass': 'State-gov',
                    'capital-loss': 0,
                    'occupation': 'Adm-clerical'
                })
        evaluation_payload_input.show()
        validation_result, _ = evaluation_payload_input.validate()
        self.assertEqual(False, validation_result)

        evaluation_payload_input.add_payload_argument('random-bool', True)
        evaluation_payload_input.show()
        validation_result, _ = evaluation_payload_input.validate()
        self.assertEqual(True, validation_result)

        evaluation_payload_output = evaluation_payload_input.evaluate()
        evaluation_payload_output.show()
        final_label = evaluation_payload_output.get_result_label()
        self.assertIn(final_label, ['<=50K', '>50K'])
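
# Added footnote, not part of the original snippets: the test methods above assume
# module-level imports along these lines (the prescience-specific names must come
# from the actual prescience-client package, whose module paths are not shown here):
#
#     import json
#     import os
#     import tempfile
#     import pycurl
#     from unittest import TestCase
#     from unittest.mock import MagicMock
#     # plus: PrescienceConfig, PrescienceClient, ParseTask, Status,
#     # PrescienceWebService and get_resource_file_path from prescience-client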