def test_create_performance_definition(self):
    from sasctl.services import model_repository as mr
    from sasctl.services import model_management as mm

    project = mr.get_project(self.PROJECT_NAME)

    # Update project properties
    project['function'] = 'prediction'
    project['targetLevel'] = 'interval'
    project['targetVariable'] = 'Price'
    project['predictionVariable'] = 'var1'
    project = mr.update_project(project)

    mm.create_performance_definition(self.MODEL_NAME, 'Public', 'boston')
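# Note (illustrative, not part of the test suite): the service calls above assume
# an authenticated session already exists.  Outside the test harness this is
# typically established with sasctl.Session; the host, credentials, and project
# name below are placeholders.
from sasctl import Session
from sasctl.services import model_repository as mr

with Session('example.sas.com', 'username', 'password'):
    project = mr.get_project('Boston Housing')   # hypothetical project name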
def test_create_performance_definition():
    import copy
    from sasctl import current_session

    PROJECT = RestObj({'name': 'Test Project', 'id': '98765'})
    MODEL = RestObj({
        'name': 'Test Model',
        'id': '12345',
        'projectId': PROJECT['id']
    })
    USER = '******'

    with mock.patch('sasctl.core.requests.Session.request'):
        current_session('example.com', USER, 'password')

    with mock.patch('sasctl._services.model_repository.ModelRepository'
                    '.get_model') as get_model:
        with mock.patch('sasctl._services.model_repository.ModelRepository'
                        '.get_project') as get_project:
            with mock.patch('sasctl._services.model_management.ModelManagement'
                            '.post') as post:
                get_model.return_value = MODEL

                with pytest.raises(ValueError):
                    # Project missing all required properties
                    get_project.return_value = copy.deepcopy(PROJECT)
                    _ = mm.create_performance_definition(
                        'model', 'TestLibrary', 'TestData')

                with pytest.raises(ValueError):
                    # Project missing some required properties
                    get_project.return_value = copy.deepcopy(PROJECT)
                    get_project.return_value['targetVariable'] = 'target'
                    _ = mm.create_performance_definition(
                        'model', 'TestLibrary', 'TestData')

                with pytest.raises(ValueError):
                    # Project missing some required properties
                    get_project.return_value = copy.deepcopy(PROJECT)
                    get_project.return_value['targetLevel'] = 'interval'
                    _ = mm.create_performance_definition(
                        'model', 'TestLibrary', 'TestData')

                with pytest.raises(ValueError):
                    # Project missing some required properties
                    get_project.return_value = copy.deepcopy(PROJECT)
                    get_project.return_value['predictionVariable'] = 'predicted'
                    _ = mm.create_performance_definition(
                        'model', 'TestLibrary', 'TestData')

                get_project.return_value = copy.deepcopy(PROJECT)
                get_project.return_value['targetVariable'] = 'target'
                get_project.return_value['targetLevel'] = 'interval'
                get_project.return_value['predictionVariable'] = 'predicted'
                _ = mm.create_performance_definition('model', 'TestLibrary',
                                                     'TestData',
                                                     max_bins=3,
                                                     monitor_challenger=True,
                                                     monitor_champion=True)

                assert post.call_count == 1
                url, data = post.call_args

                assert PROJECT['id'] == data['json']['projectId']
                assert MODEL['id'] in data['json']['modelIds']
                assert 'TestLibrary' == data['json']['dataLibrary']
                assert 'TestData' == data['json']['dataPrefix']
                assert 'cas-shared-default' == data['json']['casServerId']
                assert data['json']['name'] is not None
                assert data['json']['description'] is not None
                assert data['json']['maxBins'] == 3
                assert data['json']['championMonitored'] == True
                assert data['json']['challengerMonitored'] == True


def test_table_prefix_format():
    with pytest.raises(ValueError):
        # Underscores should not be allowed
        _ = mm.create_performance_definition('model', 'TestLibrary',
                                             'invalid_name')
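# A minimal usage sketch (not part of the test file) of the optional arguments
# exercised above.  The project and model names and the 'Public'/'boston' values
# are placeholders; the project properties mirror the ones the test requires.
from sasctl.services import model_management as mm
from sasctl.services import model_repository as mr

project = mr.get_project('Boston Housing')      # hypothetical project name
project['targetVariable'] = 'Price'             # column the model predicts
project['targetLevel'] = 'interval'             # measurement level of the target
project['predictionVariable'] = 'var1'          # column holding the model's output
project = mr.update_project(project)

# max_bins and the monitor_* flags map to the maxBins, championMonitored, and
# challengerMonitored fields asserted in the request body above.
mm.create_performance_definition('Linear Regression', 'Public', 'boston',
                                 max_bins=3,
                                 monitor_champion=True,
                                 monitor_challenger=True)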
               lm, model_name,
               input=X_train,       # Use X to determine model inputs
               project=project,     # Register the model in the project
               force=True)          # Create project if it doesn't exist

# Update project properties.  Target variable must be set before performance
# definitions can be created.
project = mr.get_project(project)
project['targetVariable'] = 'Price'
project = mr.update_project(project)

# Instruct the project to look for tables in the "Public" CAS library with
# names starting with "boston_" and use these tables to track model
# performance over time.
mm.create_performance_definition(model_name, 'Public', 'boston')

# Publish the model to the real-time scoring engine
module_lm = publish_model(model_name, 'maslocal')

# Select the first row of testing data
x = X_test.iloc[0, :]

# Call the published module and score the record
result = module_lm.score(**x)
print(result)

# Build a second model
dt = DecisionTreeRegressor()
dt.fit(X_train, y_train)
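# A possible continuation (a sketch, not from the original example): the second
# model can go through the same register/monitor/publish steps as the linear
# model.  The name 'Decision Tree' and the reuse of register_model, publish_model,
# and create_performance_definition below are assumptions for illustration.
dt_name = 'Decision Tree'

# Register the decision tree in the same project as the linear model
register_model(dt, dt_name, input=X_train, project=project['name'], force=True)

# Reuse the "boston" tables in the Public caslib to track its performance
mm.create_performance_definition(dt_name, 'Public', 'boston')

# Publish it and score the same record for comparison
module_dt = publish_model(dt_name, 'maslocal')
print(module_dt.score(**x))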