def test_20_banner_api(self):
    """(20) Access the Banner messages via API"""
    msgt(self.test_20_banner_api.__doc__)
    banner_info = self.get_banner_messages_via_api(2)

    # Check the banner count and responses
    #
    self.assertEqual(banner_info['count'], 2)
    self.assertEqual(banner_info['results'][0]['type'],
                     bstatic.BANNER_TYPE_WARNING)
    self.assertEqual(banner_info['results'][1]['type'],
                     bstatic.BANNER_TYPE_INFORMATIONAL)

    # Flip the sorting order
    #
    self.banner1.sort_order = 100
    self.banner1.save()

    banner_info = self.get_banner_messages_via_api(2)
    self.assertEqual(banner_info['count'], 2)
    self.assertEqual(banner_info['results'][0]['type'],
                     bstatic.BANNER_TYPE_INFORMATIONAL)
    self.assertEqual(banner_info['results'][1]['type'],
                     bstatic.BANNER_TYPE_WARNING)

    # Set banners to inactive
    #
    self.banner1.save_as_inactive()
    self.banner2.save_as_inactive()
    banner_info = self.get_banner_messages_via_api(0)
    # Fix: the final response was fetched but never checked; assert that
    # no active banners remain once both are inactive.
    self.assertEqual(banner_info['count'], 0)
def test_50_bad_total_epsilon(self):
    """(50) Fail: Bad total epsilon"""
    msgt(self.test_50_bad_total_epsilon.__doc__)
    plan = self.analysis_plan

    # Force the depositor setup's total epsilon out of bounds
    depositor_info = plan.dataset.get_depositor_setup_info()
    depositor_info.epsilon = 4
    depositor_info.save()

    # Build the validation payload from the default stat spec
    request_plan = dict(analysis_plan_id=plan.object_id,
                        dp_statistics=[self.general_stat_spec])

    # Serializer-level validation still passes
    serializer = ReleaseValidationSerializer(data=request_plan)
    valid = serializer.is_valid()
    self.assertTrue(valid)
    self.assertTrue(serializer.errors == {})

    # Running the validator should reject the overall epsilon
    stats_valid = serializer.save(**dict(opendp_user=self.user_obj))
    self.assertFalse(stats_valid.success)
    self.assertTrue(
        stats_valid.message.find(astatic.ERR_MSG_BAD_TOTAL_EPSILON) > -1)
def test_70_fail_impute_too_high(self):
    """(70) Fail: Impute higher than max"""
    msgt(self.test_70_fail_impute_too_high.__doc__)
    plan = self.analysis_plan

    # Narrow the variable's min/max window
    plan.variable_info['EyeHeight']['min'] = -8
    plan.variable_info['EyeHeight']['max'] = 5
    plan.save()

    # Fixed (impute) value deliberately sits above the max
    spec = self.general_stat_spec
    spec['fixed_value'] = 40
    request_plan = dict(analysis_plan_id=plan.object_id,
                        dp_statistics=[spec])

    # Serializer-level validation still passes
    serializer = ReleaseValidationSerializer(data=request_plan)
    self.assertTrue(serializer.is_valid())

    # Validator run: overall success, but the single stat is invalid
    stats_valid = serializer.save(**dict(opendp_user=self.user_obj))
    print('stats_valid.data', stats_valid.data)
    self.assertTrue(stats_valid.success)
    self.assertFalse(stats_valid.data[0]['valid'])

    user_msg2 = 'The "fixed value" (40.0) cannot be more than the "max" (5.0)'
    self.assertEqual(stats_valid.data[0]['message'], user_msg2)
def test_30_count_valid_another_spec(self):
    """(30) Run DP Count on another valid spec"""
    msgt(self.test_30_count_valid_another_spec.__doc__)
    spec_props = {
        'variable': 'TypingSpeed',
        'col_index': 5,
        'statistic': astatic.DP_COUNT,
        'dataset_size': 183,
        'epsilon': 1.0,
        'delta': 0.0,
        'cl': astatic.CL_99,
        'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
        'fixed_value': '62',
        'variable_info': {
            'min': 1,
            'max': 61,
            'type': pstatic.VAR_TYPE_FLOAT
        },
    }

    dp_count = DPCountSpec(spec_props)
    dp_count.is_chain_valid()

    # ------------------------------------------------------
    # Run the actual count
    # ------------------------------------------------------
    # Column indexes - We know this data has 20 columns
    col_indexes = list(range(20))  # idiom: replaces [idx for idx in range(0, 20)]

    # File object
    eye_fatigue_filepath = join(TEST_DATA_DIR, 'Fatigue_data.tab')
    self.assertTrue(isfile(eye_fatigue_filepath))

    # Fix: use a context manager so the file handle is closed even if
    # run_chain raises (original used open()/close() with no try/finally)
    with open(eye_fatigue_filepath, 'r') as file_obj:
        dp_count.run_chain(col_indexes, file_obj, sep_char="\t")

    self.assertFalse(dp_count.has_error())
    self.show_release_result(dp_count.get_release_dict())

    # (test has wide accuracy latitude)
    self.assertTrue(dp_count.accuracy_val > 4.4)
    self.assertTrue(dp_count.accuracy_val < 4.8)

    # Actual count 184
    self.assertTrue(dp_count.value > 170)  # should be well within range

    final_dict = dp_count.get_release_dict()
    self.assertIn('description', final_dict)
    self.assertIn('text', final_dict['description'])
    self.assertIn('html', final_dict['description'])
def test_20_fail_unsupported_stat(self):
    """(20) Fail: Test a known but unsupported statistic"""
    msgt(self.test_20_fail_unsupported_stat.__doc__)
    plan = self.analysis_plan

    # Request DP_QUANTILE, which is recognized but not yet implemented
    spec = self.general_stat_spec
    spec['statistic'] = astatic.DP_QUANTILE
    request_plan = dict(analysis_plan_id=plan.object_id,
                        dp_statistics=[spec])

    # Serializer-level validation still passes
    serializer = ReleaseValidationSerializer(data=request_plan)
    valid = serializer.is_valid()
    self.assertTrue(valid)
    self.assertTrue(serializer.errors == {})

    # Validator run: overall success, but the single stat is flagged
    stats_info = serializer.save(**dict(opendp_user=self.user_obj))
    self.assertTrue(stats_info.success)

    first_result = stats_info.data[0]
    self.assertEqual(first_result['valid'], False)
    self.assertEqual(first_result['statistic'], astatic.DP_QUANTILE)
    self.assertEqual(first_result['variable'], 'EyeHeight')
    self.assertEqual(
        first_result['message'],
        f'Statistic "{astatic.DP_QUANTILE}" will be supported soon!')
def test_40_test_impute(self):
    """(40) Test impute validation"""
    msgt(self.test_40_test_impute.__doc__)
    base_props = self.spec_props

    # The unmodified spec validates cleanly
    self.assertTrue(DPMeanSpec(base_props).is_chain_valid())

    # Fixed values outside [min, max] are rejected with the matching phrase
    bad_impute_info = [(-10, astatic.ERR_IMPUTE_PHRASE_MIN),
                       (45, astatic.ERR_IMPUTE_PHRASE_MAX),
                       (5.2, astatic.ERR_IMPUTE_PHRASE_MAX)]
    for bad_impute, stat_err_msg in bad_impute_info:
        print(f'> bad impute: {bad_impute}')
        props = base_props.copy()
        props['fixed_value'] = bad_impute
        failing_spec = DPMeanSpec(props)
        self.assertFalse(failing_spec.is_chain_valid())
        err_dict = failing_spec.get_error_msg_dict()
        print(f" - {err_dict['message']}")
        self.assertTrue(err_dict['message'].find(stat_err_msg) > -1)

    # In-range values (numeric or numeric strings) all validate
    for good_impute in [-8, 5, '-8.0', '5.0000', -7, 0, '0.0']:
        print(f'> good impute: {good_impute}')
        props = base_props.copy()
        props['fixed_value'] = good_impute
        self.assertTrue(DPMeanSpec(props).is_chain_valid())
def test_30_patch_bad_values(self, req_mocker):
    """(30) Attempt a patch with invalid values for updateable fields"""
    # (docstring grammar fixed: "with a invalid values" -> "with invalid values")
    msgt(self.test_30_patch_bad_values.__doc__)
    self.set_mock_requests(req_mocker)

    # Every updateable field gets an out-of-range value
    response = self.client.patch(
        reverse(
            "deposit-detail",
            kwargs={'object_id': "9255c067-e435-43bd-8af1-33a6987ffc9b"}),
        {
            'confidence_level': 0.48,
            'default_epsilon': -2,
            'epsilon': 0.001,
            'default_delta': -0.1,
            'delta': -3
        })
    self.assertEqual(response.status_code, 400)
    print(f"get response: {response.json()}")

    # Each field should come back with its specific validation message
    expected_msg = {
        'confidence_level': ['"0.48" is not a valid choice.'],
        'default_delta': [VALIDATE_MSG_ZERO_OR_GREATER],
        'delta': [VALIDATE_MSG_ZERO_OR_GREATER],
        'default_epsilon': [VALIDATE_MSG_EPSILON],
        'epsilon': [VALIDATE_MSG_EPSILON]
    }
    self.assertEqual(response.json(), expected_msg)
def test_20_error_no_file_schema(self):
    """(20) Error: No file_schema_info"""
    msgt(self.test_20_error_no_file_schema.__doc__)
    dfi = DataverseFileInfo.objects.get(pk=3)
    self.assertTrue(not dfi.source_file)

    # Blank out file_schema_info to trigger the handler error
    dfi.file_schema_info = ''

    # The download handler should flag error code dv_download_020
    dhandler = DataverseDownloadHandler(dfi)
    self.assertTrue(dhandler.has_error())
    print(dhandler.get_err_msg())
    self.assertTrue(dhandler.get_err_msg().find('dv_download_020') > -1)

    # Re-fetch and confirm depositor setup moved to the download-failed step
    dfi2 = DataverseFileInfo.objects.get(pk=3)
    self.assertEqual(
        dfi2.depositor_setup_info.user_step,
        DepositorSetupInfo.DepositorSteps.STEP_9200_DATAVERSE_DOWNLOAD_FAILED)
def test_40_error_empty_content_url(self):
    """(40) Error: Empty "contentUrl" in file_schema_info"""
    msgt(self.test_40_error_empty_content_url.__doc__)
    dfi = DataverseFileInfo.objects.get(pk=3)
    self.assertTrue(not dfi.source_file)

    # Replace the schema's contentUrl with whitespace
    bad_file_schema_info = dfi.file_schema_info
    bad_file_schema_info[dv_static.SCHEMA_KEY_CONTENTURL] = ' '
    dfi.file_schema_info = bad_file_schema_info

    # The download handler should flag error code dv_download_050
    dhandler = DataverseDownloadHandler(dfi)
    self.assertTrue(dhandler.has_error())
    print(dhandler.get_err_msg())
    self.assertTrue(dhandler.get_err_msg().find('dv_download_050') > -1)

    # Re-fetch: depositor setup should be at the download-failed step
    dfi2 = DataverseFileInfo.objects.get(pk=3)
    self.assertEqual(
        dfi2.depositor_setup_info.user_step,
        DepositorSetupInfo.DepositorSteps.STEP_9200_DATAVERSE_DOWNLOAD_FAILED)
def test_10_create_plan(self):
    """(10) Create AnalysisPlan directly"""
    msgt(self.test_10_create_plan.__doc__)
    dataset_info = DataSetInfo.objects.get(id=4)

    plan_util = AnalysisPlanUtil.create_plan(dataset_info.object_id,
                                             self.user_obj)
    # did plan creation work?
    self.assertTrue(plan_util.success)

    # look at the plan data/defaults
    the_plan = plan_util.data

    # should have same user and dataset
    self.assertEqual(the_plan.analyst.object_id, self.user_obj.object_id)
    self.assertEqual(the_plan.dataset.object_id, dataset_info.object_id)

    # check default settings
    self.assertFalse(the_plan.is_complete)
    self.assertEqual(the_plan.user_step,
                     AnalysisPlan.AnalystSteps.STEP_0700_VARIABLES_CONFIRMED)
    self.assertEqual(the_plan.variable_info,
                     dataset_info.depositor_setup_info.variable_info)
    # Idiom fix: assertIsNone instead of assertEqual(..., None)
    self.assertIsNone(the_plan.dp_statistics)
def test_45_clear_data_fail_no_cypress_app(self):
    """(45) url not available b/c cypress app not in INSTALLED_APPS"""
    msgt(self.test_45_clear_data_fail_no_cypress_app.__doc__)

    # Point Django at the cypress settings module; the cypress app is
    # deliberately NOT appended to INSTALLED_APPS here
    os.environ[
        'DJANGO_SETTINGS_MODULE'] = 'opendp_project.settings.cypress_settings'

    # log in with superuser
    self.client.force_login(self.user_obj_admin)

    # NOTE(review): if reverse() unexpectedly succeeds, this try block
    # asserts nothing -- presumably deliberate since the command check
    # below covers both configurations; confirm.
    try:
        clear_data_url = reverse('clear_test_data')
    except NoReverseMatch as err_obj:
        # Fix: assertEquals is a deprecated alias of assertEqual
        self.assertEqual('NoReverseMatch', err_obj.__class__.__name__)

    # Test management command in the same context
    if CYPRESS_SETTINGS_APP_NAME in settings.INSTALLED_APPS:
        self.run_cmd_clear_test_data(expect_success=True)
    else:
        self.run_cmd_clear_test_data(expect_success=False,
                                     **dict(expect_command_error=True))
def test_100_update_plan_bad_fields(self):
    """(100) Update AnalysisPlan, fail w/ bad fields"""
    msgt(self.test_100_update_plan_bad_fields.__doc__)
    dataset_info = DataSetInfo.objects.get(id=4)

    # Create a plan to update
    plan_resp = AnalysisPlanUtil.create_plan(dataset_info.object_id,
                                             self.user_obj)
    self.assertTrue(plan_resp.success)

    # PATCH a field ("name") that is not updateable
    plan_object_id = plan_resp.data.object_id
    payload = json.dumps(dict(name='haha'))
    response = self.client.patch(f'/api/analyze/{plan_object_id}/',
                                 payload,
                                 content_type='application/json')

    # Expect a 400 carrying the not-updateable message
    self.assertEqual(response.status_code, 400)
    jresp = response.json()
    self.assertTrue(
        jresp['message'].find(astatic.ERR_MSG_FIELDS_NOT_UPDATEABLE) > -1)
def test_80_update_plan(self):
    """(80) Update AnalysisPlan"""
    msgt(self.test_80_update_plan.__doc__)
    dataset_info = DataSetInfo.objects.get(id=4)

    # Create a plan to update
    plan_resp = AnalysisPlanUtil.create_plan(dataset_info.object_id,
                                             self.user_obj)
    self.assertTrue(plan_resp.success)

    # PATCH dp_statistics, an updateable field
    plan_object_id = plan_resp.data.object_id
    payload = json.dumps(dict(dp_statistics=dict(hi='there')))
    response = self.client.patch(f'/api/analyze/{plan_object_id}/',
                                 payload,
                                 content_type='application/json')

    # The updated value should round-trip
    self.assertEqual(response.status_code, 200)
    jresp = response.json()
    self.assertEqual(jresp['dp_statistics'], dict(hi='there'))
def test_15_banner_timing(self):
    """(15) banner timing"""
    msgt(self.test_15_banner_timing.__doc__)

    # Times outside banner2's viewing window are rejected
    three_days_ago = self.current_time + datetime.timedelta(days=-3)
    next_week = self.current_time + datetime.timedelta(days=7)
    self.assertFalse(self.banner2.is_current_time_window(three_days_ago))
    self.assertFalse(self.banner2.is_current_time_window(next_week))

    # At out-of-range times only one (untimed) banner is active
    self.assertTrue(
        BannerMessage.get_active_banners(three_days_ago).count() == 1)
    self.assertTrue(
        BannerMessage.get_active_banners(next_week).count() == 1)

    # A start time after the stop time cannot be saved
    self.banner2.view_start_time = self.next_day
    self.banner2.view_stop_time = self.yesterday
    with self.assertRaises(ValueError):
        self.banner2.save()
def test_30_bad_confidence_levels(self):
    """(30) Bad confidence level vals"""
    msgt(self.test_30_bad_confidence_levels.__doc__)
    props = self.spec_props

    def decimal_steps(start, stop, step):
        # accumulate with Decimal so increments are exact
        while start < stop:
            yield float(start)
            start += decimal.Decimal(step)

    # Numeric values outside the allowed CL choices
    for cl_val in list(decimal_steps(-1, 3, '0.08')):
        props['cl'] = cl_val
        dp_mean = DPMeanSpec(props)
        self.assertFalse(dp_mean.is_chain_valid())
        self.assertTrue(dp_mean.get_single_err_msg().find(
            VALIDATE_MSG_NOT_VALID_CL_VALUE) > -1)

    # Values that cannot even be converted to a float
    for cl_val in ['alphabet', 'soup', 'c']:
        props['cl'] = cl_val
        dp_mean = DPMeanSpec(props)
        self.assertFalse(dp_mean.is_chain_valid())
        self.assertTrue(dp_mean.get_single_err_msg().find(
            'Failed to convert "cl" to a float') > -1)
def test_90_direct_profile_download_fail(self):
    """(90) API endpoint: fail to download file"""
    msgt(self.test_90_direct_profile_download_fail.__doc__)
    dfi = DataverseFileInfo.objects.get(pk=3)
    self.assertTrue(not dfi.source_file)

    # Mock Dataverse so the datafile download returns a 404
    with responses.RequestsMock() as rsps:
        rsps.add(
            responses.GET,
            "https://dataverse.harvard.edu/api/access/datafile/101649",
            json={'error': 'not found'},
            status=404)

        # ---------------------------
        # Run the Profiler!
        # ---------------------------
        response = self.client.post(
            '/api/profile/run-direct-profile/',
            json.dumps(
                {"object_id": "af0d01d4-073c-46fa-a2ff-829193828b82"}),
            content_type='application/json')

        # Endpoint answers 200 but reports the failed download
        self.assertEqual(response.status_code, 200)
        jresp = response.json()
        self.assertEqual(jresp.get('success'), False)
        self.assertTrue(jresp.get('message').find('failed') > -1)
def test_35_check_confidence_level_alpha(self):
    """(35) Check accuracy with bad confidence level"""
    msgt(self.test_35_check_confidence_level_alpha.__doc__)

    # Valid spec: CL maps to the expected CL-99 alpha
    # (shouldn't happen in practice: we change cl *after* validity below)
    #
    spec_props_income = self.spec_props_income.copy()
    dp_mean = DPMeanSpec(spec_props_income)
    self.assertTrue(dp_mean.is_chain_valid())
    self.assertEqual(dp_mean.get_confidence_level_alpha(),
                     astatic.CL_99_ALPHA)

    # Set CL to None -- shouldn't happen, would be caught in the __init__
    #
    dp_mean.cl = None
    cl_alpha = dp_mean.get_confidence_level_alpha()
    self.assertIsNone(cl_alpha)
    self.assertTrue(dp_mean.has_error())
    self.assertTrue(dp_mean.get_single_err_msg().startswith(
        astatic.ERR_MSG_CL_ALPHA_CL_NOT_SET))

    # Set CL to non numeric -- shouldn't happen, would be caught in the __init__
    #
    spec_props_income2 = self.spec_props_income.copy()
    # Fix: pass the fresh copy; it was created but the original dict was used
    dp_mean = DPMeanSpec(spec_props_income2)
    self.assertTrue(dp_mean.is_chain_valid())
    dp_mean.cl = 'zebra'
    cl_alpha = dp_mean.get_confidence_level_alpha()
    self.assertIsNone(cl_alpha)
    self.assertTrue(dp_mean.has_error())
    self.assertTrue(dp_mean.get_single_err_msg().startswith(
        astatic.ERR_MSG_CL_ALPHA_CL_NOT_NUMERIC))

    # Set CL to 2.0 -- shouldn't happen, would be caught in the __init__
    #
    spec_props_income3 = self.spec_props_income.copy()
    dp_mean = DPMeanSpec(spec_props_income3)
    self.assertTrue(dp_mean.is_chain_valid())
    dp_mean.cl = 2.0
    cl_alpha = dp_mean.get_confidence_level_alpha()
    self.assertIsNone(cl_alpha)
    self.assertTrue(dp_mean.has_error())
    self.assertTrue(dp_mean.get_single_err_msg().startswith(
        astatic.ERR_MSG_CL_ALPHA_CL_LESS_THAN_0))

    # Set CL to -1 -- shouldn't happen, would be caught in the __init__
    # (fix: renamed duplicate local spec_props_income3 -> spec_props_income4)
    #
    spec_props_income4 = self.spec_props_income.copy()
    dp_mean = DPMeanSpec(spec_props_income4)
    self.assertTrue(dp_mean.is_chain_valid())
    dp_mean.cl = -1.0
    cl_alpha = dp_mean.get_confidence_level_alpha()
    self.assertIsNone(cl_alpha)
    self.assertTrue(dp_mean.has_error())
    self.assertTrue(dp_mean.get_single_err_msg().startswith(
        astatic.ERR_MSG_CL_ALPHA_CL_GREATER_THAN_1))
def test_60_duplicate_dataverse_user(self, req_mocker):
    """(60) Attempt to add the same user twice"""
    msgt(self.test_60_duplicate_dataverse_user.__doc__)
    self.set_mock_requests(req_mocker)
    url = reverse('dv-user-list')

    # Start from a clean slate: no DataverseUsers at all
    DataverseUser.objects.all().delete()
    initial_dv_user_count = DataverseUser.objects.count()

    # First POST creates the DataverseUser
    response = self.client.post(url,
                                data=self.dv_user_api_input_01,
                                format='json')
    msg(response.json())
    self.assertEqual(response.status_code, 201)
    dataverse_users_count = DataverseUser.objects.count()
    self.assertEqual(initial_dv_user_count + 1, dataverse_users_count)

    # Second identical POST should query the existing user,
    # not create another one
    response = self.client.post(url,
                                data=self.dv_user_api_input_01,
                                format='json')
    msg(response.json())
    self.assertEqual(response.status_code, 201)
    self.assertEqual(dataverse_users_count, DataverseUser.objects.count())
def test_40_api_bad_overall_epsilon(self):
    """(40) Via API, run compute stats, bad overall epsilon"""
    # (docstring fix: was numbered "(30)" but the method is test_40;
    #  the file's convention ties the docstring number to the test number)
    msgt(self.test_40_api_bad_overall_epsilon.__doc__)
    analysis_plan = self.analysis_plan

    # Send the dp_statistics for validation
    analysis_plan.dp_statistics = self.general_stat_specs
    analysis_plan.save()

    # Put some bad data in!
    setup_info = analysis_plan.dataset.get_depositor_setup_info()
    setup_info.epsilon = None  # Shouldn't happen but what if it does!
    setup_info.save()

    params = dict(object_id=str(analysis_plan.object_id))
    response = self.client.post('/api/release/',
                                json.dumps(params),
                                content_type='application/json')
    jresp = response.json()

    # Release request rejected with the bad-total-epsilon message
    self.assertEqual(response.status_code, 400)
    self.assertFalse(jresp['success'])
    self.assertTrue(
        jresp['message'].find(astatic.ERR_MSG_BAD_TOTAL_EPSILON) > -1)
def test_50_invalid_token(self, req_mocker):
    """(50) test_invalid_token"""
    msgt(self.test_50_invalid_token.__doc__)

    # Mock the Dataverse ":me" endpoint with the invalid-token payload
    req_mocker.get('http://127.0.0.1:8000/dv-mock-api/api/v1/users/:me',
                   json=self.dv_user_invalid_token)

    # Give the handoff object a token Dataverse won't recognize
    dataverse_handoff = DataverseHandoff.objects.first()
    dataverse_handoff.apiGeneralToken = 'invalid_token_1234'
    dataverse_handoff.save()

    # The PUT (as the Vue.js client would issue it) should fail with 400
    response = self.client.put(self.url,
                               data=self.dv_user_api_input_01,
                               format='json')
    msg(response.content)
    self.assertEqual(response.status_code, 400)

    response_json = json.loads(response.content)
    self.assertTrue(response_json['success'] is False)
    self.assertTrue(response_json['message'].find('not found') > -1)
def test_100_locate_var_info(self):
    """(100) Locate variable info"""
    msgt(self.test_100_locate_var_info.__doc__)
    # The AnalysisPlan stores variable info keyed by snake_case column
    # names, while the UI sends original column names with statistics
    # requests. Make sure column names with spaces and other changes
    # are still locatable.

    # Load fixture files into OrderedDict objects
    # Fix: use context managers so the fixture files are closed
    # (originals were opened via open(...).read() and leaked)
    step1_fixture_file = join(PROFILER_FIXTURES_DIR,
                              'step1_variable_info.json')
    self.assertTrue(isfile(step1_fixture_file))
    with open(step1_fixture_file, 'r') as f:
        orig_var_info = json.loads(f.read(),
                                   object_pairs_hook=OrderedDict)

    step2_fixture_file = join(PROFILER_FIXTURES_DIR,
                              'step2_variable_info.json')
    self.assertTrue(isfile(step2_fixture_file))
    with open(step2_fixture_file, 'r') as f:
        plan_var_info = json.loads(f.read(),
                                   object_pairs_hook=OrderedDict)

    # Each original variable name (or its snake_case form) must be
    # present in the plan's variable info
    for _var_idx, orig_varname in orig_var_info['dataset'][
            'variableOrder']:
        varname_snakecase = camel_to_snake(orig_varname)
        var_found = (orig_varname in plan_var_info) or \
                    (varname_snakecase in plan_var_info)
        print(
            f'> Check: {orig_varname}/{varname_snakecase} -> {var_found}')
        self.assertTrue(var_found)
def test_20_schema_info_parsing(self):
    """Retrieve the correct dataset from schema info, using File Ids"""
    msgt(self.test_20_schema_info_parsing.__doc__)
    expected_suffix = str(schema_test_data.schema_info_01_file_id)

    # File Id supplied as an int
    file_resp = DataverseManifestParams.get_file_specific_schema_info(
        schema_test_data.schema_info_01,
        file_id=schema_test_data.schema_info_01_file_id,
        file_persistent_id=schema_test_data.schema_info_01_file_pid)
    self.assertTrue(file_resp.success is True)
    self.assertTrue('contentUrl' in file_resp.data)
    self.assertTrue(file_resp.data['contentUrl'].endswith(expected_suffix))

    # File Id supplied as a string
    file_resp = DataverseManifestParams.get_file_specific_schema_info(
        schema_test_data.schema_info_01,
        file_id=str(schema_test_data.schema_info_01_file_id),
        file_persistent_id=schema_test_data.schema_info_01_file_pid)
    self.assertTrue(file_resp.success is True)
    self.assertTrue('contentUrl' in file_resp.data)
    self.assertTrue(file_resp.data['contentUrl'].endswith(expected_suffix))
def test_08_api_fail_wrong_user(self):
    """(8) Test API fail, logged in as different user"""
    msgt(self.test_08_api_fail_wrong_user.__doc__)

    # Log a second, unrelated user into a fresh client
    new_client = APIClient()
    other_user_obj, _created = get_user_model().objects.get_or_create(
        username='******')
    new_client.force_login(other_user_obj)

    # The plan belongs to self.user_obj, not the logged-in user
    dataset_info = DataSetInfo.objects.get(id=4)
    AnalysisPlanUtil.create_plan(dataset_info.object_id, self.user_obj)
    analysis_plan = AnalysisPlan.objects.first()

    # Validation request as the wrong user must be rejected
    request_plan = dict(analysis_plan_id=analysis_plan.object_id,
                        dp_statistics=[self.general_stat_spec])
    response = new_client.post('/api/validation/',
                               data=request_plan,
                               format='json')
    self.assertEqual(response.status_code, 400)
    self.assertFalse(response.json()['success'])
    self.assertEqual(response.json()['message'],
                     astatic.ERR_MSG_NO_ANALYSIS_PLAN)
def test_10_successful_get(self, req_mocker):
    """(10) test_successful_creation"""
    msgt(self.test_10_successful_get.__doc__)

    # From fixture file: "test_manifest_params_04.json"
    tparams = ManifestTestParams.objects.get(
        object_id='4bcad631-ce7c-475e-a569-29e71ee0b2ee')
    handoff_req = tparams.make_test_handoff_object()
    self.assertTrue(handoff_req.success is True)
    handoff_obj = handoff_req.data

    # Mock the call the application makes to "Dataverse" for
    # JSON-LD metadata
    mock_url = ('http://127.0.0.1:8000/dv-mock-api/api/v1/datasets/export'
                '?exporter=schema.org&persistentId=doi:10.7910/DVN/PUXVDH'
                '&User-Agent=pydataverse&key=shoefly-dont-bother-m3')
    req_mocker.get(mock_url, json=tparams.schema_org_content)

    query_params = {
        'handoff_id': handoff_obj.object_id,
        'user_id': '6c4986b1-e90d-48a2-98d5-3a37da1fd331'
    }
    response = self.client.get('/api/dv-file/',
                               data=query_params,
                               content_type='application/json')

    # Success with an empty data payload
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.json().get('success'), True)
    self.assertEqual(response.json().get('data'), [])
def test_25_api_fail_unsupported_stat(self):
    """(25) Fail: API, Test a known but unsupported statistic"""
    msgt(self.test_25_api_fail_unsupported_stat.__doc__)
    plan = self.analysis_plan

    # Request DP_QUANTILE: recognized but not yet supported
    spec = self.general_stat_spec
    spec['statistic'] = astatic.DP_QUANTILE
    request_plan = dict(analysis_plan_id=str(plan.object_id),
                        dp_statistics=[spec])

    response = self.client.post('/api/validation/',
                                json.dumps(request_plan),
                                content_type='application/json')
    jresp = response.json()
    self.assertEqual(response.status_code, 200)
    self.assertTrue(jresp['success'])

    # The overall call succeeds but the single stat is flagged invalid
    first_stat = jresp['data'][0]
    self.assertEqual(first_stat['valid'], False)
    self.assertEqual(first_stat['statistic'], astatic.DP_QUANTILE)
    self.assertEqual(first_stat['variable'], 'EyeHeight')
    self.assertEqual(
        first_stat['message'],
        f'Statistic "{astatic.DP_QUANTILE}" will be supported soon!')
def test_12_successful_creation(self, req_mocker):
    """(12) Schema.org retrieved but file specific info is not found!"""
    # (docstring fix: was numbered "(15)" but the method is test_12)
    # NOTE(review): the docstring's "not found" wording conflicts with the
    # 201/success assertions below -- confirm which describes the intent.
    msgt(self.test_12_successful_creation.__doc__)

    # From fixture file: "test_manifest_params_04.json"
    tparams = ManifestTestParams.objects.get(
        object_id='4bcad631-ce7c-475e-a569-29e71ee0b2ee')
    handoff_req = tparams.make_test_handoff_object()
    self.assertTrue(handoff_req.success is True)
    handoff_obj = handoff_req.data

    # The Mock url is for when the application calls "Dataverse" to
    # retrieve JSON-LD metadata
    mock_url = ('http://127.0.0.1:8000/dv-mock-api/api/v1/datasets/export'
                '?exporter=schema.org&persistentId=doi:10.7910/DVN/PUXVDH'
                '&User-Agent=pydataverse&key=shoefly-dont-bother-m3')
    schema_content = tparams.schema_org_content
    req_mocker.get(mock_url, json=schema_content)

    response = self.client.post(
        '/api/dv-file/',
        data={
            'handoff_id': handoff_obj.object_id,
            'creator': '6c4986b1-e90d-48a2-98d5-3a37da1fd331'
        },
        content_type='application/json')
    print(response.json())

    self.assertEqual(response.status_code, 201)
    self.assertEqual(response.json().get('success'), True)
    # 'message' should only appear when there's an error and we are
    # displaying the error message
    self.assertTrue('message' not in response.json())
def test_55_api_bad_total_epsilon(self):
    """(55) Fail: API, Bad total epsilon"""
    msgt(self.test_55_api_bad_total_epsilon.__doc__)
    plan = self.analysis_plan

    # Push the depositor's total epsilon out of bounds
    setup_info = plan.dataset.get_depositor_setup_info()
    setup_info.epsilon = 4
    setup_info.save()

    # Submit the default stat spec for validation via the API
    request_plan = dict(analysis_plan_id=str(plan.object_id),
                        dp_statistics=[self.general_stat_spec])
    response = self.client.post('/api/validation/',
                                json.dumps(request_plan),
                                content_type='application/json')

    # Expect rejection with the bad-total-epsilon message
    jresp = response.json()
    self.assertEqual(response.status_code, 400)
    self.assertFalse(jresp['success'])
    self.assertTrue(
        jresp['message'].find(astatic.ERR_MSG_BAD_TOTAL_EPSILON) > -1)
def test_20_bad_epsilon(self):
    """(20) Bad epsilon"""
    msgt(self.test_20_bad_epsilon.__doc__)
    spec_props = self.spec_props

    # Numeric epsilon values outside the accepted range
    for epsilon_val in [1.01, -0.01, 10]:
        print(f'> Bad epsilon val: {epsilon_val}')
        spec_props['epsilon'] = epsilon_val
        dp_mean = DPMeanSpec(spec_props)
        self.assertFalse(dp_mean.is_chain_valid())
        err_info = dp_mean.get_error_msg_dict()
        # Idiom fix: assertFalse instead of assertTrue(... == False)
        self.assertFalse(err_info['valid'])
        print(err_info['message'])
        self.assertTrue(
            err_info['message'].find(VALIDATE_MSG_EPSILON) > -1)

    # Values that cannot be converted to a float at all
    for epsilon_val in ['a', 'carrot', 'cake']:
        print(f'> Bad epsilon val: {epsilon_val}')
        spec_props['epsilon'] = epsilon_val
        dp_mean = DPMeanSpec(spec_props)
        self.assertFalse(dp_mean.is_chain_valid())
        err_info = dp_mean.get_error_msg_dict()
        self.assertFalse(err_info['valid'])
        print(err_info['message'])
        self.assertTrue(err_info['message'].find('Failed to convert') > -1)

    # With a good epsilon, invalid dataset sizes still fail the chain
    spec_props['epsilon'] = 1
    for bad_ds in [-1, 0, 1.0, .03, 'brick', 'cookie']:
        print(f'> Bad dataset_size: {bad_ds}')
        spec_props['dataset_size'] = bad_ds
        dp_mean = DPMeanSpec(spec_props)
        self.assertFalse(dp_mean.is_chain_valid())
def test_successful_get(self, req_mocker):
    """(10) Get inheritors of DataSetInfo"""
    msgt(self.test_successful_get.__doc__)
    self.set_mock_requests(req_mocker)

    # No DataSetInfo objects exist, so an empty paginated page comes back
    response = self.client.get(reverse("datasetinfo-list"))
    self.assertEqual(response.status_code, 200)
    expected_page = {'count': 0,
                     'next': None,
                     'previous': None,
                     'results': []}
    self.assertEqual(response.json(), expected_page)
def test_030_dv_handler_bad_param(self, req_mocker):
    """(30) Test DataverseRequestHandler with bad params"""
    msgt(self.test_030_dv_handler_bad_param.__doc__)
    print('1a. Set up requests mocker')
    self.set_requests_mocker(req_mocker)

    print('1. Test with bad file id param')
    bad_params = self.mock_params.as_dict()
    bad_params[dv_static.DV_PARAM_FILE_ID] = 777777  # bad file Id
    handler = DataverseRequestHandler(bad_params, self.user_obj)
    self.assertTrue(handler.has_error())
    print(handler.get_err_msg())
    # Error message should name the offending parameter
    self.assertTrue(
        handler.get_err_msg().find(dv_static.DV_PARAM_FILE_ID) > -1)

    print('2. Test with bad datasetPid param')
    bad_params = self.mock_params.as_dict()
    bad_params[dv_static.DV_PARAM_DATASET_PID] = 'cool-breeze'  # datasetPid
    handler = DataverseRequestHandler(bad_params, self.user_obj)
    print('schema_info', handler.schema_info)
    self.assertTrue(handler.has_error())
    print(handler.get_err_msg())
    # schema_info is populated with a message echoing the bad pid
    self.assertTrue('cool-breeze' in handler.schema_info.get('message'))
    self.assertFalse(handler.schema_info is None)