def test_validation_c3s_ismn(self):
    """Run a full C3S-vs-ISMN validation end to end.

    Builds a ValidationRun with a C3S dataset configuration and an ISMN
    reference configuration, starts the validation, polls the database
    until the run finishes (or a timeout is reached), then applies the
    generic result checks and deletes the run.
    """
    run = ValidationRun()
    run.start_time = datetime.now(tzlocal())
    run.user = self.testuser
    # set validation period
    run.interval_from = datetime(1978, 1, 1, tzinfo=UTC)
    run.interval_to = datetime(2018, 12, 31, tzinfo=UTC)
    run.save()

    data_c = DatasetConfiguration()
    data_c.validation = run
    data_c.dataset = Dataset.objects.get(short_name='C3S')
    data_c.version = DatasetVersion.objects.get(short_name='C3S_V201812')
    data_c.variable = DataVariable.objects.get(short_name='C3S_sm')
    data_c.save()  # object needs to be saved before m2m relationship can be used
    data_c.filters.add(DataFilter.objects.get(name='FIL_C3S_FLAG_0'))
    data_c.filters.add(DataFilter.objects.get(name='FIL_ALL_VALID_RANGE'))
    data_c.save()

    ref_c = DatasetConfiguration()
    ref_c.validation = run
    ref_c.dataset = Dataset.objects.get(short_name='ISMN')
    ref_c.version = DatasetVersion.objects.get(short_name='ISMN_V20180712_MINI')
    ref_c.variable = DataVariable.objects.get(short_name='ISMN_soil_moisture')
    ref_c.save()  # save before using the m2m filter relation
    ref_c.filters.add(DataFilter.objects.get(name='FIL_ISMN_GOOD'))
    ref_c.save()

    run.reference_configuration = ref_c
    run.scaling_ref = ref_c
    run.save()
    run_id = run.id

    ## run the validation
    val.run_validation(run_id)

    ## wait until it's over (if necessary)
    finished_run = ValidationRun.objects.get(pk=run_id)
    timeout = 300  # seconds
    wait_time = 5  # seconds
    runtime = 0
    while finished_run.end_time is None:
        assert runtime <= timeout, 'Validations are taking too long.'
        sleep(wait_time)
        runtime += wait_time
        # BUGFIX: re-fetch the run each iteration — the original loop never
        # refreshed `finished_run`, so once entered it could only spin until
        # the timeout assertion fired instead of noticing completion.
        finished_run = ValidationRun.objects.get(pk=run_id)

    self.generic_result_check(finished_run)
    self.delete_run(finished_run)
def test_change_validation_name(self):
    """Exercise the rename endpoint: foreign user, bad mode, good rename, DOI lock."""
    # create new no-named result
    run = ValidationRun()
    run.user = self.testuser
    run.start_time = datetime.now(tzlocal())
    run.interval_from = datetime(1978, 1, 1, tzinfo=UTC)
    run.interval_to = datetime(2018, 1, 1, tzinfo=UTC)
    run.save()
    result_id = str(run.id)
    assert result_id, "Error saving the test validation run."

    url = reverse('result', kwargs={'result_uuid': result_id})

    def patch(body):
        # all rename requests use the same form-encoded PATCH shape
        return self.client.patch(
            url, body, content_type='application/x-www-form-urlencoded;')

    # try to change name of other user's validation - forbidden
    self.client.login(**self.credentials2)
    self.assertEqual(patch('save_name=false').status_code, 403)

    # log in as owner of result and check invalid saving mode
    self.client.login(**self.credentials)
    self.assertEqual(patch('save_name=false').status_code, 400)

    # log in as owner of result and check valid saving mode
    self.client.login(**self.credentials)
    self.assertEqual(patch('save_name=true&new_name="new_name"').status_code, 200)

    # a published result (with DOI) must not be renamable
    run.doi = '10.1000/182'
    run.save()
    self.assertEqual(patch('save_name=true&new_name="new_name"').status_code, 405)

    # clear the DOI and set the name directly; the record must be findable by it
    run.doi = ''
    run.save()
    run.name_tag = 'new_name'
    run.save()
    assert ValidationRun.objects.filter(name_tag='new_name').exists()
def create(self, validated_data):
    """Create a ValidationRun (plus its DatasetConfigurations) from validated data.

    Everything happens in one transaction: the run, the reference
    configuration (always first in the config list, and assigned to
    ``reference_configuration``), and every non-reference dataset
    configuration with its basic filters.

    Returns the saved ValidationRun.
    """
    with transaction.atomic():
        # prepare ValidationRun model
        new_val_run = ValidationRun(start_time=timezone.now())
        new_val_run.interval_from = validated_data.get('validation_period').get('interval_from', None)
        new_val_run.interval_to = validated_data.get('validation_period').get('interval_to', None)
        new_val_run.anomalies = validated_data.get('anomalies').get('method')
        new_val_run.anomalies_from = validated_data.get('anomalies').get('anomalies_from', None)
        new_val_run.anomalies_to = validated_data.get('anomalies').get('anomalies_to', None)
        new_val_run.min_lat = validated_data.get('spatial_subsetting').get('min_lat', None)
        new_val_run.min_lon = validated_data.get('spatial_subsetting').get('min_lon', None)
        new_val_run.max_lat = validated_data.get('spatial_subsetting').get('max_lat', None)
        new_val_run.max_lon = validated_data.get('spatial_subsetting').get('max_lon', None)
        new_val_run.scaling_method = validated_data.get('scaling').get('method', None)

        for metric in validated_data.get('metrics'):
            if metric.get('id') == 'tcol':
                new_val_run.tcol = metric.get('value')

        new_val_run.save()

        # prepare DatasetConfiguration models; the reference config goes first
        # so that dataset_config_models[0] is always the reference
        dataset_config_models = []
        configs_to_save = [validated_data.get('reference_config')]
        configs_to_save.extend(validated_data.get('dataset_configs'))

        for config in configs_to_save:
            # objects.create() already persists the row, so no extra save()
            # is needed before using the m2m filter relation
            config_model = DatasetConfiguration.objects.create(
                validation=new_val_run,
                dataset_id=config.get('dataset_id'),
                version_id=config.get('version_id'),
                variable_id=config.get('variable_id'))
            for filter_id in config.get('basic_filters'):
                config_model.filters.add(DataFilter.objects.get(id=filter_id))
            config_model.save()
            dataset_config_models.append(config_model)

        new_val_run.reference_configuration = dataset_config_models[0]
        new_val_run.save()

    return new_val_run
def test_delete_result(self):
    """Deletion rules: blocked for other users and DOI'd runs, allowed for the owner."""
    # create result to delete:
    run = ValidationRun()
    run.user = self.testuser
    run.start_time = datetime.now(tzlocal())
    run.interval_from = datetime(1978, 1, 1, tzinfo=UTC)
    run.interval_to = datetime(2018, 1, 1, tzinfo=UTC)
    run.save()
    result_id = str(run.id)
    assert result_id, "Error saving the test validation run."

    url = reverse('result', kwargs={'result_uuid': result_id})

    # a different user must not be able to delete this result
    self.client.login(**self.credentials2)
    self.assertEqual(self.client.delete(url).status_code, 403)

    # switch to the owner
    self.client.login(**self.credentials)

    # even the owner cannot delete a result that carries a DOI
    run.doi = '10.1000/182'
    run.save()
    self.assertEqual(self.client.delete(url).status_code, 405)

    # with the DOI removed, the owner's delete succeeds
    run.doi = ''
    run.save()
    self.assertEqual(self.client.delete(url).status_code, 200)
    assert not ValidationRun.objects.filter(
        pk=result_id).exists(), "Validation run didn't get deleted."
def test_validation_run_clean(self): run = ValidationRun() ## default object should be valid run.clean() ## object with just a from date should be invalid run.interval_from = datetime(2000, 1, 1) with pytest.raises(ValidationError): run.clean() ## object with just a to start date should be invalid run.interval_from = None run.interval_to = datetime(2000, 1, 1) with pytest.raises(ValidationError): run.clean() ## object with from date after to date should be invalid run.interval_from = datetime(2001, 1, 1) with pytest.raises(ValidationError): run.clean() ## object with from date before to date should be valid run.interval_to = datetime(2005, 1, 1) run.clean() ## object with no spatial subsetting should be valid run.min_lat = None run.max_lat = None run.min_lon = None run.max_lon = None run.clean() ## spatial subsetting with only two coords should be invalid run.min_lat = -45.0 run.max_lat = +45.0 with pytest.raises(ValidationError): run.clean() ## spatial subsetting with four coords should be valid run.min_lon = -120.0 run.max_lon = +120.0 run.clean() ## climatology with moving average should be valid without time period run.anomalies = ValidationRun.MOVING_AVG_35_D run.clean() ## climatology without anomalies time period should be invalid run.anomalies = ValidationRun.CLIMATOLOGY with pytest.raises(ValidationError): run.clean() ## climatology with broken time period should be invalid run.anomalies_from = datetime(2005, 1, 1) run.anomalies_to = datetime(2000, 1, 1) with pytest.raises(ValidationError): run.clean() ## climatology with correct time period should be invalid run.anomalies_from = datetime(2000, 1, 1) run.anomalies_to = datetime(2005, 1, 1) run.clean() ## climatology with moving average should be invalid with time period run.anomalies = ValidationRun.MOVING_AVG_35_D with pytest.raises(ValidationError): run.clean()