def create(self, validated_data):
    with transaction.atomic():
        # prepare ValidationRun model
        new_val_run = ValidationRun(start_time=timezone.now())
        new_val_run.interval_from = validated_data.get('validation_period').get('interval_from', None)
        new_val_run.interval_to = validated_data.get('validation_period').get('interval_to', None)
        new_val_run.anomalies = validated_data.get('anomalies').get('method')
        new_val_run.anomalies_from = validated_data.get('anomalies').get('anomalies_from', None)
        new_val_run.anomalies_to = validated_data.get('anomalies').get('anomalies_to', None)
        new_val_run.min_lat = validated_data.get('spatial_subsetting').get('min_lat', None)
        new_val_run.min_lon = validated_data.get('spatial_subsetting').get('min_lon', None)
        new_val_run.max_lat = validated_data.get('spatial_subsetting').get('max_lat', None)
        new_val_run.max_lon = validated_data.get('spatial_subsetting').get('max_lon', None)
        new_val_run.scaling_method = validated_data.get('scaling').get('method', None)

        for metric in validated_data.get('metrics'):
            if metric.get('id') == 'tcol':
                new_val_run.tcol = metric.get('value')

        # save first so DatasetConfiguration rows can reference the run
        new_val_run.save()

        # prepare DatasetConfiguration models; the reference configuration
        # goes first so it ends up at index 0
        dataset_config_models = []
        configs_to_save = [validated_data.get('reference_config')]
        configs_to_save.extend(validated_data.get('dataset_configs'))

        for config in configs_to_save:
            # objects.create already persists the row, no extra save() needed
            config_model = DatasetConfiguration.objects.create(
                validation=new_val_run,
                dataset_id=config.get('dataset_id'),
                version_id=config.get('version_id'),
                variable_id=config.get('variable_id'))

            # attach the selected basic filters; m2m add() persists immediately
            for filter_id in config.get('basic_filters'):
                config_model.filters.add(DataFilter.objects.get(id=filter_id))

            dataset_config_models.append(config_model)

        new_val_run.reference_configuration = dataset_config_models[0]
        new_val_run.save()

    return new_val_run
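
# For illustration, a minimal sketch of the validated_data payload that
# create() consumes, reconstructed from the lookups above. The field names
# come from the code; the concrete ids, dates, and filter ids below are
# hypothetical placeholders, not values from the real project.
_EXAMPLE_VALIDATED_DATA = {
    'validation_period': {'interval_from': datetime(2000, 1, 1),
                          'interval_to': datetime(2005, 12, 31)},
    'anomalies': {'method': 'climatology',
                  'anomalies_from': datetime(2000, 1, 1),
                  'anomalies_to': datetime(2005, 12, 31)},
    'spatial_subsetting': {'min_lat': -45.0, 'min_lon': -120.0,
                           'max_lat': 45.0, 'max_lon': 120.0},
    'scaling': {'method': 'mean_std'},
    'metrics': [{'id': 'tcol', 'value': True}],
    'reference_config': {'dataset_id': 1, 'version_id': 1,
                         'variable_id': 1, 'basic_filters': [1]},
    'dataset_configs': [{'dataset_id': 2, 'version_id': 2,
                         'variable_id': 2, 'basic_filters': [1, 2]}],
}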
def test_validation_run_clean(self):
    run = ValidationRun()

    ## default object should be valid
    run.clean()

    ## object with just a from date should be invalid
    run.interval_from = datetime(2000, 1, 1)
    with pytest.raises(ValidationError):
        run.clean()

    ## object with just a to date should be invalid
    run.interval_from = None
    run.interval_to = datetime(2000, 1, 1)
    with pytest.raises(ValidationError):
        run.clean()

    ## object with from date after to date should be invalid
    run.interval_from = datetime(2001, 1, 1)
    with pytest.raises(ValidationError):
        run.clean()

    ## object with from date before to date should be valid
    run.interval_to = datetime(2005, 1, 1)
    run.clean()

    ## object with no spatial subsetting should be valid
    run.min_lat = None
    run.max_lat = None
    run.min_lon = None
    run.max_lon = None
    run.clean()

    ## spatial subsetting with only two coords should be invalid
    run.min_lat = -45.0
    run.max_lat = +45.0
    with pytest.raises(ValidationError):
        run.clean()

    ## spatial subsetting with four coords should be valid
    run.min_lon = -120.0
    run.max_lon = +120.0
    run.clean()

    ## moving average anomalies should be valid without time period
    run.anomalies = ValidationRun.MOVING_AVG_35_D
    run.clean()

    ## climatology without anomalies time period should be invalid
    run.anomalies = ValidationRun.CLIMATOLOGY
    with pytest.raises(ValidationError):
        run.clean()

    ## climatology with broken time period should be invalid
    run.anomalies_from = datetime(2005, 1, 1)
    run.anomalies_to = datetime(2000, 1, 1)
    with pytest.raises(ValidationError):
        run.clean()

    ## climatology with correct time period should be valid
    run.anomalies_from = datetime(2000, 1, 1)
    run.anomalies_to = datetime(2005, 1, 1)
    run.clean()

    ## moving average with an anomalies time period should be invalid
    run.anomalies = ValidationRun.MOVING_AVG_35_D
    with pytest.raises(ValidationError):
        run.clean()
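
# A minimal sketch (an assumption, not the project's actual implementation)
# of the clean() rules the test above exercises: interval bounds must be
# given together and ordered; the four bounding-box coordinates must be all
# set or all unset; the climatology method requires an ordered anomalies
# period, while other methods forbid one.
def _clean_sketch(run):
    # both interval dates or neither, and from must precede to
    if (run.interval_from is None) != (run.interval_to is None):
        raise ValidationError('Both interval dates must be given, or neither.')
    if run.interval_from is not None and run.interval_from > run.interval_to:
        raise ValidationError('interval_from must precede interval_to.')

    # spatial subsetting is all-or-nothing
    coords = [run.min_lat, run.min_lon, run.max_lat, run.max_lon]
    if any(c is not None for c in coords) and not all(c is not None for c in coords):
        raise ValidationError('All four bounding-box coordinates are required.')

    # anomalies period only makes sense for climatology, and must be ordered
    if run.anomalies == ValidationRun.CLIMATOLOGY:
        if run.anomalies_from is None or run.anomalies_to is None:
            raise ValidationError('Climatology requires an anomalies time period.')
        if run.anomalies_from > run.anomalies_to:
            raise ValidationError('anomalies_from must precede anomalies_to.')
    elif run.anomalies_from is not None or run.anomalies_to is not None:
        raise ValidationError('An anomalies period is only valid for climatology.')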