def generate_default_validation_triple_coll():
    """Create and persist a default triple-collocation ValidationRun.

    Builds a run with two candidate dataset configurations (C3S, SMOS) and an
    ISMN reference configuration, enables triple collocation plus bootstrapped
    TC confidence intervals, and returns the saved run.

    Returns:
        ValidationRun: the fully configured, persisted run.

    Raises:
        DoesNotExist: if any referenced Dataset/DatasetVersion/DataVariable
            fixture row is missing from the database.
    """
    run = ValidationRun()
    run.start_time = datetime.now(tzlocal())
    # Save early so the DatasetConfiguration rows can reference the run's PK.
    run.save()

    def _add_config(dataset_name, version_name, variable_name):
        # Create and persist one DatasetConfiguration attached to `run`,
        # resolving the dataset/version/variable by their short names.
        config = DatasetConfiguration()
        config.validation = run
        config.dataset = Dataset.objects.get(short_name=dataset_name)
        config.version = DatasetVersion.objects.get(short_name=version_name)
        config.variable = DataVariable.objects.get(short_name=variable_name)
        config.save()
        return config

    _add_config('C3S', 'C3S_V201912', 'C3S_sm')
    _add_config('SMOS', 'SMOS_105_ASC', 'SMOS_sm')
    ref_c = _add_config('ISMN', 'ISMN_V20180712_MINI', 'ISMN_soil_moisture')

    # The ISMN configuration doubles as both spatial reference and scaling
    # reference, matching the original setup.
    run.reference_configuration = ref_c
    run.scaling_ref = ref_c
    run.tcol = True
    run.bootstrap_tcol_cis = True
    run.save()
    return run
def create(self, validated_data):
    """Create a ValidationRun (and its DatasetConfigurations) from validated data.

    Runs inside a single transaction so a failure while building the dataset
    configurations rolls back the partially-created run.

    Args:
        validated_data: serializer-validated dict with keys
            'validation_period', 'anomalies', 'spatial_subsetting', 'scaling',
            'metrics', 'reference_config' and 'dataset_configs'.

    Returns:
        ValidationRun: the persisted run with its reference configuration set.
    """
    with transaction.atomic():
        # Prepare the ValidationRun model.
        new_val_run = ValidationRun(start_time=timezone.now())

        # Hoist the sub-dicts once instead of re-fetching them per field.
        period = validated_data.get('validation_period')
        anomalies = validated_data.get('anomalies')
        subsetting = validated_data.get('spatial_subsetting')

        new_val_run.interval_from = period.get('interval_from', None)
        new_val_run.interval_to = period.get('interval_to', None)
        new_val_run.anomalies = anomalies.get('method')
        new_val_run.anomalies_from = anomalies.get('anomalies_from', None)
        new_val_run.anomalies_to = anomalies.get('anomalies_to', None)
        new_val_run.min_lat = subsetting.get('min_lat', None)
        new_val_run.min_lon = subsetting.get('min_lon', None)
        new_val_run.max_lat = subsetting.get('max_lat', None)
        new_val_run.max_lon = subsetting.get('max_lon', None)
        new_val_run.scaling_method = validated_data.get('scaling').get('method', None)

        for metric in validated_data.get('metrics'):
            if metric.get('id') == 'tcol':
                new_val_run.tcol = metric.get('value')
                break  # only one tcol metric entry is expected

        new_val_run.save()

        # Prepare the DatasetConfiguration models; the reference config is
        # processed first so it ends up at index 0.
        dataset_config_models = []
        configs_to_save = [validated_data.get('reference_config')]
        configs_to_save.extend(validated_data.get('dataset_configs'))

        for config in configs_to_save:
            # objects.create() already persists the row — no extra save() needed.
            config_model = DatasetConfiguration.objects.create(
                validation=new_val_run,
                dataset_id=config.get('dataset_id'),
                version_id=config.get('version_id'),
                variable_id=config.get('variable_id'))

            # Attach the selected basic filters; M2M add() writes the join
            # rows itself, so no save() is required afterwards either.
            filter_models = [DataFilter.objects.get(id=filter_id)
                             for filter_id in config.get('basic_filters')]
            config_model.filters.add(*filter_models)

            dataset_config_models.append(config_model)

        new_val_run.reference_configuration = dataset_config_models[0]
        new_val_run.save()

    return new_val_run