Example #1
def generate_default_validation_triple_coll():
    run = ValidationRun()
    run.start_time = datetime.now(tzlocal())
    run.save()

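    # candidate dataset 1: C3S soil moisture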
    data_c = DatasetConfiguration()
    data_c.validation = run
    data_c.dataset = Dataset.objects.get(short_name='C3S')
    data_c.version = DatasetVersion.objects.get(short_name='C3S_V201912')
    data_c.variable = DataVariable.objects.get(short_name='C3S_sm')
    data_c.save()

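    # candidate dataset 2: SMOS soil moisture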
    other_data_c = DatasetConfiguration()
    other_data_c.validation = run
    other_data_c.dataset = Dataset.objects.get(short_name='SMOS')
    other_data_c.version = DatasetVersion.objects.get(short_name='SMOS_105_ASC')
    other_data_c.variable = DataVariable.objects.get(short_name='SMOS_sm')
    other_data_c.save()

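    # reference dataset: ISMN in-situ soil moisture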
    ref_c = DatasetConfiguration()
    ref_c.validation = run
    ref_c.dataset = Dataset.objects.get(short_name='ISMN')
    ref_c.version = DatasetVersion.objects.get(short_name='ISMN_V20180712_MINI')
    ref_c.variable = DataVariable.objects.get(short_name='ISMN_soil_moisture')
    ref_c.save()

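    # use ISMN as reference and scaling reference; enable triple collocation with bootstrapped confidence intervals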
    run.reference_configuration = ref_c
    run.scaling_ref = ref_c
    run.tcol = True
    run.bootstrap_tcol_cis = True
    run.save()

    return run
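
A minimal usage sketch for the factory above, assuming it is importable from the project's test helpers and that the test database contains the referenced C3S, SMOS, and ISMN fixtures (the class and method names here are illustrative):

from django.test import TestCase

class TripleCollocationSetupTest(TestCase):

    def test_default_triple_collocation_setup(self):
        run = generate_default_validation_triple_coll()
        # two candidate configurations plus the ISMN reference
        assert run.dataset_configurations.count() == 3
        assert run.reference_configuration.dataset.short_name == 'ISMN'
        # triple collocation with bootstrapped confidence intervals is enabled
        assert run.tcol and run.bootstrap_tcol_cis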
Example #2
    def test_val_finished(self):
        test_datasets = [Dataset.objects.get(short_name=globals.C3S),
                         Dataset.objects.get(short_name=globals.ASCAT),
                         Dataset.objects.get(short_name=globals.SMAP),]

        run = ValidationRun()
        run.start_time = datetime.now(tzlocal())
        run.end_time = datetime.now(tzlocal())
        run.user = self.testuser
        run.save()

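        # one configuration per candidate dataset, using its first available version and variable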
        for ds in test_datasets:
            data_c = DatasetConfiguration()
            data_c.validation = run
            data_c.dataset = ds
            data_c.version = ds.versions.first()
            data_c.variable = ds.variables.first()
            data_c.save()

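        # ISMN reference configuration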
        ref_c = DatasetConfiguration()
        ref_c.validation = run
        ref_c.dataset = Dataset.objects.get(short_name='ISMN')
        ref_c.version = DatasetVersion.objects.get(short_name='ISMN_V20180712_MINI')
        ref_c.variable = DataVariable.objects.get(short_name='ISMN_soil_moisture')
        ref_c.save()

        run.reference_configuration = ref_c
        run.scaling_ref = ref_c
        run.save()

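        # send the "validation finished" notification and verify it shows up in the test outbox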
        val_mail.send_val_done_notification(run)
        self.check_outbox()
Example #3
def generate_ismn_nonref_validation():
    """Generate a validation where ISMN is used as non-reference"""
    run = ValidationRun()
    run.start_time = datetime.now(tzlocal())
    run.save()

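    # C3S is configured as the reference here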
    ref_c = DatasetConfiguration()
    ref_c.validation = run
    ref_c.dataset = Dataset.objects.get(short_name='C3S')
    ref_c.version = DatasetVersion.objects.get(short_name='C3S_V202012')
    ref_c.variable = DataVariable.objects.get(short_name='C3S_sm')
    ref_c.save()

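    # ISMN is added as a regular (non-reference) dataset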
    data_c = DatasetConfiguration()
    data_c.validation = run
    data_c.dataset = Dataset.objects.get(short_name='ISMN')
    data_c.version = DatasetVersion.objects.get(short_name='ISMN_V20180712_MINI')
    data_c.variable = DataVariable.objects.get(short_name='ISMN_soil_moisture')
    data_c.save()

    run.reference_configuration = ref_c
    run.scaling_ref = ref_c
    run.save()

    return run
Example #4
    def test_abortrunningvalidations(self):
        # make sure we don't have real running validations
        running_validations = ValidationRun.objects.filter(
            progress__range=(0, 99))
        assert not running_validations

        # make sure we have a fake running validation for testing
        run = ValidationRun()
        run.start_time = datetime.now(tzlocal())
        run.progress = 50
        run.save()
        run_id = run.id
        running_validations = ValidationRun.objects.filter(
            progress__range=(0, 99))
        assert running_validations

        # run the command
        args = []
        opts = {}
        call_command('abortrunningvalidations', *args, **opts)

        # make sure that our test validation was marked as failed
        running_validations = ValidationRun.objects.filter(
            progress__range=(0, 99))
        assert not running_validations
        test_val = ValidationRun.objects.get(id=run_id)
        assert test_val
        assert test_val.end_time
        assert test_val.progress == -1
Example #5
    def test_val_expired(self):
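        # a single run inside the expiry warning window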
        run = ValidationRun()
        now = datetime.now(tzlocal())
        run.start_time = now - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS -
            settings.VALIDATION_EXPIRY_WARNING_DAYS)
        run.end_time = run.start_time + timedelta(days=1)
        run.user = self.testuser
        run.save()
        assert not run.expiry_notified

        val_mail.send_val_expiry_notification([run])
        self.check_outbox()

        assert ValidationRun.objects.get(pk=run.id).expiry_notified

        # multiple validations in one email:
        run = ValidationRun()
        now = datetime.now(tzlocal())
        run.start_time = now - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS -
            settings.VALIDATION_EXPIRY_WARNING_DAYS)
        run.end_time = run.start_time + timedelta(days=1)
        run.user = self.testuser
        run.save()
        assert not run.expiry_notified

        run_2 = ValidationRun()
        now = datetime.now(tzlocal())
        run_2.start_time = now - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS -
            settings.VALIDATION_EXPIRY_WARNING_DAYS)
        run_2.end_time = run_2.start_time + timedelta(days=1)
        run_2.user = self.testuser
        run_2.save()
        assert not run_2.expiry_notified

        val_mail.send_val_expiry_notification([run, run_2])

        assert ValidationRun.objects.get(pk=run.id).expiry_notified
        assert ValidationRun.objects.get(pk=run_2.id).expiry_notified
Example #6
    def test_validation_c3s_ismn(self):
        run = ValidationRun()
        run.start_time = datetime.now(tzlocal())
        run.user = self.testuser
        # set validation period
        run.interval_from = datetime(1978, 1, 1, tzinfo=UTC)
        run.interval_to = datetime(2018, 12, 31, tzinfo=UTC)
        run.save()

        data_c = DatasetConfiguration()
        data_c.validation = run
        data_c.dataset = Dataset.objects.get(short_name='C3S')
        data_c.version = DatasetVersion.objects.get(short_name='C3S_V201812')
        data_c.variable = DataVariable.objects.get(short_name='C3S_sm')
        data_c.save() # object needs to be saved before m2m relationship can be used

        data_c.filters.add(DataFilter.objects.get(name='FIL_C3S_FLAG_0'))
        data_c.filters.add(DataFilter.objects.get(name='FIL_ALL_VALID_RANGE'))
        data_c.save()

        ref_c = DatasetConfiguration()
        ref_c.validation = run
        ref_c.dataset = Dataset.objects.get(short_name='ISMN')
        ref_c.version = DatasetVersion.objects.get(short_name='ISMN_V20180712_MINI')
        ref_c.variable = DataVariable.objects.get(short_name='ISMN_soil_moisture')
        ref_c.save()

        ref_c.filters.add(DataFilter.objects.get(name='FIL_ISMN_GOOD'))
        ref_c.save()

        run.reference_configuration = ref_c
        run.scaling_ref = ref_c
        run.save()

        run_id = run.id

        ## run the validation
        val.run_validation(run_id)

        ## wait until it's over (if necessary)
        finished_run = ValidationRun.objects.get(pk=run_id)
        timeout = 300 # seconds
        wait_time = 5 # seconds
        runtime = 0
        while finished_run.end_time is None:
            assert runtime <= timeout, 'Validations are taking too long.'
            sleep(wait_time)
            runtime += wait_time
            finished_run.refresh_from_db()  # pick up end_time once the validation completes

        ## TODO: check the results here

        self.generic_result_check(finished_run)
        self.delete_run(finished_run)
Example #7
    def test_change_validation_name(self):
        # create a new result without a name
        run = ValidationRun()
        run.user = self.testuser
        run.start_time = datetime.now(tzlocal())
        run.interval_from = datetime(1978, 1, 1, tzinfo=UTC)
        run.interval_to = datetime(2018, 1, 1, tzinfo=UTC)
        run.save()
        result_id = str(run.id)

        assert result_id, "Error saving the test validation run."

        # try to change the name of another user's validation
        url = reverse('result', kwargs={'result_uuid': result_id})

        self.client.login(**self.credentials2)
        response = self.client.patch(
            url,
            'save_name=false',
            content_type='application/x-www-form-urlencoded;')
        self.assertEqual(response.status_code, 403)

        # log in as owner of result and check invalid saving mode
        self.client.login(**self.credentials)
        response = self.client.patch(
            url,
            'save_name=false',
            content_type='application/x-www-form-urlencoded;')
        self.assertEqual(response.status_code, 400)

        # log in as owner of result and check valid saving mode
        self.client.login(**self.credentials)
        response = self.client.patch(
            url,
            'save_name=true&new_name="new_name"',
            content_type='application/x-www-form-urlencoded;')
        self.assertEqual(response.status_code, 200)

        run.doi = '10.1000/182'
        run.save()

        response = self.client.patch(
            url,
            'save_name=true&new_name="new_name"',
            content_type='application/x-www-form-urlencoded;')
        self.assertEqual(response.status_code, 405)

        run.doi = ''
        run.save()
        run.name_tag = 'new_name'
        run.save()
        assert ValidationRun.objects.filter(name_tag='new_name').exists()
Example #8
    def test_validation_configuration(self):
        run = ValidationRun()
        run.start_time = now()
        run.save()

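        # a single dataset configuration that also serves as reference and scaling reference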
        dc = DatasetConfiguration()
        dc.validation = run
        dc.dataset = Dataset.objects.get(pk=1)
        dc.version = DatasetVersion.objects.get(pk=1)
        dc.variable = DataVariable.objects.get(pk=1)

        dc.save()

        run.reference_configuration = dc
        run.scaling_ref = dc

        run.save()

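        # the run should expose exactly one configuration and valid reference/scaling links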
        assert len(run.dataset_configurations.all()) == 1
        assert run.reference_configuration
        assert run.scaling_ref
Example #9
    def test_delete_result(self):
        # create result to delete:
        run = ValidationRun()
        run.user = self.testuser
        run.start_time = datetime.now(tzlocal())
        run.interval_from = datetime(1978, 1, 1, tzinfo=UTC)
        run.interval_to = datetime(2018, 1, 1, tzinfo=UTC)
        run.save()
        result_id = str(run.id)

        assert result_id, "Error saving the test validation run."

        url = reverse('result', kwargs={'result_uuid': result_id})

        # try deleting other user's result - should be blocked
        self.client.login(**self.credentials2)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 403)

        # log in as owner of result
        self.client.login(**self.credentials)

        # try deleting a result that already has a DOI, should be blocked
        run.doi = '10.1000/182'
        run.save()
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 405)

        # remove DOI again
        run.doi = ''
        run.save()

        # try to delete own result, should succeed
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 200)

        assert not ValidationRun.objects.filter(
            pk=result_id).exists(), "Validation run didn't get deleted."
Example #10
    def test_ds_config_order(self):
        dataset_range = range(1, 6)
        run = ValidationRun()
        run.start_time = now()
        run.save()

        # create dataset configs in order of dataset ids
        for i in dataset_range:
            dc = DatasetConfiguration()
            dc.validation = run
            dc.dataset = Dataset.objects.get(pk=i)
            dc.version = dc.dataset.versions.first()
            dc.variable = dc.dataset.variables.first()
            dc.save()

        run.reference_configuration = dc
        run.scaling_ref = dc
        run.save()

        # check that we can get the order of dataset configs from the validation run
        orderorder = run.get_datasetconfiguration_order()
        self.__logger.debug('Orig order {}'.format(orderorder))
        assert orderorder

        # check that they have the same order when using all()
        for i, dsc in enumerate(run.dataset_configurations.all(), 1):
            assert dsc.dataset.id == i
            assert dsc.id == orderorder[i-1]

        # randomly change the order
        newworldorder = np.random.permutation(orderorder)
        self.__logger.debug('New order {}'.format(newworldorder))
        run.set_datasetconfiguration_order(newworldorder)

        # make sure the new order is used
        for i, dsc in enumerate(run.dataset_configurations.all(), 1):
            self.__logger.debug('current id {}'.format(dsc.id))
            assert dsc.id == newworldorder[i-1]
Example #11
    def test_doi(self):
        infile = 'testdata/output_data/c3s_era5land.nc'

        ## generate test validation
        val = ValidationRun()
        val.start_time = timezone.now() - timedelta(days=1)
        val.end_time = timezone.now()
        val.user = self.testuser
        val.save()

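        # candidate (C3S) and reference (ISMN) configurations for the test validation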
        data_c = DatasetConfiguration()
        data_c.validation = val
        data_c.dataset = Dataset.objects.get(short_name='C3S')
        data_c.version = DatasetVersion.objects.get(short_name='C3S_V201812')
        data_c.variable = DataVariable.objects.get(short_name='C3S_sm')
        data_c.save()

        ref_c = DatasetConfiguration()
        ref_c.validation = val
        ref_c.dataset = Dataset.objects.get(short_name='ISMN')
        ref_c.version = DatasetVersion.objects.get(short_name='ISMN_V20191211')
        ref_c.variable = DataVariable.objects.get(
            short_name='ISMN_soil_moisture')
        ref_c.save()

        val.reference_configuration = ref_c
        val.scaling_ref = ref_c
        val.save()

        ## set valid output file for validation
        run_dir = path.join(OUTPUT_FOLDER, str(val.id))
        mkdir_if_not_exists(run_dir)
        shutil.copy(infile, path.join(run_dir, 'results.nc'))
        set_outfile(val, run_dir)
        val.save()

        ## test the publishing form

        # no name given
        val.user.first_name = None
        val.user.last_name = None
        form = PublishingForm(validation=val)
        assert form

        # only first name given
        val.user.first_name = self.user_data['first_name']
        form = PublishingForm(validation=val)
        assert form
        val.user.first_name = None

        # only last name given
        val.user.last_name = self.user_data['last_name']
        form = PublishingForm(validation=val)
        assert form

        # first and last name given but not a real orcid id
        val.user.first_name = self.user_data['first_name']
        val.user.orcid = 'not a real orcid'
        form = PublishingForm(validation=val)

        caught_orcid_error = False
        try:
            assert form.pub_metadata
        except Exception:  # an invalid ORCID should make metadata generation fail
            caught_orcid_error = True

        assert caught_orcid_error

        # fix orcid
        val.user.orcid = self.user_data['orcid']

        ## finally everything should be ok and we can use the form to generate the necessary metadata
        form = PublishingForm(validation=val)
        metadata = form.pub_metadata

        ## create a test doi on zenodo's sandbox service
        get_doi_for_validation(val, metadata)

        val = ValidationRun.objects.get(pk=val.id)
        self.__logger.debug(val.doi)
        assert val.doi
        firstdoi = val.doi

        ## check that the DOI was correctly stored in the netcdf file
        with netCDF4.Dataset(val.output_file.path, mode='r') as ds:
            assert val.doi in ds.doi

        form = PublishingForm(validation=val)
        metadata = form.pub_metadata

        ## try to upload the same data with the same title again - it should work but yield a different doi
        get_doi_for_validation(val, metadata)

        val = ValidationRun.objects.get(pk=val.id)
        self.__logger.debug(val.doi)
        assert val.doi
        assert val.doi != firstdoi

        ## check that the DOI was correctly stored in the netcdf file
        with netCDF4.Dataset(val.output_file.path, mode='r') as ds:
            assert val.doi in ds.doi
Example #12
    def test_autocleanupvalidations(self):

        ended_vals = ValidationRun.objects.filter(
            end_time__isnull=False).count()

        ## unexpired validation
        run1 = ValidationRun()
        run1.start_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 4)
        run1.end_time = timezone.now()
        run1.user = self.testuser
        run1.save()
        runid1 = run1.id

        ## 20% of warning period has passed
        run2 = ValidationRun()
        run2.start_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 4)
        run2.end_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS -
            settings.VALIDATION_EXPIRY_WARNING_DAYS * 0.8)
        run2.user = self.testuser
        run2.save()
        runid2 = run2.id

        ## 80% of warning period has passed
        run3 = ValidationRun()
        run3.start_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 4)
        run3.end_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS -
            settings.VALIDATION_EXPIRY_WARNING_DAYS * 0.2)
        run3.user = self.testuser
        run3.save()
        runid3 = run3.id

        ## just expired validation
        run4 = ValidationRun()
        run4.start_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 4)
        run4.end_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS)
        run4.user = self.testuser
        run4.save()
        runid4 = run4.id

        ## long expired validation
        run5 = ValidationRun()
        run5.start_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 4)
        run5.end_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 2)
        run5.user = self.testuser
        run5.save()
        runid5 = run5.id

        # test what happens if there is no user assigned to a validation
        no_user_run = ValidationRun()
        no_user_run.start_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 4)
        no_user_run.end_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS)
        no_user_run.user = None
        no_user_run.save()
        no_user_run_id = no_user_run.id

        # test what happens if there is no user assigned to a validation but the validation has been published
        no_user_run_published = ValidationRun()
        no_user_run_published.start_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 4)
        no_user_run_published.end_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS)
        no_user_run_published.user = None
        no_user_run_published.doi = '10101/101.010'
        no_user_run_published.save()
        no_user_run_published_id = no_user_run_published.id

        ended_vals2 = ValidationRun.objects.filter(
            end_time__isnull=False).count()
        assert ended_vals + 7 == ended_vals2
        assert runid1
        assert runid2
        assert runid3
        assert runid4
        assert runid5
        assert no_user_run_id
        assert no_user_run_published_id

        # run the command
        args = []
        opts = {}
        call_command('autocleanupvalidations', *args, **opts)

        ## reload from db because the validations have been changed.
        run1 = ValidationRun.objects.get(pk=runid1)
        run2 = ValidationRun.objects.get(pk=runid2)
        run3 = ValidationRun.objects.get(pk=runid3)
        run4 = ValidationRun.objects.get(pk=runid4)
        run5 = ValidationRun.objects.get(pk=runid5)
        non_user_val = ValidationRun.objects.filter(pk=no_user_run_id)
        no_user_run_published = ValidationRun.objects.get(
            pk=no_user_run_published_id)

        ## with the last command call, the user should have been notified about most of our test validations
        ## but the validations should not have been deleted yet
        assert not run1.expiry_notified
        assert run2.expiry_notified
        assert run3.expiry_notified
        assert run4.expiry_notified
        assert run5.expiry_notified
        assert len(
            non_user_val
        ) == 0  # there should be no validation anymore, because it was already removed
        assert not no_user_run_published.expiry_notified  # no notification sent

        ## the validations may have been extended in the previous step, undo that to get them really deleted in the next call
        run1.last_extended = None
        run1.save()
        run2.last_extended = None
        run2.save()
        run3.last_extended = None
        run3.save()
        run4.last_extended = None
        run4.save()
        run5.last_extended = None
        run5.save()

        call_command('autocleanupvalidations', *args, **opts)

        ## the two expired validations should have been deleted now
        ended_vals3 = ValidationRun.objects.filter(
            end_time__isnull=False).count()
        assert ended_vals + 4 == ended_vals3
Example #13
    def test_autocleanupvalidations(self):

        ended_vals = ValidationRun.objects.filter(
            end_time__isnull=False).count()

        ## unexpired validation
        run1 = ValidationRun()
        run1.start_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 4)
        run1.end_time = timezone.now()
        run1.user = self.testuser
        run1.save()
        runid1 = run1.id

        ## 20% of warning period has passed
        run2 = ValidationRun()
        run2.start_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 4)
        run2.end_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS -
            settings.VALIDATION_EXPIRY_WARNING_DAYS * 0.8)
        run2.user = self.testuser
        run2.save()
        runid2 = run2.id

        ## 80% of warning period has passed
        run3 = ValidationRun()
        run3.start_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 4)
        run3.end_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS -
            settings.VALIDATION_EXPIRY_WARNING_DAYS * 0.2)
        run3.user = self.testuser
        run3.save()
        runid3 = run3.id

        ## just expired validation
        run4 = ValidationRun()
        run4.start_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 4)
        run4.end_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS)
        run4.user = self.testuser
        run4.save()
        runid4 = run4.id

        ## long expired validation
        run5 = ValidationRun()
        run5.start_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 4)
        run5.end_time = timezone.now() - timedelta(
            days=settings.VALIDATION_EXPIRY_DAYS * 2)
        run5.user = self.testuser
        run5.save()
        runid5 = run5.id

        ended_vals2 = ValidationRun.objects.filter(
            end_time__isnull=False).count()
        assert ended_vals + 5 == ended_vals2
        assert runid1
        assert runid2
        assert runid3
        assert runid4
        assert runid5

        # run the command
        args = []
        opts = {}
        call_command('autocleanupvalidations', *args, **opts)

        ## reload from db because the validations have been changed.
        run1 = ValidationRun.objects.get(pk=runid1)
        run2 = ValidationRun.objects.get(pk=runid2)
        run3 = ValidationRun.objects.get(pk=runid3)
        run4 = ValidationRun.objects.get(pk=runid4)
        run5 = ValidationRun.objects.get(pk=runid5)

        ## with the last command call, the user should have been notified about most of our test validations
        ## but the validations should not have been deleted yet
        assert not run1.expiry_notified
        assert run2.expiry_notified
        assert run3.expiry_notified
        assert run4.expiry_notified
        assert run5.expiry_notified

        ## the validations may have been extended in the previous step, undo that to get them really deleted in the next call
        run1.last_extended = None
        run1.save()
        run2.last_extended = None
        run2.save()
        run3.last_extended = None
        run3.save()
        run4.last_extended = None
        run4.save()
        run5.last_extended = None
        run5.save()

        call_command('autocleanupvalidations', *args, **opts)

        ## the two expired validations should have been deleted now
        ended_vals3 = ValidationRun.objects.filter(
            end_time__isnull=False).count()
        assert ended_vals + 3 == ended_vals3