def _check_value(self, path, key, value, current, ignore):
    '''Compare a single expected key/value pair against `current[key]`.

    Dispatches to the specialized STAC checkers depending on the key name
    and the value type; falls back to a plain equality assertion.
    '''
    if key == 'geometry':
        self._check_stac_geometry(value, current[key])
        return
    if isinstance(value, dict):
        self._check_stac_dictsubset(path, value, current[key], ignore)
        return
    if isinstance(value, list):
        if key == 'links':
            self._check_stac_links(path, value, current[key])
        elif key == 'providers':
            self._check_stac_providers(path, value, current[key])
        elif key == 'bbox':
            self._check_stac_list(path, value, current[key])
        else:
            # generic lists are compared order-insensitively
            self._check_stac_list(path, sorted(value), sorted(current[key]))
        return
    if key in ('created', 'updated'):
        # created and updated time are automatically set therefore don't do an exact
        # test as we can't guess the exact time.
        self.assertAlmostEqual(
            fromisoformat(value),
            fromisoformat(current[key]),
            delta=timedelta(seconds=1),
            msg=f'{path}: current datetime value is not equal to the expected')
        return
    if key == 'href':
        # only the path component is compared; the host part may differ
        self.assertEqual(
            urlparse(value).path,
            urlparse(current[key]).path,
            msg=f'{path}: value does not match in href {current[key]}')
        return
    self.assertEqual(
        value,
        current[key],
        msg=f'{path}: current value is not equal to the expected')
def validate_date_time(self, date_time):
    '''Validate the datetime query as specified in the api-spec.md.

    If there is an error, a corresponding entry will be added to the
    self.errors dict

    Args:
        date_time: string
            The datetime to get validated
    '''
    start, sep, end = date_time.partition('/')
    message = None
    try:
        # '..' denotes an open range end and is not parsed as a date
        if start != '..':
            start = fromisoformat(start)
        if end and end != '..':
            end = fromisoformat(end)
    except ValueError as error:
        message = "Invalid datetime query parameter, must be isoformat. "
    if end == '':
        # no '/' separator given: single datetime, no range end
        end = None
    if start == '..' and (end is None or end == '..'):
        # Bugfix: previously `message` could still be None here, producing an
        # error text prefixed with the literal string "None ".
        open_range_message = "Invalid datetime query parameter, " \
            "cannot start with open range when no end range is defined"
        message = f"{message} {open_range_message}" if message else open_range_message
    if message:
        # single assignment point (the except clause previously also wrote
        # self.errors, which the final assignment always overwrote anyway)
        self.errors['datetime'] = _(message)
def test_query_updated(self):
    '''GET and POST filtering on the `updated` property must agree and match.'''
    limit = 1
    query = {"updated": {"lte": "9999-12-31T09:07:39.399892Z"}}

    # fetch matches via GET
    get_response = self.client.get(
        f"{self.path}?query={json.dumps(query)}&limit={limit}")
    self.assertStatusCode(200, get_response)
    json_data_get = get_response.json()
    self.assertEqual(len(json_data_get['features']), limit)

    # fetch matches via POST
    post_response = self.client.post(
        self.path,
        data={"query": query, "limit": limit},
        content_type="application/json")
    self.assertStatusCode(200, post_response)
    json_data_post = post_response.json()
    self.assertEqual(len(json_data_post['features']), limit)

    # both endpoints must return the identical feature set
    self.assertEqual(
        json_data_get['features'],
        json_data_post['features'],
        msg="GET and POST responses do not match when filtering for date updated"
    )

    # every returned feature must satisfy the filter upper bound
    upper_bound = fromisoformat(query['updated']['lte'])
    for feature in json_data_get['features']:
        self.assertLessEqual(
            fromisoformat(feature['properties']['updated']), upper_bound)
def check_aborted_response(self, json_response):
    '''Assert that a multipart-upload abort response has the expected shape.'''
    for absent_key in ('urls', 'completed'):
        self.assertNotIn(absent_key, json_response)
    for required_key in ('upload_id', 'status', 'number_parts',
                         'checksum:multihash', 'aborted'):
        self.assertIn(required_key, json_response)
    self.assertEqual(json_response['status'], 'aborted')
    # the abort timestamp must come strictly after the creation timestamp
    self.assertGreater(
        fromisoformat(json_response['aborted']),
        fromisoformat(json_response['created']))
def validate_item_properties_datetimes_dependencies(
    properties_datetime, properties_start_datetime, properties_end_datetime
):
    '''Validate the dependencies between the Item datetimes properties

    This makes sure that either only the properties.datetime is set or both
    properties.start_datetime and properties.end_datetime

    Raises:
        django.core.exceptions.ValidationError
    '''
    try:
        # normalize any string input to datetime objects
        if not isinstance(properties_datetime, datetime) and properties_datetime is not None:
            properties_datetime = fromisoformat(properties_datetime)
        if (
            not isinstance(properties_start_datetime, datetime)
            and properties_start_datetime is not None
        ):
            properties_start_datetime = fromisoformat(properties_start_datetime)
        if (
            not isinstance(properties_end_datetime, datetime)
            and properties_end_datetime is not None
        ):
            properties_end_datetime = fromisoformat(properties_end_datetime)
    except ValueError as error:
        logger.error("Invalid datetime string %s", error)
        raise ValidationError(
            _('Invalid datetime string %(error)s'), params={'error': error}, code='invalid'
        ) from error

    if properties_datetime is not None:
        # a single datetime excludes the start/end range
        if (properties_start_datetime is not None or properties_end_datetime is not None):
            message = 'Cannot provide together property datetime with datetime range ' \
                '(start_datetime, end_datetime)'
            logger.error(message)
            raise ValidationError(_(message), code='invalid')
    else:
        # no single datetime: both range ends are mandatory
        if properties_end_datetime is None:
            message = "Property end_datetime can't be null when no property datetime is given"
            logger.error(message)
            raise ValidationError(_(message), code='invalid')
        if properties_start_datetime is None:
            message = "Property start_datetime can't be null when no property datetime is given"
            logger.error(message)
            raise ValidationError(_(message), code='invalid')
        # both range ends are set at this point; make sure they are ordered
        if properties_end_datetime < properties_start_datetime:
            message = "Property end_datetime can't refer to a date earlier than property "\
                "start_datetime"
            # Fix: this error case was previously the only one not logged
            logger.error(message)
            raise ValidationError(_(message), code='invalid')
def setUpTestData(cls):
    # Class-level fixtures: one collection plus three items whose datetimes
    # cover a fixed past date, "now" and "yesterday".
    # NOTE(review): presumably decorated with @classmethod by the enclosing
    # Django TestCase — confirm in the full file.
    cls.factory = Factory()
    cls.collection = cls.factory.create_collection_sample().model
    # item with a fixed, well-known datetime
    cls.item_1 = cls.factory.create_item_sample(
        cls.collection,
        name='item-1',
        properties_datetime=fromisoformat('2019-01-01T00:00:00Z'),
        db_create=True,
    )
    # reference times reused by the date-range tests
    cls.now = utc_aware(datetime.utcnow())
    cls.yesterday = cls.now - timedelta(days=1)
    cls.item_now = cls.factory.create_item_sample(
        cls.collection,
        name='item-now',
        properties_datetime=cls.now,
        db_create=True,
    )
    cls.item_yesterday = cls.factory.create_item_sample(
        cls.collection,
        name='item-yesterday',
        properties_datetime=cls.yesterday,
        db_create=True)
def check_urls_response(self, urls, number_parts):
    '''Verify the list of presigned part-upload urls returned by the API.'''
    now = utc_aware(datetime.utcnow())
    self.assertEqual(len(urls), number_parts)
    for index, url in enumerate(urls):
        self.assertListEqual(
            list(url.keys()), ['url', 'part', 'expires'],
            msg='Url dictionary keys missing')
        # parts are numbered from 1 in upload order
        self.assertEqual(
            url['part'],
            index + 1,
            msg=f'Part {url["part"]} does not match the url index {index}')
        try:
            url_parsed = parse.urlparse(url["url"])
        except ValueError as error:
            self.fail(
                msg=f"Invalid url {url['url']} for part {url['part']}: {error}")
        else:
            self.assertIn(url_parsed[0], ['http', 'https'])
        try:
            expires_dt = fromisoformat(url['expires'])
        except ValueError as error:
            self.fail(
                msg=f"Invalid expires {url['expires']} for part {url['part']}: {error}")
        else:
            # presigned urls must not already be expired
            self.assertGreater(
                expires_dt,
                now,
                msg=f"expires {url['expires']} for part {url['part']} is not in future")
def check_stac_collection(self, expected, current, ignore=None):
    '''Check a STAC Collection data

    Check if the `current` Collection data match the `expected`. This check
    is a subset check which means that if a value is missing from `current`,
    then it raises a Test Assert, while if a value is in `current` but not
    in `expected`, the test passed. The functions knows also the STAC Spec
    and does some check based on it.

    Args:
        expected: dict
            Expected STAC Collection
        current: dict
            Current STAC Collection to test
        ignore: list(string) | None
            List of keys to ignore in the test
    '''
    if ignore is None:
        ignore = []
    self._check_stac_dictsubset('collection', expected, current, ignore)

    # check required fields with fixed expected values
    for key, value in [('stac_version', '0.9.0'),
                       ('crs', ['http://www.opengis.net/def/crs/OGC/1.3/CRS84']),
                       ('itemType', 'Feature')]:
        self.assertIn(key, current)
        self.assertEqual(value, current[key])

    for key in ['id', 'extent', 'summaries', 'links', 'description', 'license']:
        self.assertIn(key, current, msg=f'Collection {key} is missing')

    for date_field in ['created', 'updated']:
        self.assertIn(date_field, current, msg=f'Collection {date_field} is missing')
        self.assertTrue(
            fromisoformat(current[date_field]),
            msg=f"The collection field {date_field} has an invalid date")

    name = current['id']
    links = [
        {
            'rel': 'self',
            'href': f'{TEST_LINK_ROOT_HREF}/collections/{name}',
        },
        TEST_LINK_ROOT,
        {
            'rel': 'parent',
            'href': f'{TEST_LINK_ROOT_HREF}/',
        },
        {
            'rel': 'items',
            'href': f'{TEST_LINK_ROOT_HREF}/collections/{name}/items',
        },
    ]
    # Bugfix: the path label used to be 'item.links' (copy-paste from
    # check_stac_item) which produced misleading failure messages here.
    self._check_stac_links('collection.links', links, current['links'])
def check_stac_item(self, expected, current, collection, ignore=None):
    '''Check a STAC Item data

    Check if the `current` Item data match the `expected`. This check is a
    subset check which means that if a value is missing from `current`, then
    it raises a Test Assert, while if a value is in `current` but not in
    `expected`, the test passed. The functions knows also the STAC Spec and
    does some check based on it.

    Args:
        expected: dict
            Expected STAC Item
        current: dict
            Current STAC Item to test
        ignore: list(string) | None
            List of keys to ignore in the test
    '''
    if ignore is None:
        ignore = []
    self._check_stac_dictsubset('item', expected, current, ignore=ignore)

    # check required fields with fixed expected values
    for key, value in (('stac_version', '0.9.0'), ('type', 'Feature')):
        self.assertIn(key, current)
        self.assertEqual(value, current[key])

    for key in ('id', 'bbox', 'links', 'properties', 'assets', 'geometry'):
        self.assertIn(key, current, msg=f'Item {key} is missing')

    for date_field in ('created', 'updated'):
        self.assertIn(
            date_field,
            current['properties'],
            msg=f'Item properties.{date_field} is missing')
        self.assertTrue(
            fromisoformat(current['properties'][date_field]),
            msg=f"The item field {date_field} has an invalid date")

    name = current['id']
    items_path = f'{TEST_LINK_ROOT_HREF}/collections/{collection}/items'
    expected_links = [
        {
            'rel': 'self',
            'href': f'{items_path}/{name}',
        },
        TEST_LINK_ROOT,
        {
            'rel': 'parent',
            'href': items_path,
        },
        {
            'rel': 'collection',
            'href': f'{TEST_LINK_ROOT_HREF}/collections/{collection}',
        },
    ]
    self._check_stac_links('item.links', expected_links, current['links'])
def _query_validate_in_operator(self, attribute, value):
    '''Tests if the type in the list stays the same.

    This is a helper function of _query_validate_operators. If there is an
    error, a corresponding entry will be added to the self.errors dict

    Args:
        attribute: string
            The attribute to be tested
        value: string or list[strings]
            The value to be tested (string or datetime)
    '''
    # validate date attributes: every value must parse as an isoformat date
    if attribute in self.queriable_date_fields:
        try:
            if isinstance(value, list):
                self.validate_list_length(value, 'query')
                value = [fromisoformat(entry) for entry in value]
            else:
                value = fromisoformat(value)
        except ValueError as error:
            message = f"{value} is an invalid dateformat: ({error})"
            self.errors[f"query-attributes-{attribute}"] = _(message)

    # validate string attributes: every value must be a str
    if attribute in self.queriable_str_fields:
        message = ''
        if isinstance(value, list):
            self.validate_list_length(value, 'query')
            candidates = value
        else:
            candidates = [value]
        for val in candidates:
            if not isinstance(val, str):
                message = f"{message} The values have to be strings." \
                    f" The value {val} is not a string"
        if message != '':
            self.errors[f"query-attributes-{attribute}"] = _(message)
def test_single_item_endpoint(self):
    '''Fetch one item and verify its integrated assets are complete and sorted.'''
    collection_name = self.collection.name
    item = self.items[0]
    # create assets in a non ascending order to make sure that the assets ordering is working
    assets = self.factory.create_asset_samples(
        3,
        item.model,
        name=['asset-1.tiff', 'asset-0.tiff', 'asset-2.tiff'],
        db_create=True)

    response = self.client.get(
        f"/{STAC_BASE_V}/collections/{collection_name}/items/{item['name']}")
    json_data = response.json()
    self.assertStatusCode(200, response)

    # The ETag change between each test call due to the created, updated time that are in the
    # hash computation of the ETag
    self.check_header_etag(None, response)

    self.check_stac_item(item.json, json_data, self.collection.name)

    # created and updated must exist and be a valid date
    for date_field in ('created', 'updated'):
        self.assertTrue(
            fromisoformat(json_data['properties'][date_field]),
            msg=f"The field {date_field} has an invalid date")

    self.assertEqual(
        len(json_data['assets']), 3, msg="Integrated assets length don't match")

    # Check that the integrated assets output is sorted by name
    asset_ids = list(json_data['assets'].keys())
    self.assertListEqual(
        asset_ids, sorted(asset_ids), msg="Integrated assets are not sorted by ID")

    # Check the integrated assets output
    for asset in sorted(assets, key=lambda sample: sample['name']):
        self.check_stac_asset(
            asset.json,
            json_data['assets'][asset['name']],
            collection_name,
            json_data['id'],
            # in the integrated asset there is no id (the id is actually the json key)
            ignore=['id', 'links'])
def test_single_item_endpoint(self):
    '''Fetch one item and verify the STAC payload and its date fields.'''
    collection_name = self.collection.name
    item_name = self.items[0].model.name
    response = self.client.get(
        f"/{STAC_BASE_V}/collections/{collection_name}/items/{item_name}")
    json_data = response.json()
    self.assertStatusCode(200, response)

    # The ETag change between each test call due to the created, updated time that are in the
    # hash computation of the ETag
    self.check_header_etag(None, response)

    self.check_stac_item(self.items[0].json, json_data, self.collection.name)

    # created and updated must exist and be a valid date
    for date_field in ('created', 'updated'):
        self.assertTrue(
            fromisoformat(json_data['properties'][date_field]),
            msg=f"The field {date_field} has an invalid date")
GEOSGeometry( json.dumps({ "coordinates": [[ [5.644711, 46.775054], [5.644711, 48.014995], [6.602408, 48.014995], [7.602408, 49.014995], [5.644711, 46.775054], ]], "type": "Polygon" })), 'properties_title': 'My item 1', 'properties_datetime': fromisoformat('2020-10-28T13:05:10Z'), 'links': links.values() }, 'item-2': { 'name': 'item-2', 'geometry': GEOSGeometry( json.dumps({ "coordinates": [[ [5.644711, 46.775054], [5.644711, 48.014995], [6.602408, 48.014995], [7.602408, 49.014995], [5.644711, 46.775054],
def check_stac_asset(self, expected, current, collection, item, ignore=None):
    '''Check a STAC Asset data

    Check if the `current` Asset data match the `expected`. This check is a
    subset check which means that if a value is missing from `current`, then
    it raises a Test Assert, while if a value is in `current` but not in
    `expected`, the test passed. The functions knows also the STAC Spec and
    does some check based on it.

    Args:
        expected: dict
            Expected STAC Asset
        current: dict
            Current STAC Asset to test
        ignore: list(string) | None
            List of keys to ignore in the test
    '''
    if ignore is None:
        ignore = []
    self._check_stac_dictsubset('asset', expected, current, ignore=ignore)

    # check required fields
    for key in ['links', 'id', 'type', 'href']:
        if key in ignore:
            logger.info('Ignoring key %s in asset', key)
            continue
        self.assertIn(key, current, msg=f'Asset {key} is missing')

    for date_field in ['created', 'updated']:
        # Bugfix: this loop previously tested `key in ignore` with the
        # leftover `key` from the loop above, so ignoring 'created'/'updated'
        # never worked and ignoring 'href' skipped both date checks entirely.
        if date_field in ignore:
            logger.info('Ignoring key %s in asset', date_field)
            continue
        self.assertIn(date_field, current, msg=f'Asset {date_field} is missing')
        self.assertTrue(
            fromisoformat(current[date_field]),
            msg=f"The asset field {date_field} has an invalid date")

    if 'links' not in ignore:
        name = current['id']
        links = [
            {
                'rel': 'self',
                'href':
                    f'{TEST_LINK_ROOT_HREF}/collections/{collection}/items/{item}/assets/{name}'
            },
            TEST_LINK_ROOT,
            {
                'rel': 'parent',
                'href': f'{TEST_LINK_ROOT_HREF}/collections/{collection}/items/{item}',
            },
            {
                'rel': 'item',
                'href': f'{TEST_LINK_ROOT_HREF}/collections/{collection}/items/{item}',
            },
            {
                'rel': 'collection',
                'href': f'{TEST_LINK_ROOT_HREF}/collections/{collection}',
            },
        ]
        self._check_stac_links('asset.links', links, current['links'])