def test_time_dependent_merge(self): all_timestamps = [dt.datetime(2020, month, 1) for month in range(1, 7)] eop1 = EOPatch(data={'bands': np.ones((3, 4, 5, 2))}, timestamp=[ all_timestamps[0], all_timestamps[5], all_timestamps[4] ]) eop2 = EOPatch(data={'bands': np.ones((4, 4, 5, 2))}, timestamp=[ all_timestamps[3], all_timestamps[1], all_timestamps[2], all_timestamps[4] ]) eop = eop1.merge(eop2) expected_eop = EOPatch(data={'bands': np.ones((6, 4, 5, 2))}, timestamp=all_timestamps) self.assertEqual(eop, expected_eop) eop = eop1.merge(eop2, time_dependent_op='concatenate') expected_eop = EOPatch(data={'bands': np.ones((7, 4, 5, 2))}, timestamp=all_timestamps[:5] + [all_timestamps[4]] + all_timestamps[5:]) self.assertEqual(eop, expected_eop) eop1.data['bands'][-1:, ...] = 3 with self.assertRaises(ValueError): eop1.merge(eop2) eop = eop1.merge(eop2, time_dependent_op='mean') expected_eop = EOPatch(data={'bands': np.ones((6, 4, 5, 2))}, timestamp=all_timestamps) expected_eop.data['bands'][4:5, ...] = 2 self.assertEqual(eop, expected_eop)
def test_equals():
    """EOPatch equality must compare array values (NaN-aware), shape and dtype."""
    eop1 = EOPatch(data={"bands": np.arange(2 * 3 * 3 * 2, dtype=np.float32).reshape(2, 3, 3, 2)})
    eop2 = EOPatch(data={"bands": np.arange(2 * 3 * 3 * 2, dtype=np.float32).reshape(2, 3, 3, 2)})
    assert eop1 == eop2
    assert eop1.data == eop2.data

    # NaNs on only one side -> unequal
    eop1.data["bands"][1, ...] = np.nan
    assert eop1 != eop2
    assert eop1.data != eop2.data

    # matching NaNs compare as equal
    eop2.data["bands"][1, ...] = np.nan
    assert eop1 == eop2

    # same values but different shape -> unequal
    eop1.data["bands"] = np.reshape(eop1.data["bands"], (2, 3, 2, 3))
    assert eop1 != eop2
    eop2.data["bands"] = np.reshape(eop2.data["bands"], (2, 3, 2, 3))

    # same values but different dtype -> unequal
    eop1.data["bands"] = eop1.data["bands"].astype(np.float16)
    assert eop1 != eop2

    # deleting the differing feature restores equality
    del eop1.data["bands"]
    del eop2.data["bands"]
    assert eop1 == eop2

    # an extra feature on one side -> unequal
    eop1.data_timeless["dem"] = np.arange(3 * 3 * 2).reshape(3, 3, 2)
    assert eop1 != eop2
def test_workflow_copying_eopatches():
    """Branches of a workflow must not mutate each other's copy of a shared EOPatch."""
    feature1 = FeatureType.DATA, "data1"
    feature2 = FeatureType.DATA, "data2"

    create_node = EONode(CreateEOPatchTask())
    init_node = EONode(
        InitializeFeatureTask([feature1, feature2], shape=(2, 4, 4, 3), init_value=1),
        inputs=[create_node],
    )
    # Two branches each remove a different feature from the same upstream patch
    remove_node1 = EONode(RemoveFeatureTask([feature1]), inputs=[init_node])
    remove_node2 = EONode(RemoveFeatureTask([feature2]), inputs=[init_node])
    output_node1 = EONode(OutputTask(name="out1"), inputs=[remove_node1])
    output_node2 = EONode(OutputTask(name="out2"), inputs=[remove_node2])

    workflow = EOWorkflow(
        [create_node, init_node, remove_node1, remove_node2, output_node1, output_node2]
    )
    results = workflow.execute()

    eop1 = results.outputs["out1"]
    eop2 = results.outputs["out2"]
    # Each output kept exactly the feature that the *other* branch removed
    assert eop1 == EOPatch(data={"data2": np.ones((2, 4, 4, 3), dtype=np.uint8)})
    assert eop2 == EOPatch(data={"data1": np.ones((2, 4, 4, 3), dtype=np.uint8)})
def test_train_split_per_value():
    """Test if class ids get assigned to the same subclasses in multiple eopatches"""
    shape = (1000, 1000, 3)

    input1 = np.random.randint(10, size=shape, dtype=int)
    input2 = np.random.randint(10, size=shape, dtype=int)

    patch1 = EOPatch()
    patch1[INPUT_FEATURE] = input1
    patch2 = EOPatch()
    patch2[INPUT_FEATURE] = input2

    bins = [0.2, 0.6]
    split_task = TrainTestSplitTask(INPUT_FEATURE, OUTPUT_FEATURE, bins, split_type="per_value")

    # seeds should get ignored when splitting 'per_value'
    patch1 = split_task(patch1, seed=1)
    patch2 = split_task(patch2, seed=1)

    # fixed misspelled local names ('otuput1'/'otuput2' -> 'output1'/'output2')
    output1 = patch1[OUTPUT_FEATURE]
    output2 = patch2[OUTPUT_FEATURE]

    unique = set(np.unique(input1)) | set(np.unique(input2))

    # each input value must map to the same set of folds in both patches
    for uniq in unique:
        folds1 = output1[input1 == uniq]
        folds2 = output2[input2 == uniq]
        assert_array_equal(np.unique(folds1), np.unique(folds2))
def test_timeless_merge(self):
    """Merging timeless features: conflicting arrays require an explicit timeless_op."""
    eop1 = EOPatch(mask_timeless={
        'mask': np.ones((3, 4, 5), dtype=np.int16),
        'mask1': np.ones((5, 4, 3), dtype=np.int16)
    })
    eop2 = EOPatch(mask_timeless={
        'mask': 4 * np.ones((3, 4, 5), dtype=np.int16),
        'mask2': np.ones((4, 5, 3), dtype=np.int16)
    })

    # 'mask' has different values in the two patches, so a plain merge must fail
    with self.assertRaises(ValueError):
        eop1.merge(eop2)

    # 'concatenate' joins the conflicting arrays along the last axis (5 + 5 = 10)
    eop = eop1.merge(eop2, timeless_op='concatenate')
    expected_eop = EOPatch(mask_timeless={
        'mask': np.ones((3, 4, 10), dtype=np.int16),
        'mask1': eop1.mask_timeless['mask1'],
        'mask2': eop2.mask_timeless['mask2']
    })
    expected_eop.mask_timeless['mask'][..., 5:] = 4
    self.assertEqual(eop, expected_eop)

    # 'min' keeps element-wise minima; passing eop2 twice must not change the result
    eop = eop1.merge(eop2, eop2, timeless_op='min')
    expected_eop = EOPatch(mask_timeless={
        'mask': eop1.mask_timeless['mask'],
        'mask1': eop1.mask_timeless['mask1'],
        'mask2': eop2.mask_timeless['mask2']
    })
    self.assertEqual(eop, expected_eop)
def test_equals(self):
    """EOPatch equality must compare array values (NaN-aware), shape and dtype."""
    eop1 = EOPatch(data={'bands': np.arange(2 * 3 * 3 * 2).reshape(2, 3, 3, 2)})
    eop2 = EOPatch(data={'bands': np.arange(2 * 3 * 3 * 2).reshape(2, 3, 3, 2)})
    self.assertEqual(eop1, eop2)

    # NaNs on only one side -> unequal
    eop1.data['bands'][1, ...] = np.nan
    self.assertNotEqual(eop1, eop2)

    # matching NaNs compare as equal
    eop2.data['bands'][1, ...] = np.nan
    self.assertEqual(eop1, eop2)

    # same values but different shape -> unequal
    eop1.data['bands'] = np.reshape(eop1.data['bands'], (2, 3, 2, 3))
    self.assertNotEqual(eop1, eop2)
    eop2.data['bands'] = np.reshape(eop2.data['bands'], (2, 3, 2, 3))

    # same values but different dtype -> unequal
    eop1.data['bands'] = eop1.data['bands'].astype(np.float16)
    self.assertNotEqual(eop1, eop2)

    # deleting the differing feature restores equality
    del eop1.data['bands']
    del eop2.data['bands']
    self.assertEqual(eop1, eop2)

    # an extra feature on one side -> unequal
    eop1.data_timeless['dem'] = np.arange(3 * 3 * 2).reshape(3, 3, 2)
    self.assertNotEqual(eop1, eop2)
def test_bbox_merge(self):
    """Merging works for matching bounding boxes and fails across CRS mismatches."""
    patch_wgs84 = EOPatch(bbox=BBox((1, 2, 3, 4), CRS.WGS84))
    patch_popweb = EOPatch(bbox=BBox((1, 2, 3, 4), CRS.POP_WEB))

    # Merging a patch with itself is a no-op
    merged = patch_wgs84.merge(patch_wgs84)
    self.assertEqual(merged, patch_wgs84)

    # Same coordinates in a different CRS must be rejected
    with self.assertRaises(ValueError):
        patch_wgs84.merge(patch_popweb)
def test_partial_copy(self):
    """DeepCopyTask with a feature list must copy only the selected features."""
    partial_copy = DeepCopyTask(
        features=[(FeatureType.MASK_TIMELESS, 'mask'), FeatureType.BBOX]
    ).execute(self.patch)
    expected_patch = EOPatch(mask_timeless=self.patch.mask_timeless, bbox=self.patch.bbox)
    self.assertEqual(partial_copy, expected_patch, 'Partial copying was not successful')

    partial_deepcopy = DeepCopyTask(
        features=[FeatureType.TIMESTAMP, (FeatureType.SCALAR, 'values')]
    ).execute(self.patch)
    expected_patch = EOPatch(scalar=self.patch.scalar, timestamp=self.patch.timestamp)
    self.assertEqual(partial_deepcopy, expected_patch, 'Partial deep copying was not successful')
def test_check_dims(self):
    """Spatial raster feature types must reject arrays of insufficient rank."""
    flat_array = np.arange(3 * 3).reshape(3, 3)

    # A 2D array is not a valid time-dependent data feature
    with self.assertRaises(ValueError):
        EOPatch(data={'bands': flat_array})

    patch = EOPatch()
    spatial_raster_types = (
        ftype for ftype in FeatureType if ftype.is_spatial() and not ftype.is_vector()
    )
    for ftype in spatial_raster_types:
        with self.assertRaises(ValueError):
            patch[ftype][ftype.value] = flat_array
def test_concatenate_different_key(self):
    """Adding patches with differently named features must keep both features."""
    first_patch = EOPatch()
    first_patch.data['bands'] = np.arange(2 * 3 * 3 * 2).reshape(2, 3, 3, 2)

    second_patch = EOPatch()
    second_patch.data['measurements'] = np.arange(3 * 3 * 3 * 2).reshape(3, 3, 3, 2)

    combined = first_patch + second_patch

    self.assertTrue(
        'bands' in combined.data and 'measurements' in combined.data,
        'Failed to concatenate different features'
    )
def test_meta_info_merge(self):
    """Meta-info dictionaries merge by key union; a conflicting value only warns."""
    patch_a = EOPatch(meta_info={'a': 1, 'b': 2})
    patch_b = EOPatch(meta_info={'a': 1, 'c': 5})
    expected = EOPatch(meta_info={'a': 1, 'b': 2, 'c': 5})

    merged = patch_a.merge(patch_b)
    self.assertEqual(merged, expected)

    # A conflicting value triggers a warning; the first patch's value is kept
    patch_b.meta_info['a'] = 3
    with self.assertWarns(UserWarning):
        merged = patch_a.merge(patch_b)
    self.assertEqual(merged, expected)
def test_concatenate(self):
    """EOPatch.concatenate joins time-dependent features along the temporal axis."""
    first_bands = np.arange(2 * 3 * 3 * 2).reshape(2, 3, 3, 2)
    second_bands = np.arange(3 * 3 * 3 * 2).reshape(3, 3, 3, 2)

    patch_a = EOPatch()
    patch_a.data['bands'] = first_bands
    patch_b = EOPatch()
    patch_b.data['bands'] = second_bands

    concatenated = EOPatch.concatenate(patch_a, patch_b)
    expected = np.concatenate((first_bands, second_bands), axis=0)
    self.assertTrue(np.array_equal(concatenated.data['bands'], expected),
                    msg="Array mismatch")
def test_partial_copy(patch):
    """DeepCopyTask with a feature list must copy only the selected features."""
    partial_copy = DeepCopyTask(
        features=[(FeatureType.MASK_TIMELESS, "mask"), FeatureType.BBOX]
    ).execute(patch)
    expected_patch = EOPatch(mask_timeless=patch.mask_timeless, bbox=patch.bbox)
    assert partial_copy == expected_patch, "Partial copying was not successful"

    partial_deepcopy = DeepCopyTask(
        features=[FeatureType.TIMESTAMP, (FeatureType.SCALAR, "values")]
    ).execute(patch)
    expected_patch = EOPatch(scalar=patch.scalar, timestamp=patch.timestamp)
    assert partial_deepcopy == expected_patch, "Partial deep copying was not successful"
def test_concatenate_missmatched_timeless(self):
    """Adding patches whose timeless features hold different values must fail."""
    base_mask = np.arange(3 * 3 * 2).reshape(3, 3, 2)

    patch_a = EOPatch()
    patch_a.data_timeless['mask'] = base_mask
    patch_a.data_timeless['nask'] = 3 * base_mask

    patch_b = EOPatch()
    patch_b.data_timeless['mask'] = base_mask
    patch_b.data_timeless['nask'] = 5 * base_mask  # conflicts with patch_a

    with self.assertRaises(ValueError):
        _ = patch_a + patch_b
def test_move_feature():
    """MoveFeatureTask must copy features between patches, by reference or deep copy."""
    patch_src = EOPatch()
    patch_dst = EOPatch()

    shape = (10, 5, 5, 3)
    # np.product is deprecated and removed in NumPy 2.0; np.prod is the supported name
    size = np.prod(shape)
    shape_timeless = (5, 5, 3)
    size_timeless = np.prod(shape_timeless)

    data = [np.random.randint(0, 100, size).reshape(*shape) for i in range(3)] + [
        np.random.randint(0, 100, size_timeless).reshape(*shape_timeless) for i in range(2)
    ]
    features = [
        (FeatureType.DATA, "D1"),
        (FeatureType.DATA, "D2"),
        (FeatureType.MASK, "M1"),
        (FeatureType.MASK_TIMELESS, "MTless1"),
        (FeatureType.MASK_TIMELESS, "MTless2"),
    ]
    for feat, dat in zip(features, data):
        patch_src = AddFeatureTask(feat)(patch_src, dat)

    # Default move shares the underlying arrays with the source (same object identity)
    patch_dst = MoveFeatureTask(features)(patch_src, patch_dst)
    for i, feature in enumerate(features):
        assert id(data[i]) == id(patch_dst[feature])
        assert np.array_equal(data[i], patch_dst[feature])

    # deep_copy=True must yield equal values in distinct array objects
    patch_dst = EOPatch()
    patch_dst = MoveFeatureTask(features, deep_copy=True)(patch_src, patch_dst)
    for i, feature in enumerate(features):
        assert id(data[i]) != id(patch_dst[feature])
        assert np.array_equal(data[i], patch_dst[feature])

    # Ellipsis selects every feature of the given type and nothing else
    features = [(FeatureType.MASK_TIMELESS, ...)]
    patch_dst = EOPatch()
    patch_dst = MoveFeatureTask(features)(patch_src, patch_dst)

    assert FeatureType.MASK_TIMELESS in patch_dst.get_features()
    assert FeatureType.DATA not in patch_dst.get_features()
    assert "MTless1" in patch_dst[FeatureType.MASK_TIMELESS]
    assert "MTless2" in patch_dst[FeatureType.MASK_TIMELESS]
def test_join_masks():
    """JoinMasksTask must combine masks with 'and', 'or' or a custom callable."""
    eopatch = EOPatch()

    mask1 = (FeatureType.MASK_TIMELESS, "Mask1")
    mask_data1 = np.zeros((10, 10, 1), dtype=np.uint8)
    mask_data1[2:5, 2:5] = 1
    eopatch[mask1] = mask_data1

    mask2 = (FeatureType.MASK_TIMELESS, "Mask2")
    mask_data2 = np.zeros((10, 10, 1), dtype=np.uint8)
    mask_data2[0:3, 7:8] = 1
    eopatch[mask2] = mask_data2

    mask3 = (FeatureType.MASK_TIMELESS, "Mask3")
    mask_data3 = np.zeros((10, 10, 1), dtype=np.uint8)
    # NOTE(review): the slice [1:1] is empty, so mask_data3 stays all zeros —
    # possibly [1:2] was intended. The expected results below are computed from
    # the same arrays, so the test stays self-consistent either way; confirm intent.
    mask_data3[1:1] = 1
    eopatch[mask3] = mask_data3

    input_features = [mask1, mask2, mask3]
    output_feature = (FeatureType.MASK_TIMELESS, "Output")

    # default operation: bitwise 'and' over all input masks
    task1 = JoinMasksTask(input_features, output_feature)
    expected_result = mask_data1 & mask_data2 & mask_data3
    task1(eopatch)
    assert np.array_equal(eopatch[output_feature], expected_result)

    # named operation 'or'
    task2 = JoinMasksTask(input_features, output_feature, "or")
    expected_result = mask_data1 | mask_data2 | mask_data3
    task2(eopatch)
    assert np.array_equal(eopatch[output_feature], expected_result)

    # custom callable, applied pairwise over the masks
    task3 = JoinMasksTask(input_features, output_feature, lambda x, y: x + y)
    expected_result = mask_data1 + mask_data2 + mask_data3
    task3(eopatch)
    assert np.array_equal(eopatch[output_feature], expected_result)
def test_remove_feature(self):
    """All three removal syntaxes (method call, attribute del, item del) must work."""
    bands = np.arange(2 * 3 * 3 * 2).reshape(2, 3, 3, 2)
    names = ['bands1', 'bands2', 'bands3']

    # Add the same array through the three supported interfaces
    eop = EOPatch()
    eop.add_feature(FeatureType.DATA, names[0], bands)
    eop.data[names[1]] = bands
    eop[FeatureType.DATA][names[2]] = bands

    for feature_name in names:
        self.assertTrue(
            feature_name in eop.data,
            "Feature {} was not added to EOPatch".format(feature_name))
        self.assertTrue(
            np.array_equal(eop.data[feature_name], bands),
            "Data of feature {} is " "incorrect".format(feature_name))

    # Remove each feature with a different removal syntax
    eop.remove_feature(FeatureType.DATA, names[0])
    del eop.data[names[1]]
    del eop[FeatureType.DATA][names[2]]
    for feature_name in names:
        self.assertFalse(
            feature_name in eop.data,
            msg="Feature {} should be deleted from " "EOPatch".format(feature_name))
def execute(self, eopatch=None, *, bbox=None, time_interval=None):
    """Execute method that adds new Meteoblue data into an EOPatch

    :param eopatch: An EOPatch in which data will be added. If not provided a new EOPatch will be
        created.
    :type eopatch: EOPatch or None
    :param bbox: A bounding box of a request. Should be provided if eopatch parameter is not
        provided.
    :type bbox: BBox or None
    :param time_interval: An interval for which data should be downloaded. If not provided then
        timestamps from provided eopatch will be used.
    :type time_interval: (dt.datetime, dt.datetime) or (str, str) or None
    """
    # NOTE(review): relies on a fresh EOPatch being falsy — confirm EOPatch defines
    # truthiness that way; otherwise `eopatch or EOPatch()` never replaces the argument
    eopatch = eopatch or EOPatch()

    eopatch.bbox = self._prepare_bbox(eopatch, bbox)
    time_intervals = self._prepare_time_intervals(eopatch, time_interval)

    # The request geometry is sent as WGS84 GeoJSON regardless of the patch CRS
    bbox = eopatch.bbox
    geometry = Geometry(bbox.geometry, bbox.crs).transform(CRS.WGS84)
    geojson = shapely.geometry.mapping(geometry.geometry)
    query = {
        "units": self.units,
        "geometry": geojson,
        "format": "protobuf",
        "timeIntervals": time_intervals,
        "queries": [self.query],
    }
    result_data, result_timestamp = self._get_data(query)

    # Keep any pre-existing timestamps; only fill them in when the patch has none
    if not eopatch.timestamp and result_timestamp:
        eopatch.timestamp = result_timestamp

    eopatch[self.feature] = result_data
    return eopatch
def test_temporal_indices(self):
    """
    Test case for computation of argmax/argmin of NDVI and another band

    Cases with and without data masking are tested
    """
    # EOPatch
    eopatch = EOPatch()
    t, h, w, c = 5, 3, 3, 2

    # NDVI
    ndvi_shape = (t, h, w, 1)
    # Valid data mask; np.bool was removed from NumPy (1.24), the builtin bool is equivalent
    valid_data = np.ones(ndvi_shape, bool)
    valid_data[0] = 0
    valid_data[-1] = 0

    # Fill in eopatch
    eopatch.add_feature(FeatureType.DATA, 'NDVI',
                        np.arange(np.prod(ndvi_shape)).reshape(ndvi_shape))
    eopatch.add_feature(FeatureType.MASK, 'IS_DATA',
                        np.ones(ndvi_shape, dtype=np.int16))
    eopatch.add_feature(FeatureType.MASK, 'VALID_DATA', valid_data)

    # Without masking, the monotone NDVI puts argmin/argmax at the first/last frame
    add_ndvi = AddMaxMinTemporalIndicesTask(mask_data=False)
    new_eopatch = add_ndvi(eopatch)
    self.assertTrue(
        np.array_equal(new_eopatch.data_timeless['ARGMIN_NDVI'], np.zeros((h, w, 1))))
    self.assertTrue(
        np.array_equal(new_eopatch.data_timeless['ARGMAX_NDVI'], (t - 1) * np.ones((h, w, 1))))
    del add_ndvi, new_eopatch

    # Repeat with the valid-data mask: the invalid first/last frames are excluded
    add_ndvi = AddMaxMinTemporalIndicesTask(mask_data=True)
    new_eopatch = add_ndvi(eopatch)
    self.assertTrue(
        np.array_equal(new_eopatch.data_timeless['ARGMIN_NDVI'], np.ones((h, w, 1))))
    self.assertTrue(
        np.array_equal(new_eopatch.data_timeless['ARGMAX_NDVI'], (t - 2) * np.ones((h, w, 1))))
    del add_ndvi, new_eopatch, valid_data

    # BANDS: the same computation on a selected index of a multi-band feature
    bands_shape = (t, h, w, c)
    eopatch.add_feature(FeatureType.DATA, 'BANDS',
                        np.arange(np.prod(bands_shape)).reshape(bands_shape))
    add_bands = AddMaxMinTemporalIndicesTask(data_feature='BANDS',
                                             data_index=1,
                                             amax_data_feature='ARGMAX_B1',
                                             amin_data_feature='ARGMIN_B1',
                                             mask_data=False)
    new_eopatch = add_bands(eopatch)
    self.assertTrue(
        np.array_equal(new_eopatch.data_timeless['ARGMIN_B1'], np.zeros((h, w, 1))))
    self.assertTrue(
        np.array_equal(new_eopatch.data_timeless['ARGMAX_B1'], (t - 1) * np.ones((h, w, 1))))
def test_point_sampling_task(self):
    """PointSamplingTask must add sampled features and labels with the correct shapes."""
    t, h, w, d = 10, 100, 100, 5
    eop = EOPatch()
    eop.data['bands'] = np.arange(t * h * w * d).reshape(t, h, w, d)
    eop.mask_timeless['raster'] = self.raster.reshape(self.raster_size + (1, ))

    task = PointSamplingTask(
        n_samples=self.n_samples,
        ref_mask_feature='raster',
        ref_labels=[0, 1],
        sample_features=[
            (FeatureType.DATA, 'bands', 'SAMPLED_DATA'),
            (FeatureType.MASK_TIMELESS, 'raster', 'SAMPLED_LABELS')
        ],
        even_sampling=True)
    task.execute(eop)

    # assert features, labels and sampled rows and cols are added to eopatch
    self.assertIn('SAMPLED_LABELS', eop.mask_timeless, msg="labels not added to eopatch")
    self.assertIn('SAMPLED_DATA', eop.data, msg="features not added to eopatch")

    # check validity of sampling: all time frames kept, spatial dims collapsed to samples
    self.assertTupleEqual(eop.data['SAMPLED_DATA'].shape, (t, self.n_samples, 1, d),
                          msg="incorrect features size")
    self.assertTupleEqual(eop.mask_timeless['SAMPLED_LABELS'].shape, (self.n_samples, 1, 1),
                          msg="incorrect number of samples")
def setUpClass(cls):
    """Build a reference EOPatch holding a feature of most types, plus filesystem loaders."""
    eopatch = EOPatch()
    mask = np.zeros((3, 3, 2), dtype=np.int16)
    data = np.zeros((2, 3, 3, 2), dtype=np.int16)
    eopatch.data_timeless['mask'] = mask
    eopatch.data['data'] = data
    eopatch.timestamp = [
        datetime.datetime(2017, 1, 1, 10, 4, 7),
        datetime.datetime(2017, 1, 4, 10, 14, 5)
    ]
    eopatch.meta_info['something'] = 'nothing'
    eopatch.meta_info['something-else'] = 'nothing'
    eopatch.bbox = BBox((1, 2, 3, 4), CRS.WGS84)
    # keys with spaces — presumably to exercise name handling on save/load; confirm intent
    eopatch.scalar['my scalar with spaces'] = np.array([[1, 2, 3], [1, 2, 3]])
    eopatch.scalar_timeless['my timeless scalar with spaces'] = np.array([1, 2, 3])
    # vector feature with a TIMESTAMP column matching the patch timestamps
    eopatch.vector['my-df'] = GeoDataFrame(
        {
            'values': [1, 2],
            'TIMESTAMP': [
                datetime.datetime(2017, 1, 1, 10, 4, 7),
                datetime.datetime(2017, 1, 4, 10, 14, 5)
            ],
            'geometry': [eopatch.bbox.geometry, eopatch.bbox.geometry]
        },
        crs=eopatch.bbox.crs.pyproj_crs())

    cls.eopatch = eopatch
    # tests run against both a local temp filesystem and a mocked S3 filesystem
    cls.filesystem_loaders = [TempFS, _create_new_s3_fs]
def test_nonexistent_location(self):
    """Loading from a missing path must fail; saving must create the missing folders."""
    path = './folder/subfolder/new-eopatch/'
    empty_eop = EOPatch()

    for fs_loader in self.filesystem_loaders:
        with fs_loader() as temp_fs:
            # loading a non-existent path through a filesystem object
            with self.assertRaises(ResourceNotFound):
                EOPatch.load(path, filesystem=temp_fs)

            # saving is allowed and creates the folder structure
            empty_eop.save(path, filesystem=temp_fs)

    with TempFS() as temp_fs:
        full_path = os.path.join(temp_fs.root_path, path)
        # loading via an absolute OS path fails differently (filesystem creation fails)
        with self.assertRaises(CreateFailed):
            EOPatch.load(full_path)

        load_task = LoadTask(full_path)
        with self.assertRaises(CreateFailed):
            load_task.execute()

        empty_eop.save(full_path)
        self.assertTrue(os.path.exists(full_path))

    with TempFS() as temp_fs:
        full_path = os.path.join(temp_fs.root_path, path)
        # SaveTask must likewise create the target folder on demand
        save_task = SaveTask(full_path)
        save_task.execute(empty_eop)
        self.assertTrue(os.path.exists(full_path))
def test_numpy_feature_types(self):
    """Raster feature types must accept only arrays of the matching dimensionality."""
    eop = EOPatch()

    # Arrays of ranks 0-5 in six dtypes each; np.float and np.bool were removed
    # from NumPy (1.24), the builtins float/bool are their documented replacements
    data_examples = []
    for size in range(6):
        for dtype in [np.float32, np.float64, float, np.uint8, np.int64, bool]:
            data_examples.append(np.zeros((2, ) * size, dtype=dtype))

    for feature_type in FeatureTypeSet.RASTER_TYPES:
        valid_count = 0
        for data in data_examples:
            try:
                eop[feature_type]['TEST'] = data
                valid_count += 1
            except ValueError:
                pass
        # exactly one rank is valid per feature type, with all 6 dtypes accepted
        self.assertEqual(
            valid_count,
            6,  # 3 * (2 - feature_type.is_discrete()),
            msg='Feature type {} should take only a specific type of data'.
            format(feature_type))
def test_object_sampling_task(small_image, seed, amount):
    """BlockSamplingTask must sample consistently across features and record a sampling mask."""
    t, h, w, d = 10, *small_image.shape, 5
    eop = EOPatch()
    eop.data["bands"] = np.arange(t * h * w * d).reshape(t, h, w, d)
    eop.mask_timeless["raster"] = small_image.reshape(small_image.shape + (1,))

    task = BlockSamplingTask(
        [(FeatureType.DATA, "bands", "SAMPLED_DATA"),
         (FeatureType.MASK_TIMELESS, "raster", "SAMPLED_LABELS")],
        amount=amount,
        mask_of_samples=(FeatureType.MASK_TIMELESS, "sampling_mask"),
    )
    task.execute(eop, seed=seed)
    # a float amount is interpreted as a fraction of all pixels
    expected_amount = amount if isinstance(amount, int) else round(np.prod(small_image.shape) * amount)

    # assert features, labels and sampled rows and cols are added to eopatch
    assert "SAMPLED_LABELS" in eop.mask_timeless, "Labels not added to eopatch"
    assert "SAMPLED_DATA" in eop.data, "Features not added to eopatch"
    assert "sampling_mask" in eop.mask_timeless, "Mask of sampling not generated"
    # check validity of sampling
    assert eop.data["SAMPLED_DATA"].shape == (t, expected_amount, 1, d), "Incorrect features size"
    assert eop.mask_timeless["SAMPLED_LABELS"].shape == (expected_amount, 1, 1), "Incorrect number of samples"
    assert eop.mask_timeless["sampling_mask"].shape == (h, w, 1), "Sampling mask of incorrect size"

    # the sampled values must be exactly those under the mask, with equal multiplicity
    sampled_uniques, sampled_counts = np.unique(eop.data["SAMPLED_DATA"], return_counts=True)
    masked = eop.mask_timeless["sampling_mask"].squeeze(axis=2) == 1
    masked_uniques, masked_counts = np.unique(eop.data["bands"][:, masked, :], return_counts=True)
    assert_array_equal(sampled_uniques, masked_uniques,
                       err_msg="Sampling mask not correctly describing sampled points")
    assert_array_equal(sampled_counts, masked_counts,
                       err_msg="Sampling mask not correctly describing sampled points")
def test_double_logistic_approximation(example_eopatch):
    """Fitted double-logistic parameters for a single pixel must match reference values."""
    data = example_eopatch.data["NDVI"]
    timestamps = example_eopatch.timestamp
    mask = example_eopatch.mask["IS_VALID"]
    # restrict to the 2016 frames, plus one frame past the last 2016 index
    indices = list(np.nonzero([t.year == 2016 for t in timestamps])[0])
    start, stop = indices[0], indices[-1] + 2

    # build a minimal single-pixel patch, reshaped back to (t, 1, 1, 1)
    eopatch = EOPatch()
    eopatch.timestamp = timestamps[start:stop]
    eopatch.data["TEST"] = np.reshape(data[start:stop, 0, 0, 0], (-1, 1, 1, 1))
    eopatch.mask["IS_VALID"] = np.reshape(mask[start:stop, 0, 0, 0], (-1, 1, 1, 1))
    eopatch = DoublyLogisticApproximationTask(
        feature=(FeatureType.DATA, "TEST"),
        valid_mask=(FeatureType.MASK, "IS_VALID"),
        new_feature=(FeatureType.DATA_TIMELESS, "TEST_OUT"),
    ).execute(eopatch)

    # compare each fitted coefficient against the reference within a loose tolerance
    names = "c1", "c2", "a1", "a2", "a3", "a4", "a5"
    values = eopatch.data_timeless["TEST_OUT"].squeeze()
    expected_values = 0.207, 0.464, 0.686, 0.222, 1.204, 0.406, 15.701
    delta = 0.1
    for name, value, expected_value in zip(names, values, expected_values):
        assert value == approx(expected_value, abs=delta), f"Missmatch in value of {name}"
def test_meteoblue_vector_task(mocker):
    """Unit test for MeteoblueVectorTask"""
    # Replace the SDK network call with a canned response loaded from disk
    mocker.patch(
        "meteoblue_dataset_sdk.Client.querySync",
        return_value=_load_meteoblue_client_response(
            "test_meteoblue_vector_input.bin"),
    )

    feature = FeatureType.VECTOR, "WEATHER-DATA"
    meteoblue_task = MeteoblueVectorTask(feature, "dummy-api-key", query=VECTOR_QUERY, units=UNITS)

    eopatch = EOPatch(bbox=BBOX)
    eopatch = meteoblue_task.execute(eopatch, time_interval=TIME_INTERVAL)

    assert eopatch.bbox == BBOX

    # the decoded response must arrive as a WGS84 GeoDataFrame with 18 rows
    data = eopatch[feature]
    assert len(data.index) == 18
    assert data.crs.to_epsg() == 4326

    # summary statistics of one column pin the decoded content
    data_series = data["11_2 m above gnd_mean"]
    assert round(data_series.mean(), 5) == 23.75278
    assert round(data_series.std(), 5) == 2.99785
def test_add_feature(self):
    """An array assigned to a data feature must be retrievable unchanged."""
    band_values = np.arange(2 * 3 * 3 * 2).reshape(2, 3, 3, 2)

    patch = EOPatch()
    patch.data['bands'] = band_values

    self.assertTrue(np.array_equal(patch.data['bands'], band_values),
                    msg="Data numpy array not stored")
def test_overwriting_non_empty_folder(self):
    """Saving over existing data must respect the different overwrite permissions."""
    for fs_loader in self.filesystem_loaders:
        with fs_loader() as temp_fs:
            # re-saving the same patch succeeds with either overwrite permission
            self.eopatch.save('/', filesystem=temp_fs)
            self.eopatch.save('/', filesystem=temp_fs,
                              overwrite_permission=OverwritePermission.OVERWRITE_FEATURES)
            self.eopatch.save('/', filesystem=temp_fs,
                              overwrite_permission=OverwritePermission.OVERWRITE_PATCH)

            # ADD_ONLY works while the feature is new, and fails on a repeated save
            add_eopatch = EOPatch()
            add_eopatch.data['some data'] = np.empty((2, 3, 3, 2))
            add_eopatch.save('/', filesystem=temp_fs,
                             overwrite_permission=OverwritePermission.ADD_ONLY)
            with self.assertRaises(ValueError):
                add_eopatch.save('/', filesystem=temp_fs,
                                 overwrite_permission=OverwritePermission.ADD_ONLY)

            # reloading must yield the union of both saved patches
            new_eopatch = EOPatch.load('/', filesystem=temp_fs, lazy_loading=False)
            self.assertEqual(new_eopatch, self.eopatch + add_eopatch)
def test_vector_feature_types(self):
    """Vector features reject non-geometric input and normalize GeoSeries to GeoDataFrame."""
    eop = EOPatch()

    invalid_entries = [{}, [], 0, None]
    for feature_type in FeatureTypeSet.VECTOR_TYPES:
        for entry in invalid_entries:
            with self.assertRaises(
                    ValueError,
                    msg='Invalid entry {} for {} should raise an error'.
                    format(entry, feature_type)):
                eop[feature_type]['TEST'] = entry

    crs_test = {'init': 'epsg:4326'}
    geo_test = GeoSeries(
        [BBox((1, 2, 3, 4), crs=CRS.WGS84).get_geometry()], crs=crs_test)

    # assigning a GeoSeries must be converted to a GeoDataFrame with geometry and CRS kept
    eop.vector_timeless['TEST'] = geo_test
    self.assertTrue(isinstance(eop.vector_timeless['TEST'], GeoDataFrame),
                    'GeoSeries should be parsed into GeoDataFrame')
    self.assertTrue(hasattr(eop.vector_timeless['TEST'], 'geometry'),
                    'Feature should have geometry attribute')
    self.assertEqual(eop.vector_timeless['TEST'].crs, crs_test,
                     'GeoDataFrame should still contain the crs')

    # time-dependent vector features additionally require a TIMESTAMP column
    with self.assertRaises(
            ValueError,
            msg='Should fail because there is no TIMESTAMP column'):
        eop.vector['TEST'] = geo_test
def test_simplified_feature_operations(self):
    """Index access with a (FeatureType, name) pair must store and retrieve data."""
    feature = FeatureType.DATA, 'TEST-BANDS'
    sample_values = np.arange(2 * 3 * 3 * 2).reshape(2, 3, 3, 2)

    patch = EOPatch()
    patch[feature] = sample_values

    self.assertTrue(np.array_equal(patch[feature], sample_values),
                    msg="Data numpy array not stored")