def test_create_aggregation_for_netcdf_resource_title(self):
    """Setting a valid nc file to NetCDF aggregation replaces a default
    'untitled resource' title with the title extracted from the nc file."""
    self.res_title = 'untitled resource'
    self.create_composite_resource(self.netcdf_file)
    # resource still carries the default title and its single uploaded file
    self.assertEqual(self.composite_resource.metadata.title.value, self.res_title)
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    nc_res_file = self.composite_resource.files.first()
    # no logical file association and no aggregation exist yet
    self.assertFalse(nc_res_file.has_logical_file)
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    # create the NetCDF aggregation - metadata extraction happens here
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
    # the title extracted from the netcdf data must now be the resource title
    extracted_title = "Snow water equivalent estimation at TWDEF site from Oct 2009 to June 2010"
    self.assertEqual(self.composite_resource.metadata.title.value, extracted_title)
    self.composite_resource.delete()
def _test_create_aggregation_from_folder(self, folder_to_test):
    """Helper: upload the nc file into *folder_to_test*, create a NetCDF
    aggregation from that folder, and verify all files join the aggregation
    without any new folder being created.

    Fix: the parameter was misspelled ``foldet_to_test``; renamed to
    ``folder_to_test`` (callers of this private helper pass it positionally).
    """
    self.create_composite_resource()
    self.assertEqual(self.composite_resource.files.count(), 0)
    # create a folder to upload the nc file there
    new_folder = folder_to_test
    ResourceFile.create_folder(self.composite_resource, new_folder)
    # add the nc file to the resource at the above folder
    res_file = self.add_file_to_resource(file_to_add=self.netcdf_file,
                                         upload_folder=new_folder)
    self.assertEqual(res_file.file_folder, new_folder)
    # resource should have 1 file now
    self.assertEqual(self.composite_resource.files.count(), 1)
    for res_file in self.composite_resource.files.all():
        self.assertFalse(res_file.has_logical_file)
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    # create the aggregation from the folder
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user,
                                    folder_path=new_folder)
    self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
    for res_file in self.composite_resource.files.all():
        # each resource file is now part of the aggregation (logical file)
        self.assertTrue(res_file.has_logical_file)
        # each resource file keeps the same folder - no new folder created
        self.assertEqual(res_file.file_folder, new_folder)
    self.composite_resource.delete()
def test_create_aggregation_from_invalid_nc_file_2(self):
    """A file already belonging to a NetCDF aggregation cannot be set to
    NetCDF file type a second time - the attempt must raise ValidationError."""
    self.create_composite_resource(self.netcdf_file)
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    uploaded_file = self.composite_resource.files.first()
    # the uploaded file starts out with no logical file association
    self.assertEqual(uploaded_file.has_logical_file, False)
    # first conversion succeeds; a second resource file appears
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, uploaded_file.id)
    self.assertEqual(self.composite_resource.files.all().count(), 2)
    # the nc file is now associated with a NetCDF logical file
    nc_file = hydroshare.utils.get_resource_files_by_extension(
        self.composite_resource, '.nc')[0]
    self.assertEqual(nc_file.has_logical_file, True)
    self.assertEqual(nc_file.logical_file_type_name, "NetCDFLogicalFile")
    # converting the same nc file again must fail
    with self.assertRaises(ValidationError):
        NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_file.id)
    self.composite_resource.delete()
def test_remove_aggregation(self):
    """Deleting a NetCDFLogicalFile via remove_aggregation() removes the
    aggregation and its metadata but keeps the resource files and folder."""
    self.create_composite_resource(self.netcdf_file)
    first_file = self.composite_resource.files.first()
    expected_folder_name, _ = os.path.splitext(first_file.file_name)
    # create the NetCDF aggregation from the nc file
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, first_file.id)
    # exactly one aggregation and one metadata object must exist
    self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
    logical_file = NetCDFLogicalFile.objects.first()
    # the aggregation owns both resource files
    self.assertEqual(logical_file.files.all().count(), 2)
    self.assertEqual(self.composite_resource.files.all().count(), 2)
    self.assertEqual(set(self.composite_resource.files.all()),
                     set(logical_file.files.all()))
    # remove the aggregation (logical file) object
    logical_file.remove_aggregation()
    # the aggregation and its metadata are gone ...
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
    # ... but the files survive, still inside the aggregation folder
    self.assertEqual(self.composite_resource.files.all().count(), 2)
    for f in self.composite_resource.files.all():
        self.assertEqual(f.file_folder, expected_folder_name)
    self.composite_resource.delete()
def test_create_aggregation_from_nc_file_1(self):
    """Create a NetCDF aggregation from an nc file at the root of the
    resource and verify the extracted metadata and file level keywords."""
    self.create_composite_resource(self.netcdf_file)
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    nc_file = self.composite_resource.files.first()
    # neither a logical file association nor an aggregation exists yet
    self.assertEqual(nc_file.has_logical_file, False)
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    expected_folder_name, _ = os.path.splitext(nc_file.file_name)
    # create the aggregation - this also extracts metadata
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_file.id)
    # verify the extracted metadata
    assert_netcdf_file_type_metadata(self, self.res_title,
                                     aggr_folder=expected_folder_name)
    # verify file level keywords
    nc_file = self.composite_resource.files.first()
    logical_file = nc_file.logical_file
    self.assertEqual(len(logical_file.metadata.keywords), 1)
    self.assertEqual(logical_file.metadata.keywords[0], 'Snow water equivalent')
    self.composite_resource.delete()
def test_set_file_type_to_netcdf(self):
    """Setting a valid nc file (initially a GenericLogicalFile) to NetCDF
    file type extracts file type metadata, including keywords."""
    # here we are using a valid nc file for setting it
    # to NetCDF file type which includes metadata extraction
    # NOTE(review): handle kept on self; presumably closed in tearDown - confirm
    self.netcdf_file_obj = open(self.netcdf_file, 'r')
    self._create_composite_resource()
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # check that the resource file is associated with GenericLogicalFile
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
    # check that there is one GenericLogicalFile object
    self.assertEqual(GenericLogicalFile.objects.count(), 1)
    # check that there is no NetCDFLogicalFile object
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    # set the nc file to NetCDF file type
    # NOTE(review): argument order (resource, file_id, user) differs from the
    # (resource, user, file_id) order used elsewhere in this file - presumably
    # an older set_file_type signature; confirm against the API in use
    NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # test extracted metadata
    res_title = 'Test NetCDF File Type Metadata'
    assert_netcdf_file_type_metadata(self, res_title)
    # test file level keywords
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    self.assertEqual(len(logical_file.metadata.keywords), 1)
    self.assertEqual(logical_file.metadata.keywords[0], 'Snow water equivalent')
    self.composite_resource.delete()
def test_set_file_type_to_netcdf_resource_title(self):
    """Setting an nc file to NetCDF file type replaces a default
    'untitled resource' title with the title extracted from the nc file."""
    # here we are using a valid nc file for setting it
    # to NetCDF file type which includes metadata extraction
    # and testing that the resource title gets set with the
    # extracted metadata if the original title is 'untitled resource'
    # NOTE(review): handle kept on self; presumably closed in tearDown - confirm
    self.netcdf_file_obj = open(self.netcdf_file, 'r')
    self._create_composite_resource(title='untitled resource')
    self.assertEqual(self.composite_resource.metadata.title.value, 'untitled resource')
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # check that the resource file is associated with GenericLogicalFile
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
    # check that there is one GenericLogicalFile object
    self.assertEqual(GenericLogicalFile.objects.count(), 1)
    # check that there is no NetCDFLogicalFile object
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    # set the nc file to NetCDF file type
    # NOTE(review): older (resource, file_id, user) argument order - see the
    # (resource, user, file_id) order used elsewhere in this file; confirm
    NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # test resource title was updated with the extracted netcdf data
    res_title = "Snow water equivalent estimation at TWDEF site from Oct 2009 to June 2010"
    self.assertEqual(self.composite_resource.metadata.title.value, res_title)
    self.composite_resource.delete()
def test_aggregation_file_rename(self):
    """Renaming any resource file that is part of a NetCDF aggregation must
    raise DRF_ValidationError.

    Bug fixed: ``os.path.splitext`` returns the extension *with* its leading
    dot, so the previous ``'{1}.{2}'`` and ``'some_netcdf.{}'`` format strings
    produced double-dot paths (e.g. ``some_netcdf..nc``), meaning the rename
    source/target paths did not match any real file.
    """
    self.create_composite_resource()
    self.add_file_to_resource(file_to_add=self.netcdf_file)
    res_file = self.composite_resource.files.first()
    base_file_name, ext = os.path.splitext(res_file.file_name)
    expected_folder_name = base_file_name
    # create aggregation from the nc file
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
    # renaming any file that belongs to the aggregation raises an exception
    self.assertEqual(self.composite_resource.files.count(), 2)
    for res_file in self.composite_resource.files.all():
        base_file_name, ext = os.path.splitext(res_file.file_name)
        self.assertEqual(res_file.file_folder, expected_folder_name)
        # ext already contains the leading dot - do not add another one
        src_path = 'data/contents/{0}/{1}{2}'.format(expected_folder_name,
                                                     base_file_name, ext)
        new_file_name = 'some_netcdf{}'.format(ext)
        self.assertNotEqual(res_file.file_name, new_file_name)
        tgt_path = 'data/contents/{}/{}'.format(expected_folder_name, new_file_name)
        with self.assertRaises(DRF_ValidationError):
            move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                          src_path, tgt_path)
    self.composite_resource.delete()
def test_aggregation_metadata_on_resource_delete(self):
    """Deleting the composite resource deletes all metadata associated with
    its NetCDFLogicalFile aggregation along with it."""
    # test that when the composite resource is deleted
    # all metadata associated with NetCDFLogicalFile Type is deleted
    self.create_composite_resource(self.netcdf_file)
    res_file = self.composite_resource.files.first()
    # create the aggregation - extracts metadata from the nc file
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
    # test that we have one logical file of type NetCDFLogicalFile as a result
    # of metadata extraction
    self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
    # test that we have the metadata elements
    # there should be 4 Coverage objects - 2 at the resource level and
    # the other 2 at the file type level
    self.assertEqual(Coverage.objects.count(), 4)
    self.assertEqual(OriginalCoverage.objects.count(), 1)
    self.assertEqual(Variable.objects.count(), 5)
    # delete resource
    hydroshare.delete_resource(self.composite_resource.short_id)
    # test that we have no logical file of type NetCDFLogicalFile
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
    # test that all metadata deleted
    self.assertEqual(Coverage.objects.count(), 0)
    self.assertEqual(OriginalCoverage.objects.count(), 0)
    self.assertEqual(Variable.objects.count(), 0)
def test_file_metadata_on_resource_delete(self):
    """Deleting the composite resource deletes all metadata associated with
    its NetCDF file type along with it."""
    # test that when the composite resource is deleted
    # all metadata associated with NetCDFFileType is deleted
    # NOTE(review): handle kept on self; presumably closed in tearDown - confirm
    self.netcdf_file_obj = open(self.netcdf_file, 'r')
    self._create_composite_resource()
    res_file = self.composite_resource.files.first()
    # create the file type - extracts metadata from the nc file
    # NOTE(review): older (resource, file_id, user) argument order - see the
    # (resource, user, file_id) order used elsewhere in this file; confirm
    NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # test that we have one logical file of type NetCDFLogicalFile as a result
    # of metadata extraction
    self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
    # test that we have the metadata elements
    # there should be 4 Coverage objects - 2 at the resource level and
    # the other 2 at the file type level
    self.assertEqual(Coverage.objects.count(), 4)
    self.assertEqual(OriginalCoverage.objects.count(), 1)
    self.assertEqual(Variable.objects.count(), 5)
    # delete resource
    hydroshare.delete_resource(self.composite_resource.short_id)
    # test that we have no logical file of type NetCDFLogicalFile
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
    # test that all metadata deleted
    self.assertEqual(Coverage.objects.count(), 0)
    self.assertEqual(OriginalCoverage.objects.count(), 0)
    self.assertEqual(Variable.objects.count(), 0)
def test_aggregation_file_move(self):
    """Resource files belonging to a NetCDF aggregation cannot be moved to
    another folder - such a move must raise DRF_ValidationError."""
    self.create_composite_resource()
    self.add_file_to_resource(file_to_add=self.netcdf_file)
    nc_file = self.composite_resource.files.first()
    # create the aggregation using the nc file
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_file.id)
    # two files now exist, both inside the auto-created aggregation folder
    self.assertEqual(self.composite_resource.files.count(), 2)
    nc_file = self.composite_resource.files.first()
    aggregation_folder, _ = os.path.splitext(nc_file.file_name)
    self.assertEqual(nc_file.file_folder, aggregation_folder)
    # make a destination folder to attempt the move into
    destination_folder = 'netcdf_aggr'
    ResourceFile.create_folder(self.composite_resource, destination_folder)
    tgt_path = 'data/contents/{}'.format(destination_folder)
    # moving any of the aggregation's files into the new folder must fail
    for res_file in self.composite_resource.files.all():
        with self.assertRaises(DRF_ValidationError):
            src_path = os.path.join('data', 'contents', res_file.short_path)
            move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                          src_path, tgt_path)
    self.composite_resource.delete()
def test_create_aggregation_from_nc_file_2(self):
    """Create a NetCDF aggregation from an nc file that lives inside a
    folder (not at the root) and verify the extracted metadata."""
    self.create_composite_resource()
    upload_folder = 'netcdf_aggr'
    ResourceFile.create_folder(self.composite_resource, upload_folder)
    # upload the nc file into the folder
    self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=upload_folder)
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    nc_file = self.composite_resource.files.first()
    # neither a logical file association nor an aggregation exists yet
    self.assertEqual(nc_file.has_logical_file, False)
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    # create the aggregation - this also extracts metadata
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_file.id)
    # verify the extracted metadata
    assert_netcdf_file_type_metadata(self, self.res_title, aggr_folder=upload_folder)
    # verify file level keywords
    nc_file = self.composite_resource.files.first()
    logical_file = nc_file.logical_file
    self.assertEqual(len(logical_file.metadata.keywords), 1)
    self.assertEqual(logical_file.metadata.keywords[0], 'Snow water equivalent')
    self.composite_resource.delete()
def test_aggregation_folder_rename(self):
    """Renaming the aggregation folder updates the aggregation name and the
    short paths of the aggregation's metadata and resource map xml files."""
    # test changes to aggregation name, aggregation metadata xml file path, and aggregation
    # resource map xml file path on folder name change
    self.create_composite_resource()
    self.add_file_to_resource(file_to_add=self.netcdf_file)
    res_file = self.composite_resource.files.first()
    base_file_name, ext = os.path.splitext(res_file.file_name)
    expected_folder_name = base_file_name
    # create aggregation from the nc file
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
    self.assertEqual(self.composite_resource.files.count(), 2)
    for res_file in self.composite_resource.files.all():
        self.assertEqual(res_file.file_folder, expected_folder_name)
    # test aggregation name
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    self.assertEqual(logical_file.aggregation_name, res_file.file_folder)
    # test aggregation xml file paths
    expected_meta_file_path = '{}/{}_meta.xml'.format(base_file_name, base_file_name)
    self.assertEqual(logical_file.metadata_short_file_path, expected_meta_file_path)
    expected_map_file_path = '{}/{}_resmap.xml'.format(base_file_name, base_file_name)
    self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)
    # test renaming folder
    src_path = 'data/contents/{}'.format(expected_folder_name)
    tgt_path = 'data/contents/{}_1'.format(expected_folder_name)
    move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                  src_path, tgt_path)
    for res_file in self.composite_resource.files.all():
        self.assertEqual(res_file.file_folder, '{}_1'.format(expected_folder_name))
    # test aggregation name update
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    self.assertEqual(logical_file.aggregation_name, res_file.file_folder)
    # test aggregation xml file paths follow the renamed folder
    expected_meta_file_path = '{}_1/{}_1_meta.xml'.format(expected_folder_name,
                                                          expected_folder_name)
    self.assertEqual(logical_file.metadata_short_file_path,
                     expected_meta_file_path)
    expected_map_file_path = '{}_1/{}_1_resmap.xml'.format(expected_folder_name,
                                                           expected_folder_name)
    self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)
    self.composite_resource.delete()
def test_main_file(self):
    """The nc file is reported as the main file of a NetCDF aggregation."""
    self.create_composite_resource(self.netcdf_file)
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    nc_file = self.composite_resource.files.first()
    self.assertEqual(nc_file.has_logical_file, False)
    # create the aggregation
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_file.id)
    self.assertEqual(1, NetCDFLogicalFile.objects.count())
    logical_file = NetCDFLogicalFile.objects.first()
    # the main file is the nc file itself
    self.assertEqual(".nc", logical_file.get_main_file_type())
    self.assertEqual(self.netcdf_file_name, logical_file.get_main_file.file_name)
def test_nc_set_file_type_to_netcdf(self):
    """Federated-zone variant: create a composite resource from a federated
    nc file and set it to NetCDF file type with metadata extraction."""
    # only do federation testing when REMOTE_USE_IRODS is True and irods docker containers
    # are set up properly
    # NOTE(review): the early return silently passes the test when federation
    # is unavailable rather than reporting it as skipped
    if not super(NetCDFFileTypeMetaDataTest, self).is_federated_irods_available():
        return
    # here we are using a valid netcdf file for setting it
    # to NetCDF file type which includes metadata extraction
    fed_test_file_full_path = '/{zone}/home/{username}/{fname}'.format(
        zone=settings.HS_USER_IRODS_ZONE, username=self.user.username,
        fname=self.netcdf_file_name)
    res_upload_files = []
    fed_res_path = hydroshare.utils.get_federated_zone_home_path(
        fed_test_file_full_path)
    res_title = 'Federated Composite Resource NetCDF File Type Testing'
    self.composite_resource = hydroshare.create_resource(
        resource_type='CompositeResource',
        owner=self.user,
        title=res_title,
        files=res_upload_files,
        fed_res_file_names=[fed_test_file_full_path],
        fed_res_path=fed_res_path,
        fed_copy_or_move='copy',
        metadata=[]
    )
    # test resource is created on federated zone
    self.assertNotEqual(self.composite_resource.resource_federation_path, '')
    # set the logical file -which get sets as part of the post resource creation signal
    resource_post_create_actions(resource=self.composite_resource, user=self.user,
                                 metadata=self.composite_resource.metadata)
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # check that the resource file is associated with GenericLogicalFile
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
    # check that there is one GenericLogicalFile object
    self.assertEqual(GenericLogicalFile.objects.count(), 1)
    fed_file_path = "data/contents/{}".format(self.netcdf_file_name)
    self.assertEqual(res_file.fed_resource_file_name_or_path, fed_file_path)
    # set the nc file to NetCDF file type
    # NOTE(review): older (resource, file_id, user) argument order - see the
    # (resource, user, file_id) order used elsewhere in this file; confirm
    NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # test extracted netcdf file type metadata
    assert_netcdf_file_type_metadata(self, res_title)
def test_create_aggregation_from_nc_file_3(self):
    """Create a NetCDF aggregation from an nc file inside a folder that also
    contains a file not belonging to the aggregation.

    Layout before: /my_folder/netcdf_valid.nc and /my_folder/netcdf_invalid.nc
    Layout after:  /my_folder/netcdf_valid/netcdf_valid.nc (in aggregation)
                   /my_folder/netcdf_invalid.nc (unchanged)

    Bug fixed: ``self.assertTrue(logical_file.metadata, NetCDFLogicalFile)``
    passed the class as the assertion *message* argument, so only truthiness
    of ``metadata`` was checked; it now asserts the metadata object's type.
    """
    self.create_composite_resource()
    new_folder = 'my_folder'
    ResourceFile.create_folder(self.composite_resource, new_folder)
    # add the nc file to the resource at the above folder
    self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # no aggregation exists yet
    self.assertEqual(res_file.has_logical_file, False)
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    # add another file to the same folder
    self.add_file_to_resource(file_to_add=self.netcdf_invalid_file,
                              upload_folder=new_folder)
    self.assertEqual(self.composite_resource.files.all().count(), 2)
    # set the nc file to NetCDF file type
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
    self.assertEqual(self.composite_resource.files.all().count(), 3)
    # test logical file/aggregation
    self.assertEqual(len(self.composite_resource.logical_files), 1)
    logical_file = self.composite_resource.logical_files[0]
    self.assertEqual(logical_file.files.count(), 2)
    base_nc_file_name, _ = os.path.splitext(self.netcdf_file_name)
    expected_file_folder = '{0}/{1}'.format(new_folder, base_nc_file_name)
    for res_file in logical_file.files.all():
        self.assertEqual(res_file.file_folder, expected_file_folder)
    self.assertTrue(isinstance(logical_file, NetCDFLogicalFile))
    # fixed: previously NetCDFLogicalFile was passed as the msg argument
    self.assertTrue(isinstance(logical_file.metadata, NetCDFFileMetaData))
    # the file that is not part of the netcdf aggregation stays in the
    # parent folder
    other_res_file = None
    for res_file in self.composite_resource.files.all():
        if not res_file.has_logical_file:
            other_res_file = res_file
            break
    self.assertEqual(other_res_file.file_folder, new_folder)
    self.composite_resource.delete()
def test_nc_set_file_type_to_netcdf(self): super(NetCDFFileTypeMetaDataTest, self).assert_federated_irods_available() # here we are using a valid netcdf file for setting it # to NetCDF file type which includes metadata extraction fed_test_file_full_path = '/{zone}/home/{username}/{fname}'.format( zone=settings.HS_USER_IRODS_ZONE, username=self.user.username, fname=self.netcdf_file_name) res_upload_files = [] fed_res_path = hydroshare.utils.get_federated_zone_home_path( fed_test_file_full_path) res_title = 'Federated Composite Resource NetCDF File Type Testing' self.composite_resource = hydroshare.create_resource( resource_type='CompositeResource', owner=self.user, title=res_title, files=res_upload_files, source_names=[fed_test_file_full_path], fed_res_path=fed_res_path, move=False, metadata=[], auto_aggregate=False) # test resource is created on federated zone self.assertNotEqual(self.composite_resource.resource_federation_path, '') self.assertEqual(self.composite_resource.files.all().count(), 1) res_file = self.composite_resource.files.first() base_file_name, _ = os.path.splitext(res_file.file_name) expected_folder_name = base_file_name # check that the resource file is not associated with any logical file self.assertEqual(res_file.has_logical_file, False) fed_file_path = "{}/{}".format(self.composite_resource.file_path, self.netcdf_file_name) self.assertEqual(res_file.storage_path, fed_file_path) # set the nc file to NetCDF file type NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id) # test extracted netcdf file type metadata assert_netcdf_file_type_metadata(self, res_title, aggr_folder=expected_folder_name)
def _test_file_metadata_on_file_delete(self, ext):
    """Helper: deleting the aggregation file with extension *ext* deletes the
    NetCDF aggregation and its file-level metadata, but resource-level
    coverages remain."""
    self.create_composite_resource(self.netcdf_file)
    res_file = self.composite_resource.files.first()
    # create the aggregation - extracts metadata from the nc file
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
    # test that we have one logical file of type NetCDFLogicalFile
    self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    # there should be 2 coverage elements - one spatial and the other one temporal
    self.assertEqual(logical_file.metadata.coverages.all().count(), 2)
    self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
    self.assertNotEqual(logical_file.metadata.temporal_coverage, None)
    # there should be one original coverage
    self.assertNotEqual(logical_file.metadata.originalCoverage, None)
    # testing extended metadata element: variables
    self.assertEqual(logical_file.metadata.variables.all().count(), 5)
    # there should be 4 coverage objects - 2 at the resource level
    # and the other 2 at the file type level
    self.assertEqual(Coverage.objects.count(), 4)
    self.assertEqual(OriginalCoverage.objects.count(), 1)
    self.assertEqual(Variable.objects.count(), 5)
    # delete content file specified by extension (ext parameter)
    res_file = hydroshare.utils.get_resource_files_by_extension(
        self.composite_resource, ext)[0]
    hydroshare.delete_resource_file(self.composite_resource.short_id,
                                    res_file.id,
                                    self.user)
    # test that we don't have logical file of type NetCDFLogicalFile Type
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
    # test that all metadata deleted - there should be still 2 resource level coverages
    self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
    self.assertEqual(Coverage.objects.count(), 2)
    self.assertEqual(OriginalCoverage.objects.count(), 0)
    self.assertEqual(Variable.objects.count(), 0)
    self.composite_resource.delete()
def test_aggregation_folder_delete(self):
    """Deleting the auto-created aggregation folder deletes its files, the
    NetCDFLogicalFile object, and the aggregation-level metadata."""
    # when a file is set to NetCDFLogicalFile type
    # system automatically creates folder using the name of the file
    # that was used to set the file type
    # Here we need to test that when that folder gets deleted, all files
    # in that folder gets deleted, the logicalfile object gets deleted and
    # the associated metadata objects get deleted
    self.create_composite_resource(self.netcdf_file)
    res_file = self.composite_resource.files.first()
    base_file_name, _ = os.path.splitext(res_file.file_name)
    expected_folder_name = base_file_name
    # create the aggregation - extracts metadata from the nc file
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
    # test that we have one logical file of type NetCDFLogicalFile as a result
    # of metadata extraction
    self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
    # should have one NetCDFFileMetadata object
    self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
    # there should be 2 content files
    self.assertEqual(self.composite_resource.files.count(), 2)
    # test that there are metadata associated with the logical file
    self.assertEqual(Coverage.objects.count(), 4)
    self.assertEqual(OriginalCoverage.objects.count(), 1)
    self.assertEqual(Variable.objects.count(), 5)
    # delete the folder for the logical file
    folder_path = "data/contents/{}".format(expected_folder_name)
    remove_folder(self.user, self.composite_resource.short_id, folder_path)
    # there should be no content files
    self.assertEqual(self.composite_resource.files.count(), 0)
    # there should not be any netCDF logical file or metadata file
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
    # test that all metadata associated with the logical file got deleted - there should
    # still be 2 resource level coverages
    self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
    self.assertEqual(Coverage.objects.count(), 2)
    self.assertEqual(OriginalCoverage.objects.count(), 0)
    self.assertEqual(Variable.objects.count(), 0)
    self.composite_resource.delete()
def _test_invalid_file(self):
    """Helper: setting the resource's (invalid) uploaded file to NetCDF file
    type must raise ValidationError and leave the file untouched."""
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    uploaded_file = self.composite_resource.files.first()
    # the uploaded file is not part of any logical file
    self.assertEqual(uploaded_file.has_logical_file, False)
    # attempting to create a NetCDF aggregation from the invalid file fails
    with self.assertRaises(ValidationError):
        NetCDFLogicalFile.set_file_type(self.composite_resource, self.user,
                                        uploaded_file.id)
    # the invalid file did not get deleted
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    # and it is still not associated with any logical file
    self.assertEqual(uploaded_file.has_logical_file, False)
def _test_file_metadata_on_file_delete(self, ext):
    """Helper: deleting the file-type file with extension *ext* deletes the
    NetCDF logical file and all of its metadata."""
    # NOTE(review): handle kept on self; presumably closed in tearDown - confirm
    self.netcdf_file_obj = open(self.netcdf_file, 'r')
    self._create_composite_resource()
    res_file = self.composite_resource.files.first()
    # create the file type - extracts metadata from the nc file
    # NOTE(review): older (resource, file_id, user) argument order - see the
    # (resource, user, file_id) order used elsewhere in this file; confirm
    NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # test that we have one logical file of type NetCDFLogicalFile
    self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    # there should be 2 coverage elements - one spatial and the other one temporal
    self.assertEqual(logical_file.metadata.coverages.all().count(), 2)
    self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
    self.assertNotEqual(logical_file.metadata.temporal_coverage, None)
    # there should be one original coverage
    self.assertNotEqual(logical_file.metadata.originalCoverage, None)
    # testing extended metadata element: variables
    self.assertEqual(logical_file.metadata.variables.all().count(), 5)
    # there should be 4 coverage objects - 2 at the resource level
    # and the other 2 at the file type level
    self.assertEqual(Coverage.objects.count(), 4)
    self.assertEqual(OriginalCoverage.objects.count(), 1)
    self.assertEqual(Variable.objects.count(), 5)
    # delete content file specified by extension (ext parameter)
    res_file = hydroshare.utils.get_resource_files_by_extension(
        self.composite_resource, ext)[0]
    hydroshare.delete_resource_file(self.composite_resource.short_id,
                                    res_file.id,
                                    self.user)
    # test that we don't have logical file of type NetCDFLogicalFile Type
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
    # test that all metadata deleted
    self.assertEqual(Coverage.objects.count(), 0)
    self.assertEqual(OriginalCoverage.objects.count(), 0)
    self.assertEqual(Variable.objects.count(), 0)
    self.composite_resource.delete()
def test_aggregation_folder_sub_folder_not_allowed(self):
    """A folder cannot be created inside a NetCDF aggregation folder."""
    self.create_composite_resource()
    self.add_file_to_resource(file_to_add=self.netcdf_file)
    nc_file = self.composite_resource.files.first()
    self.assertEqual(nc_file.file_folder, None)
    # creating the aggregation moves the nc file into its own folder
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_file.id)
    nc_file = self.composite_resource.files.first()
    self.assertNotEqual(nc_file.file_folder, None)
    # creating a sub folder inside the aggregation folder must be rejected
    sub_folder = '{}/sub_folder'.format(nc_file.file_folder)
    with self.assertRaises(DRF_ValidationError):
        ResourceFile.create_folder(self.composite_resource, sub_folder)
    self.composite_resource.delete()
def test_bag_ingestion(self):
    """Unzip a resource-bag zip with metadata ingestion enabled and verify the
    ingested metadata graphs match the original metadata files from the zip.

    Uses the deprecated-alias fix: ``assertEqual`` instead of ``assertEquals``.
    """
    from hs_core.views.utils import unzip_file

    def normalize_metadata(metadata_str):
        """Prepares metadata string to match resource id and hydroshare url of original"""
        return metadata_str\
            .replace(current_site_url(), "http://www.hydroshare.org")\
            .replace(res.short_id, "97523bdb7b174901b3fc2d89813458f1")

    # create empty resource
    res = resource.create_resource(
        'CompositeResource',
        self.user,
        'My Test Resource'
    )
    full_paths = {}
    files_to_upload = [
        UploadedFile(file=open('hs_core/tests/data/test_resource_metadata_files.zip', 'rb'),
                     name="test_resource_metadata_files.zip")]
    add_resource_files(res.short_id, *files_to_upload, full_paths=full_paths)
    unzip_file(self.user, res.short_id, "data/contents/test_resource_metadata_files.zip",
               True, overwrite=True, auto_aggregate=True, ingest_metadata=True)

    def compare_metadatas(new_metadata_str, original_metadata_file):
        """Assert the ingested metadata graph equals the original file's graph."""
        original_graph = Graph()
        with open(os.path.join(self.extracted_directory, original_metadata_file), "r") as f:
            original_graph = original_graph.parse(data=f.read())
        new_graph = Graph()
        new_graph = new_graph.parse(data=normalize_metadata(new_metadata_str))

        # remove modified date, they'll never match
        subject = new_graph.value(predicate=RDF.type, object=DCTERMS.modified)
        new_graph.remove((subject, None, None))
        subject = original_graph.value(predicate=RDF.type, object=DCTERMS.modified)
        original_graph.remove((subject, None, None))

        for (new_triple, original_triple) in _squashed_graphs_triples(new_graph,
                                                                      original_graph):
            # FIX: assertEquals is a deprecated alias of assertEqual
            self.assertEqual(new_triple, original_triple,
                             "Ingested resource metadata does not match original")

    res.refresh_from_db()
    # resource-level metadata
    compare_metadatas(res.metadata.get_xml(), "resourcemetadata.xml")
    # one aggregation of each supported type was auto-created from the zip
    compare_metadatas(res.get_logical_files(GenericLogicalFile.type_name())[0].metadata.get_xml(),
                      "test_meta.xml")
    compare_metadatas(res.get_logical_files(FileSetLogicalFile.type_name())[0].metadata.get_xml(),
                      "asdf/asdf_meta.xml")
    compare_metadatas(res.get_logical_files(GeoFeatureLogicalFile.type_name())[0].metadata.get_xml(),
                      "watersheds_meta.xml")
    compare_metadatas(res.get_logical_files(GeoRasterLogicalFile.type_name())[0].metadata.get_xml(),
                      "logan_meta.xml")
    compare_metadatas(res.get_logical_files(NetCDFLogicalFile.type_name())[0].metadata.get_xml(),
                      "SWE_time_meta.xml")
    compare_metadatas(res.get_logical_files(RefTimeseriesLogicalFile.type_name())[0].metadata.get_xml(),
                      "msf_version.refts_meta.xml")
    compare_metadatas(res.get_logical_files(TimeSeriesLogicalFile.type_name())[0].metadata.get_xml(),
                      "ODM2_Multi_Site_One_Variable_meta.xml")
def test_netcdf_file_type_folder_delete(self):
    """Deleting the aggregation folder deletes its files, logical file and metadata."""
    # when a file is set to NetCDFLogicalFile type
    # system automatically creates folder using the name of the file
    # that was used to set the file type
    # Here we need to test that when that folder gets deleted, all files
    # in that folder gets deleted, the logicalfile object gets deleted and
    # the associated metadata objects get deleted
    self.netcdf_file_obj = open(self.netcdf_file, 'r')
    self._create_composite_resource()
    res_file = self.composite_resource.files.first()
    # extract metadata from the nc file
    NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # test that we have one logical file of type NetCDFLogicalFile as a result
    # of metadata extraction
    self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
    # should have one NetCDFFileMetadata object
    self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
    # there should be 2 content files (the .nc file and the generated ncdump .txt)
    self.assertEqual(self.composite_resource.files.count(), 2)
    # test that there are metadata associated with the logical file
    self.assertEqual(Coverage.objects.count(), 4)
    self.assertEqual(OriginalCoverage.objects.count(), 1)
    self.assertEqual(Variable.objects.count(), 5)
    # delete the folder for the logical file
    folder_path = "data/contents/netcdf_valid"
    remove_folder(self.user, self.composite_resource.short_id, folder_path)
    # there should no content files
    self.assertEqual(self.composite_resource.files.count(), 0)
    # there should not be any netCDF logical file or metadata file
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
    # test that all metadata associated with the logical file got deleted
    self.assertEqual(Coverage.objects.count(), 0)
    self.assertEqual(OriginalCoverage.objects.count(), 0)
    self.assertEqual(Variable.objects.count(), 0)
    self.composite_resource.delete()
def test_upload_file_to_aggregation_not_allowed(self):
    """Verify that no file can be uploaded into an aggregation folder."""
    self.create_composite_resource()
    self.add_file_to_resource(file_to_add=self.netcdf_file)
    res_file = self.composite_resource.files.first()
    # the uploaded file starts at the resource root
    self.assertEqual(res_file.file_folder, None)
    # turning the nc file into an aggregation moves it into its own folder
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
    res_file = self.composite_resource.files.first()
    self.assertNotEqual(res_file.file_folder, None)
    # uploading another file into that aggregation folder must be rejected
    aggregation_folder = res_file.file_folder
    with self.assertRaises(ValidationError):
        self.add_file_to_resource(file_to_add=self.netcdf_invalid_file,
                                  upload_folder=aggregation_folder)
    self.composite_resource.delete()
def test_file_metadata_on_logical_file_delete(self):
    """Deleting the NetCDFLogicalFile deletes all of its metadata elements."""
    # test that when the NetCDFLogicalFile instance is deleted
    # all metadata associated with it also get deleted
    self.netcdf_file_obj = open(self.netcdf_file, 'r')
    self._create_composite_resource()
    res_file = self.composite_resource.files.first()
    # extract metadata from the nc file
    NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # test that we have one logical file of type NetCDFLogicalFile as a result
    # of metadata extraction
    self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    # test that we have the metadata elements
    # there should be 4 Coverage objects - 2 at the resource level and
    # the other 2 at the file type level
    self.assertEqual(Coverage.objects.count(), 4)
    self.assertEqual(
        self.composite_resource.metadata.coverages.all().count(), 2)
    self.assertEqual(logical_file.metadata.coverages.all().count(), 2)
    self.assertEqual(OriginalCoverage.objects.count(), 1)
    self.assertNotEqual(logical_file.metadata.originalCoverage, None)
    self.assertEqual(Variable.objects.count(), 5)
    self.assertEqual(logical_file.metadata.variables.all().count(), 5)
    # delete the logical file
    logical_file.logical_delete(self.user)
    # test that we have no logical file of type NetCDFLogicalFile
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
    # test that all metadata deleted
    self.assertEqual(Coverage.objects.count(), 0)
    self.assertEqual(OriginalCoverage.objects.count(), 0)
    self.assertEqual(Variable.objects.count(), 0)
    self.composite_resource.delete()
def _test_invalid_file(self):
    """Setting an invalid file to NetCDF type raises and leaves the file untouched."""
    generic_type = "GenericLogicalFile"
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # the file starts out under the generic logical file
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, generic_type)
    # setting this invalid file to NetCDF file type must raise ValidationError
    with self.assertRaises(ValidationError):
        NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # the invalid file must not have been deleted
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    # and it must still belong to the generic logical file
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, generic_type)
def test_aggregation_metadata_on_logical_file_delete(self):
    """Deleting the aggregation removes file-level metadata but keeps resource-level coverage."""
    # test that when the NetCDFLogicalFile instance is deleted
    # all metadata associated with it also get deleted
    self.create_composite_resource(self.netcdf_file)
    res_file = self.composite_resource.files.first()
    # extract metadata from the nc file
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
    # test that we have one logical file of type NetCDFLogicalFile as a result
    # of metadata extraction
    self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    # test that we have the metadata elements
    # there should be 4 Coverage objects - 2 at the resource level and
    # the other 2 at the file type level
    self.assertEqual(Coverage.objects.count(), 4)
    self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
    self.assertEqual(logical_file.metadata.coverages.all().count(), 2)
    self.assertEqual(OriginalCoverage.objects.count(), 1)
    self.assertNotEqual(logical_file.metadata.originalCoverage, None)
    self.assertEqual(Variable.objects.count(), 5)
    self.assertEqual(logical_file.metadata.variables.all().count(), 5)
    # delete the logical file
    logical_file.logical_delete(self.user)
    # test that we have no logical file of type NetCDFLogicalFile
    self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
    self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
    # test that all metadata deleted - there should be 2 resource level coverages
    self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
    self.assertEqual(Coverage.objects.count(), 2)
    self.assertEqual(OriginalCoverage.objects.count(), 0)
    self.assertEqual(Variable.objects.count(), 0)
    self.composite_resource.delete()
def test_file_rename_or_move(self):
    """Files that are part of a NetCDF aggregation can be neither renamed nor moved.

    Refactored: the four copy-pasted rename/move attempts are now a single
    data-driven loop over (src, tgt) path pairs — same calls, same order,
    same expected DRF_ValidationError for each.
    """
    self.netcdf_file_obj = open(self.netcdf_file, 'r')
    self._create_composite_resource()
    res_file = self.composite_resource.files.first()
    # create the NetCDF aggregation (extracts metadata from the nc file)
    NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # the aggregation consists of the .nc file and the generated header .txt file
    self.assertEqual(self.composite_resource.files.count(), 2)
    # each pair is either a rename inside the aggregation folder or a move
    # out of it - every one of them must be rejected
    path_pairs = [
        # renaming files associated with the netcdf LFO
        ('data/contents/netcdf_valid/netcdf_valid.nc',
         'data/contents/netcdf_valid/netcdf_valid_1.nc'),
        ('data/contents/netcdf_valid/netcdf_valid_header_info.txt',
         'data/contents/netcdf_valid/netcdf_valid_header_info_1.txt'),
        # moving files associated with the netcdf LFO
        ('data/contents/netcdf_valid/netcdf_valid.nc',
         'data/contents/netcdf_valid_1/netcdf_valid.nc'),
        ('data/contents/netcdf_valid/netcdf_valid_header_info.txt',
         'data/contents/netcdf_valid_1/netcdf_valid_header_info.txt'),
    ]
    for src_path, tgt_path in path_pairs:
        with self.assertRaises(DRF_ValidationError):
            move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                          src_path, tgt_path)
    self.composite_resource.delete()
def test_update_netcdf_file(self):
    """Update aggregation keywords, call the update_netcdf_file view, and verify
    the ncdump (.txt) file is regenerated with the new keywords."""
    self.netcdf_file_obj = open(self.netcdf_file, 'r')
    self._create_composite_resource(self.netcdf_file_obj)
    res_file = self.composite_resource.files.first()
    # set the nc file to NetCDF file type
    NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    self.assertEqual(res_file.logical_file_type_name, "NetCDFLogicalFile")
    # one keyword metadata for the netcdf file type
    self.assertEqual(len(logical_file.metadata.keywords), 1)
    # find the generated ncdump text file among the aggregation's files
    nc_dump_res_file = None
    for f in logical_file.files.all():
        if f.extension == ".txt":
            nc_dump_res_file = f
            break
    self.assertNotEqual(nc_dump_res_file, None)
    # the extracted keyword should appear in the ncdump output
    self.assertIn('keywords = "Snow water equivalent"',
                  nc_dump_res_file.resource_file.read())
    # replace the keywords so the nc file becomes out of sync with metadata
    logical_file.metadata.keywords = ["keyword-1", 'keyword-2']
    logical_file.metadata.save()
    url_params = {'file_type_id': logical_file.id}
    url = reverse('update_netcdf_file', kwargs=url_params)
    request = self.factory.post(url, data={})
    request.user = self.user
    # this is the view function we are testing
    response = update_netcdf_file(request, file_type_id=logical_file.id)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    response_dict = json.loads(response.content)
    self.assertEqual('success', response_dict['status'])
    # ncdump file gets regenerated as part of the netcdf file update
    for f in logical_file.files.all():
        if f.extension == ".txt":
            nc_dump_res_file = f
            break
    self.assertNotEqual(nc_dump_res_file, None)
    # the regenerated ncdump must contain the updated keywords
    self.assertIn('keywords = "keyword-1, keyword-2"',
                  nc_dump_res_file.resource_file.read())
    self.composite_resource.delete()
def test_aggregation_folder_move_not_allowed(self):
    """Verify a folder cannot be moved into a folder that represents an aggregation."""
    self.create_composite_resource()
    self.add_file_to_resource(file_to_add=self.netcdf_file)
    res_file = self.composite_resource.files.first()
    # the aggregation folder is named after the nc file (extension stripped)
    aggregation_folder_name, _ = os.path.splitext(res_file.file_name)
    # create aggregation from the nc file
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
    # make a sibling folder that we will attempt to move
    folder_to_move = 'folder_to_move'
    ResourceFile.create_folder(self.composite_resource, folder_to_move)
    # moving that folder into the aggregation folder must be rejected
    src_path = 'data/contents/{}'.format(folder_to_move)
    tgt_path = 'data/contents/{}'.format(aggregation_folder_name)
    with self.assertRaises(DRF_ValidationError):
        move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                      src_path, tgt_path)
    self.composite_resource.delete()
def test_update_resource_temporal_coverage(self):
    """Here we are testing the update of resource temporal coverage based on the
    temporal coverages from all the aggregations in the resource using the view
    function update_resource_coverage"""
    self.create_composite_resource(file_to_upload=self.netcdf_file)
    res_file = self.composite_resource.files.first()
    # set the nc file to NetCDFLogicalFile type (aggregation)
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
    url_params = {'resource_id': self.composite_resource.short_id,
                  'coverage_type': 'temporal'
                  }
    url = reverse('update_resource_coverage', kwargs=url_params)
    request = self.factory.post(url)
    request.user = self.user
    # this is the view function we are testing
    response = update_resource_coverage(request,
                                        resource_id=self.composite_resource.short_id,
                                        coverage_type='temporal')
    # view only needs to report success; coverage values are checked elsewhere
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.composite_resource.delete()
def test_file_move_to_aggregation_not_allowed(self):
    """Verify that no file can be moved into a folder that is an aggregation."""
    self.create_composite_resource()
    self.add_file_to_resource(file_to_add=self.netcdf_file)
    res_file = self.composite_resource.files.first()
    # the nc file starts out at the resource root
    self.assertEqual(res_file.file_folder, None)
    # creating the aggregation moves the nc file into its own folder
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
    res_file = self.composite_resource.files.first()
    self.assertNotEqual(res_file.file_folder, None)
    # upload a second file which we will attempt to move into the aggregation
    file_to_move = self.add_file_to_resource(file_to_add=self.netcdf_invalid_file)
    move_src = os.path.join('data', 'contents', file_to_move.short_path)
    move_tgt = 'data/contents/{}'.format(res_file.file_folder)
    # the move into the aggregation folder must be rejected
    with self.assertRaises(DRF_ValidationError):
        move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                      move_src, move_tgt)
    self.composite_resource.delete()
def handle(self, *args, **options):
    """Migrate every NetcdfResource to a CompositeResource.

    For each netcdf resource that exists on irods: converts the resource type,
    migrates core metadata, and — when both the .nc file and the *header_info.txt
    file are present — creates a NetCDF aggregation carrying the netcdf-specific
    metadata (variables, coverages, keywords).

    Fix: ``ex.message`` does not exist on Python 3 exceptions; replaced with
    ``str(ex)``.
    """
    logger = logging.getLogger(__name__)
    resource_counter = 0
    to_resource_type = 'CompositeResource'
    msg = "THERE ARE CURRENTLY {} MULTIDIMENSIONAL RESOURCES PRIOR TO CONVERSION.".format(
        NetcdfResource.objects.all().count())
    logger.info(msg)
    print(">> {}".format(msg))

    for nc_res in NetcdfResource.objects.all():
        # check resource exists on irods
        istorage = nc_res.get_irods_storage()
        if not istorage.exists(nc_res.root_path):
            err_msg = "NetCDF resource not found in irods (ID: {})".format(nc_res.short_id)
            logger.error(err_msg)
            print("Error:>> {}".format(err_msg))
            # skip this netcdf resource for migration
            continue

        # get the nc file name which needs to be used to create a new folder
        nc_file = None
        txt_file = None
        if nc_res.files.count() == 2:
            for res_file in nc_res.files.all():
                if res_file.extension.lower() == '.nc':
                    nc_file = res_file
                elif res_file.file_name.lower().endswith('header_info.txt'):
                    txt_file = res_file
        create_nc_aggregation = nc_file is not None and txt_file is not None
        if create_nc_aggregation:
            # check resource files exist on irods
            file_missing = False
            for res_file in nc_res.files.all():
                file_path = res_file.public_path
                if not istorage.exists(file_path):
                    err_msg = "File path not found in irods:{}".format(file_path)
                    logger.error(err_msg)
                    err_msg = "Failed to convert netcdf resource (ID: {}). Resource file is " \
                              "missing on irods".format(nc_res.short_id)
                    print("Error:>> {}".format(err_msg))
                    file_missing = True
                    break
            if file_missing:
                # skip this corrupt netcdf resource for migration
                continue

        # change the resource_type
        nc_metadata_obj = nc_res.metadata
        nc_res.resource_type = to_resource_type
        nc_res.content_model = to_resource_type.lower()
        nc_res.save()
        # get the converted resource object - CompositeResource
        comp_res = nc_res.get_content_model()

        # set CoreMetaData object for the composite resource
        core_meta_obj = CoreMetaData.objects.create()
        comp_res.content_object = core_meta_obj
        # migrate netcdf resource core metadata elements to composite resource
        migrate_core_meta_elements(nc_metadata_obj, comp_res)

        # update url attribute of the metadata 'type' element
        type_element = comp_res.metadata.type
        type_element.url = '{0}/terms/{1}'.format(current_site_url(), to_resource_type)
        type_element.save()
        if create_nc_aggregation:
            # create a NetCDF aggregation
            nc_aggr = None
            try:
                nc_aggr = NetCDFLogicalFile.create(resource=comp_res)
            except Exception as ex:
                err_msg = 'Failed to create NetCDF aggregation for resource (ID: {})'
                err_msg = err_msg.format(nc_res.short_id)
                # str(ex): Exception.message was removed in Python 3
                err_msg = err_msg + '\n' + str(ex)
                logger.error(err_msg)
                print("Error:>> {}".format(err_msg))

            if nc_aggr is not None:
                # set aggregation dataset title
                nc_aggr.dataset_name = comp_res.metadata.title.value
                nc_aggr.save()
                # make the res files part of the aggregation
                for res_file in comp_res.files.all():
                    nc_aggr.add_resource_file(res_file)
                # migrate netcdf specific metadata to aggregation
                for variable in nc_metadata_obj.variables.all():
                    variable.content_object = nc_aggr.metadata
                    variable.save()
                # create aggregation level coverage elements
                for coverage in comp_res.metadata.coverages.all():
                    aggr_coverage = Coverage()
                    aggr_coverage.type = coverage.type
                    aggr_coverage._value = coverage._value
                    aggr_coverage.content_object = nc_aggr.metadata
                    aggr_coverage.save()

                org_coverage = nc_metadata_obj.originalCoverage
                if org_coverage:
                    org_coverage.content_object = nc_aggr.metadata
                    org_coverage.save()

                # create aggregation level keywords
                keywords = [sub.value for sub in comp_res.metadata.subjects.all()]
                nc_aggr.metadata.keywords = keywords
                # set aggregation metadata dirty status to that of the netcdf resource metadata
                # dirty status - this would trigger netcdf file update for the new aggregation
                # if metadata is dirty
                nc_aggr.metadata.is_dirty = nc_metadata_obj.is_dirty
                nc_aggr.metadata.save()
                # create aggregation level xml files
                nc_aggr.create_aggregation_xml_documents()
                msg = 'One Multidimensional aggregation was created in resource (ID: {})'
                msg = msg.format(comp_res.short_id)
                logger.info(msg)

        # set resource to dirty so that resource level xml files (resource map and
        # metadata xml files) will be re-generated as part of next bag download
        comp_res.save()
        try:
            set_dirty_bag_flag(comp_res)
        except Exception as ex:
            err_msg = 'Failed to set bag flag dirty for the converted resource (ID: {})'
            err_msg = err_msg.format(nc_res.short_id)
            # str(ex): Exception.message was removed in Python 3
            err_msg = err_msg + '\n' + str(ex)
            logger.error(err_msg)
            print("Error:>> {}".format(err_msg))

        resource_counter += 1
        # delete the instance of NetCdfMetaData that was part of the original netcdf resource
        nc_metadata_obj.delete()
        msg = 'Multidimensional resource (ID: {}) was converted to Composite Resource type'
        msg = msg.format(comp_res.short_id)
        logger.info(msg)

    msg = "{} MULTIDIMENSIONAL RESOURCES WERE CONVERTED TO COMPOSITE RESOURCE.".format(
        resource_counter)
    logger.info(msg)
    print(">> {}".format(msg))
    msg = "THERE ARE CURRENTLY {} MULTIDIMENSIONAL RESOURCES AFTER CONVERSION.".format(
        NetcdfResource.objects.all().count())
    logger.info(msg)
    if NetcdfResource.objects.all().count() > 0:
        msg = "NOT ALL MULTIDIMENSIONAL RESOURCES WERE CONVERTED TO COMPOSITE RESOURCE TYPE"
        logger.error(msg)
    print(">> {}".format(msg))
def test_add_update_metadata_to_netcdf_file_type(self):
    """Exercise the update_metadata_element view for each NetCDF metadata element:
    temporal coverage, original coverage, spatial coverage, and variable."""
    self.netcdf_file_obj = open(self.netcdf_file, 'r')
    self._create_composite_resource(self.netcdf_file_obj)
    res_file = self.composite_resource.files.first()
    # set the nc file to NetCDF File type
    NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    self.assertEqual(res_file.logical_file_type_name, "NetCDFLogicalFile")
    # there should be temporal coverage for the netcdf file type
    self.assertNotEqual(logical_file.metadata.temporal_coverage, None)
    temporal_coverage = logical_file.metadata.temporal_coverage
    self.assertEqual(temporal_coverage.value['start'], '2009-10-01 00:00:00')
    self.assertEqual(temporal_coverage.value['end'], '2010-05-30 23:00:00')
    url_params = {'hs_file_type': 'NetCDFLogicalFile',
                  'file_type_id': logical_file.id,
                  'element_name': 'coverage',
                  'element_id': logical_file.metadata.temporal_coverage.id
                  }
    # test updating temporal coverage
    url = reverse('update_file_metadata', kwargs=url_params)
    request = self.factory.post(url, data={'start': '1/1/2011', 'end': '12/12/2016'})
    request.user = self.user
    # this is the view function we are testing
    response = update_metadata_element(request,
                                       hs_file_type="NetCDFLogicalFile",
                                       file_type_id=logical_file.id,
                                       element_name='coverage',
                                       element_id=logical_file.metadata.temporal_coverage.id)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    response_dict = json.loads(response.content)
    self.assertEqual('success', response_dict['status'])
    # dates get normalized to ISO format by the view
    temporal_coverage = logical_file.metadata.temporal_coverage
    self.assertEqual(temporal_coverage.value['start'], '2011-01-01')
    self.assertEqual(temporal_coverage.value['end'], '2016-12-12')
    # test updating OriginalCoverage element
    # there should be original coverage for the netcdf file type
    self.assertNotEqual(logical_file.metadata.original_coverage, None)
    orig_coverage = logical_file.metadata.original_coverage
    self.assertEqual(orig_coverage.value['northlimit'], '4.63515e+06')
    coverage_data = {'northlimit': '111.333', 'southlimit': '42.678',
                    'eastlimit': '123.789', 'westlimit': '40.789',
                    'units': 'meters'}
    url_params['element_name'] = 'originalcoverage'
    url_params['element_id'] = logical_file.metadata.original_coverage.id
    url = reverse('update_file_metadata', kwargs=url_params)
    request = self.factory.post(url, data=coverage_data)
    request.user = self.user
    # this is the view function we are testing
    response = update_metadata_element(request,
                                       hs_file_type="NetCDFLogicalFile",
                                       file_type_id=logical_file.id,
                                       element_name='originalcoverage',
                                       element_id=logical_file.metadata.original_coverage.id)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    response_dict = json.loads(response.content)
    self.assertEqual('success', response_dict['status'])
    orig_coverage = logical_file.metadata.original_coverage
    self.assertEqual(orig_coverage.value['northlimit'], '111.333')
    # test updating spatial coverage
    # there should be spatial coverage for the netcdf file type
    self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
    spatial_coverage = logical_file.metadata.spatial_coverage
    self.assertEqual(spatial_coverage.value['northlimit'], 41.867126409)
    coverage_data = {'type': 'box', 'projection': 'WGS 84 EPSG:4326',
                     'northlimit': '41.87', 'southlimit': '41.863',
                     'eastlimit': '-111.505', 'westlimit': '-111.511', 'units': 'meters'}
    url_params['element_name'] = 'coverage'
    url_params['element_id'] = spatial_coverage.id
    url = reverse('update_file_metadata', kwargs=url_params)
    request = self.factory.post(url, data=coverage_data)
    request.user = self.user
    # this is the view function we are testing
    response = update_metadata_element(request,
                                       hs_file_type="NetCDFLogicalFile",
                                       file_type_id=logical_file.id,
                                       element_name='coverage',
                                       element_id=spatial_coverage.id)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    response_dict = json.loads(response.content)
    self.assertEqual('success', response_dict['status'])
    spatial_coverage = logical_file.metadata.spatial_coverage
    self.assertEqual(spatial_coverage.value['northlimit'], 41.87)
    # test update Variable element
    variable = logical_file.metadata.variables.first()
    variable_data = {'name': 'variable_name_updated', 'type': 'Int', 'unit': 'deg F',
                     'shape': 'variable_shape'}
    url_params['element_name'] = 'variable'
    url_params['element_id'] = variable.id
    url = reverse('update_file_metadata', kwargs=url_params)
    request = self.factory.post(url, data=variable_data)
    request.user = self.user
    # this is the view function we are testing
    response = update_metadata_element(request,
                                       hs_file_type="NetCDFLogicalFile",
                                       file_type_id=logical_file.id,
                                       element_name='variable',
                                       element_id=variable.id)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    response_dict = json.loads(response.content)
    self.assertEqual('success', response_dict['status'])
    variable = logical_file.metadata.variables.all().filter(id=variable.id).first()
    self.assertEqual(variable.name, 'variable_name_updated')
    self.composite_resource.delete()
def test_CRUD_key_value_metadata_netcdf_file_type(self):
    """Create, update (key and/or value), and delete extra key/value metadata on
    a NetCDF aggregation through the key-value metadata view functions."""
    self.netcdf_file_obj = open(self.netcdf_file, 'r')
    self._create_composite_resource(self.netcdf_file_obj)
    res_file = self.composite_resource.files.first()
    # set the nc file to NetCDF file type
    NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    self.assertEqual(res_file.logical_file_type_name, "NetCDFLogicalFile")
    # no key/value metadata for the netcdf file type yet
    self.assertEqual(logical_file.metadata.extra_metadata, {})
    url_params = {'hs_file_type': 'NetCDFLogicalFile',
                  'file_type_id': logical_file.id
                  }
    url = reverse('update_file_keyvalue_metadata', kwargs=url_params)
    # create a new key/value pair
    request = self.factory.post(url, data={'key': 'key-1', 'value': 'value-1'})
    request.user = self.user
    # this is the view function we are testing
    response = update_key_value_metadata(request,
                                         hs_file_type="NetCDFLogicalFile",
                                         file_type_id=logical_file.id)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    response_dict = json.loads(response.content)
    self.assertEqual('success', response_dict['status'])
    # there should be key/value metadata for the netcdf file type now
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    self.assertNotEqual(logical_file.metadata.extra_metadata, {})
    self.assertEqual(logical_file.metadata.extra_metadata['key-1'], 'value-1')
    # update existing key value metadata - updating both key and value
    request = self.factory.post(url, data={'key': 'key-2', 'value': 'value-2',
                                           'key_original': 'key-1'})
    request.user = self.user
    response = update_key_value_metadata(request,
                                         hs_file_type="NetCDFLogicalFile",
                                         file_type_id=logical_file.id)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    response_dict = json.loads(response.content)
    self.assertEqual('success', response_dict['status'])
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    # the old key must be gone and the new one present
    self.assertEqual(logical_file.metadata.extra_metadata['key-2'], 'value-2')
    self.assertNotIn('key-1', logical_file.metadata.extra_metadata.keys())
    # update existing key value metadata - updating value only
    request = self.factory.post(url, data={'key': 'key-2', 'value': 'value-1',
                                           'key_original': 'key-2'})
    request.user = self.user
    response = update_key_value_metadata(request,
                                         hs_file_type="NetCDFLogicalFile",
                                         file_type_id=logical_file.id)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    response_dict = json.loads(response.content)
    self.assertEqual('success', response_dict['status'])
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    self.assertEqual(logical_file.metadata.extra_metadata['key-2'], 'value-1')
    # delete key/value data using the view function
    request = self.factory.post(url, data={'key': 'key-2'})
    request.user = self.user
    # this the view function we are testing
    response = delete_key_value_metadata(request,
                                         hs_file_type="NetCDFLogicalFile",
                                         file_type_id=logical_file.id)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    response_dict = json.loads(response.content)
    self.assertEqual('success', response_dict['status'])
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    # at this point there should not be any key/value metadata
    self.assertEqual(logical_file.metadata.extra_metadata, {})
    self.composite_resource.delete()
def get_folder_aggregation_type_to_set(self, dir_path):
    """Returns an aggregation (file type) type that the specified folder
    *dir_path* can possibly be set to.

    :param dir_path: Resource file directory path (full folder path starting with
    resource id) for which the possible aggregation type that can be set needs to be
    determined
    :return If the specified folder already represents an aggregation, contains
    sub-folders, contains no files, or does not contain file(s) suitable for any
    supported aggregation type, returns None. Otherwise returns the class name of
    the matching aggregation type.

    Fix: the docstring previously claimed an empty string ("") was returned for
    unsuitable folders, but every such path returns None; the dead initial
    ``aggregation_type_to_set = ""`` assignment is removed accordingly.
    """
    if self.get_folder_aggregation_object(dir_path) is not None:
        # target folder is already an aggregation
        return None

    istorage = self.get_irods_storage()
    irods_path = dir_path
    if self.is_federated:
        irods_path = os.path.join(self.resource_federation_path, irods_path)
    store = istorage.listdir(irods_path)
    if store[0]:
        # seems there are folders under dir_path - no aggregation type can be set if the
        # target folder contains other folders
        return None

    files_in_folder = [res_file for res_file in self.files.all()
                       if res_file.dir_path == dir_path]
    if not files_in_folder:
        # folder is empty
        return None
    if len(files_in_folder) > 1:
        # check for geo feature
        aggregation_type_to_set = GeoFeatureLogicalFile.check_files_for_aggregation_type(
            files_in_folder)
        if aggregation_type_to_set:
            return aggregation_type_to_set

        # check for raster
        aggregation_type_to_set = GeoRasterLogicalFile.check_files_for_aggregation_type(
            files_in_folder)
        if aggregation_type_to_set:
            return aggregation_type_to_set
    else:
        # check for raster
        aggregation_type_to_set = GeoRasterLogicalFile.check_files_for_aggregation_type(
            files_in_folder)
        if aggregation_type_to_set:
            return aggregation_type_to_set
        # check for NetCDF aggregation type
        aggregation_type_to_set = NetCDFLogicalFile.check_files_for_aggregation_type(
            files_in_folder)
        if aggregation_type_to_set:
            return aggregation_type_to_set
        # check for TimeSeries aggregation type
        aggregation_type_to_set = TimeSeriesLogicalFile.check_files_for_aggregation_type(
            files_in_folder)
        if aggregation_type_to_set:
            return aggregation_type_to_set
    return None
def _add_delete_keywords_file_type(self, file_obj, file_type):
    """Exercise the add/delete file-level keyword view functions for the given file type.

    Creates a composite resource from *file_obj*, sets the single resource file to
    *file_type* ("GeoRasterLogicalFile" or NetCDF), then posts to the
    'add_file_keyword_metadata' and 'delete_file_keyword_metadata' views and checks
    keyword counts at both the file (aggregation) level and the resource level.
    """
    self._create_composite_resource(file_obj)
    res_file = self.composite_resource.files.first()
    # set specific file type
    # NOTE(review): argument order here is (resource, file_id, user), but sibling
    # calls elsewhere in this file use set_file_type(resource, user, file_id) —
    # confirm against the set_file_type signature this test class targets
    if file_type == "GeoRasterLogicalFile":
        GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    else:
        NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    self.assertEqual(res_file.logical_file_type_name, file_type)
    if file_type != "NetCDFLogicalFile":
        # no keyword metadata for the raster file type yet
        self.assertEqual(len(logical_file.metadata.keywords), 0)
    else:
        # one keyword metadata for the netcdf file type (extracted from the nc file)
        self.assertEqual(len(logical_file.metadata.keywords), 1)
    # at this point resource should have all the keywords that we have for the file type
    res_keywords = [subject.value for subject in
                    self.composite_resource.metadata.subjects.all()]
    for kw in logical_file.metadata.keywords:
        self.assertIn(kw, res_keywords)
    # add keywords at the file level
    url_params = {'hs_file_type': file_type,
                  'file_type_id': logical_file.id
                  }
    url = reverse('add_file_keyword_metadata', kwargs=url_params)
    request = self.factory.post(url, data={'keywords': 'keyword-1,keyword-2'})
    request.user = self.user
    # this is the view function we are testing
    response = add_keyword_metadata(request, hs_file_type=file_type,
                                    file_type_id=logical_file.id)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    response_dict = json.loads(response.content)
    self.assertEqual('success', response_dict['status'])
    # keyword counts after the add: 2 for the raster type (started with 0),
    # 3 for the netcdf type (started with 1)
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    if file_type != "NetCDFLogicalFile":
        self.assertEqual(len(logical_file.metadata.keywords), 2)
    else:
        self.assertEqual(len(logical_file.metadata.keywords), 3)
    self.assertIn('keyword-1', logical_file.metadata.keywords)
    self.assertIn('keyword-2', logical_file.metadata.keywords)
    # resource level keywords must have been updated with the keywords we added
    # to file level
    res_keywords = [subject.value for subject in
                    self.composite_resource.metadata.subjects.all()]
    for kw in logical_file.metadata.keywords:
        self.assertIn(kw, res_keywords)
    # delete keyword
    url = reverse('delete_file_keyword_metadata', kwargs=url_params)
    request = self.factory.post(url, data={'keyword': 'keyword-1'})
    request.user = self.user
    # this is the view function we are testing
    response = delete_keyword_metadata(request, hs_file_type=file_type,
                                       file_type_id=logical_file.id)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    response_dict = json.loads(response.content)
    self.assertEqual('success', response_dict['status'])
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    if file_type != "NetCDFLogicalFile":
        self.assertEqual(len(logical_file.metadata.keywords), 1)
    else:
        self.assertEqual(len(logical_file.metadata.keywords), 2)
    self.assertIn('keyword-2', logical_file.metadata.keywords)
    # test that deleting a file level keyword doesn't delete the same keyword from
    # resource level (res_keywords was captured before the delete)
    self.assertIn('keyword-1', res_keywords)
    self.composite_resource.delete()
def test_netcdf_metadata_CRUD(self):
    """CRUD-test NetCDF file-type metadata elements: keywords, OriginalCoverage,
    spatial Coverage, and Variable (create, update, and invalid-update cases)."""
    # here we are using a valid nc file for setting it
    # to NetCDF file type which includes metadata extraction
    # NOTE(review): this file handle is never closed in this method — presumably
    # _create_composite_resource consumes it and/or tearDown cleans up; confirm
    self.netcdf_file_obj = open(self.netcdf_file, 'r')
    self._create_composite_resource()

    # make the netcdf file part of the NetCDFLogicalFile
    res_file = self.composite_resource.files.first()
    self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
    # NOTE(review): create() is called with no arguments here, while the sibling
    # aggregation test calls NetCDFLogicalFile.create(self.composite_resource) —
    # confirm which signature this test class's API expects
    netcdf_logical_file = NetCDFLogicalFile.create()
    # creating the logical file also creates its metadata object
    self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
    netcdf_logical_file.add_resource_file(res_file)
    res_file = self.composite_resource.files.first()
    self.assertEqual(res_file.logical_file_type_name, 'NetCDFLogicalFile')
    self.assertEqual(netcdf_logical_file.files.count(), 1)

    # create keywords - note it is possible to have duplicate keywords
    # appropriate view functions need to disallow duplicate keywords
    keywords = ['key-1', 'key-1', 'key-2']
    netcdf_logical_file.metadata.keywords = keywords
    netcdf_logical_file.metadata.save()
    self.assertEqual(len(keywords), len(netcdf_logical_file.metadata.keywords))
    for keyword in keywords:
        self.assertIn(keyword, netcdf_logical_file.metadata.keywords)

    # create OriginalCoverage element
    self.assertEqual(netcdf_logical_file.metadata.original_coverage, None)
    coverage_data = {
        'northlimit': '121.345',
        'southlimit': '42.678',
        'eastlimit': '123.789',
        'westlimit': '40.789',
        'units': 'meters'
    }
    netcdf_logical_file.metadata.create_element('OriginalCoverage', value=coverage_data)
    self.assertNotEqual(netcdf_logical_file.metadata.original_coverage, None)
    self.assertEqual(
        netcdf_logical_file.metadata.original_coverage.value['northlimit'], '121.345')

    # test updating OriginalCoverage element
    orig_coverage = netcdf_logical_file.metadata.original_coverage
    coverage_data = {
        'northlimit': '111.333',
        'southlimit': '42.678',
        'eastlimit': '123.789',
        'westlimit': '40.789',
        'units': 'meters'
    }
    netcdf_logical_file.metadata.update_element('OriginalCoverage', orig_coverage.id,
                                                value=coverage_data)
    self.assertEqual(
        netcdf_logical_file.metadata.original_coverage.value['northlimit'], '111.333')

    # trying to create a 2nd OriginalCoverage element should raise exception
    # (OriginalCoverage is a single-occurrence element)
    with self.assertRaises(Exception):
        netcdf_logical_file.metadata.create_element('OriginalCoverage',
                                                    value=coverage_data)

    # trying to update bounding box values with non-numeric values
    # (e.g., 'north_limit' key with a non-numeric value) should raise exception
    coverage_data = {
        'northlimit': '121.345a',
        'southlimit': '42.678',
        'eastlimit': '123.789',
        'westlimit': '40.789',
        'units': 'meters'
    }
    with self.assertRaises(ValidationError):
        netcdf_logical_file.metadata.update_element('OriginalCoverage', orig_coverage.id,
                                                    value=coverage_data)

    # test creating spatial coverage
    # there should not be any spatial coverage for the netcdf file type
    self.assertEqual(netcdf_logical_file.metadata.spatial_coverage, None)
    coverage_data = {
        'projection': 'WGS 84 EPSG:4326',
        'northlimit': '41.87',
        'southlimit': '41.863',
        'eastlimit': '-111.505',
        'westlimit': '-111.511',
        'units': 'meters'
    }
    # create spatial coverage
    netcdf_logical_file.metadata.create_element('Coverage', type="box",
                                                value=coverage_data)
    spatial_coverage = netcdf_logical_file.metadata.spatial_coverage
    # note: stored box values compare as floats even though input was strings
    self.assertEqual(spatial_coverage.value['northlimit'], 41.87)

    # test updating spatial coverage
    coverage_data = {
        'projection': 'WGS 84 EPSG:4326',
        'northlimit': '41.87706',
        'southlimit': '41.863',
        'eastlimit': '-111.505',
        'westlimit': '-111.511',
        'units': 'meters'
    }
    netcdf_logical_file.metadata.update_element(
        'Coverage', element_id=spatial_coverage.id, type="box", value=coverage_data)
    spatial_coverage = netcdf_logical_file.metadata.spatial_coverage
    self.assertEqual(spatial_coverage.value['northlimit'], 41.87706)

    # create Variable element
    self.assertEqual(netcdf_logical_file.metadata.variables.count(), 0)
    variable_data = {
        'name': 'variable_name',
        'type': 'Int',
        'unit': 'deg F',
        'shape': 'variable_shape'
    }
    netcdf_logical_file.metadata.create_element('Variable', **variable_data)
    self.assertEqual(netcdf_logical_file.metadata.variables.count(), 1)
    self.assertEqual(netcdf_logical_file.metadata.variables.first().name,
                     'variable_name')

    # test that multiple Variable elements can be created
    variable_data = {
        'name': 'variable_name_2',
        'type': 'Int',
        'unit': 'deg F',
        'shape': 'variable_shape_2'
    }
    netcdf_logical_file.metadata.create_element('Variable', **variable_data)
    self.assertEqual(netcdf_logical_file.metadata.variables.count(), 2)

    # test update Variable element
    variable = netcdf_logical_file.metadata.variables.first()
    variable_data = {
        'name': 'variable_name_updated',
        'type': 'Int',
        'unit': 'deg F',
        'shape': 'variable_shape'
    }
    netcdf_logical_file.metadata.update_element('Variable', variable.id, **variable_data)
    variable = netcdf_logical_file.metadata.variables.get(id=variable.id)
    self.assertEqual(variable.name, 'variable_name_updated')

    self.composite_resource.delete()
def test_aggregation_metadata_CRUD(self):
    """CRUD-test aggregation-level metadata for a NetCDF aggregation created inside a
    folder: keywords, OriginalCoverage, spatial Coverage, and Variable elements."""
    # here we are using a valid nc file for creating a NetCDF file type (aggregation)
    # then testing with metadata CRUD actions for the aggregation
    self.create_composite_resource()
    new_folder = 'nc_folder'
    ResourceFile.create_folder(self.composite_resource, new_folder)
    # add the nc file to the resource at the above folder
    self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)

    # make the netcdf file part of the NetCDFLogicalFile
    res_file = self.composite_resource.files.first()
    self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
    netcdf_logical_file = NetCDFLogicalFile.create(self.composite_resource)
    netcdf_logical_file.save()
    # creating the logical file also creates its metadata object
    self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
    netcdf_logical_file.add_resource_file(res_file)
    res_file = self.composite_resource.files.first()
    self.assertEqual(res_file.logical_file_type_name, 'NetCDFLogicalFile')
    self.assertEqual(netcdf_logical_file.files.count(), 1)

    # create keywords - note it is possible to have duplicate keywords
    # appropriate view functions need to disallow duplicate keywords
    keywords = ['key-1', 'key-1', 'key-2']
    netcdf_logical_file.metadata.keywords = keywords
    netcdf_logical_file.metadata.save()
    self.assertEqual(len(keywords), len(netcdf_logical_file.metadata.keywords))
    for keyword in keywords:
        self.assertIn(keyword, netcdf_logical_file.metadata.keywords)

    # create OriginalCoverage element
    self.assertEqual(netcdf_logical_file.metadata.original_coverage, None)
    coverage_data = {'northlimit': 121.345, 'southlimit': 42.678,
                     'eastlimit': 123.789, 'westlimit': 40.789, 'units': 'meters'}
    netcdf_logical_file.metadata.create_element('OriginalCoverage', value=coverage_data)
    self.assertNotEqual(netcdf_logical_file.metadata.original_coverage, None)
    # stored limit values are compared through float() since storage may stringify them
    self.assertEqual(float(netcdf_logical_file.metadata.original_coverage.value['northlimit']),
                     121.345)

    # test updating OriginalCoverage element
    orig_coverage = netcdf_logical_file.metadata.original_coverage
    coverage_data = {'northlimit': 111.333, 'southlimit': 42.678,
                     'eastlimit': 123.789, 'westlimit': 40.789, 'units': 'meters'}
    netcdf_logical_file.metadata.update_element('OriginalCoverage', orig_coverage.id,
                                                value=coverage_data)
    self.assertEqual(float(netcdf_logical_file.metadata.original_coverage.value['northlimit']),
                     111.333)

    # trying to create a 2nd OriginalCoverage element should raise exception
    # (OriginalCoverage is a single-occurrence element)
    with self.assertRaises(Exception):
        netcdf_logical_file.metadata.create_element('OriginalCoverage',
                                                    value=coverage_data)

    # trying to update bounding box values with non-numeric values
    # (e.g., 'north_limit' key with a non-numeric value) should raise exception
    coverage_data = {'northlimit': '121.345a', 'southlimit': 42.678,
                     'eastlimit': 123.789, 'westlimit': 40.789, 'units': 'meters'}
    with self.assertRaises(ValidationError):
        netcdf_logical_file.metadata.update_element('OriginalCoverage', orig_coverage.id,
                                                    value=coverage_data)

    # test creating spatial coverage
    # there should not be any spatial coverage for the netcdf file type
    self.assertEqual(netcdf_logical_file.metadata.spatial_coverage, None)
    coverage_data = {'projection': 'WGS 84 EPSG:4326', 'northlimit': 41.87,
                     'southlimit': 41.863, 'eastlimit': -111.505,
                     'westlimit': -111.511, 'units': 'meters'}
    # create spatial coverage
    netcdf_logical_file.metadata.create_element('Coverage', type="box",
                                                value=coverage_data)
    spatial_coverage = netcdf_logical_file.metadata.spatial_coverage
    self.assertEqual(float(spatial_coverage.value['northlimit']), 41.87)

    # test updating spatial coverage
    coverage_data = {'projection': 'WGS 84 EPSG:4326', 'northlimit': 41.87706,
                     'southlimit': 41.863, 'eastlimit': -111.505,
                     'westlimit': -111.511, 'units': 'meters'}
    netcdf_logical_file.metadata.update_element('Coverage',
                                                element_id=spatial_coverage.id,
                                                type="box", value=coverage_data)
    spatial_coverage = netcdf_logical_file.metadata.spatial_coverage
    self.assertEqual(float(spatial_coverage.value['northlimit']), 41.87706)

    # create Variable element
    self.assertEqual(netcdf_logical_file.metadata.variables.count(), 0)
    variable_data = {'name': 'variable_name', 'type': 'Int', 'unit': 'deg F',
                     'shape': 'variable_shape'}
    netcdf_logical_file.metadata.create_element('Variable', **variable_data)
    self.assertEqual(netcdf_logical_file.metadata.variables.count(), 1)
    self.assertEqual(netcdf_logical_file.metadata.variables.first().name,
                     'variable_name')

    # test that multiple Variable elements can be created
    variable_data = {'name': 'variable_name_2', 'type': 'Int', 'unit': 'deg F',
                     'shape': 'variable_shape_2'}
    netcdf_logical_file.metadata.create_element('Variable', **variable_data)
    self.assertEqual(netcdf_logical_file.metadata.variables.count(), 2)

    # test update Variable element
    variable = netcdf_logical_file.metadata.variables.first()
    variable_data = {'name': 'variable_name_updated', 'type': 'Int', 'unit': 'deg F',
                     'shape': 'variable_shape'}
    netcdf_logical_file.metadata.update_element('Variable', variable.id, **variable_data)
    variable = netcdf_logical_file.metadata.variables.get(id=variable.id)
    self.assertEqual(variable.name, 'variable_name_updated')

    self.composite_resource.delete()
def test_aggregation_parent_folder_rename(self):
    """Renaming the parent folder of an aggregation folder must update the
    aggregation name and the short paths of the aggregation's metadata xml
    and resource map xml files."""
    self.create_composite_resource()
    self.add_file_to_resource(file_to_add=self.netcdf_file)
    res_file = self.composite_resource.files.first()
    # the aggregation folder is named after the nc file (extension stripped)
    agg_folder = os.path.splitext(res_file.file_name)[0]

    # create the NetCDF aggregation from the nc file
    NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)

    # aggregation creation results in 2 resource files, all inside the
    # aggregation folder
    self.assertEqual(self.composite_resource.files.count(), 2)
    for a_file in self.composite_resource.files.all():
        self.assertEqual(a_file.file_folder, agg_folder)

    # aggregation name matches the folder it lives in
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    self.assertEqual(logical_file.aggregation_name, res_file.file_folder)

    # xml file short paths before any move
    meta_path = '{0}/{0}_meta.xml'.format(agg_folder)
    self.assertEqual(logical_file.metadata_short_file_path, meta_path)
    map_path = '{0}/{0}_resmap.xml'.format(agg_folder)
    self.assertEqual(logical_file.map_short_file_path, map_path)

    # create a parent folder and move the aggregation folder under it
    parent = 'parent_folder'
    ResourceFile.create_folder(self.composite_resource, parent)
    move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                  'data/contents/' + agg_folder,
                                  'data/contents/' + parent + '/' + agg_folder)
    moved_folder = parent + '/' + agg_folder
    for a_file in self.composite_resource.files.all():
        self.assertEqual(a_file.file_folder, moved_folder)

    # now rename the parent folder
    renamed_parent = 'parent_folder_1'
    move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                  'data/contents/' + parent,
                                  'data/contents/' + renamed_parent)
    renamed_folder = renamed_parent + '/' + agg_folder
    for a_file in self.composite_resource.files.all():
        self.assertEqual(a_file.file_folder, renamed_folder)

    # aggregation name must track the renamed folder path
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    self.assertEqual(logical_file.aggregation_name, res_file.file_folder)

    # xml file short paths must reflect the renamed parent folder
    meta_path = '{0}/{1}/{1}_meta.xml'.format(renamed_parent, agg_folder)
    self.assertEqual(logical_file.metadata_short_file_path, meta_path)
    map_path = '{0}/{1}/{1}_resmap.xml'.format(renamed_parent, agg_folder)
    self.assertEqual(logical_file.map_short_file_path, map_path)

    self.composite_resource.delete()