def netcdf_pre_delete_file_from_resource(sender, **kwargs):
    """Signal handler run before a file is deleted from a NetCDF resource.

    When either the .nc file or the header .txt file is being deleted, the
    companion file is deleted as well, and all extracted metadata (coverages,
    variables, original coverage) is removed from the resource.

    :param sender: signal sender (unused)
    :param kwargs: expects 'resource' (the NetCDF resource) and 'file'
        (the ResourceFile being deleted)
    """
    nc_res = kwargs['resource']
    metadata = nc_res.metadata
    # metadata no longer out of sync with files once they are gone
    metadata.is_dirty = False
    metadata.save()
    del_file = kwargs['file']
    # extension of the file being deleted (e.g. '.nc' or '.txt')
    del_file_ext = utils.get_resource_file_name_and_extension(del_file)[2]
    # update resource modification info
    user = nc_res.creator
    utils.resource_modified(nc_res, user, overwrite_bag=False)
    # delete the netcdf header file or .nc file
    file_ext = {'.nc': 'application/x-netcdf', '.txt': 'text/plain'}
    if del_file_ext in file_ext:
        # drop the extension being deleted so the loop below only matches
        # the remaining companion file
        del file_ext[del_file_ext]
        for f in ResourceFile.objects.filter(object_id=nc_res.id):
            ext = utils.get_resource_file_name_and_extension(f)[2]
            if ext in file_ext:
                delete_resource_file_only(nc_res, f)
                # also remove the format metadata element for the deleted file
                nc_res.metadata.formats.filter(value=file_ext[ext]).delete()
                break
    # delete all the coverage info
    nc_res.metadata.coverages.all().delete()
    # delete all the extended meta info
    nc_res.metadata.variables.all().delete()
    nc_res.metadata.ori_coverage.all().delete()
def delete_key_value_metadata(request, hs_file_type, file_type_id, **kwargs):
    """deletes one pair of key/value extended metadata for a given logical file
    key data is expected as part of the request.POST data
    If key is found the matching key/value pair is deleted from the hstore dict type field
    """
    logical_file, json_response = _get_logical_file(hs_file_type, file_type_id)
    if json_response is not None:
        return json_response
    resource_id = logical_file.resource.short_id
    resource, authorized, _ = authorize(request, resource_id,
                                        needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
                                        raises_exception=False)
    if not authorized:
        ajax_response_data = {'status': 'error',
                              'logical_file_type': logical_file.type_name(),
                              'element_name': 'key_value',
                              'message': "Permission denied"}
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    key = request.POST['key']
    if key in logical_file.metadata.extra_metadata.keys():
        del logical_file.metadata.extra_metadata[key]
        logical_file.metadata.is_dirty = True
        logical_file.metadata.save()
        resource_modified(resource, request.user, overwrite_bag=False)
    # re-render the extra metadata form; super() is used to get the base
    # class form rather than any subclass override
    extra_metadata_div = super(logical_file.metadata.__class__,
                               logical_file.metadata).get_extra_metadata_html_form()
    context = Context({})
    template = Template(extra_metadata_div.render())
    rendered_html = template.render(context)
    ajax_response_data = {'status': 'success',
                          'logical_file_type': logical_file.type_name(),
                          'extra_metadata': rendered_html,
                          'message': "Delete was successful"}
    return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
def set_file_type(request, resource_id, hs_file_type, file_id=None, **kwargs):
    """Set a file (*file_id*) to a specific file type - aggregation (*hs_file_type*)
    :param request: an instance of HttpRequest
    :param resource_id: id of the resource in which this file type needs to be set
    :param file_id: id of the file which needs to be set to a file type. If file_id is not
    provided then the request must have a file_folder key. In that case the specified folder
    will be used for creating the logical file (aggregation)
    :param hs_file_type: file type to be set (e.g, SingleFile, NetCDF, GeoRaster,
    RefTimeseries, TimeSeries and GeoFeature)
    :return an instance of JsonResponse type
    """
    response_data = {'status': 'error'}
    folder_path = None
    if file_id is None:
        # no file id given: a folder path is required to create the aggregation
        folder_path = request.POST.get('folder_path', "")
        if not folder_path:
            err_msg = "Must provide id of the file or folder path for setting aggregation type."
            response_data['message'] = err_msg
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
    res, authorized, _ = authorize(
        request, resource_id, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
        raises_exception=False)
    # NOTE: removed a redundant re-initialization of response_data that
    # duplicated the assignment at the top of the function
    if not authorized:
        err_msg = "Permission denied"
        response_data['message'] = err_msg
        return JsonResponse(response_data, status=status.HTTP_401_UNAUTHORIZED)
    if res.resource_type != "CompositeResource":
        err_msg = "Aggregation type can be set only for files in composite resource."
        response_data['message'] = err_msg
        return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
    try:
        set_logical_file_type(res, request.user, file_id, hs_file_type, folder_path)
        resource_modified(res, request.user, overwrite_bag=False)
        msg = "{} was successfully set to the selected aggregation type."
        if folder_path is None:
            msg = msg.format("Selected file")
        else:
            msg = msg.format("Selected folder")
        response_data['status'] = 'success'
        response_data['message'] = msg
        return JsonResponse(response_data, status=status.HTTP_201_CREATED)
    except ValidationError as ex:
        # user/input error -> 400
        response_data['message'] = ex.message
        return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
    except Exception as ex:
        # anything else is treated as a server-side failure -> 500
        response_data['message'] = ex.message
        return JsonResponse(response_data, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def add_or_remove_relation_metadata(add=True, target_res_obj=None, relation_type="",
                                    relation_value="", set_res_modified=False,
                                    last_change_user=None):
    """
    add new or remove relation metadata to/from target res obj
    :param add: True -- add metadata; False -- remove metadata
    :param target_res_obj: the target res obj to receive the change
    :param relation_type: "hasPart" or "isPartOf"
    :param relation_value: value of relation
    :param set_res_modified: set bag modified flag to True or False
    :param last_change_user: the User obj represents the last_change_by user
    (only works when set_res_modified is True)
    :return:
    """
    metadata = target_res_obj.metadata
    if add:
        # create a new relation element directly from keyword arguments
        metadata.create_element("relation", type=relation_type, value=relation_value)
    else:
        # remove every relation element matching both type and value
        matching = metadata.relations.filter(type=relation_type, value=relation_value)
        matching.all().delete()
    if set_res_modified:
        resource_modified(target_res_obj, last_change_user, overwrite_bag=False)
def update_timeseries_abstract(request, file_type_id, **kwargs):
    """updates the abstract for time series specified logical file object
    """
    logical_file, json_response = _get_logical_file('TimeSeriesLogicalFile', file_type_id)
    if json_response is not None:
        return json_response
    resource_id = logical_file.resource.short_id
    resource, authorized, _ = authorize(request, resource_id,
                                        needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
                                        raises_exception=False)
    if not authorized:
        ajax_response_data = {'status': 'error',
                              'logical_file_type': logical_file.type_name(),
                              'element_name': 'abstract',
                              'message': "Permission denied"}
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    abstract = request.POST['abstract']
    # reject empty/whitespace-only abstracts
    if abstract.strip():
        metadata = logical_file.metadata
        metadata.abstract = abstract
        # mark metadata dirty so the sqlite file can be regenerated
        metadata.is_dirty = True
        metadata.save()
        resource_modified(resource, request.user, overwrite_bag=False)
        ajax_response_data = {'status': 'success',
                              'logical_file_type': logical_file.type_name(),
                              'element_name': 'abstract',
                              "is_dirty": metadata.is_dirty,
                              'can_update_sqlite': logical_file.can_update_sqlite_file,
                              'message': "Update was successful"}
    else:
        ajax_response_data = {'status': 'error',
                              'logical_file_type': logical_file.type_name(),
                              'element_name': 'abstract',
                              'message': "Data is missing for abstract"}
    return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
def update_sqlite_file(request, file_type_id, **kwargs):
    """updates (writes the metadata) the SQLite file associated with a instance of a specified
    TimeSeriesLogicalFile file object
    """
    hs_file_type = "TimeSeriesLogicalFile"
    logical_file, json_response = _get_logical_file(hs_file_type, file_type_id)
    if json_response is not None:
        return json_response
    resource_id = logical_file.resource.short_id
    resource, authorized, _ = authorize(request, resource_id,
                                        needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
                                        raises_exception=False)
    if not authorized:
        # NOTE(review): 'datatset_name' is misspelled but preserved — client
        # JS may key on this exact string; confirm before fixing
        ajax_response_data = {'status': 'error',
                              'logical_file_type': logical_file.type_name(),
                              'element_name': 'datatset_name',
                              'message': "Permission denied"}
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    try:
        logical_file.update_sqlite_file(request.user)
    except Exception as ex:
        # report failure to the client rather than raising a 500
        ajax_response_data = {'status': 'error',
                              'logical_file_type': logical_file.type_name(),
                              'message': ex.message}
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    resource_modified(resource, request.user, overwrite_bag=False)
    ajax_response_data = {'status': 'success',
                          'logical_file_type': logical_file.type_name(),
                          'message': "SQLite file update was successful"}
    return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
def delete_file_type(request, resource_id, hs_file_type, file_type_id, **kwargs):
    """deletes an instance of a specific file type and all its associated resource files"""
    res, _, _ = authorize(request, resource_id,
                          needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
    if res.resource_type != "CompositeResource":
        err_msg = "File type can be deleted only in composite resource."
        messages.error(request, err_msg)
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    # only GeoRaster is currently supported for deletion via this view
    if hs_file_type != "GeoRaster":
        err_msg = "Currently only an instance of Geo Raster file type can be deleted."
        messages.error(request, err_msg)
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    logical_file_to_delete = GeoRasterLogicalFile.objects.filter(id=file_type_id).first()
    if logical_file_to_delete is None:
        err_msg = "No matching Geo Raster file type was found."
        messages.error(request, err_msg)
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    # guard against deleting a logical file that belongs to another resource
    if logical_file_to_delete.resource.short_id != res.short_id:
        err_msg = "Geo Raster file type doesn't belong to the specified resource."
        messages.error(request, err_msg)
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    # removes the logical file and its associated resource files/metadata
    logical_file_to_delete.logical_delete(request.user)
    resource_modified(res, request.user, overwrite_bag=False)
    msg = "Geo Raster file type was deleted."
    messages.success(request, msg)
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def test_resource_modified(self):
    """resource_modified() must advance the 'modified' date and record the acting user."""
    modified_date1 = self.res.metadata.dates.filter(type='modified').first()
    self.assertEquals(self.res.last_changed_by, self.user)
    utils.resource_modified(self.res, self.user2)
    modified_date2 = self.res.metadata.dates.filter(type='modified').first()
    # the modified timestamp must have moved forward
    self.assertTrue((modified_date2.start_date - modified_date1.start_date).total_seconds() > 0)
    # and last_changed_by must now be the second user
    self.assertEquals(self.res.last_changed_by, self.user2)
def migrate_tif_file(apps, schema_editor):
    """Data migration: create a .vrt companion file for each single-file
    raster resource and delete the stale resource bag.

    Best-effort per resource: failures are logged and the migration moves on.
    """
    # create a vrt file from tif file for each of the Raster Resources
    log = logging.getLogger()
    istorage = IrodsStorage()
    for res in RasterResource.objects.all():
        try:
            if len(res.files.all()) == 1:
                res_file = res.files.all().first()
                vrt_file_path = create_vrt_file(res_file.resource_file)
                if os.path.isfile(vrt_file_path):
                    files = (UploadedFile(file=open(vrt_file_path, 'r'),
                                          name=os.path.basename(vrt_file_path)))
                    hydroshare.add_resource_files(res.short_id, files)
                    bag_name = 'bags/{res_id}.zip'.format(res_id=res.short_id)
                    if istorage.exists(bag_name):
                        # delete the resource bag as the old bag is not valid
                        istorage.delete(bag_name)
                        print("Deleted bag for resource ID:" + str(res.short_id))
                    resource_modified(res, res.creator)
                    log.info('Tif file conversion to VRT successful for resource:ID:{} '
                             'Title:{}'.format(res.short_id, res.metadata.title.value))
                else:
                    log.error('Tif file conversion to VRT unsuccessful for resource:ID:{} '
                              'Title:{}'.format(res.short_id, res.metadata.title.value))
                # clean up the temp directory holding the generated vrt file
                if os.path.exists(vrt_file_path):
                    shutil.rmtree(os.path.dirname(vrt_file_path))
        except Exception:
            # BUGFIX: original bare `except: pass` silently swallowed every
            # failure (including coding errors). Still best-effort — keep
            # migrating the remaining resources — but log what went wrong.
            log.exception('VRT migration failed for resource ID:{}'.format(res.short_id))
def update_dataset_name(request, hs_file_type, file_type_id, **kwargs):
    """updates the dataset_name (title) attribute of the specified logical file object
    """
    logical_file, json_response = _get_logical_file(hs_file_type, file_type_id)
    if json_response is not None:
        return json_response
    resource_id = logical_file.resource.short_id
    resource, authorized, _ = authorize(request, resource_id,
                                        needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
                                        raises_exception=False)
    if not authorized:
        # NOTE(review): 'datatset_name' is misspelled but preserved — client
        # JS may key on this exact string; confirm before fixing
        ajax_response_data = {'status': 'error',
                              'logical_file_type': logical_file.type_name(),
                              'element_name': 'datatset_name',
                              'message': "Permission denied"}
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    dataset_name = request.POST['dataset_name']
    logical_file.dataset_name = dataset_name
    logical_file.save()
    # mark file-level metadata dirty so downstream artifacts get regenerated
    logical_file.metadata.is_dirty = True
    logical_file.metadata.save()
    resource_modified(resource, request.user, overwrite_bag=False)
    ajax_response_data = {'status': 'success',
                          'logical_file_type': logical_file.type_name(),
                          'element_name': 'datatset_name',
                          'message': "Update was successful"}
    return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
def test_resource_modified(self):
    """resource_modified() must advance the 'modified' date, record the acting
    user, and keep last_updated in sync with the modified date element."""
    modified_date1 = self.res.metadata.dates.filter(type='modified').first()
    self.assertEquals(self.res.last_changed_by, self.user)
    utils.resource_modified(self.res, self.user2)
    modified_date2 = self.res.metadata.dates.filter(type='modified').first()
    # the modified timestamp must have moved forward
    self.assertTrue((modified_date2.start_date - modified_date1.start_date).total_seconds() > 0)
    self.assertEquals(self.res.last_changed_by, self.user2)
    # last_updated property must mirror the metadata 'modified' date
    self.assertEquals(self.res.last_updated, modified_date2.start_date)
def update_metadata_element(request, hs_file_type, file_type_id, element_name,
                            element_id, **kwargs):
    """Updates one metadata element of a logical file (file type) via ajax.

    Validates the posted element data, applies the update, and returns a
    JsonResponse describing success/failure (always HTTP 200 so the client
    can render form errors).
    """
    err_msg = "Failed to update metadata element '{}'. {}."
    # resolve the concrete logical file model class from the file type name
    content_type = ContentType.objects.get(app_label="hs_file_types",
                                           model=hs_file_type.lower())
    logical_file_type_class = content_type.model_class()
    logical_file = logical_file_type_class.objects.filter(id=file_type_id).first()
    if logical_file is None:
        err_msg = "No matching logical file type was found."
        ajax_response_data = {'status': 'error', 'message': err_msg}
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    resource_id = logical_file.resource.short_id
    resource, authorized, _ = authorize(request, resource_id,
                                        needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
                                        raises_exception=False)
    if not authorized:
        ajax_response_data = {'status': 'error',
                              'logical_file_type': logical_file.type_name(),
                              'element_name': element_name,
                              'message': "Permission denied"}
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    validation_response = logical_file.metadata.validate_element_data(request, element_name)
    is_update_success = False
    if validation_response['is_valid']:
        element_data_dict = validation_response['element_data_dict']
        try:
            logical_file.metadata.update_element(element_name, element_id, **element_data_dict)
            resource_modified(resource, request.user, overwrite_bag=False)
            is_update_success = True
        except ValidationError as ex:
            err_msg = err_msg.format(element_name, ex.message)
        except Error as ex:
            err_msg = err_msg.format(element_name, ex.message)
    else:
        err_msg = err_msg.format(element_name, validation_response['errors'])
    if is_update_success:
        # tell the client whether the resource now has enough metadata
        if resource.can_be_public_or_discoverable:
            metadata_status = METADATA_STATUS_SUFFICIENT
        else:
            metadata_status = METADATA_STATUS_INSUFFICIENT
        ajax_response_data = {'status': 'success',
                              'element_name': element_name,
                              'metadata_status': metadata_status,
                              'logical_file_type': logical_file.type_name()
                              }
        if element_name.lower() == 'coverage':
            # coverage updates may change resource-level coverage display
            spatial_coverage_dict = get_coverage_data_dict(resource)
            temporal_coverage_dict = get_coverage_data_dict(resource, coverage_type='temporal')
            ajax_response_data['spatial_coverage'] = spatial_coverage_dict
            ajax_response_data['temporal_coverage'] = temporal_coverage_dict
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    else:
        ajax_response_data = {'status': 'error', 'message': err_msg}
        # need to return http status 200 to show form errors
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
def set_file_type(request, resource_id, file_id, hs_file_type, **kwargs):
    """Set a file (*file_id*) to a specific file type (*hs_file_type*)
    :param request: an instance of HttpRequest
    :param resource_id: id of the resource in which this file type needs to be set
    :param file_id: id of the file which needs to be set to a file type
    :param hs_file_type: file type to be set (e.g, NetCDF, GeoRaster, GeoFeature etc)
    :return an instance of JsonResponse type
    """
    res, authorized, _ = authorize(
        request, resource_id, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
        raises_exception=False)
    # maps the posted type name to the logical file class that implements it
    file_type_map = {"GeoRaster": GeoRasterLogicalFile,
                     "NetCDF": NetCDFLogicalFile,
                     'GeoFeature': GeoFeatureLogicalFile,
                     'RefTimeseries': RefTimeseriesLogicalFile,
                     'TimeSeries': TimeSeriesLogicalFile}
    response_data = {'status': 'error'}
    if not authorized:
        err_msg = "Permission denied"
        response_data['message'] = err_msg
        return JsonResponse(response_data, status=status.HTTP_401_UNAUTHORIZED)
    if res.resource_type != "CompositeResource":
        err_msg = "File type can be set only for files in composite resource."
        response_data['message'] = err_msg
        return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
    if hs_file_type not in file_type_map:
        err_msg = "Unsupported file type. Supported file types are: {}".format(
            file_type_map.keys())
        response_data['message'] = err_msg
        return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
    try:
        logical_file_type_class = file_type_map[hs_file_type]
        # delegates validation and metadata extraction to the file type class
        logical_file_type_class.set_file_type(resource=res, file_id=file_id,
                                              user=request.user)
        resource_modified(res, request.user, overwrite_bag=False)
        msg = "File was successfully set to selected file type. " \
              "Metadata extraction was successful."
        response_data['status'] = 'success'
        response_data['message'] = msg
        # include updated resource-level spatial coverage for the UI
        spatial_coverage_dict = get_coverage_data_dict(res)
        response_data['spatial_coverage'] = spatial_coverage_dict
        return JsonResponse(response_data, status=status.HTTP_201_CREATED)
    except ValidationError as ex:
        response_data['message'] = ex.message
        return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
    except Exception as ex:
        response_data['message'] = ex.message
        return JsonResponse(response_data, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def create_new_version_resource(ori_res, new_res, user): """ Populate metadata and contents from ori_res object to new_res object to make new_res object as a new version of the ori_res object Args: ori_res: the original resource that is to be versioned. new_res: the new_res to be populated with metadata and content from the original resource to make it a new version user: the requesting user Returns: the new versioned resource for the original resource and thus obsolete the original resource """ # newly created new resource version is private initially # add files directly via irods backend file operation utils.copy_resource_files_and_AVUs(ori_res.short_id, new_res.short_id) # copy metadata from source resource to target new-versioned resource except three elements utils.copy_and_create_metadata(ori_res, new_res) # add or update Relation element to link source and target resources hs_identifier = new_res.metadata.identifiers.all().filter( name="hydroShareIdentifier")[0] ori_res.metadata.create_element('relation', type='isReplacedBy', value=hs_identifier.url) if new_res.metadata.relations.all().filter(type='isVersionOf').exists(): # the original resource is already a versioned resource, and its isVersionOf relation # element is copied over to this new version resource, needs to delete this element so # it can be created to link to its original resource correctly eid = new_res.metadata.relations.all().filter( type='isVersionOf').first().id new_res.metadata.delete_element('relation', eid) hs_identifier = ori_res.metadata.identifiers.all().filter( name="hydroShareIdentifier")[0] new_res.metadata.create_element('relation', type='isVersionOf', value=hs_identifier.url) if ori_res.resource_type.lower() == "collectionresource": # clone contained_res list of original collection and add to new collection # note that new version collection will not contain "deleted resources" new_res.resources = ori_res.resources.all() # create bag for the new resource 
hs_bagit.create_bag(new_res) # since an isReplaceBy relation element is added to original resource, needs to call # resource_modified() for original resource utils.resource_modified(ori_res, user, overwrite_bag=False) # if everything goes well up to this point, set original resource to be immutable so that # obsoleted resources cannot be modified from REST API ori_res.raccess.immutable = True ori_res.raccess.save() return new_res
def publish(request, shortkey, *args, **kwargs):
    """Publish and freeze a resource: revoke all edit access, mark it as
    published/frozen with a placeholder DOI, and redirect back to the
    referring page."""
    res, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)
    # placeholder DOI until a real one is minted
    res.doi = "to be assigned"
    res.published_and_frozen = True
    # strip edit permissions so the frozen resource cannot be altered
    res.edit_users = []
    res.edit_groups = []
    res.save()
    resource_modified(res, request.user)
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def add_citation(request, shortkey, *args, **kwargs):
    """Add a citation (Dublin Core 'REF' term) to a resource and redirect back."""
    res, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)
    # NOTE(review): request.REQUEST was deprecated in Django 1.7 and removed
    # in 1.9 — confirm the Django version in use, or migrate to request.POST
    res.dublin_metadata.create(term='REF', content=request.REQUEST['content'])
    resource_modified(res, request.user)
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def delete_file(request, shortkey, f, *args, **kwargs):
    """Delete a single resource file (by primary key *f*) and redirect back."""
    res, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)
    fl = res.files.filter(pk=int(f)).first()
    # NOTE(review): if no file matches, .first() returns None and the next
    # line raises AttributeError (-> 500); consider a 404 instead
    fl.resource_file.delete()
    fl.delete()
    resource_modified(res, request.user)
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def post(self, request, pk, pathname):
    """
    Add a file to a resource.
    :param request:
    :param pk: Primary key of the resource (i.e. resource short ID)
    :param pathname: the path to the containing folder in the folder hierarchy
    :return:
    Leaving out pathname in the URI calls a different class function in ResourceFileListCreate
    that stores in the root directory instead.
    """
    resource, _, _ = view_utils.authorize(request, pk,
                                          needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
    # NOTE(review): on Python 3 / newer Django, FILES.values() is a view that
    # does not support len()/indexing — confirm runtime, or wrap in list()
    resource_files = request.FILES.values()
    if len(resource_files) == 0:
        error_msg = {'file': 'No file was found to add to the resource.'}
        raise ValidationError(detail=error_msg)
    elif len(resource_files) > 1:
        error_msg = {'file': 'More than one file was found. Only one file can be '
                             'added at a time.'}
        raise ValidationError(detail=error_msg)
    # TODO: (Brian) I know there has been some discussion when to validate a file
    # I agree that we should not validate and extract metadata as part of the file add api
    # Once we have a decision, I will change this implementation accordingly. In that case
    # we have to implement additional rest endpoints for file validation and extraction.
    try:
        hydroshare.utils.resource_file_add_pre_process(resource=resource,
                                                       files=[resource_files[0]],
                                                       user=request.user,
                                                       extract_metadata=True)
    except (hydroshare.utils.ResourceFileSizeException,
            hydroshare.utils.ResourceFileValidationException, Exception) as ex:
        error_msg = {'file': 'Adding file to resource failed. %s' % ex.message}
        raise ValidationError(detail=error_msg)
    try:
        res_file_objects = hydroshare.utils.resource_file_add_process(
            resource=resource, files=[resource_files[0]], folder=pathname,
            user=request.user, extract_metadata=True)
    except (hydroshare.utils.ResourceFileValidationException, Exception) as ex:
        error_msg = {'file': 'Adding file to resource failed. %s' % ex.message}
        raise ValidationError(detail=error_msg)
    # prepare response data
    file_name = os.path.basename(res_file_objects[0].resource_file.name)
    file_path = res_file_objects[0].resource_file.name.split('/data/contents/')[1]
    response_data = {'resource_id': pk, 'file_name': file_name, 'file_path': file_path}
    resource_modified(resource, request.user, overwrite_bag=False)
    return Response(data=response_data, status=status.HTTP_201_CREATED)
def add_keyword_metadata(request, hs_file_type, file_type_id, **kwargs):
    """adds one or more keywords for a given logical file
    data for keywords is expected as part of the request.POST
    multiple keywords are part of the post data in a comma separated format
    If any of the keywords to be added already exists (case insensitive check) then none of
    the posted keywords is added
    NOTE: This view function must be called via ajax call
    """
    logical_file, json_response = _get_logical_file(hs_file_type, file_type_id)
    if json_response is not None:
        return json_response
    resource_id = logical_file.resource.short_id
    resource, authorized, _ = authorize(request, resource_id,
                                        needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
                                        raises_exception=False)
    if not authorized:
        ajax_response_data = {'status': 'error',
                              'logical_file_type': logical_file.type_name(),
                              'element_name': 'keyword',
                              'message': "Permission denied"}
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    if hs_file_type == "RefTimeseriesLogicalFile" and logical_file.metadata.has_keywords_in_json:
        # if there are keywords in json file, we don't allow adding new keyword
        ajax_response_data = {'status': 'error',
                              'logical_file_type': logical_file.type_name(),
                              'element_name': 'keyword',
                              'message': "Adding of keyword is not allowed"}
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    keywords = request.POST['keywords']
    keywords = keywords.split(",")
    existing_keywords = [kw.lower() for kw in logical_file.metadata.keywords]
    # BUGFIX: the duplicate check must lower-case the *posted* keywords and
    # compare against the (already lower-cased) existing ones. The original
    # expression compared in the wrong direction
    # (`kw.lower() in keywords for kw in existing_keywords`), so posting e.g.
    # "Water" when "water" already existed slipped past the documented
    # case-insensitive check and created a duplicate.
    if not any(kw.lower() in existing_keywords for kw in keywords):
        metadata = logical_file.metadata
        metadata.keywords += keywords
        if hs_file_type != "TimeSeriesLogicalFile":
            metadata.is_dirty = True
        metadata.save()
        # add keywords to resource
        resource_keywords = [subject.value.lower() for subject in
                             resource.metadata.subjects.all()]
        for kw in keywords:
            if kw.lower() not in resource_keywords:
                resource.metadata.create_element('subject', value=kw)
        resource_modified(resource, request.user, overwrite_bag=False)
        resource_keywords = [subject.value for subject in resource.metadata.subjects.all()]
        ajax_response_data = {'status': 'success',
                              'logical_file_type': logical_file.type_name(),
                              'added_keywords': keywords,
                              'resource_keywords': resource_keywords,
                              'message': "Add was successful"}
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    else:
        ajax_response_data = {'status': 'error',
                              'logical_file_type': logical_file.type_name(),
                              'element_name': 'keyword',
                              'message': "Keyword already exists"}
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
def _process_uploaded_sqlite_file(user, resource, res_file, validate_files_dict,
                                  delete_existing_metadata=True):
    """Validate an uploaded file as an ODM2 sqlite database and, if valid,
    extract its metadata into the resource.

    On any failure the uploaded file is deleted and validate_files_dict is
    updated with 'are_files_valid' = False and an error 'message'.
    """
    # check if it a sqlite file
    fl_ext = utils.get_resource_file_name_and_extension(res_file)[2]
    if fl_ext == '.sqlite':
        # get the file from iRODS to a temp directory
        fl_obj_name = utils.get_file_from_irods(res_file)
        validate_err_message = _validate_odm2_db_file(fl_obj_name)
        if not validate_err_message:
            # first delete relevant existing metadata elements
            if delete_existing_metadata:
                TimeSeriesMetaData.objects.filter(
                    id=resource.metadata.id).update(is_dirty=False)
                _delete_extracted_metadata(resource)
            extract_err_message = _extract_metadata(resource, fl_obj_name)
            if extract_err_message:
                # delete the invalid file
                delete_resource_file_only(resource, res_file)
                # cleanup any extracted metadata
                _delete_extracted_metadata(resource)
                validate_files_dict['are_files_valid'] = False
                extract_err_message += "{}".format(FILE_UPLOAD_ERROR_MESSAGE)
                validate_files_dict['message'] = extract_err_message
            else:
                # set metadata is_dirty to False
                TimeSeriesMetaData.objects.filter(
                    id=resource.metadata.id).update(is_dirty=False)
                # delete the csv file if it exists
                _delete_resource_file(resource, ".csv")
                utils.resource_modified(resource, user, overwrite_bag=False)
        else:
            # file validation failed
            # delete the invalid file just uploaded
            delete_resource_file_only(resource, res_file)
            validate_files_dict['are_files_valid'] = False
            validate_err_message += "{}".format(FILE_UPLOAD_ERROR_MESSAGE)
            validate_files_dict['message'] = validate_err_message
        # cleanup the temp file
        if os.path.exists(fl_obj_name):
            shutil.rmtree(os.path.dirname(fl_obj_name))
    else:
        # delete the invalid file
        delete_resource_file_only(resource, res_file)
        validate_files_dict['are_files_valid'] = False
        # BUGFIX: the original did `err_message += err_message.format(...)`,
        # which reported the unformatted template concatenated with the
        # formatted text; also fixed the missing "is" in the message.
        err_message = "The uploaded file is not a sqlite file. {}"
        err_message = err_message.format(FILE_UPLOAD_ERROR_MESSAGE)
        validate_files_dict['message'] = err_message
def update_dataset_name(request, hs_file_type, file_type_id, **kwargs):
    """updates the dataset_name (title) attribute of the specified logical file object
    """
    logical_file, json_response = _get_logical_file(hs_file_type, file_type_id)
    if json_response is not None:
        return json_response
    resource_id = logical_file.resource.short_id
    resource, authorized, _ = authorize(
        request, resource_id, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
        raises_exception=False)
    if not authorized:
        # NOTE(review): 'datatset_name' is misspelled but preserved — client
        # JS may key on this exact string; confirm before fixing
        ajax_response_data = {
            'status': 'error',
            'logical_file_type': logical_file.type_name(),
            'element_name': 'datatset_name',
            'message': "Permission denied"
        }
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    if hs_file_type == "RefTimeseriesLogicalFile" and logical_file.metadata.has_title_in_json:
        # if json file has title, we can't update title (dataset name)
        ajax_response_data = {
            'status': 'error',
            'logical_file_type': logical_file.type_name(),
            'element_name': 'title',
            'message': "Title can't be updated"
        }
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    dataset_name = request.POST['dataset_name']
    logical_file.dataset_name = dataset_name
    logical_file.save()
    # mark file-level metadata dirty so downstream artifacts get regenerated
    metadata = logical_file.metadata
    metadata.is_dirty = True
    metadata.save()
    resource_modified(resource, request.user, overwrite_bag=False)
    ajax_response_data = {
        'status': 'success',
        'logical_file_type': logical_file.type_name(),
        'element_name': 'datatset_name',
        "is_dirty": metadata.is_dirty,
        'message': "Update was successful"
    }
    if logical_file.type_name() == "TimeSeriesLogicalFile":
        # time series UI needs to know whether the sqlite file can be rewritten
        ajax_response_data[
            'can_update_sqlite'] = logical_file.can_update_sqlite_file
    return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
def add_zip_file_contents_to_resource(pk, zip_file_path):
    """Add zip file to existing resource and remove tmp zip file.

    Unpacks every file in the zip into the resource, tracking progress on the
    resource's file_unpack_status/file_unpack_message fields; the temp zip is
    always removed, even on failure.
    """
    zfile = None
    resource = None
    try:
        resource = utils.get_resource_by_shortkey(pk, or_404=False)
        zfile = zipfile.ZipFile(zip_file_path)
        num_files = len(zfile.infolist())
        zcontents = utils.ZipContents(zfile)
        files = zcontents.get_files()
        resource.file_unpack_status = 'Running'
        resource.save()
        for i, f in enumerate(files):
            logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
            utils.add_file_to_resource(resource, f)
            # BUGFIX: report the number of files imported so far (i + 1); the
            # original used the zero-based index, showing "Imported 0 of ..."
            # after the first file was added
            resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
                i + 1, num_files)
            resource.save()
        # This might make the resource unsuitable for public consumption
        resource.update_public_and_discoverable()
        # TODO: this is a bit of a lie because a different user requested the bag overwrite
        utils.resource_modified(resource, resource.creator, overwrite_bag=False)
        # Call success callback
        resource.file_unpack_message = None
        resource.file_unpack_status = 'Done'
        resource.save()
    except BaseResource.DoesNotExist:
        msg = "Unable to add zip file contents to non-existent resource {pk}."
        msg = msg.format(pk=pk)
        logger.error(msg)
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed; the unpack error is still recorded on the resource
        exc_info = "".join(traceback.format_exception(*sys.exc_info()))
        if resource:
            resource.file_unpack_status = 'Error'
            resource.file_unpack_message = exc_info
            resource.save()
        if zfile:
            zfile.close()
        logger.error(exc_info)
    finally:
        # Delete upload file
        os.unlink(zip_file_path)
def delete_keyword_metadata(request, hs_file_type, file_type_id, **kwargs):
    """deletes a keyword for a given logical file
    The keyword to be deleted is expected as part of the request.POST
    NOTE: This view function must be called via ajax call
    """
    logical_file, json_response = _get_logical_file(hs_file_type, file_type_id)
    if json_response is not None:
        return json_response
    resource_id = logical_file.resource.short_id
    resource, authorized, _ = authorize(
        request, resource_id, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
        raises_exception=False)
    if not authorized:
        ajax_response_data = {
            'status': 'error',
            'logical_file_type': logical_file.type_name(),
            'element_name': 'keyword',
            'message': "Permission denied"
        }
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    keyword = request.POST['keyword']
    existing_keywords = [kw.lower() for kw in logical_file.metadata.keywords]
    if keyword.lower() in existing_keywords:
        # case-insensitive removal of the matching keyword
        logical_file.metadata.keywords = [
            kw for kw in logical_file.metadata.keywords
            if kw.lower() != keyword.lower()
        ]
        logical_file.metadata.is_dirty = True
        logical_file.metadata.save()
        resource_modified(resource, request.user, overwrite_bag=False)
        ajax_response_data = {
            'status': 'success',
            'logical_file_type': logical_file.type_name(),
            'deleted_keyword': keyword,
            # BUGFIX: message said "Add was successful" in this delete view
            'message': "Delete was successful"
        }
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
    else:
        ajax_response_data = {
            'status': 'error',
            'logical_file_type': logical_file.type_name(),
            'element_name': 'keyword',
            'message': "Keyword was not found"
        }
        return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
def update_collection_for_deleted_resources(request, shortkey, *args, **kwargs):
    """Purge tracked deleted-resource records for a collection resource.

    Removes the 'hasPart' relation metadata that pointed at each deleted
    resource, refreshes the collection coverages and the CSV listing, and
    regenerates the bag so the resource map holds no broken links to
    resources deleted by their owners. Always responds with JSON; errors
    are reported in the payload rather than raised.
    """
    ajax_response_data = {'status': "success"}
    try:
        with transaction.atomic():
            collection_res, is_authorized, user = authorize(
                request, shortkey,
                needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)

            if collection_res.resource_type.lower() != "collectionresource":
                raise Exception(
                    "Resource {0} is not a collection resource.".format(shortkey))

            # drop the "hasPart" relation for every logged deleted resource
            for deleted_res_log in collection_res.deleted_resources:
                add_or_remove_relation_metadata(
                    add=False,
                    target_res_obj=collection_res,
                    relation_type="hasPart",
                    relation_value=RES_LANDING_PAGE_URL_TEMPLATE.format(
                        deleted_res_log.resource_id),
                    set_res_modified=False)

            ajax_response_data['new_coverage_list'] = \
                _update_collection_coverages(collection_res)

            # forget all logged deletions now that metadata is consistent again
            collection_res.deleted_resources.all().delete()

            update_collection_list_csv(collection_res)

            resource_modified(collection_res, user, overwrite_bag=False)
    except Exception as ex:
        logger.error("Failed to update collection for "
                     "deleted resources.Collection resource ID: {}. "
                     "Error:{} ".format(shortkey, str(ex)))
        ajax_response_data = {'status': "error", 'message': str(ex)}
    finally:
        # a response dict exists on every path; returning from finally mirrors
        # the original control flow
        return JsonResponse(ajax_response_data)
def add_file_to_resource(request, *args, **kwargs):
    """Attach every file posted under 'files' to the resource given by kwargs['shortkey'].

    Requires edit/full/superuser authorization on the resource; redirects
    back to the referring page when done.
    """
    if 'shortkey' not in kwargs:
        raise TypeError('shortkey must be specified...')
    shortkey = kwargs['shortkey']

    res, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)

    for uploaded in request.FILES.getlist('files'):
        res.files.add(ResourceFile(content_object=res, resource_file=uploaded))

    resource_modified(res, request.user)
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def update_collection_for_deleted_resources(request, shortkey, *args, **kwargs):
    """
    If there are any tracked deleted resource objects for a collection resource
    (identified by shortkey), those are deleted and resource bag is regenerated
    for the collection resource to avoid the possibility of broken links in resource map
    as a result of collection referenced resource being deleted by resource owner.
    """

    ajax_response_data = {'status': "success"}
    try:
        with transaction.atomic():
            collection_res, is_authorized, user \
                = authorize(request, shortkey,
                            needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)

            if collection_res.resource_type.lower() != "collectionresource":
                raise Exception("Resource {0} is not a collection resource.".format(shortkey))

            # handle "Relation" metadata
            hasPart = "hasPart"
            for deleted_res_log in collection_res.deleted_resources:
                relation_value = RES_LANDING_PAGE_URL_TEMPLATE.format(deleted_res_log.resource_id)

                add_or_remove_relation_metadata(add=False, target_res_obj=collection_res,
                                                relation_type=hasPart,
                                                relation_value=relation_value,
                                                set_res_modified=False)

            new_coverage_list = _update_collection_coverages(collection_res)
            ajax_response_data['new_coverage_list'] = new_coverage_list

            # remove all logged deleted resources for the collection
            collection_res.deleted_resources.all().delete()

            update_collection_list_csv(collection_res)

            resource_modified(collection_res, user, overwrite_bag=False)
    except Exception as ex:
        # str(ex) instead of ex.message: 'message' is a Python-2-only attribute
        # (removed in Python 3) and the sibling implementation of this view
        # already uses str(ex)
        logger.error("Failed to update collection for "
                     "deleted resources.Collection resource ID: {}. "
                     "Error:{} ".format(shortkey, str(ex)))
        ajax_response_data = {'status': "error", 'message': str(ex)}
    finally:
        return JsonResponse(ajax_response_data)
def _process_uploaded_sqlite_file(user, resource, res_file, validate_files_dict,
                                  delete_existing_metadata=True):
    """Validate an uploaded file as an ODM2 sqlite database and extract metadata.

    On success the resource metadata is refreshed from the database file and
    any stale csv file is removed. On any failure the uploaded file is deleted
    and ``validate_files_dict`` is updated with ``are_files_valid=False`` and
    an error message.

    :param user: user performing the upload
    :param resource: target TimeSeries resource
    :param res_file: the just-uploaded ResourceFile
    :param validate_files_dict: dict mutated in place with validation outcome
    :param delete_existing_metadata: when True, wipe previously extracted
        metadata before re-extracting
    """
    # check if it is a sqlite file
    fl_ext = utils.get_resource_file_name_and_extension(res_file)[2]

    if fl_ext == '.sqlite':
        # get the file from iRODS to a temp directory
        fl_obj_name = utils.get_file_from_irods(res_file)
        validate_err_message = validate_odm2_db_file(fl_obj_name)
        if not validate_err_message:
            # first delete relevant existing metadata elements
            if delete_existing_metadata:
                TimeSeriesMetaData.objects.filter(id=resource.metadata.id).update(is_dirty=False)
                _delete_extracted_metadata(resource)
            extract_err_message = extract_metadata(resource, fl_obj_name)
            if extract_err_message:
                # delete the invalid file
                delete_resource_file_only(resource, res_file)
                # cleanup any extracted metadata
                _delete_extracted_metadata(resource)
                validate_files_dict['are_files_valid'] = False
                extract_err_message += "{}".format(FILE_UPLOAD_ERROR_MESSAGE)
                validate_files_dict['message'] = extract_err_message
            else:
                # set metadata is_dirty to False
                TimeSeriesMetaData.objects.filter(id=resource.metadata.id).update(is_dirty=False)
                # delete the csv file if it exists
                _delete_resource_file(resource, ".csv")
                utils.resource_modified(resource, user, overwrite_bag=False)
        else:
            # file validation failed
            # delete the invalid file just uploaded
            delete_resource_file_only(resource, res_file)
            validate_files_dict['are_files_valid'] = False
            validate_err_message += "{}".format(FILE_UPLOAD_ERROR_MESSAGE)
            validate_files_dict['message'] = validate_err_message

        # cleanup the temp file
        if os.path.exists(fl_obj_name):
            shutil.rmtree(os.path.dirname(fl_obj_name))
    else:
        # delete the invalid file
        delete_resource_file_only(resource, res_file)
        validate_files_dict['are_files_valid'] = False
        err_message = "The uploaded file not a sqlite file. {}"
        # bug fix: was 'err_message += err_message.format(...)', which doubled
        # the message and left the '{}' placeholder unfilled
        err_message = err_message.format(FILE_UPLOAD_ERROR_MESSAGE)
        validate_files_dict['message'] = err_message
def set_file_type(request, resource_id, file_id, hs_file_type, **kwargs):
    """This view function must be called using ajax call.

    Note: Response status code is always 200 (OK). Client needs check the
    the response 'status' key for success or failure.
    """
    res, authorized, _ = authorize(
        request, resource_id,
        needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
        raises_exception=False)

    file_type_map = {"GeoRaster": GeoRasterLogicalFile,
                     "NetCDF": NetCDFLogicalFile}
    response_data = {'status': 'error'}

    # guard clauses: permission, resource type, supported file type
    err_msg = None
    if not authorized:
        err_msg = "Permission denied"
    elif res.resource_type != "CompositeResource":
        err_msg = "File type can be set only for files in composite resource."
    elif hs_file_type not in file_type_map:
        err_msg = "Unsupported file type."
    if err_msg is not None:
        response_data['message'] = err_msg
        return JsonResponse(response_data, status=status.HTTP_200_OK)

    try:
        file_type_map[hs_file_type].set_file_type(resource=res, file_id=file_id,
                                                  user=request.user)
        resource_modified(res, request.user, overwrite_bag=False)
        response_data['message'] = ("File was successfully set to selected file type. "
                                    "Metadata extraction was successful.")
        response_data['status'] = 'success'
        response_data['spatial_coverage'] = get_coverage_data_dict(res)
        return JsonResponse(response_data, status=status.HTTP_200_OK)
    except ValidationError as ex:
        response_data['message'] = ex.message
        return JsonResponse(response_data, status=status.HTTP_200_OK)
def add_metadata_element(request, shortkey, element_name, *args, **kwargs):
    """Create a metadata element for a resource from validated POST data.

    Fires the pre_metadata_element_create signal; each receiver that returns
    'is_valid': True supplies an 'element_data_dict' used to create the element.
    'subject' is special-cased: its value is a comma-separated keyword list and
    replaces all existing subjects. Responds with JSON for ajax calls,
    otherwise redirects back to the referring page.
    """
    res, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)
    sender_resource = _get_resource_sender(element_name, res)
    handler_response = pre_metadata_element_create.send(sender=sender_resource,
                                                       element_name=element_name,
                                                       request=request)
    is_add_success = False
    for receiver, response in handler_response:
        if 'is_valid' in response:
            if response['is_valid']:
                element_data_dict = response['element_data_dict']
                if element_name == 'subject':
                    # subjects arrive as one comma-separated string; wipe and
                    # recreate the whole keyword set
                    keywords = [k.strip() for k in element_data_dict['value'].split(',')]
                    if res.metadata.subjects.all().count() > 0:
                        res.metadata.subjects.all().delete()
                    for kw in keywords:
                        res.metadata.create_element(element_name, value=kw)
                else:
                    # NOTE(review): 'element' from the last valid receiver is the
                    # one reported back in the ajax response below
                    element = res.metadata.create_element(element_name, **element_data_dict)
                is_add_success = True
                resource_modified(res, request.user)

    if request.is_ajax():
        if is_add_success:
            if res.metadata.has_all_required_elements():
                metadata_status = "Sufficient to make public"
            else:
                metadata_status = "Insufficient to make public"
            if element_name == 'subject':
                # subject creates many elements, so no single element_id to return
                ajax_response_data = {'status': 'success', 'element_name': element_name,
                                      'metadata_status': metadata_status}
            else:
                ajax_response_data = {'status': 'success', 'element_id': element.id,
                                      'element_name': element_name,
                                      'metadata_status': metadata_status}
            return HttpResponse(json.dumps(ajax_response_data))
        else:
            ajax_response_data = {'status': 'error'}
            return HttpResponse(json.dumps(ajax_response_data))

    if 'resource-mode' in request.POST:
        request.session['resource-mode'] = 'edit'

    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def update_science_metadata(pk, metadata, user):
    """
    Updates science metadata for a resource

    Args:
        pk: Unique HydroShare identifier for the resource for which science metadata needs to be
            updated.
        metadata: a list of dictionary items containing data for each metadata element that needs
            to be updated
        user: user who is updating metadata
        example metadata format:
        [
            {'title': {'value': 'Updated Resource Title'}},
            {'description': {'abstract': 'Updated Resource Abstract'}},
            {'date': {'type': 'valid', 'start_date': '1/26/2016', 'end_date': '12/31/2016'}},
            {'creator': {'name': 'John Smith', 'email': '*****@*****.**'}},
            {'creator': {'name': 'Lisa Molley', 'email': '*****@*****.**'}},
            {'contributor': {'name': 'Kelvin Marshal', 'email': '*****@*****.**',
                             'organization': 'Utah State University',
                             'profile_links': [{'type': 'yahooProfile',
                                                'url': 'http://yahoo.com/LH001'}]}},
            {'coverage': {'type': 'period', 'value': {'name': 'Name for period coverage',
                                                      'start': '1/1/2000',
                                                      'end': '12/12/2012'}}},
            {'coverage': {'type': 'point', 'value': {'name': 'Name for point coverage',
                                                     'east': '56.45678',
                                                     'north': '12.6789',
                                                     'units': 'decimal deg'}}},
            {'identifier': {'name': 'someIdentifier', 'url': "http://some.org/001"}},
            {'language': {'code': 'fre'}},
            {'relation': {'type': 'isPartOf', 'value': 'http://hydroshare.org/resource/001'}},
            {'rights': {'statement': 'This is the rights statement for this resource',
                        'url': 'http://rights.ord/001'}},
            {'source': {'derived_from': 'http://hydroshare.org/resource/0001'}},
            {'subject': {'value': 'sub-1'}},
            {'subject': {'value': 'sub-2'}},
        ]

    Returns:
    """
    resource = utils.get_resource_by_shortkey(pk)
    # delegate element-by-element updating to the metadata model
    resource.metadata.update(metadata, user)
    # regenerate the bag without overwriting an existing one
    utils.resource_modified(resource, user, overwrite_bag=False)

    # set to private if metadata has become non-compliant
    resource.update_public_and_discoverable()  # set to False if necessary
def migrate_namespace_for_source_and_relation(apps, schema_editor):
    """Regenerate metadata xml so 'Source'/'Relation' use the hsterms namespace.

    Data migration: moves the namespace for the 'Source' and 'Relation'
    metadata elements from 'dcterms' to 'hsterms' by regenerating the
    metadata xml of each resource that has either element.
    """
    log = logging.getLogger()

    # We can't import the BaseResource model directly as it may be a newer
    # version than this migration expects. We use the historical version.
    BaseResource = apps.get_model("hs_core", "BaseResource")
    for res in BaseResource.objects.all():
        # need to regenerate xml file only for those resources that have either
        # 'source' or 'relation' metadata. exists() asks the database directly
        # instead of len(qs.all()) which materialized every row just to count
        if res.metadata.sources.exists() or res.metadata.relations.exists():
            resource_modified(res, res.creator)
            log_msg = "Namespace for either 'source' or 'relation' metadata element " \
                      "was updated for resource with ID:{} and type:{}."
            log_msg = log_msg.format(res.short_id, res.resource_type)
            log.info(log_msg)
            # TODO: after debugging remove the print
            print(log_msg)
def migrate_namespace_for_source_and_relation(apps, schema_editor):
    """Regenerate metadata xml so 'Source'/'Relation' use the hsterms namespace.

    Data migration: moves the namespace for the 'Source' and 'Relation'
    metadata elements from 'dcterms' to 'hsterms' by regenerating the
    metadata xml of each resource that has either element.
    """
    log = logging.getLogger()

    # We can't import the BaseResource model directly as it may be a newer
    # version than this migration expects. We use the historical version.
    BaseResource = apps.get_model("hs_core", "BaseResource")
    for res in BaseResource.objects.all():
        # need to regenerate xml file only for those resources that have either
        # 'source' or 'relation' metadata. exists() asks the database directly
        # instead of len(qs.all()) which materialized every row just to count
        if res.metadata.sources.exists() or res.metadata.relations.exists():
            resource_modified(res, res.creator)
            log_msg = "Namespace for either 'source' or 'relation' metadata element " \
                      "was updated for resource with ID:{} and type:{}."
            log_msg = log_msg.format(res.short_id, res.resource_type)
            log.info(log_msg)
            # TODO: after debugging remove the print
            print(log_msg)
def delete(self, request, pk, pathname):
    """Delete the file at *pathname* from the resource identified by *pk*.

    Requires edit permission. Returns 403 if the resource type does not
    support folders and a folder path was given, 400 on a disallowed iRODS
    path, 404 if the file does not exist, otherwise 200 with the deleted
    file's name.
    """
    resource, _, user = view_utils.authorize(
        request, pk, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)

    if not resource.supports_folders and '/' in pathname:
        return Response("Resource type does not support folders",
                        status.HTTP_403_FORBIDDEN)

    try:
        view_utils.irods_path_is_allowed(pathname)  # check for hacking attempts
    except (ValidationError, SuspiciousFileOperation) as ex:
        # str(ex) instead of ex.message: BaseException.message was removed in Python 3
        return Response(str(ex), status=status.HTTP_400_BAD_REQUEST)

    try:
        hydroshare.delete_resource_file(pk, pathname, user)
    except ObjectDoesNotExist as ex:
        # matching file not found
        raise NotFound(detail=str(ex))

    # prepare response data
    response_data = {'resource_id': pk, 'file_name': pathname}
    resource_modified(resource, request.user, overwrite_bag=False)

    return Response(data=response_data, status=status.HTTP_200_OK)
def update_metadata_element(request, shortkey, element_name, element_id, *args, **kwargs):
    """Update one metadata element of a resource from validated POST data.

    Fires the pre_metadata_element_update signal; each receiver that returns
    'is_valid': True supplies an 'element_data_dict' applied to the element.
    Updating the title also syncs res.title and may demote a no-longer-valid
    public resource to private. Responds with JSON for ajax calls, otherwise
    redirects back to the referring page.
    """
    res, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)
    sender_resource = _get_resource_sender(element_name, res)
    handler_response = pre_metadata_element_update.send(sender=sender_resource,
                                                       element_name=element_name,
                                                       element_id=element_id,
                                                       request=request)
    is_update_success = False
    # NOTE(review): is_redirect is set below but not read anywhere in this
    # function — looks like leftover state; confirm against callers/history
    is_redirect = False
    for receiver, response in handler_response:
        if 'is_valid' in response:
            if response['is_valid']:
                element_data_dict = response['element_data_dict']
                res.metadata.update_element(element_name, element_id, **element_data_dict)
                if element_name == 'title':
                    # keep the denormalized resource title in sync with metadata
                    res.title = res.metadata.title.value
                    res.save()
                    if res.public:
                        # a title change can invalidate public status
                        if not res.can_be_public:
                            res.public = False
                            res.save()
                            is_redirect = True
                resource_modified(res, request.user)
                is_update_success = True

    if request.is_ajax():
        if is_update_success:
            if res.metadata.has_all_required_elements():
                metadata_status = "Sufficient to make public"
            else:
                metadata_status = "Insufficient to make public"
            ajax_response_data = {'status': 'success', 'element_name': element_name,
                                  'metadata_status': metadata_status}
            return HttpResponse(json.dumps(ajax_response_data))
        else:
            ajax_response_data = {'status': 'error'}
            return HttpResponse(json.dumps(ajax_response_data))

    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def migrate_tif_file(apps, schema_editor):
    """Create a vrt file from the tif file for each single-file RasterResource.

    Data migration: adds the generated vrt as a new resource file, deletes the
    stale bag so it will be regenerated, and logs success/failure per resource.
    """
    # create a vrt file from tif file for each of the Raster Resources
    log = logging.getLogger()
    istorage = IrodsStorage()
    for res in RasterResource.objects.all():
        try:
            if len(res.files.all()) == 1:
                res_file = res.files.all().first()
                vrt_file_path = create_vrt_file(res_file.resource_file)
                if os.path.isfile(vrt_file_path):
                    files = (UploadedFile(file=open(vrt_file_path, 'r'),
                                          name=os.path.basename(vrt_file_path)))
                    hydroshare.add_resource_files(res.short_id, files)
                    bag_name = 'bags/{res_id}.zip'.format(res_id=res.short_id)
                    if istorage.exists(bag_name):
                        # delete the resource bag as the old bag is not valid
                        istorage.delete(bag_name)
                        print("Deleted bag for resource ID:" + str(res.short_id))
                    resource_modified(res, res.creator)
                    log.info('Tif file conversion to VRT successful for resource:ID:{} '
                             'Title:{}'.format(res.short_id, res.metadata.title.value))
                else:
                    log.error('Tif file conversion to VRT unsuccessful for resource:ID:{} '
                              'Title:{}'.format(res.short_id, res.metadata.title.value))
                if os.path.exists(vrt_file_path):
                    shutil.rmtree(os.path.dirname(vrt_file_path))
        except Exception:
            # bug fix: was a bare 'except: pass' that silently swallowed every
            # error (including KeyboardInterrupt); log so migration failures
            # are visible while still continuing with the next resource
            log.exception('Tif to VRT migration failed for resource ID:{}'.format(res.short_id))
def delete(self, request, pk, pathname):
    """Delete the file at *pathname* from the resource identified by *pk*.

    Requires edit permission. Returns 403 if the resource type does not
    support folders and a folder path was given, 400 on a disallowed iRODS
    path, 404 if the file does not exist, otherwise 200 with the deleted
    file's name.
    """
    resource, _, user = view_utils.authorize(
        request, pk, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)

    if not resource.supports_folders and '/' in pathname:
        return Response("Resource type does not support folders",
                        status.HTTP_403_FORBIDDEN)

    try:
        view_utils.irods_path_is_allowed(pathname)  # check for hacking attempts
    except (ValidationError, SuspiciousFileOperation) as ex:
        # str(ex) instead of ex.message: BaseException.message was removed in Python 3
        return Response(str(ex), status=status.HTTP_400_BAD_REQUEST)

    try:
        hydroshare.delete_resource_file(pk, pathname, user)
    except ObjectDoesNotExist as ex:
        # matching file not found
        raise NotFound(detail=str(ex))

    # prepare response data
    response_data = {'resource_id': pk, 'file_name': pathname}
    resource_modified(resource, request.user, overwrite_bag=False)

    return Response(data=response_data, status=status.HTTP_200_OK)
def delete_resource_file(pk, filename_or_id, user, delete_logical_file=True):
    """
    Deletes an individual file from a HydroShare resource. If the file does not exist,
    the Exceptions.NotFound exception is raised.

    REST URL:  DELETE /resource/{pid}/files/(unknown)

    Parameters:
    :param pk: The unique HydroShare identifier for the resource from which the file will be
    deleted
    :param filename_or_id: Name of the file or id of the file to be deleted from the resource
    :param user: requesting user
    :param delete_logical_file: If True then if the ResourceFile object to be deleted is part
    of a LogicalFile object then the LogicalFile object will be deleted which deletes all
    associated ResourceFile objects and file type metadata objects.

    :returns:    The name or id of the file which was deleted

    Return Type:    string or integer

    Raises:
    Exceptions.NotAuthorized - The user is not authorized
    Exceptions.NotFound - The resource identified by pid does not exist or the file identified
    by file does not exist
    Exception.ServiceFailure - The service is unable to process the request

    Note:  This does not handle immutability as previously intended.
    """
    resource = utils.get_resource_by_shortkey(pk)
    res_cls = resource.__class__

    for f in ResourceFile.objects.filter(object_id=resource.id):
        # filter_condition is expected to be a module-level helper matching
        # either by file name or by id — not visible in this chunk; verify
        if filter_condition(filename_or_id, f):
            if delete_logical_file:
                if f.has_logical_file and not f.logical_file.is_fileset:
                    # delete logical file if any resource file that belongs to logical file
                    # gets deleted for any logical file other than fileset logical file
                    # logical_delete() calls this function (delete_resource_file())
                    # to delete each of its contained ResourceFile objects
                    f.logical_file.logical_delete(user)
                    return filename_or_id

            signals.pre_delete_file_from_resource.send(sender=res_cls, file=f,
                                                       resource=resource, user=user)

            file_name = delete_resource_file_only(resource, f)

            # This presumes that the file is no longer in django
            delete_format_metadata_after_delete_file(resource, file_name)

            signals.post_delete_file_from_resource.send(sender=res_cls, resource=resource)

            # set to private if necessary -- AFTER post_delete_file handling
            resource.update_public_and_discoverable()  # set to False if necessary

            # generate bag
            utils.resource_modified(resource, user, overwrite_bag=False)

            return filename_or_id

    # if execution gets here, file was not found
    raise ObjectDoesNotExist(str.format("resource {}, file {} not found",
                                        resource.short_id, filename_or_id))
def migrate_tif_file(apps, schema_editor):
    """Regenerate vrt files and band metadata for all RasterResources.

    Data migration (Python 2 — note the print statements and e.message):
    copies each resource's files to a temp dir, rebuilds the vrt via
    gdal_translate for single-tif resources, re-extracts band metadata, and
    deletes stale bags so they are regenerated. Per-resource failures are
    logged and collected into summary lists printed at the end.
    """
    log = logging.getLogger()
    istorage = IrodsStorage()
    copy_res_fail = []
    vrt_update_fail = []
    vrt_update_success = []
    meta_update_fail = []
    meta_update_success = []

    # start migration for each raster resource that has raster files
    for res in RasterResource.objects.all():
        if res.files.all():
            # copy all the resource files to temp dir
            try:
                temp_dir = tempfile.mkdtemp()
                for res_file in res.files.all():
                    shutil.copy(res_file.resource_file.file.name,
                                os.path.join(temp_dir,
                                             os.path.basename(res_file.resource_file.name)))

                vrt_file_path = [os.path.join(temp_dir, f) for f in os.listdir(temp_dir)
                                 if '.vrt' == f[-4:]].pop()
            except Exception as e:
                # e.message is Python-2-only; this migration predates py3
                log.exception(e.message)
                copy_res_fail.append('{}:{}'.format(res.short_id, res.metadata.title.value))
                continue

            # update vrt file if the raster resource that has a single tif file
            try:
                # exactly two files in temp dir means one tif plus one vrt
                if len(os.listdir(temp_dir)) == 2:
                    # create new vrt file
                    tif_file_path = [os.path.join(temp_dir, f) for f in os.listdir(temp_dir)
                                     if '.tif' == f[-4:]].pop()
                    with open(os.devnull, 'w') as fp:
                        subprocess.Popen(['gdal_translate', '-of', 'VRT',
                                          tif_file_path, vrt_file_path],
                                         stdout=fp,
                                         stderr=fp).wait()  # remember to add .wait()

                    # modify the vrt file contents
                    tree = ET.parse(vrt_file_path)
                    root = tree.getroot()
                    for element in root.iter('SourceFilename'):
                        element.attrib['relativeToVRT'] = '1'
                    tree.write(vrt_file_path)

                    # delete vrt res file
                    for f in res.files.all():
                        if 'vrt' == f.resource_file.name[-3:]:
                            f.resource_file.delete()
                            f.delete()

                    # add new vrt file to resource
                    new_file = UploadedFile(file=open(vrt_file_path, 'r'),
                                            name=os.path.basename(vrt_file_path))
                    hydroshare.add_resource_files(res.short_id, new_file)

                    # update the bag
                    bag_name = 'bags/{res_id}.zip'.format(res_id=res.short_id)
                    if istorage.exists(bag_name):
                        # delete the resource bag as the old bag is not valid
                        istorage.delete(bag_name)
                    resource_modified(res, res.creator)
                    vrt_update_success.append('{}:{}'.format(res.short_id,
                                                             res.metadata.title.value))
            except Exception as e:
                log.exception(e.message)
                vrt_update_fail.append('{}:{}'.format(res.short_id,
                                                      res.metadata.title.value))

            # update the metadata for the band information of all the raster resources
            try:
                meta_updated = False

                # extract meta
                # NOTE(review): chdir is process-global state; presumably gdal
                # resolves the vrt's relative source path from the cwd — confirm
                ori_dir = os.getcwd()
                os.chdir(temp_dir)
                res_md_dict = raster_meta_extract.get_raster_meta_dict(vrt_file_path)
                os.chdir(ori_dir)
                shutil.rmtree(temp_dir)

                # update band information metadata in django
                if res_md_dict['band_info']:
                    for i, band_meta in res_md_dict['band_info'].items():
                        band_obj = res.metadata.bandInformation.filter(
                            name='Band_{}'.format(i)).first()
                        if band_obj:
                            res.metadata.update_element(
                                'bandInformation',
                                band_obj.id,
                                maximumValue=band_meta['maximumValue'],
                                minimumValue=band_meta['minimumValue'],
                                noDataValue=band_meta['noDataValue'],
                            )
                            meta_updated = True

                # update the bag if meta is updated
                if meta_updated:
                    bag_name = 'bags/{res_id}.zip'.format(res_id=res.short_id)
                    if istorage.exists(bag_name):
                        # delete the resource bag as the old bag is not valid
                        istorage.delete(bag_name)
                    resource_modified(res, res.creator)
                    meta_update_success.append('{}:{}'.format(res.short_id,
                                                              res.metadata.title.value))
            except Exception as e:
                log.exception(e.message)
                meta_update_fail.append('{}:{}'.format(res.short_id,
                                                       res.metadata.title.value))

    # Print migration results
    print 'Copy resource to temp folder failure: Number: {} List: {}'.format(
        len(copy_res_fail), copy_res_fail)
    print 'VRT file update success: Number: {} List{}'.format(
        len(vrt_update_success), vrt_update_success)
    print 'VRT file update fail: Number: {} List{}'.format(
        len(vrt_update_fail), vrt_update_fail)
    print 'Meta update success: Number: {} List {}'.format(
        len(meta_update_success), meta_update_success)
    print 'Meta update fail: Number: {} List {}'.format(
        len(meta_update_fail), meta_update_fail)
def publish_resource(user, pk):
    """
    Formally publishes a resource in HydroShare. Triggers the creation of a DOI for the
    resource, and triggers the exposure of the resource to the HydroShare DataONE Member Node.
    The user must be an owner of a resource or an administrator to perform this action.

    Parameters:
        user - requesting user to publish the resource who must be one of the owners of
               the resource
        pk - Unique HydroShare identifier for the resource to be formally published.

    Returns:    The id of the resource that was published

    Return Type:    string

    Raises:
    Exceptions.NotAuthorized - The user is not authorized
    Exceptions.NotFound - The resource identified by pid does not exist
    Exception.ServiceFailure - The service is unable to process the request and other
    general exceptions

    Note:  This is different than just giving public access to a resource via access control rule
    """
    resource = utils.get_resource_by_shortkey(pk)

    # TODO: whether a resource can be published is not considered in can_be_published
    # TODO: can_be_published is currently an alias for can_be_public_or_discoverable
    if not resource.can_be_published:
        raise ValidationError("This resource cannot be published since it does not have required "
                              "metadata or content files, or this resource contains referenced "
                              "content, or this resource type is not allowed for publication.")

    # append pending to the doi field to indicate DOI is not activated yet. Upon successful
    # activation, "pending" will be removed from DOI field
    resource.doi = get_resource_doi(pk, 'pending')
    resource.save()

    response = deposit_res_metadata_with_crossref(resource)
    # idiom fix: 'x != y' instead of 'not x == y'
    if response.status_code != status.HTTP_200_OK:
        # resource metadata deposition failed from CrossRef - set failure flag to be retried in a
        # crontab celery task
        resource.doi = get_resource_doi(pk, 'failure')
        resource.save()

    resource.set_public(True)  # also sets discoverable to True
    resource.raccess.immutable = True
    resource.raccess.shareable = False
    resource.raccess.published = True
    resource.raccess.save()

    # change "Publisher" element of science metadata to CUAHSI
    md_args = {'name': 'Consortium of Universities for the Advancement of Hydrologic Science, '
                       'Inc. (CUAHSI)',
               'url': 'https://www.cuahsi.org'}
    resource.metadata.create_element('Publisher', **md_args)

    # create published date
    resource.metadata.create_element('date', type='published', start_date=resource.updated)

    # add doi to "Identifier" element of science metadata
    md_args = {'name': 'doi',
               'url': get_activated_doi(resource.doi)}
    resource.metadata.create_element('Identifier', **md_args)

    utils.resource_modified(resource, user, overwrite_bag=False)

    return pk
def post(self, request, pk, pathname):
    """
    Add a file to a resource.

    :param request:
    :param pk: Primary key of the resource (i.e. resource short ID)
    :param pathname: the path to the containing folder in the folder hierarchy
    :return:

    Leaving out pathname in the URI calls a different class function in
    ResourceFileListCreate that stores in the root directory instead.
    """
    resource, _, _ = view_utils.authorize(
        request, pk, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)

    # materialize the dict view so len() and indexing also work on Python 3
    # (request.FILES.values() is a view there, not a list)
    resource_files = list(request.FILES.values())
    if len(resource_files) == 0:
        error_msg = {'file': 'No file was found to add to the resource.'}
        raise ValidationError(detail=error_msg)
    elif len(resource_files) > 1:
        error_msg = {'file': 'More than one file was found. Only one file can be '
                             'added at a time.'}
        raise ValidationError(detail=error_msg)

    # TODO: (Brian) I know there has been some discussion when to validate a file
    # I agree that we should not validate and extract metadata as part of the file add api
    # Once we have a decision, I will change this implementation accordingly. In that case
    # we have to implement additional rest endpoints for file validation and extraction.
    try:
        hydroshare.utils.resource_file_add_pre_process(resource=resource,
                                                       files=[resource_files[0]],
                                                       user=request.user,
                                                       extract_metadata=True)
    except (hydroshare.utils.ResourceFileSizeException,
            hydroshare.utils.ResourceFileValidationException,
            Exception) as ex:
        # str(ex) instead of ex.message: 'message' was removed in Python 3
        error_msg = {'file': 'Adding file to resource failed. %s' % str(ex)}
        raise ValidationError(detail=error_msg)

    try:
        res_file_objects = hydroshare.utils.resource_file_add_process(
            resource=resource, files=[resource_files[0]], folder=pathname,
            user=request.user, extract_metadata=True)
    except (hydroshare.utils.ResourceFileValidationException, Exception) as ex:
        error_msg = {'file': 'Adding file to resource failed. %s' % str(ex)}
        raise ValidationError(detail=error_msg)

    # prepare response data
    file_name = os.path.basename(res_file_objects[0].resource_file.name)
    response_data = {'resource_id': pk, 'file_name': file_name}
    resource_modified(resource, request.user, overwrite_bag=False)

    return Response(data=response_data, status=status.HTTP_201_CREATED)
# the raster files can be found by Gdal for metadata extraction # when "relativeToVRT" parameter is set as "0" ori_dir = os.getcwd() os.chdir(temp_dir) res_md_dict = {} res_md_dict = raster_meta_extract.get_raster_meta_dict(vrt_file_path) os.chdir(ori_dir) shutil.rmtree(temp_dir) # update original coverage information for datum and coordinate string in django if res_md_dict['spatial_coverage_info']['original_coverage_info'].\ get('datum', None): res.metadata.originalCoverage.delete() v = {'value': res_md_dict['spatial_coverage_info']['original_coverage_info']} res.metadata.create_element('OriginalCoverage', **v) meta_updated = True # update the bag if meta is updated if meta_updated: resource_modified(res, res.creator) meta_update_success.append('{}:{}'.format(res.short_id, res.metadata.title.value)) except Exception as e: if os.path.isdir(temp_dir): shutil.rmtree(temp_dir) meta_update_fail.append('{}:{}'.format(res.short_id, res.metadata.title.value)) print e.message print res_md_dict print 'Copy Fail Number: {} List: {}'.format(len(copy_res_fail), copy_res_fail) print 'Success Number: {} List {}'.format(len(meta_update_success), meta_update_success) print 'Update Fail Number: {} List {}'.format(len(meta_update_fail), meta_update_fail)
def delete_resource_file(pk, filename_or_id, user):
    """Delete a single file from the resource identified by *pk*.

    The file may be identified either by its ResourceFile id (when
    *filename_or_id* parses as an integer) or by its base file name. After
    deletion, the matching 'format' metadata element is removed if no other
    file shares the deleted file's extension, the resource is made private if
    it no longer qualifies as public, and the bag is regenerated.

    :param pk: short id of the resource
    :param filename_or_id: base name of the file, or the ResourceFile id
    :param user: user requesting the deletion
    :return: *filename_or_id* of the deleted file
    :raises ObjectDoesNotExist: if no matching file is found in the resource
    """
    resource = utils.get_resource_by_shortkey(pk)
    res_cls = resource.__class__

    # choose the matching strategy: integer id vs base file name
    try:
        target_id = int(filename_or_id)

        def matches(res_file):
            return res_file.id == target_id
    except ValueError:
        def matches(res_file):
            return os.path.basename(res_file.resource_file.name) == filename_or_id

    deleted = False
    for res_file in ResourceFile.objects.filter(object_id=resource.id):
        if not matches(res_file):
            continue

        # notify listeners before the file goes away
        signals.pre_delete_file_from_resource.send(sender=res_cls, file=res_file,
                                                   resource=resource)

        file_name = res_file.resource_file.name
        res_file.resource_file.delete()
        res_file.delete()

        # if there is no other resource file with the same extension as the
        # file just deleted, drop the matching format metadata element
        mime_type = utils.get_file_mime_type(file_name)
        extension = os.path.splitext(file_name)[1]
        remaining_extensions = [os.path.splitext(f.resource_file.name)[1]
                                for f in resource.files.all()]
        if extension not in remaining_extensions:
            format_element = resource.metadata.formats.filter(value=mime_type).first()
            if format_element:
                resource.metadata.delete_element(format_element.term, format_element.id)

        deleted = True
        break

    if not deleted:
        raise ObjectDoesNotExist(filename_or_id)

    # losing a required file may disqualify the resource from being public
    if resource.public:
        if not resource.can_be_public:
            resource.public = False
            resource.save()

    # generate bag
    utils.resource_modified(resource, user)

    return filename_or_id
def update_collection(request, shortkey, *args, **kwargs):
    """
    Update the set of resources contained in a collection resource.

    The POST request should contain a list of resource ids ('resource_id_list')
    and an optional 'update_type' parameter with value 'set', 'add' or
    'remove', which are three different modes to update the collection. If no
    'update_type' parameter is provided, 'set' is used by default.

    To add a resource to a collection, the user needs permission on both the
    collection and the resources being added:
    - for the collection: at least Edit permission;
    - for each resource being added, one of the following must hold:
      1) the user has at least View permission and the resource is Shareable;
      2) the user is a resource owner.

    :param shortkey: id of the collection resource to which resources are
        to be added.
    :return: JsonResponse with keys 'status', 'msg', 'metadata_status' and
        'new_coverage_list'.
    """
    status = "success"
    msg = ""
    metadata_status = "Insufficient to make public"
    new_coverage_list = []
    hasPart = "hasPart"  # relation type recorded on the collection's metadata
    try:
        # all membership/metadata changes succeed or roll back together
        with transaction.atomic():
            collection_res_obj, is_authorized, user \
                = authorize(request, shortkey,
                            needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)

            if collection_res_obj.resource_type.lower() != "collectionresource":
                raise Exception("Resource {0} is not a collection resource.".format(shortkey))

            # get 'resource_id_list' list from POST
            updated_contained_res_id_list = request.POST.getlist("resource_id_list")

            # get optional 'update_type' parameter:
            # 1) "set" (default): set collection content to the list,
            #    following code will find out which resources are newly added,
            #    removed and unchanged
            # 2) 'add': add resources in the list to collection
            #    adding a resource that is already in the collection will raise error
            # 3) 'remove': remove resources in the list from collection
            #    removing a resource that is not in collection will raise error
            update_type = request.POST.get("update_type", 'set').lower()
            if update_type not in ["set", "add", "remove"]:
                raise Exception("Invalid value of 'update_type' parameter")

            # reject duplicate ids up front
            if len(updated_contained_res_id_list) > len(set(updated_contained_res_id_list)):
                raise Exception("Duplicate resources exist in list 'resource_id_list'")

            for updated_contained_res_id in updated_contained_res_id_list:
                # avoid adding collection itself
                if updated_contained_res_id == shortkey:
                    raise Exception("Can not contain collection itself.")

            # current contained res
            res_id_list_current_collection = \
                [res.short_id for res in collection_res_obj.resources.all()]

            # res to remove
            res_id_list_remove = []
            if update_type == "remove":
                res_id_list_remove = updated_contained_res_id_list
                for res_id_remove in res_id_list_remove:
                    if res_id_remove not in res_id_list_current_collection:
                        raise Exception('Cannot remove resource {0} as it '
                                        'is not currently contained '
                                        'in collection'.format(res_id_remove))
            elif update_type == "set":
                # 'set' removes whatever is currently contained but absent
                # from the submitted list
                for res_id_remove in res_id_list_current_collection:
                    if res_id_remove not in updated_contained_res_id_list:
                        res_id_list_remove.append(res_id_remove)

            for res_id_remove in res_id_list_remove:
                # user with Edit permission over this collection can remove
                # any resource from it
                res_obj_remove = get_resource_by_shortkey(res_id_remove)
                collection_res_obj.resources.remove(res_obj_remove)

                # change "Relation" metadata in collection
                value = RES_LANDING_PAGE_URL_TEMPLATE.format(res_id_remove)
                add_or_remove_relation_metadata(add=False,
                                                target_res_obj=collection_res_obj,
                                                relation_type=hasPart,
                                                relation_value=value,
                                                set_res_modified=False)

            # res to add
            res_id_list_add = []
            if update_type == "add":
                res_id_list_add = updated_contained_res_id_list
                for res_id_add in res_id_list_add:
                    if res_id_add in res_id_list_current_collection:
                        raise Exception('Cannot add resource {0} as it '
                                        'is already contained in collection'.format(res_id_add))
            elif update_type == "set":
                # 'set' adds whatever is in the submitted list but not
                # currently contained
                for res_id_add in updated_contained_res_id_list:
                    if res_id_add not in res_id_list_current_collection:
                        res_id_list_add.append(res_id_add)

            for res_id_add in res_id_list_add:
                # check authorization for all new resources being added to the
                # collection; the requesting user should at least have metadata
                # view permission for each of the new resources to be added to
                # the collection
                res_to_add, _, _ \
                    = authorize(request, res_id_add,
                                needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)

                # the resources being added should be 'Shareable'
                # or owned by the current user
                is_shareable = res_to_add.raccess.shareable
                is_owner = res_to_add.raccess.owners.filter(pk=user.pk).exists()
                if not is_shareable and not is_owner:
                    raise Exception('Only resource owner can add a non-shareable '
                                    'resource to a collection ')

                # add this new res to collection
                res_obj_add = get_resource_by_shortkey(res_id_add)
                collection_res_obj.resources.add(res_obj_add)

                # change "Relation" metadata in collection
                value = RES_LANDING_PAGE_URL_TEMPLATE.format(res_id_add)
                add_or_remove_relation_metadata(add=True,
                                                target_res_obj=collection_res_obj,
                                                relation_type=hasPart,
                                                relation_value=value,
                                                set_res_modified=False)

            if collection_res_obj.can_be_public_or_discoverable:
                metadata_status = "Sufficient to make public"

            # refresh derived state: coverages, the contained-resource CSV,
            # and the resource modification timestamp (bag left stale on purpose)
            new_coverage_list = _update_collection_coverages(collection_res_obj)

            update_collection_list_csv(collection_res_obj)

            resource_modified(collection_res_obj, user, overwrite_bag=False)

    except Exception as ex:
        err_msg = "update_collection: {0} ; username: {1}; collection_id: {2} ."
        logger.error(err_msg.format(ex.message,
                                    request.user.username
                                    if request.user.is_authenticated() else "anonymous",
                                    shortkey))
        status = "error"
        msg = ex.message
    finally:
        # NOTE: this view always answers with JSON; errors are reported in
        # 'status'/'msg' rather than by letting an exception propagate
        ajax_response_data = \
            {'status': status, 'msg': msg,
             'metadata_status': metadata_status,
             'new_coverage_list': new_coverage_list}
        return JsonResponse(ajax_response_data)
def netcdf_pre_add_files_to_resource(sender, **kwargs):
    """
    Signal receiver run before files are added to a NetCDF resource.

    Validates that exactly one file is being uploaded, and if it parses as a
    netCDF4.Dataset: wipes the resource's existing files, re-extracts Dublin
    Core and NetCDF-specific metadata from the new file, rebuilds the metadata
    elements, and appends a generated ncdump header text file to the upload
    list. On a parse failure, flags the upload as invalid via the
    'validate_files' dict.

    Expected kwargs (set by the sender): 'resource', 'files', 'validate_files',
    'source_names', 'user'.
    """
    nc_res = kwargs['resource']
    files = kwargs['files']
    validate_files_dict = kwargs['validate_files']
    source_names = kwargs['source_names']

    if __debug__:
        assert(isinstance(source_names, list))

    if len(files) > 1:
        # file number validation
        validate_files_dict['are_files_valid'] = False
        validate_files_dict['message'] = 'Only one file can be uploaded.'

    file_selected = False
    in_file_name = ''
    nc_file_name = ''
    if files:
        # direct upload: use the uploaded file's temp path
        file_selected = True
        in_file_name = files[0].file.name
        nc_file_name = os.path.splitext(files[0].name)[0]
    elif source_names:
        # federated (iRODS) source: pull a local temp copy to read from
        nc_file_name = os.path.splitext(os.path.basename(source_names[0]))[0]
        ref_tmpfiles = utils.get_fed_zone_files(source_names)
        if ref_tmpfiles:
            in_file_name = ref_tmpfiles[0]
            file_selected = True

    if file_selected and in_file_name:
        # file type validation and existing metadata update and create new
        # ncdump text file
        nc_dataset = nc_utils.get_nc_dataset(in_file_name)
        if isinstance(nc_dataset, netCDF4.Dataset):
            # delete all existing resource files and related metadata
            for f in ResourceFile.objects.filter(object_id=nc_res.id):
                delete_resource_file_only(nc_res, f)

            # update resource modification info
            user = kwargs['user']
            utils.resource_modified(nc_res, user, overwrite_bag=False)

            # extract metadata
            res_dublin_core_meta, res_type_specific_meta = nc_meta.get_nc_meta_dict(in_file_name)

            # update title info (delete-then-create: title is single-valued)
            if res_dublin_core_meta.get('title'):
                if nc_res.metadata.title:
                    nc_res.metadata.title.delete()
                nc_res.metadata.create_element('title', value=res_dublin_core_meta['title'])

            # update description info
            if res_dublin_core_meta.get('description'):
                if nc_res.metadata.description:
                    nc_res.metadata.description.delete()
                nc_res.metadata.create_element('description',
                                               abstract=res_dublin_core_meta.get('description'))

            # update creator info
            if res_dublin_core_meta.get('creator_name'):
                name = res_dublin_core_meta.get('creator_name')
                email = res_dublin_core_meta.get('creator_email', '')
                url = res_dublin_core_meta.get('creator_url', '')
                arguments = dict(name=name, email=email, homepage=url)
                creator = nc_res.metadata.creators.all().filter(name=name).first()
                if creator:
                    # keep the creator's position in the ordered list; a
                    # creator at order 1 is left untouched
                    order = creator.order
                    if order != 1:
                        creator.delete()
                        arguments['order'] = order
                        nc_res.metadata.create_element('creator', **arguments)
                else:
                    nc_res.metadata.create_element('creator', **arguments)

            # update contributor info (comma-separated names; skip duplicates)
            if res_dublin_core_meta.get('contributor_name'):
                name_list = res_dublin_core_meta['contributor_name'].split(',')
                existing_contributor_names = [contributor.name
                                              for contributor in
                                              nc_res.metadata.contributors.all()]
                for name in name_list:
                    if name not in existing_contributor_names:
                        nc_res.metadata.create_element('contributor', name=name)

            # update subject info (comma-separated keywords; skip duplicates)
            if res_dublin_core_meta.get('subject'):
                keywords = res_dublin_core_meta['subject'].split(',')
                existing_keywords = [subject.value
                                     for subject in nc_res.metadata.subjects.all()]
                for keyword in keywords:
                    if keyword not in existing_keywords:
                        nc_res.metadata.create_element('subject', value=keyword)

            # update source (replace all existing sources)
            if res_dublin_core_meta.get('source'):
                for source in nc_res.metadata.sources.all():
                    source.delete()
                nc_res.metadata.create_element('source',
                                               derived_from=res_dublin_core_meta.get('source'))

            # update license element: split the raw rights text into a URL
            # (first http(s) link found) and the remaining statement
            if res_dublin_core_meta.get('rights'):
                raw_info = res_dublin_core_meta.get('rights')
                b = re.search("(?P<url>https?://[^\s]+)", raw_info)
                url = b.group('url') if b else ''
                statement = raw_info.replace(url, '') if url else raw_info
                if nc_res.metadata.rights:
                    nc_res.metadata.rights.delete()
                nc_res.metadata.create_element('rights', statement=statement, url=url)

            # update relation (replace all 'cites' relations)
            if res_dublin_core_meta.get('references'):
                nc_res.metadata.relations.filter(type='cites').all().delete()
                nc_res.metadata.create_element('relation', type='cites',
                                               value=res_dublin_core_meta['references'])

            # update box info (all coverages are cleared even when the new
            # file provides none)
            nc_res.metadata.coverages.all().delete()
            if res_dublin_core_meta.get('box'):
                nc_res.metadata.create_element('coverage', type='box',
                                               value=res_dublin_core_meta['box'])

            # update period info
            if res_dublin_core_meta.get('period'):
                nc_res.metadata.create_element('coverage', type='period',
                                               value=res_dublin_core_meta['period'])

            # update variable info (rebuilt entirely from the new file)
            nc_res.metadata.variables.all().delete()
            for var_info in res_type_specific_meta.values():
                nc_res.metadata.create_element('variable',
                                               name=var_info['name'],
                                               unit=var_info['unit'],
                                               type=var_info['type'],
                                               shape=var_info['shape'],
                                               missing_value=var_info['missing_value'],
                                               descriptive_name=var_info['descriptive_name'],
                                               method=var_info['method'])

            # update the original spatial coverage meta
            nc_res.metadata.ori_coverage.all().delete()
            if res_dublin_core_meta.get('original-box'):
                if res_dublin_core_meta.get('projection-info'):
                    nc_res.metadata.create_element(
                        'originalcoverage',
                        value=res_dublin_core_meta['original-box'],
                        projection_string_type=res_dublin_core_meta['projection-info']['type'],
                        projection_string_text=res_dublin_core_meta['projection-info']['text'],
                        datum=res_dublin_core_meta['projection-info']['datum'])
                else:
                    nc_res.metadata.create_element('originalcoverage',
                                                   value=res_dublin_core_meta['original-box'])

            # create the ncdump text file and queue it for upload alongside
            # the .nc file (the open file handle is handed to UploadedFile,
            # which takes ownership of it)
            dump_file = create_header_info_txt_file(in_file_name, nc_file_name)
            dump_file_name = nc_file_name + '_header_info.txt'
            uploaded_file = UploadedFile(file=open(dump_file), name=dump_file_name)
            files.append(uploaded_file)
        else:
            validate_files_dict['are_files_valid'] = False
            validate_files_dict['message'] = 'Please check if the uploaded file is ' \
                                             'invalid NetCDF format.'

    # clean up the temp copy pulled from the federated zone, if any
    if source_names and in_file_name:
        shutil.rmtree(os.path.dirname(in_file_name))
def delete_metadata_element(request, shortkey, element_name, element_id, *args, **kwargs):
    """
    Delete a single metadata element from a resource and redirect back to
    the referring page in edit mode.

    :param shortkey: short id of the resource being edited
    :param element_name: name of the metadata element type (e.g. 'coverage')
    :param element_id: id of the specific element instance to delete
    :return: HttpResponseRedirect to the HTTP referer
    """
    # use the same permission idiom as the other views in this module
    # (authorize(..., needed_permission=...) rather than the legacy
    # edit/full/superuser keyword flags); EDIT is required to delete metadata
    res, _, _ = authorize(request, shortkey,
                          needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
    res.metadata.delete_element(element_name, element_id)
    resource_modified(res, request.user)
    request.session['resource-mode'] = 'edit'
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def put(self, request, pk):
    """
    Update a resource's science metadata from an uploaded resourcemetadata.xml.

    Expects exactly one uploaded file named 'resourcemetadata.xml' whose
    content type is in self.ACCEPT_FORMATS. The file is staged in a temporary
    fake-bag directory together with the resource's existing resource map so
    that GenericResourceMeta can read and validate it, then written back onto
    the resource.

    :param pk: short id of the resource to update
    :return: Response with the resource id and HTTP 202 on success
    :raises PermissionDenied: if the requesting user lacks EDIT permission
    :raises ValidationError: on a missing/extra/misnamed/mistyped upload or a
        metadata deserialization failure
    """
    # Update science metadata based on resourcemetadata.xml uploaded
    resource, authorized, user = view_utils.authorize(
        request, pk,
        needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
        raises_exception=False)
    if not authorized:
        raise PermissionDenied()

    files = request.FILES.values()
    if len(files) == 0:
        error_msg = {'file': 'No resourcemetadata.xml file was found to update resource '
                             'metadata.'}
        raise ValidationError(detail=error_msg)
    elif len(files) > 1:
        error_msg = {'file': ('More than one file was found. Only one file, named '
                              'resourcemetadata.xml, '
                              'can be used to update resource metadata.')}
        raise ValidationError(detail=error_msg)

    scimeta = files[0]
    if scimeta.content_type not in self.ACCEPT_FORMATS:
        error_msg = {'file': ("Uploaded file has content type {t}, "
                              "but only these types are accepted: {e}.").format(
            t=scimeta.content_type, e=",".join(self.ACCEPT_FORMATS))}
        raise ValidationError(detail=error_msg)
    expect = 'resourcemetadata.xml'
    if scimeta.name != expect:
        error_msg = {'file': "Uploaded file has name {n}, but expected {e}.".format(
            n=scimeta.name, e=expect)}
        raise ValidationError(detail=error_msg)

    # Temp directory to store resourcemetadata.xml
    tmp_dir = tempfile.mkdtemp()
    try:
        # Fake the bag structure so that
        # GenericResourceMeta.read_metadata_from_resource_bag
        # can read and validate the system and science metadata for us.
        bag_data_path = os.path.join(tmp_dir, 'data')
        os.mkdir(bag_data_path)
        # Copy new science metadata to bag data path
        scimeta_path = os.path.join(bag_data_path, 'resourcemetadata.xml')
        shutil.copy(scimeta.temporary_file_path(), scimeta_path)
        # Copy existing resource map to bag data path
        # (use a file-like object as the file may be in iRODS, so we can't
        # just copy it to a local path)
        resmeta_path = os.path.join(bag_data_path, 'resourcemap.xml')
        with open(resmeta_path, 'wb') as resmeta:
            storage = get_file_storage()
            resmeta_irods = storage.open(AbstractResource.sysmeta_path(pk))
            shutil.copyfileobj(resmeta_irods, resmeta)
            resmeta_irods.close()

        try:
            # Read resource system and science metadata
            domain = Site.objects.get_current().domain
            rm = GenericResourceMeta.read_metadata_from_resource_bag(
                tmp_dir, hydroshare_host=domain)
            # Update resource metadata
            rm.write_metadata_to_resource(resource, update_title=True, update_keywords=True)
            create_bag_files(resource)
        except HsDeserializationDependencyException as e:
            # the metadata references another resource that could not be
            # resolved -- surface as a validation error
            msg = ("HsDeserializationDependencyException encountered when updating "
                   "science metadata for resource {pk}; depedent resource was {dep}.")
            msg = msg.format(pk=pk, dep=e.dependency_resource_id)
            logger.error(msg)
            raise ValidationError(detail=msg)
        except HsDeserializationException as e:
            raise ValidationError(detail=e.message)

        resource_modified(resource, request.user, overwrite_bag=False)

        return Response(data={'resource_id': pk}, status=status.HTTP_202_ACCEPTED)
    finally:
        # always remove the temporary fake-bag directory
        shutil.rmtree(tmp_dir)