Example #1
def overwrite_metadata(original_md: Metadata, custom_md: Metadata,
                       editor_form):
    """ Overwrites the original data with the custom date

    Args:
        original_md (Metadata): The original Metadata object
        custom_md (Metadata): The custom Metadata object
        editor_form: The editor form which holds additional data
    Returns:
        nothing
    """
    original_md.title = custom_md.title
    original_md.abstract = custom_md.abstract
    original_md.access_constraints = custom_md.access_constraints
    # we need the metadata_url to reset dataset metadata records
    # original_md.metadata_url = custom_md.metadata_url
    original_md.licence = custom_md.licence
    # get db objects from values

    # Keyword updating
    keywords = editor_form.cleaned_data["keywords"]
    original_md.keywords.clear()
    for kw in keywords:
        keyword = Keyword.objects.get_or_create(keyword=kw)[0]
        original_md.keywords.add(keyword)

    # Language updating
    original_md.language_code = editor_form.cleaned_data["language_code"]

    # Categories updating
    # Categories are provided as ids to prevent language-related conflicts
    try:
        categories = editor_form.cleaned_data["categories"]
        original_md.categories.clear()
        for category in categories:
            original_md.categories.add(category)
    except KeyError:
        # No "categories" key in the form data; fall back to an empty list so
        # the subelement update below does not raise a NameError
        categories = []

    # Categories are inherited by subelements
    subelements = (original_md.get_described_element()
                   .get_subelements()
                   .select_related('metadata'))
    for subelement in subelements:
        subelement.metadata.categories.clear()
        for category in categories:
            subelement.metadata.categories.add(category)

    # change capabilities document so that all sensitive elements (links) are proxied
    if original_md.use_proxy_uri != custom_md.use_proxy_uri:
        if custom_md.use_proxy_uri == 'on':
            original_md.set_proxy(True)
        else:
            original_md.set_proxy(False)

    # save metadata
    original_md.is_custom = True
    original_md.save()

    if original_md.is_dataset_metadata:
        overwrite_dataset_metadata_document(original_md)
    else:
        overwrite_capabilities_document(original_md)
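
For context, here is a minimal usage sketch of how overwrite_metadata() might be called from a Django view once the editor form validates. The view name edit_metadata, the form class MetadataEditorForm, the build_custom_metadata() helper and the URL name "metadata-detail" are assumptions for illustration only; Metadata and overwrite_metadata() are the objects used in the example above.

from django.shortcuts import get_object_or_404, redirect

def edit_metadata(request, metadata_id):
    # Hypothetical view wiring; only Metadata and overwrite_metadata() come from the example above
    original_md = get_object_or_404(Metadata, id=metadata_id)
    editor_form = MetadataEditorForm(request.POST)  # assumed form exposing keywords, language_code, categories
    if editor_form.is_valid():
        # assumed helper that maps the cleaned form data onto an unsaved Metadata instance
        custom_md = build_custom_metadata(editor_form.cleaned_data)
        overwrite_metadata(original_md, custom_md, editor_form)
        return redirect("metadata-detail", metadata_id)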
Example #2
    def _persist_metadata(self, md_data_entry: dict):
        """ Creates real Metadata model records from the parsed data

        Args:
            md_data_entry (dict): The parsed metadata entry
        Returns:
            nothing
        """
        _id = md_data_entry["id"]
        # Remove this id from the set of metadata which shall be deleted in the end.
        try:
            self.deleted_metadata.remove(_id)
        except KeyError:
            pass

        try:
            md = Metadata.objects.get(identifier=_id)
            is_new = False
            if md.last_remote_change == md_data_entry["date_stamp"]:
                # Nothing to do here!
                return
        except ObjectDoesNotExist:
            md = Metadata(identifier=_id)
            is_new = True
        md.access_constraints = md_data_entry.get("access_constraints", None)
        md.created_by = self.harvesting_group
        md.origin = ResourceOriginEnum.CATALOGUE.value
        md.last_remote_change = md_data_entry.get("date_stamp", None)
        md.title = md_data_entry.get("title", None)
        md.contact = md_data_entry.get("contact", None)
        md.language_code = md_data_entry.get("language_code", None)
        md.metadata_type = md_data_entry.get("metadata_type", None)
        md.abstract = md_data_entry.get("abstract", None)
        md.bounding_geometry = md_data_entry.get("bounding_geometry", None)
        formats = md_data_entry.get("formats", [])
        md.is_active = True
        md.capabilities_original_uri = md_data_entry.get(
            "capabilities_original_url", None)
        try:
            # Improve speed for keyword get-or-create by fetching (filter) all existing ones and only
            # performing get_or_create on the ones that do not exist yet. Speeds this up by ~50% for large amounts of data.
            existing_kws = Keyword.objects.filter(
                keyword__in=md_data_entry["keywords"])
            existing_kws = [kw.keyword for kw in existing_kws]
            new_kws = [
                kw for kw in md_data_entry["keywords"]
                if kw not in existing_kws
            ]
            for kw in new_kws:
                Keyword.objects.get_or_create(keyword=kw)
            kws = Keyword.objects.filter(keyword__in=md_data_entry["keywords"])

            # Same for MimeTypes
            existing_formats = MimeType.objects.filter(
                mime_type__in=md_data_entry["formats"])
            existing_formats = [
                _format.mime_type for _format in existing_formats
            ]
            new_formats = [
                _format for _format in md_data_entry["formats"]
                if _format not in existing_formats
            ]
            for _format in new_formats:
                MimeType.objects.get_or_create(mime_type=_format)
            formats = MimeType.objects.filter(
                mime_type__in=md_data_entry["formats"])

            with transaction.atomic():
                if len(md_data_entry["categories"]) > 0:
                    # Build a case-insensitive OR filter over all harvested category titles
                    q = Q()
                    for cat in md_data_entry["categories"]:
                        q |= Q(title_EN__iexact=cat)
                    categories = Category.objects.filter(q)
                else:
                    categories = []

                for link in md_data_entry.get("links", []):
                    url = link.get("link", None)
                    if url is None:
                        continue
                    generic_url = GenericUrl()
                    generic_url.description = "[HARVESTED URL] \n{}".format(
                        link.get("description", ""))
                    generic_url.method = "Get"
                    generic_url.url = url
                    generic_url.save()
                    md.additional_urls.add(generic_url)

                md.save(add_monitoring=False)
                md.keywords.add(*kws)
                md.categories.add(*categories)
                md.formats.add(*formats)

                # To reduce runtime, we only create a new MetadataRelation if we are sure there hasn't already been one.
                # Using get_or_create increases runtime on existing metadata too much!
                if is_new:
                    md.add_metadata_relation(
                        to_metadata=self.metadata,
                        relation_type=MetadataRelationEnum.HARVESTED_THROUGH.value,
                        origin=ResourceOriginEnum.CATALOGUE.value)

            parent_id = md_data_entry["parent_id"]
            # Add the found parent_id to the parent_child map!
            if parent_id is not None:
                if self.parent_child_map.get(parent_id, None) is None:
                    self.parent_child_map[parent_id] = [md]
                else:
                    self.parent_child_map[parent_id].append(md)

        except (IntegrityError, DataError) as e:
            csw_logger.error(
                CSW_ERROR_LOG_TEMPLATE.format(md.identifier,
                                              self.metadata.title, e))
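
The inline comment about the keyword speed-up describes a reusable pattern: fetch everything that already exists with a single filter() query, then call get_or_create only for the values that are missing. Below is a condensed sketch of that pattern, assuming the same Keyword model as above; the helper name bulk_get_or_create_keywords is illustrative and not part of the project code.

def bulk_get_or_create_keywords(wanted_keywords):
    # One query for everything that already exists ...
    existing = set(
        Keyword.objects.filter(keyword__in=wanted_keywords)
                       .values_list("keyword", flat=True)
    )
    # ... and get_or_create only for the values that are actually missing
    for kw in wanted_keywords:
        if kw not in existing:
            Keyword.objects.get_or_create(keyword=kw)
    # Re-query so the caller receives model instances for every wanted keyword
    return Keyword.objects.filter(keyword__in=wanted_keywords)

The same shape is applied to MimeType in the example; keeping get_or_create for the missing rows preserves the per-row uniqueness handling of the original code.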