示例#1
0
 def on_deleted(self, harvest_object, header):
     """ See :meth:`OAIPMHHarvester.on_deleted`

         Flag the harvested package for deletion and persist the state
         on the harvest object.
     """
     existing_package = get_package_id_by_pid(header.identifier(), 'primary')
     if existing_package:
         harvest_object.package_id = existing_package
     harvest_object.report_status = "deleted"
     harvest_object.content = None
     harvest_object.save()
     return True
示例#2
0
    def test_get_package_id_by_pid(self):
        """Resolve PIDs of a known type to the owning package id.

        A pid that exists but is queried with the wrong type, or an
        unknown pid, must not resolve (returns None).
        """
        package_1_id, package_2_id = self._create_datasets()
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual / assertIsNone instead.
        self.assertEqual(utils.get_package_id_by_pid('some_primary_pid_1', 'primary'), package_1_id)
        self.assertEqual(utils.get_package_id_by_pid('some_metadata_pid_1', 'relation'), package_1_id)
        self.assertIsNone(utils.get_package_id_by_pid('some_data_pid_1', 'unknown_type'))

        self.assertEqual(utils.get_package_id_by_pid('some_data_pid_2', 'relation'), package_2_id)
        self.assertEqual(utils.get_package_id_by_pid('some_part_pid_2', 'relation'), package_2_id)
        self.assertIsNone(utils.get_package_id_by_pid('some_unknown_pid_2', 'relation'))
        self.assertIsNone(utils.get_package_id_by_pid('invalid', 'invalid'))
示例#3
0
    def test_get_package_id_by_pid(self):
        """Resolve data/metadata/version PIDs to the owning package id.

        A pid queried with a mismatching type, or an unknown pid, must
        not resolve (returns None).
        """
        package_1_id, package_2_id = self._create_datasets()
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual / assertIsNone instead.
        self.assertEqual(utils.get_package_id_by_pid('some_data_pid_1', 'data'), package_1_id)
        self.assertEqual(utils.get_package_id_by_pid('some_metadata_pid_1', 'metadata'), package_1_id)
        self.assertIsNone(utils.get_package_id_by_pid('some_data_pid_1', 'metadata'))

        self.assertEqual(utils.get_package_id_by_pid('some_data_pid_2', 'data'), package_2_id)
        self.assertEqual(utils.get_package_id_by_pid('some_version_pid_2', 'version'), package_2_id)
        self.assertIsNone(utils.get_package_id_by_pid('some_version_pid_2', 'data'))
        self.assertIsNone(utils.get_package_id_by_pid('invalid', 'version'))
示例#4
0
    def read_data(self, xml):
        """ Extract package data from given XML.

        Parses a CMDI record (harvested over OAI-PMH) into a CKAN-style
        package dictionary: titles/descriptions as JSON language maps,
        PIDs, agents, contacts, license, availability URLs and temporal
        coverage.

        :param xml: xml element (lxml)
        :return: dictionary
        :raises CmdiReaderException: if the record lacks the expected
            CMD or resourceInfo elements.
        """
        cmd = first(
            xml.xpath('//oai:record/oai:metadata/cmd:CMD',
                      namespaces=self.namespaces))
        if cmd is None:
            raise CmdiReaderException(
                "Unexpected XML format: No CMD -element found")

        # BUG FIX: this previously indexed the XPath result with [0], which
        # raised IndexError on an empty result, so the None check below was
        # dead code. first() returns None for an empty result and lets the
        # intended CmdiReaderException be raised.
        resource_info = first(
            cmd.xpath("//cmd:Components/cmd:resourceInfo",
                      namespaces=self.namespaces))
        if resource_info is None:
            raise CmdiReaderException(
                "Unexpected XML format: No resourceInfo -element found")

        metadata_identifiers = self._text_xpath(
            cmd, "//cmd:identificationInfo/cmd:identifier/text()")

        languages = self._text_xpath(
            cmd,
            "//cmd:corpusInfo/cmd:corpusMediaType/cmd:corpusTextInfo/cmd:languageInfo/cmd:languageId/text()"
        )

        # convert the descriptions to a JSON string of type {"fin":"kuvaus", "eng","desc"}
        desc_json = {}
        for desc in xml.xpath("//cmd:identificationInfo/cmd:description",
                              namespaces=self.namespaces):
            lang = convert_language(
                desc.get('{http://www.w3.org/XML/1998/namespace}lang',
                         'undefined').strip())
            desc_json[lang] = unicode(desc.text).strip()

        description = json.dumps(desc_json)

        # convert the titles to a JSON string of type {"fin":"otsikko", "eng","title"}
        transl_json = {}
        for title in xml.xpath('//cmd:identificationInfo/cmd:resourceName',
                               namespaces=self.namespaces):
            lang = convert_language(
                title.get('{http://www.w3.org/XML/1998/namespace}lang',
                          'undefined').strip())
            transl_json[lang] = title.text.strip()

        title = json.dumps(transl_json)
        provider = self.provider
        version = first(
            self._text_xpath(
                resource_info,
                "//cmd:metadataInfo/cmd:metadataLastDateUpdated/text()")) or ""
        coverage = first(
            self._text_xpath(
                resource_info,
                "//cmd:corpusInfo/cmd:corpusMediaType/cmd:corpusTextInfo/cmd:timeCoverageInfo/cmd:timeCoverage/text()"
            )) or ""

        pids = []
        primary_pid = ''
        direct_download_URL = ''
        access_request_URL = ''
        access_application_URL = ''

        # data_identifiers = self._text_xpath(cmd, "//cmd:identificationInfo/cmd:url/text()")

        # Only the first urn-bearing identifier becomes the primary PID.
        for pid in [
                CmdiReader._language_bank_urn_pid_enhancement(metadata_pid)
                for metadata_pid in metadata_identifiers
        ]:
            if 'urn' in pid and not primary_pid:
                pids.append(dict(id=pid, provider=provider, type='primary'))
                primary_pid = pid
        #     else:
        #         pids.append(dict(id=pid, provider=provider, type='relation', relation='generalRelation'))
        #
        # pids += [dict(id=CmdiReader._language_bank_urn_pid_enhancement(pid), provider=provider, type='relation',
        #               relation='generalRelation') for pid in data_identifiers]

        license_identifier = CmdiReader._language_bank_license_enhancement(
            first(
                self._text_xpath(
                    resource_info,
                    "//cmd:distributionInfo/cmd:licenceInfo/cmd:licence/text()"
                )) or 'notspecified')
        availability = CmdiReader._language_bank_availability_from_license(
            license_identifier)

        # Only expose access URLs when the license is settled.
        if license_identifier.lower().strip() != 'undernegotiation':
            if availability == 'direct_download':
                direct_download_URL = primary_pid
            if availability == 'access_request':
                access_request_URL = primary_pid
            if availability == 'access_application_other':
                # The LBR basket wants only the last path segment of the PID.
                sliced_pid = primary_pid.rsplit('/', 1)
                if len(sliced_pid) >= 2:
                    access_application_URL = 'https://lbr.csc.fi/web/guest/catalogue?domain=LBR&target=basket&resource=' + sliced_pid[
                        1]

        temporal_coverage_begin = ""
        temporal_coverage_end = ""

        # Coverage is expected as "begin - end"; anything else is ignored.
        if coverage:
            split = [item.strip() for item in coverage.split("-")]
            if len(split) == 2:
                temporal_coverage_begin = split[0]
                temporal_coverage_end = split[1]

        # TODO: Check agent mapping.
        #print "###", _get_persons(resource_info, "//cmd:distributionInfo/cmd:licenceInfo/cmd:licensorPerson")
        #print "###", _get_persons(resource_info, "//cmd:distributionInfo/cmd:licenceInfo/cmd:distributionRightsHolderPerson")
        #print "###", _get_persons(resource_info, "//cmd:distributionInfo/cmd:iprHolderPerson")
        #print "###", _get_persons(resource_info, "//cmd:contactPerson")
        #print "###", _get_persons(resource_info, "//cmd:metadataInfo/cmd:metadataCreator")

        #print "###", _get_organizations(resource_info, "//cmd:distributionInfo/cmd:licenceInfo/cmd:licensorOrganization")
        #print "###", _get_organizations(resource_info, "//cmd:distributionInfo/cmd:licenceInfo/cmd:distributionRightsHolderOrganization")
        #print "###", _get_organizations(resource_info, "//cmd:distributionInfo/cmd:iprHolderOrganization")

        contacts = self._persons_as_contact(
            self._get_persons(resource_info, "//cmd:contactPerson"))

        agents = []
        agents.extend(
            self._persons_as_agent(
                self._get_persons(
                    resource_info,
                    "//cmd:distributionInfo/cmd:iprHolderPerson"), 'author'))
        agents.extend(
            self._persons_as_agent(
                self._get_persons(
                    resource_info,
                    "//cmd:distributionInfo/cmd:licenceInfo/cmd:distributionRightsHolderPerson"
                ), 'owner'))

        agents.extend(
            self._organization_as_agent(
                self._get_organizations(
                    resource_info,
                    "//cmd:distributionInfo/cmd:iprHolderOrganization"),
                'author'))
        agents.extend(
            self._organization_as_agent(
                self._get_organizations(
                    resource_info,
                    "//cmd:distributionInfo/cmd:licenceInfo/cmd:distributionRightsHolderOrganization"
                ), 'owner'))

        # Reuse the package id of an already-harvested dataset, if any.
        existing_package_id = get_package_id_by_pid(primary_pid, u'primary')
        package_id = existing_package_id if existing_package_id else get_unique_package_id(
        )

        result = {
            'name': pid_to_name(package_id),
            'language': ",".join(languages),
            'pids': pids,
            'version': version,
            'notes': description,
            'title': title,
            'type': 'dataset',
            'contact': contacts,
            'agent': agents,
            'availability': availability,
            'direct_download_URL': direct_download_URL,
            'access_request_URL': access_request_URL,
            'access_application_URL': access_application_URL,
            'temporal_coverage_begin': temporal_coverage_begin,
            'temporal_coverage_end': temporal_coverage_end,
            'license_id': license_identifier,
            'license_URL': ''
        }

        if not languages:
            result['langdis'] = u'True'

        if package_id:
            result['id'] = package_id

        # TODO: Ask about distributionAccessMedium
        # _strip_first(_text_xpath(resource_info, "//cmd:distributionInfo/availability/text()"))
        # url = _strip_first(_text_xpath(resource_info, "//cmd:identificationInfo/cmd:url/text()"))

        return result
示例#5
0
    def _ddi2ckan(self, original_url, original_xml, harvest_object):
        '''Extract package values from bs4 object 'ddi_xml' parsed from xml

        Builds a CKAN package dictionary from ``self.ddi_xml`` (a bs4 tree),
        with FSD-specific special cases keyed off the harvest URL.

        :param original_url: URL the record was harvested from; used by
            ``_is_fsd`` to switch on FSD-specific defaults.
        :param original_xml: original XML text; not referenced in this
            method (kept for the caller's interface).
        :param harvest_object: harvest object or None; used to resolve the
            owner organisation and cleared at the end.
        :return: package dictionary
        '''
        # TODO: Use .extract() and .string.extract() function so handled elements are removed from ddi_xml.
        # Path prefixes passed as strings to _read_value, which evaluates
        # them against the parsed tree (see _read_value's definition).
        doc_citation = "ddi_xml.codeBook.docDscr.citation"
        stdy_dscr = "ddi_xml.codeBook.stdyDscr"

        ####################################################################
        #      Read mandatory metadata fields:                             #
        ####################################################################
        # Authors & organizations
        authors = self.get_authors(self.ddi_xml.stdyDscr.citation, 'AuthEnty')
        agent = authors[:]
        agent.extend(self.get_contributors(self.ddi_xml.stdyDscr.citation))

        # Availability
        # NOTE(review): sets 'direct_download' when an access-request URL is
        # found — confirm this mapping is intended.
        availability = AVAILABILITY_DEFAULT
        if _access_request_URL_is_found():
            availability = 'direct_download'
        if _is_fsd(original_url):
            availability = AVAILABILITY_FSD

        # Keywords
        keywords = self.get_keywords(self.ddi_xml.stdyDscr.stdyInfo.subject)

        # Language
        # TODO: Where/how to extract multiple languages: 'language': u'eng, fin, swe' ?
        language = self.convert_language(
            self._read_value("ddi_xml.codeBook.get('xml:lang')"))

        # Titles
        titles = self._read_value(stdy_dscr + ".citation.titlStmt(['titl', 'parTitl'])") or \
            self._read_value(doc_citation + ".titlStmt(['titl', 'parTitl'])", mandatory_field=True)

        # langtitle=[dict(lang=self.convert_language(a.get('xml:lang', '')), value=a.text) for a in titles]
        # [{"lang":"fin", "value":"otsikko"}, {"lang:"en", "value":"title"}]

        # convert the titles to a JSON string of type {"fin":"otsikko", "eng","title"}
        transl_json = {}
        first_title = ""

        # default to finnish, since first title has no lang value, which causes the validator to whine
        # we might want to update the DDI harvester to accept a language configuration parameter, if
        # we decide to harvest DDI resources from other sources.
        default_lang = "fi"
        for title in titles:
            transl_json[self.convert_language(title.get('xml:lang', default_lang))] = title.text

            # we want to get save the first title for use lateron
            if not first_title:
                first_title = title.text

        title = json.dumps(transl_json)

        # License
        # TODO: Extract prettier output. Should we check that element contains something?
        # Should this be in optional section if not mandatory_field?
        license_url = self._read_value(stdy_dscr + ".dataAccs.useStmt.get_text(separator=u' ')", mandatory_field=False)
        if _is_fsd(original_url):
            license_id = LICENSE_ID_FSD
        else:
            license_id = LICENSE_ID_DEFAULT

        # Contact (package_extra.key: contact_[k]_name in database, contact in WUI)
        # Fallback chain: contact -> distributor -> producer (mandatory).
        contact_name = self._read_value(stdy_dscr + ".citation.distStmt('contact')") or \
                     self._read_value(stdy_dscr + ".citation.distStmt('distrbtr')") or \
                     self._read_value(doc_citation + ".prodStmt('producer')", mandatory_field=True)
        # TODO: clean out (or ask FSD to clean) mid text newlines (eg. in FSD2482)
        if contact_name and contact_name[0].text:
            contact_name = contact_name[0].text
        else:
            contact_name = self._read_value(stdy_dscr + ".citation.prodStmt.producer.get('affiliation')", mandatory_field=True)
        if _is_fsd(original_url):
            contact_email = CONTACT_EMAIL_FSD
            # TODO: Allow trying other email also in FSD metadata
        else:
            contact_email = self._read_value(stdy_dscr + ".citation.distStmt.contact.get('email')", mandatory_field=True)

        # Modified date
        version = self.get_attr_optional(self.ddi_xml.stdyDscr.citation,
                                         'prodDate', 'date') or \
                  self.get_attr_mandatory(self.ddi_xml.stdyDscr.citation,
                                          'version', 'date')

        # This idNos is an FSD specific solution
        idNos = self._read_value(stdy_dscr + ".citation.titlStmt.find_all('IDNo')", mandatory_field=False)
        if not idNos:
            idNos = self._read_value(doc_citation + ".titlStmt.find_all('IDNo')", mandatory_field=True)

        pids = list()

        idNoValues = [bsIdNo.text for bsIdNo in idNos]
        agencies = [bsIdNo.get('agency') for bsIdNo in idNos]
        primary_pid = None
        # Only emit pids when every IDNo has an agency attribute.
        if len(idNoValues) == len(agencies):
            for idNoVal, agency in zip(idNoValues, agencies):
                # Implicit string-literal concatenation: agency == 'Kansalliskirjasto'.
                if agency == 'Kansalli' \
                             'skirjasto':
                    pids.append({'id': idNoVal, 'type': 'primary', 'provider': agency})
                    primary_pid = idNoVal
                else:
                    pids.append({'id': agency + idNoVal, 'type': 'relation', 'provider': agency, 'relation': 'generalRelation'})

        # Should we generate a version PID?
        # vpid = utils.generate_pid()
        # pids.append({'id': vpid, 'type': 'version', 'provider': 'kata'})

        # Original web page as resource
        # For FSD 'URI' leads to summary web page of data, hence format='html'
        orig_web_page = self._read_value(doc_citation + ".holdings.get('URI', '')")
        if orig_web_page:
            orig_web_page_resource = {'description': first_title,
                                      'format': u'html',
                                      'resource_type': 'documentation',
                                      'url': orig_web_page}
        else:
            orig_web_page_resource = {}

        # Owner
        owner = self._read_value(stdy_dscr + ".citation.prodStmt.producer.text") or \
                self._read_value(stdy_dscr + ".citation.rspStmt.AuthEnty.text") or \
                self._read_value(doc_citation + ".prodStmt.producer.string", mandatory_field=True)
        agent.append({'role': 'owner',
                      'name': owner})

        # Owner organisation
        # NOTE(review): looks up a Package by the harvest *source* id —
        # confirm harvest source ids double as package ids in this model.
        if harvest_object:
            hsid = harvest_object.harvest_source_id
            hsooid = model.Session.query(model.Package).filter(model.Package.id==hsid).one().owner_org
            owner_org = model.Session.query(model.Group).filter(model.Group.id==hsooid).one().name
        else:
            owner_org = u''

        # Distributor (Agent: distributor, the same is used as contact)
        agent.append({
            'role': 'distributor',
            'name': contact_name})

        ####################################################################
        #      Read optional metadata fields:                              #
        ####################################################################
        # Availability
        if _is_fsd(original_url):
            access_request_url = ACCESS_REQUEST_URL_FSD
        else:
            access_request_url = u''

        # Contact
        contact_phone = self._read_value(doc_citation + ".holdings.get('callno')") or \
                        self._read_value(stdy_dscr + ".citation.holdings.get('callno')")

        # NOTE(review): due to ternary precedence the whole or-chain is the
        # "true" branch; non-FSD records get None only when every lookup
        # fails — confirm this is the intended grouping.
        contact_URL = self._read_value( stdy_dscr + ".dataAccs.setAvail.accsPlac.get('URI')") or \
                      self._read_value( stdy_dscr + ".citation.distStmt.contact.get('URI')") or \
                      self._read_value( stdy_dscr + ".citation.distStmt.distrbtr.get('URI')") or \
                      CONTACT_URL_FSD if _is_fsd(original_url) else None

        # convert the descriptions to a JSON string of type {"fin":"aineiston kuvaus", "eng","dataset description"}
        descriptions = self._read_value(stdy_dscr + ".stdyInfo.abstract('p')")
        if not descriptions:
            descriptions = self._read_value(stdy_dscr + ".citation.serStmt.serInfo('p')")
        translated_notes = {}

        # Concatenate multiple paragraphs of the same language.
        for des in descriptions:
            lang = self.convert_language(des.get('xml:lang', 'fi'))
            if lang in translated_notes:
                translated_notes[lang] += '\r\n\r\n' + des.text
            else:
                translated_notes[lang] = des.text

        notes = json.dumps(translated_notes)

        # Discipline
        discipline = self.get_discipline(self.ddi_xml.stdyDscr.stdyInfo.subject)

        # Dataset lifetime events
        events = self._get_events(stdy_dscr, authors)

        # Geographic coverage
        geo_cover = self.get_geo_coverage(self.ddi_xml)

        # Temporal coverage
        temp_start, temp_end = self.get_temporal_coverage(self.ddi_xml)

        # Citation
        citation = self._read_value(stdy_dscr + ".citation.biblCit.text", mandatory_field=False)


        ####################################################################
        #      Flatten rest to 'XPath/path/to/element': 'value' pairs      #
        ####################################################################
        # Re-parse the bs4 tree with lxml so the generic flattener can walk it.
        etree_xml = etree.fromstring(str(self.ddi_xml))
        flattened_ddi = importcore.generic_xml_metadata_reader(etree_xml.find('.//{*}docDscr'))
        xpath_dict = flattened_ddi.getMap()
        flattened_ddi = importcore.generic_xml_metadata_reader(etree_xml.find('.//{*}stdyDscr'))
        xpath_dict.update(flattened_ddi.getMap())

        # Reuse an existing package id when this pid was harvested before.
        existing_package_id = get_package_id_by_pid(primary_pid, u'primary')
        package_id = existing_package_id if existing_package_id else get_unique_package_id()
        package_name = pid_to_name(package_id)

        package_dict = dict(
            access_application_URL=u'',
            access_request_URL=unicode(access_request_url),
            agent=agent,
            algorithm=u'',   # To be implemented straight in 'resources'
            availability=unicode(availability),
            contact=[{'name': contact_name,
                      'email': contact_email,
                      'URL': contact_URL,
                      'phone': contact_phone}],
            direct_download_URL=u'',  # To be implemented straight in 'resources
            discipline=discipline,
            event=events,
            geographic_coverage=geo_cover,
            groups=[],
            id=package_id,
            langdis=u'True',  # NOTE!
            language=language,
            license_URL=license_url,
            license_id=license_id,
            mimetype=u'',  # To be implemented straight in 'resources
            name=package_name,
            notes=notes or u'',
            pids=pids,
            owner_org=owner_org,
            resources=[orig_web_page_resource],
            tag_string=keywords,
            temporal_coverage_begin=temp_start,
            temporal_coverage_end=temp_end,
            title=title,
            type='dataset',
            version=version,
            version_PID='',
            citation=citation
        )
        package_dict['xpaths'] = xpath_dict
        # Above line creates:
        # package_dict = {
        #     'access_request_url': 'some_url',
        #     # ...
        #     'xpaths': {'stdyDscr/othrStdyMat.0/relPubl.34':
        #                'Uskon asia: nuorisobarometri 2006 (2006).'},
        #               {'stdyD...': 'Some value'}]
        # }
        #package_dict['extras'].update(_save_ddi_variables_to_csv(ddi_xml, somepkg))


        # Order of the old code:
        #_save_original_xml_and_link_as_resources()
        #_save_ddi_variables_to_csv()
        #_create_group_based_on_organizations()
        #_last_statements_to_rewrite()

        # JuhoL: Set harvest object to some end state and commit
        if harvest_object is not None:
            harvest_object.content = None
            # Should this be flushed? model.Session.flush()
        #model.repo.commit()

        return package_dict
    def read_data(self, xml):
        """ Extract package data from given XML.

        Maps a DataCite (kernel-3) record into a CKAN-style package
        dictionary: identifier -> primary pid, creators/contributors ->
        agents, publisher -> contact, dates -> events, descriptions ->
        langnotes, rights -> license_URL.

        :param xml: xml element (lxml)
        :return: dictionary
        :raises AttributeError: if a mandatory DataCite element
            (identifier, creatorName, title, publisher, publicationYear)
            is missing from the record.
        """
        # DataCite kernel-3 namespace, hoisted so each lookup stays short.
        ns = '{http://datacite.org/schema/kernel-3}'

        def optional_text(parent, tag):
            # Optional elements must not crash with AttributeError on None.
            element = parent.find('.//' + ns + tag)
            return element.text if element is not None else None

        # MAP DATACITE MANDATORY FIELDS

        # Identifier to primary pid
        identifier = xml.find('.//' + ns + 'identifier')
        pids = [{
            'id': identifier.text,
            'type': 'primary',
            'provider': identifier.get('identifierType')}]

        # Creator name to agent
        # TODO: map nameIdentifier to agent.id and nameIdentifierScheme and schemeURI
        # to extras
        agents = []
        for creator in xml.findall('.//' + ns + 'creator'):
            agents.append({
                'role': u'author',
                'name': creator.find('.//' + ns + 'creatorName').text,
                # <affiliation> is optional in the DataCite schema.
                'organisation': optional_text(creator, 'affiliation')
                })

        # Primary title to title
        # TODO: if titleType is present, check to find out if title is actually primary
        # TODO: map non-primary titles to extras
        title = xml.find('.//' + ns + 'title').text
        langtitle = [{'lang': 'en', 'value': title}]  # Assuming we always harvest English

        # Publisher to contact
        publisher = xml.find('.//' + ns + 'publisher').text
        contacts = [{'name': publisher}]

        # Publication year to event
        publication_year = xml.find('.//' + ns + 'publicationYear').text
        events = [{'type': u'published', 'when': publication_year, 'who': publisher, 'descr': u'Dataset was published'}]


        # MAP DATACITE RECOMMENDED FIELDS

        # Subject to tags
        # TODO: map subjectsScheme and schemeURI to extras

        # Contributor to agent
        # TODO: map nameIdentifier to agent.id, nameIdentifierScheme, schemeURI and
        # contributorType to extras
        for contributor in xml.findall('.//' + ns + 'contributor'):
            agents.append({
                'role': u'contributor',
                'name': contributor.find('.//' + ns + 'contributorName').text,
                'organisation': optional_text(contributor, 'affiliation')
                })

        # Date to event
        for date in xml.findall('.//' + ns + 'date'):
            events.append({
              'type': date.get('dateType'),
              'when': date.text,
              'who': u'unknown',
              'descr': date.get('dateType'),
              })

        # ResourceType to extra
        # TODO: map resourceType and resourceTypeGeneral to extras

        # RelatedIdentifier to showcase
        # TODO: map RelatedIdentifier to showcase title, relatedIdentifierType, relationType,
        # relatedMetadataScheme, schemeURI and schemeType to showcase description

        # Description to langnotes
        description = ''
        for element in xml.findall('.//' + ns + 'description'):
            # descriptionType and text content may both be absent; guard
            # against None so concatenation cannot raise TypeError.
            description += (element.get('descriptionType') or '') + ': ' + (element.text or '') + ' '
        langnotes = [{
          'lang': 'en',  # Assuming we always harvest English
          'value': description,
          }]

        # GeoLocation to geograhic_coverage
        # TODO: map geoLocationPoint and geoLocationBox to extras, geoLocationPlace to
        # geographic_coverage


        # MAP DATACITE OPTIONAL FIELDS

        # Language to language
        # TODO: map language to language

        # AlternateIdentifier to pids
        # TODO: map AlternateIdentifier to pids.id, alternateIdentifierType to pids.provider

        # Size to extra
        # TODO: map size to extra

        # Format to resources
        # TODO: map format to resources.format

        # Version to extra
        # DataCite version is a string such as 'v3.2.1' and can't be used as Etsin version
        # TODO: map version to extra

        # Rights to license
        license_URL = ''
        for right in xml.findall('.//' + ns + 'rights'):
            # rightsURI is an optional attribute; guard against None.
            license_URL += (right.text or '') + ' ' + (right.get('rightsURI') or '') + ' '


        # OTHER - REQUIRED BY CKANEXT-HARVEST

        # Get or create package id
        existing_package_id = get_package_id_by_pid(identifier.text, u'primary')
        package_id = existing_package_id if existing_package_id else get_unique_package_id()

        result = {
                  'agent': agents,
                  'contact': contacts,
                  'event': events,
                  'id': package_id,
                  'langnotes': langnotes,
                  'langtitle': langtitle,
                  'license_URL': license_URL,
                  'pids': pids,
                  'title': title,
                  'type': 'dataset',
                  # Etsin version field: use the harvest date, not DataCite's version.
                  'version': datetime.datetime.now().strftime("%Y-%m-%d")
                  }


        return result
示例#7
0
    def read_data(self, xml):
        """ Extract package data from given XML.

        Parses a CMDI record (harvested over OAI-PMH) into a CKAN-style
        package dictionary: titles/descriptions as JSON language maps,
        PIDs, agents, contacts, license, availability URLs and temporal
        coverage.

        :param xml: xml element (lxml)
        :return: dictionary
        :raises CmdiReaderException: if the record lacks the expected
            CMD or resourceInfo elements.
        """
        cmd = first(xml.xpath('//oai:record/oai:metadata/cmd:CMD', namespaces=self.namespaces))
        if cmd is None:
            raise CmdiReaderException("Unexpected XML format: No CMD -element found")

        # BUG FIX: this previously indexed the XPath result with [0], which
        # raised IndexError on an empty result, so the None check below was
        # dead code. first() returns None for an empty result and lets the
        # intended CmdiReaderException be raised.
        resource_info = first(cmd.xpath("//cmd:Components/cmd:resourceInfo", namespaces=self.namespaces))
        if resource_info is None:
            raise CmdiReaderException("Unexpected XML format: No resourceInfo -element found")

        metadata_identifiers = self._text_xpath(cmd, "//cmd:identificationInfo/cmd:identifier/text()")

        languages = self._text_xpath(cmd, "//cmd:corpusInfo/cmd:corpusMediaType/cmd:corpusTextInfo/cmd:languageInfo/cmd:languageId/text()")

        # convert the descriptions to a JSON string of type {"fin":"kuvaus", "eng","desc"}
        desc_json = {}
        for desc in xml.xpath("//cmd:identificationInfo/cmd:description", namespaces=self.namespaces):
            lang = convert_language(desc.get('{http://www.w3.org/XML/1998/namespace}lang', 'undefined').strip())
            desc_json[lang] = unicode(desc.text).strip()

        description = json.dumps(desc_json)

        # convert the titles to a JSON string of type {"fin":"otsikko", "eng","title"}
        transl_json = {}
        for title in xml.xpath('//cmd:identificationInfo/cmd:resourceName', namespaces=self.namespaces):
            lang = convert_language(title.get('{http://www.w3.org/XML/1998/namespace}lang', 'undefined').strip())
            transl_json[lang] = title.text.strip()

        title = json.dumps(transl_json)
        provider = self.provider
        version = first(self._text_xpath(resource_info, "//cmd:metadataInfo/cmd:metadataLastDateUpdated/text()")) or ""
        coverage = first(self._text_xpath(resource_info, "//cmd:corpusInfo/cmd:corpusMediaType/cmd:corpusTextInfo/cmd:timeCoverageInfo/cmd:timeCoverage/text()")) or ""

        pids = []
        primary_pid = ''
        direct_download_URL = ''
        access_request_URL = ''
        access_application_URL = ''

        # data_identifiers = self._text_xpath(cmd, "//cmd:identificationInfo/cmd:url/text()")

        # Only the first urn-bearing identifier becomes the primary PID.
        for pid in [CmdiReader._language_bank_urn_pid_enhancement(metadata_pid) for metadata_pid in metadata_identifiers]:
            if 'urn' in pid and not primary_pid:
                pids.append(dict(id=pid, provider=provider, type='primary'))
                primary_pid = pid
        #     else:
        #         pids.append(dict(id=pid, provider=provider, type='relation', relation='generalRelation'))
        #
        # pids += [dict(id=CmdiReader._language_bank_urn_pid_enhancement(pid), provider=provider, type='relation',
        #               relation='generalRelation') for pid in data_identifiers]

        license_identifier = CmdiReader._language_bank_license_enhancement(first(self._text_xpath(resource_info, "//cmd:distributionInfo/cmd:licenceInfo/cmd:licence/text()")) or 'notspecified')
        availability = CmdiReader._language_bank_availability_from_license(license_identifier)

        # Only expose access URLs when the license is settled.
        if license_identifier.lower().strip() != 'undernegotiation':
            if availability == 'direct_download':
                direct_download_URL = primary_pid
            if availability == 'access_request':
                access_request_URL = primary_pid
            if availability == 'access_application_other':
                # The LBR basket wants only the last path segment of the PID.
                sliced_pid = primary_pid.rsplit('/', 1)
                if len(sliced_pid) >= 2:
                    access_application_URL = 'https://lbr.csc.fi/web/guest/catalogue?domain=LBR&target=basket&resource=' + sliced_pid[1]

        temporal_coverage_begin = ""
        temporal_coverage_end = ""

        # Coverage is expected as "begin - end"; anything else is ignored.
        if coverage:
            split = [item.strip() for item in coverage.split("-")]
            if len(split) == 2:
                temporal_coverage_begin = split[0]
                temporal_coverage_end = split[1]

        # TODO: Check agent mapping.
        #print "###", _get_persons(resource_info, "//cmd:distributionInfo/cmd:licenceInfo/cmd:licensorPerson")
        #print "###", _get_persons(resource_info, "//cmd:distributionInfo/cmd:licenceInfo/cmd:distributionRightsHolderPerson")
        #print "###", _get_persons(resource_info, "//cmd:distributionInfo/cmd:iprHolderPerson")
        #print "###", _get_persons(resource_info, "//cmd:contactPerson")
        #print "###", _get_persons(resource_info, "//cmd:metadataInfo/cmd:metadataCreator")

        #print "###", _get_organizations(resource_info, "//cmd:distributionInfo/cmd:licenceInfo/cmd:licensorOrganization")
        #print "###", _get_organizations(resource_info, "//cmd:distributionInfo/cmd:licenceInfo/cmd:distributionRightsHolderOrganization")
        #print "###", _get_organizations(resource_info, "//cmd:distributionInfo/cmd:iprHolderOrganization")

        contacts = self._persons_as_contact(self._get_persons(resource_info, "//cmd:contactPerson"))

        agents = []
        agents.extend(self._persons_as_agent(self._get_persons(resource_info, "//cmd:distributionInfo/cmd:iprHolderPerson"), 'author'))
        agents.extend(self._persons_as_agent(self._get_persons(resource_info, "//cmd:distributionInfo/cmd:licenceInfo/cmd:distributionRightsHolderPerson"), 'owner'))

        agents.extend(self._organization_as_agent(self._get_organizations(resource_info, "//cmd:distributionInfo/cmd:iprHolderOrganization"), 'author'))
        agents.extend(self._organization_as_agent(self._get_organizations(resource_info, "//cmd:distributionInfo/cmd:licenceInfo/cmd:distributionRightsHolderOrganization"), 'owner'))

        # Reuse the package id of an already-harvested dataset, if any.
        existing_package_id = get_package_id_by_pid(primary_pid, u'primary')
        package_id = existing_package_id if existing_package_id else get_unique_package_id()

        result = {'name': pid_to_name(package_id),
                  'language': ",".join(languages),
                  'pids': pids,
                  'version': version,
                  'notes': description,
                  'title': title,
                  'type': 'dataset',
                  'contact': contacts,
                  'agent': agents,
                  'availability': availability,
                  'direct_download_URL': direct_download_URL,
                  'access_request_URL': access_request_URL,
                  'access_application_URL': access_application_URL,
                  'temporal_coverage_begin': temporal_coverage_begin,
                  'temporal_coverage_end': temporal_coverage_end,
                  'license_id': license_identifier,
                  'license_URL': ''}

        if not languages:
            result['langdis'] = u'True'

        if package_id:
            result['id'] = package_id

        # TODO: Ask about distributionAccessMedium
        # _strip_first(_text_xpath(resource_info, "//cmd:distributionInfo/availability/text()"))
        # url = _strip_first(_text_xpath(resource_info, "//cmd:identificationInfo/cmd:url/text()"))

        return result