def _parse_and_upsert_metadata(self, md: str):
    """Parse a raw XML metadata document and insert or update its record.

    The document is parsed, turned into a single pycsw record, and then
    either updated (when a record with the same identifier already exists
    in the repository) or inserted as a new local record.

    :param md: raw XML metadata document as a string
    :raises Exception: re-raises any parsing or repository error after
                       logging it
    """

    logger.debug('Parsing XML')
    try:
        xml = etree.fromstring(md)
    except Exception as err:
        logger.error(f'XML parsing failed: {err}')
        raise

    logger.debug('Processing metadata')
    try:
        # parse_record returns a list; only the first record is used here
        record = metadata.parse_record(self.context, xml, self.repo)[0]
        record.xml = record.xml.decode()
        logger.info(f"identifier: {record.identifier}")
    except Exception as err:
        logger.error(f'Metadata parsing failed: {err}')
        raise

    exists = self.repo.query_ids([record.identifier])
    if exists:
        logger.info('Updating record')
        try:
            self.repo.update(record)
        except Exception as err:
            logger.error(f'record update failed: {err}')
            raise
        logger.info('record updated')
    else:
        logger.info('Inserting record')
        try:
            self.repo.insert(record, 'local', util.get_today_and_now())
        except Exception as err:
            logger.error(f'record insertion failed: {err}')
            raise
        logger.info('record inserted')
def test_get_today_and_now():
    """get_today_and_now() formats the mocked local time as ISO 8601."""
    fake_now = "2017-01-01T00:00:00Z"
    parsed = time.strptime(fake_now, "%Y-%m-%dT%H:%M:%SZ")
    with mock.patch.object(util.time, "localtime", return_value=parsed):
        assert util.get_today_and_now() == fake_now
def test_get_today_and_now():
    """Verify get_today_and_now() against a patched util.time.localtime."""
    expected = "2017-01-01T00:00:00Z"
    with mock.patch.object(util.time, "localtime") as fake_localtime:
        fake_localtime.return_value = time.strptime(
            expected, "%Y-%m-%dT%H:%M:%SZ"
        )
        assert util.get_today_and_now() == expected
def load_records(context, database, table, xml_dirpath,
                 recursive=False, force_update=False):
    """Load metadata records from directory of files to database

    :param context: pycsw context object (supplies ``context.parser``)
    :param database: database connection string
    :param table: name of the records table
    :param xml_dirpath: a single XML file, or a directory containing
                        ``.xml`` files
    :param recursive: walk ``xml_dirpath`` recursively for ``.xml`` files
    :param force_update: update an existing record instead of skipping it
    """

    repo = repository.Repository(database, context, table=table)

    file_list = []

    # accept a single file, a recursive tree walk, or a flat directory glob
    if os.path.isfile(xml_dirpath):
        file_list.append(xml_dirpath)
    elif recursive:
        for root, _, files in os.walk(xml_dirpath):
            for mfile in files:
                if mfile.endswith('.xml'):
                    file_list.append(os.path.join(root, mfile))
    else:
        for rec in glob(os.path.join(xml_dirpath, '*.xml')):
            file_list.append(rec)

    total = len(file_list)
    counter = 0

    for recfile in sorted(file_list):
        counter += 1
        LOGGER.info('Processing file %s (%d of %d)', recfile, counter, total)

        # read document; skip files that are not well-formed XML
        try:
            exml = etree.parse(recfile, context.parser)
        except Exception as err:
            # Logger.warn() is a deprecated alias; warning() is the
            # supported spelling on Python 3
            LOGGER.warning('XML document is not well-formed: %s', str(err))
            continue

        record = metadata.parse_record(context, exml, repo)

        for rec in record:
            LOGGER.info('Inserting %s %s into database %s, table %s ....',
                        rec.typename, rec.identifier, database, table)

            # TODO: do this as CSW Harvest
            try:
                repo.insert(rec, 'local', util.get_today_and_now())
                LOGGER.info('Inserted')
            except RuntimeError as err:
                if force_update:
                    LOGGER.info('Record exists. Updating.')
                    repo.update(rec)
                    LOGGER.info('Updated')
                else:
                    LOGGER.warning('ERROR: not inserted %s', err)
def load_records(context, database, table, xml_dirpath,
                 recursive=False, force_update=False):
    """Load metadata records from directory of files to database

    :param context: pycsw context object (supplies ``context.parser``)
    :param database: database connection string
    :param table: name of the records table
    :param xml_dirpath: a single XML file, or a directory containing
                        ``.xml`` files
    :param recursive: walk ``xml_dirpath`` recursively for ``.xml`` files
    :param force_update: update an existing record instead of skipping it
    """

    repo = repository.Repository(database, context, table=table)

    file_list = []

    # accept a single file, a recursive tree walk, or a flat directory glob
    if os.path.isfile(xml_dirpath):
        file_list.append(xml_dirpath)
    elif recursive:
        for root, _, files in os.walk(xml_dirpath):
            for mfile in files:
                if mfile.endswith(".xml"):
                    file_list.append(os.path.join(root, mfile))
    else:
        for rec in glob(os.path.join(xml_dirpath, "*.xml")):
            file_list.append(rec)

    total = len(file_list)
    counter = 0

    for recfile in sorted(file_list):
        counter += 1
        LOGGER.info("Processing file %s (%d of %d)", recfile, counter, total)

        # read document; skip files that are not well-formed XML
        try:
            exml = etree.parse(recfile, context.parser)
        except Exception as err:
            # Logger.warn() is a deprecated alias; warning() is the
            # supported spelling on Python 3
            LOGGER.warning("XML document is not well-formed: %s", str(err))
            continue

        record = metadata.parse_record(context, exml, repo)

        for rec in record:
            LOGGER.info(
                "Inserting %s %s into database %s, table %s ....",
                rec.typename, rec.identifier, database, table
            )

            # TODO: do this as CSW Harvest
            try:
                repo.insert(rec, "local", util.get_today_and_now())
                LOGGER.info("Inserted")
            except RuntimeError as err:
                if force_update:
                    LOGGER.info("Record exists. Updating.")
                    repo.update(rec)
                    LOGGER.info("Updated")
                else:
                    LOGGER.warning("ERROR: not inserted %s", err)
def _parse_wps(context, repos, record, identifier):
    """Harvest an OGC:WPS endpoint into a single pycsw service record.

    :param context: pycsw context (queryable mappings for ``_set``)
    :param repos: repository whose ``dataset()`` factory builds records
    :param record: URL of the remote WPS
    :param identifier: identifier to assign to the service record
    :returns: populated service record object
    """

    from owslib.wps import WebProcessingService

    serviceobj = repos.dataset()

    md = WebProcessingService(record)

    # generate record of service instance; order of properties is preserved
    service_props = [
        ('pycsw:Identifier', identifier),
        ('pycsw:Typename', 'csw:Record'),
        ('pycsw:Schema', 'http://www.opengis.net/wps/1.0.0'),
        ('pycsw:MdSource', record),
        ('pycsw:InsertDate', util.get_today_and_now()),
        ('pycsw:AnyText', util.get_anytext(md._capabilities)),
        ('pycsw:Type', 'service'),
        ('pycsw:Title', md.identification.title),
        ('pycsw:Abstract', md.identification.abstract),
        ('pycsw:Keywords', ','.join(md.identification.keywords)),
        ('pycsw:Creator', md.provider.contact.name),
        ('pycsw:Publisher', md.provider.name),
        ('pycsw:Contributor', md.provider.contact.name),
        ('pycsw:OrganizationName', md.provider.contact.name),
        ('pycsw:AccessConstraints', md.identification.accessconstraints),
        ('pycsw:OtherConstraints', md.identification.fees),
        ('pycsw:Source', record),
        ('pycsw:Format', md.identification.type),
        ('pycsw:ServiceType', 'OGC:WPS'),
        ('pycsw:ServiceTypeVersion', md.identification.version),
        ('pycsw:Operation', ','.join([d.name for d in md.operations])),
        ('pycsw:OperatesOn', ','.join([o.identifier for o in md.processes])),
        ('pycsw:CouplingType', 'loose'),
    ]
    for prop, value in service_props:
        _set(context, serviceobj, prop, value)

    # service link plus an explicit GetCapabilities link
    links = [
        '%s,OGC-WPS Web Processing Service,OGC:WPS,%s' % (identifier, md.url),
        '%s,OGC-WPS Capabilities service (ver 1.0.0),OGC:WPS-1.1.0-http-get-capabilities,%s' % (identifier, build_get_url(md.url, {'service': 'WPS', 'version': '1.0.0', 'request': 'GetCapabilities'})),
    ]

    _set(context, serviceobj, 'pycsw:Links', '^'.join(links))
    _set(context, serviceobj, 'pycsw:XML', caps2iso(serviceobj, md, context))

    return serviceobj
def response(self, response, kvp, repository, server_url):
    """process OAI-PMH request

    Wraps an already-produced CSW response element into an OAI-PMH
    envelope for the requested verb, emitting oai:error elements for
    invalid requests.

    :param response: CSW response element (lxml) to repackage
    :param kvp: request key-value parameters (lowercased keys)
    :param repository: repository object (used for earliestDatestamp)
    :param server_url: base URL of this server
    :returns: oai:OAI-PMH root element
    """
    # 'mode' and 'config' are pycsw dispatch parameters, not OAI-PMH ones;
    # strip them so they are not echoed in the oai:request attributes
    kvp.pop('mode', None)
    if 'config' in kvp:
        kvp.pop('config')
    url = '%smode=oaipmh' % util.bind_url(server_url)

    node = etree.Element(util.nspath_eval('oai:OAI-PMH', self.namespaces),
                         nsmap=self.namespaces)
    node.set(
        util.nspath_eval('xsi:schemaLocation', self.namespaces),
        '%s http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd' %
        self.namespaces['oai'])
    LOGGER.info(etree.tostring(node))

    etree.SubElement(node, util.nspath_eval(
        'oai:responseDate',
        self.namespaces)).text = util.get_today_and_now()
    etree.SubElement(node, util.nspath_eval('oai:request',
                     self.namespaces), attrib=kvp).text = url

    # validate the verb before dispatching
    if 'verb' not in kvp:
        etree.SubElement(
            node, util.nspath_eval('oai:error', self.namespaces),
            code='badArgument').text = 'Missing \'verb\' parameter'
        return node

    if kvp['verb'] not in self.request_model:
        etree.SubElement(
            node, util.nspath_eval('oai:error', self.namespaces),
            code='badArgument').text = 'Unknown verb \'%s\'' % kvp['verb']
        return node

    # a CSW exception report is translated into an oai:error
    if util.xmltag_split(response.tag) == 'ExceptionReport':
        etree.SubElement(node,
                         util.nspath_eval('oai:error', self.namespaces),
                         code='badArgument').text = response.xpath(
            '//ows:ExceptionText|//ows20:ExceptionText',
            namespaces=self.context.namespaces)[0].text
        return node

    verb = kvp.pop('verb')

    # record-returning verbs require a known metadataPrefix
    if verb in ['GetRecord', 'ListIdentifiers', 'ListRecords']:
        if 'metadataprefix' not in kvp:
            etree.SubElement(node, util.nspath_eval('oai:error',
                             self.namespaces), code='badArgument'
                             ).text = 'Missing metadataPrefix parameter'
            return node
        elif kvp['metadataprefix'] not in self.metadata_formats:
            etree.SubElement(node, util.nspath_eval('oai:error',
                             self.namespaces), code='badArgument'
                             ).text = 'Invalid metadataPrefix parameter'
            return node

    # reject parameters the verb's request model does not allow
    # (was kvp.iteritems(): Python 2 only; unused value dropped)
    for key in kvp:
        if key != 'mode' and key not in self.request_model[verb]:
            etree.SubElement(
                node, util.nspath_eval('oai:error', self.namespaces),
                code='badArgument').text = 'Illegal parameter \'%s\'' % key
            return node

    verbnode = etree.SubElement(
        node, util.nspath_eval('oai:%s' % verb, self.namespaces))

    if verb == 'Identify':
        etree.SubElement(
            verbnode,
            util.nspath_eval('oai:repositoryName',
                             self.namespaces)).text = self.config.get(
            'metadata:main', 'identification_title')
        etree.SubElement(verbnode,
                         util.nspath_eval('oai:baseURL',
                                          self.namespaces)).text = url
        etree.SubElement(
            verbnode, util.nspath_eval('oai:protocolVersion',
                                       self.namespaces)).text = '2.0'
        etree.SubElement(
            verbnode,
            util.nspath_eval('oai:adminEmail',
                             self.namespaces)).text = self.config.get(
            'metadata:main', 'contact_email')
        etree.SubElement(
            verbnode, util.nspath_eval(
                'oai:earliestDatestamp',
                self.namespaces)).text = repository.query_insert('min')
        etree.SubElement(
            verbnode, util.nspath_eval('oai:deletedRecord',
                                       self.namespaces)).text = 'no'
        etree.SubElement(
            verbnode, util.nspath_eval(
                'oai:granularity',
                self.namespaces)).text = 'YYYY-MM-DDThh:mm:ssZ'

    elif verb == 'ListSets':
        # was self.metadata_sets.iteritems(): Python 2 only
        for key, value in self.metadata_sets.items():
            setnode = etree.SubElement(
                verbnode, util.nspath_eval('oai:set', self.namespaces))
            etree.SubElement(
                setnode,
                util.nspath_eval('oai:setSpec', self.namespaces)).text = key
            etree.SubElement(
                setnode,
                util.nspath_eval('oai:setName',
                                 self.namespaces)).text = value[0]

    elif verb == 'ListMetadataFormats':
        # was self.metadata_formats.iteritems(): Python 2 only
        for key, value in self.metadata_formats.items():
            mdfnode = etree.SubElement(
                verbnode,
                util.nspath_eval('oai:metadataFormat', self.namespaces))
            etree.SubElement(
                mdfnode,
                util.nspath_eval('oai:metadataPrefix',
                                 self.namespaces)).text = key
            etree.SubElement(
                mdfnode,
                util.nspath_eval('oai:schema',
                                 self.namespaces)).text = value['schema']
            etree.SubElement(
                mdfnode, util.nspath_eval(
                    'oai:metadataNamespace',
                    self.namespaces)).text = value['namespace']

    elif verb in ['GetRecord', 'ListIdentifiers', 'ListRecords']:
        if verb == 'GetRecord':  # GetRecordById
            records = response.getchildren()
        else:  # GetRecords
            records = response.getchildren()[1].getchildren()
        for child in records:
            recnode = etree.SubElement(
                verbnode, util.nspath_eval('oai:record', self.namespaces))
            header = etree.SubElement(
                recnode, util.nspath_eval('oai:header', self.namespaces))
            self._transform_element(header, response, 'oai:identifier')
            self._transform_element(header, response, 'oai:dateStamp')
            self._transform_element(header, response, 'oai:setSpec')
            if verb in ['GetRecord', 'ListRecords']:
                metadata = etree.SubElement(
                    recnode,
                    util.nspath_eval('oai:metadata', self.namespaces))
                if 'metadataprefix' in kvp and kvp[
                        'metadataprefix'] == 'oai_dc':
                    child.tag = util.nspath_eval('oai_dc:dc',
                                                 self.namespaces)
                metadata.append(child)
        if verb != 'GetRecord':
            complete_list_size = response.xpath(
                '//@numberOfRecordsMatched')[0]
            next_record = response.xpath('//@nextRecord')[0]
            cursor = str(int(complete_list_size) - int(next_record) - 1)
            etree.SubElement(
                verbnode,
                util.nspath_eval('oai:resumptionToken', self.namespaces),
                completeListSize=complete_list_size,
                cursor=cursor).text = next_record

    return node
def response(self, response, kvp, repository, server_url):
    """process OAI-PMH request

    Wraps an already-produced CSW response element into an OAI-PMH
    envelope for the requested verb, emitting oai:error elements for
    invalid requests. Returns the oai:OAI-PMH root element.
    """
    # 'mode' and 'config' are pycsw dispatch parameters, not OAI-PMH ones;
    # pop them so they are not echoed in the oai:request attributes
    mode = kvp.pop('mode', None)
    if 'config' in kvp:
        config_val = kvp.pop('config')
    url = '%smode=oaipmh' % util.bind_url(server_url)

    # build the OAI-PMH envelope root
    node = etree.Element(util.nspath_eval('oai:OAI-PMH', self.namespaces), nsmap=self.namespaces)
    node.set(util.nspath_eval('xsi:schemaLocation', self.namespaces), '%s http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd' % self.namespaces['oai'])
    LOGGER.info(etree.tostring(node))

    etree.SubElement(node, util.nspath_eval('oai:responseDate', self.namespaces)).text = util.get_today_and_now()
    etree.SubElement(node, util.nspath_eval('oai:request', self.namespaces), attrib=kvp).text = url

    # validate the verb before dispatching
    if 'verb' not in kvp:
        etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Missing \'verb\' parameter'
        return node
    if kvp['verb'] not in self.request_model.keys():
        etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Unknown verb \'%s\'' % kvp['verb']
        return node

    # a CSW exception report is translated into an oai:error
    if util.xmltag_split(response.tag) == 'ExceptionReport':
        etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = response.xpath('//ows:ExceptionText|//ows20:ExceptionText', namespaces=self.context.namespaces)[0].text
        return node

    verb = kvp.pop('verb')

    # record-returning verbs require a known metadataPrefix
    if verb in ['GetRecord', 'ListIdentifiers', 'ListRecords']:
        if 'metadataprefix' not in kvp:
            etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Missing metadataPrefix parameter'
            return node
        elif kvp['metadataprefix'] not in self.metadata_formats.keys():
            etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Invalid metadataPrefix parameter'
            return node

    # reject parameters the verb's request model does not allow
    for key, value in kvp.items():
        if key != 'mode' and key not in self.request_model[verb]:
            etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Illegal parameter \'%s\'' % key
            return node

    verbnode = etree.SubElement(node, util.nspath_eval('oai:%s' % verb, self.namespaces))

    if verb == 'Identify':
        # static repository description drawn from the server configuration
        etree.SubElement(verbnode, util.nspath_eval('oai:repositoryName', self.namespaces)).text = self.config.get('metadata:main', 'identification_title')
        etree.SubElement(verbnode, util.nspath_eval('oai:baseURL', self.namespaces)).text = url
        etree.SubElement(verbnode, util.nspath_eval('oai:protocolVersion', self.namespaces)).text = '2.0'
        etree.SubElement(verbnode, util.nspath_eval('oai:adminEmail', self.namespaces)).text = self.config.get('metadata:main', 'contact_email')
        etree.SubElement(verbnode, util.nspath_eval('oai:earliestDatestamp', self.namespaces)).text = repository.query_insert('min')
        etree.SubElement(verbnode, util.nspath_eval('oai:deletedRecord', self.namespaces)).text = 'no'
        etree.SubElement(verbnode, util.nspath_eval('oai:granularity', self.namespaces)).text = 'YYYY-MM-DDThh:mm:ssZ'
    elif verb == 'ListSets':
        # one oai:set per configured metadata set (value[0] is the human name)
        for key, value in self.metadata_sets.items():
            setnode = etree.SubElement(verbnode, util.nspath_eval('oai:set', self.namespaces))
            etree.SubElement(setnode, util.nspath_eval('oai:setSpec', self.namespaces)).text = key
            etree.SubElement(setnode, util.nspath_eval('oai:setName', self.namespaces)).text = value[0]
    elif verb == 'ListMetadataFormats':
        # one oai:metadataFormat per supported output format
        for key, value in self.metadata_formats.items():
            mdfnode = etree.SubElement(verbnode, util.nspath_eval('oai:metadataFormat', self.namespaces))
            etree.SubElement(mdfnode, util.nspath_eval('oai:metadataPrefix', self.namespaces)).text = key
            etree.SubElement(mdfnode, util.nspath_eval('oai:schema', self.namespaces)).text = value['schema']
            etree.SubElement(mdfnode, util.nspath_eval('oai:metadataNamespace', self.namespaces)).text = value['namespace']
    elif verb in ['GetRecord', 'ListIdentifiers', 'ListRecords']:
        if verb == 'GetRecord':  # GetRecordById
            records = response.getchildren()
        else:  # GetRecords
            # NOTE(review): assumes the second child of the CSW response
            # holds the SearchResults records — confirm against caller
            records = response.getchildren()[1].getchildren()
        for child in records:
            recnode = etree.SubElement(verbnode, util.nspath_eval('oai:record', self.namespaces))
            header = etree.SubElement(recnode, util.nspath_eval('oai:header', self.namespaces))
            # copy identifier/dateStamp/setSpec from the CSW response
            self._transform_element(header, response, 'oai:identifier')
            self._transform_element(header, response, 'oai:dateStamp')
            self._transform_element(header, response, 'oai:setSpec')
            if verb in ['GetRecord', 'ListRecords']:
                metadata = etree.SubElement(recnode, util.nspath_eval('oai:metadata', self.namespaces))
                if 'metadataprefix' in kvp and kvp['metadataprefix'] == 'oai_dc':
                    # rename the record root to oai_dc:dc for the oai_dc format
                    child.tag = util.nspath_eval('oai_dc:dc', self.namespaces)
                metadata.append(child)
        if verb != 'GetRecord':
            # paging: derive resumptionToken attributes from the CSW response
            complete_list_size = response.xpath('//@numberOfRecordsMatched')[0]
            next_record = response.xpath('//@nextRecord')[0]
            cursor = str(int(complete_list_size) - int(next_record) - 1)
            resumption_token = etree.SubElement(verbnode, util.nspath_eval('oai:resumptionToken', self.namespaces), completeListSize=complete_list_size, cursor=cursor).text = next_record
    return node
repo.session.query( repo.dataset.ckan_id).filter_by(ckan_id=ckan_id).delete() log.info('Deleted %s' % ckan_id) repo.session.commit() except Exception, err: repo.session.rollback() raise for ckan_id in new: ckan_info = gathered_records[ckan_id] record = get_record(context, repo, ckan_url, ckan_id, ckan_info) if not record: log.info('Skipped record %s' % ckan_id) continue try: repo.insert(record, 'local', util.get_today_and_now()) log.info('Inserted %s' % ckan_id) except Exception, err: log.error('ERROR: not inserted %s Error:%s' % (ckan_id, err)) for ckan_id in changed: ckan_info = gathered_records[ckan_id] record = get_record(context, repo, ckan_url, ckan_id, ckan_info) if not record: continue update_dict = dict([(getattr(repo.dataset, key), getattr(record, key)) \ for key in record.__dict__.keys() if key != '_sa_instance_state']) try: repo.session.begin() repo.session.query(
def _parse_sos(context, repos, record, identifier, version):
    """Harvest an OGC:SOS endpoint into pycsw records.

    :param context: pycsw context (queryable mappings for ``_set``)
    :param repos: repository whose ``dataset()`` factory builds records
    :param record: URL of the remote SOS
    :param identifier: identifier to assign to the service record
    :param version: SOS version ('1.0.0' selects the 1.0 schema)
    :returns: list of records; the service record first, then one per
              ObservationOffering
    """

    from owslib.sos import SensorObservationService

    bboxs = []
    recobjs = []
    serviceobj = repos.dataset()

    schema = ('http://www.opengis.net/sos/1.0' if version == '1.0.0'
              else 'http://www.opengis.net/sos/2.0')

    md = SensorObservationService(record, version=version)

    # generate record of service instance; order of properties is preserved
    service_props = [
        ('pycsw:Identifier', identifier),
        ('pycsw:Typename', 'csw:Record'),
        ('pycsw:Schema', schema),
        ('pycsw:MdSource', record),
        ('pycsw:InsertDate', util.get_today_and_now()),
        ('pycsw:AnyText', util.get_anytext(etree.tostring(md._capabilities))),
        ('pycsw:Type', 'service'),
        ('pycsw:Title', md.identification.title),
        ('pycsw:Abstract', md.identification.abstract),
        ('pycsw:Keywords', ','.join(md.identification.keywords)),
        ('pycsw:Creator', md.provider.contact.name),
        ('pycsw:Publisher', md.provider.name),
        ('pycsw:Contributor', md.provider.contact.name),
        ('pycsw:OrganizationName', md.provider.contact.name),
        ('pycsw:AccessConstraints', md.identification.accessconstraints),
        ('pycsw:OtherConstraints', md.identification.fees),
        ('pycsw:Source', record),
        ('pycsw:Format', md.identification.type),
        ('pycsw:CRS', 'urn:ogc:def:crs:EPSG:6.11:4326'),
        ('pycsw:DistanceUOM', 'degrees'),
        ('pycsw:ServiceType', 'OGC:SOS'),
        ('pycsw:ServiceTypeVersion', md.identification.version),
        ('pycsw:Operation', ','.join([d.name for d in md.operations])),
        ('pycsw:OperatesOn', ','.join(list(md.contents))),
        ('pycsw:CouplingType', 'tight'),
    ]
    for prop, value in service_props:
        _set(context, serviceobj, prop, value)

    links = [
        '%s,OGC-SOS Sensor Observation Service,OGC:SOS,%s' % (identifier, md.url),
    ]
    _set(context, serviceobj, 'pycsw:Links', '^'.join(links))

    # generate record foreach offering
    LOGGER.debug('Harvesting %d SOS ObservationOffering\'s ',
                 len(md.contents))

    for name in md.contents:
        offering = md.contents[name]
        recobj = repos.dataset()
        child_id = '%s-%s' % (identifier, offering.id)

        offering_props = [
            ('pycsw:Identifier', child_id),
            ('pycsw:Typename', 'csw:Record'),
            ('pycsw:Schema', schema),
            ('pycsw:MdSource', record),
            ('pycsw:InsertDate', util.get_today_and_now()),
            ('pycsw:Type', 'dataset'),
            ('pycsw:ParentIdentifier', identifier),
            ('pycsw:Title', offering.description),
            ('pycsw:Abstract', offering.description),
            ('pycsw:TempExtent_begin',
             util.datetime2iso8601(offering.begin_position)),
            ('pycsw:TempExtent_end',
             util.datetime2iso8601(offering.end_position)),
        ]
        for prop, value in offering_props:
            _set(context, recobj, prop, value)

        # For observed_properties that have mmi url or urn, we simply want
        # the observation name.
        observed_properties = []
        for obs in offering.observed_properties:
            lowered = obs.lower()
            if lowered.startswith(('urn:', 'x-urn')):
                # urn form, e.g. urn:ogc:def:phenomenon:mmisw.org:cf:sea_water_salinity
                observed_properties.append(obs.rsplit(':', 1)[-1])
            elif lowered.startswith(('http://', 'https://')):
                # uri form, e.g. http://mmisw.org/ont/cf/parameter/sea_floor_depth_below_sea_surface
                observed_properties.append(obs.rsplit('/', 1)[-1])
            else:
                observed_properties.append(obs)

        # Build anytext from description and the observed_properties.
        anytext = [offering.description] + observed_properties
        _set(context, recobj, 'pycsw:AnyText', util.get_anytext(anytext))
        _set(context, recobj, 'pycsw:Keywords', ','.join(observed_properties))

        bbox = offering.bbox
        if bbox is not None:
            tmp = '%s,%s,%s,%s' % (bbox[0], bbox[1], bbox[2], bbox[3])
            wkt_polygon = util.bbox2wktpolygon(tmp)
            _set(context, recobj, 'pycsw:BoundingBox', wkt_polygon)
            _set(context, recobj, 'pycsw:CRS', offering.bbox_srs.id)
            _set(context, recobj, 'pycsw:DistanceUOM', 'degrees')
            bboxs.append(wkt_polygon)

        _set(context, recobj, 'pycsw:XML', caps2iso(recobj, md, context))
        recobjs.append(recobj)

    # Derive a bbox based on aggregated featuretype bbox values
    bbox_agg = util.bbox_from_polygons(bboxs)
    if bbox_agg is not None:
        _set(context, serviceobj, 'pycsw:BoundingBox', bbox_agg)

    _set(context, serviceobj, 'pycsw:XML', caps2iso(serviceobj, md, context))
    recobjs.insert(0, serviceobj)

    return recobjs
def _parse_wcs(context, repos, record, identifier):
    """Harvest an OGC:WCS 1.0.0 endpoint into pycsw records.

    :param context: pycsw context (queryable mappings for ``_set``)
    :param repos: repository whose ``dataset()`` factory builds records
    :param record: URL of the remote WCS
    :param identifier: identifier to assign to the service record
    :returns: list of records; the service record first, then one per
              coverage
    """

    from owslib.wcs import WebCoverageService

    bboxs = []
    recobjs = []
    serviceobj = repos.dataset()

    md = WebCoverageService(record, '1.0.0')

    # generate record of service instance; order of properties is preserved
    # (note: owslib WCS exposes 'accessConstraints' in camelCase)
    service_props = [
        ('pycsw:Identifier', identifier),
        ('pycsw:Typename', 'csw:Record'),
        ('pycsw:Schema', 'http://www.opengis.net/wcs'),
        ('pycsw:MdSource', record),
        ('pycsw:InsertDate', util.get_today_and_now()),
        ('pycsw:AnyText', util.get_anytext(etree.tostring(md._capabilities))),
        ('pycsw:Type', 'service'),
        ('pycsw:Title', md.identification.title),
        ('pycsw:Abstract', md.identification.abstract),
        ('pycsw:Keywords', ','.join(md.identification.keywords)),
        ('pycsw:Creator', md.provider.contact.name),
        ('pycsw:Publisher', md.provider.name),
        ('pycsw:Contributor', md.provider.contact.name),
        ('pycsw:OrganizationName', md.provider.contact.name),
        ('pycsw:AccessConstraints', md.identification.accessConstraints),
        ('pycsw:OtherConstraints', md.identification.fees),
        ('pycsw:Source', record),
        ('pycsw:Format', md.identification.type),
        ('pycsw:CRS', 'urn:ogc:def:crs:EPSG:6.11:4326'),
        ('pycsw:DistanceUOM', 'degrees'),
        ('pycsw:ServiceType', 'OGC:WCS'),
        ('pycsw:ServiceTypeVersion', md.identification.version),
        ('pycsw:Operation', ','.join([d.name for d in md.operations])),
        ('pycsw:OperatesOn', ','.join(list(md.contents))),
        ('pycsw:CouplingType', 'tight'),
    ]
    for prop, value in service_props:
        _set(context, serviceobj, prop, value)

    links = [
        '%s,OGC-WCS Web Coverage Service,OGC:WCS,%s' % (identifier, md.url)
    ]
    _set(context, serviceobj, 'pycsw:Links', '^'.join(links))

    # generate record foreach coverage
    LOGGER.debug('Harvesting %d WCS coverages ' % len(md.contents))

    for coverage in md.contents:
        cov = md.contents[coverage]
        recobj = repos.dataset()
        child_id = '%s-%s' % (identifier, cov.id)

        coverage_props = [
            ('pycsw:Identifier', child_id),
            ('pycsw:Typename', 'csw:Record'),
            ('pycsw:Schema', 'http://www.opengis.net/wcs'),
            ('pycsw:MdSource', record),
            ('pycsw:InsertDate', util.get_today_and_now()),
            ('pycsw:Type', 'dataset'),
            ('pycsw:ParentIdentifier', identifier),
            ('pycsw:Title', cov.title),
            ('pycsw:Abstract', cov.abstract),
            ('pycsw:Keywords', ','.join(cov.keywords)),
            ('pycsw:AnyText',
             util.get_anytext([cov.title, cov.abstract,
                               ','.join(cov.keywords)])),
        ]
        for prop, value in coverage_props:
            _set(context, recobj, prop, value)

        bbox = cov.boundingBoxWGS84
        if bbox is not None:
            tmp = '%s,%s,%s,%s' % (bbox[0], bbox[1], bbox[2], bbox[3])
            wkt_polygon = util.bbox2wktpolygon(tmp)
            _set(context, recobj, 'pycsw:BoundingBox', wkt_polygon)
            _set(context, recobj, 'pycsw:CRS', 'urn:ogc:def:crs:EPSG:6.11:4326')
            _set(context, recobj, 'pycsw:DistanceUOM', 'degrees')
            bboxs.append(wkt_polygon)

        links = [
            '%s,OGC-Web Coverage Service,OGC:WCS,%s' % (cov.id, md.url)
        ]
        _set(context, recobj, 'pycsw:Links', '^'.join(links))
        _set(context, recobj, 'pycsw:XML', caps2iso(recobj, md, context))

        recobjs.append(recobj)

    # Derive a bbox based on aggregated coverage bbox values
    bbox_agg = util.bbox_from_polygons(bboxs)
    if bbox_agg is not None:
        _set(context, serviceobj, 'pycsw:BoundingBox', bbox_agg)

    _set(context, serviceobj, 'pycsw:XML', caps2iso(serviceobj, md, context))
    recobjs.insert(0, serviceobj)

    return recobjs
def _parse_wms(context, repos, record, identifier):
    """Harvest an OGC:WMS endpoint into pycsw records.

    :param context: pycsw context (queryable mappings for ``_set``)
    :param repos: repository whose ``dataset()`` factory builds records
    :param record: URL of the remote WMS
    :param identifier: identifier to assign to the service record
    :returns: list of records; the service record first, then one per layer
    """

    from owslib.wms import WebMapService

    recobjs = []
    serviceobj = repos.dataset()

    md = WebMapService(record)

    # generate record of service instance; order of properties is preserved
    service_props = [
        ('pycsw:Identifier', identifier),
        ('pycsw:Typename', 'csw:Record'),
        ('pycsw:Schema', 'http://www.opengis.net/wms'),
        ('pycsw:MdSource', record),
        ('pycsw:InsertDate', util.get_today_and_now()),
        ('pycsw:AnyText', util.get_anytext(md.getServiceXML())),
        ('pycsw:Type', 'service'),
        ('pycsw:Title', md.identification.title),
        ('pycsw:Abstract', md.identification.abstract),
        ('pycsw:Keywords', ','.join(md.identification.keywords)),
        ('pycsw:Creator', md.provider.contact.name),
        ('pycsw:Publisher', md.provider.name),
        ('pycsw:Contributor', md.provider.contact.name),
        ('pycsw:OrganizationName', md.provider.contact.name),
        ('pycsw:AccessConstraints', md.identification.accessconstraints),
        ('pycsw:OtherConstraints', md.identification.fees),
        ('pycsw:Source', record),
        ('pycsw:Format', md.identification.type),
    ]
    for prop, value in service_props:
        _set(context, serviceobj, prop, value)

    # the service-level bbox comes from the first top-level layer
    for name in md.contents:
        toplayer = md.contents[name]
        if toplayer.parent is None:
            bbox = toplayer.boundingBoxWGS84
            tmp = '%s,%s,%s,%s' % (bbox[0], bbox[1], bbox[2], bbox[3])
            _set(context, serviceobj, 'pycsw:BoundingBox',
                 util.bbox2wktpolygon(tmp))
            break

    more_service_props = [
        ('pycsw:CRS', 'urn:ogc:def:crs:EPSG:6.11:4326'),
        ('pycsw:DistanceUOM', 'degrees'),
        ('pycsw:ServiceType', 'OGC:WMS'),
        ('pycsw:ServiceTypeVersion', md.identification.version),
        ('pycsw:Operation', ','.join([d.name for d in md.operations])),
        ('pycsw:OperatesOn', ','.join(list(md.contents))),
        ('pycsw:CouplingType', 'tight'),
    ]
    for prop, value in more_service_props:
        _set(context, serviceobj, prop, value)

    links = [
        '%s,OGC-WMS Web Map Service,OGC:WMS,%s' % (identifier, md.url),
    ]
    _set(context, serviceobj, 'pycsw:Links', '^'.join(links))
    _set(context, serviceobj, 'pycsw:XML', caps2iso(serviceobj, md, context))

    recobjs.append(serviceobj)

    # generate record foreach layer
    LOGGER.debug('Harvesting %d WMS layers' % len(md.contents))

    for layer in md.contents:
        layer_md = md.contents[layer]
        recobj = repos.dataset()
        child_id = '%s-%s' % (identifier, layer_md.name)

        layer_props = [
            ('pycsw:Identifier', child_id),
            ('pycsw:Typename', 'csw:Record'),
            ('pycsw:Schema', 'http://www.opengis.net/wms'),
            ('pycsw:MdSource', record),
            ('pycsw:InsertDate', util.get_today_and_now()),
            ('pycsw:Type', 'dataset'),
            ('pycsw:ParentIdentifier', identifier),
            ('pycsw:Title', layer_md.title),
            ('pycsw:Abstract', layer_md.abstract),
            ('pycsw:Keywords', ','.join(layer_md.keywords)),
            ('pycsw:AnyText',
             util.get_anytext([layer_md.title, layer_md.abstract,
                               ','.join(layer_md.keywords)])),
        ]
        for prop, value in layer_props:
            _set(context, recobj, prop, value)

        # prefer the WGS84 bbox; fall back to the native-CRS bbox
        bbox = layer_md.boundingBoxWGS84
        if bbox is not None:
            tmp = '%s,%s,%s,%s' % (bbox[0], bbox[1], bbox[2], bbox[3])
            _set(context, recobj, 'pycsw:BoundingBox',
                 util.bbox2wktpolygon(tmp))
            _set(context, recobj, 'pycsw:CRS', 'urn:ogc:def:crs:EPSG:6.11:4326')
            _set(context, recobj, 'pycsw:DistanceUOM', 'degrees')
        else:
            bbox = layer_md.boundingBox
            if bbox:
                tmp = '%s,%s,%s,%s' % (bbox[0], bbox[1], bbox[2], bbox[3])
                _set(context, recobj, 'pycsw:BoundingBox',
                     util.bbox2wktpolygon(tmp))
                # last element of the native bbox tuple carries the CRS code
                _set(context, recobj, 'pycsw:CRS',
                     'urn:ogc:def:crs:EPSG:6.11:%s' % bbox[-1].split(':')[1])

        # GetMap thumbnail request for this layer
        params = {
            'service': 'WMS',
            'version': '1.1.1',
            'request': 'GetMap',
            'layers': layer_md.name,
            'format': 'image/png',
            'height': '200',
            'width': '200',
            'srs': 'EPSG:4326',
            'bbox': '%s,%s,%s,%s' % (bbox[0], bbox[1], bbox[2], bbox[3]),
            'styles': ''
        }
        links = [
            '%s,OGC-Web Map Service,OGC:WMS,%s' % (layer_md.name, md.url),
            '%s,Web image thumbnail (URL),WWW:LINK-1.0-http--image-thumbnail,%s' % (layer_md.name, build_get_url(md.url, params))
        ]
        _set(context, recobj, 'pycsw:Links', '^'.join(links))
        _set(context, recobj, 'pycsw:XML', caps2iso(recobj, md, context))

        recobjs.append(recobj)

    return recobjs
def _parse_csw(context, repos, record, identifier, pagesize=10):
    """Harvest a remote CSW endpoint into a list of pycsw dataset records.

    Builds one service record from the capabilities, then pages through
    GetRecords responses (preferring ISO 19139, falling back to Dublin
    Core) and parses every returned metadata record.

    :param context: pycsw static context (namespaces, mappings, parser)
    :param repos: repository whose ``dataset()`` factory builds records
    :param record: URL of the remote CSW endpoint
    :param identifier: identifier to assign to the service record
    :param pagesize: number of records fetched per GetRecords request
    :returns: list of populated dataset record objects
    :raises RuntimeError: if the server rejects the GetRecords query
    """
    from owslib.csw import CatalogueServiceWeb

    recobjs = []  # records
    serviceobj = repos.dataset()

    # if init raises error, this might not be a CSW
    md = CatalogueServiceWeb(record, timeout=60)

    LOGGER.debug('Setting CSW service metadata')
    # generate record of service instance
    _set(context, serviceobj, 'pycsw:Identifier', identifier)
    _set(context, serviceobj, 'pycsw:Typename', 'csw:Record')
    _set(context, serviceobj, 'pycsw:Schema',
         'http://www.opengis.net/cat/csw/2.0.2')
    _set(context, serviceobj, 'pycsw:MdSource', record)
    _set(context, serviceobj, 'pycsw:InsertDate', util.get_today_and_now())
    _set(context, serviceobj, 'pycsw:AnyText', util.get_anytext(md._exml))
    _set(context, serviceobj, 'pycsw:Type', 'service')
    _set(context, serviceobj, 'pycsw:Title', md.identification.title)
    _set(context, serviceobj, 'pycsw:Abstract', md.identification.abstract)
    _set(context, serviceobj, 'pycsw:Keywords',
         ','.join(md.identification.keywords))
    _set(context, serviceobj, 'pycsw:Creator', md.provider.contact.name)
    _set(context, serviceobj, 'pycsw:Publisher', md.provider.name)
    _set(context, serviceobj, 'pycsw:Contributor', md.provider.contact.name)
    _set(context, serviceobj, 'pycsw:OrganizationName',
         md.provider.contact.name)
    _set(context, serviceobj, 'pycsw:AccessConstraints',
         md.identification.accessconstraints)
    _set(context, serviceobj, 'pycsw:OtherConstraints', md.identification.fees)
    _set(context, serviceobj, 'pycsw:Source', record)
    _set(context, serviceobj, 'pycsw:Format', md.identification.type)
    _set(context, serviceobj, 'pycsw:ServiceType', 'OGC:CSW')
    _set(context, serviceobj, 'pycsw:ServiceTypeVersion',
         md.identification.version)
    _set(context, serviceobj, 'pycsw:Operation',
         ','.join([d.name for d in md.operations]))
    _set(context, serviceobj, 'pycsw:CouplingType', 'tight')

    links = [
        '%s,OGC-CSW Catalogue Service for the Web,OGC:CSW,%s' %
        (identifier, md.url)
    ]

    _set(context, serviceobj, 'pycsw:Links', '^'.join(links))
    _set(context, serviceobj, 'pycsw:XML', caps2iso(serviceobj, md, context))

    recobjs.append(serviceobj)

    # get all supported typenames of metadata
    # so we can harvest the entire CSW
    # try for ISO, settle for Dublin Core
    csw_typenames = 'csw:Record'
    csw_outputschema = 'http://www.opengis.net/cat/csw/2.0.2'

    grop = md.get_operation_by_name('GetRecords')
    if all(['gmd:MD_Metadata' in grop.parameters['typeNames']['values'],
            'http://www.isotc211.org/2005/gmd' in
            grop.parameters['outputSchema']['values']]):
        LOGGER.info('CSW supports ISO')
        csw_typenames = 'gmd:MD_Metadata'
        csw_outputschema = 'http://www.isotc211.org/2005/gmd'

    # now get all records
    # get total number of records to loop against
    try:
        md.getrecords2(typenames=csw_typenames, resulttype='hits',
                       outputschema=csw_outputschema)
        matches = md.results['matches']
    except Exception:  # was a bare except; this is a CSW, but server rejects query
        raise RuntimeError(md.response)

    if pagesize > matches:
        pagesize = matches

    LOGGER.debug('Harvesting %d CSW records' % matches)

    # loop over all catalogue records incrementally
    for r in range(1, matches + 1, pagesize):
        try:
            md.getrecords2(typenames=csw_typenames, startposition=r,
                           maxrecords=pagesize,
                           outputschema=csw_outputschema, esn='full')
        except Exception:
            # this is a CSW, but server rejects query
            raise RuntimeError(md.response)
        # dict.iteritems() was removed in Python 3; use items()
        for k, v in md.records.items():
            # try to parse metadata
            try:
                LOGGER.debug('Parsing metadata record: %s', v.xml)
                if csw_typenames == 'gmd:MD_Metadata':
                    recobjs.append(
                        _parse_iso(context, repos,
                                   etree.fromstring(v.xml, context.parser)))
                else:
                    recobjs.append(
                        _parse_dc(context, repos,
                                  etree.fromstring(v.xml, context.parser)))
            except Exception as err:  # parsing failed for some reason
                LOGGER.warning('Metadata parsing failed %s', err)

    return recobjs
def _parse_dc(context, repos, exml):
    """Parse a Dublin Core (csw:Record) document into a dataset record.

    :param context: pycsw static context (namespaces, mappings)
    :param repos: repository whose ``dataset()`` factory builds records
    :param exml: parsed XML element tree of the record
    :returns: populated dataset record object
    """
    from owslib.csw import CswRecord

    recobj = repos.dataset()
    links = []

    md = CswRecord(exml)

    if md.bbox is None:
        bbox = None
    else:
        bbox = md.bbox

    _set(context, recobj, 'pycsw:Identifier', md.identifier)
    _set(context, recobj, 'pycsw:Typename', 'csw:Record')
    _set(context, recobj, 'pycsw:Schema', context.namespaces['csw'])
    _set(context, recobj, 'pycsw:MdSource', 'local')
    _set(context, recobj, 'pycsw:InsertDate', util.get_today_and_now())
    _set(context, recobj, 'pycsw:XML', md.xml)
    _set(context, recobj, 'pycsw:AnyText', util.get_anytext(exml))
    _set(context, recobj, 'pycsw:Language', md.language)
    _set(context, recobj, 'pycsw:Type', md.type)
    _set(context, recobj, 'pycsw:Title', md.title)
    _set(context, recobj, 'pycsw:AlternateTitle', md.alternative)
    _set(context, recobj, 'pycsw:Abstract', md.abstract)

    # only index keywords when every subject is present
    if len(md.subjects) > 0 and None not in md.subjects:
        _set(context, recobj, 'pycsw:Keywords', ','.join(md.subjects))

    _set(context, recobj, 'pycsw:ParentIdentifier', md.ispartof)
    _set(context, recobj, 'pycsw:Relation', md.relation)
    # DC carries a single temporal value; used for both extent bounds
    _set(context, recobj, 'pycsw:TempExtent_begin', md.temporal)
    _set(context, recobj, 'pycsw:TempExtent_end', md.temporal)
    _set(context, recobj, 'pycsw:ResourceLanguage', md.language)
    _set(context, recobj, 'pycsw:Creator', md.creator)
    _set(context, recobj, 'pycsw:Publisher', md.publisher)
    _set(context, recobj, 'pycsw:Contributor', md.contributor)
    _set(context, recobj, 'pycsw:OrganizationName', md.rightsholder)
    _set(context, recobj, 'pycsw:AccessConstraints', md.accessrights)
    _set(context, recobj, 'pycsw:OtherConstraints', md.license)
    _set(context, recobj, 'pycsw:Date', md.date)
    _set(context, recobj, 'pycsw:CreationDate', md.created)
    _set(context, recobj, 'pycsw:PublicationDate', md.issued)
    _set(context, recobj, 'pycsw:Modified', md.modified)
    _set(context, recobj, 'pycsw:Format', md.format)
    _set(context, recobj, 'pycsw:Source', md.source)

    # links are stored as '^'-joined 'name,description,protocol,url' tuples
    for ref in md.references:
        tmp = ',,%s,%s' % (ref['scheme'], ref['url'])
        links.append(tmp)
    for uri in md.uris:
        tmp = '%s,%s,%s,%s' % \
            (uri['name'], uri['description'], uri['protocol'], uri['url'])
        links.append(tmp)

    if len(links) > 0:
        _set(context, recobj, 'pycsw:Links', '^'.join(links))

    if bbox is not None:
        try:
            tmp = '%s,%s,%s,%s' % (bbox.minx, bbox.miny, bbox.maxx, bbox.maxy)
            _set(context, recobj, 'pycsw:BoundingBox',
                 util.bbox2wktpolygon(tmp))
        except Exception:
            # was a bare except; coordinates are corrupted, do not include
            _set(context, recobj, 'pycsw:BoundingBox', None)
    else:
        _set(context, recobj, 'pycsw:BoundingBox', None)

    return recobj
def load_records(context, database, table, xml_dirpath,
                 recursive=False, force_update=False):
    """Load metadata records from directory of files to database

    :param context: pycsw static context (namespaces, mappings, parser)
    :param database: database connection string
    :param table: records table name
    :param xml_dirpath: a single XML file or a directory of ``*.xml`` files
    :param recursive: walk ``xml_dirpath`` recursively for ``*.xml`` files
    :param force_update: update an existing record when insertion fails
    :returns: tuple of file paths that were successfully inserted/updated
    """
    from sqlalchemy.exc import DBAPIError

    repo = repository.Repository(database, context, table=table)

    file_list = []
    loaded_files = set()

    # build the candidate file list: single file, recursive walk, or glob
    if os.path.isfile(xml_dirpath):
        file_list.append(xml_dirpath)
    elif recursive:
        for root, dirs, files in os.walk(xml_dirpath):
            for mfile in files:
                if mfile.endswith('.xml'):
                    file_list.append(os.path.join(root, mfile))
    else:
        for rec in glob(os.path.join(xml_dirpath, '*.xml')):
            file_list.append(rec)

    total = len(file_list)
    counter = 0

    for recfile in sorted(file_list):
        counter += 1
        LOGGER.info('Processing file %s (%d of %d)', recfile, counter, total)

        # read document
        try:
            exml = etree.parse(recfile, context.parser)
        except etree.XMLSyntaxError:
            # include the traceback for parity with the verbose variant of
            # this loader elsewhere in this module
            LOGGER.error('XML document "%s" is not well-formed', recfile,
                         exc_info=True)
            continue
        except Exception:
            LOGGER.exception('XML document "%s" is not well-formed', recfile)
            continue

        try:
            record = metadata.parse_record(context, exml, repo)
        except Exception:
            LOGGER.exception('Could not parse "%s" as an XML record', recfile)
            continue

        for rec in record:
            LOGGER.info('Inserting %s %s into database %s, table %s ....',
                        rec.typename, rec.identifier, database, table)

            # TODO: do this as CSW Harvest
            try:
                repo.insert(rec, 'local', util.get_today_and_now())
                loaded_files.add(recfile)
                LOGGER.info('Inserted %s', recfile)
            except Exception as err:
                # NOTE(review): any insert failure (not only duplicates)
                # triggers an update when force_update is set — confirm
                # this is intended
                if force_update:
                    LOGGER.info('Record exists. Updating.')
                    repo.update(rec)
                    LOGGER.info('Updated %s', recfile)
                    loaded_files.add(recfile)
                else:
                    if isinstance(err, DBAPIError) and err.args:
                        # Pull a decent database error message and not the full SQL that was run
                        # since INSERT SQL statements are rather large.
                        LOGGER.error('ERROR: %s not inserted: %s', recfile,
                                     err.args[0])
                    else:
                        LOGGER.error('ERROR: %s not inserted: %s', recfile,
                                     err)

    return tuple(loaded_files)
def load(pycsw_config, ckan_url):
    """Synchronize CKAN harvested datasets into the pycsw repository.

    Pages through the CKAN search API to gather dataset identifiers, diffs
    them against what is already stored (keyed on ``ckan_id``), then deletes
    removed datasets, inserts new ones, and updates changed ones.

    :param pycsw_config: configuration object providing the
        ``repository``/``database`` and ``repository``/``table`` settings
    :param ckan_url: base URL of the CKAN instance (trailing slash expected,
        since the query path is appended directly)
    """
    database = pycsw_config.get("repository", "database")
    # NOTE(review): a third positional argument to ConfigParser.get() is not
    # a fallback value in Python 3 — confirm pycsw_config's get() signature
    table_name = pycsw_config.get("repository", "table", "records")

    context = pycsw.core.config.StaticContext()
    repo = repository.Repository(database, context, table=table_name)

    log.info("Started gathering CKAN datasets identifiers: {0}".format(
        str(datetime.datetime.now())))
    # CKAN legacy search API query; %s placeholder is the paging offset
    query = 'api/search/dataset?qjson={"fl":"id,metadata_modified,extras_harvest_object_id,' \
            'extras_metadata_source", "q":"harvest_object_id:[\\"\\" TO *]", "limit":1000, "start":%s}'

    start = 0
    gathered_records = {}

    # page through the API, 1000 results at a time, until an empty page
    while True:
        url = ckan_url + query % start
        response = requests.get(url)
        listing = response.json()
        if not isinstance(listing, dict):
            raise RuntimeError("Wrong API response: %s" % listing)
        results = listing.get("results")
        if not results:
            break
        for result in results:
            gathered_records[result["id"]] = {
                "metadata_modified": result["metadata_modified"],
                "harvest_object_id": result["extras"]["harvest_object_id"],
                "source": result["extras"].get("metadata_source"),
            }
        start = start + 1000
        log.debug("Gathered %s" % start)

    log.info("Gather finished ({0} datasets): {1}".format(
        len(gathered_records.keys()), str(datetime.datetime.now())))

    # snapshot what is already in the repository: ckan_id -> ckan_modified
    existing_records = {}
    query = repo.session.query(repo.dataset.ckan_id, repo.dataset.ckan_modified)
    for row in query:
        existing_records[row[0]] = row[1]
    repo.session.close()

    # set arithmetic over identifiers decides the work to do
    new = set(gathered_records) - set(existing_records)
    deleted = set(existing_records) - set(gathered_records)
    changed = set()

    for key in set(gathered_records) & set(existing_records):
        # NOTE(review): compares CKAN's metadata_modified against the stored
        # ckan_modified value — assumes both are comparable (e.g. ISO strings)
        if gathered_records[key]["metadata_modified"] > existing_records[key]:
            changed.add(key)

    for ckan_id in deleted:
        try:
            repo.session.begin()
            repo.session.query(
                repo.dataset.ckan_id).filter_by(ckan_id=ckan_id).delete()
            log.info("Deleted %s" % ckan_id)
            repo.session.commit()
        except Exception:
            repo.session.rollback()
            raise

    for ckan_id in new:
        ckan_info = gathered_records[ckan_id]
        record = get_record(context, repo, ckan_url, ckan_id,
                            ckan_info)
        if not record:
            log.info("Skipped record %s" % ckan_id)
            continue
        try:
            repo.insert(record, "local", util.get_today_and_now())
            log.info("Inserted %s" % ckan_id)
        except Exception as err:
            # insertion failures are logged and skipped, not fatal
            log.error("ERROR: not inserted %s Error:%s" % (ckan_id, err))

    for ckan_id in changed:
        ckan_info = gathered_records[ckan_id]
        record = get_record(context, repo, ckan_url, ckan_id, ckan_info)
        if not record:
            continue
        # map every ORM column of the refreshed record into an update dict,
        # skipping SQLAlchemy's internal instance-state attribute
        update_dict = dict([(getattr(repo.dataset, key),
                             getattr(record, key))
                            for key in record.__dict__.keys()
                            if key != "_sa_instance_state"])
        try:
            repo.session.begin()
            repo.session.query(
                repo.dataset).filter_by(ckan_id=ckan_id).update(update_dict)
            repo.session.commit()
            log.info("Changed %s" % ckan_id)
        except Exception as err:
            repo.session.rollback()
            raise RuntimeError("ERROR: %s" % str(err))
def _parse_fgdc(context, repos, exml):
    """Parse an FGDC CSDGM metadata document into a dataset record.

    :param context: pycsw static context (namespaces, mappings)
    :param repos: repository whose ``dataset()`` factory builds records
    :param exml: parsed XML element tree of the record
    :returns: populated dataset record object
    """
    from owslib.fgdc import Metadata

    recobj = repos.dataset()
    links = []

    md = Metadata(exml)

    if md.idinfo.datasetid is not None:  # we need an identifier
        _set(context, recobj, 'pycsw:Identifier', md.idinfo.datasetid)
    else:  # generate one ourselves
        # uuid.UUID.get_urn() was removed in Python 3; the urn property
        # returns the same RFC 4122 URN string
        _set(context, recobj, 'pycsw:Identifier', uuid.uuid1().urn)

    _set(context, recobj, 'pycsw:Typename', 'fgdc:metadata')
    _set(context, recobj, 'pycsw:Schema', context.namespaces['fgdc'])
    _set(context, recobj, 'pycsw:MdSource', 'local')
    _set(context, recobj, 'pycsw:InsertDate', util.get_today_and_now())
    _set(context, recobj, 'pycsw:XML', md.xml)
    _set(context, recobj, 'pycsw:AnyText', util.get_anytext(exml))
    _set(context, recobj, 'pycsw:Language', 'en-US')

    if hasattr(md.idinfo, 'descript'):
        _set(context, recobj, 'pycsw:Abstract', md.idinfo.descript.abstract)

    if hasattr(md.idinfo, 'keywords'):
        if md.idinfo.keywords.theme:
            _set(context, recobj, 'pycsw:Keywords',
                 ','.join(md.idinfo.keywords.theme[0]['themekey']))

    # NOTE(review): md.idinfo.timeperd itself is not hasattr-guarded —
    # confirm owslib always provides it
    if hasattr(md.idinfo.timeperd, 'timeinfo'):
        if hasattr(md.idinfo.timeperd.timeinfo, 'rngdates'):
            _set(context, recobj, 'pycsw:TempExtent_begin',
                 md.idinfo.timeperd.timeinfo.rngdates.begdate)
            _set(context, recobj, 'pycsw:TempExtent_end',
                 md.idinfo.timeperd.timeinfo.rngdates.enddate)

    if hasattr(md.idinfo, 'origin'):
        _set(context, recobj, 'pycsw:Creator', md.idinfo.origin)
        _set(context, recobj, 'pycsw:Publisher', md.idinfo.origin)
        _set(context, recobj, 'pycsw:Contributor', md.idinfo.origin)

    if hasattr(md.idinfo, 'ptcontac'):
        _set(context, recobj, 'pycsw:OrganizationName',
             md.idinfo.ptcontac.cntorg)

    _set(context, recobj, 'pycsw:AccessConstraints', md.idinfo.accconst)
    _set(context, recobj, 'pycsw:OtherConstraints', md.idinfo.useconst)
    _set(context, recobj, 'pycsw:Date', md.metainfo.metd)

    if hasattr(md.idinfo, 'spdom') and hasattr(md.idinfo.spdom, 'bbox'):
        bbox = md.idinfo.spdom.bbox
    else:
        bbox = None

    if hasattr(md.idinfo, 'citation'):
        if hasattr(md.idinfo.citation, 'citeinfo'):
            _set(context, recobj, 'pycsw:Type',
                 md.idinfo.citation.citeinfo['geoform'])
            _set(context, recobj, 'pycsw:Title',
                 md.idinfo.citation.citeinfo['title'])
            _set(context, recobj, 'pycsw:PublicationDate',
                 md.idinfo.citation.citeinfo['pubdate'])
            _set(context, recobj, 'pycsw:Format',
                 md.idinfo.citation.citeinfo['geoform'])
            if md.idinfo.citation.citeinfo['onlink']:
                for link in md.idinfo.citation.citeinfo['onlink']:
                    tmp = ',,,%s' % link
                    links.append(tmp)

    if hasattr(md, 'distinfo') and hasattr(md.distinfo, 'stdorder'):
        for link in md.distinfo.stdorder['digform']:
            tmp = ',%s,,%s' % (link['name'], link['url'])
            links.append(tmp)

    if len(links) > 0:
        _set(context, recobj, 'pycsw:Links', '^'.join(links))

    if bbox is not None:
        try:
            tmp = '%s,%s,%s,%s' % (bbox.minx, bbox.miny, bbox.maxx, bbox.maxy)
            _set(context, recobj, 'pycsw:BoundingBox',
                 util.bbox2wktpolygon(tmp))
        except Exception:
            # was a bare except; coordinates are corrupted, do not include
            _set(context, recobj, 'pycsw:BoundingBox', None)
    else:
        _set(context, recobj, 'pycsw:BoundingBox', None)

    return recobj
def load_records(context, database, table, xml_dirpath,
                 recursive=False, force_update=False):
    """Load metadata records from directory of files to database"""
    from sqlalchemy.exc import DBAPIError

    repo = repository.Repository(database, context, table=table)

    loaded_files = set()

    # assemble the candidate XML files: a single file, a recursive
    # directory walk, or a flat directory glob
    if os.path.isfile(xml_dirpath):
        file_list = [xml_dirpath]
    elif recursive:
        file_list = [
            os.path.join(root, mfile)
            for root, dirs, files in os.walk(xml_dirpath)
            for mfile in files
            if mfile.endswith('.xml')
        ]
    else:
        file_list = [rec for rec in glob(os.path.join(xml_dirpath, '*.xml'))]

    total = len(file_list)

    for counter, recfile in enumerate(sorted(file_list), start=1):
        LOGGER.info('Processing file %s (%d of %d)', recfile, counter, total)

        # read document
        try:
            exml = etree.parse(recfile, context.parser)
        except etree.XMLSyntaxError:
            LOGGER.error('XML document "%s" is not well-formed', recfile,
                         exc_info=True)
            continue
        except Exception:
            LOGGER.exception('XML document "%s" is not well-formed', recfile)
            continue

        try:
            record = metadata.parse_record(context, exml, repo)
        except Exception:
            LOGGER.exception('Could not parse "%s" as an XML record', recfile)
            continue

        for rec in record:
            LOGGER.info('Inserting %s %s into database %s, table %s ....',
                        rec.typename, rec.identifier, database, table)

            # TODO: do this as CSW Harvest
            try:
                repo.insert(rec, 'local', util.get_today_and_now())
                loaded_files.add(recfile)
                LOGGER.info('Inserted %s', recfile)
            except Exception as err:
                if force_update:
                    LOGGER.info('Record exists. Updating.')
                    repo.update(rec)
                    LOGGER.info('Updated %s', recfile)
                    loaded_files.add(recfile)
                elif isinstance(err, DBAPIError) and err.args:
                    # Pull a decent database error message and not the full SQL that was run
                    # since INSERT SQL statements are rather large.
                    LOGGER.error('ERROR: %s not inserted: %s', recfile,
                                 err.args[0], exc_info=True)
                else:
                    LOGGER.error('ERROR: %s not inserted: %s', recfile, err,
                                 exc_info=True)

    return tuple(loaded_files)
def _parse_iso(context, repos, exml):
    """Parse an ISO 19139 (gmd:MD_Metadata) document into a dataset record.

    :param context: pycsw static context (namespaces, mappings)
    :param repos: repository whose ``dataset()`` factory builds records
    :param exml: parsed XML element tree of the record
    :returns: populated dataset record object
    """
    from owslib.iso import MD_Metadata

    recobj = repos.dataset()
    links = []

    md = MD_Metadata(exml)

    _set(context, recobj, 'pycsw:Identifier', md.identifier)
    _set(context, recobj, 'pycsw:Typename', 'gmd:MD_Metadata')
    _set(context, recobj, 'pycsw:Schema', context.namespaces['gmd'])
    _set(context, recobj, 'pycsw:MdSource', 'local')
    _set(context, recobj, 'pycsw:InsertDate', util.get_today_and_now())
    _set(context, recobj, 'pycsw:XML', md.xml)
    _set(context, recobj, 'pycsw:AnyText', util.get_anytext(exml))
    _set(context, recobj, 'pycsw:Language', md.language)
    _set(context, recobj, 'pycsw:Type', md.hierarchy)
    _set(context, recobj, 'pycsw:ParentIdentifier', md.parentidentifier)
    _set(context, recobj, 'pycsw:Date', md.datestamp)
    _set(context, recobj, 'pycsw:Modified', md.datestamp)
    _set(context, recobj, 'pycsw:Source', md.dataseturi)

    if md.referencesystem is not None:
        _set(context, recobj, 'pycsw:CRS', 'urn:ogc:def:crs:EPSG:6.11:%s' %
             md.referencesystem.code)

    if hasattr(md, 'identification'):
        _set(context, recobj, 'pycsw:Title', md.identification.title)
        _set(context, recobj, 'pycsw:AlternateTitle',
             md.identification.alternatetitle)
        _set(context, recobj, 'pycsw:Abstract', md.identification.abstract)
        _set(context, recobj, 'pycsw:Relation',
             md.identification.aggregationinfo)

        if hasattr(md.identification, 'temporalextent_start'):
            _set(context, recobj, 'pycsw:TempExtent_begin',
                 md.identification.temporalextent_start)
        if hasattr(md.identification, 'temporalextent_end'):
            _set(context, recobj, 'pycsw:TempExtent_end',
                 md.identification.temporalextent_end)

        if len(md.identification.topiccategory) > 0:
            _set(context, recobj, 'pycsw:TopicCategory',
                 md.identification.topiccategory[0])

        if len(md.identification.resourcelanguage) > 0:
            _set(context, recobj, 'pycsw:ResourceLanguage',
                 md.identification.resourcelanguage[0])

        if hasattr(md.identification, 'bbox'):
            bbox = md.identification.bbox
        else:
            bbox = None

        if (hasattr(md.identification, 'keywords') and
                len(md.identification.keywords) > 0):
            all_keywords = [item for sublist in md.identification.keywords
                            for item in sublist['keywords']
                            if item is not None]
            _set(context, recobj, 'pycsw:Keywords', ','.join(all_keywords))
            _set(context, recobj, 'pycsw:KeywordType',
                 md.identification.keywords[0]['type'])

        if (hasattr(md.identification, 'creator') and
                len(md.identification.creator) > 0):
            all_orgs = set([item.organization
                            for item in md.identification.creator
                            if hasattr(item, 'organization') and
                            item.organization is not None])
            _set(context, recobj, 'pycsw:Creator', ';'.join(all_orgs))
        if (hasattr(md.identification, 'publisher') and
                len(md.identification.publisher) > 0):
            all_orgs = set([item.organization
                            for item in md.identification.publisher
                            if hasattr(item, 'organization') and
                            item.organization is not None])
            _set(context, recobj, 'pycsw:Publisher', ';'.join(all_orgs))
        if (hasattr(md.identification, 'contributor') and
                len(md.identification.contributor) > 0):
            all_orgs = set([item.organization
                            for item in md.identification.contributor
                            if hasattr(item, 'organization') and
                            item.organization is not None])
            _set(context, recobj, 'pycsw:Contributor', ';'.join(all_orgs))
        if (hasattr(md.identification, 'contact') and
                len(md.identification.contact) > 0):
            all_orgs = set([item.organization
                            for item in md.identification.contact
                            if hasattr(item, 'organization') and
                            item.organization is not None])
            _set(context, recobj, 'pycsw:OrganizationName', ';'.join(all_orgs))

        if len(md.identification.securityconstraints) > 0:
            _set(context, recobj, 'pycsw:SecurityConstraints',
                 md.identification.securityconstraints[0])
        if len(md.identification.accessconstraints) > 0:
            _set(context, recobj, 'pycsw:AccessConstraints',
                 md.identification.accessconstraints[0])
        if len(md.identification.otherconstraints) > 0:
            _set(context, recobj, 'pycsw:OtherConstraints',
                 md.identification.otherconstraints[0])

        if hasattr(md.identification, 'date'):
            for datenode in md.identification.date:
                if datenode.type == 'revision':
                    _set(context, recobj, 'pycsw:RevisionDate', datenode.date)
                elif datenode.type == 'creation':
                    _set(context, recobj, 'pycsw:CreationDate', datenode.date)
                elif datenode.type == 'publication':
                    _set(context, recobj, 'pycsw:PublicationDate',
                         datenode.date)

        if hasattr(md.identification, 'extent') and \
                hasattr(md.identification.extent, 'description_code'):
            _set(context, recobj, 'pycsw:GeographicDescriptionCode',
                 md.identification.extent.description_code)

        if len(md.identification.denominators) > 0:
            _set(context, recobj, 'pycsw:Denominator',
                 md.identification.denominators[0])
        if len(md.identification.distance) > 0:
            _set(context, recobj, 'pycsw:DistanceValue',
                 md.identification.distance[0])
        if len(md.identification.uom) > 0:
            _set(context, recobj, 'pycsw:DistanceUOM',
                 md.identification.uom[0])

        if len(md.identification.classification) > 0:
            _set(context, recobj, 'pycsw:Classification',
                 md.identification.classification[0])
        if len(md.identification.uselimitation) > 0:
            _set(context, recobj, 'pycsw:ConditionApplyingToAccessAndUse',
                 md.identification.uselimitation[0])

    # NOTE(review): guard checks md.identification but the value read is
    # md.distribution.format — confirm this asymmetry is intended
    if hasattr(md.identification, 'format'):
        _set(context, recobj, 'pycsw:Format', md.distribution.format)

    if md.serviceidentification is not None:
        _set(context, recobj, 'pycsw:ServiceType',
             md.serviceidentification.type)
        _set(context, recobj, 'pycsw:ServiceTypeVersion',
             md.serviceidentification.version)
        _set(context, recobj, 'pycsw:CouplingType',
             md.serviceidentification.couplingtype)

    # collect all service types across identificationinfo entries
    # (overwrites the single serviceidentification value set above)
    service_types = []
    for smd in md.identificationinfo:
        if smd.identtype == 'service' and smd.type is not None:
            service_types.append(smd.type)

    _set(context, recobj, 'pycsw:ServiceType', ','.join(service_types))

    #if len(md.serviceidentification.operateson) > 0:
    #    _set(context, recobj, 'pycsw:operateson = VARCHAR(32),
    #_set(context, recobj, 'pycsw:operation VARCHAR(32),
    #_set(context, recobj, 'pycsw:operatesonidentifier VARCHAR(32),
    #_set(context, recobj, 'pycsw:operatesoname VARCHAR(32),

    # NOTE(review): guard checks md.identification but the values read are
    # md.dataquality.* — confirm this asymmetry is intended
    if hasattr(md.identification, 'dataquality'):
        _set(context, recobj, 'pycsw:Degree', md.dataquality.conformancedegree)
        _set(context, recobj, 'pycsw:Lineage', md.dataquality.lineage)
        _set(context, recobj, 'pycsw:SpecificationTitle',
             md.dataquality.specificationtitle)
        if hasattr(md.dataquality, 'specificationdate'):
            _set(context, recobj, 'pycsw:specificationDate',
                 md.dataquality.specificationdate[0].date)
            _set(context, recobj, 'pycsw:SpecificationDateType',
                 md.dataquality.specificationdate[0].datetype)

    if hasattr(md, 'contact') and len(md.contact) > 0:
        _set(context, recobj, 'pycsw:ResponsiblePartyRole',
             md.contact[0].role)

    LOGGER.info('Scanning for links')
    if hasattr(md, 'distribution'):
        dist_links = []
        if hasattr(md.distribution, 'online'):
            LOGGER.debug('Scanning for gmd:transferOptions element(s)')
            dist_links.extend(md.distribution.online)
        if hasattr(md.distribution, 'distributor'):
            LOGGER.debug('Scanning for gmd:distributorTransferOptions element(s)')
            for dist_member in md.distribution.distributor:
                dist_links.extend(dist_member.online)
        for link in dist_links:
            if link.url is not None and link.protocol is None:
                # take a best guess
                link.protocol = sniff_link(link.url)
            linkstr = '%s,%s,%s,%s' % \
                (link.name, link.description, link.protocol, link.url)
            links.append(linkstr)

    try:
        LOGGER.debug('Scanning for srv:SV_ServiceIdentification links')
        for sident in md.identificationinfo:
            if hasattr(sident, 'operations'):
                for sops in sident.operations:
                    for scpt in sops['connectpoint']:
                        LOGGER.debug('adding srv link %s', scpt.url)
                        linkstr = '%s,%s,%s,%s' % \
                            (scpt.name, scpt.description, scpt.protocol,
                             scpt.url)
                        links.append(linkstr)
    except Exception:  # srv: identification does not exist
        LOGGER.debug('no srv:SV_ServiceIdentification links found')

    if len(links) > 0:
        _set(context, recobj, 'pycsw:Links', '^'.join(links))

    if bbox is not None:
        try:
            tmp = '%s,%s,%s,%s' % (bbox.minx, bbox.miny, bbox.maxx, bbox.maxy)
            _set(context, recobj, 'pycsw:BoundingBox',
                 util.bbox2wktpolygon(tmp))
        except Exception:
            # was a bare except; coordinates are corrupted, do not include
            _set(context, recobj, 'pycsw:BoundingBox', None)
    else:
        _set(context, recobj, 'pycsw:BoundingBox', None)

    return recobj