Example #1
0
def main():
    """Run a batch FAIR assessment over ``testpids`` and print results as JSON.

    Reads service/external endpoints from config/server.ini, primes the
    Preprocessor caches, then runs the FAIR metric checks for each test PID.

    NOTE(review): relies on module-level names (testpids, startpid, debug,
    Preprocessor, FAIRCheck, ConfigParser, Path, os, json) that are not
    defined in this snippet -- presumably imported at module scope.
    """
    config = ConfigParser.ConfigParser()
    # server.ini lives in <project root>/config, two levels above this file
    my_path = Path(__file__).parent.parent
    ini_path = os.path.join(my_path, 'config', 'server.ini')
    config.read(ini_path)
    YAML_DIR = config['SERVICE']['yaml_directory']
    METRIC_YAML = config['SERVICE']['metrics_yaml']
    METRIC_YML_PATH = os.path.join(my_path, YAML_DIR, METRIC_YAML)
    SPDX_URL = config['EXTERNAL']['spdx_license_github']
    DATACITE_API_REPO = config['EXTERNAL']['datacite_api_repo']
    RE3DATA_API = config['EXTERNAL']['re3data_api']
    METADATACATALOG_API = config['EXTERNAL']['metadata_catalog']
    # read the debug flag once (the original read it twice, redundantly)
    isDebug = config.getboolean('SERVICE', 'debug_mode')

    preproc = Preprocessor()
    preproc.retrieve_metrics_yaml(METRIC_YML_PATH)
    print('Total metrics defined: {}'.format(preproc.get_total_metrics()))

    preproc.retrieve_licenses(SPDX_URL, isDebug)
    preproc.retrieve_datacite_re3repos(RE3DATA_API, DATACITE_API_REPO, isDebug)
    preproc.retrieve_metadata_standards(METADATACATALOG_API, isDebug)
    preproc.retrieve_science_file_formats(isDebug)
    preproc.retrieve_long_term_file_formats(isDebug)

    print('Total SPDX licenses : {}'.format(preproc.get_total_licenses()))
    print('Total re3repositories found from datacite api : {}'.format(
        len(preproc.getRE3repositories())))
    print('Total subjects area of imported metadata standards : {}'.format(
        len(preproc.metadata_standards)))
    # skip PIDs until startpid is reached; a falsy startpid starts immediately
    start = False
    for identifier in testpids:
        print(identifier)
        if identifier == startpid or not startpid:
            start = True
        if start:
            ft = FAIRCheck(uid=identifier, test_debug=debug)
            uid_result, pid_result = ft.check_unique_persistent()
            core_metadata_result = ft.check_minimal_metatadata()
            content_identifier_included_result = ft.check_content_identifier_included()
            check_searchable_result = ft.check_searchable()
            license_result = ft.check_license()
            relatedresources_result = ft.check_relatedresources()
            access_level_result = ft.check_data_access_level()
            data_file_format_result = ft.check_data_file_format()
            data_provenance_result = ft.check_data_provenance()
            community_standards_result = ft.check_community_metadatastandards()
            data_content_metadata = ft.check_data_content_metadata()
            # NOTE(review): relatedresources_result is computed but not part
            # of the reported results list in this variant
            results = [
                uid_result, pid_result, core_metadata_result,
                content_identifier_included_result, check_searchable_result,
                access_level_result, license_result, data_file_format_result,
                data_provenance_result, community_standards_result,
                data_content_metadata
            ]
            print(json.dumps(results, indent=4, sort_keys=True))
Example #2
0
def main():
    """Configure and launch the F-UJI Connexion/Flask assessment web service.

    Loads metric definitions and external caches via Preprocessor, then serves
    the Swagger-defined API on the configured host/port.

    NOTE(review): relies on module-level names (config, authen, logger,
    connexion, encoder, ProxyFix, Preprocessor, os, logging) that are not
    defined in this snippet -- presumably created/imported at module scope.
    """
    logging.getLogger('connexion.operation').setLevel('INFO')
    ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
    YAML_DIR = config['SERVICE']['yaml_directory']
    METRIC_YAML = config['SERVICE']['metrics_yaml']
    METRIC_YML_PATH = os.path.join(ROOT_DIR, YAML_DIR, METRIC_YAML)
    SPDX_URL = config['EXTERNAL']['spdx_license_github']
    DATACITE_API_REPO = config['EXTERNAL']['datacite_api_repo']
    RE3DATA_API = config['EXTERNAL']['re3data_api']
    METADATACATALOG_API = config['EXTERNAL']['metadata_catalog']
    LOV_API = config['EXTERNAL']['lov_api']
    LOD_CLOUDNET = config['EXTERNAL']['lod_cloudnet']
    #BIOPORTAL_REST = config['EXTERNAL']['bioportal_rest']
    #BIOPORTAL_APIKEY = config['EXTERNAL']['bioportal_apikey']
    data_files_limit = int(config['SERVICE']['data_files_limit'])
    metric_specification = config['SERVICE']['metric_specification']

    #TODO further implementation on authentication needed
    # credentials are read from the ini file and pushed into the auth module
    usr = config['USER']['usr']
    pwd = config['USER']['pwd']
    authen.service_username = usr
    authen.service_password = pwd

    # prime the Preprocessor caches used by all subsequent assessments
    preproc = Preprocessor()
    preproc.retrieve_metrics_yaml(METRIC_YML_PATH, data_files_limit, metric_specification)
    logger.info('Total metrics defined: {}'.format(preproc.get_total_metrics()))

    isDebug = config.getboolean('SERVICE', 'debug_mode')
    preproc.retrieve_licenses(SPDX_URL, isDebug)
    preproc.retrieve_datacite_re3repos(RE3DATA_API, DATACITE_API_REPO, isDebug)
    preproc.retrieve_metadata_standards(METADATACATALOG_API, isDebug)
    #preproc.retrieve_linkedvocabs(lov_api=LOV_API, lodcloud_api=LOD_CLOUDNET, bioportal_api=BIOPORTAL_REST, bioportal_key=BIOPORTAL_APIKEY, isDebugMode=False)
    preproc.retrieve_linkedvocabs(lov_api=LOV_API, lodcloud_api=LOD_CLOUDNET, isDebugMode=isDebug)
    preproc.retrieve_default_namespaces()

    logger.info('Total SPDX licenses : {}'.format(preproc.get_total_licenses()))
    logger.info('Total re3repositories found from datacite api : {}'.format(len(preproc.getRE3repositories())))
    logger.info('Total subjects area of imported metadata standards : {}'.format(len(preproc.metadata_standards)))
    logger.info('Total LD vocabs imported : {}'.format(len(preproc.getLinkedVocabs())))
    logger.info('Total default namespaces specified : {}'.format(len(preproc.getDefaultNamespaces())))

    #you can also use Tornado or gevent as the HTTP server, to do so set server to tornado or gevent
    app = connexion.FlaskApp(__name__, specification_dir=YAML_DIR)
    API_YAML = os.path.join(ROOT_DIR, YAML_DIR, config['SERVICE']['swagger_yaml'])
    app.app.json_encoder = encoder.JSONEncoder
    app.add_api(API_YAML, arguments={'title': 'F-UJI : FAIRsFAIR Research Data Object Assessment Service'}, validate_responses=True)
    # honour X-Forwarded-* headers when running behind a reverse proxy
    app.app.wsgi_app = ProxyFix(app.app.wsgi_app)
    app.run(host=config['SERVICE']['service_host'], port=int(config['SERVICE']['service_port']))
Example #3
0
def main():
    """Run a single FAIR assessment and print results (with debug messages) as JSON.

    NOTE(review): relies on module-level names (identifier, oai_pmh, debug,
    Preprocessor, FAIRCheck, ConfigParser, Path, os, json) that are not
    defined in this snippet.
    """
    config = ConfigParser.ConfigParser()
    # server.ini lives in <project root>/config, two levels above this file
    my_path = Path(__file__).parent.parent
    ini_path = os.path.join(my_path, 'config', 'server.ini')
    config.read(ini_path)
    YAML_DIR = config['SERVICE']['yaml_directory']
    METRIC_YAML = config['SERVICE']['metrics_yaml']
    METRIC_YML_PATH = os.path.join(my_path, YAML_DIR, METRIC_YAML)
    SPDX_URL = config['EXTERNAL']['spdx_license_github']
    DATACITE_API_REPO = config['EXTERNAL']['datacite_api_repo']
    RE3DATA_API = config['EXTERNAL']['re3data_api']
    METADATACATALOG_API = config['EXTERNAL']['metadata_catalog']
    # read the debug flag once (the original read it twice, redundantly)
    isDebug = config.getboolean('SERVICE', 'debug_mode')

    preproc = Preprocessor()
    preproc.retrieve_metrics_yaml(METRIC_YML_PATH)
    print('Total metrics defined: {}'.format(preproc.get_total_metrics()))

    preproc.retrieve_licenses(SPDX_URL, isDebug)
    preproc.retrieve_datacite_re3repos(RE3DATA_API, DATACITE_API_REPO, isDebug)
    preproc.retrieve_metadata_standards(METADATACATALOG_API, isDebug)

    print('Total SPDX licenses : {}'.format(preproc.get_total_licenses()))
    print('Total re3repositories found from datacite api : {}'.format(
        len(preproc.getRE3repositories())))
    print('Total subjects area of imported metadata standards : {}'.format(
        len(preproc.metadata_standards)))

    ft = FAIRCheck(uid=identifier, oai=oai_pmh, test_debug=debug)
    uid_result, pid_result = ft.check_unique_persistent()
    core_metadata_result = ft.check_minimal_metatadata()
    content_identifier_included_result = ft.check_content_identifier_included()
    check_searchable_result = ft.check_searchable()
    license_result = ft.check_license()
    relatedresources_result = ft.check_relatedresources()
    # NOTE(review): check_searchable_result and relatedresources_result are
    # computed but not included in the reported results list in this variant
    results = [
        uid_result, pid_result, core_metadata_result,
        content_identifier_included_result, license_result
    ]
    # put the debug messages at the right place...
    for result_index, result in enumerate(results):
        results[result_index]['test_debug'] = ft.msg_filter.getMessage(
            result.get('metric_identifier'))

    print(json.dumps(results, indent=4, sort_keys=True))
class RepositoryHelper:
    """Resolve a DataCite client id to its re3data record and extract repository metadata."""

    # {client_id: re3data DOI} mapping, fetched once at class-definition time
    DATACITE_REPOSITORIES = Preprocessor.getRE3repositories()
    # XML namespace map for the re3data schema 2-2
    ns = {"r3d": "http://www.re3data.org/schema/2-2"}
    # API types recognised when parsing <r3d:api> elements
    RE3DATA_APITYPES = ['OAI-PMH', 'SOAP', 'SPARQL', 'SWORD', 'OpenDAP']

    def __init__(self, client, pidscheme, logger):
        """Store lookup keys; *logger* is a logger name passed to logging.getLogger."""
        self.client_id = client
        self.pid_scheme = pidscheme
        self.re3metadata_raw = None  # raw XML of the re3data repository record
        self.repository_name = None
        self.repository_url = None
        self.repo_apis = {}  # {apiType: endpoint URL}
        self.repo_standards = []  # metadata standard names listed in re3data
        self.logger = logging.getLogger(logger)
        #print(__name__)
    def lookup_re3data(self):
        """Query the re3data API for this client's repository record and parse it.

        No-op unless both client_id and pid_scheme are set; logs (rather than
        raises) on malformed responses or missing DOIs.
        """
        if self.client_id and self.pid_scheme:

            re3doi = RepositoryHelper.DATACITE_REPOSITORIES.get(
                self.client_id)  # {client_id,re3doi}
            #print(self.client_id,'Re3DOI',re3doi, idutils.is_doi(re3doi))
            if re3doi:
                if idutils.is_doi(re3doi):
                    short_re3doi = idutils.normalize_pid(
                        re3doi, scheme='doi')  #https://doi.org/10.17616/R3XS37
                else:
                    # not a DOI -> treat as "no match found"
                    re3doi = None

            # pid -> clientId -> repo doi-> re3id, and query repository metadata from re3api
            if re3doi:
                self.logger.info(
                    'FsF-R1.3-01M : Found match re3data (DOI-based) record')
                query_url = Preprocessor.RE3DATA_API + '?query=' + short_re3doi  # https://re3data.org/api/beta/repositories?query=
                q = RequestHelper(url=query_url)
                q.setAcceptType(AcceptTypes.xml)
                re_source, xml = q.content_negotiate(metric_id='RE3DATA')
                try:
                    if isinstance(xml, bytes):
                        xml = xml.decode().encode()
                    root = etree.fromstring(xml)

                    #<link href="https://www.re3data.org/api/beta/repository/r3d100010134" rel="self" />
                    re3link = root.xpath('//link')[0].attrib['href']
                    if re3link is not None:
                        self.logger.info(
                            'FsF-R1.3-01M : Found match re3data metadata record -: '
                            + str(re3link))
                        # query reposiroty metadata
                        q2 = RequestHelper(url=re3link)
                        q2.setAcceptType(AcceptTypes.xml)
                        re3_source, re3_response = q2.content_negotiate(
                            metric_id='RE3DATA')
                        self.re3metadata_raw = re3_response
                        self.parseRepositoryMetadata()
                except Exception as e:
                    self.logger.warning(
                        'FsF-R1.3-01M : Malformed re3data (DOI-based) record received: '
                        + str(e))
            else:
                self.logger.warning(
                    'FsF-R1.3-01M : No DOI of client id is available from datacite api'
                )

    def parseRepositoryMetadata(self):
        """Extract name, URL, APIs and metadata standards from the raw re3data XML."""
        #http://schema.re3data.org/3-0/re3data-example-V3-0.xml
        root = etree.fromstring(self.re3metadata_raw)
        # ns = {k: v for k, v in root.nsmap.items() if k}
        name = root.xpath('//r3d:repositoryName',
                          namespaces=RepositoryHelper.ns)
        url = root.xpath('//r3d:repositoryURL', namespaces=RepositoryHelper.ns)
        if name:
            self.repository_name = name[0].text
        if url:
            self.repository_url = url[0].text
        apis = root.xpath('//r3d:api', namespaces=RepositoryHelper.ns)
        for a in apis:
            apiType = a.attrib['apiType']
            if apiType in RepositoryHelper.RE3DATA_APITYPES:
                self.repo_apis[a.attrib['apiType']] = a.text
        standards = root.xpath(
            '//r3d:metadataStandard/r3d:metadataStandardName',
            namespaces=RepositoryHelper.ns)
        #we only use the name as the url specified in re3data is dcc-based, e.g., http://www.dcc.ac.uk/resources/metadata-standards/dif-directory-interchange-format
        self.repo_standards = [s.text for s in standards]

    def getRe3MetadataStandards(self):
        """Return the metadata standard names parsed from the re3data record."""
        return self.repo_standards

    def getRe3MetadataAPIs(self):
        """Return the {apiType: URL} mapping parsed from the re3data record."""
        return self.repo_apis

    def getRepoNameURL(self):
        """Return (repository_name, repository_url), each possibly None."""
        return self.repository_name, self.repository_url
Example #5
0
def main():
    """Run the full FAIR metric suite over ``testpids`` with memory tracing.

    Reads endpoints from config/server.ini, primes the Preprocessor caches,
    assesses each test PID, attaches per-metric debug messages, and reports
    tracemalloc statistics after each PID to hunt for leaks.

    NOTE(review): relies on module-level names (testpids, startpid, debug,
    metadata_service_endpoint, metadata_service_type, Preprocessor, FAIRCheck,
    ConfigParser, Path, os, json, tracemalloc, gc) not defined in this snippet.
    """
    config = ConfigParser.ConfigParser()
    # server.ini lives in <project root>/config, two levels above this file
    my_path = Path(__file__).parent.parent
    ini_path = os.path.join(my_path, 'config', 'server.ini')
    config.read(ini_path)
    YAML_DIR = config['SERVICE']['yaml_directory']
    METRIC_YAML = config['SERVICE']['metrics_yaml']
    METRIC_YML_PATH = os.path.join(my_path, YAML_DIR, METRIC_YAML)
    SPDX_URL = config['EXTERNAL']['spdx_license_github']
    DATACITE_API_REPO = config['EXTERNAL']['datacite_api_repo']
    RE3DATA_API = config['EXTERNAL']['re3data_api']
    METADATACATALOG_API = config['EXTERNAL']['metadata_catalog']
    # read the debug flag once (the original read it twice, redundantly)
    isDebug = config.getboolean('SERVICE', 'debug_mode')
    data_files_limit = int(config['SERVICE']['data_files_limit'])
    metric_specification = config['SERVICE']['metric_specification']

    preproc = Preprocessor()
    preproc.retrieve_metrics_yaml(METRIC_YML_PATH, data_files_limit,
                                  metric_specification)
    print('Total metrics defined: {}'.format(preproc.get_total_metrics()))

    preproc.retrieve_licenses(SPDX_URL, isDebug)
    preproc.retrieve_datacite_re3repos(RE3DATA_API, DATACITE_API_REPO, isDebug)
    preproc.retrieve_metadata_standards(METADATACATALOG_API, isDebug)
    preproc.retrieve_science_file_formats(isDebug)
    preproc.retrieve_long_term_file_formats(isDebug)

    print('Total SPDX licenses : {}'.format(preproc.get_total_licenses()))
    print('Total re3repositories found from datacite api : {}'.format(
        len(preproc.getRE3repositories())))
    print('Total subjects area of imported metadata standards : {}'.format(
        len(preproc.metadata_standards)))
    # skip PIDs until startpid is reached; a falsy startpid starts immediately
    start = False
    usedatacite = True
    tracemalloc.start()
    n = 1
    for identifier in testpids:

        print(identifier)
        print(n)
        n += 1
        if identifier == startpid or not startpid:
            start = True
        if start:
            ft = FAIRCheck(uid=identifier,
                           test_debug=debug,
                           metadata_service_url=metadata_service_endpoint,
                           metadata_service_type=metadata_service_type,
                           use_datacite=usedatacite)

            uid_result, pid_result = ft.check_unique_persistent()
            ft.retrieve_metadata_embedded(ft.extruct_result)
            # embedded-metadata harvesting may reveal a better PID; re-check
            if ft.repeat_pid_check:
                uid_result, pid_result = ft.check_unique_persistent()
            ft.retrieve_metadata_external()

            core_metadata_result = ft.check_minimal_metatadata()
            content_identifier_included_result = ft.check_content_identifier_included()
            access_level_result = ft.check_data_access_level()
            license_result = ft.check_license()
            relatedresources_result = ft.check_relatedresources()
            check_searchable_result = ft.check_searchable()
            data_content_metadata = ft.check_data_content_metadata()
            data_file_format_result = ft.check_data_file_format()
            community_standards_result = ft.check_community_metadatastandards()
            data_provenance_result = ft.check_data_provenance()
            formal_representation_result = ft.check_formal_metadata()
            semantic_vocabulary_result = ft.check_semantic_vocabulary()
            metadata_preserved_result = ft.check_metadata_preservation()
            standard_protocol_metadata_result = ft.check_standardised_protocol_metadata()
            standard_protocol_data_result = ft.check_standardised_protocol_data()

            results = [
                uid_result, pid_result, core_metadata_result,
                content_identifier_included_result, check_searchable_result,
                access_level_result, formal_representation_result,
                semantic_vocabulary_result, license_result,
                data_file_format_result, data_provenance_result,
                relatedresources_result, community_standards_result,
                data_content_metadata, metadata_preserved_result,
                standard_protocol_data_result,
                standard_protocol_metadata_result
            ]
            # attach per-metric debug messages to each result entry
            debug_messages = ft.get_log_messages_dict()
            ft.logger_message_stream.flush()
            ft.get_assessment_summary(results)
            for res_k, res_v in enumerate(results):
                if ft.isDebug:
                    debug_list = debug_messages.get(res_v['metric_identifier'])
                    if debug_list is not None:
                        # reuse the already-fetched list (the original did a
                        # redundant second dict lookup here)
                        results[res_k]['test_debug'] = debug_list
                    else:
                        results[res_k]['test_debug'] = [
                            'INFO: No debug messages received'
                        ]
                else:
                    results[res_k]['test_debug'] = ['INFO: Debugging disabled']
                    debug_messages = {}
            print(json.dumps(results, indent=4, sort_keys=True))
            # drop all but the last logger handler to avoid memory leaks
            # accumulating across PIDs
            ft.logger.handlers = [ft.logger.handlers[-1]]
            current, peak = tracemalloc.get_traced_memory()
            print(
                f"Current memory usage is {current / 10 ** 6}MB; Peak was {peak / 10 ** 6}MB"
            )
            snapshot = tracemalloc.take_snapshot()
            top_stats = snapshot.statistics('traceback')

            # pick the biggest memory block
            stat = top_stats[0]
            print("%s memory blocks: %.1f KiB" %
                  (stat.count, stat.size / 1024))
            for line in stat.traceback.format():
                print(line)

            # top five allocation sites by file
            for i, stat in enumerate(snapshot.statistics('filename')[:5], 1):
                print(i, str(stat))

            gc.collect()
    tracemalloc.stop()
Example #6
0
def main():
    """Run the full FAIR metric suite over ``testpids`` and print JSON results.

    NOTE(review): relies on module-level names (testpids, startpid,
    Preprocessor, FAIRCheck, ConfigParser, Path, os, json) that are not
    defined in this snippet.
    """
    config = ConfigParser.ConfigParser()
    # server.ini lives in <project root>/config, two levels above this file
    my_path = Path(__file__).parent.parent
    ini_path = os.path.join(my_path, 'config', 'server.ini')
    config.read(ini_path)
    YAML_DIR = config['SERVICE']['yaml_directory']
    METRIC_YAML = config['SERVICE']['metrics_yaml']
    METRIC_YML_PATH = os.path.join(my_path, YAML_DIR, METRIC_YAML)
    SPDX_URL = config['EXTERNAL']['spdx_license_github']
    DATACITE_API_REPO = config['EXTERNAL']['datacite_api_repo']
    RE3DATA_API = config['EXTERNAL']['re3data_api']
    METADATACATALOG_API = config['EXTERNAL']['metadata_catalog']
    # read the debug flag once (the original read it twice, redundantly)
    isDebug = config.getboolean('SERVICE', 'debug_mode')
    data_files_limit = int(config['SERVICE']['data_files_limit'])
    metric_specification = config['SERVICE']['metric_specification']

    preproc = Preprocessor()
    preproc.retrieve_metrics_yaml(METRIC_YML_PATH, data_files_limit, metric_specification)
    print('Total metrics defined: {}'.format(preproc.get_total_metrics()))

    preproc.retrieve_licenses(SPDX_URL, isDebug)
    preproc.retrieve_datacite_re3repos(RE3DATA_API, DATACITE_API_REPO, isDebug)
    preproc.retrieve_metadata_standards(METADATACATALOG_API, isDebug)
    preproc.retrieve_science_file_formats(isDebug)
    preproc.retrieve_long_term_file_formats(isDebug)

    print('Total SPDX licenses : {}'.format(preproc.get_total_licenses()))
    print('Total re3repositories found from datacite api : {}'.format(len(preproc.getRE3repositories())))
    print('Total subjects area of imported metadata standards : {}'.format(len(preproc.metadata_standards)))
    # skip PIDs until startpid is reached; a falsy startpid starts immediately
    start = False
    for identifier in testpids:
        print(identifier)
        if identifier == startpid or not startpid:
            start = True
        if start:
            ft = FAIRCheck(uid=identifier, test_debug=True, use_datacite=False)
            uid_result, pid_result = ft.check_unique_persistent()
            core_metadata_result = ft.check_minimal_metatadata()
            content_identifier_included_result = ft.check_content_identifier_included()
            access_level_result = ft.check_data_access_level()
            license_result = ft.check_license()
            relatedresources_result = ft.check_relatedresources()
            check_searchable_result = ft.check_searchable()
            data_file_format_result = ft.check_data_file_format()
            community_standards_result = ft.check_community_metadatastandards()
            data_provenance_result = ft.check_data_provenance()
            data_content_metadata = ft.check_data_content_metadata()
            formal_representation_result = ft.check_formal_metadata()
            semantic_vocabulary_result = ft.check_semantic_vocabulary()
            metadata_preserved_result = ft.check_metadata_preservation()
            standard_protocol_data_result = ft.check_standardised_protocol_data()
            standard_protocol_metadata_result = ft.check_standardised_protocol_metadata()
            results = [
                uid_result, pid_result, core_metadata_result,
                content_identifier_included_result, check_searchable_result,
                access_level_result, formal_representation_result,
                semantic_vocabulary_result, license_result,
                data_file_format_result, data_provenance_result,
                relatedresources_result, community_standards_result,
                data_content_metadata, metadata_preserved_result,
                standard_protocol_data_result, standard_protocol_metadata_result
            ]
            # attach per-metric debug messages to each result entry
            for res_k, res_v in enumerate(results):
                if ft.isDebug:
                    debug_list = ft.msg_filter.getMessage(res_v['metric_identifier'])
                    if debug_list is not None:
                        # reuse the already-fetched list (the original made a
                        # redundant second getMessage call here)
                        results[res_k]['test_debug'] = debug_list
                    else:
                        results[res_k]['test_debug'] = ['INFO: No debug messages received']
                else:
                    results[res_k]['test_debug'] = ['INFO: Debugging disabled']

            print(json.dumps(results, indent=4, sort_keys=True))
class RepositoryHelper:
    """Resolve a DataCite client id to its re3data record and extract repository metadata."""

    # {client_id: re3data DOI} mapping, fetched once at class-definition time
    DATACITE_REPOSITORIES = Preprocessor.getRE3repositories()
    # XML namespace map for the re3data schema 2-2
    ns = {"r3d": "http://www.re3data.org/schema/2-2"}
    # API types recognised when parsing <r3d:api> elements
    RE3DATA_APITYPES = ['OAI-PMH', 'SOAP', 'SPARQL', 'SWORD', 'OpenDAP']

    def __init__(self, client, pidscheme):
        """Store lookup keys and initialise the result attributes."""
        self.client_id = client
        self.pid_scheme = pidscheme
        self.re3metadata_raw = None  # raw XML of the re3data repository record
        self.repository_name = None
        self.repository_url = None
        self.repo_apis = {}  # {apiType: endpoint URL}
        self.repo_standards = []  # metadata standard names listed in re3data
        self.logger = logging.getLogger(self.__class__.__name__)

    def lookup_re3data(self):
        """Query the re3data API for this client's repository record and parse it.

        No-op unless both client_id and pid_scheme are set.
        """
        if self.client_id and self.pid_scheme:
            re3doi = RepositoryHelper.DATACITE_REPOSITORIES.get(
                self.client_id)  # {client_id,re3doi}
            # pid -> clientId -> repo doi-> re3id, and query repository metadata from re3api
            if re3doi:
                # BUG FIX: normalize only after confirming a DOI was found;
                # the original called normalize_pid(None, ...) for unknown
                # client ids, before the None check
                short_re3doi = idutils.normalize_pid(
                    re3doi, scheme='doi')  #https://doi.org/10.17616/R3XS37
                self.logger.info('Found match re3data (DOI-based) record')
                query_url = Preprocessor.RE3DATA_API + '?query=' + short_re3doi  # https://re3data.org/api/beta/repositories?query=
                q = RequestHelper(url=query_url)
                q.setAcceptType(AcceptTypes.xml)
                re_source, xml = q.content_negotiate(metric_id='RE3DATA')
                root = etree.fromstring(xml.content)
                #<link href="https://www.re3data.org/api/beta/repository/r3d100010134" rel="self" />
                re3link = root.xpath('//link')[0].attrib['href']
                if re3link is not None:
                    self.logger.info('Found match re3data metadata record')
                    # query repository metadata
                    q2 = RequestHelper(url=re3link)
                    q2.setAcceptType(AcceptTypes.xml)
                    re3_source, re3_response = q2.content_negotiate(
                        metric_id='RE3DATA')
                    self.re3metadata_raw = re3_response.content
                    self.parseRepositoryMetadata()
            else:
                self.logger.warning(
                    'No DOI of client id is available from datacite api')

    def parseRepositoryMetadata(self):
        """Extract name, URL, APIs and metadata standards from the raw re3data XML."""
        #http://schema.re3data.org/3-0/re3data-example-V3-0.xml
        root = etree.fromstring(self.re3metadata_raw)
        name = root.xpath('//r3d:repositoryName',
                          namespaces=RepositoryHelper.ns)
        url = root.xpath('//r3d:repositoryURL', namespaces=RepositoryHelper.ns)
        if name:
            self.repository_name = name[0].text
        if url:
            self.repository_url = url[0].text
        apis = root.xpath('//r3d:api', namespaces=RepositoryHelper.ns)
        for a in apis:
            apiType = a.attrib['apiType']
            if apiType in RepositoryHelper.RE3DATA_APITYPES:
                self.repo_apis[a.attrib['apiType']] = a.text
        standards = root.xpath(
            '//r3d:metadataStandard/r3d:metadataStandardName',
            namespaces=RepositoryHelper.ns)
        # only the names are kept; the URLs in re3data are dcc-based
        self.repo_standards = [s.text for s in standards]

    def getRe3MetadataStandards(self):
        """Return the metadata standard names parsed from the re3data record."""
        return self.repo_standards

    def getRe3MetadataAPIs(self):
        """Return the {apiType: URL} mapping parsed from the re3data record."""
        return self.repo_apis

    def getRepoNameURL(self):
        """Return (repository_name, repository_url), each possibly None."""
        return self.repository_name, self.repository_url