示例#1
0
def edit():
    """Render the study-editing page, honoring an optional layout choice.

    The optional 'layout' request var (TOP, LEFT, RIGHT) selects an
    alternate view template, e.g. 'study/edit-RIGHT.html'; otherwise the
    default template for this action is used.
    """
    chosenLayout = request.vars.get('layout', None)  # TOP, LEFT, RIGHT
    if chosenLayout:
        response.view = 'study/edit-%s.html' % chosenLayout  # eg, 'study/edit-RIGHT.html'

    # TODO: fetch a fresh list of search contexts for TNRS? see working example in
    # the header search of the main opentree webapp
    # (was fetched twice here; the duplicate call has been removed)
    view_dict = get_opentree_services_method_urls(request)
    return view_dict
示例#2
0
def fetch_current_synthetic_tree_ids():
    """Fetch the current synthetic-tree ID and its root node ID.

    POSTs an empty JSON body to the draft-tree 'about' service and returns
    a tuple (draftTreeName, startNodeID) as UTF-8 byte strings, or
    ('ERROR', message) on any failure.
    """
    try:
        # fetch the latest IDs as JSON from remote site
        import simplejson

        method_dict = get_opentree_services_method_urls(request)
        fetch_url = method_dict['getDraftTreeID_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url

        fetch_args = {}
        # this needs to be a POST (pass fetch_args or ''); if GET, it just describes the API
        # N.B. that gluon.tools.fetch() can't be used here, since it won't send "raw" JSON data as treemachine expects
        req = urllib2.Request(url=fetch_url,
                              data=simplejson.dumps(fetch_args),
                              headers={"Content-Type": "application/json"})
        ids_response = urllib2.urlopen(req).read()

        ids_json = simplejson.loads(ids_response)
        draftTreeName = str(ids_json['synth_id']).encode('utf-8')
        startNodeID = str(ids_json['root']['node_id']).encode('utf-8')
        return (draftTreeName, startNodeID)

    except Exception as e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message)
示例#3
0
def fetch_current_synthetic_tree_ids():
    """Fetch the current synthetic-tree ID and its root node ID.

    POSTs an empty JSON body (via `requests`) to the draft-tree 'about'
    service and returns a tuple (draftTreeName, startNodeID) as UTF-8 byte
    strings, or ('ERROR', message) on any failure.
    """
    try:
        # fetch the latest IDs as JSON from remote site
        import json
        import requests

        method_dict = get_opentree_services_method_urls(request)
        fetch_url = method_dict['getDraftTreeID_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url

        fetch_args = {}
        # this needs to be a POST (pass fetch_args or ''); if GET, it just describes the API
        ids_json = requests.post(url=fetch_url,
                                 data=json.dumps(fetch_args),
                                 headers={
                                     "Content-Type": "application/json"
                                 }).json()
        draftTreeName = str(ids_json['synth_id']).encode('utf-8')
        startNodeID = str(ids_json['root']['node_id']).encode('utf-8')
        return (draftTreeName, startNodeID)

    except Exception as e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message)
示例#4
0
def fetch_current_synthetic_tree_ids():
    """Fetch the current draft-tree name and its starting node ID.

    POSTs to the draft-tree service (raw JSON body, which gluon's fetch()
    cannot send) and returns (draftTreeName, startNodeID) as UTF-8 byte
    strings; startNodeID may be None if neither known key is present.
    Returns ('ERROR', message) on any failure.
    """
    try:
        # fetch the latest IDs as JSON from remote site
        import simplejson

        method_dict = get_opentree_services_method_urls(request)
        fetch_url = method_dict['getDraftTreeID_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url

        fetch_args = {'startingTaxonOTTId': ""}
        # this needs to be a POST (pass fetch_args or ''); if GET, it just describes the API
        # N.B. that gluon.tools.fetch() can't be used here, since it won't send "raw" JSON data as treemachine expects
        req = urllib2.Request(url=fetch_url, data=simplejson.dumps(fetch_args), headers={"Content-Type": "application/json"})
        ids_response = urllib2.urlopen(req).read()

        ids_json = simplejson.loads( ids_response )
        draftTreeName = str(ids_json['draftTreeName']).encode('utf-8')
        # Try to be compatible with different versions of treemachine
        startNodeID = None
        if 'startingNodeID' in ids_json:
            startNodeID = str(ids_json['startingNodeID']).encode('utf-8')
        elif 'startNodeID' in ids_json:
            startNodeID = str(ids_json['startNodeID']).encode('utf-8')
        return (draftTreeName, startNodeID)

    except Exception as e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message)
示例#5
0
def fetch_current_TNRS_context_names():
    """Return an ordered, flat list of TNRS context names (UTF-8 encoded).

    Groups are ordered with LIFE, PLANTS, ANIMALS first, then any other
    groups in the order the service returned them; unknown/renamed groups
    are skipped. Returns ('ERROR', message) on any failure.
    """
    try:
        # fetch the latest contextName values as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)
        fetch_url = method_dict['getContextsJSON_url']
        # as usual, this needs to be a POST (pass empty fetch_args)
        contextnames_response = fetch(fetch_url, data='')

        contextnames_json = simplejson.loads( contextnames_response )
        # start with LIFE group (incl. 'All life'), and add any other ordered suggestions
        ordered_group_names = unique_ordered_list(['LIFE','PLANTS','ANIMALS'] + [g for g in contextnames_json])
        context_names = [ ]
        for gname in ordered_group_names:
            # allow for eventual removal or renaming of expected groups
            if gname in contextnames_json:
                context_names += [n.encode('utf-8') for n in contextnames_json[gname] ]

        return (context_names)

    except Exception as e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message)
示例#6
0
def view():
    """Read-only study view, open to any visitor (on the 'master' branch).

    ? OR can this include work-in-progress from a personal branch?
    """
    # reuse the editor's template for read-only display
    response.view = 'study/edit.html'
    result = get_opentree_services_method_urls(request)
    result['taxonSearchContextNames'] = fetch_current_TNRS_context_names()
    return result
示例#7
0
def index():
    """Interpret the incoming URL as a tree-view request and build its view dict.

    Parses up to three positional path parts into viewer, node reference
    ({domSource}@{nodeID}) and taxon name, then augments the standard view
    dict with those hints plus the current synthetic-tree IDs and TNRS
    context names. Raises HTTP(404) when the first path part is neither a
    known viewer nor a node reference.
    """
    # interpret incoming URL as a tree view, in this format
    #   http://[hostname]/opentree/[{viewer}/]{domSource}@{nodeID}/{taxon name}
    # some valid examples:
    #   http://opentree.com/opentree/argus/ottol@123456/H**o+sapiens
    #   http://opentree.com/opentree/ottol@123456/H**o+sapiens
    #   http://opentree.com/opentree/ottol@123456
    #
    # TODO: add another optional arg 'viewport', like so
    #   http://opentree.com/opentree/argus/0,23,100,400/ottol@123456/H**o+sapiens

    # modify the normal view dictionary to include location+view hints from the URL
    treeview_dict = get_opentree_services_method_urls(request)
    treeview_dict['viewer'] = 'argus'
    treeview_dict['domSource'] = ''
    treeview_dict['nodeID'] = ''
    treeview_dict['nodeName'] = ''
    treeview_dict['viewport'] = ''

    # add a flag to determine whether to force the viewer to this node (vs. using the
    # browser's stored state for this URL, or a default starting node)
    treeview_dict['forcedByURL'] = False

    # handle the first arg (path part) found
    if len(request.args) > 0:
        if request.args[0] in ['argus','onezoom','phylet']:
            treeview_dict['viewer'] = request.args[0]
        elif '@' in request.args[0]:
            treeview_dict['domSource'], treeview_dict['nodeID'] = request.args[0].split('@')
        else:
            # first arg is neither a viewer nor a proper node, which is a Bad Thing
            raise HTTP(404)

    # second arg: a node reference if we don't have one yet, else a taxon name
    # NOTE(review): if the first arg was a viewer and this arg has no '@',
    # split('@') raises ValueError (uncaught) — confirm intended behavior
    if len(request.args) > 1:
        if not treeview_dict['nodeID']:
        #if (not treeview_dict['nodeID']) and '@' in request.args[1]:
            treeview_dict['domSource'], treeview_dict['nodeID'] = request.args[1].split('@')
        else:
            treeview_dict['nodeName'] = request.args[1]

    # third arg can only be the taxon name, if not already set
    if len(request.args) > 2:
        if not treeview_dict['nodeName']:
            treeview_dict['nodeName'] = request.args[2]

    # when all is said and done, do we have enough information to force the location?
    if treeview_dict['domSource'] and treeview_dict['nodeID']:
        treeview_dict['forcedByURL'] = True

    # retrieve latest synthetic-tree ID (and its 'life' node ID)
    # TODO: Only refresh this periodically? Or only when needed for initial destination?
    treeview_dict['draftTreeName'], treeview_dict['lifeNodeID'], treeview_dict['startingNodeID'] = fetch_current_synthetic_tree_ids()
    treeview_dict['taxonSearchContextNames'] = fetch_current_TNRS_context_names()

    return treeview_dict
示例#8
0
def download_subtree():
    """Fetch the Newick subtree for a node and write it to an in-memory buffer.

    URL args: id_type ('ottol-id' or 'node-id'), the node/ottol ID, and a
    display name used only in error messages. On failure, writes a
    human-readable error message to the buffer instead.
    """
    id_type = request.args(0)  # 'ottol-id' or 'node-id'
    node_or_ottol_id = request.args(1)
    node_name = request.args(2)
    import cStringIO
    import contenttype as c
    s = cStringIO.StringIO()
    # initialize before the try block, so the error handler below can always
    # reference it (previously a failure before assignment raised NameError)
    newick_text = 'NEWICK_NOT_FETCHED'

    try:
        # fetch the Newick tree as JSON from remote site
        import requests
        import json
        json_headers = {
            'content-type': 'application/json',
            'accept': 'application/json',
        }

        method_dict = get_opentree_services_method_urls(request)

        # use the appropriate web service for this ID type
        fetch_url = method_dict['getDraftSubtree_url']
        if id_type == 'ottol-id':
            fetch_args = {'ott_id': int(node_or_ottol_id)}
        else:
            fetch_args = {'node_id': node_or_ottol_id}
        fetch_args['format'] = 'newick'
        fetch_args['height_limit'] = -1
        # TODO: allow for dynamic height, based on max tips?

        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url

        # apparently this needs to be a POST, or it just describes the API
        tree_response = requests.post(fetch_url,
                                      data=json.dumps(fetch_args),
                                      headers=json_headers)
        tree_json = tree_response.json()
        newick_text = unicode(tree_json.get(
            'newick', u'NEWICK_NOT_FOUND')).encode('utf-8')
        s.write(newick_text)

    except Exception as e:
        # throw 403 or 500 or just leave it
        if id_type == 'ottol-id':
            s.write(
                u'ERROR - Unable to fetch the Newick subtree for ottol id "%s" (%s):\n\n%s'
                % (node_or_ottol_id, node_name, newick_text))
        else:
            s.write(
                u'ERROR - Unable to fetch the Newick subtree for node id "%s" (%s):\n\n%s'
                % (node_or_ottol_id, node_name, newick_text))
示例#9
0
def download_subtree():
    """Fetch the Newick subtree for a node and write it to an in-memory buffer.

    URL args: id_type ('ottol-id' or 'node-id'), the node/ottol ID, the
    max traversal depth, and a display name used only in error messages.
    On failure, writes a human-readable error message to the buffer.
    """
    id_type = request.args(0)  # 'ottol-id' or 'node-id'
    node_or_ottol_id = request.args(1)
    max_depth = request.args(2)
    node_name = request.args(3)
    import cStringIO
    import contenttype as c
    s = cStringIO.StringIO()
    # initialize before the try block, so the error handler below can always
    # reference it (previously a failure before assignment raised NameError)
    newick_text = 'NEWICK_NOT_FETCHED'

    try:
        # fetch the Newick tree as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # use the appropriate web service for this ID type
        if id_type == 'ottol-id':
            fetch_url = method_dict['getDraftTreeForOttolID_url']
            fetch_args = {'ottId': node_or_ottol_id, 'maxDepth': max_depth}
        else:
            fetch_url = method_dict['getDraftTreeForNodeID_url']
            fetch_args = {'nodeID': node_or_ottol_id, 'maxDepth': max_depth}
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url

        # apparently this needs to be a POST, or it just describes the API
        tree_response = fetch(fetch_url, data=fetch_args)
        tree_json = simplejson.loads(tree_response)
        newick_text = str(tree_json['tree']).encode('utf-8')
        s.write(newick_text)

    except Exception as e:
        # throw 403 or 500 or just leave it
        if id_type == 'ottol-id':
            s.write(
                u'ERROR - Unable to fetch the Newick subtree for ottol id "%s" (%s) with max depth %s:\n\n%s'
                % (node_or_ottol_id, node_name, max_depth, newick_text))
        else:
            s.write(
                u'ERROR - Unable to fetch the Newick subtree for node id "%s" (%s) with max depth %s:\n\n%s'
                % (node_or_ottol_id, node_name, max_depth, newick_text))
示例#10
0
def download_subtree():
    """Fetch the Newick subtree for a node and write it to an in-memory buffer.

    URL args: id_type ('ottol-id' or 'node-id'), the node/ottol ID, and a
    display name used only in error messages. On failure, writes a
    human-readable error message to the buffer instead.
    """
    id_type = request.args(0)  # 'ottol-id' or 'node-id'
    node_or_ottol_id = request.args(1)
    node_name = request.args(2)
    import cStringIO
    import contenttype as c
    s = cStringIO.StringIO()
    # initialize before the try block, so the error handler below can always
    # reference it (previously a failure before assignment raised NameError)
    newick_text = 'NEWICK_NOT_FETCHED'

    try:
        # fetch the Newick tree as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # use the appropriate web service for this ID type
        fetch_url = method_dict['getDraftSubtree_url']
        if id_type == 'ottol-id':
            fetch_args = {'ott_id': int(node_or_ottol_id)}
        else:
            fetch_args = {'node_id': node_or_ottol_id}
        fetch_args['format'] = 'newick'
        fetch_args['height_limit'] = -1  # TODO: allow for dynamic height, based on max tips?

        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url

        # apparently this needs to be a POST, or it just describes the API
        tree_response = fetch(fetch_url, data=fetch_args)
        tree_json = simplejson.loads( tree_response )
        newick_text = str(tree_json.get('newick', 'NEWICK_NOT_FOUND')).encode('utf-8')
        s.write( newick_text )

    except Exception as e:
        # throw 403 or 500 or just leave it
        if id_type == 'ottol-id':
            s.write( u'ERROR - Unable to fetch the Newick subtree for ottol id "%s" (%s):\n\n%s' % (node_or_ottol_id, node_name, newick_text) )
        else:
            s.write( u'ERROR - Unable to fetch the Newick subtree for node id "%s" (%s):\n\n%s' % (node_or_ottol_id, node_name, newick_text) )
示例#11
0
def download_subtree():
    """Fetch the Newick subtree for a node and write it to an in-memory buffer.

    URL args: id_type ('ottol-id' or 'node-id'), the node/ottol ID, the
    max traversal depth, and a display name used only in error messages.
    On failure, writes a human-readable error message to the buffer.
    """
    id_type = request.args(0)  # 'ottol-id' or 'node-id'
    node_or_ottol_id = request.args(1)
    max_depth = request.args(2)
    node_name = request.args(3)
    import cStringIO
    import contenttype as c
    s = cStringIO.StringIO()
    # initialize before the try block, so the error handler below can always
    # reference it (previously a failure before assignment raised NameError)
    newick_text = 'NEWICK_NOT_FETCHED'

    try:
        # fetch the Newick tree as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # use the appropriate web service for this ID type
        if id_type == 'ottol-id':
            fetch_url = method_dict['getDraftTreeForOttolID_url']
            fetch_args = {'ottId': node_or_ottol_id, 'maxDepth': max_depth}
        else:
            fetch_url = method_dict['getDraftTreeForNodeID_url']
            fetch_args = {'nodeID': node_or_ottol_id, 'maxDepth': max_depth}
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url

        # apparently this needs to be a POST, or it just describes the API
        tree_response = fetch(fetch_url, data=fetch_args)
        tree_json = simplejson.loads( tree_response )
        newick_text = str(tree_json['tree']).encode('utf-8')
        s.write( newick_text )

    except Exception as e:
        # throw 403 or 500 or just leave it
        if id_type == 'ottol-id':
            s.write( u'ERROR - Unable to fetch the Newick subtree for ottol id "%s" (%s) with max depth %s:\n\n%s' % (node_or_ottol_id, node_name, max_depth, newick_text) )
        else:
            s.write( u'ERROR - Unable to fetch the Newick subtree for node id "%s" (%s) with max depth %s:\n\n%s' % (node_or_ottol_id, node_name, max_depth, newick_text) )
示例#12
0
def fetch_current_synthetic_tree_ids():
    """Return (draftTreeName, lifeNodeID, startingNodeID) for the draft tree.

    POSTs to the draft-tree service; if the response carries no separate
    'startingNodeID', the 'life' node ID is used as the start. On failure
    returns ('ERROR', message, 'NO_STARTING_NODE_ID') so callers unpacking
    a 3-tuple still work.
    """
    try:
        # fetch the latest IDs as JSON from remote site
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)
        fetch_url = method_dict['getDraftTreeID_url']

        fetch_args = {'startingTaxonName': "cellular organisms"}

        # this needs to be a POST (pass fetch_args or ''); if GET, it just describes the API
        ids_response = fetch(fetch_url, data=fetch_args)

        ids_json = simplejson.loads( ids_response )
        draftTreeName = ids_json['draftTreeName'].encode('utf-8')
        lifeNodeID = ids_json['lifeNodeID'].encode('utf-8')
        # IF we get a separate starting node ID, use it; else we'll start at 'life'
        startingNodeID = ids_json.get('startingNodeID', lifeNodeID).encode('utf-8')
        return (draftTreeName, lifeNodeID, startingNodeID)

    except Exception as e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message, 'NO_STARTING_NODE_ID')
示例#13
0
def fetch_current_synthesis_source_data():
    """Return metadata for all studies contributing to the current synthesis.

    Queries the synthesis source-list service, collapses per-tree source
    entries into per-study info (contributing tree IDs + the commit SHA
    used in synthesis), then fetches oti study metadata and returns just
    the contributing studies sorted by publication reference, each with
    'commit_SHA_in_synthesis', 'tree_ids' and (where available) a
    'friendlyDepositMessage' added. Returns ('ERROR', message) on failure.
    """
    json_headers = {
        'content-type' : 'application/json',
        'accept' : 'application/json',
    }
    try:
        import requests
        import json
        method_dict = get_opentree_services_method_urls(request)

        # fetch a list of all studies that contribute to synthesis
        fetch_url = method_dict['getSynthesisSourceList_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url
        # as usual, this needs to be a POST (pass empty fetch_args)
        source_list_response = requests.post(fetch_url, data=json.dumps({'include_source_list':True}), headers=json_headers)
        source_data = source_list_response.json()
        source_id_list = source_data.get('source_list', [ ])
        source_id_map = source_data.get('source_id_map')
        # split these source descriptions, which are in the form '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}'
        contributing_study_info = { }   # store (unique) study IDs as keys, commit SHAs as values

        for source_id in source_id_list:
            source_details = source_id_map.get( source_id )
            # skip the taxonomy pseudo-source
            if 'taxonomy' in source_details:
                continue
            study_id = source_details.get('study_id')
            # N.B. assume that all study IDs have a two-letter prefix!
            tree_id = source_details.get('tree_id')
            commit_SHA_in_synthesis = source_details.get('git_sha')
            # N.B. assume that any listed study has been used!

            if study_id in contributing_study_info.keys():
                contributing_study_info[ study_id ]['tree_ids'].append( tree_id )
            else:
                contributing_study_info[ study_id ] = {
                    'tree_ids': [ tree_id, ],
                    'commit_SHA_in_synthesis': commit_SHA_in_synthesis
                }

        # fetch the oti metadata (esp. DOI and full reference text) for each
        fetch_url = method_dict['findAllStudies_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url

        # as usual, this needs to be a POST (pass empty fetch_args)
        study_metadata_response = requests.post(fetch_url, data=json.dumps({"verbose": True}), headers=json_headers)
        # TODO: add more friendly label to tree metadata? if so, add "includeTreeMetadata":True above
        study_metadata = study_metadata_response.json()

        # filter just the metadata for studies contributing to synthesis
        contributing_studies = [ ]
        for study in study_metadata['matched_studies']:
            # Add any missing study-ID prefixes (assume 'pg') so we can compare
            # with the prefixed IDs provided by getSynthesisSourceList.
            id_parts = study['ot:studyId'].split('_')
            if len(id_parts) == 1:
                prefixed_study_id = 'pg_%s' % study['ot:studyId']
            else:
                prefixed_study_id = study['ot:studyId']
            if prefixed_study_id in contributing_study_info.keys():
                contrib_info = contributing_study_info[ prefixed_study_id ]
                # and commit SHA to support retrieval of *exact* Nexson from synthesis
                study['commit_SHA_in_synthesis'] = contrib_info['commit_SHA_in_synthesis']
                # add contributing tree ID(s) so we can directly link to (or download) them
                study['tree_ids'] = contrib_info['tree_ids']
                contributing_studies.append( study )

        # sort these alphabetically by first author, then render in the page
        contributing_studies.sort(key = lambda x: x.get('ot:studyPublicationReference'))

        # TODO: encode data to utf-8?
        ## context_names += [n.encode('utf-8') for n in contextnames_json[gname] ]

        # translate data-deposit DOIs/URLs into friendlier forms
        for study in contributing_studies:
            raw_deposit_doi = study.get('ot:dataDeposit', None)
            if raw_deposit_doi:
                study['friendlyDepositMessage'] = get_data_deposit_message(raw_deposit_doi)

        return contributing_studies

    except Exception as e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message)
示例#14
0
# Build a bleach Cleaner for user-supplied markdown: start from bleach's
# default allowed attributes, then add the extra attributes allowed in
# version notes (common_version_notes_attributes, defined elsewhere).
ot_markdown_attributes = bleach.sanitizer.ALLOWED_ATTRIBUTES.copy()
ot_markdown_attributes.update(common_version_notes_attributes)
ot_cleaner = Cleaner(tags=ot_markdown_tags, attributes=ot_markdown_attributes)

### required - do no delete
def user():
    """web2py auth entry point (login, logout, register, ...)."""
    return dict(form=auth())
def download():
    """Serve uploaded files via web2py's standard download handler."""
    return response.download(request,db)
def call():
    """Expose registered web2py services (scaffold requirement)."""
    return service()
### end requires

def index():
    """Default action: redirect to the first About page in the menu."""
    # bump to first About page in menu
    redirect(URL('about', 'open-tree-of-life'))

# try grabbing shared data just once (at module import), shared by all
# of the simple About page actions below
default_view_dict = get_opentree_services_method_urls(request)
# NOTE(review): fetch_current_TNRS_context_names is called with `request`
# here, but other copies of this helper take no arguments — confirm the
# signature in this file matches.
default_view_dict['taxonSearchContextNames'] = fetch_current_TNRS_context_names(request)

# NOTE that web2py should attempt to convert hyphens (dashes) in URLs into underscores

def open_tree_of_life():
    """Serve the shared About view dict for this static page."""
    # URL is /opentree/about/open-tree-of-life
    return default_view_dict

def the_synthetic_tree():
    """Serve the shared About view dict for this static page."""
    # URL is /opentree/about/the-synthetic-tree
    return default_view_dict

def the_source_tree_manager():
    """Serve the shared About view dict for this static page."""
    # URL is /opentree/about/the-source-tree-manager
    return default_view_dict
示例#15
0
def fetch_current_synthesis_source_data():
    try:
        from gluon.tools import fetch
        import simplejson
        method_dict = get_opentree_services_method_urls(request)

        # fetch a list of all studies that contribute to synthesis
        fetch_url = method_dict['getSynthesisSourceList_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url
        # as usual, this needs to be a POST (pass empty fetch_args)
        source_list_response = fetch(fetch_url, data='')
        source_list = simplejson.loads( source_list_response )

        # split these source descriptions, which are in the form '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}'
        contributing_study_info = { }   # store (unique) study IDs as keys, commit SHAs as values

        for source_desc in source_list:
            if source_desc == 'taxonomy':
                continue
            source_parts = source_desc.split('_')
            # add default prefix 'pg' to study ID, if not found
            if source_parts[0].isdigit():
                # prepend with default namespace 'pg'
                study_id = 'pg_%s' % source_parts[0]
            else:
                study_id = '_'.join(source_parts[0:2])
            if len(source_parts) == 4:
                tree_id = source_parts[2]
                commit_SHA_in_synthesis = source_parts[3]
            else:
                tree_id = source_parts[1]
                if len(source_parts) == 3:
                    commit_SHA_in_synthesis = source_parts[2]
                else:
                    commit_SHA_in_synthesis = None

            if study_id in contributing_study_info.keys():
                contributing_study_info[ study_id ]['tree_ids'].append( tree_id )
            else:
                contributing_study_info[ study_id ] = {
                    'tree_ids': [ tree_id, ],
                    'commit_SHA_in_synthesis': commit_SHA_in_synthesis
                }


        # fetch the oti metadata (esp. DOI and full reference text) for each
        fetch_url = method_dict['findAllStudies_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url

        # as usual, this needs to be a POST (pass empty fetch_args)
        study_metadata_response = fetch(fetch_url, data={"verbose": True}) 
        # TODO: add more friendly label to tree metadata? if so, add "includeTreeMetadata":True above
        study_metadata = simplejson.loads( study_metadata_response )

        # filter just the metadata for studies contributing to synthesis
        contributing_studies = [ ]
        for study in study_metadata:
            # Add any missing study-ID prefixes (assume 'pg') so we can compare
            # with the prefixed IDs provided by getSynthesisSourceList.
            id_parts = study['ot:studyId'].split('_')
            if len(id_parts) == 1:
                prefixed_study_id = 'pg_%s' % study['ot:studyId']
            else:
                prefixed_study_id = study['ot:studyId']
            if prefixed_study_id in contributing_study_info.keys():
                contrib_info = contributing_study_info[ prefixed_study_id ]
                # and commit SHA to support retrieval of *exact* Nexson from synthesis
                study['commit_SHA_in_synthesis'] = contrib_info['commit_SHA_in_synthesis']
                # add contributing tree ID(s) so we can directly link to (or download) them
                study['tree_ids'] = contrib_info['tree_ids']
                contributing_studies.append( study )

        # sort these alphabetically by first author, then render in the page
        contributing_studies.sort(key = lambda x: x.get('ot:studyPublicationReference'))

        # TODO: encode data to utf-8?
        ## context_names += [n.encode('utf-8') for n in contextnames_json[gname] ]
        
        # translate data-deposit DOIs/URLs into friendlier forms
        from pprint import pprint
        for study in contributing_studies:
            raw_deposit_doi = study.get('ot:dataDeposit', None)
            if raw_deposit_doi:
                study['friendlyDepositMessage'] = get_data_deposit_message(raw_deposit_doi)
        
        return contributing_studies

    except Exception, e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message)
示例#16
0

def call():
    """Expose registered web2py services (scaffold requirement)."""
    return service()


### end requires


def index():
    """Default action: redirect to the first About page in the menu."""
    # bump to first About page in menu
    redirect(URL('about', 'open-tree-of-life'))


# try grabbing shared data just once (at module import), shared by all
# of the simple About page actions below
default_view_dict = get_opentree_services_method_urls(request)
# NOTE(review): other copies of fetch_current_TNRS_context_names take no
# arguments — confirm this call signature against the helper's definition.
default_view_dict[
    'taxonSearchContextNames'] = fetch_current_TNRS_context_names(request)

# NOTE that web2py should attempt to convert hyphens (dashes) in URLs into underscores


def open_tree_of_life():
    """Serve the shared About view dict for this static page."""
    # URL is /opentree/about/open-tree-of-life
    return default_view_dict


def privacy_policy():
    """Serve the shared About view dict for this static page."""
    # URL is /opentree/about/privacy-policy
    return default_view_dict
示例#17
0
def fetch_current_synthesis_source_data():
    """Return metadata for all studies contributing to the current synthesis.

    Queries the synthesis source-list service, collapses per-tree source
    entries into per-study info (contributing tree IDs + the commit SHA
    used in synthesis), then fetches oti study metadata and returns just
    the contributing studies sorted by publication reference, each with
    'commit_SHA_in_synthesis', 'tree_ids' and (where available) a
    'friendlyDepositMessage' added. Returns ('ERROR', message) on failure.
    """
    json_headers = {
        'content-type': 'application/json',
        'accept': 'application/json',
    }
    try:
        import requests
        import json
        method_dict = get_opentree_services_method_urls(request)

        # fetch a list of all studies that contribute to synthesis
        fetch_url = method_dict['getSynthesisSourceList_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url
        # as usual, this needs to be a POST (pass empty fetch_args)
        source_list_response = requests.post(
            fetch_url,
            data=json.dumps({'include_source_list': True}),
            headers=json_headers)
        source_data = source_list_response.json()
        source_id_list = source_data.get('source_list', [])
        source_id_map = source_data.get('source_id_map')
        # split these source descriptions, which are in the form '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}'
        contributing_study_info = {
        }  # store (unique) study IDs as keys, commit SHAs as values

        for source_id in source_id_list:
            source_details = source_id_map.get(source_id)
            # skip the taxonomy pseudo-source
            if 'taxonomy' in source_details:
                continue
            study_id = source_details.get('study_id')
            # N.B. assume that all study IDs have a two-letter prefix!
            tree_id = source_details.get('tree_id')
            commit_SHA_in_synthesis = source_details.get('git_sha')
            # N.B. assume that any listed study has been used!

            if study_id in contributing_study_info.keys():
                contributing_study_info[study_id]['tree_ids'].append(tree_id)
            else:
                contributing_study_info[study_id] = {
                    'tree_ids': [
                        tree_id,
                    ],
                    'commit_SHA_in_synthesis': commit_SHA_in_synthesis
                }

        # fetch the oti metadata (esp. DOI and full reference text) for each
        fetch_url = method_dict['findAllStudies_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url

        # as usual, this needs to be a POST (pass empty fetch_args)
        study_metadata_response = requests.post(fetch_url,
                                                data=json.dumps(
                                                    {"verbose": True}),
                                                headers=json_headers)
        # TODO: add more friendly label to tree metadata? if so, add "includeTreeMetadata":True above
        study_metadata = study_metadata_response.json()

        # filter just the metadata for studies contributing to synthesis
        contributing_studies = []
        for study in study_metadata['matched_studies']:
            # Add any missing study-ID prefixes (assume 'pg') so we can compare
            # with the prefixed IDs provided by getSynthesisSourceList.
            id_parts = study['ot:studyId'].split('_')
            if len(id_parts) == 1:
                prefixed_study_id = 'pg_%s' % study['ot:studyId']
            else:
                prefixed_study_id = study['ot:studyId']
            if prefixed_study_id in contributing_study_info.keys():
                contrib_info = contributing_study_info[prefixed_study_id]
                # and commit SHA to support retrieval of *exact* Nexson from synthesis
                study['commit_SHA_in_synthesis'] = contrib_info[
                    'commit_SHA_in_synthesis']
                # add contributing tree ID(s) so we can directly link to (or download) them
                study['tree_ids'] = contrib_info['tree_ids']
                contributing_studies.append(study)

        # sort these alphabetically by first author, then render in the page
        contributing_studies.sort(
            key=lambda x: x.get('ot:studyPublicationReference'))

        # TODO: encode data to utf-8?
        ## context_names += [n.encode('utf-8') for n in contextnames_json[gname] ]

        # translate data-deposit DOIs/URLs into friendlier forms
        for study in contributing_studies:
            raw_deposit_doi = study.get('ot:dataDeposit', None)
            if raw_deposit_doi:
                study['friendlyDepositMessage'] = get_data_deposit_message(
                    raw_deposit_doi)

        return contributing_studies

    except Exception as e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message)
示例#18
0
文件: about.py 项目: arlin/opentree
def fetch_current_synthesis_source_data():
    """Return metadata for all studies contributing to the current synthesis.

    Parses legacy source descriptions of the form
    '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}' into a
    study-ID → commit-SHA map, then fetches oti study metadata and returns
    just the contributing studies (service order preserved), each with
    'commit_SHA_in_synthesis' added. Returns ('ERROR', message) on failure.
    """
    try:
        from gluon.tools import fetch
        import simplejson
        method_dict = get_opentree_services_method_urls(request)

        # fetch a list of all studies that contribute to synthesis
        fetch_url = method_dict['getSynthesisSourceList_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url
        # as usual, this needs to be a POST (pass empty fetch_args)
        source_list_response = fetch(fetch_url, data='')
        source_list = simplejson.loads( source_list_response )

        # split these source descriptions, which are in the form '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}'
        contributing_study_info = { }   # store (unique) study IDs as keys, commit SHAs as values

        for source_desc in source_list:
            # skip the taxonomy pseudo-source
            if source_desc == 'taxonomy':
                continue
            source_parts = source_desc.split('_')
            # add default prefix 'pg' to study ID, if not found
            if source_parts[0].isdigit():
                # prepend with default namespace 'pg'
                study_id = 'pg_%s' % source_parts[0]
            else:
                study_id = '_'.join(source_parts[0:2])
            if len(source_parts) == 4:
                commit_SHA_in_synthesis = source_parts[3]
            else:
                commit_SHA_in_synthesis = None
            contributing_study_info[ study_id ] = commit_SHA_in_synthesis

        # fetch the oti metadata (esp. DOI and full reference text) for each
        fetch_url = method_dict['findAllStudies_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url

        # as usual, this needs to be a POST (pass empty fetch_args)
        study_metadata_response = fetch(fetch_url, data={"verbose": True})
        study_metadata = simplejson.loads( study_metadata_response )

        # filter just the metadata for studies contributing to synthesis
        contributing_studies = [ ]
        for study in study_metadata:
            # Add any missing study-ID prefixes (assume 'pg') so we can compare
            # with the prefixed IDs provided by getSynthesisSourceList.
            id_parts = study['ot:studyId'].split('_')
            if len(id_parts) == 1:
                prefixed_study_id = 'pg_%s' % study['ot:studyId']
            else:
                prefixed_study_id = study['ot:studyId']
            if prefixed_study_id in contributing_study_info.keys():
                # add commit SHA to support retrieval of *exact* Nexson from synthesis
                study['commit_SHA_in_synthesis'] = contributing_study_info[ prefixed_study_id ]
                contributing_studies.append( study )

        # TODO: sort these alphabetically(?) and render in the page
        ## contributing_studies.sort(key = lambda x: x.get('ot:studyPublicationReference'))
        # NO, apparently they're pre-sorted to reflect the importance of each study

        # TODO: encode data to utf-8?
        ## context_names += [n.encode('utf-8') for n in contextnames_json[gname] ]

        return contributing_studies

    except Exception as e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message)