Example 1
def collections():
    """
    Show a filtered list of all tree collections in the system.
    """
    view_dict = get_opentree_services_method_urls(request)
    view_dict['maintenance_info'] = get_maintenance_info(request)
    return view_dict
Example 2
def create():
    # Block (redirect) if we've suspended study editing
    maintenance_info = get_maintenance_info(request)
    if maintenance_info.get('maintenance_in_progress', False):
        redirect(URL('curator', 'default', 'index', vars={"maintenance_notice":"true"}))
    view_dict = get_opentree_services_method_urls(request)
    view_dict['message'] = "study/create"
    return view_dict
Example 3
def index():
    """
    Offer creation (or uploading) of a name-mapping dataset
    """

    response.view = 'tnrs.html'
    view_dict = get_opentree_services_method_urls(request)
    #view_dict['message'] = "This would appear at bottom of page.."
    view_dict['maintenance_info'] = get_maintenance_info(request)
    view_dict['taxonSearchContextNames'] = fetch_current_TNRS_context_names(request)
    return view_dict
Example 4
def create():
    # Block (redirect) if we've suspended study editing
    maintenance_info = get_maintenance_info(request)
    if maintenance_info.get('maintenance_in_progress', False):
        redirect(
            URL('curator',
                'default',
                'index',
                vars={"maintenance_notice": "true"}))
    view_dict = get_opentree_services_method_urls(request)
    view_dict['message'] = "study/create"
    return view_dict
Example 5
def index():
    """
    Show an introduction page for visitors, or personalized curation dashboard for
    a logged-in user.
    """
    #response.flash = T("Welcome to web2py!")
    view_dict = get_opentree_services_method_urls(request)
    view_dict['maintenance_info'] = get_maintenance_info(request)

    if False:  ## auth.is_logged_in():
        # user is logged in, bounce to their personal dashboard
        redirect(URL('dashboard'))
    else:
        # anonymous visitor, show a general info page
        return view_dict
Example 6
def view():
    """
    Allow any visitor to view (read-only!) a study on the 'master' branch

    ? OR can this include work-in-progress from a personal branch?
    """
    response.view = 'study/edit.html'
    view_dict = get_opentree_services_method_urls(request)
    view_dict['maintenance_info'] = get_maintenance_info(request)
    #view_dict['taxonSearchContextNames'] = fetch_current_TNRS_context_names(request)
    view_dict['studyID'] = request.args[0]
    view_dict['latestSynthesisSHA'] = _get_latest_synthesis_sha_for_study_id(view_dict['studyID'])
    view_dict['viewOrEdit'] = 'VIEW'
    view_dict['userCanEdit'] = auth.is_logged_in() and True or False
    return view_dict
Example 7
def edit():
    # Block (redirect) if we've suspended study editing
    maintenance_info = get_maintenance_info(request)
    if maintenance_info.get('maintenance_in_progress', False):
        redirect(URL('curator', 'study', 'view', 
            vars={"maintenance_notice":"true"}, 
            args=request.args))
    # Fetch a fresh list of search contexts for TNRS? see working example in
    # the header search of the main opentree webapp
    view_dict = get_opentree_services_method_urls(request)
    view_dict['taxonSearchContextNames'] = fetch_current_TNRS_context_names(request)
    view_dict['studyID'] = request.args[0]
    view_dict['latestSynthesisSHA'] = _get_latest_synthesis_sha_for_study_id(view_dict['studyID'])
    view_dict['viewOrEdit'] = 'EDIT'
    return view_dict
Example 8
def view():
    """
    Allow any visitor to view (read-only!) a study on the 'master' branch

    ? OR can this include work-in-progress from a personal branch?
    """
    response.view = 'study/edit.html'
    view_dict = get_opentree_services_method_urls(request)
    view_dict['maintenance_info'] = get_maintenance_info(request)
    #view_dict['taxonSearchContextNames'] = fetch_current_TNRS_context_names(request)
    view_dict['studyID'] = request.args[0]
    view_dict['latestSynthesisSHA'] = _get_latest_synthesis_sha_for_study_id(
        view_dict['studyID'])
    view_dict['viewOrEdit'] = 'VIEW'
    view_dict['userCanEdit'] = auth.is_logged_in() and True or False
    return view_dict
Example 9
def _get_latest_synthesis_sha_for_study_id(study_id):
    # Fetch this SHA from treemachine. If the study is not found among
    # contributing studies, return an empty string.
    try:
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # fetch a list of all studies that contribute to synthesis
        fetch_url = method_dict['getSynthesisSourceList_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url
        # as usual, this needs to be a POST (pass empty fetch_args)
        source_list_response = fetch(fetch_url, data='')
        source_list = simplejson.loads(source_list_response)

        # split these source descriptions, which are in the form '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}'
        contributing_study_info = {
        }  # store (unique) study IDs as keys, commit SHAs as values

        for source_desc in source_list:
            if source_desc == 'taxonomy':
                continue
            source_parts = source_desc.split('_')
            # add default prefix 'pg' to study ID, if not found
            if source_parts[0].isdigit():
                # prepend with default namespace 'pg'
                contributing_study_id = 'pg_%s' % source_parts[0]
            else:
                contributing_study_id = '_'.join(source_parts[0:2])
            if len(source_parts) == 4:
                commit_SHA_in_synthesis = source_parts[3]
            else:
                commit_SHA_in_synthesis = None
            contributing_study_info[contributing_study_id] = commit_SHA_in_synthesis

        return contributing_study_info.get(study_id, '')

    except Exception as e:
        # throw 403 or 500 or just leave it
        raise HTTP(
            500,
            T('Unable to retrieve latest synthesis SHA for study {u}'.format(
                u=study_id)))
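
The split logic above assumes source descriptions of the form '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}'. As a rough, standalone illustration (the helper name and the source strings below are invented), the per-source parsing behaves like this:

def parse_source_desc(source_desc):
    # mirrors the per-source parsing in the loop above
    source_parts = source_desc.split('_')
    if source_parts[0].isdigit():
        # bare numeric study IDs get the default 'pg' namespace
        parsed_study_id = 'pg_%s' % source_parts[0]
    else:
        parsed_study_id = '_'.join(source_parts[0:2])
    sha = source_parts[3] if len(source_parts) == 4 else None
    return parsed_study_id, sha

print(parse_source_desc('pg_2359_tree5000_abc123'))  # ('pg_2359', 'abc123')
print(parse_source_desc('2359_tree5000'))            # ('pg_2359', None)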
Example 10
def _get_latest_synthesis_details_for_study_id(study_id):
    # Fetch the last synthesis SHA *and* any tree IDs (from this study) from
    # treemachine. If the study is not found in contributing studies, return
    # None for the SHA and an empty list of tree IDs.
    try:
        import json
        import requests

        method_dict = get_opentree_services_method_urls(request)

        # fetch a list of all studies that contribute to synthesis
        fetch_url = method_dict['getSynthesisSourceList_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url
        # as usual, this needs to be a POST
        source_list_response = requests.post(
            fetch_url,
            headers={
                "Content-Type": "application/json"
            },
            data=json.dumps({'include_source_list': True})).text
        source_dict = json.loads(source_list_response)['source_id_map']

        # fetch the full source list, then look for this study and its trees
        commit_SHA_in_synthesis = None
        current_study_trees_included = []
        #print(source_dict)
        # ignore source descriptions (e.g. "ot_764@tree1"); just read the details
        for source_details in source_dict.values():
            if source_details.get('study_id', None) == study_id:
                # this is the study we're interested in!
                current_study_trees_included.append(source_details['tree_id'])
                if commit_SHA_in_synthesis is None:
                    commit_SHA_in_synthesis = source_details['git_sha']
            # keep checking, as each tree will have its own entry
        return commit_SHA_in_synthesis, current_study_trees_included

    except Exception as e:
        # throw 403 or 500 or just leave it
        raise HTTP(
            500,
            T('Unable to retrieve latest synthesis details for study {u}'.
              format(u=study_id)))
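
The loop above only relies on three keys per entry of 'source_id_map' ('study_id', 'tree_id', 'git_sha'). With a made-up payload of that shape (the IDs and SHAs below are invented), the same selection logic yields the commit SHA plus the trees contributed by the requested study:

source_id_map = {
    'ot_764@tree1': {'study_id': 'ot_764', 'tree_id': 'tree1', 'git_sha': 'deadbeef'},
    'ot_764@tree2': {'study_id': 'ot_764', 'tree_id': 'tree2', 'git_sha': 'deadbeef'},
    'pg_99@tree7': {'study_id': 'pg_99', 'tree_id': 'tree7', 'git_sha': 'cafef00d'},
}
commit_SHA_in_synthesis = None
current_study_trees_included = []
for source_details in source_id_map.values():
    if source_details.get('study_id', None) == 'ot_764':
        current_study_trees_included.append(source_details['tree_id'])
        if commit_SHA_in_synthesis is None:
            commit_SHA_in_synthesis = source_details['git_sha']
print((commit_SHA_in_synthesis, current_study_trees_included))
# ('deadbeef', ['tree1', 'tree2'])  -- tree order follows dict iteration order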
Example 11
def edit():
    # Block (redirect) if we've suspended study editing
    maintenance_info = get_maintenance_info(request)
    if maintenance_info.get('maintenance_in_progress', False):
        redirect(
            URL('curator',
                'study',
                'view',
                vars={"maintenance_notice": "true"},
                args=request.args))
    # Fetch a fresh list of search contexts for TNRS? see working example in
    # the header search of the main opentree webapp
    view_dict = get_opentree_services_method_urls(request)
    view_dict['taxonSearchContextNames'] = fetch_current_TNRS_context_names(
        request)
    view_dict['studyID'] = request.args[0]
    view_dict['latestSynthesisSHA'] = _get_latest_synthesis_sha_for_study_id(
        view_dict['studyID'])
    view_dict['viewOrEdit'] = 'EDIT'
    return view_dict
Example 12
def _get_latest_synthesis_sha_for_study_id( study_id ):
    # Fetch this SHA from treemachine. If the study is not found among contributing studies, return an empty string.
    try:
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # fetch a list of all studies that contribute to synthesis
        fetch_url = method_dict['getSynthesisSourceList_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "http:%s" % fetch_url
        # as usual, this needs to be a POST (pass empty fetch_args)
        source_list_response = fetch(fetch_url, data='')
        source_list = simplejson.loads( source_list_response )

        # split these source descriptions, which are in the form '{STUDY_ID_PREFIX}_{STUDY_NUMERIC_ID}_{TREE_ID}_{COMMIT_SHA}'
        contributing_study_info = { }   # store (unique) study IDs as keys, commit SHAs as values

        for source_desc in source_list:
            if source_desc == 'taxonomy':
                continue
            source_parts = source_desc.split('_')
            # add default prefix 'pg' to study ID, if not found
            if source_parts[0].isdigit():
                # prepend with default namespace 'pg'
                contributing_study_id = 'pg_%s' % source_parts[0]
            else:
                contributing_study_id = '_'.join(source_parts[0:2])
            if len(source_parts) == 4:
                commit_SHA_in_synthesis = source_parts[3]
            else:
                commit_SHA_in_synthesis = None
            contributing_study_info[ contributing_study_id ] = commit_SHA_in_synthesis

        return contributing_study_info.get( study_id, '')

    except Exception as e:
        # throw 403 or 500 or just leave it
        raise HTTP(500, T('Unable to retrieve latest synthesis SHA for study {u}'.format(u=study_id)))
Example 13
def _get_latest_synthesis_details_for_study_id( study_id ):
    # Fetch the last synthesis SHA *and* any tree IDs (from this study) from
    # treemachine. If the study is not found in contributing studies, return
    # None for the SHA and an empty list of tree IDs.
    try:
        from gluon.tools import fetch
        import simplejson

        method_dict = get_opentree_services_method_urls(request)

        # fetch a list of all studies that contribute to synthesis
        fetch_url = method_dict['getSynthesisSourceList_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url
        # as usual, this needs to be a POST
        source_list_response = fetch(fetch_url, data={'include_source_list':True})
        source_dict = simplejson.loads( source_list_response )['source_id_map']

        # fetch the full source list, then look for this study and its trees
        commit_SHA_in_synthesis = None
        current_study_trees_included = [ ]
        #print(source_dict)
        # ignore source descriptions (e.g. "ot_764@tree1"); just read the details
        for source_details in source_dict.values():
            if source_details.get('study_id', None) == study_id:
                # this is the study we're interested in!
                current_study_trees_included.append( source_details['tree_id'] )
                if commit_SHA_in_synthesis is None:
                    commit_SHA_in_synthesis = source_details['git_sha']
            # keep checking, as each tree will have its own entry
        return commit_SHA_in_synthesis, current_study_trees_included

    except Exception as e:
        # throw 403 or 500 or just leave it
        raise HTTP(500, T('Unable to retrieve latest synthesis details for study {u}'.format(u=study_id)))
Example 14
def profile():
    """
    Show a personalized profile for any user (default = the current logged-in user).
    http://..../{app}/default/profile/[username]
    """
    view_dict = get_opentree_services_method_urls(request)
    view_dict['maintenance_info'] = get_maintenance_info(request)

    # if the URL has a [username], try to load their information
    if len(request.args) > 0:
        # try to load a profile for the specified userid, using the GitHub API
        specified_userid = request.args[0]
        view_dict['userid'] = specified_userid
        view_dict['active_user_found'] = False

        # fetch the JSON for this user's activities
        json_response = _fetch_github_api(
            verb='GET', url='/users/{0}'.format(specified_userid))

        error_msg = json_response.get('message', None)
        view_dict['error_msg'] = error_msg
        if error_msg:
            # pass error to the page for display
            print("ERROR FETCHING INFO FOR USERID: ", specified_userid)
            print(error_msg)
            view_dict['user_info'] = None
            view_dict['opentree_activity'] = None
        else:
            # pass user info to the page for display
            view_dict['user_info'] = json_response
            activity = _get_opentree_activity(
                userid=specified_userid,
                username=view_dict['user_info'].get('name', specified_userid))
            if activity:
                view_dict['active_user_found'] = True
            else:
                view_dict['active_user_found'] = False
                view_dict['error_msg'] = 'Not active in OpenTree'
            view_dict['opentree_activity'] = activity

        view_dict['is_current_user_profile'] = False
        if view_dict['active_user_found'] == True and auth.is_logged_in():
            current_userid = auth.user and auth.user.github_login or None
            if specified_userid == current_userid:
                view_dict['is_current_user_profile'] = True

        return view_dict

    else:
        # No userid was provided in the URL. Instead, we should try to bounce to the
        # current user's profile if they're logged in (or try to force a login).
        if auth.is_logged_in():
            current_userid = auth.user and auth.user.github_login or None
            # redirect to the fully expanded profile URL
            expanded_url = URL('curator',
                               'default',
                               'profile',
                               args=(current_userid, ),
                               vars=request.vars)
            redirect(expanded_url)
        else:
            # try to force a login and return here
            redirect(
                URL('curator',
                    'user',
                    'login',
                    vars=dict(
                        _next=URL(args=request.args, vars=request.vars))))
Example 15
def error():
    view_dict = get_opentree_services_method_urls(request)
    return view_dict
Example 16
def _get_opentree_activity(userid=None, username=None):
    # Fetch information about a user's studies, comments, and collections in the
    # OpenTree project. If a dict was provided, add this information to it; else
    # bundle up the information and return it directly
    if not userid:
        return None
    import json
    import requests

    activity_found = False
    activity = {
        'curator_since': None,
        'comments': [],
        'issues': [],
        'added_studies': [],
        'curated_studies': [],
        'curated_studies_in_synthesis': [],
        'added_collections': [],
        'curated_collections': []
    }
    method_dict = get_opentree_services_method_urls(request)

    # Use GitHub API to gather comments from this user, as shown in
    #   https://github.com/OpenTreeOfLife/feedback/issues/created_by/jimallman
    # N.B. that this is limited to 100 most recent items!
    all_comments = _fetch_github_api(
        verb='GET',
        url='/repos/OpenTreeOfLife/feedback/issues/comments?per_page=100')
    for comment in all_comments:
        if comment.get('user', None):
            comment_author = comment.get('user').get('login')
            if comment_author == userid:
                activity.get('comments').append(comment)
                activity_found = True

    # Again, for all feedback issues created by them
    # N.B. that this is probably limited to 100 most recent items!
    created_issues = _fetch_github_api(
        verb='GET',
        url=
        '/repos/OpenTreeOfLife/feedback/issues?state=all&creator={0}&per_page=100'
        .format(userid))
    activity['issues'] = created_issues
    if len(created_issues) > 0:
        activity_found = True

    # fetch a list of all studies that contribute to synthesis
    fetch_url = method_dict['getSynthesisSourceList_url']
    if fetch_url.startswith('//'):
        # Prepend scheme to a scheme-relative URL
        fetch_url = "https:%s" % fetch_url
    # as usual, this needs to be a POST
    source_data = requests.post(url=fetch_url,
                                headers={
                                    "Content-Type": "application/json"
                                },
                                data=json.dumps({'include_source_list':
                                                 True})).json()
    source_id_map = source_data.get('source_id_map')
    # N.B. We can ignore the munged ids in source_data['source_list']

    contributing_study_info = {
    }  # build a dict with unique study IDs as keys, commit SHAs as values
    for source_id, source_details in source_id_map.items():
        if 'taxonomy' in source_details:
            continue
        study_id = source_details.get('study_id')
        commit_SHA_in_synthesis = source_details.get('git_sha')
        contributing_study_info[study_id] = commit_SHA_in_synthesis

    # Use oti to gather studies curated and created by this user.
    fetch_url = method_dict['findAllStudies_url']
    if fetch_url.startswith('//'):
        # Prepend scheme to a scheme-relative URL
        fetch_url = "https:%s" % fetch_url
    all_studies = requests.post(
        url=fetch_url,
        headers={
            "Content-Type": "application/json"
        },
        data=json.dumps({'verbose': True})  # include curator list
    ).json().get('matched_studies', [])

    for study in all_studies:
        study_curators = study['ot:curatorName']
        # TODO: improve oti to handle multiple curator names!
        if type(study_curators) is not list:
            study_curators = [study_curators]
        # NB - If there's no "display name" defined, look for their userid
        if (username or userid) in study_curators:
            activity_found = True
            activity['curated_studies'].append(study)
            # first curator name is its original contributor
            if study_curators[0] == (username or userid):
                activity['added_studies'].append(study)
            # does this contribute to synthesis?
            if study['ot:studyId'] in contributing_study_info:
                activity['curated_studies_in_synthesis'].append(study)

    # Use oti to gather collections curated and created by this user.
    fetch_url = method_dict['findAllTreeCollections_url']
    if fetch_url.startswith('//'):
        # Prepend scheme to a scheme-relative URL
        fetch_url = "https:%s" % fetch_url
    all_collections = requests.get(url=fetch_url).json()
    for collection in all_collections:
        # gather all participants and check against their GitHub userids
        if userid == collection.get('creator', {}).get('login', None):
            activity_found = True
            activity['added_collections'].append(collection)
        contributor_ids = [
            c.get('login', None) for c in collection.get('contributors', [])
        ]
        if userid in contributor_ids:
            activity_found = True
            activity['curated_collections'].append(collection)

    if activity_found:
        try:
            # search the repo stats (for each phylesystem shard!) for their earliest contribution
            earliest_activity_date = None  # TODO: make this today? or tomorrow? MAXTIME?
            fetch_url = method_dict['phylesystem_config_url']
            if fetch_url.startswith('//'):
                # Prepend scheme to a scheme-relative URL
                fetch_url = "https:%s" % fetch_url
            phylesystem_config = requests.get(url=fetch_url).json()
            shard_list = phylesystem_config['shards']
            # if GitHub is rebuilding stats cache for any shard, poke them all but ignore dates
            rebuilding_cache = False
            for shard in shard_list:
                shard_name = shard['name']
                shard_contributors = _fetch_github_api(
                    verb='GET',
                    url='/repos/OpenTreeOfLife/{0}/stats/contributors'.format(
                        shard_name))
                if type(shard_contributors) is not list:
                    # Flag this, but try to fetch remaining shards (to nudge the cache)
                    rebuilding_cache = True
                else:
                    for contrib_info in shard_contributors:
                        if contrib_info['author']['login'] == userid:
                            # look for the earliest week here
                            for week in contrib_info['weeks']:
                                if earliest_activity_date:
                                    earliest_activity_date = min(
                                        earliest_activity_date, week['w'])
                                else:
                                    earliest_activity_date = week['w']
                            break  # skip any remaining records

            if rebuilding_cache:
                activity[
                    'curator_since'] = 'Generating data, please try again in a moment...'
            elif not earliest_activity_date:
                activity[
                    'curator_since'] = 'This user has not curated any studies.'
            else:
                # show a very approximate date (stats are just weekly)
                from datetime import datetime
                d = datetime.fromtimestamp(earliest_activity_date)
                activity['curator_since'] = d.strftime("%B %Y")
        except:
            # probably JSONDecodeError due to misconfiguration of the API server
            activity[
                'curator_since'] = "Unable to determine this user's first activity"

        return activity
    else:
        return None
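
The 'curator_since' calculation above depends on the shape of GitHub's /repos/{owner}/{repo}/stats/contributors response: a list of per-author records whose 'weeks' entries carry a 'w' field holding the Unix timestamp of the start of each week (while GitHub is still computing these stats it answers with a 202 rather than the list, which is presumably what the non-list check guards against). A small sketch with invented numbers shows the date handling:

from datetime import datetime

# one invented record, shaped like an element of the stats/contributors list
contrib_info = {
    'author': {'login': 'jimallman'},
    'weeks': [
        {'w': 1356912000, 'a': 10, 'd': 2, 'c': 3},  # 2012-12-31 00:00 UTC
        {'w': 1357516800, 'a': 0, 'd': 0, 'c': 1},
    ],
}
earliest_activity_date = min(week['w'] for week in contrib_info['weeks'])
print(datetime.utcfromtimestamp(earliest_activity_date).strftime("%B %Y"))
# 'December 2012'; the controller uses fromtimestamp, so it reports local time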
Example 17
def profile():
    """
    Show a personalized profile for any user (default = the current logged-in user).
    http://..../{app}/default/profile/[username]
    """
    view_dict = get_opentree_services_method_urls(request)
    view_dict['maintenance_info'] = get_maintenance_info(request)

    # if the URL has a [username], try to load their information
    if len(request.args) > 0:
        # try to load a profile for the specified userid, using the GitHub API
        specified_userid = request.args[0]
        view_dict['userid'] = specified_userid
        view_dict['active_user_found'] = False

        # fetch the JSON for this user's activities
        json_response = _fetch_github_api(verb='GET', 
            url='/users/{0}'.format(specified_userid))

        error_msg = json_response.get('message', None)
        view_dict['error_msg'] = error_msg
        if error_msg:
            # pass error to the page for display
            print("ERROR FETCHING INFO FOR USERID: ", specified_userid)
            print(error_msg)
            view_dict['user_info'] = None
            view_dict['opentree_activity'] = None 
        else:
            # pass user info to the page for display
            view_dict['user_info'] = json_response
            activity = _get_opentree_activity( 
                userid=specified_userid, 
                username=view_dict['user_info'].get('name', specified_userid)
            )
            if activity:
                view_dict['active_user_found'] = True
            else:
                view_dict['active_user_found'] = False
                view_dict['error_msg'] = 'Not active in OpenTree'
            view_dict['opentree_activity'] = activity
        
        view_dict['is_current_user_profile'] = False
        if view_dict['active_user_found'] == True and auth.is_logged_in():
            current_userid = auth.user and auth.user.github_login or None
            if specified_userid == current_userid:
                view_dict['is_current_user_profile'] = True

        return view_dict

    else:
        # No userid was provided in the URL. Instead, we should try to bounce to the
        # current user's profile if they're logged in (or try to force a login).
        if auth.is_logged_in():
            current_userid = auth.user and auth.user.github_login or None
            # redirect to the fully expanded profile URL
            expanded_url = URL('curator', 'default', 'profile', 
                args=(current_userid,),
                vars=request.vars)
            redirect(expanded_url)
        else:
            # try to force a login and return here
            redirect(URL('curator', 'user', 'login',
                     vars=dict(_next=URL(args=request.args, vars=request.vars))))
Example 18
def _get_opentree_activity( userid=None, username=None ):
    # Fetch information about a user's studies, comments, and collections in the
    # OpenTree project. If a dict was provided, add this information to it; else
    # bundle up the information and return it directly
    if not userid:
        return None
    import requests

    activity_found = False
    activity = {
        'curator_since': None,
        'comments':[], 
        'issues': [], 
        'added_studies':[], 
        'curated_studies': [], 
        'curated_studies_in_synthesis': [], 
        'added_collections':[],
        'curated_collections':[]
    }
    method_dict = get_opentree_services_method_urls(request)

    # Use GitHub API to gather comments from this user, as shown in
    #   https://github.com/OpenTreeOfLife/feedback/issues/created_by/jimallman
    # N.B. that this is limited to 100 most recent items!
    all_comments = _fetch_github_api(verb='GET', 
        url='/repos/OpenTreeOfLife/feedback/issues/comments?per_page=100')
    for comment in all_comments:
        if comment.get('user', None):
            comment_author = comment.get('user').get('login')
            if comment_author == userid:
                activity.get('comments').append( comment )
                activity_found = True

    # Again, for all feedback issues created by them
    # N.B. that this is probably limited to 100 most recent items!
    created_issues = _fetch_github_api(verb='GET', 
        url='/repos/OpenTreeOfLife/feedback/issues?state=all&creator={0}&per_page=100'.format(userid))
    activity['issues'] = created_issues
    if len(created_issues) > 0:
        activity_found = True

    # fetch a list of all studies that contribute to synthesis
    fetch_url = method_dict['getSynthesisSourceList_url']
    if fetch_url.startswith('//'):
        # Prepend scheme to a scheme-relative URL
        fetch_url = "https:%s" % fetch_url
    # as usual, this needs to be a POST
    source_data = requests.post(
        url=fetch_url,
        data={'include_source_list':True}
    ).json()
    source_id_map = source_data.get('source_id_map')
    # N.B. We can ignore the munged ids in source_data['source_list']
    
    contributing_study_info = { }   # build a dict with unique study IDs as keys, commit SHAs as values
    for source_id, source_details in source_id_map.items():
        if 'taxonomy' in source_details:
            continue
        study_id = source_details.get('study_id')
        commit_SHA_in_synthesis = source_details.get('git_sha')
        contributing_study_info[ study_id ] = commit_SHA_in_synthesis

    # Use oti to gather studies curated and created by this user.
    fetch_url = method_dict['findAllStudies_url']
    if fetch_url.startswith('//'):
        # Prepend scheme to a scheme-relative URL
        fetch_url = "https:%s" % fetch_url
    all_studies = requests.post(
        url=fetch_url,
        data={'verbose': True}  # include curator list
    ).json().get('matched_studies', [ ])

    for study in all_studies:
        study_curators = study['ot:curatorName']
        # TODO: improve oti to handle multiple curator names!
        if type(study_curators) is not list:
            study_curators = [study_curators]
        if username in study_curators:
            activity_found = True
            activity['curated_studies'].append(study)
            # first curator name is its original contributor
            if study_curators[0] == username:
                activity['added_studies'].append(study)
            # does this contribute to synthesis?
            if study['ot:studyId'] in contributing_study_info:
                activity['curated_studies_in_synthesis'].append(study)

    # Use oti to gather collections curated and created by this user.
    fetch_url = method_dict['findAllTreeCollections_url']
    if fetch_url.startswith('//'):
        # Prepend scheme to a scheme-relative URL
        fetch_url = "https:%s" % fetch_url
    all_collections = requests.get(url=fetch_url).json()
    for collection in all_collections:
        # gather all participants and check against their GitHub userids
        if userid == collection.get('creator', {}).get('login', None):
            activity_found = True
            activity['added_collections'].append(collection)
        contributor_ids = [c.get('login', None) for c in collection.get('contributors', [ ])]
        if userid in contributor_ids:
            activity_found = True
            activity['curated_collections'].append(collection)

    if activity_found:
        try:
            # search the repo stats (for each phylesystem shard!) for their earliest contribution
            earliest_activity_date = None  # TODO: make this today? or tomorrow? MAXTIME?
            fetch_url = method_dict['phylesystem_config_url']
            if fetch_url.startswith('//'):
                # Prepend scheme to a scheme-relative URL
                fetch_url = "https:%s" % fetch_url
            phylesystem_config = requests.get( url=fetch_url ).json()
            shard_list = phylesystem_config['shards']
            # if GitHub is rebuilding stats cache for any shard, poke them all but ignore dates
            rebuilding_cache = False
            for shard in shard_list:
                shard_name = shard['name']
                shard_contributors = _fetch_github_api(verb='GET', 
                    url='/repos/OpenTreeOfLife/{0}/stats/contributors'.format(shard_name))
                if type(shard_contributors) is not list:
                    # Flag this, but try to fetch remaining shards (to nudge the cache)
                    rebuilding_cache = True
                else:
                    for contrib_info in shard_contributors:
                        if contrib_info['author']['login'] == userid:
                            # look for the earliest week here
                            for week in contrib_info['weeks']:
                                if earliest_activity_date:
                                    earliest_activity_date = min(earliest_activity_date, week['w'])
                                else:
                                    earliest_activity_date = week['w']
                            break  # skip any remaining records

            if rebuilding_cache:
                activity['curator_since'] = 'Generating data, please try again in a moment...'
            elif not earliest_activity_date: 
                activity['curator_since'] = 'This user has not curated any studies.'
            else:
                # show a very approximate date (stats are just weekly)
                from datetime import datetime
                d = datetime.fromtimestamp(earliest_activity_date)
                activity['curator_since'] = d.strftime("%B %Y")
        except:
            # probably JSONDecodeError due to misconfiguration of the API server
            activity['curator_since'] = "Unable to determine this user's first activity"

        return activity
    else:
        return None