Example #1
def forums(function, endpoint, method='get', *data):

    from common.graphite import sendmetric
    import common.logger as _logger
    import time

    # *data collects any optional positional argument into a tuple; unwrap the first element if present
    if len(data) > 0:
        data = data[0]

    # wrap around do_forums so we can do retries!

    retry_max = 5
    retry_count = 0
    sleep = 1

    while (retry_count < retry_max):
        if retry_count > 0:
            _logger.log('[' + function + '] forum api retry {0} of {1}'.format(retry_count, retry_max), _logger.LogLevel.WARNING)
        code, result = do_forums(function, endpoint, method, data)

    # only 5xx errors are worth retrying; anything else is returned immediately

        if code >= 500:
            retry_count += 1
            sendmetric(function, 'forums', 'api_request', 'retry' , 1)
            _logger.log('[' + function + '] forum api call failed. sleeping {0} seconds before retrying'.format(sleep), _logger.LogLevel.WARNING)
            time.sleep(sleep)
        else:
            return(code, result)
    sendmetric(function, 'forums', 'api_request', 'retry_maxed', 1)
    _logger.log('[' + function + '] forum call failed {0} times. giving up. '.format(retry_max), _logger.LogLevel.ERROR)
    # return the last code/result
    return(code, result)
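
A minimal call sketch for the wrapper above, assuming it is importable as request_forums (a hypothetical module name) and that do_forums from Example #5 performs the actual HTTP request; the endpoint string and payload are illustrative only:

import json
import request_forums  # hypothetical module name for the wrappers in this listing

# GET, retried up to 5 times on 5xx responses
code, text = request_forums.forums(__name__, 'core/members', 'get')

# POST; the optional trailing argument is passed through as the request body
payload = json.dumps({'name': 'example'})
code, text = request_forums.forums(__name__, 'core/members', 'post', payload)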
Example #2
def ts3_monitoring(ts3conn):

    from common.graphite import sendmetric
    import common.credentials.ts3 as _ts3
    import common.logger as _logger

    import ts3

    # server statistics/information
    # might as well log some useful shit

    try:
        resp = ts3conn.serverrequestconnectioninfo()
    except ts3.query.TS3QueryError as err:
        _logger.log('[' + __name__ + '] ts3 error: {0}'.format(err),_logger.LogLevel.ERROR)
        return

    for stat in resp.parsed[0]:
        sendmetric(__name__, 'ts3', 'server_stats', stat, resp.parsed[0][stat])

    try:
        resp = ts3conn.serverinfo()
    except ts3.query.TS3QueryError as err:
        _logger.log('[' + __name__ + '] ts3 error: {0}'.format(err),_logger.LogLevel.ERROR)
        return

    # useful metrics as per the ts3 server query guide

    metrics = [ 'connection_bandwidth_sent_last_minute_total', 'connection_bandwidth_received_last_minute_total', ]

    for metric in metrics:
        sendmetric(__name__, 'ts3', 'vserver_{}'.format(_ts3.TS_SERVER_ID), metric, resp.parsed[0][metric])

    # log settings

    # need to ensure that the TS3 server has the logging settings desired, since there's
    # nothing in the ini that lets you set this information

    # this can be pivoted to managing settings as well

    # the values come back as str() rather than int()
    log_keys = [
        'virtualserver_log_channel',
        'virtualserver_log_permissions',
        'virtualserver_log_filetransfer',
        'virtualserver_log_query',
        'virtualserver_log_client',
        'virtualserver_log_server',
    ]
    logsettings = {key: int(resp.parsed[0][key]) for key in log_keys}

    for setting in logsettings:
        if logsettings[setting] == 0:
            _logger.log('[' + __name__ + '] ts3 log option {0} disabled'.format(setting),_logger.LogLevel.WARNING)
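
ts3_monitoring expects an already-established server query connection. A rough sketch of how one might be obtained with the py-ts3 1.x library imported above; the host and login details are placeholders and the exact login/use keyword names are assumptions, only TS_SERVER_ID comes from the code:

import ts3
import common.credentials.ts3 as _ts3

# placeholder host and credentials, for illustration only
with ts3.query.TS3Connection('ts3.example.com') as ts3conn:
    ts3conn.login(client_login_name='serveradmin', client_login_password='changeme')
    ts3conn.use(sid=_ts3.TS_SERVER_ID)
    ts3_monitoring(ts3conn)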
Example #3
def esi(function,
        url,
        method='get',
        charid=None,
        data=None,
        version='latest',
        base='esi',
        extraheaders=dict()):
    from common.graphite import sendmetric
    import common.logger as _logger
    import time

    # wrap around do_esi so we can do retries!

    retry_max = 5
    retry_count = 0
    current_page = 1
    sleep = 1
    remaining_pages = 1

    result_list = list()
    result_dict = dict()

    # handle both retries and pagination

    while (retry_count < retry_max and remaining_pages > 0):

        if retry_count > 0:
            _logger.log(
                '[' + function + '] ESI retry {0} of {1} on page {2}'.format(
                    retry_count, retry_max, current_page),
                _logger.LogLevel.WARNING)
        code, current_result, headers = do_esi(function, url, method,
                                               current_page, charid, data,
                                               version, base, extraheaders)

        try:
            pages = int(headers.get('X-Pages', 1))
        except Exception as e:
            pages = 1

        remaining_pages = pages - current_page

        msg = 'pages remaining on request: {0}'.format(remaining_pages)
        _logger.log('[' + function + '] {0}'.format(msg),
                    _logger.LogLevel.DEBUG)

        # only 5xx errors are worth retrying; anything else is handled as a result

        if code >= 500:
            retry_count += 1
            sendmetric(function, base, 'request', 'retry', 1)
            _logger.log(
                '[' + function +
                '] {0} page {1} failed. sleeping {2} seconds before retrying'.
                format(url, current_page, sleep), _logger.LogLevel.WARNING)
            time.sleep(sleep)
        else:

            # return type handling for merging

            if type(current_result) is list:
                result_list += current_result

            elif type(current_result) is dict:
                result_dict.update(current_result)

            # increment and merge the new result set with the old and proceed to the next page
            current_page += 1

    # logging

    if retry_count == retry_max:
        sendmetric(function, base, 'request', 'retry_maxed', 1)
        _logger.log(
            '[' + function +
            '] ESI call on page {0} failed {1} times. giving up. '.format(
                current_page, retry_max), _logger.LogLevel.WARNING)

    # return final data

    if type(current_result) is list:
        return (code, result_list)

    elif type(current_result) is dict:
        return (code, result_dict)
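
The wrapper is called elsewhere in this listing (see audit_forums in Example #6) roughly like this; the search term is made up:

import urllib.parse
import common.request_esi

query = urllib.parse.urlencode({'categories': 'character', 'language': 'en-us',
                                'search': 'Some Pilot', 'strict': 'true'})
code, result = common.request_esi.esi(__name__, 'search/?' + query, 'get')

if code == 200:
    # list pages are merged into one list, dict pages into one dict
    print(result)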
Example #4
def do_esi(function,
           url,
           method,
           page,
           charid=None,
           data=None,
           version='latest',
           base='esi',
           extraheaders={}):

    import requests
    import common.logger as _logger
    import common.ldaphelpers as _ldaphelpers
    import logging
    import json
    import redis
    import re
    from cachecontrol import CacheControl
    from cachecontrol.caches.redis_cache import RedisCache
    from common.graphite import sendmetric
    from common.credentials.g_translate import translate_api_key
    from commands.maint.tokens import eve_tokenthings

    # headers

    useragent = 'triumvirate services - yell at saeka'
    headers = {
        'Accept': 'application/json',
        'User-Agent': useragent,
        'Accept-Encoding': 'gzip'
    }

    if method == 'post':
        # add a header for POST data
        headers['Content-Type'] = 'application/json'

    if extraheaders:
        # add any custom headers as necessary
        headers.update(extraheaders)

    # shut the F**K up.
    logging.getLogger("requests").setLevel(logging.WARNING)

    # if a charid is specified, this is going to be treated as an authenticated request
    # where an access token is added to the esi request url automatically

    # snag the user's tokens from ldap
    if charid is not None:

        _logger.log(
            '[' + __name__ + '] authenticated {0} request for {1}: {2}'.format(
                base, charid, url), _logger.LogLevel.DEBUG)

        dn = 'ou=People,dc=triumvirate,dc=rocks'
        filterstr = '(uid={})'.format(charid)
        attrlist = [
            'esiAccessToken',
            'esiAccessTokenExpires',
            'discordAccessToken',
        ]
        code, result = _ldaphelpers.ldap_search(__name__, dn, filterstr,
                                                attrlist)

        if code == False:
            _logger.log(
                '[' + __name__ + '] LDAP connection error: {}'.format(result),
                _logger.LogLevel.ERROR)
            js = {'error': 'internal ldap error'}
            return 500, js, None

        if result == None:
            js = {'error': 'no tokens for uid {0}'.format(charid)}
            return 500, js, None

        try:
            (dn, result), = result.items()
        except Exception as e:
            _logger.log('[' + __name__ + '] unexpected ldap result: {0}'.format(result),
                        _logger.LogLevel.ERROR)

        esi_atoken = result.get('esiAccessToken')
        esi_atoken_expires = result.get('esiAccessTokenExpires')
        discord_atoken = result.get('discordAccessToken')

        if esi_atoken == None and base == 'esi':
            js = {'error': 'no stored esi access token'}
            return 400, js, None

        if discord_atoken == None and base == 'discord':
            js = {'error': 'no stored discord access token'}
            return 400, js, None

        # make sure the ESI token is current if this is an ESI request

        if base == 'esi':
            # at this point this is an authenticated request.
            # TODO: make sure the retrieved token is current and refresh it if it is not.

            pass

    else:
        _logger.log(
            '[' + __name__ +
            '] unauthenticated {0} request: {1}'.format(base, url),
            _logger.LogLevel.DEBUG)
        token_header = dict()

    # construct the full request url including api version

    # request_esi hits more than just ESI-specific stuff, so some scoping of the base is necessary

    if base == 'esi':
        # ESI ofc
        base_url = 'https://esi.evetech.net/' + version

        # add common query parameters including pagination and datasource
        # if the url doesn't have a ? indicating it has parameters, add the parameter set with them

        pattern = re.compile('.*[?].*')
        if pattern.match(url):
            url += '&datasource=tranquility'
        else:
            url += '?datasource=tranquility'

        # paginating on more than 1 page to be kind to the google cdn
        if page > 1:
            url += '&page={0}'.format(page)

        if charid is not None:
            # add the authenticated header
            headers['Authorization'] = 'Bearer {0}'.format(esi_atoken)
    elif base == 'discord':
        # discord api
        base_url = 'https://discordapp.com/api/' + version

        if charid is not None:
            # add the authenticated header
            headers['Authorization'] = 'Bearer {0}'.format(discord_atoken)

    elif base == 'zkill':
        # zkillboard
        base_url = 'https://zkillboard.com/api'
    elif base == 'triapi':
        # tri api
        base_url = 'https://api.triumvirate.rocks'
    elif base == 'oauth':
        # eve oauth
        base_url = 'https://login.eveonline.com/oauth'
    elif base == 'g_translate':
        # google translate
        base_url = 'https://translation.googleapis.com/language/translate/v2'
        base_url += '?key={0}&target=en&source=text&model=nmt&'.format(
            translate_api_key)
    elif base == 'eve_market':
        # eve marketdata
        base_url = 'https://api.eve-marketdata.com/api/'
    else:
        # unknown base: bail out rather than crash on an undefined base_url
        return (500, {'error': 'unknown api base: {0}'.format(base)}, None)

    # special google translate bullshit

    if base == 'g_translate':
        full_url = base_url + url
    else:
        full_url = base_url + '/' + url

    # setup redis caching for the requests object
    r = redis.StrictRedis(host='localhost', port=6379, db=0)
    session = requests.Session()
    # redis does not actually connect above, i have to specifically test

    try:
        r.client_list()
        session = CacheControl(session, RedisCache(r))
    except redis.exceptions.ConnectionError as err:
        sendmetric(function, base, 'request', 'rediserror', 1)
        _logger.log('[' + function + '] Redis connection error: ' + str(err),
                    _logger.LogLevel.ERROR)
    except ConnectionRefusedError as err:
        sendmetric(function, base, 'request', 'rediserror', 1)
        _logger.log('[' + function + '] Redis connection refused: ' + str(err),
                    _logger.LogLevel.ERROR)
    except Exception as err:
        sendmetric(function, base, 'request', 'rediserror', 1)
        _logger.log('[' + function + '] Redis generic error: ' + str(err),
                    _logger.LogLevel.ERROR)

    # do the request, but catch exceptions for connection issues

    timeout = 10
    try:
        if method == 'post':
            request = session.post(full_url,
                                   headers=headers,
                                   timeout=timeout,
                                   data=data)
        elif method == 'get':
            request = session.get(full_url, headers=headers, timeout=timeout)

    except requests.exceptions.ConnectionError as err:
        sendmetric(function, base, 'request', 'connection_error', 1)
        _logger.log('[' + function + '] ESI connection error: ' + str(err),
                    _logger.LogLevel.WARNING)
        return (500, {'error': 'API connection error: ' + str(err)}, None)
    except requests.exceptions.ReadTimeout as err:
        sendmetric(function, base, 'request', 'read_timeout', 1)
        _logger.log(
            '[' + function + '] ESI connection read timeout: ' + str(err),
            _logger.LogLevel.WARNING)
        return (500, {
            'error': 'API connection read timeout: ' + str(err)
        }, None)
    except requests.exceptions.Timeout as err:
        sendmetric(function, base, 'request', 'timeout', 1)
        _logger.log('[' + function + '] ESI connection timeout: ' + str(err),
                    _logger.LogLevel.WARNING)
        return (500, {'error': 'API connection timeout: ' + str(err)}, None)
    except requests.exceptions.SSLError as err:
        sendmetric(function, base, 'request', 'ssl_error', 1)
        _logger.log('[' + function + '] ESI SSL error: ' + str(err),
                    _logger.LogLevel.WARNING)
        return (500, {'error': 'API SSL error: ' + str(err)}, None)
    except Exception as err:
        sendmetric(function, base, 'request', 'general_error', 1)
        _logger.log('[' + function + '] ESI generic error: ' + str(err),
                    _logger.LogLevel.WARNING)
        return (500, {'error': 'General error: ' + str(err)}, None)

    # need to also check that the api thinks this was success.

    if not request.status_code == 200:
        sendmetric(function, base, 'request', 'failure', 1)

        if request.status_code == 204:
            # empty return
            return (request.status_code, [], request.headers)
        elif request.status_code == 502:
            # load balancer error, don't bother.
            msg = "ESI LB error"
            _logger.log(
                '[' + function + '] ' + msg + ' ' + str(request.status_code) +
                ': ' + str(request.text), _logger.LogLevel.INFO)

            return (request.status_code, [], request.headers)

        # don't bother to log 404 and 403s
        elif not request.status_code == 404 and not request.status_code == 403:
            _logger.log(
                '[' + function + '] ESI API error ' +
                str(request.status_code) + ': ' + str(request.text),
                _logger.LogLevel.WARNING)
            _logger.log('[' + function + '] ESI API error URL: ' + str(url),
                        _logger.LogLevel.WARNING)
    else:
        sendmetric(function, base, 'request', 'success', 1)

    # check for warning headers. mostly for esi.

    warning = request.headers.get('warning')
    pages = request.headers.get('X-Pages')
    content_type = request.headers.get('content-type')

    if content_type:
        content_type = content_type.lower()

    if pages:
        msg = '{0} total pages'.format(pages)
        _logger.log('[' + function + '] {0}'.format(msg),
                    _logger.LogLevel.DEBUG)

    if warning:
        msg = '{0} deprecated endpoint: {1} version {2} - {3}'.format(
            base, url, version, warning)
        _logger.log('[' + function + '] {0}'.format(msg),
                    _logger.LogLevel.WARNING)

    # do metrics

    elapsed_time = request.elapsed.total_seconds()
    sendmetric(function, base, 'request', 'elapsed', elapsed_time)
    sendmetric(function, base, 'request', request.status_code, 1)

    # shouldn't have to typecast it but sometimes:
    # TypeError: the JSON object must be str, not 'LocalProxy'
    try:
        result = json.loads(str(request.text))
    except Exception as error:
        msg = 'could not convert {0} data to json: {1}'.format(base, full_url)
        _logger.log('[' + function + '] {0}'.format(msg),
                    _logger.LogLevel.WARNING)
        return (500, {'code': 500, 'error': msg}, request.headers)

    return (request.status_code, result, request.headers)
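
For clarity, the URL construction used for the ESI base can be pulled out into a standalone sketch; build_esi_url is a hypothetical helper name used only for illustration:

def build_esi_url(version, url, page):
    # mirror the query-string handling above: always add the datasource,
    # and only add a page parameter when paginating past page 1
    base_url = 'https://esi.evetech.net/' + version
    separator = '&' if '?' in url else '?'
    url += separator + 'datasource=tranquility'
    if page > 1:
        url += '&page={0}'.format(page)
    return base_url + '/' + url

# build_esi_url('latest', 'alliances/', 2)
# -> 'https://esi.evetech.net/latest/alliances/?datasource=tranquility&page=2'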
Example #5
def do_forums(function, endpoint, method, *data):

    import requests
    import common.logger as _logger
    from common.graphite import sendmetric
    import common.credentials.forums as _forums
    import logging
    import json
    import redis
    from cachecontrol import CacheControl
    from cachecontrol.caches.redis_cache import RedisCache

    # reference:
    # https://invisionpower.com/developers/rest-api

    # shut the F**K up.
    logging.getLogger("requests").setLevel(logging.WARNING)

    # setup redis caching for the requests object
    r = redis.StrictRedis(host='localhost', port=6379, db=0)
    session = requests.Session()
    # redis does not actually connect above, i have to specifically test

    try:
        r.client_list()
        session = CacheControl(session, RedisCache(r))
    except redis.exceptions.ConnectionError as err:
        sendmetric(function, 'forums', 'api_request', 'rediserror', 1)
        _logger.log('[' + function + '] Redis connection error: ' + str(err), _logger.LogLevel.ERROR)
    except ConnectionRefusedError as err:
        sendmetric(function, 'forums', 'api_request', 'rediserror', 1)
        _logger.log('[' + function + '] Redis connection refused: ' + str(err), _logger.LogLevel.ERROR)
    except Exception as err:
        sendmetric(function, 'forums', 'api_request', 'rediserror', 1)
        _logger.log('[' + function + '] Redis generic error: ' + str(err), _logger.LogLevel.ERROR)

    # do the request, but catch exceptions for connection issues

    url = _forums.endpoint
    timeout = 5

    try:
        if method == 'post':
            data = data[0]
            headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
            request = session.post(url, headers=headers, timeout=timeout, data=data, auth=(_forums.api_key, '' ))
        elif method == 'get':
            headers = {'Accept': 'application/json'}
            request = session.get(url, headers=headers, timeout=timeout, auth=(_forums.api_key, '' ))

    except requests.exceptions.ConnectionError as err:
        sendmetric(function, 'forums', 'api_request', 'connection_error', 1)
        _logger.log('[' + function + '] forum api connection error: ' + str(err), _logger.LogLevel.ERROR)
        return(500, { 'code': 500, 'error': 'API connection error: ' + str(err)})
    except requests.exceptions.ReadTimeout as err:
        sendmetric(function, 'forums', 'api_request', 'read_timeout', 1)
        _logger.log('[' + function + '] forum api connection read timeout: ' + str(err), _logger.LogLevel.ERROR)
        return(500, { 'code': 500, 'error': 'API connection read timeout: ' + str(err)})
    except requests.exceptions.Timeout as err:
        sendmetric(function, 'forums', 'api_request', 'timeout', 1)
        _logger.log('[' + function + '] forum api connection timeout: ' + str(err), _logger.LogLevel.ERROR)
        return(500, { 'code': 500, 'error': 'forum API connection timeout: ' + str(err)})
    except Exception as err:
        sendmetric(function, 'forums', 'api_request', 'general_error', 1)
        _logger.log('[' + function + '] forum api generic error: ' + str(err), _logger.LogLevel.ERROR)
        return(500, { 'code': 500, 'error': 'General error: ' + str(err)})

    # need to also check that the api thinks this was success.

    if not request.status_code == 200:
        sendmetric(function, 'forums', 'api_request', 'failure', 1)
        # don't bother to log 404s
        if not request.status_code == 404:
            _logger.log('[' + function + '] forum API error ' + str(request.status_code) + ': ' + str(request.text), _logger.LogLevel.ERROR)
            _logger.log('[' + function + '] forum API error URL: ' + str(url), _logger.LogLevel.ERROR)
    else:
        sendmetric(function, 'forums', 'api_request', 'success', 1)

    # do metrics

    elapsed_time = request.elapsed.total_seconds()
    sendmetric(function, 'forums', 'api_request', 'elapsed', elapsed_time)
    sendmetric(function, 'forums', 'api_request', request.status_code, 1)

    return(request.status_code, request.text)
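
Both do_esi and do_forums set up the same redis-backed CacheControl session; in isolation the pattern looks like this (localhost:6379, db 0, as in the code above):

import redis
import requests
from cachecontrol import CacheControl
from cachecontrol.caches.redis_cache import RedisCache

session = requests.Session()
r = redis.StrictRedis(host='localhost', port=6379, db=0)

try:
    # StrictRedis connects lazily, so issue a real command to verify the server is up
    r.client_list()
    session = CacheControl(session, RedisCache(r))
except redis.exceptions.ConnectionError:
    # fall back to an uncached requests session if redis is unreachable
    pass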
Example #6
def audit_forums():

    import common.logger as _logger
    import common.credentials.database as _database
    import common.credentials.forums as _forumcreds
    import common.ldaphelpers as _ldaphelpers
    import common.request_esi
    from tri_core.common.testing import vg_blues, vg_alliances
    from common.graphite import sendmetric
    from collections import defaultdict
    import json
    import urllib
    import html
    import MySQLdb as mysql
    import re

    import hashlib
    from passlib.hash import ldap_salted_sha1
    import uuid

    _logger.log('[' + __name__ + '] auditing forums',_logger.LogLevel.INFO)

    try:
        sql_conn_core = mysql.connect(
            database=_database.DB_DATABASE,
            user=_database.DB_USERNAME,
            password=_database.DB_PASSWORD,
            host=_database.DB_HOST)
    except mysql.Error as err:
        _logger.log('[' + __name__ + '] mysql error: ' + str(err), _logger.LogLevel.ERROR)
        return False

    try:
        sql_conn_forum = mysql.connect(
            database=_forumcreds.mysql_db,
            user=_forumcreds.mysql_user,
            password=_forumcreds.mysql_pass,
            host=_forumcreds.mysql_host)
    except mysql.Error as err:
        _logger.log('[' + __name__ + '] mysql error: ' + str(err), _logger.LogLevel.ERROR)
        return False

    # get everything from the permissions table

    cursor = sql_conn_core.cursor()
    query = 'SELECT allianceID,forum FROM Permissions'
    forum_mappings = dict()

    try:
        cursor.execute(query)
        rows = cursor.fetchall()
    except mysql.Error as err:
        _logger.log('[' + __name__ + '] mysql error: ' + str(err), _logger.LogLevel.ERROR)
        return False
    finally:
        cursor.close()
        sql_conn_core.close()

    # this maps the alliance id to the primary forum group

    for row in rows:
        forum_mappings[row[0]] = row[1]

    # handle vanguard blues as well
    # these are a special case of limited access

    for alliance in vg_blues():
        forum_mappings[alliance] = 73

    # get all the forum users and stuff them into a dict for later processing

    cursor = sql_conn_forum.cursor()
    query = 'SELECT name, member_group_id, mgroup_others, ip_address, pp_main_photo, pp_thumb_photo FROM core_members'
    try:
        cursor.execute(query)
        rows = cursor.fetchall()
    except mysql.Error as err:
        _logger.log('[' + __name__ + '] mysql forum error: ' + str(err), _logger.LogLevel.ERROR)
        return False

    total_users = len(rows)
    _logger.log('[' + __name__ + '] forum users: {0}'.format(total_users), _logger.LogLevel.INFO)
    sendmetric(__name__, 'forums', 'statistics', 'total_users', total_users)

    users = dict()
    orphan = 0

    # special forum users that are not to be audited
    special = [ 'Admin', 'Sovereign' ]

    for charname, primary_group, secondary_string, last_ip,  pp_main_photo, pp_thumb_photo in rows:

        # dont work on these
        if charname in special:
            continue

        if charname == '' or charname == None:
            continue

        # recover user information
        users[charname] = dict()
        users[charname]['charname'] = charname
        users[charname]['doomheim'] = False
        users[charname]['primary'] = primary_group
        users[charname]['last_ip'] = last_ip
        users[charname]['pp_main_photo'] = pp_main_photo
        users[charname]['pp_thumb_photo'] = pp_thumb_photo

        # convert the comma separated list of groups to an array of integers

        secondaries = []
        for item in secondary_string.split(','):
            if not item == '' and not item == ' ':
                secondaries.append(int(item))
        users[charname]['secondary'] = secondaries

        cn, _ = _ldaphelpers.ldap_normalize_charname(charname)

        # match up against ldap data
        dn = 'ou=People,dc=triumvirate,dc=rocks'
        filterstr='cn={0}'.format(cn)
        attributes = ['authGroup', 'accountStatus', 'uid', 'alliance', 'corporation' ]
        code, result = _ldaphelpers.ldap_search(__name__, dn, filterstr, attributes)
        if code == False:
            _logger.log('[' + __name__ + '] ldap error: {0}'.format(result), _logger.LogLevel.ERROR)
            return

        if result == None:
            # nothing in ldap.
            # you WILL be dealt with later!
            orphan += 1
            users[charname]['charid'] = None
            users[charname]['alliance'] = None
            users[charname]['authgroups'] = []
            users[charname]['accountstatus'] = None
            users[charname]['corporation'] = None
        else:
            (dn, info), = result.items()

            alliance = info.get('alliance')
            corporation = info.get('corporation')

            if alliance is not None:
                alliance = int(alliance)
            if corporation is not None:
                corporation = int(corporation)

            users[charname]['alliance'] = alliance
            users[charname]['corporation'] = corporation

            users[charname]['charid'] = int( info['uid'] )
            users[charname]['accountstatus'] = info['accountStatus']
            users[charname]['authgroups'] = info['authGroup']

    # postprocessing

    _logger.log('[' + __name__ + '] forum users with no LDAP entry: {0}'.format(orphan),_logger.LogLevel.INFO)

    # map each user to a charID given that the username is exactly
    # a character name

    really_orphan = 0

    for charname in users:
        user = users[charname]
        primary = user['primary']

        if user['charid'] == None:
            # need to map the forum username to a character id
            query = { 'categories': 'character', 'language': 'en-us', 'search': charname, 'strict': 'true' }
            query = urllib.parse.urlencode(query)
            esi_url = 'search/?' + query
            code, result = common.request_esi.esi(__name__, esi_url, 'get')
            _logger.log('[' + __name__ + '] /search output: {}'.format(result), _logger.LogLevel.DEBUG)

            if not code == 200:
                _logger.log('[' + __name__ + '] error searching for user {0}: {1}'.format(charname, result['error']),_logger.LogLevel.INFO)
                continue
            if len(result) == 0:
                really_orphan += 1
                users[charname]['doomheim'] = True
            if len(result) == 1:

                # make a stub ldap entry for them

                charid = result['character'][0]
                users[charname]['charid'] = charid

                # the forum stub exists only to pin
                _ldaphelpers.ldap_create_stub(charname, charid, authgroups=['public', 'forum_stub'])

    _logger.log('[' + __name__ + '] forum users who have biomassed: {0}'.format(really_orphan),_logger.LogLevel.INFO)

    # at this point every forum user has been mapped to their character id either via ldap or esi /search
    # work through each user and determine if they are correctly setup on the forums

    non_tri = 0
    for charname in users:

        charid = users[charname]['charid']

        if users[charname]['doomheim'] == True:
            forumpurge(charname)
            continue

        authgroups = users[charname]['authgroups']
        alliance = users[charname]['alliance']
        corporation = users[charname]['corporation']
        primary_group = users[charname]['primary']
        secondary_groups = users[charname]['secondary']
        forum_lastip = users[charname]['last_ip']

        if users[charname]['pp_main_photo'] is None:
            # set forum portrait to their in-game avatar if the user doesn't have one

            esi_url = 'characters/{0}/portrait/'.format(charid)
            code, result = common.request_esi.esi(__name__, esi_url, 'get')
            _logger.log('[' + __name__ + '] /characters portrait output: {}'.format(result), _logger.LogLevel.DEBUG)

            if code == 200:

                portrait = result['px128x128'].replace("http", "https")
                query = 'UPDATE core_members SET pp_main_photo=%s, pp_thumb_photo=%s, pp_photo_type="custom" WHERE name = %s'
                try:
                    cursor.execute(query, (portrait, portrait, charname,))
                    sql_conn_forum.commit()
                except Exception as err:
                    _logger.log('[' + __name__ + '] mysql error: ' + str(err), _logger.LogLevel.ERROR)
                    return

            else:
                # something broke severely
                _logger.log('[' + __name__ + '] /characters portrait API error {0}: {1}'.format(code, result['error']), _logger.LogLevel.ERROR)


        ## start doing checks

        # only people in tri get anything other than public access on the tri forums
        # forum public/unprivileged groupid: 2

        vanguard = forum_mappings.keys()

        if alliance not in vanguard and primary_group != 2:

            # this char is not in a vanguard alliance or blue but has non-public forum access
            forumpurge(charname)

            # log

            _logger.securitylog(__name__, 'forum demotion', charid=charid, charname=charname, ipaddress=forum_lastip)
            msg = 'non-vanguard character with privileged access demoted. charname: {0} ({1}), alliance: {2}, primary group: {3}, secondary group(s): {4}, last ip: {5}'.format(
                charname, charid, alliance, primary_group, secondary_groups, forum_lastip
            )
            _logger.log('[' + __name__ + '] {}'.format(msg),_logger.LogLevel.WARNING)
            non_tri += 1

        if alliance in vanguard:

            correct_primary = forum_mappings[alliance]
            correct_secondaries = []
            # construct the list of correct secondary groups

            for authgroup in authgroups:
                mapping = authgroup_map(authgroup)
                if not mapping == None:
                    correct_secondaries.append(mapping)

            # map any custom corp groups
            corpgroup = corp_map(corporation)
            if not corpgroup == None:
                correct_secondaries.append(corpgroup)

            ## deal with secondary groups

            # remove secondaries
            msg = 'char {0} current secondaries: {1} correct secondaries: {2}'.format(charname, secondary_groups, correct_secondaries)
            _logger.log('[' + __name__ + '] {}'.format(msg),_logger.LogLevel.DEBUG)

            secondaries_to_remove = list( set(secondary_groups) - set(correct_secondaries) )

            # ips forum likes a comma separated list of secondaries
            if len(secondaries_to_remove) > 0:
                change = ','.join(str(group) for group in correct_secondaries)

                query = 'UPDATE core_members SET mgroup_others=%s WHERE name=%s'
                try:
                    cursor.execute(query, (change, charname,))
                    sql_conn_forum.commit()
                    msg = 'removed secondary group(s) {0} from {1}'.format(secondaries_to_remove, charname)
                    _logger.log('[' + __name__ + '] {}'.format(msg),_logger.LogLevel.INFO)
                except Exception as err:
                    _logger.log('[' + __name__ + '] mysql error: ' + str(err), _logger.LogLevel.ERROR)
                    return False

            secondaries_to_add = list( set(correct_secondaries) - set(secondary_groups) )

            # ips forum likes a comma separated list of secondaries
            if len(secondaries_to_add) > 0:
                change = ','.join(str(group) for group in correct_secondaries)

                query = 'UPDATE core_members SET mgroup_others=%s WHERE name=%s'
                try:
                    cursor.execute(query, (change, charname,))
                    sql_conn_forum.commit()
                    msg = 'added secondary group(s) {0} to {1}'.format(secondaries_to_add, charname)
                    _logger.log('[' + __name__ + '] {}'.format(msg),_logger.LogLevel.INFO)
                except Exception as err:
                    _logger.log('[' + __name__ + '] mysql error: ' + str(err), _logger.LogLevel.ERROR)
                    return False

            if not correct_primary == primary_group:
                query = 'UPDATE core_members SET member_group_id=%s WHERE name=%s'
                try:
                    cursor.execute(query, (correct_primary, charname,))
                    sql_conn_forum.commit()
                    _logger.log('[' + __name__ + '] adjusted primary forum group of {0} to {1}'.format(charname, correct_primary),_logger.LogLevel.INFO)
                except Exception as err:
                    _logger.log('[' + __name__ + '] mysql error: ' + str(err), _logger.LogLevel.ERROR)
                    return False
    cursor.close()
    sql_conn_forum.close()
    _logger.log('[' + __name__ + '] non-vanguard forum users reset: {0}'.format(non_tri),_logger.LogLevel.INFO)
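
The group reconciliation above boils down to two set differences plus the comma-separated string the IPS forum schema expects; isolated, with made-up group ids:

# current groups read from core_members, correct groups derived from ldap authGroups
secondary_groups = [73, 90, 12]
correct_secondaries = [73, 45]

secondaries_to_remove = list(set(secondary_groups) - set(correct_secondaries))  # 90 and 12
secondaries_to_add = list(set(correct_secondaries) - set(secondary_groups))     # 45

# mgroup_others is stored as a comma separated string of group ids
mgroup_others = ','.join(str(group) for group in correct_secondaries)           # '73,45'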