def _periodicConnPing(self):
        """
        Pings the MySQL server across all connections in the pool. Used to keep connectivity alive and
        prevent idle-timeout disconnects on the MySQL server.
        """
        if singleton.get('castdaemon', strict=False) and (singleton.get('castdaemon').isShuttingDown() or errorutil.checkIfFatalErrorOccurred()):
            return
         
        cannotPingOnConns = []
        firstExceptionValue = None
        for conn in self.connections.values():
            try:
                conn.ping()
            except MySQLdb.OperationalError:
                excType, excValue, excTraceback = sys.exc_info()
                
                #try to close this connection
                try:
                    self._close(conn)
                except MySQLdb.ProgrammingError: #closing a closed connection
                    #connection is already closed, don't include it in our list of bad connections we had
                    pass

                cannotPingOnConns.append(conn)
                if not firstExceptionValue:
                    firstExceptionValue = excValue
        
        if cannotPingOnConns:
            log.msg(u"Could not ping MySQL server for pool \"%s\" on %i of %i pool connections: %s. Resetting these connections..." % (
                self._poolName, len(cannotPingOnConns), len(self.connections), firstExceptionValue),
            lvl='w', ss='ss_db')
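#Example (hedged sketch): how a pool instance might schedule the keep-alive ping above.
# `pool` and the 60-second interval are illustrative assumptions, not taken from this module.
#
#    from twisted.internet import task
#    pingLoop = task.LoopingCall(pool._periodicConnPing)
#    pingLoop.start(60.0) #fire every 60 seconds to stay under MySQL's idle timeout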
def triggerFatalError(errorStr, printError=True, doMinimalShutdown=False):
    """I am called on an unrecoverable error. I will signal the error, wait a certain period of time
    (like 2 seconds), and then exit the program. As monit should be running for the process, and it will
    automatically restart it.
    
    @param errorStr: The error string to print out as the error that occurred
    @type errorStr: str
    
    @param doMinimalShutdown: Set to True to have the role not deinitalize the core role
    """
    from twisted.internet import reactor
    global stopfunc
    global _fatalErrorOccurred
    global _doMinimalShutdown
    assert stopfunc
    
    if _fatalErrorOccurred:
        #fatal error already occurred, we should already be in the process of shutting down, so just return here
        return
    
    _fatalErrorOccurred = True
    _doMinimalShutdown = doMinimalShutdown
    log.msg("FATAL ERROR RAISED: %s" % errorStr, lvl='c', ss='ss_castdaemon')
    
    #for the castdaemon, send it out to our zenoss monitoring server if we're configured to do that
    try:
        createZEventOnException = singleton.get('core').getPlatformSetting('create_zevent_on_castdaemon_exception')
    except Exception:
        #most likely the settings cache is not yet initialized
        createZEventOnException = False
        
    if     singleton.get('castdaemon', strict=False) \
       and createZEventOnException:
        from castdaemon import util as castdaemon_util
        for zenossHostCol in ('health_monitoring_server1', 'health_monitoring_server2'):
            zenossHost = singleton.get('core').getPlatformSetting(zenossHostCol)
            castdaemon_util.sendEventToZenoss(zenossHost, singleton.get('core').getPlatformSetting('zenoss_manager_login'),
                singleton.get('core').getPlatformSetting('zenoss_manager_password'),
                "Castdaemon Exception: %s" % errorStr, str(traceback.format_exc()), 'error')
    
    if printError and getattr(sys, "exc_value", None):
        #print out the traceback of the last exception thrown
        log.msg(traceback.format_exc())
        
    log.msg("EXITING (TO BE RESTARTED AUTOMATICALLY)", lvl='i', ss='ss_castdaemon')
    #time.sleep(consts_shared_twisted.PERIOD_SLEEP_ON_FATAL_ERROR)
    
    try:
        stopfunc() #either reactor.stop() or sys.exit()
    except: #if the reactor isn't running, we will get an exception we can ignore here
        pass
    if stopfunc == reactor.stop: #IGNORE:E1101
        #now raise an exception so that the caller unwinds immediately rather than continuing past the fatal error
        raise Exception("HALT - FATAL ERROR RAISED: %s" % errorStr)
def _cbEmptyBacklog(self, unused, backlogPresent):
    if backlogPresent and self.getQueryBacklogSize() == 0:
        #NOTE that this condition may trigger two times in quick succession for any given conn pool
        self._connectivityDownSince = None #connectivity no longer down
        self._connectivityLastRestored = time.time()
        
        #as we successfully cleared a backlog, purposefully refresh our server stats now just to be safe (and to
        # reduce the chance that the AC sees us as dead, or, if that is already the case, to properly realize it)
        if self._poolName == 'dbMetastore':
            log.msg(u"dbMetastore connectivity restored, refreshing server stats and state as a backlog was present...", lvl='i', ss='ss_db')
            singleton.get('core').updateServerStats()
            singleton.get('core').doRefresh(doFullRefresh=False)
def _periodicTryEmptyQueryBacklog(self):
    """Will periodically check if the query backlog has any entries in it, and if so, try to issue those
    pending queries against the DB.
    """
    if singleton.get('castdaemon', strict=False) and (singleton.get('castdaemon').isShuttingDown() or errorutil.checkIfFatalErrorOccurred()):
        return
    
    if not self.getQueryBacklogSize():
        return
    
    #print "PERIODIC trying to empty backlog, size is", self.getQueryBacklogSize()
    d = self._emptyBacklog()
    return d
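#Example (hedged sketch): like the connection ping, the backlog drain above lends itself to a
# periodic Twisted task; `pool` and the 5-second interval are illustrative.
#
#    from twisted.internet import task
#    task.LoopingCall(pool._periodicTryEmptyQueryBacklog).start(5.0)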
def store(key, content, lifetime=consts.IBL_API_MEMCACHE_DEFAULT_LIFETIME):
    """    
    Stores some content in the local memcache on the machine and returns a UUID-reference to that entry.
    
    @param content: The content to store (must be serializable by JSON)
    @param lifetime: (Optional.) The desired lifetime of the content in seconds. If not stated, defaults to
        L{consts_castdaemon.COMMON_STORED_TOKEN_ACTIVE_PERIOD_DEFAULT}. Valid values are
        L{consts_castdaemon.COMMON_STORED_TOKEN_ACTIVE_PERIOD_MIN} to
        L{consts_castdaemon.COMMON_STORED_TOKEN_ACTIVE_PERIOD_MAX} 
        
    @return: The key name created (should be the same as the 'key' parameter), or None on failure.
    """
    #encode unicode strings to UTF-8 byte strings (memcached keys and values must be byte strings)
    if isinstance(key, unicode):
        key = key.encode('utf-8')
    if isinstance(content, unicode):
        content = content.encode('utf-8')

    if    lifetime < STORED_TOKEN_ACTIVE_PERIOD_MIN \
       or lifetime > STORED_TOKEN_ACTIVE_PERIOD_MAX:
        raise ValueError("lifetime parameter is out of bounds")
   
    log.msg("memcache.store: key=%s, lifetime=%s" % (key, lifetime), lvl='d2', ss='ss_iblapi_memcache')

    memcacheClient = singleton.get('memCacheClient', strict=False)
    if not memcacheClient:
        raise Exception("memCacheClient not initialized yet!")

    d = memcacheClient.set(key, content, expireTime=lifetime)
    d.addCallback(lambda result: key if result else None) #yield the key on success, None otherwise
    d.addErrback(lambda failure: log.msg("Could not make 'set' query to memcached daemon. Failure: %s" % failure,
        lvl='e', ss='ss_iblapi_memcache'))
    return d
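#Example (hedged sketch): storing a JSON payload; the key, payload, and 300-second lifetime
# are illustrative (the lifetime must fall within the MIN/MAX bounds checked above).
#
#    d = store('session-abc123', simplejson.dumps({'userID': 42}), lifetime=300)
#    d.addCallback(lambda key: log.msg("stored under key %s" % key))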
def _directProcessExecute(txn, queryString, argList, fetch, connID, useCache, cacheExpireTime, cacheHashKey, printQuery, many):
    assert isinstance(argList, tuple)
    if many:
        txn.executemany(queryString, argList)
    else:
        txn.execute(queryString, argList)
    #^ may raise an exception if the error can't be handled. don't handle that here (propagate it up)
    if fetch == 'o':
        results = txn.fetchone()
    elif fetch == 'om':
        row = txn.fetchone()
        results = fetchResultRowToDict(txn, row)
    elif fetch == 'a':
        results = txn.fetchall()
    elif fetch == 'am':
        rows = txn.fetchall()
        results = fetchResultRowsToDict(txn, rows)
    elif fetch == 'lid':
        txn.execute("SELECT LAST_INSERT_ID()")
        results = txn.fetchone()[0]
    elif fetch == 'N':
        #N = don't fetch anything
        results = None

    if useCache:
        #cache this result in memory
        assert cacheHashKey
        
        try:
            jsonResults = simplejson.dumps(results)
        except (TypeError, ValueError):
            #do not store in the memcache
            log.msg(u"Could not store database results in memcached; could not serialize into JSON for query: \"%s\". Args: \"%s\""
                    % (queryString, argList), lvl='w', ss='ss_db')
            return results

        #if we are running this code in the castdaemon, override the cache period with the DB-based setting
        if     cacheExpireTime == consts.MEMCACHED_DEFAULT_EXPIRE_PERIOD \
           and singleton.get('core', strict=False):
            cacheExpireTime = singleton.get('core').getPlatformSetting('castdaemon_dbcache_interval', strict=False) \
                or consts.MEMCACHED_DEFAULT_EXPIRE_PERIOD
        
        threads.blockingCallFromThread(reactor, memcache.store, cacheHashKey, jsonResults, lifetime=cacheExpireTime)
    return results
def connectToMemcached():
    """
    @return: The memcached protocol client object, or False if a connection attempt is already in progress
    """
    log.msg("%s memcache client connection..." % (singleton.get('memCacheClient', strict=False)
        and "REMAKING" or "Making",), lvl='i', ss='ss_castdaemon')

    d = defer.Deferred()
    factory = MemcacheClientFactory(d)
    reactor.connectTCP("localhost", consts.MISC_MEMCACHED_PORT, factory)
    return d
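#Example (hedged sketch): connecting at startup and registering the client for later use.
# `singleton.set` is an assumed registration call, not confirmed by this module.
#
#    d = connectToMemcached()
#    d.addCallback(lambda client: singleton.set('memCacheClient', client))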
def _cbCallProc(cacheValue, procName, inArgList, fetch, connID, useCache, cacheExpireTime, printQuery, cacheHashKey):
    if cacheHashKey and cacheValue:
        #useCache was set to True and we found something in the cache; deserialize it from JSON
        try:
            cacheValue = simplejson.loads(cacheValue, encoding='utf-8')
            return cacheValue
        except (ValueError, TypeError):
            log.msg(u"Could not load previously stored database results from memcached; could not unserialize from JSON for proc: \"%s\". InArgs: \"%s\""
                    % (procName, inArgList), lvl='w', ss='ss_db')
    
    #otherwise we're not caching or did not find anything in the memcache...call the proc
    if singleton.get(connID + 'Type') == 'direct':
        d = singleton.get(connID).runInteraction(_directProcessCallProc, procName, inArgList, fetch, connID, useCache,
            cacheExpireTime, cacheHashKey, printQuery)
        d.addErrback(_directProcessCallProc_onError, procName, inArgList, fetch, connID, useCache,
            cacheExpireTime, cacheHashKey, printQuery, False) #False = _alreadyInBacklog: a fresh proc call is never already backlogged
        return d
    else: #dbproxy
        return singleton.get(connID).callProc(procName, inArgList, fetch, connID, useCache,
            cacheExpireTime, cacheHashKey)
def _cbExecute(cacheValue, queryString, argList, fetch, connID, useCache, cacheExpireTime, printQuery, cacheHashKey,
_alreadyInBacklog, many):
    if cacheHashKey and cacheValue:
        #useCache was set to True and we found something in the cache; deserialize it from JSON
        try:
            cacheValue = simplejson.loads(cacheValue, encoding='utf-8')
            return cacheValue
        except (ValueError, TypeError):
            log.msg(u"Could not load previously stored database results from memcached; could not unserialize from JSON for query: \"%s\". Args: \"%s\""
                    % (queryString, argList), lvl='w', ss='ss_db')

    #otherwise we're not caching or did not find anything in the memcache...make the query
    if singleton.get(connID + 'Type') == 'direct':
        d = singleton.get(connID).runInteraction(_directProcessExecute, queryString, argList, fetch, connID, useCache,
            cacheExpireTime, cacheHashKey, printQuery, many)
        d.addErrback(_directProcessExecute_onError, queryString, argList, fetch, connID, useCache,
            cacheExpireTime, cacheHashKey, printQuery, _alreadyInBacklog)
        return d
    else: #dbproxy
        return singleton.get(connID).execute(queryString, argList, fetch, connID, useCache,
            cacheExpireTime, cacheHashKey)
def _directProcessCallProc(txn, procName, inArgList, fetch, connID, useCache, cacheExpireTime, cacheHashKey,
printQuery):
    #call the proc. no OUT arguments are supported; IN arguments are quoted inline.
    #any exception from the execute propagates up to the errback
    txn.execute("CALL %s(%s);" % (procName, ", ".join(["'%s'" % arg for arg in inArgList])))
    if fetch == 'o':
        results = txn.fetchone()
    elif fetch == 'om':
        row = txn.fetchone()
        results = fetchResultRowToDict(txn, row)
    elif fetch == 'a':
        results = txn.fetchall()
    elif fetch == 'am':
        rows = txn.fetchall()
        results = fetchResultRowsToDict(txn, rows)
    else: #fetch == 'N': don't fetch anything
        results = None

    if useCache:
        #cache this result in memory
        assert cacheHashKey
        
        try:
            jsonResults = simplejson.dumps(results)
        except:
            #do not store in the memcache
            log.msg(u"Could not store database results in memcached; could not serialize into JSON for proc: \"%s\". InArgs: \"%s\""
                    % (procName, inArgList), lvl='w', ss='ss_db')
            return results

        #if we are running this code in the castdaemon, override the cache period with the DB-based setting
        if     cacheExpireTime == consts.MEMCACHED_DEFAULT_EXPIRE_PERIOD \
           and singleton.get('core', strict=False):
            cacheExpireTime = singleton.get('core').getPlatformSetting('castdaemon_dbcache_interval', strict=False) \
                or consts.MEMCACHED_DEFAULT_EXPIRE_PERIOD
        
        threads.blockingCallFromThread(reactor, memcache.store, cacheHashKey, jsonResults, lifetime=cacheExpireTime)
    return results
def runWithConnection(interaction, *args, **kwargs):
    """
    I am a wrapper to the L{twisted.enterprise.adbapi.ConnectionPool.runWithConnection} function. See the
    documentation for that function for more info.
    
    @param interaction: Please remember that the function passed as this argument must not return a
    Deferred
    @return: A deferred that fires once the interaction is complete.
    
    @note: PLEASE be careful when using this function, as the function called from it must be
    thread-safe (i.e. it may not access non-local and/or non-protected resources).
    
    @note: We CANNOT backlog queries issued in an interaction spawned via this function. That means that
    if you are in the middle of an interaction and DB connectivity fails, the interaction will be aborted and
    an Exception will be generated.
    """
    
    connID = kwargs.pop('connID', 'dbMetastore')
        
    assert connID in ('dbMetastore', 'dbLogMetastore')
    assert singleton.get(connID + 'Type') in ('direct', 'dbproxy')
    
    if singleton.get(connID + 'Type') == 'direct':
        #any error (MySQLdb.ProgrammingError for invalid syntax, MySQLdb.Error for lost connectivity,
        # or anything else) simply propagates up to the caller
        return singleton.get(connID).runWithConnection(interaction, *args, **kwargs)
    else:
        return singleton.get(connID).runWithConnection(interaction, connID, *args, **kwargs)
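#Example (hedged sketch): a thread-safe interaction run through the pool. The interaction
# must not return a Deferred; the table name is illustrative.
#
#    def _countRows(conn):
#        cursor = conn.cursor()
#        cursor.execute("SELECT COUNT(*) FROM example_table")
#        return cursor.fetchone()[0]
#
#    d = runWithConnection(_countRows, connID='dbMetastore')
#    d.addCallback(lambda count: log.msg("row count: %i" % count))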
def _directProcessCallProc_onError(failure, procName, inArgList, fetch, connID, useCache,
cacheExpireTime, cacheHashKey, printQuery, _alreadyInBacklog):
    
    if failure.check(MySQLdb.ProgrammingError) or failure.check(TypeError):
        log.msg(u"Database query failure. Error: %s. Failed proc was: %s; Args: (%s)"
            % (failure.getErrorMessage(), procName, ', '.join([x for x in inArgList]),), lvl='e', ss='ss_db')
        failure.raiseException() #invalid syntax error
    elif failure.check(MySQLdb.OperationalError):
        if singleton.get('castdaemon', strict=False) and (singleton.get('castdaemon').isShuttingDown() or errorutil.checkIfFatalErrorOccurred()):
            #we shouldn't try to queue queries when the server is going down (as this may hold up the shutdown
            # process)
            failure.raiseException()
        
        if     _isMySQLServerConnDownErrorMessage(failure.getErrorMessage()) \
           and not _alreadyInBacklog:
            #currently not connected, queue up the request to be run when the connection is restored
            dRequestCompleted = defer.Deferred() #will be fired when the query is finally issued against the DB
            backlogItem = {'callType': 'callProc', 'procName': procName, 'inArgList': inArgList,
                'fetch': fetch, 'connID': connID, 'useCache': useCache,
                'cacheExpireTime': cacheExpireTime, 'cacheHashKey': cacheHashKey, 'printQuery': printQuery,
                'dRequestCompleted': dRequestCompleted, }

            log.msg(u"Connectivity failure: Queuing proc (MD5: %s): \"%s\". New backlog length: %i"
                % (_getBacklogEntryLoggingHash(backlogItem), procName,
                   singleton.get(connID).getQueryBacklogSize() + 1), lvl='i', ss='ss_db')

            singleton.get(connID)._addToQueryBacklog(backlogItem)
            return backlogItem['dRequestCompleted']
        elif _isMySQLServerConnDownErrorMessage(failure.getErrorMessage()) and _alreadyInBacklog:
            #don't insert the entry in to the backlog again as it's already there
            #print "dbExecute: CONNECTION STILL DOWN!"
            return EXECUTE_ALREADY_QUEUED_RETURNDATA
        else:
            log.msg(u"Unknown database operational error. Error: %s. Failed proc was: %s; Args: (%s)"
                % (failure.getErrorMessage(), procName,
                    ', '.join([x for x in inArgList]),), lvl='e', ss='ss_db')
            failure.raiseException()
def retrieve(key, clearOnRetrieval=False):
    """
    Given a Key, retrieves content that was stored earlier in a local memcache.
    
    @param key: The key to get.
    @param clearOnRetrieval: Set to True to remove the data from the memcache after retrieval.
    @return: A deferred that yields the cached data, or None if not found
    """
    assert key
    #encode unicode strings to UTF-8 byte strings (memcached keys must be byte strings)
    if isinstance(key, unicode):
        key = key.encode('utf-8')
        
    log.msg("memcache.retrieve: key=%s, clearOnRetrieval=%s" % (key, clearOnRetrieval),
            lvl='d2', ss='ss_iblapi_memcache')

    memcacheClient = singleton.get('memCacheClient', strict=False)
    if not memcacheClient:
        raise Exception("memCacheClient not initialized yet!")
    d = memcacheClient.get(key)
    d.addCallback(_cbRetrieve, key, clearOnRetrieval, memcacheClient)
    d.addErrback(lambda failure: log.msg(u"Could not make 'get' query to memcached daemon for key '%s'. Failure: %s" % (key, failure),
        lvl='e', ss='ss_iblapi_memcache'))
    return d
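#Example (hedged sketch): one-shot retrieval of the entry stored in the store() example;
# the key name is illustrative.
#
#    d = retrieve('session-abc123', clearOnRetrieval=True)
#    d.addCallback(lambda data: simplejson.loads(data) if data else None)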
def executeOnCursor(txn, queryString, argList=tuple(), fetch='N', useCache=False,
cacheExpireTime=consts.MEMCACHED_DEFAULT_EXPIRE_PERIOD):
    """
    Allows caching of results obtained through a direct, runInteraction-obtained cursor-level execution
        (e.g. not thorugh dbutil.execute or dbutil.callProc).
        
        @see: The arguments for L{callProc}
        @param txn: The transaction/cursor object to operate on.
        @param query: The SQL query string to execute
        @param argList: The list of arguments to operate on.
        @param fetch: The fetch mode, one of the following:
            - 'o': Return a single row of results (e.g. fetchone())
            - 'a': Return all results (e.g. fetchall())
        @return: The fetched results, according to the fetch mode
        
    @note: We CURRENTLY DO NOT/CANNOT backlog queries issued in an interaction spawned via this function. That means that
    if you are in the middle of an interaction and DB connectivity fails, the interaction will be aborted and
    an Exception will be generated.
    """
    #decode any byte strings passed in as the query string or within argList into unicode strings
    if isinstance(queryString, str):
        queryString = queryString.decode('utf-8')
    #argList may be a tuple (the default), which cannot be mutated in place, so build a new tuple
    argList = tuple(arg.decode('utf-8') if isinstance(arg, str) else arg for arg in argList)
    
    if     singleton.get('core', strict=False) \
       and singleton.get('core').getPlatformSetting('castdaemon_dbcache_interval', strict=False) == 0:
        #we are running as the castdaemon and caching is disabled
        useCache = False #override any True value
    
    if useCache:
        #use the query string itself, along with the args list as the hash key
        cacheHashKey = _produceCacheHashKey(queryString, argList, fetch)
        
        #get the result from memcached in a blocking manner
        results = threads.blockingCallFromThread(reactor, memcache.retrieve, cacheHashKey)
        if results:
            results = simplejson.loads(results)
            return results
    else:
        cacheHashKey = ""
    
    #result not cached yet, execute the query and cache the results
    txn.execute(queryString, argList)
    if fetch == 'o':
        results = txn.fetchone()
    elif fetch == 'om':
        row = txn.fetchone()
        if row is None:
            return None
        cols = [ d[0] for d in txn.description ]
        results = dict(zip(cols, row))          
    elif fetch == 'a':
        results = txn.fetchall()
    elif fetch == 'am':
        rows = txn.fetchall()
        if rows is None:
            return None
        cols = [ d[0] for d in txn.description ]
        results = [dict(zip(cols, row)) for row in rows ]
    elif fetch == 'N':
        results = None        

    if useCache:
        #cache this result in memory
        assert cacheHashKey
        
        try:
            jsonResults = simplejson.dumps(results)
        except (TypeError, ValueError):
            #do not store in the memcache
            log.msg(u"Could not store database results in memcached; could not serialize into JSON for query: \"%s\". Args: \"%s\""
                    % (queryString, argList), lvl='w', ss='ss_db')
            return results
        
        #if we are running this code in the castdaemon, override the cache period with the DB-based setting
        if     cacheExpireTime == consts.MEMCACHED_DEFAULT_EXPIRE_PERIOD \
           and singleton.get('core', strict=False):
            cacheExpireTime = singleton.get('core').getPlatformSetting('castdaemon_dbcache_interval', strict=False) \
                or consts.MEMCACHED_DEFAULT_EXPIRE_PERIOD

        threads.blockingCallFromThread(reactor, memcache.store, cacheHashKey, jsonResults, lifetime=cacheExpireTime)
    
    return results
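#Example (hedged sketch): calling executeOnCursor from inside a runInteraction so the results
# are cached for five minutes; the table and column names are illustrative.
#
#    def _interaction(txn):
#        return executeOnCursor(txn, "SELECT id, name FROM example_table WHERE active = %s",
#            argList=(1,), fetch='am', useCache=True, cacheExpireTime=300)
#
#    d = singleton.get('dbMetastore').runInteraction(_interaction)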
def execute(queryString, argList=tuple(), fetch='N', connID='dbMetastore', useCache=False,
cacheExpireTime=consts.MEMCACHED_DEFAULT_EXPIRE_PERIOD, printQuery=False,
_alreadyInBacklog=False, many=False):
    """
    Executes a SQL statement on the database
    
    @param useCache: Set to True to cache the results of the query in memcached; if the results are
    already cached in memcached, the cached version will be returned instead of having to make a query
    to the DB again.
    @param cacheExpireTime: The length of time (in seconds) that the result remains valid in the memcache.
    This value is only used if useCache is set to True.
    
    @see: The arguments for L{callProc}
    
    @param fetch: Same as the arguments for L{callProc}, with the addition of:
        - 'lid': Return the last insert ID (not in a tuple or list)
        
    @return: A deferred that yields the result of the query, or the data "DBEXECUTE-BACKLOG-ALREADYQUEUED" if we
    were executing a backlogged query (_alreadyInBacklog=True) and the DB was still down (meaning the query could
    still not be successfully processed)

    """
    assert queryString and len(queryString) >= 6
    assert fetch in ('N', 'o', 'om', 'a', 'am', 'lid')
    assert singleton.get(connID + 'Type') in ('direct', 'dbproxy')
    
    #decode any bytestrings passed in as querystring or within arglist into unicode strings
    #if isinstance(queryString, str):
        #decode into a unicode string
    #    queryString = queryString.decode('utf-8')
    #decode string args as well
    #for i in xrange(len(argList)):
    #    if isinstance(argList[i], str):
    #        argList[i] = argList[i].decode('utf-8')
    
    #some sanity checks
    if useCache and queryString.lstrip()[0:6].upper() != 'SELECT':
        raise Exception("useCache can only be set for SELECT queries. Failed query was: %s" % queryString)

    #make sure argList ends up being a tuple
    if not isinstance(argList, tuple):
        argList = tuple(argList)
    
    log.msg(u"Executing query: \"%s\"; Args: %s" % (queryString, argList,),
            lvl=printQuery and 'a' or 'd2', ss='ss_db')

    if     singleton.get('core', strict=False) \
       and singleton.get('core').getPlatformSetting('castdaemon_dbcache_interval', strict=False) == 0:
        #we are running as the castdaemon and caching is disabled
        useCache = False #override any True value

    #first, see if we are to use the memcache and try to pull the item from it if so
    if useCache:
        #use the query string itself, along with the args list and fetch mode, as the hash key
        cacheHashKey = _produceCacheHashKey(queryString, argList, fetch)
        d = memcache.retrieve(cacheHashKey)
    else:
        cacheHashKey = ""
        d = defer.succeed(None)
    d.addCallback(_cbExecute, queryString, argList, fetch, connID, useCache, cacheExpireTime, printQuery,
                  cacheHashKey, _alreadyInBacklog, many)
    return d
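#Example (hedged sketch): a cached SELECT, and an INSERT that fetches the new row's ID.
# Table and column names are illustrative.
#
#    d = execute("SELECT id, name FROM example_table WHERE active = %s", (1,),
#        fetch='am', useCache=True, cacheExpireTime=300)
#
#    d = execute("INSERT INTO example_table (name) VALUES (%s)", ('widget',), fetch='lid')
#    d.addCallback(lambda newID: log.msg("inserted row %i" % newID))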
def callProc(procName, inArgList=(), fetch='N', connID='dbMetastore', useCache=False,
cacheExpireTime=consts.MEMCACHED_DEFAULT_EXPIRE_PERIOD, printQuery=False):
    """
    I execute a stored procedure on the database, and optionally trigger a callback function to handle
    the results.
    
    How it works:
        - Some other code calls callProc()
        - processingCbFunc is called in database pool thread context to query the database, via runInteraction
        - runInteraction returns the requested data as a deferred

    @param procName: The name of the stored procedure to call
    @type procName: str
    
    @param inArgList: The arguments to the stored procedure. INOUT args not supported currently.
    @type inArgList: tuple
    
    @param fetch: How to fetch the data, one of the following:
        - N: Don't fetch anything back
        - o: Fetch one (as a tuple)
        - om: Fetch one as a map
        - a: Fetch All (as a tuple)
        - am: Fetch All as a map
    @type fetch: str
    
    @param printQuery: Set to True to print the query to be executed. False by default.    
    
    @return: A deferred that is triggered when the database operation is complete
    """
    assert isinstance(procName, basestring)
    assert isinstance(inArgList, tuple)
    assert fetch in ('N', 'o', 'om', 'a', 'am')
    assert singleton.get(connID + 'Type') in ('direct', 'dbproxy')
    
    #decode any byte strings passed in as the proc name or within inArgList into unicode strings
    if isinstance(procName, str):
        procName = procName.decode('utf-8')
    #inArgList is a tuple (asserted above), which cannot be mutated in place, so build a new tuple
    inArgList = tuple(arg.decode('utf-8') if isinstance(arg, str) else arg for arg in inArgList)
    
    log.msg(u"Processing stored proc call: \"%s\"; InArgs: %s" % (procName, inArgList,),
            lvl=printQuery and 'a' or 'd2', ss='ss_db')

    if     singleton.get('core', strict=False) \
       and singleton.get('core').getPlatformSetting('castdaemon_dbcache_interval', strict=False) == 0:
        #we are running as the castdaemon and caching is disabled
        useCache = False #override any True value

    #first, see if we are to use the memcache and try to pull the item from it if so
    if useCache:
        #use the proc name, along with the args list and fetch mode, as the hash key
        cacheHashKey = _produceCacheHashKey(procName, inArgList, fetch)
        d = memcache.retrieve(cacheHashKey)
    else:
        cacheHashKey = ""
        d = defer.succeed(None)
    d.addCallback(_cbCallProc, procName, inArgList, fetch, connID, useCache, cacheExpireTime, printQuery,
                  cacheHashKey)
    return d
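#Example (hedged sketch): calling a stored procedure and fetching all result rows as dicts.
# The proc name and argument are illustrative.
#
#    d = callProc('sp_get_user_sessions', (42,), fetch='am')
#    d.addCallback(lambda rows: log.msg("got %i rows" % len(rows)))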