Example #1
from apscheduler.threadpool import ThreadPool
from nose.tools import eq_


def test_threadpool_maxthreads():
    # max_threads is clamped to at least core_threads, and to at least 1
    pool = ThreadPool(core_threads=2, max_threads=1)
    eq_(pool.max_threads, 2)

    pool = ThreadPool(core_threads=2, max_threads=3)
    eq_(pool.max_threads, 3)

    pool = ThreadPool(core_threads=0, max_threads=0)
    eq_(pool.max_threads, 1)
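
A minimal sketch of the same clamping rule outside the test harness, assuming the apscheduler 2.x ThreadPool exercised above:

from apscheduler.threadpool import ThreadPool

# core_threads wins when max_threads is set lower than it
pool = ThreadPool(core_threads=4, max_threads=2)
print(pool.max_threads)  # 4
pool.shutdown()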
Example #2
    def configure(self, gconfig={}, **options):
        """
        Reconfigures the scheduler with the given options. Can only be done when the scheduler isn't running.
        """
        if self.running:
            raise SchedulerAlreadyRunningError

        # Set general options
        config = combine_opts(gconfig, 'apscheduler.', options)
        self.misfire_grace_time = int(config.pop('misfire_grace_time', 1))
        self.coalesce = asbool(config.pop('coalesce', True))
        self.daemonic = asbool(config.pop('daemonic', True))
        self.standalone = asbool(config.pop('standalone', False))

        # Configure the thread pool
        if 'threadpool' in config:
            self._threadpool = maybe_ref(config['threadpool'])
        else:
            threadpool_opts = combine_opts(config, 'threadpool.')
            self._threadpool = ThreadPool(**threadpool_opts)

        # Configure job stores
        jobstore_opts = combine_opts(config, 'jobstore.')
        jobstores = {}
        for key, value in jobstore_opts.items():
            store_name, option = key.split('.', 1)
            opts_dict = jobstores.setdefault(store_name, {})
            opts_dict[option] = value

        for alias, opts in jobstores.items():
            classname = opts.pop('class')
            cls = maybe_ref(classname)
            jobstore = cls(**opts)
            self.add_jobstore(jobstore, alias, True)
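
A rough usage sketch for this configure() method, assuming it sits on an APScheduler 2.x-style Scheduler; the option names follow the prefixes the code above strips (apscheduler., threadpool., jobstore.), and the RAMJobStore reference is only an illustrative jobstore choice:

from apscheduler.scheduler import Scheduler

sched = Scheduler()
sched.configure({
    'apscheduler.misfire_grace_time': 30,
    'apscheduler.coalesce': True,
    'apscheduler.threadpool.core_threads': 5,
    'apscheduler.threadpool.max_threads': 10,
    'apscheduler.jobstore.default.class':
        'apscheduler.jobstores.ram_store:RAMJobStore',
})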
Example #3
from threading import Event
from time import sleep

from nose.tools import assert_raises, eq_

from apscheduler.threadpool import ThreadPool


def test_threadpool():
    # with keepalive=0, threads beyond core_threads exit as soon as they run out of work
    pool = ThreadPool(core_threads=2, keepalive=0)
    event1 = Event()
    event2 = Event()
    event3 = Event()
    pool.submit(event1.set)
    pool.submit(event2.set)
    pool.submit(event3.set)
    event1.wait(1)
    event2.wait(1)
    event3.wait(1)
    assert event1.isSet()
    assert event2.isSet()
    assert event3.isSet()
    sleep(0.3)
    eq_(repr(pool), '<ThreadPool at %x; threads=2/20>' % id(pool))

    pool.shutdown()
    eq_(repr(pool), '<ThreadPool at %x; threads=0/20>' % id(pool))

    # Make sure double shutdown is ok
    pool.shutdown()

    # Make sure one can't submit tasks to a thread pool that has been shut down
    assert_raises(RuntimeError, pool.submit, event1.set)
Example #4
from threading import Event
from time import sleep

from nose.tools import eq_

from apscheduler.threadpool import ThreadPool


def test_threadpool_nocore():
    pool = ThreadPool(keepalive=0)
    event = Event()
    pool.submit(event.set)
    event.wait(1)
    assert event.isSet()
    sleep(1)
    eq_(repr(pool), '<ThreadPool at %x; threads=0/20>' % id(pool))
Example #5
    def prepare_scheduler(self):
        self.scheduler = LogshipperScheduler(
            threadpool=ThreadPool(max_threads=len(self.logfile_configs)))
Example #6
                jirasync.sync_issues(accounts, au.index)
    except (OperationalError, InvalidRequestError, StatementError) as e:
        app.logger.exception("Database error processing accounts %s, cleaning up session.", accounts)
        db.session.remove()
        store_exception("scheduler-audit-changes", None, e)


def _clear_old_exceptions():
    print("Clearing out exceptions that have an expired TTL...")
    clear_old_exceptions()
    print("Completed clearing out exceptions that have an expired TTL.")


pool = ThreadPool(
    core_threads=app.config.get('CORE_THREADS', 25),
    max_threads=app.config.get('MAX_THREADS', 30),
    keepalive=0
)
scheduler = Scheduler(
    standalone=True,
    threadpool=pool,
    coalesce=True,
    misfire_grace_time=30
)


def setup_scheduler():
    """Sets up the APScheduler"""
    log = logging.getLogger('apscheduler')

    try:
Example #7
    find_iamgroup_changes(account)
    app.logger.info("Account {} is done with IAMGROUP".format(account))
    find_iamrole_changes(account)
    app.logger.info("Account {} is done with IAMROLE".format(account))
    find_keypair_changes(account)
    app.logger.info("Account {} is done with KEYPAIR".format(account))
    find_sns_changes(account)
    app.logger.info("Account {} is done with SNS".format(account))
    time2 = time.time()
    app.logger.info('Run Account %s took %0.1f s' % (account, (time2-time1)))

from apscheduler.threadpool import ThreadPool
from apscheduler.scheduler import Scheduler
import traceback
import time
pool = ThreadPool(core_threads=25, max_threads=30, keepalive=0)
scheduler = Scheduler(standalone=True, threadpool=pool, coalesce=True, misfire_grace_time=30)
interval = 15


def setup_scheduler():
    """Sets up the APScheduler"""
    log = logging.getLogger('apscheduler')
    log.setLevel(app.config.get('LOG_LEVEL'))
    log.addHandler(handler)

    try:
        accounts = Account.query.filter(Account.third_party==False).filter(Account.active==True).all()
        accounts = [account.name for account in accounts]
        for account in accounts:
            print "Scheduler adding account {}".format(account)