Example #1
    def schedule_add_activity(self, executionDate: str, username: str,
                              password: str, userId: str, courtId: str,
                              fromDate: str, toDate: str):
        # note: this creates (and never shuts down) a new scheduler on every
        # call; a single shared scheduler would normally be preferred
        scheduler = BackgroundScheduler()
        scheduler.add_executor('processpool')

        date = dateutil.parser.isoparse(executionDate)
        job = scheduler.add_job(
            self.get_token_and_create_activity,
            'date',
            run_date=date,
            args=[username, password, userId, courtId, fromDate, toDate])
        scheduler.start()
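
A minimal standalone sketch of the same 'date'-trigger pattern for comparison (the send_reminder callback and the five-second delay are illustrative, not part of the example above):

import time
from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler

def send_reminder(message):
    print('reminder at %s: %s' % (datetime.now(), message))

scheduler = BackgroundScheduler()
scheduler.start()
# a 'date' trigger fires exactly once at run_date, then the job is discarded
scheduler.add_job(send_reminder, 'date',
                  run_date=datetime.now() + timedelta(seconds=5),
                  args=['hello'])
time.sleep(6)
scheduler.shutdown()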
Example #2
class Filament(object):
    """Filament initialization and execution engine.

    Filaments are lightweight Python modules which run
    on top of Fibratus. They are often used to enrich/extend the
    functionality of Fibratus by performing any type of logic
    (aggregations, groupings, filters, counters, etc) on the
    kernel event stream.

    """
    def __init__(self):
        """Builds a new instance of the filament.

        Attributes:
        ----------

        filament_module: module
            module which contains the filament logic
        """
        self._filament_module = None
        self._name = None
        self._filters = []
        self._cols = []
        self._tabular = None
        self._limit = 10
        self._interval = 1
        self._sort_by = None
        self._sort_desc = True
        self._logger = None
        self._ansi_term = AnsiTerm()
        self.scheduler = BackgroundScheduler()

    def load_filament(self, name):
        """Loads the filament module.

        Finds and loads the Python module which
        holds the filament logic. It also looks up
        some essential filament methods and raises an error
        if they can't be found.

        Parameters
        ----------
        name: str
            name of the filament to load

        """
        self._name = name
        Filament._assert_root_dir()
        filament_path = self._find_filament_path(name)
        if filament_path:
            loader = SourceFileLoader(name, filament_path)
            self._filament_module = loader.load_module()
            sys.path.append(FILAMENTS_DIR)
            doc = inspect.getdoc(self._filament_module)
            if not doc:
                raise FilamentError('Please provide a short '
                                    'description for the filament')

            on_next_kevent = self._find_filament_func('on_next_kevent')
            if on_next_kevent:
                if self._num_args(on_next_kevent) != 1:
                    raise FilamentError('Missing one argument on_next_kevent '
                                        'method on filament')
                self._initialize_funcs()
            else:
                raise FilamentError('Missing required on_next_kevent '
                                    'method on filament')
        else:
            raise FilamentError('%s filament not found' % name)

    def _initialize_funcs(self):
        """Setup the filament modules functions.

        Functions
        ---------

        set_filter: func
            accepts the comma separated list of kernel events
            for whose the filter should be applied
        set_interval: func
            establishes the fixed repeating interval in seconds
        columns: func
            configure the column set for the table
        add_row: func
            adds a new row to the table
        sort_by: func
            sorts the table by specific column
        """

        def set_filter(*args):
            self._filters = args
        self._filament_module.set_filter = set_filter

        def set_interval(interval):
            if not type(interval) is int:
                raise FilamentError('Interval must be an integer value')
            self._interval = interval
        self._filament_module.set_interval = set_interval

        def columns(cols):
            if not isinstance(cols, list):
                raise FilamentError('Columns must be a list, '
                                    '%s found' % type(cols))
            self._cols = cols
            self._tabular = Tabular(self._cols)
            self._tabular.padding_width = 10
            self._tabular.junction_char = '|'

        def add_row(row):
            if not isinstance(row, list):
                raise FilamentError('Expected list type for the row, found %s'
                                    % type(row))
            self._tabular.add_row(row)

        def sort_by(col, sort_desc=True):
            if len(self._cols) == 0:
                raise FilamentError('Expected at least 1 column but 0 found')
            if col not in self._cols:
                raise FilamentError('%s column does not exist' % col)
            self._sort_by = col
            self._sort_desc = sort_desc

        def limit(l):
            if len(self._cols) == 0:
                raise FilamentError('Expected at least 1 column but 0 found')
            if not type(l) is int:
                raise FilamentError('Limit must be an integer value')
            self._limit = l

        def title(text):
            self._tabular.title = text

        self._filament_module.columns = columns
        self._filament_module.title = title
        self._filament_module.sort_by = sort_by
        self._filament_module.limit = limit
        self._filament_module.add_row = add_row
        self._filament_module.render_tabular = self.render_tabular

        on_init = self._find_filament_func('on_init')
        if on_init and self._zero_args(on_init):
            self._filament_module.on_init()
        if self._find_filament_func('on_interval'):
            self.scheduler.add_executor(ThreadPoolExecutor(max_workers=4))
            self.scheduler.start()

            def on_interval():
                try:
                    self._filament_module.on_interval()
                except Exception:
                    self._logger.error('Unexpected error on interval elapsed %s'
                                       % traceback.format_exc())
            # the interval must be passed to the trigger itself: trigger kwargs
            # are ignored when a trigger instance is supplied to add_job
            self.scheduler.add_job(on_interval,
                                   IntervalTrigger(seconds=self._interval),
                                   max_instances=4,
                                   misfire_grace_time=60)
        if len(self._cols) > 0:
            try:
                self._ansi_term.setup_console()
            except TermInitializationError:
                panic('fibratus run: ERROR - console initialization failed')

    def do_output_accessors(self, outputs):
        """Creates the filament's output accessors.

        Parameters
        ----------

        outputs: dict
            outputs initialized from the configuration
            descriptor
        """
        for name, output in outputs.items():
            setattr(self._filament_module, name, OutputAccessor(output))

    def on_next_kevent(self, kevent):
        try:
            self._filament_module.on_next_kevent(ddict(kevent))
        except Exception as e:
            self._logger.error('Unexpected filament error %s' % e)

    def render_tabular(self):
        """Renders the table on the console.
        """
        if len(self._cols) > 0:
            tabular = self._tabular.get_string(start=1, end=self._limit)
            if self._sort_by:
                tabular = self._tabular.get_string(start=1, end=self._limit,
                                                   sortby=self._sort_by,
                                                   reversesort=self._sort_desc)
            self._tabular.clear_rows()
            self._ansi_term.write_output(tabular)

    def close(self):
        on_stop = self._find_filament_func('on_stop')
        if on_stop and self._zero_args(on_stop):
            self._filament_module.on_stop()
        if self.scheduler.running:
            self.scheduler.shutdown()
        self._ansi_term.restore_console()

    @classmethod
    def exists(cls, filament):
        Filament._assert_root_dir()
        return os.path.exists(os.path.join(FILAMENTS_DIR, '%s.py' % filament))

    @classmethod
    def list_filaments(cls):
        Filament._assert_root_dir()
        filaments = {}
        paths = [os.path.join(FILAMENTS_DIR, path) for path in os.listdir(FILAMENTS_DIR)
                 if path.endswith('.py')]
        for path in paths:
            filament_name = os.path.basename(path)[:-3]
            loader = SourceFileLoader(filament_name, path)
            filament = loader.load_module()
            filaments[filament_name] = inspect.getdoc(filament)
        return filaments

    @classmethod
    def _assert_root_dir(cls):
        if not os.path.exists(FILAMENTS_DIR):
            panic('fibratus run: ERROR - %s path does not exist.' % FILAMENTS_DIR)

    @property
    def filters(self):
        return self._filters

    @property
    def logger(self):
        return self._logger

    @logger.setter
    def logger(self, logger):
        self._logger = logger

    @property
    def filament_module(self):
        return self._filament_module

    @property
    def name(self):
        return self._name

    def _find_filament_func(self, func_name):
        """Finds the function in the filament module.

        Parameters
        ----------

        func_name: str
            the name of the function
        """
        functions = inspect.getmembers(self._filament_module, predicate=inspect.isfunction)
        return next(iter([func for name, func in functions if name == func_name]), None)

    def _find_filament_path(self, filament_name):
        """Resolves the filament full path from the name

        Parameters
        ----------

        filament_name: str
            the name of the filament whose path is about to be resolved
        """
        return next(iter([os.path.join(FILAMENTS_DIR, filament) for filament in os.listdir(FILAMENTS_DIR)
                    if filament.endswith('.py') and filament_name == filament[:-3]]), None)

    def _num_args(self, func):
        # getargspec was removed in Python 3.11; getfullargspec is a drop-in here
        return len(inspect.getfullargspec(func).args)

    def _zero_args(self, func):
        return self._num_args(func) == 0
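
Based on the functions the engine injects above (set_filter, set_interval, columns, sort_by, limit, add_row, render_tabular), a minimal filament module might look like the following sketch; the event names and fields are illustrative:

"""Counts kernel events by name (illustrative filament)."""

def on_init():
    set_filter('CreateProcess', 'CreateThread')
    set_interval(1)
    columns(['Kevent', 'Count'])
    sort_by('Count')
    limit(10)

def on_next_kevent(kevent):
    # aggregate the stream here; add_row feeds the table rendered on_interval
    add_row([kevent.get('name', 'unknown'), 1])

def on_interval():
    render_tabular()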
Example #3
class Filament():
    """Filament initialization and execution engine.

    Filaments are lightweight Python modules which run
    on top of Fibratus. They are often used to enrich/extend the
    functionality of Fibratus by performing any type of logic
    (aggregations, groupings, filters, counters, etc) on the
    kernel event stream.

    """

    def __init__(self):
        self._filament = None
        self._filters = []
        self._tabular = None
        self._cols = []
        self._limit = 10
        self._interval = 1
        self._sort_by = None
        self._sort_desc = True
        self.ansi_term = AnsiTerm()
        self.scheduler = BackgroundScheduler()
        self.term_initialized = False
        self._on_stop = None

    def load_filament(self, name):
        Filament._assert_root_dir()
        [filament_path] = [os.path.join(FILAMENTS_DIR, filament) for filament in os.listdir(FILAMENTS_DIR)
                           if filament.endswith('.py') and name == filament[:-3]] or [None]
        if filament_path:
            loader = SourceFileLoader(name, filament_path)
            self._filament = loader.load_module()
            # check for required methods
            # on the filament module
            doc = inspect.getdoc(self._filament)
            if not doc:
                raise FilamentError("Please provide a short description for the filament")

            [on_next_kevent] = self._find_func('on_next_kevent')
            if on_next_kevent:
                args_spec = inspect.getfullargspec(on_next_kevent)
                if len(args_spec.args) != 1:
                    raise FilamentError('Missing one argument on_next_kevent method on filament')
            else:
                raise FilamentError('Missing required on_next_kevent method on filament')
        else:
            raise FilamentError('%s filament not found' % name)

    def initialize_filament(self):
        if self._filament:
            def set_filter(*args):
                self._filters = args
            self._filament.set_filter = set_filter

            def set_interval(interval):
                if not type(interval) is int:
                    raise FilamentError('Interval must be an integer value')
                self._interval = interval
            self._filament.set_interval = set_interval

            def columns(cols):
                if not isinstance(cols, list):
                    raise FilamentError('Columns must be a list, %s found' % type(cols))
                self._cols = cols
                self._tabular = PrettyTable(self._cols)
                self._tabular.padding_width = 10
                self._tabular.junction_char = '|'

            def sort_by(col, sort_desc=True):
                if len(self._cols) == 0:
                    raise FilamentError('Expected at least 1 column but 0 found')
                if col not in self._cols:
                    raise FilamentError('%s column does not exist' % col)
                self._sort_by = col
                self._sort_desc = sort_desc

            def limit(limit):
                if len(self._cols) == 0:
                    raise FilamentError('Expected at least 1 column but 0 found')
                if not type(limit) is int:
                    raise FilamentError('Limit must be an integer value')
                self._limit = limit

            def title(text):
                self._tabular.title = text

            def add_row(row):
                if not isinstance(row, list):
                    raise FilamentError('Expected list type for the row')
                self._tabular.add_row(row)

            self._filament.columns = columns
            self._filament.title = title
            self._filament.sort_by = sort_by
            self._filament.limit = limit
            self._filament.add_row = add_row
            self._filament.render_tabular = self.render_tabular

            # call filaments methods if defined
            [on_init] = self._find_func('on_init')
            if on_init:
                if len(inspect.getfullargspec(on_init).args) == 0:
                    self._filament.on_init()

            [on_stop] = self._find_func('on_stop')
            if on_stop:
                if len(inspect.getfullargspec(on_stop).args) == 0:
                    self._on_stop = on_stop

            [on_interval] = self._find_func('on_interval')
            if on_interval:
                self.scheduler.add_executor(ThreadPoolExecutor(max_workers=8))
                self.scheduler.start()
                self.scheduler.add_job(self._filament.on_interval,
                                       'interval',
                                       seconds=self._interval, max_instances=8,
                                       misfire_grace_time=60)

    def render_tabular(self):
        if len(self._cols) > 0:
            tabular = self._tabular.get_string(start=1, end=self._limit)
            if self._sort_by:
                tabular = self._tabular.get_string(start=1, end=self._limit,
                                                   sortby=self._sort_by,
                                                   reversesort=self._sort_desc)
            if not self.term_initialized:
                self.term_initialized = True
                self.ansi_term.init_console()
            self._tabular.clear_rows()
            self.ansi_term.cls()
            self.ansi_term.write(tabular)

    def process(self, kevent):
        self._filament.on_next_kevent(kevent)

    def close(self):
        if self._on_stop:
            self._on_stop()
        if self.scheduler.running:
            self.scheduler.shutdown()
        self.ansi_term.restore_console()

    @classmethod
    def exists(cls, filament):
        Filament._assert_root_dir()
        return os.path.exists(os.path.join(FILAMENTS_DIR, '%s.py' % filament))

    @classmethod
    def list_filaments(cls):
        Filament._assert_root_dir()
        filaments = {}
        paths = [os.path.join(FILAMENTS_DIR, path) for path in os.listdir(FILAMENTS_DIR)
                 if path.endswith('.py')]
        for path in paths:
            filament_name = os.path.basename(path)[:-3]
            loader = SourceFileLoader(filament_name, path)
            filament = loader.load_module()
            filaments[filament_name] = inspect.getdoc(filament)
        return filaments

    @property
    def filters(self):
        return self._filters

    @classmethod
    def _assert_root_dir(cls):
        if not os.path.exists(FILAMENTS_DIR):
            IO.write_console('fibratus run: ERROR - %s path does not exist.' % FILAMENTS_DIR)
            sys.exit(0)

    def _find_func(self, func_name):
        functions = inspect.getmembers(self._filament, predicate=inspect.isfunction)
        return [func for name, func in functions if name == func_name] or [None]
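
Note the [x] = candidates or [None] idiom used by load_filament and _find_func: it unpacks a single match, falls back to None when the list is empty, and raises ValueError if more than one file matches. A two-line illustration:

matches = [name for name in ('foo.py', 'bar.py') if name == 'foo.py']
[first] = matches or [None]  # 'foo.py' here; None if empty; ValueError on multiple matches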
Example #4
import os, time
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler

def myjob_1():
	print('myjob_1: %s' % datetime.now())

def myjob_2():
	print('myjob_2: %s' % datetime.now())

if __name__ == '__main__':
	scheduler = BackgroundScheduler()
	# each executor needs a unique alias; adding a second executor under the
	# implicit 'default' alias raises ValueError
	scheduler.add_executor('debug', alias='debug')
	scheduler.add_executor('threadpool', alias='workers')
	# scheduler.add_executor('processpool', alias='processes')
	scheduler.add_job(myjob_1, 'interval', seconds=2)
	scheduler.add_job(myjob_2, 'interval', seconds=5)
	scheduler.start()

	jobs = scheduler.get_jobs()
	print(jobs)

	try:
		while True:
			time.sleep(5)
	except (KeyboardInterrupt, SystemExit):
		scheduler.shutdown()
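
When several executors are registered under distinct aliases, a job opts into one through add_job's executor parameter; jobs that omit it run on the default executor. A one-line sketch using the alias introduced above:

	scheduler.add_job(myjob_2, 'interval', seconds=5, executor='workers')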
 
Example #5
import logging
import time
from kafka import KafkaConsumer
from kafka import KafkaProducer
from kafka.errors import KafkaError, KafkaTimeoutError
import tweepy
from tweepy.auth import OAuthHandler
from tweepy.error import TweepError
from flask import Flask, request, jsonify
from apscheduler.schedulers.background import BackgroundScheduler

logging.basicConfig()
logger = logging.getLogger("tweet_producer")
logger.setLevel(logging.DEBUG)

scheduler = BackgroundScheduler()
scheduler.add_executor("threadpool")
scheduler.start()

app = Flask(__name__)
app.config.from_envvar('CONFIG_FILE')
watchlist = set(["aapl"])

kafka_broker = app.config["KAFKA_BROKER"]

consumer_key = app.config["CONSUMER_KEY"]
consumer_secret = app.config["CONSUMER_SECRET"]
access_token = app.config["ACCESS_TOKEN"]
access_secret = app.config["ACCESS_SECRET"]
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
tw_api = tweepy.API(auth)
Example #6
from apscheduler.jobstores.redis import RedisJobStore
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.events import EVENT_JOB_MISSED
import datetime
import time
import threading
import decorator_libs

def func(x,y,d):
    time.sleep(5)
    print(f'{d} {x} + {y} = {x + y}')

scheduler = BackgroundScheduler()
# scheduler.add_job(func,'date', run_date=datetime.datetime(2021, 5, 19, 10, 53, 20), args=(5, 6,))
# print(scheduler._executors)
scheduler.add_executor(ThreadPoolExecutor(1))
# scheduler.add_jobstore(RedisJobStore(jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times',db=6))
# scheduler.add_jobstore()


def job_miss(event):
    # print(event)
    # print(event.__dict__)
    # print(scheduler.get_job(event.job_id))
    # note: stock APScheduler's JobExecutionEvent exposes job_id, jobstore and
    # scheduled_run_time; the function/function_args attributes used here rely
    # on a customized scheduler build
    print(event.function, event.function_kwargs, event.function_args)


scheduler.add_listener(job_miss, EVENT_JOB_MISSED)
scheduler.start()

with decorator_libs.TimerContextManager():
Example #7
class Schedule():
    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.client = MongoClient(self.host, self.port)
        self.sched = BackgroundScheduler()
        self.jobstore = MongoDBJobStore(collection='scanner', database='dns_scanner', client=self.client)
        self.sched.add_jobstore(self.jobstore)
        self.sched.add_executor(ThreadPoolExecutor(10))
        self.mongomanager = MongoManager(self.client)

        self.sched.start()
        self.joblist = []

        self.redisquere = RedisQueue("dns_scanner")
        logging.info("Schedule init success")
        print "init success"

    def rm_all_task(self):
        self.jobstore.remove_all_jobs()

    def rm_task(self, id):
        if id in self.joblist:
            self.jobstore.remove_job(id)

    def run(self):
        while True:
            try:
                cfgstr = self.redisquere.popmsg()
                if not cfgstr or not str(cfgstr).startswith("{"):
                    time.sleep(10)
                    continue
                logging.info(cfgstr)
                conf = json.loads(cfgstr)
                taskname = conf.get("taskname", "")
                taskid = conf.get("taskid", "")
                interval = conf.get("interval", {})
                seconds = interval.get("seconds", 0)
                minutes = interval.get("minutes", 0)
                hours = interval.get("hours", 0)
                days = interval.get("days", 0)
                weeks = interval.get("weeks", 0)

                if weeks + days + hours + minutes + seconds < 1:
                    logging.error(taskname + "-->>interval error")
                    time.sleep(10)
                    continue
                logging.info(taskname + " add job")
                taskinfo = {
                    "taskname": taskname,
                    "_id": taskid,
                    "conf": conf,
                    "create_time": time.time(),
                    "times":0,
                    "process":0
                }
                if not self.mongomanager.insert(taskinfo):
                    logging.error(taskname + "-->>in mongo error")
                    time.sleep(10)
                    continue
                logging.info(taskname + "-->>in mongo success")
                job = self.sched.add_job(runjobs, "interval", seconds=seconds, minutes=minutes, hours=hours, days=days,
                                         weeks=weeks, next_run_time=datetime.datetime.now(),args=[taskid,self.host, self.port])
                self.joblist.append(job)
                time.sleep(70)
            except Exception as e:
                logging.error(str(e))
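
runjobs itself is not shown. Because MongoDBJobStore persists jobs by a textual reference to the callable, it must be an importable module-level function; a hypothetical stub of the expected shape:

def runjobs(taskid, host, port):
    # hypothetical placeholder: the real implementation performs the scan for taskid
    logging.info("running task %s against %s:%s" % (taskid, host, port))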
Example #8
class TestSchedulerListener(unittest.TestCase):

    def setUp(self):
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_jobstore(MemoryJobStore(), alias='in_memory')
        self.scheduler.add_executor(ThreadPoolExecutor(1), alias='secondary_executor')

        self.scheduler.start()

    def tearDown(self):
        self.scheduler.shutdown()

    def test_watcher_injection(self):
        watcher = SchedulerWatcher(self.scheduler)

        self.assertEqual(watcher.scheduler, self.scheduler, 'Watcher should keep a reference to the scheduler')
        self.assertEqual(1, len(self.scheduler._listeners), 'Watcher should inject itself as a scheduler listener')

        self.assertEqual(
            self.scheduler._listeners[0][1], EVENT_ALL, 'Watcher should register itself to watch all events'
        )

    def test_scheduler_inspection(self):
        self.scheduler.add_job(lambda: 0, jobstore='in_memory', trigger='interval', minutes=60, id='test_job')

        watcher = SchedulerWatcher(self.scheduler)

        self.assertEqual('running', watcher.scheduler_info['state'], 'Watcher should inspect scheduler status')
        self.assertEqual(
            str(self.scheduler.timezone),
            watcher.scheduler_info['timezone'],
            'Watcher should inspect scheduler timezone'
        )
        self.assertEqual(
            'BackgroundScheduler', watcher.scheduler_info['class'], 'Watcher should inspect scheduler class'
        )

        self.assertEqual(2, len(watcher.jobstores), 'Watcher should inspect all scheduler jobstores')
        self.assertIn('in_memory', watcher.jobstores, 'Watcher should have inspected the in_memory jobstore')

        self.assertEqual(2, len(watcher.executors), 'Watcher should inspect all scheduler executors')
        self.assertIn('secondary_executor', watcher.executors, 'Watcher should have inspected the secondary_executor')

        self.assertEqual(1, len(watcher.jobs), 'Watcher should inspect all jobs in scheduler on init')
        self.assertIn('test_job', watcher.jobs, 'Watcher should index jobs by id')

    def test_job_properties_on_add(self):
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_job(
            lambda x, y: x + y,
            id='added_job',
            name='Added job',
            jobstore='in_memory',
            trigger='interval',
            minutes=60,
            args=(1,),
            kwargs={'y': 2}
        )

        self.assertIn('added_job', watcher.jobs)

        job_properties = watcher.jobs['added_job']['properties']

        self.assertEqual('added_job', job_properties['id'], 'Job properties should have the job id')
        self.assertEqual('Added job', job_properties['name'], 'Job properties should have the job name')
        self.assertIn('trigger', job_properties, 'Job properties should have a representation of the trigger')
        self.assertEqual('in_memory', job_properties['jobstore'], 'Job properties should have the jobstore name')
        self.assertEqual('default', job_properties['executor'], 'Job properties should have the executor name')
        self.assertIn('lambda', job_properties['func'], 'Job properties should have the function string repr')
        self.assertIn('func_ref', job_properties, 'Job properties should have the function reference')
        self.assertEqual('(1,)', job_properties['args'], 'Job properties should have the job arguments')
        self.assertEqual("{'y': 2}", job_properties['kwargs'], 'Job properties should have the job keyword arguments')
        self.assertIn('pending', job_properties, 'Job properties should have the job pending status')
        self.assertFalse(job_properties['pending'], 'Job status should not be pending')
        self.assertIn('coalesce', job_properties, 'Job properties should have the job coalesce configuration')
        self.assertIn('next_run_time', job_properties, 'Job properties should have the next run time calculated')
        self.assertIn('misfire_grace_time', job_properties, 'Job properties should have the misfire grace time')
        self.assertIn('max_instances', job_properties, 'Job properties should have the max instances configuration')

    def test_job_inspection_matches_job_added_event(self):
        # We're going to add two jobs that should have the exact same properties, except for the id, in two different
        # stages of the usage: before the watcher is created and after we start watching for events.
        def job_function(x, y):
            return x + y
        next_run_time = datetime.now() + timedelta(hours=1)

        # Job that is added before the user calls us.
        self.scheduler.add_job(
            job_function,
            id='job_1',
            name='Added job',
            jobstore='in_memory',
            trigger='interval',
            minutes=60,
            args=(1,),
            kwargs={'y': 2},
            next_run_time=next_run_time
        )

        watcher = SchedulerWatcher(self.scheduler)

        # Job that gets added after we start watching.
        self.scheduler.add_job(
            job_function,
            id='job_2',
            name='Added job',
            jobstore='in_memory',
            trigger='interval',
            minutes=60,
            args=(1,),
            kwargs={'y': 2},
            next_run_time=next_run_time
        )

        self.assertEqual(2, len(watcher.jobs))

        job_1 = watcher.jobs['job_1']
        job_2 = watcher.jobs['job_2']

        for property_name in job_1['properties'].keys():
            # All properties, except the id, should match.
            if property_name == 'id':
                continue
            self.assertEqual(job_1['properties'][property_name], job_2['properties'][property_name])

    def test_all_events_have_a_processing_method(self):
        for event_name in list(SchedulerWatcher.apscheduler_events.values()):
            self.assertIn(event_name, dir(SchedulerWatcher))

    def test_job_execution_monitoring(self):
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_job(
            lambda: time.sleep(0.02),
            id='waiting_job',
            name='Waiting job',
            jobstore='in_memory',
            trigger='interval',
            seconds=0.2,
            next_run_time=datetime.now()
        )

        job_events = watcher.jobs['waiting_job']['events']

        self.assertEqual(1, len(job_events))
        self.assertEqual('job_added', job_events[0]['event_name'])
        time.sleep(0.05)
        self.assertEqual(3, len(job_events), 'Job execution needs to be tracked in job events')
        self.assertEqual(
            'job_submitted',
            job_events[1]['event_name'],
            'Job submission needs to be tracked in job events'
        )
        self.assertEqual('job_executed', job_events[2]['event_name'], 'Job execution needs to be tracked in job events')

        time.sleep(0.2)

        self.assertEqual(5, len(job_events), 'Subsequent executions get tracked')

    def test_job_failure_monitoring(self):
        watcher = SchedulerWatcher(self.scheduler)

        def fail():
            time.sleep(0.02)
            return 0 / 0

        self.scheduler.add_job(
            fail,
            id='failing_job',
            name='Failing job',
            jobstore='in_memory',
            trigger='interval',
            next_run_time=datetime.now(),
            minutes=60
        )

        failing_job_events = watcher.jobs['failing_job']['events']

        time.sleep(0.05)
        self.assertEqual(3, len(failing_job_events))
        self.assertEqual('job_error', failing_job_events[2]['event_name'])

    def test_scheduler_summary(self):
        watcher = SchedulerWatcher(self.scheduler)

        summary = watcher.scheduler_summary()

        self.assertEqual(sorted(['scheduler', 'jobs', 'executors', 'jobstores']), sorted(summary.keys()))

        self.assertEqual('running', summary['scheduler']['state'], 'scheduler_summary should have the scheduler status')
        self.assertEqual(2, len(summary['executors']), 'scheduler_summary should have the two added executors')
        self.assertEqual(2, len(summary['jobstores']), 'scheduler_summary should have the two added jobstores')
        self.assertEqual(0, len(summary['jobs']), 'scheduler_summary should have no jobs')

        self.scheduler.add_job(lambda: 0, id='job_1')

        summary = watcher.scheduler_summary()

        self.assertIn('job_1', summary['jobs'], 'scheduler_summary should have the added jobs in it')

        self.scheduler.remove_job('job_1')

        summary = watcher.scheduler_summary()
        self.assertIn('job_1', summary['jobs'], 'scheduler_summary should have all jobs in it, even if job was removed')

    def test_removed_jobs_are_only_flagged_as_removed(self):
        self.scheduler.add_job(lambda: 0, id='a_job')

        watcher = SchedulerWatcher(self.scheduler)

        self.assertIn('a_job', watcher.jobs)
        self.assertIsNone(watcher.jobs['a_job']['removed_time'])

        self.scheduler.remove_job('a_job')

        self.assertIn('a_job', watcher.jobs, 'removed jobs should be still tracked in the scheduler watcher')
        self.assertIsNotNone(watcher.jobs['a_job']['removed_time'], 'removed_time should be set')

    def test_modified_job_properties_are_tracked(self):
        self.scheduler.add_job(
            lambda x, y: x + y,
            id='a_job',
            name='A job',
            jobstore='in_memory',
            trigger='interval',
            minutes=60,
            args=(1,),
            kwargs={'y': 2}
        )

        watcher = SchedulerWatcher(self.scheduler)

        self.assertEqual(watcher.jobs['a_job']['modified_time'], watcher.jobs['a_job']['added_time'])

        next_run_time = watcher.jobs['a_job']['properties']['next_run_time'][0]

        self.scheduler.modify_job('a_job', name='A modified job', next_run_time=datetime.now() + timedelta(days=1))

        self.assertGreater(watcher.jobs['a_job']['modified_time'], watcher.jobs['a_job']['added_time'])
        self.assertEqual('A modified job', watcher.jobs['a_job']['properties']['name'])
        self.assertGreater(watcher.jobs['a_job']['properties']['next_run_time'][0], next_run_time)

    @patch('apschedulerui.watcher.SchedulerWatcher.notify_jobstore_event')
    def test_removing_a_jobstore_removes_all_jobs(self, mock_notify_jobstore_event):
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_job(lambda: 0, id='job_1', jobstore='in_memory', trigger='interval', minutes=60)
        self.scheduler.add_job(lambda: 0, id='job_2', jobstore='in_memory', trigger='interval', minutes=60)

        self.assertEqual(2, len(watcher.jobs))
        self.assertIsNone(watcher.jobs['job_1']['removed_time'], 'job_1 removed time should be None')
        self.assertEqual('in_memory', watcher.jobs['job_1']['properties']['jobstore'])

        self.scheduler.remove_jobstore('in_memory')

        mock_notify_jobstore_event.assert_called()

        self.assertEqual(2, len(watcher.jobs), 'The amount of jobs after removing a jobstore should not change')
        self.assertIsNotNone(watcher.jobs['job_1']['removed_time'], 'job_1 removed time should be set')
        self.assertIsNotNone(watcher.jobs['job_2']['removed_time'], 'job_2 removed time should be set')

    @patch('apschedulerui.watcher.SchedulerWatcher._repr_job')
    @patch('apschedulerui.watcher.SchedulerWatcher.notify_job_event')
    @patch('apschedulerui.watcher.SchedulerWatcher.notify_jobstore_event')
    def test_adding_a_jobstore_adds_all_jobs_in_it(self, mock_notify_jobstore_event, mock_notify_job_event, _):
        watcher = SchedulerWatcher(self.scheduler)

        jobstore = MemoryJobStore()

        jobstore.add_job(Job(scheduler=self.scheduler, id='job_1', next_run_time=datetime.now() + timedelta(days=1)))
        jobstore.add_job(Job(scheduler=self.scheduler, id='job_2', next_run_time=datetime.now() + timedelta(days=2)))

        self.assertEqual(0, len(watcher.jobs))

        self.scheduler.add_jobstore(jobstore, alias='in_memory_2')

        self.assertIn('in_memory_2', watcher.jobstores, 'Watcher should have the new jobstore tracked')
        self.assertEqual(2, len(watcher.jobs), 'Watcher should add all jobs in the newly added jobstore')
        self.assertTrue(all([job_id in watcher.jobs for job_id in ['job_1', 'job_2']]))
        self.assertEqual(2, mock_notify_job_event.call_count)
        mock_notify_jobstore_event.assert_called_once()

    @patch('apschedulerui.watcher.SchedulerWatcher.notify_job_event')
    def test_removing_all_jobs_flags_all_as_removed(self, mock_notify_job_event):
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_job(lambda: 0, id='job_1', jobstore='default', trigger='interval', minutes=60)
        self.scheduler.add_job(lambda: 0, id='job_2', jobstore='in_memory', trigger='interval', minutes=60)

        self.assertEqual(2, len(watcher.jobs))
        self.assertEqual(2, mock_notify_job_event.call_count)

        mock_notify_job_event.reset_mock()

        self.scheduler.remove_all_jobs()

        self.assertEqual(2, len(watcher.jobs), 'job count should not change after removing all jobs')
        self.assertEqual(2, mock_notify_job_event.call_count)

    @patch('apschedulerui.watcher.SchedulerWatcher.notify_executor_event')
    def test_adding_and_removing_executors(self, mock_notify_executor_event):
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_executor(ThreadPoolExecutor(), alias='new_executor')

        self.assertIn('new_executor', watcher.executors)
        mock_notify_executor_event.assert_called()

        mock_notify_executor_event.reset_mock()
        self.scheduler.remove_executor('new_executor')

        self.assertNotIn('new_executor', watcher.executors)
        mock_notify_executor_event.assert_called()

    def test_job_event_history_is_limited(self):
        watcher = SchedulerWatcher(self.scheduler, max_events_per_job=4)

        self.scheduler.add_job(lambda: 0, trigger='interval', seconds=0.01, id='recurrent_job')

        time.sleep(0.1)

        # recurrent_job should have been executed ~10 times now, generating ~20 events (submission + execution).
        self.assertEqual(
            watcher.max_events_per_job,
            len(watcher.jobs['recurrent_job']['events']),
            'job event history should be limited'
        )
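
The self-registration the first test checks for boils down to APScheduler's listener API; a minimal sketch of a watcher that subscribes to every event (stock apscheduler.events, illustrative class):

from apscheduler.events import EVENT_ALL

class MinimalWatcher:
    def __init__(self, scheduler):
        self.scheduler = scheduler
        scheduler.add_listener(self._process_event, EVENT_ALL)

    def _process_event(self, event):
        print(type(event).__name__, getattr(event, 'job_id', None))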
Example #9
class Filament():
    """Filament initialization and execution engine.

    Filaments are lightweight Python modules which run
    on top of Fibratus. They are often used to enrich/extend the
    functionality of Fibratus by performing any type of logic
    (aggregations, groupings, filters, counters, etc) on the
    kernel event stream.

    """
    def __init__(self):
        self._filament = None
        self._filters = []
        self._tabular = None
        self._cols = []
        self._limit = 10
        self._interval = 1
        self._sort_by = None
        self._sort_desc = True
        self.ansi_term = AnsiTerm()
        self.scheduler = BackgroundScheduler()
        self.term_initialized = False
        self._on_stop = None

    def load_filament(self, name):
        Filament._assert_root_dir()
        [filament_path] = [
            os.path.join(FILAMENTS_DIR, filament)
            for filament in os.listdir(FILAMENTS_DIR)
            if filament.endswith('.py') and name == filament[:-3]
        ] or [None]
        if filament_path:
            loader = SourceFileLoader(name, filament_path)
            self._filament = loader.load_module()
            # check for required methods
            # on the filament module
            doc = inspect.getdoc(self._filament)
            if not doc:
                raise FilamentError(
                    "Please provide a short description for the filament")

            [on_next_kevent] = self._find_func('on_next_kevent')
            if on_next_kevent:
                args_spec = inspect.getfullargspec(on_next_kevent)
                if len(args_spec.args) != 1:
                    raise FilamentError(
                        'Missing one argument on_next_kevent method on filament'
                    )
            else:
                raise FilamentError(
                    'Missing required on_next_kevent method on filament')
        else:
            raise FilamentError('%s filament not found' % name)

    def initialize_filament(self):
        if self._filament:

            def set_filter(*args):
                self._filters = args

            self._filament.set_filter = set_filter

            def set_interval(interval):
                if not type(interval) is int:
                    raise FilamentError('Interval must be an integer value')
                self._interval = interval

            self._filament.set_interval = set_interval

            def columns(cols):
                if not isinstance(cols, list):
                    raise FilamentError('Columns must be a list, %s found' %
                                        type(cols))
                self._cols = cols
                self._tabular = PrettyTable(self._cols)
                self._tabular.padding_width = 10
                self._tabular.junction_char = '|'

            def sort_by(col, sort_desc=True):
                if len(self._cols) == 0:
                    raise FilamentError(
                        'Expected at least 1 column but 0 found')
                if col not in self._cols:
                    raise FilamentError('%s column does not exist' % col)
                self._sort_by = col
                self._sort_desc = sort_desc

            def limit(limit):
                if len(self._cols) == 0:
                    raise FilamentError(
                        'Expected at least 1 column but 0 found')
                if not type(limit) is int:
                    raise FilamentError('Limit must be an integer value')
                self._limit = limit

            def title(text):
                self._tabular.title = text

            def add_row(row):
                if not isinstance(row, list):
                    raise FilamentError('Expected list type for the row')
                self._tabular.add_row(row)

            self._filament.columns = columns
            self._filament.title = title
            self._filament.sort_by = sort_by
            self._filament.limit = limit
            self._filament.add_row = add_row
            self._filament.render_tabular = self.render_tabular

            # call filaments methods if defined
            [on_init] = self._find_func('on_init')
            if on_init:
                if len(inspect.getfullargspec(on_init).args) == 0:
                    self._filament.on_init()

            [on_stop] = self._find_func('on_stop')
            if on_stop:
                if len(inspect.getfullargspec(on_stop).args) == 0:
                    self._on_stop = on_stop

            [on_interval] = self._find_func('on_interval')
            if on_interval:
                self.scheduler.add_executor(ThreadPoolExecutor(max_workers=8))
                self.scheduler.start()
                self.scheduler.add_job(self._filament.on_interval,
                                       'interval',
                                       seconds=self._interval,
                                       max_instances=8,
                                       misfire_grace_time=60)

    def render_tabular(self):
        if len(self._cols) > 0:
            tabular = self._tabular.get_string(start=1, end=self._limit)
            if self._sort_by:
                tabular = self._tabular.get_string(start=1,
                                                   end=self._limit,
                                                   sortby=self._sort_by,
                                                   reversesort=self._sort_desc)
            if not self.term_initialized:
                self.term_initialized = True
                self.ansi_term.init_console()
            self._tabular.clear_rows()
            self.ansi_term.cls()
            self.ansi_term.write(tabular)

    def process(self, kevent):
        self._filament.on_next_kevent(kevent)

    def close(self):
        if self._on_stop:
            self._on_stop()
        if self.scheduler.running:
            self.scheduler.shutdown()
        self.ansi_term.restore_console()

    @classmethod
    def exists(cls, filament):
        Filament._assert_root_dir()
        return os.path.exists(os.path.join(FILAMENTS_DIR, '%s.py' % filament))

    @classmethod
    def list_filaments(cls):
        Filament._assert_root_dir()
        filaments = {}
        paths = [
            os.path.join(FILAMENTS_DIR, path)
            for path in os.listdir(FILAMENTS_DIR) if path.endswith('.py')
        ]
        for path in paths:
            filament_name = os.path.basename(path)[:-3]
            loader = SourceFileLoader(filament_name, path)
            filament = loader.load_module()
            filaments[filament_name] = inspect.getdoc(filament)
        return filaments

    @property
    def filters(self):
        return self._filters

    @classmethod
    def _assert_root_dir(cls):
        if not os.path.exists(FILAMENTS_DIR):
            IO.write_console('fibratus run: ERROR - %s path does not exist.' %
                             FILAMENTS_DIR)
            sys.exit(0)

    def _find_func(self, func_name):
        functions = inspect.getmembers(self._filament,
                                       predicate=inspect.isfunction)
        return [func
                for name, func in functions if name == func_name] or [None]
Example #10
import json
import logging

from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR, EVENT_JOB_ADDED
from apscheduler.schedulers.background import BackgroundScheduler
from sqlalchemy.orm.exc import NoResultFound

from .db import engine, metadata, Session
from .models import JobResult

logger = logging.getLogger(__name__)

scheduler = BackgroundScheduler()
scheduler.add_jobstore('sqlalchemy', engine=engine, metadata=metadata)
scheduler.add_executor('processpool')


def on_submit(event):
    logger.debug(u'Job [%s] submitted', event.job_id)
    db_session = Session()
    job_result = JobResult(
        job_id=event.job_id,
        result=None,
        error=0,
        status=JobResult.Status.PENDING,
    )
    db_session.add(job_result)
    db_session.commit()
    Session.remove()
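
The snippet is cut off before the handler is wired up; with stock APScheduler the registration would presumably look like this (EVENT_JOB_SUBMITTED matches the handler's "submitted" wording):

from apscheduler.events import EVENT_JOB_SUBMITTED

scheduler.add_listener(on_submit, EVENT_JOB_SUBMITTED)
scheduler.start()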

Example #11
class TestWebServer(unittest.TestCase):
    def setUp(self):
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_job(lambda: 1,
                               id='a_job',
                               trigger='interval',
                               minutes=10)
        self.scheduler.start()

    def tearDown(self):
        self.scheduler.shutdown()

    def test_webserver_init(self):
        scheduler_server = SchedulerUI(self.scheduler)

        self.assertIsInstance(scheduler_server._scheduler_listener,
                              SchedulerWatcher)

        self.assertRaises(TypeError,
                          SchedulerUI,
                          self.scheduler,
                          operation_timeout=None)
        self.assertRaises(ValueError,
                          SchedulerUI,
                          self.scheduler,
                          operation_timeout=-1)

        self.assertRaises(TypeError,
                          SchedulerUI,
                          self.scheduler,
                          capabilities=set())

    @patch('flask.Flask.add_url_rule')
    def test_webserver_capabilities(self, mock_add_url_rule):
        SchedulerUI(self.scheduler)

        mock_add_url_rule.assert_called()
        base_call_count = mock_add_url_rule.call_count

        mock_add_url_rule.reset_mock()

        SchedulerUI(self.scheduler, capabilities={'pause_job': True})

        self.assertEqual(2 + base_call_count, mock_add_url_rule.call_count)

        mock_add_url_rule.reset_mock()
        SchedulerUI(self.scheduler, capabilities={'run_job': True})
        self.assertEqual(1 + base_call_count, mock_add_url_rule.call_count)

        mock_add_url_rule.reset_mock()
        SchedulerUI(self.scheduler, capabilities={'pause_scheduler': True})

        self.assertEqual(
            2 + base_call_count, mock_add_url_rule.call_count,
            'Web server should register scheduler pause and resume endpoints')

        mock_add_url_rule.reset_mock()
        SchedulerUI(self.scheduler, capabilities={'stop_scheduler': True})

        self.assertEqual(
            2 + base_call_count, mock_add_url_rule.call_count,
            'Web server should register scheduler stop and start endpoints')

        mock_add_url_rule.reset_mock()
        SchedulerUI(self.scheduler, capabilities={'remove_job': True})

        self.assertEqual(
            1 + base_call_count, mock_add_url_rule.call_count,
            'Web server should register the endpoint to remove a job')

    @patch('flask.Flask.send_static_file')
    def test_index_retrieval(self, mock_send_static_file):
        SchedulerUI(self.scheduler)._index('/any_path')

        mock_send_static_file.assert_called_with('index.html')

    @patch('flask.abort')
    @patch('apscheduler.schedulers.background.BackgroundScheduler.pause')
    def test_scheduler_commands_are_serialized(self, mock_pause, mock_abort):
        ui = SchedulerUI(self.scheduler, operation_timeout=0.01)

        with ui._scheduler_lock:
            # If we acquire the lock, every command we send to the web server should be aborted on lock acquire timeout.
            ui._pause_scheduler()

            mock_abort.assert_called()
            mock_pause.assert_not_called()

            ui._resume_scheduler()
            ui._stop_scheduler()
            ui._start_scheduler()
            ui._pause_job('a_job')
            ui._resume_job('a_job')
            ui._run_job('a_job')
            ui._remove_job('a_job')

            self.assertEqual(8, mock_abort.call_count)

    @patch('apscheduler.schedulers.background.BackgroundScheduler.pause')
    @patch('apscheduler.schedulers.background.BackgroundScheduler.resume')
    @patch('apscheduler.schedulers.background.BackgroundScheduler.shutdown')
    @patch('apscheduler.schedulers.background.BackgroundScheduler.start')
    def test_scheduler_requests(self, mock_start, mock_shutdown, mock_resume,
                                mock_pause):
        ui = SchedulerUI(self.scheduler)

        ui._pause_scheduler()
        mock_pause.assert_called()

        ui._resume_scheduler()
        mock_resume.assert_called()

        ui._stop_scheduler()
        mock_shutdown.assert_called()

        ui._start_scheduler()
        mock_start.assert_called()

    @patch('apscheduler.schedulers.background.BackgroundScheduler.remove_job')
    @patch('apscheduler.schedulers.background.BackgroundScheduler.resume_job')
    @patch('apscheduler.schedulers.background.BackgroundScheduler.pause_job')
    def test_job_requests(self, mock_pause_job, mock_resume_job,
                          mock_remove_job):
        ui = SchedulerUI(self.scheduler)

        ui._pause_job('a_job')
        mock_pause_job.assert_called()

        ui._resume_job('a_job')
        mock_resume_job.assert_called()

        ui._remove_job('a_job')
        mock_remove_job.assert_called()

    @patch('flask.abort')
    def test_missing_jobs_requests_are_aborted(self, mock_abort):
        ui = SchedulerUI(self.scheduler)

        ui._pause_job('non_existing_job')
        ui._resume_job('non_existing_job')
        ui._run_job('non_existing_job')

        self.assertEqual(3, mock_abort.call_count)

        mock_abort.reset_mock()

        response = ui._run_job(job_id=None)
        self.assertEqual(response.status_code, 404,
                         'Requests with missing job_id should fail')

    @patch('flask_socketio.SocketIO.run')
    def test_webserver_start(self, mock_run):
        ui = SchedulerUI(self.scheduler)

        self.assertEqual(0, len(ui._scheduler_listener.listeners))

        ui.start()

        self.assertEqual(1, len(ui._scheduler_listener.listeners))
        self.assertEqual(ui, ui._scheduler_listener.listeners[0],
                         'Webserver should register itself as listener')

        # SocketIO.run should be called by the web server thread on start.
        mock_run.assert_called_with(ui._web_server, host='0.0.0.0', port=5000)

    @patch('flask_socketio.SocketIO.emit')
    @patch('flask_socketio.SocketIO.run')
    def test_scheduler_events_are_emitted_to_clients(self, mock_run,
                                                     mock_emit):
        ui = SchedulerUI(self.scheduler)
        ui.start()

        mock_run.assert_called()

        # Pause scheduler.
        self.scheduler.pause()
        mock_emit.assert_called_once()
        self.assertEqual('scheduler_paused',
                         mock_emit.call_args[0][1]['event_name'])

        mock_emit.reset_mock()

        # Resume it.
        self.scheduler.resume()
        self.assertEqual('scheduler_resumed',
                         mock_emit.call_args[0][1]['event_name'])

        # Stop it.
        mock_emit.reset_mock()
        self.scheduler.shutdown()
        self.assertEqual('scheduler_shutdown',
                         mock_emit.call_args[0][1]['event_name'])

        # Start it again.
        mock_emit.reset_mock()
        self.scheduler.start()
        self.assertEqual('scheduler_started',
                         mock_emit.call_args[0][1]['event_name'])

    @patch('flask_socketio.SocketIO.emit')
    @patch('flask_socketio.SocketIO.run')
    def test_job_events_are_emitted_to_clients(self, mock_run, mock_emit):
        ui = SchedulerUI(self.scheduler)
        ui.start()

        self.scheduler.add_job(lambda: time.sleep(0.1),
                               id='waiting_job',
                               name='Waiting job',
                               trigger='interval',
                               seconds=0.2,
                               next_run_time=datetime.now() +
                               timedelta(milliseconds=50))

        mock_emit.assert_called_once()
        emitted_event = mock_emit.call_args[0][1]

        self.assertEqual('job_added', emitted_event['event_name'])
        self.assertIn('properties', emitted_event)
        self.assertIsInstance(
            datetime.strptime(emitted_event['event_ts'],
                              '%Y-%m-%d %H:%M:%S.%f'), datetime)

        mock_emit.reset_mock()

        time.sleep(0.1)
        # Job submission event.
        mock_emit.assert_called_once()

    @patch('flask_socketio.SocketIO.emit')
    @patch('flask_socketio.SocketIO.run')
    def test_jobstore_events_are_emitted_to_clients(self, mock_run, mock_emit):
        ui = SchedulerUI(self.scheduler)
        ui.start()

        # Job store addition.
        self.scheduler.add_jobstore(MemoryJobStore(), alias='in_memory')

        mock_emit.assert_called_once()
        self.assertEqual('jobstore_event', mock_emit.call_args[0][0])

        emitted_event = mock_emit.call_args[0][1]
        self.assertEqual('in_memory', emitted_event['jobstore_name'])
        self.assertEqual('jobstore_added', emitted_event['event_name'])
        self.assertIsInstance(
            datetime.strptime(emitted_event['event_ts'],
                              '%Y-%m-%d %H:%M:%S.%f'), datetime)

        # Job store removal.
        mock_emit.reset_mock()
        self.scheduler.remove_jobstore('in_memory')

        mock_emit.assert_called_once()
        emitted_event = mock_emit.call_args[0][1]
        self.assertEqual('in_memory', emitted_event['jobstore_name'])
        self.assertEqual('jobstore_removed', emitted_event['event_name'])
        self.assertIsInstance(
            datetime.strptime(emitted_event['event_ts'],
                              '%Y-%m-%d %H:%M:%S.%f'), datetime)

    @patch('flask_socketio.SocketIO.emit')
    @patch('flask_socketio.SocketIO.run')
    def test_executors_events_are_emitted_to_clients(self, mock_run,
                                                     mock_emit):
        ui = SchedulerUI(self.scheduler)
        ui.start()

        # Executor addition.
        self.scheduler.add_executor(ThreadPoolExecutor(max_workers=1),
                                    alias='thread_pool')

        mock_emit.assert_called_once()
        self.assertEqual('executor_event', mock_emit.call_args[0][0])

        emitted_event = mock_emit.call_args[0][1]
        self.assertEqual('thread_pool', emitted_event['executor_name'])
        self.assertEqual('executor_added', emitted_event['event_name'])
        self.assertIsInstance(
            datetime.strptime(emitted_event['event_ts'],
                              '%Y-%m-%d %H:%M:%S.%f'), datetime)

        # Executor removal.
        mock_emit.reset_mock()
        self.scheduler.remove_executor('thread_pool')

        mock_emit.assert_called_once()
        emitted_event = mock_emit.call_args[0][1]
        self.assertEqual('thread_pool', emitted_event['executor_name'])
        self.assertEqual('executor_removed', emitted_event['event_name'])
        self.assertIsInstance(
            datetime.strptime(emitted_event['event_ts'],
                              '%Y-%m-%d %H:%M:%S.%f'), datetime)

    @patch('flask_socketio.emit')
    def test_connected_clients_get_initialized(self, mock_emit):
        ui = SchedulerUI(self.scheduler, capabilities={'run_job': True})
        ui.start(port=5001, host='localhost')

        time.sleep(0.1)

        import socketio

        socket_client = socketio.Client()
        socket_client.connect('ws://localhost:5001')
        socket_client.emit(
            'connected'
        )  # Notify server that we're now connected, as frontend would do.

        time.sleep(0.1)

        self.assertEqual(2, mock_emit.call_count,
                         'emit should be called twice when a client connects')

        first_call = mock_emit.call_args_list[0]
        second_call = mock_emit.call_args_list[1]

        self.assertEqual(
            'init_jobs', first_call[0][0],
            'First argument of the first emit should be event name')

        self.assertEqual(
            'init_capabilities', second_call[0][0],
            'First argument of the second emit should be the init_capabilities event name'
        )
        self.assertEqual(
            ui.capabilities, second_call[0][1],
            "Second argument of init_capabilities should equal the web server's capabilities"
        )

        socket_client.disconnect()
Example #12
0
logging.basicConfig()
import click
app = Flask(__name__)
# Load default config and override config from an environment variable
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'db', 'notifications.db'),
    SECRET_KEY='secret',
    USERNAME='******',
    PASSWORD='******'
))

# scheduler
sched = BackgroundScheduler(timezone='America/Los_Angeles')
sched.add_jobstore('sqlalchemy', url='sqlite:///' + os.path.join(app.root_path, 'db', 'notifications.db'))
sched.add_executor('threadpool')
sched.start()
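
# Illustrative sketch (not part of the original app): with the sqlalchemy
# jobstore above, a job registered under a stable id survives restarts.
# send_reminder is a hypothetical helper used only for this example.
def send_reminder():
    print('checking for due notifications...')

sched.add_job(send_reminder, 'interval', minutes=5,
              id='reminder_check', replace_existing=True)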

def connect_db():
    """Connects to the specific database."""
    rv = sqlite3.connect(app.config['DATABASE'])
    rv.row_factory = dict_factory
    return rv

@app.cli.command('initdb')
@click.option('--file', help='the schema file')
def initdb_command(file):
    initdb(file)

def initdb(file):
    """Initialize the database."""
Example #13
0
    finishedMutex.acquire()
    finishedThreads = finishedThreads + 1
    print("[Worker {0}]\tFinished items (count me): {1}".format(worker, finishedThreads))
    if finishedThreads == numWorkers:
        print("[Worker {0}]\tAll finished. Send event".format(worker))
        event.set()
    finishedMutex.release()
    print("[Worker {0}]\tEXIT".format(worker))

finishedThreads = 0
jobs = range(1, 51)
worker = 0

mutex = threading.Lock()
finishedMutex = threading.Lock()
event = threading.Event()

scheduler = BackgroundScheduler()
scheduler.add_executor(ThreadPoolExecutor(max_workers=20))
scheduler.start()
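
# One job per work item, all due to run immediately. Only 20 executor threads
# are available, so later jobs start well past their scheduled time; the
# generous misfire_grace_time (24h) below keeps APScheduler from discarding
# them as misfired while they wait for a free thread.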


for job in jobs:
    worker += 1
    scheduler.add_job(threadFunc, args=[worker, len(jobs)], max_instances=1000, misfire_grace_time=86400)

print "[Main]\t\tWait for event"
event.wait()
print "[Main]\t\tExit"
Example #14
0
from kafka.errors import (KafkaError, KafkaTimeoutError)

logger_format = '%(asctime)-15s %(message)s'
logging.basicConfig(format=logger_format)
logger = logging.getLogger('data-producer')
logger.setLevel(logging.INFO)

app = Flask(__name__)
app.config.from_envvar('ENV_CONFIG_FILE')
kafka_broker = app.config['CONFIG_KAFKA_ENDPOINT']
topic_name = app.config['CONFIG_KAFKA_TOPIC']

producer = KafkaProducer(bootstrap_servers=kafka_broker)

schedule = BackgroundScheduler()
schedule.add_executor('threadpool')
schedule.start()

symbols = set()


def shutdown_hook():
    """
    a shutdown hook to be called before the shutdown
    """
    try:
        logger.info(
            'Flushing pending messages to kafka, timeout is set to 10s')
        producer.flush(10)
        logger.info('Finish flushing pending messages to kafka')
    except KafkaError as kafka_error:
        logger.warning('Failed to flush pending messages to kafka: %s',
                       kafka_error)
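
# The rest of this producer is truncated here; a plausible wiring, as a
# sketch (fetch_quote is a hypothetical helper, not shown in the original):
#
#   atexit.register(shutdown_hook)
#   schedule.add_job(fetch_quote, 'interval', seconds=1, id='fetch_quotes')
#   app.run(host='0.0.0.0', port=5000)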
Example #15
0
def getScheduler():
    scheduler = BackgroundScheduler()
    scheduler.add_executor("threadpool")
    return scheduler
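
# Usage sketch: the caller is still responsible for starting (and later
# shutting down) the returned scheduler:
#
#   sched = getScheduler()
#   sched.start()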
Example #16
0
def send_data_to_kafka(producer, topic):
    for stock_symbol in stock_names_set:
        msg = getQuotes(stock_symbol)
        producer.send(topic=topic, value=json.dumps(msg))
        print('the stock symbol is %s and the result is %s' % (str(stock_symbol), str(msg)))

def shutdown_hook(producer,schedulers):
    producer.flush(10)
    producer.close()
    schedulers.shutdown()


if __name__ == '__main__':
    parser = configparser.ConfigParser()
    parser.read(os.getcwd() + '/redis_config.ini')
    kafka_cluster = parser.get('kafka_config', 'cluster')
    kafka_topic = parser.get('kafka_config', 'topic')

    producer = KafkaProducer(bootstrap_servers=kafka_cluster)

    # init and run scheduler
    schedulers = BackgroundScheduler()
    schedulers.add_executor(ThreadPoolExecutor(5))
    schedulers.add_job(send_data_to_kafka, 'interval', [producer, kafka_topic], seconds=3)
    schedulers.start()

    atexit.register(shutdown_hook, producer, schedulers)
    app.run(host='localhost', port=8081)


Example #17
0
# Start the scheduler
scheduler.start()

second_redis_jobstore = RedisJobStore(
    db=2,
    jobs_key="apschedulers.second_jobs",
    run_times_key="apschedulers.second_run_times",
    host="127.0.0.1",
    port=6379,
    password="******"
)

scheduler.add_jobstore(second_redis_jobstore, 'second')
# Define an executor that runs scheduled jobs on the asyncio event loop
second_executor = AsyncIOExecutor()
scheduler.add_executor(second_executor, "second")


# ***********  Examples of working with APScheduler events  ***********
# Define a listener function for scheduler events
def job_execute(event):
    """
    监听事件处理
    :param event:
    :return:
    """
    print(
        "job executed:\ncode => {}\njob.id => {}\njobstore => {}".format(
            event.code,
            event.job_id,
            event.jobstore
        )
    )
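
# Hooking the listener up (a sketch, assuming the scheduler defined above):
# APScheduler dispatches job events through add_listener, filtered by an
# event mask.
from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED

scheduler.add_listener(job_execute, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)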
Example #18
0
class Filament(object):
    """Filament initialization and execution engine.

    Filaments are lightweight Python modules which run
    on top of Fibratus. They are often used to enrich/extend the
    functionality of Fibratus by performing any type of logic
    (aggregations, groupings, filters, counters, etc) on the
    kernel event stream.

    """
    def __init__(self):
        """Builds a new instance of the filament.

        Attributes:
        ----------

        filament_module: module
            module which contains the filament logic
        """
        self._filament_module = None
        self._name = None
        self._filters = []
        self._cols = []
        self._tabular = None
        self._limit = 10
        self._interval = 1
        self._sort_by = None
        self._sort_desc = True
        self._logger = None
        self._ansi_term = AnsiTerm()
        self.scheduler = BackgroundScheduler()

    def load_filament(self, name):
        """Loads the filament module.

        Finds and loads the Python module which
        holds the filament logic. It also looks up
        some essential filament methods and raises an error
        if they can't be found.

        Parameters
        ----------
        name: str
            name of the filament to load

        """
        self._name = name
        Filament._assert_root_dir()
        filament_path = self._find_filament_path(name)
        if filament_path:
            loader = SourceFileLoader(name, filament_path)
            self._filament_module = loader.load_module()
            sys.path.append(FILAMENTS_DIR)
            doc = inspect.getdoc(self._filament_module)
            if not doc:
                raise FilamentError('Please provide a short '
                                    'description for the filament')

            on_next_kevent = self._find_filament_func('on_next_kevent')
            if on_next_kevent:
                if self._num_args(on_next_kevent) != 1:
                    raise FilamentError('on_next_kevent method must '
                                        'accept exactly one argument')
                self._initialize_funcs()
            else:
                raise FilamentError('Missing required on_next_kevent '
                                    'method on filament')
        else:
            raise FilamentError('%s filament not found' % name)

    def _initialize_funcs(self):
        """Setup the filament modules functions.

        Functions
        ---------

        set_filter: func
            accepts a comma-separated list of kernel events
            to which the filter should be applied
        set_interval: func
            establishes the fixed repeating interval in seconds
        columns: func
            configures the column set for the table
        add_row: func
            adds a new row to the table
        sort_by: func
            sorts the table by a specific column
        """
        def set_filter(*args):
            self._filters = args

        self._filament_module.set_filter = set_filter

        def set_interval(interval):
            if not type(interval) is int:
                raise FilamentError('Interval must be an integer value')
            self._interval = interval

        self._filament_module.set_interval = set_interval

        def columns(cols):
            if not isinstance(cols, list):
                raise FilamentError('Columns must be a list, '
                                    '%s found' % type(cols))
            self._cols = cols
            self._tabular = Tabular(self._cols)
            self._tabular.padding_width = 10
            self._tabular.junction_char = '|'

        def add_row(row):
            if not isinstance(row, list):
                raise FilamentError(
                    'Expected list type for the row, found %s' % type(row))
            self._tabular.add_row(row)

        def sort_by(col, sort_desc=True):
            if len(self._cols) == 0:
                raise FilamentError('Expected at least 1 column but 0 found')
            if col not in self._cols:
                raise FilamentError('%s column does not exist' % col)
            self._sort_by = col
            self._sort_desc = sort_desc

        def limit(l):
            if len(self._cols) == 0:
                raise FilamentError('Expected at least 1 column but 0 found')
            if not type(l) is int:
                raise FilamentError('Limit must be an integer value')
            self._limit = l

        def title(text):
            self._tabular.title = text

        self._filament_module.columns = columns
        self._filament_module.title = title
        self._filament_module.sort_by = sort_by
        self._filament_module.limit = limit
        self._filament_module.add_row = add_row
        self._filament_module.render_tabular = self.render_tabular

        on_init = self._find_filament_func('on_init')
        if on_init and self._zero_args(on_init):
            self._filament_module.on_init()
        if self._find_filament_func('on_interval'):
            self.scheduler.add_executor(ThreadPoolExecutor(max_workers=4))
            self.scheduler.start()

            def on_interval():
                try:
                    self._filament_module.on_interval()
                except Exception:
                    self._logger.error(
                        'Unexpected error on interval elapsed %s' %
                        traceback.format_exc())

            self.scheduler.add_job(on_interval,
                                   'interval',
                                   seconds=self._interval,
                                   max_instances=4,
                                   misfire_grace_time=60)
        if len(self._cols) > 0:
            try:
                self._ansi_term.setup_console()
            except TermInitializationError:
                panic('fibratus run: ERROR - console initialization failed')

    def do_output_accessors(self, outputs):
        """Creates the filament's output accessors.

        Parameters
        ----------

        outputs: dict
            outputs initialized from the configuration
            descriptor
        """
        for name, output in outputs.items():
            setattr(self._filament_module, name, OutputAccessor(output))

    def on_next_kevent(self, kevent):
        try:
            self._filament_module.on_next_kevent(ddict(kevent))
        except Exception as e:
            self._logger.error('Unexpected filament error %s' % e)

    def render_tabular(self):
        """Renders the table on the console.
        """
        if len(self._cols) > 0:
            if self._sort_by:
                tabular = self._tabular.get_string(start=1,
                                                   end=self._limit,
                                                   sortby=self._sort_by,
                                                   reversesort=self._sort_desc)
            else:
                tabular = self._tabular.get_string(start=1, end=self._limit)
            self._tabular.clear_rows()
            self._ansi_term.write_output(tabular)

    def close(self):
        on_stop = self._find_filament_func('on_stop')
        if on_stop and self._zero_args(on_stop):
            self._filament_module.on_stop()
        if self.scheduler.running:
            self.scheduler.shutdown()
        self._ansi_term.restore_console()

    @classmethod
    def exists(cls, filament):
        Filament._assert_root_dir()
        return os.path.exists(os.path.join(FILAMENTS_DIR, '%s.py' % filament))

    @classmethod
    def list_filaments(cls):
        Filament._assert_root_dir()
        filaments = {}
        paths = [
            os.path.join(FILAMENTS_DIR, path)
            for path in os.listdir(FILAMENTS_DIR) if path.endswith('.py')
        ]
        for path in paths:
            filament_name = os.path.basename(path)[:-3]
            loader = SourceFileLoader(filament_name, path)
            filament = loader.load_module()
            filaments[filament_name] = inspect.getdoc(filament)
        return filaments

    @classmethod
    def _assert_root_dir(cls):
        if not os.path.exists(FILAMENTS_DIR):
            panic('fibratus run: ERROR - %s path does not exist.' %
                  FILAMENTS_DIR)

    @property
    def filters(self):
        return self._filters

    @property
    def logger(self):
        return self._logger

    @logger.setter
    def logger(self, logger):
        self._logger = logger

    @property
    def filament_module(self):
        return self._filament_module

    @property
    def name(self):
        return self._name

    def _find_filament_func(self, func_name):
        """Finds the function in the filament module.

        Parameters
        ----------

        func_name: str
            the name of the function
        """
        functions = inspect.getmembers(self._filament_module,
                                       predicate=inspect.isfunction)
        return next(
            iter([func for name, func in functions if name == func_name]),
            None)

    def _find_filament_path(self, filament_name):
        """Resolves the filament full path from the name

        Parameters
        ----------

        filament_name: str
            the name of the filament whose path is about to be resolved
        """
        return next(
            iter([
                os.path.join(FILAMENTS_DIR, filament)
                for filament in os.listdir(FILAMENTS_DIR)
                if filament.endswith('.py') and filament_name == filament[:-3]
            ]), None)

    def _num_args(self, func):
        return len(inspect.getfullargspec(func).args)

    def _zero_args(self, func):
        return self._num_args(func) == 0
Example #19
0
    return str(tz)


ansible_scheduler = BackgroundScheduler({
    'apscheduler.job_defaults.coalesce': 'true',
    'apscheduler.job_defaults.max_instances': '5',
    'apscheduler.timezone': get_timezone(),
})

logger.error("create backgroupd schedule with id=%s" % id(ansible_scheduler))
ansible_scheduler.add_jobstore(DjangoJobStore(), 'default')
# ansible_scheduler.add_executor(ProcessPoolExecutor(10), 'default')
ansible_scheduler.add_executor(ThreadPoolExecutor(20), 'default')
ansible_scheduler.start()
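
# Illustrative sketch: run_cron_job (defined below) could be registered on a
# cron trigger; these trigger args are assumptions, not taken from the
# original project.
#
# ansible_scheduler.add_job(run_cron_job, 'cron', hour=2, minute=0,
#                           args=['deploy', 'web', 'host-01'],
#                           id='nightly_deploy', replace_existing=True)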


def run_cron_job(op, group, entity):
    url = ansible_get_git_url()
    prj_name = url.split('/')[-1].split('.')[0]
    message = {}
    message['event'] = "APSchedule"
    message['type'] = "routine"
    message["role"] = op
    message['group'] = group
    message["host"] = entity
    message["src_path"] = ANSIBLE_PATH + prj_name
    message['username'], message['password'] = get_host_username_and_password(
        entity)