Example #1
    def thread_task():
        schedule = Scheduler()
        schedule.every(30).minutes.do(task)

        while True:
            schedule.run_pending()
            time.sleep(1)
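
A minimal sketch of how a helper like this is usually launched, assuming `thread_task` and `task` are defined at module level and the usual `schedule`/`time` imports are in place; a daemon thread keeps the polling loop off the main thread:

import threading

# Daemon thread: it exits together with the main program instead of keeping it alive.
worker = threading.Thread(target=thread_task, daemon=True)
worker.start()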
Example #2
class Server:
    def __init__(self):
        self.scheduler = Scheduler()
        self.semaphore = threading.Semaphore()
        self.con = taps_control.TapsControl()
        self.read_config()

    def read_config(self):
        config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   'config.json')
        with open(config_path, 'r') as f:
            data = json.load(f)
            for job in data.values():
                self.handle_job(job)

    def handle_job(self, job):
        when = job['when']
        con = self.con
        if when == 'daily':
            logging.info('Queued a daily job on channel ' +
                         str(job['channel']))
            self.scheduler.every().day.at(job['start']).do(
                job_func, int(job['channel']), int(job['duration']), con)
        elif when == 'every':
            self.scheduler.every(int(job['interval'])).seconds.do(
                job_func, int(job['channel']), int(job['duration']), con)

        elif when == "other":
            self.scheduler.every(2).days.do(job_func, int(job['channel']),
                                            int(job['duration']), con)

    def run(self):
        while True:
            self.scheduler.run_pending()
            time.sleep(1)  # avoid a busy-wait (assumed intent, matching the other examples)
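
The shape of `config.json` can be inferred from `read_config` and `handle_job`: a JSON object whose values are job specs. A hypothetical file (job names and numbers are illustrative):

{
    "morning_watering": {"when": "daily", "start": "06:30", "channel": "1", "duration": "300"},
    "mist_cycle": {"when": "every", "interval": "3600", "channel": "2", "duration": "60"},
    "deep_soak": {"when": "other", "channel": "3", "duration": "900"}
}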
Example #3
def main():
    global procs
    global logger
    # Create the logger
    logger = gene_logger('log/cron.log')
    logger.log(10, f'{os.getpid()} : On main.')

    # Set up the schedule
    s = Scheduler()
    s.every(10).minutes.do(proc, args=['python', './rps_watcher.py'], sche=s)

    # Log the registered jobs
    for j in s.jobs:
        logger.log(10, f'{os.getpid()} : Jobs...{j}')

    # Loop, waiting for scheduled jobs to come due
    while True:
        s.run_pending()
        # Check whether each child process is still running
        for p in procs[:]:  # iterate over a copy; removing items mid-iteration would skip elements
            if p.poll() is not None:
                logger.log(
                    10, f'{os.getpid()} : #{p.pid} has exited({p.returncode})')
                procs.remove(p)
        time.sleep(1)

    logger.log(10, f'{os.getpid()} : On exit.')  # unreachable: the loop above never breaks
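
`gene_logger` and `proc` are not shown in the snippet. A plausible `proc`, assuming its job is to spawn a subprocess and track it in the global `procs` list that the main loop reaps via `poll()`:

import subprocess

procs = []

def proc(args, sche):
    # Spawn the watcher and remember it so the main loop can reap it later.
    p = subprocess.Popen(args)
    logger.log(10, f'{os.getpid()} : Spawned #{p.pid}')
    procs.append(p)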
Example #4
def add_and_run_job(func, args):

    schedule = Scheduler()

    schedule.every(5).seconds.do(func, args)

    while True:
        schedule.run_pending()
        time.sleep(1)  # avoid a busy-wait (assumed intent, matching the other examples)
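
Note that `do(func, args)` forwards `args` as a single positional argument, so `func` receives the whole object; unpacking a sequence would require `do(func, *args)` instead. A usage sketch under that reading:

def greet(names):
    print("hello,", names)

add_and_run_job(greet, ["alice", "bob"])  # every 5 seconds: greet(["alice", "bob"])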
Example #5
        def task():
            self.check_ip_availability_task()
            schedule = Scheduler()
            schedule.every(10).minutes.do(self.check_ip_availability_task)

            while True:
                schedule.run_pending()
                time.sleep(1)
Example #6
class Bot(DBot):
    def __init__(self, db, **options):
        super().__init__(**options)
        self.db = db
        self.jobs = []
        self.run_jobs = True
        self.schedule = Scheduler()

    async def close(self):
        print("Shutting down!")
        self.run_jobs = False
        await super().close()
        self.db.close_all()

    async def on_ready(self):
        print(f"Bot is ready! Logged in as {self.user}.")
        Thread(target=self.job_runner).start()

    def job_runner(self):
        print("Starting background timer runner.")
        while self.run_jobs:
            try:
                self.schedule.run_pending()
            except Exception as e:
                print(f"{type(e).__name__}: {e}")
            time.sleep(10)

    def register_job_daily(self, daytime, f):
        print(f"Registering job {f.__name__} to run every day at {daytime}")
        self.schedule.every().day.at(daytime).do(f)

    def register_job(self, timer, f):
        print(f"Registering job {f.__name__} to run every {timer} seconds")
        self.schedule.every(timer).seconds.do(f)

    def dbconf_get(self, guild_id, name, default=None):
        result = self.db.get(guild_id).execute("SELECT value FROM config WHERE name = ?", (name,)).fetchall()

        if len(result) < 1:
            return default

        return str(result[0][0])

    def dbconf_set(self, guild_id, name, value):
        saved = self.dbconf_get(guild_id, name)

        if saved is None:
            with self.db.get(guild_id) as db:
                db.execute("INSERT INTO config(name, value) VALUES(?, ?)", (name, value))
            return

        if str(saved) == str(value):
            return

        with self.db.get(guild_id) as db:
            db.execute("UPDATE config SET value = ? WHERE name = ?", (value, name))
Example #7
def schedule_every_monday_at(process, str_time, run_at_start=True):
    scheduler1 = Scheduler()
    scheduler1.every().monday.at(str_time).do(process)

    if run_at_start:
        # Run the job now
        scheduler1.run_all()

    while True:
        scheduler1.run_pending()
        time.sleep(1)
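
Usage is a single blocking call; since the loop never returns, it is typically the last statement of a worker thread or process (`weekly_report` is hypothetical):

schedule_every_monday_at(weekly_report, "09:00")  # runs once now, then blocks forever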
Example #8
    def thread_task():
        def task():
            if not runner.is_running:
                print("============ 开始重新爬取 ===================")
                runner.crawl()

        schedule = Scheduler()
        schedule.every(30).minutes.do(task)

        while True:
            schedule.run_pending()
            time.sleep(1)
Example #9
class CapSaveRecognTask(AbstractTask):
    def __init__(self, uuid):
        super().__init__("capcam")
        self.cam_uuid = uuid
        self.success_interval = camctl.get_one(self.cam_uuid).interval
        self.schedule = Scheduler()

    def run(self):
        self.schedule.clear()
        self.schedule.every(self.success_interval).seconds.do(self.run_threaded, self.__run)
        while not self.stopped:
            try:
                self.schedule.run_pending()
                time.sleep(1)
            except Exception as err:
                logging.error("task(%s@%s) failed, %s", self.name, self.cam_uuid, str(err))
                time.sleep(self.fail_interval)

    def __run(self):
        try:
            cam = camctl.get_one(self.cam_uuid)
            if clkctl.check_period_filter(cam.detection_cycle.get('detection_period'), cam.detection_cycle.get('detection_time')):
                if_sto_img = camctl.if_sto_img(cam)
                saved_path, if_sto_db = bktctl.save(cam.frame(), self.cam_uuid, if_sto_img)
                function_list = rcgctl.period_function_filter(cam)
                model_list = rcgctl.ai_pointer_filter(function_list)
                logging.info(model_list)

                if len(model_list):
                    logging.info("task(%s@%s): start ai recognize, function is : %s, model is : %s", self.name, self.cam_uuid, str(function_list), str(model_list))
                    rcgctl.recognize(saved_path, self.cam_uuid, function_list, model_list)
                    logging.info("task(%s@%s): succ and next in: %s", self.name, self.cam_uuid, cam.interval)
                else:
                    logging.info("task(%s@%s): not in ai recognize cycle", self.name, self.cam_uuid)

                if not if_sto_db:
                    bktctl.delete(saved_path)
            if cam.interval != self.success_interval:
                self.success_interval = cam.interval
                self.schedule.clear()
                self.schedule.every(self.success_interval).seconds.do(self.run_threaded, self.__run)
        except Exception as err:
            logging.error("task(%s) failed", self.name)
            logging.exception(err)
            logging.info("task(%s@%s): fail and next in: %s", self.name, self.cam_uuid, self.fail_interval)
            self.success_interval = self.fail_interval
            self.schedule.clear()
            self.schedule.every(self.fail_interval).seconds.do(self.run_threaded, self.__run)

    def run_threaded(self, func):
        job_thread = threading.Thread(target=func)
        job_thread.start()
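
The noteworthy pattern here is resizing the interval at runtime: the job clears the scheduler and re-registers itself. Reduced to a minimal sketch (`compute_next_interval` is hypothetical):

from schedule import Scheduler

scheduler = Scheduler()

def reschedule(seconds):
    scheduler.clear()                         # drop every job on this scheduler...
    scheduler.every(seconds).seconds.do(job)  # ...and register this one again

def job():
    reschedule(compute_next_interval())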
Example #10
class LocalController(Thread):
    def __init__(self):
        Thread.__init__(self, name='Local Timer')
        self.__stop = Event()
        self.__days, self.__start_time = parse_config()
        self.__scheduler = Scheduler()

    def stop(self):
        if not self.__stop.is_set():
            self.__stop.set()
        self.join()

    def next_run(self):
        return self.__scheduler.next_run

    def __run_cycle(self):
        state.run_zone_action((ZoneAction.RUN_CYCLE, 0))

    def __schedule_job(self):
        self.__scheduler.clear()
        if in_production():
            for day in self.__days:
                job = Job(1, self.__scheduler)
                job.start_day = day.name.lower()
                job.unit = 'weeks'
                job.at(self.__start_time.strftime("%H:%M")).do(
                    self.__run_cycle)
        else:
            self.__scheduler.every(3).minutes.do(self.__run_cycle)
        logging.info('Next run scheduled for {0}.'.format(
            self.__scheduler.next_run))

    def control_mode_changed(self):
        mode = state.active_controller_mode()
        if mode is not ControllerMode.LOCAL:
            self.__scheduler.clear()
        elif mode is ControllerMode.LOCAL:
            self.__schedule_job()

    def run(self):
        logging.info('Local cycle run controller started.')
        self.__schedule_job()
        while not self.__stop.is_set():
            if state.active_controller_mode() is ControllerMode.LOCAL:
                self.__scheduler.run_pending()
            sleep(1)
        self.__scheduler.clear()
        logging.info('Local cycle run controller stopped.')
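
Constructing `Job` directly is the dynamic form of the fluent API; with a literal weekday the same registration would read as below, and `getattr` offers a middle ground that avoids touching `Job` internals (`scheduler`, `run_cycle` and `day_name` stand in for the attributes above):

# Static equivalent, weekday known at write time ("06:30" is illustrative):
scheduler.every().monday.at("06:30").do(run_cycle)

# Dynamic weekday via getattr; day_name is a runtime string such as "monday":
getattr(scheduler.every(), day_name).at("06:30").do(run_cycle)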
Example #11
class Cron:

    pattern = re.compile(r'every (\d+ )?(\w+)(?: at (\d\d:\d\d))?$')

    def __init__(self, app=None):
        self.app = None
        self.scheduler = Scheduler()
        self.stopped = True
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        self.app = app
        app.extensions['cron'] = self
        app.cli.add_command(Command('cron', callback=self.run))

    def task(self, when):
        def decorator(func):
            match = self.pattern.match(when)
            interval = match.group(1)
            if interval is not None:
                job = self.scheduler.every(int(interval))
            else:
                job = self.scheduler.every()
            getattr(job, match.group(2))  # bare attribute access sets the unit/day (schedule's units are properties)
            time_str = match.group(3)
            if time_str is not None:
                job.at(time_str)
            job.do(func)
            return func
        return decorator

    def run(self):
        self.app.logger.info('Starting cron')
        self.stopped = False
        signal(SIGINT, self.stop)
        signal(SIGTERM, self.stop)
        while not self.stopped:
            self.scheduler.run_pending()
            sleep(self.scheduler.idle_seconds)  # assumes a job is registered; idle_seconds is None on an empty scheduler
        self.app.logger.info('Terminating cron')

    def stop(self, signo=None, frame=None):
        self.stopped = True
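
Given the regex, valid `when` strings look like "every day at 08:00", "every 10 minutes" or "every monday"; the loop is then started via the registered `cron` CLI command. A usage sketch on a Flask app:

cron = Cron(app)

@cron.task('every day at 08:00')
def send_digest():
    ...

@cron.task('every 15 minutes')
def poll_queue():
    ...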
Example #12
class ChatBot(object):
    def __init__(self, broker=None):
        self.schedule = Scheduler()
        self.triggers = {}
        self.pollers = []
        self.introspect()
        self.setup_pollers()
        self.broker = broker
        if self.broker is not None:
            self.username = self.broker.username
            self.messages = self.broker.messages

    def on_message(self, iteration_nbr, message):
        self.iteration_nbr = iteration_nbr
        text = message['text'].lower()
        for trigger in self.triggers:
            if trigger in text:
                response = self.triggers[trigger]()
                if response is not None:
                    self.on_posted(self.broker.post(response)['message'])
                return response

    def on_posted(self, message):
        """Called with broker response to just posted message"""
        return

    def setup_pollers(self):
        for poller in self.pollers:
            self.schedule.every().minute.do(poller)

    def run_pending(self):
        self.schedule.run_pending()

    def introspect(self):
        for name, method in inspect.getmembers(self,
                                               predicate=inspect.ismethod):
            if name.startswith('on_'):
                if getattr(method, 'is_trigger', False) is True:
                    event_name = name[3:]
                    self.triggers[event_name] = method
                if getattr(method, 'every_minute', False) is True:
                    self.pollers.append(method)
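
Handlers are wired purely by naming convention plus marker attributes, so a subclass only needs to tag its methods (`check_feeds` is hypothetical):

class GreeterBot(ChatBot):
    def on_hello(self):
        return "hi there!"           # fires when "hello" appears in a message
    on_hello.is_trigger = True

    def on_tick(self):
        check_feeds()
    on_tick.every_minute = True      # picked up by introspect() as a poller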
Example #13
class Scheduler(object):
    def __init__(self, config):
        self._debug = config['debug']
        self._interval = config.get('interval', 10)
        self._sched = Sched()

    def add(self, func, args, tag):
        if self._sched is None:
            raise SchedulerException('required to create scheduler')
        self._sched.every(self._interval).seconds.do(func, args=args).tag(tag)

    def run(self):
        if self._sched is None:
            raise SchedulerException('required to create scheduler')
        self._sched.run_pending()

    def stop(self, tag=None):
        if self._sched is None:
            raise SchedulerException('required to create scheduler')
        self._sched.clear(tag)
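
Because `do(func, args=args)` forwards `args` as a keyword, scheduled functions must accept an `args` parameter. A usage sketch (`fetch_metrics` is hypothetical):

def fetch_metrics(args):
    print("polling", args)

sched = Scheduler({'debug': False, 'interval': 30})
sched.add(fetch_metrics, args=('cpu',), tag='metrics')
while True:
    sched.run()
    time.sleep(1)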
Example #15
class Controller(Service):
    """
    An object of this class is responsible for gluing together all the other
    components. It needs the following options in the `scrapy-do` section of
    the configuration:

      * `project-store` - a directory for all the data and metadata.
      * `job-slots` - number of jobs to run in parallel
      * `completed-cap` - number of completed jobs to keep while purging the old
        jobs

    :param config: A :class:`Config <scrapy_do.config.Config>` object providing
                   the options listed above in its `scrapy-do` section.
    """

    log = Logger()

    #---------------------------------------------------------------------------
    def __init__(self, config):
        #-----------------------------------------------------------------------
        # Configuration
        #-----------------------------------------------------------------------
        self.log.info('Creating controller')
        self.config = config
        ps = config.get_string('scrapy-do', 'project-store')
        ps_abs = os.path.join(os.getcwd(), ps)
        self.project_store = ps if ps.startswith('/') else ps_abs
        self.job_slots = config.get_int('scrapy-do', 'job-slots')
        self.completed_cap = config.get_int('scrapy-do', 'completed-cap')
        self.metadata_path = os.path.join(self.project_store, 'metadata.pkl')
        self.schedule_path = os.path.join(self.project_store, 'schedule.db')
        self.log_dir = os.path.join(self.project_store, 'log-dir')
        self.spider_data_dir = os.path.join(self.project_store, 'spider-data')
        self.running_jobs = {}
        self.scheduled_jobs = {}
        self.counter_run = 0
        self.counter_success = 0
        self.counter_failure = 0
        self.counter_cancel = 0
        self.start_time = datetime.now()
        self.listeners = set()
        self.mem_usage = None
        self.mem_usage_ts = None

        #-----------------------------------------------------------------------
        # Create all the directories
        #-----------------------------------------------------------------------
        dirs = [self.project_store, self.log_dir, self.spider_data_dir]
        for d in dirs:
            try:
                os.makedirs(d)
            except FileExistsError:
                pass

        if os.path.exists(self.metadata_path):
            with open(self.metadata_path, 'rb') as f:
                self.projects = pickle.load(f)
        else:
            self.projects = {}
            with open(self.metadata_path, 'wb') as f:
                pickle.dump(self.projects, f)

        #-----------------------------------------------------------------------
        # Set the scheduler up
        #-----------------------------------------------------------------------
        self.schedule = Schedule(self.schedule_path)
        self.scheduler = Scheduler()

        for job in self.schedule.get_jobs(Status.SCHEDULED):
            self.log.info('Re-scheduling: {}'.format(str(job)))
            sch_job = schedule_job(self.scheduler, job.schedule)
            sch_job.do(
                lambda job: self.schedule_job(
                    job.project, job.spider, 'now', Actor.SCHEDULER,
                    job.description, job.payload),
                job)
            self.scheduled_jobs[job.identifier] = sch_job

        #-----------------------------------------------------------------------
        # If we have any jobs marked as RUNNING in the schedule at this point,
        # it means that the daemon was killed while the jobs were running. We
        # mark these jobs as pending, so that they can be restarted as soon
        # as possible
        #-----------------------------------------------------------------------
        for job in self.schedule.get_jobs(Status.RUNNING):
            self.log.info('Restarting interrupted: {}'.format(str(job)))
            job.status = Status.PENDING
            self._update_job(job)

        #-----------------------------------------------------------------------
        # Set up the service
        #-----------------------------------------------------------------------
        self.setName('Controller')
        self.scheduler_loop = LoopingCall(self.run_scheduler)
        self.crawlers_loop = LoopingCall(self.run_crawlers)
        self.purger_loop = LoopingCall(self.purge_completed_jobs)
        self.event_loop = LoopingCall(self.dispatch_periodic_events)

    #---------------------------------------------------------------------------
    def startService(self):
        """
        Start the twisted related functionality.
        """
        self.log.info('Starting controller')
        self.scheduler_loop.start(1.)
        self.crawlers_loop.start(1.)
        self.purger_loop.start(10.)
        self.event_loop.start(1.)

    #---------------------------------------------------------------------------
    def stopService(self):
        """
        Stop the twisted related functionality.
        """
        self.log.info('Stopping controller')
        self.scheduler_loop.stop()
        self.crawlers_loop.stop()
        self.purger_loop.stop()
        self.event_loop.stop()
        return self.wait_for_running_jobs(cancel=True)

    #---------------------------------------------------------------------------
    @inlineCallbacks
    def push_project(self, data):
        """
        Register a project of a given name with the zipped code passed in data.

        :param data: Binary blob with a zipped project code
        :return:     A deferred that gets called back with a `Project` object,
                     or a `ValueError` or an `EnvironmentError` failure.
        """
        self.log.info('Pushing new project')

        #-----------------------------------------------------------------------
        # Store the data in a temporary file
        #-----------------------------------------------------------------------
        tmp = tempfile.mkstemp()
        with open(tmp[0], 'wb') as f:
            f.write(data)

        #-----------------------------------------------------------------------
        # Unzip to a temporary directory
        #-----------------------------------------------------------------------
        temp_dir = tempfile.mkdtemp()

        unzip = find_executable('unzip')
        if unzip is None:
            raise EnvironmentError('Please install unzip')

        ret_code = yield getProcessValue(unzip, args=(tmp[1], ), path=temp_dir)
        if ret_code != 0:
            shutil.rmtree(temp_dir)
            os.remove(tmp[1])
            self.log.debug('Failed to unzip data using "{}"'.format(unzip))
            raise ValueError('Not a valid zip archive')

        #-----------------------------------------------------------------------
        # Figure out the list of spiders
        #-----------------------------------------------------------------------
        config_files = glob(os.path.join(temp_dir, '**/scrapy.cfg'))

        if not config_files:
            shutil.rmtree(temp_dir)
            os.remove(tmp[1])
            raise ValueError('No project found in the archive')

        config = configparser.ConfigParser()
        config.read(config_files[0])
        try:
            name = config.get('deploy', 'project')
        except (configparser.NoOptionError, configparser.NoSectionError):
            shutil.rmtree(temp_dir)
            os.remove(tmp[1])
            raise ValueError(
                'Can\'t extract project name from the config file')

        temp_proj_dir = os.path.join(temp_dir, name)
        if not os.path.exists(temp_proj_dir):
            shutil.rmtree(temp_dir)
            os.remove(tmp[1])
            raise ValueError(
                'Project {} not found in the archive'.format(name))

        scrapy = find_executable('scrapy')
        if scrapy is None:
            raise EnvironmentError('Please install scrapy')

        ret = yield getProcessOutputAndValue(scrapy, ('list', ),
                                             path=temp_proj_dir)
        out, err, ret_code = ret

        if ret_code != 0:
            shutil.rmtree(temp_dir)
            os.remove(tmp[1])
            raise ValueError(
                'Unable to get the list of spiders. Assuming there are no '
                'errors in your code, this usually means missing packages '
                'in your server environment.')

        spiders = out.decode('utf-8').split()

        shutil.rmtree(temp_dir)

        #-----------------------------------------------------------------------
        # Check if we have had the project registered before and if we
        # have some scheduled jobs for the spiders of this project that
        # are not present in the new archive
        #-----------------------------------------------------------------------
        if name in self.projects:
            sched_jobs = self.schedule.get_scheduled_jobs(name)
            sched_spiders = [job.spider for job in sched_jobs]
            for spider in sched_spiders:
                if spider not in spiders:
                    os.remove(tmp[1])
                    msg = 'Spider {} is going to be removed but has ' \
                          'scheduled jobs'
                    msg = msg.format(spider)
                    self.log.info('Failed to push project "{}": {}'.format(
                        name, msg))
                    raise ValueError(msg)

        #-----------------------------------------------------------------------
        # Move to the final position and store the metadata
        #-----------------------------------------------------------------------
        archive = os.path.join(self.project_store, name + '.zip')
        shutil.move(tmp[1], archive)
        prj = Project(name, archive, spiders)
        self.projects[name] = prj
        with open(self.metadata_path, 'wb') as f:
            pickle.dump(self.projects, f)

        self.log.info('Added project "{}" with spiders {}'.format(
            name, prj.spiders))

        self.dispatch_event(Event.PROJECT_PUSH, prj)
        returnValue(prj)

    #---------------------------------------------------------------------------
    def get_projects(self):
        """
        Get the names of all the registered projects.
        """
        return list(self.projects.keys())

    #---------------------------------------------------------------------------
    def get_spiders(self, project_name):
        """
        Get names of all the spiders in the project.

        :param project_name: Name of the project
        :raises ValueError:    If the project name is not known
        """
        if project_name not in self.projects.keys():
            raise ValueError('Unknown project ' + project_name)
        return self.projects[project_name].spiders

    #---------------------------------------------------------------------------
    def schedule_job(self,
                     project,
                     spider,
                     when,
                     actor=Actor.USER,
                     description='',
                     payload='{}'):
        """
        Schedule a crawler job.

        :param project: Name of the project
        :param spider:  Name of the spider
        :param when:    A scheduling spec as handled by :meth:`schedule_job
                        <scrapy_do.utils.schedule_job>`
        :param actor:   :data:`Actor <scrapy_do.schedule.Actor>` triggering the
                        event
        :param description: Description of the job instance (optional), defaults
                            to empty string
        :param payload: A serialized JSON object with user data, defaults to an
                        empty object
        :return:        A string identifier of a job
        """
        if project not in self.projects.keys():
            raise ValueError('Unknown project ' + project)

        if spider not in self.projects[project].spiders:
            raise ValueError('Unknown spider {}/{}'.format(project, spider))

        try:
            obj = json.loads(payload)
            payload = json.dumps(obj, ensure_ascii=False)
        except ValueError as e:
            msg = str(e)
            raise ValueError('Payload is not a valid JSON string: ' + msg)

        job = Job(status=Status.PENDING,
                  actor=actor,
                  schedule='now',
                  project=project,
                  spider=spider,
                  description=description,
                  payload=payload)
        if when != 'now':
            sch_job = schedule_job(self.scheduler, when)
            sch_job.do(lambda: self.schedule_job(
                project, spider, 'now', Actor.SCHEDULER, description, payload))
            self.scheduled_jobs[job.identifier] = sch_job
            job.status = Status.SCHEDULED
            job.schedule = when

        self.log.info('Scheduling: {}'.format(str(job)))
        self.schedule.add_job(job)
        self.dispatch_event(Event.JOB_UPDATE, job)
        return job.identifier

    #---------------------------------------------------------------------------
    def get_jobs(self, job_status):
        """
        See :meth:`Schedule.get_jobs <scrapy_do.schedule.Schedule.get_jobs>`.
        """
        return self.schedule.get_jobs(job_status)

    #---------------------------------------------------------------------------
    def get_active_jobs(self):
        """
        See :meth:`Schedule.get_active_jobs
        <scrapy_do.schedule.Schedule.get_active_jobs>`.
        """
        return self.schedule.get_active_jobs()

    #---------------------------------------------------------------------------
    def get_completed_jobs(self):
        """
        See :meth:`Schedule.get_completed_jobs
        <scrapy_do.schedule.Schedule.get_completed_jobs>`.
        """
        return self.schedule.get_completed_jobs()

    #---------------------------------------------------------------------------
    def get_job(self, job_id):
        """
        See :meth:`Schedule.get_job <scrapy_do.schedule.Schedule.get_job>`.
        """
        return self.schedule.get_job(job_id)

    #---------------------------------------------------------------------------
    def get_job_logs(self, job_id):
        """
        Get paths to job log files.

        :return: A tuple containing paths to out and error logs or `None` if
                 one or both don't exist
        """
        path = os.path.join(self.log_dir, job_id)
        logs = []
        for log in ['out', 'err']:
            log_path = '{}.{}'.format(path, log)
            if os.path.exists(log_path):
                logs.append(log_path)
            else:
                logs.append(None)
        return tuple(logs)

    #---------------------------------------------------------------------------
    def run_scheduler(self):
        """
        Run the `schedule.Scheduler` jobs.
        """
        self.scheduler.run_pending()

    #---------------------------------------------------------------------------
    @inlineCallbacks
    def _run_crawler(self, project, spider, job_id, payload):
        #-----------------------------------------------------------------------
        # Unzip to a temporary directory
        #-----------------------------------------------------------------------
        temp_dir = tempfile.mkdtemp()
        archive = os.path.join(self.project_store, project + '.zip')

        unzip = find_executable('unzip')
        if unzip is None:
            raise EnvironmentError('Please install unzip')

        ret_code = yield getProcessValue(unzip,
                                         args=(archive, ),
                                         path=temp_dir)
        if ret_code != 0:
            shutil.rmtree(temp_dir)
            msg = 'Unable to unzip with {}. Archive corrupted?'.format(unzip)
            self.log.error(msg)
            raise IOError('Cannot unzip the project archive')

        #-----------------------------------------------------------------------
        # Run the crawler
        #-----------------------------------------------------------------------
        temp_proj_dir = os.path.join(temp_dir, project)
        env = {'SPIDER_DATA_DIR': self.spider_data_dir}
        args = ['crawl', spider]
        if payload != '{}':
            args += ['-a', 'payload=' + payload]
        process, finished = run_process('scrapy',
                                        args,
                                        job_id,
                                        self.log_dir,
                                        env=env,
                                        path=temp_proj_dir)

        #-----------------------------------------------------------------------
        # Clean up
        #-----------------------------------------------------------------------
        def clean_up(status):
            shutil.rmtree(temp_dir)
            return status

        finished.addBoth(clean_up)

        returnValue((process, finished))

    #---------------------------------------------------------------------------
    def run_crawlers(self):
        """
        Spawn as many crawler processes out of pending jobs as there are free
        job slots.
        """
        jobs = self.schedule.get_jobs(Status.PENDING)
        jobs.reverse()
        while len(self.running_jobs) < self.job_slots and jobs:
            self.counter_run += 1
            #-------------------------------------------------------------------
            # Run the job
            #-------------------------------------------------------------------
            job = jobs.pop()
            job.status = Status.RUNNING
            self._update_job(job)
            # Use a placeholder until the process is actually started, so that
            # we do not exceed the quota due to races.
            self.running_jobs[job.identifier] = None

            d = self._run_crawler(job.project, job.spider, job.identifier,
                                  job.payload)

            #-------------------------------------------------------------------
            # Error starting the job
            #-------------------------------------------------------------------
            def spawn_errback(error, job):
                self.counter_failure += 1
                job.status = Status.FAILED
                self._update_job(job)
                self.log.error('Unable to start job {}: {}'.format(
                    job.identifier, exc_repr(error.value)))
                del self.running_jobs[job.identifier]

            #-------------------------------------------------------------------
            # Job started successfully
            #-------------------------------------------------------------------
            def spawn_callback(value, job):
                # Put the process object and the finish deferred in the
                # dictionary
                running_job = RunningJob(value[0], value[1], datetime.now())
                self.running_jobs[job.identifier] = running_job
                self.log.info('Job {} started successfully'.format(
                    job.identifier))

                #---------------------------------------------------------------
                # Finish things up
                #---------------------------------------------------------------
                def finished_callback(exit_code):
                    if exit_code == 0:
                        self.counter_success += 1
                        job.status = Status.SUCCESSFUL
                    else:
                        self.counter_failure += 1
                        job.status = Status.FAILED

                    rj = self.running_jobs[job.identifier]
                    job.duration = (datetime.now() - rj.time_started).seconds
                    msg = "Job {} exited with code {}".format(
                        job.identifier, exit_code)
                    self.log.info(msg)
                    self._update_job(job)
                    del self.running_jobs[job.identifier]
                    return exit_code

                value[1].addCallback(finished_callback)

            d.addCallbacks(spawn_callback,
                           spawn_errback,
                           callbackArgs=(job, ),
                           errbackArgs=(job, ))

    #---------------------------------------------------------------------------
    @inlineCallbacks
    def wait_for_starting_jobs(self):
        """
        Wait until all the crawling processes in the job slots started.

        :return: A deferred triggered when all the processes have started
        """
        num_starting = 1  # any non-zero value so the loop body runs at least once
        while num_starting:
            num_starting = 0
            for k, v in self.running_jobs.items():
                if v is None:
                    num_starting += 1
            yield twisted_sleep(0.1)

    #---------------------------------------------------------------------------
    @inlineCallbacks
    def wait_for_running_jobs(self, cancel=False):
        """
        Wait for all the running jobs to finish.

        :param cancel: If `True` send a `SIGTERM` signal to each of the running
                       crawlers
        :return:       A deferred triggered when all the running jobs have
                       finished
        """
        yield self.wait_for_starting_jobs()

        #-----------------------------------------------------------------------
        # Send SIGTERM if requested
        #-----------------------------------------------------------------------
        if cancel:
            for job_id in self.running_jobs:
                rj = self.running_jobs[job_id]
                rj.process.signalProcess('TERM')

        #-----------------------------------------------------------------------
        # Wait for the jobs to finish
        #-----------------------------------------------------------------------
        to_finish = []
        for job_id in self.running_jobs:
            rj = self.running_jobs[job_id]
            to_finish.append(rj.finished_d)

        for d in to_finish:
            yield d

    #---------------------------------------------------------------------------
    @inlineCallbacks
    def cancel_job(self, job_id):
        """
        Cancel a job.

        :param job_id: A string identifier of a job
        :return:       A deferred that is triggered when the job is cancelled
        """
        job = self.schedule.get_job(job_id)
        self.log.info('Canceling: {}'.format(str(job)))

        #-----------------------------------------------------------------------
        # Scheduled
        #-----------------------------------------------------------------------
        if job.status == Status.SCHEDULED:
            job.status = Status.CANCELED
            self._update_job(job)
            self.scheduler.cancel_job(self.scheduled_jobs[job_id])
            del self.scheduled_jobs[job_id]

        #-----------------------------------------------------------------------
        # Pending
        #-----------------------------------------------------------------------
        elif job.status == Status.PENDING:
            job.status = Status.CANCELED
            self._update_job(job)

        #-----------------------------------------------------------------------
        # Running
        #-----------------------------------------------------------------------
        elif job.status == Status.RUNNING:
            while True:
                if job_id not in self.running_jobs:
                    raise ValueError('Job {} is not active'.format(job_id))
                if self.running_jobs[job_id] is None:
                    yield twisted_sleep(0.1)  # wait until the job starts
                else:
                    break
            rj = self.running_jobs[job_id]
            rj.process.signalProcess('KILL')
            yield rj.finished_d
            self.counter_failure -= 1
            self.counter_cancel += 1
            job.status = Status.CANCELED
            job.duration = (datetime.now() - rj.time_started).seconds
            self._update_job(job)

        #-----------------------------------------------------------------------
        # Not active
        #-----------------------------------------------------------------------
        else:
            raise ValueError('Job {} is not active'.format(job_id))

    #---------------------------------------------------------------------------
    def purge_completed_jobs(self):
        """
        Purge all the old jobs exceeding the completed cap.
        """
        old_jobs = self.get_completed_jobs()[self.completed_cap:]

        if len(old_jobs):
            self.log.info('Purging {} old jobs'.format(len(old_jobs)))

        for job in old_jobs:
            self.dispatch_event(Event.JOB_REMOVE, job.identifier)
            self.schedule.remove_job(job.identifier)
            for log_type in ['.out', '.err']:
                log_file = os.path.join(self.log_dir,
                                        job.identifier + log_type)
                if os.path.exists(log_file):
                    os.remove(log_file)

    #---------------------------------------------------------------------------
    def remove_project(self, name):
        """
        Remove the project
        """
        #-----------------------------------------------------------------------
        # Consistency checks
        #-----------------------------------------------------------------------
        if name not in self.projects:
            raise ValueError('No such project: "{}"'.format(name))

        sched_jobs = self.schedule.get_scheduled_jobs(name)
        if len(sched_jobs) != 0:
            msg = 'There are {} scheduled spiders for project "{}"'.format(
                len(sched_jobs), name)
            self.log.info('Failed to remove project "{}": {}'.format(
                name, msg))
            raise ValueError(msg)

        #-----------------------------------------------------------------------
        # Remove the project
        #-----------------------------------------------------------------------
        os.remove(self.projects[name].archive)
        del self.projects[name]
        with open(self.metadata_path, 'wb') as f:
            pickle.dump(self.projects, f)

        self.log.info('Project "{}" removed'.format(name))
        self.dispatch_event(Event.PROJECT_REMOVE, name)

    #---------------------------------------------------------------------------
    def _update_job(self, job):
        self.dispatch_event(Event.JOB_UPDATE, job)
        self.schedule.commit_job(job)

    #---------------------------------------------------------------------------
    def add_event_listener(self, listener):
        """
        Add an event listener.
        """
        self.listeners.add(listener)

    #---------------------------------------------------------------------------
    def remove_event_listener(self, listener):
        """
        Remove the event listener.
        """
        self.listeners.remove(listener)

    #---------------------------------------------------------------------------
    def dispatch_event(self, event_type, event_data):
        """
        Dispatch an event to all the listeners.
        """
        for listener in self.listeners:
            listener(event_type, event_data)

    #---------------------------------------------------------------------------
    def dispatch_periodic_events(self):
        """
        Dispatch periodic events if necessary.
        """
        #-----------------------------------------------------------------------
        # Daemon status - send the event either every minute or whenever
        # the memory usage crossed a megabyte boundary
        #-----------------------------------------------------------------------
        mem_usage = psutil.Process(os.getpid()).memory_info().rss
        mem_usage = float(mem_usage) / 1024. / 1024.
        mem_usage = int(mem_usage)
        now = time.time()
        if self.mem_usage is None or now - self.mem_usage_ts >= 60 or \
                abs(self.mem_usage - mem_usage) >= 1:
            self.mem_usage = mem_usage
            self.mem_usage_ts = now
            self.dispatch_event(Event.DAEMON_STATUS_CHANGE, None)
Example #16
class CaptivePortal:
    def run(self, essid, connect):
        self._schedule = Scheduler()
        self._connect = connect
        self._alive = True
        self._timeout_job = None

        self._ap = network.WLAN(network.AP_IF)
        self._ap.active(True)
        self._ap.config(
            essid=essid)  # You can't set values before calling active(...).

        poller = select.poll()

        addr = self._ap.ifconfig()[0]
        slim_server = self._create_slim_server(poller, essid)
        dns = self._create_dns(poller, addr)

        _logger.info("captive portal web server and DNS started on %s", addr)

        # If no timeout is given `ipoll` blocks and the for-loop goes forever.
        # With a timeout the for-loop exits every time the timeout expires.
        # I.e. the underlying iterable reports that it has no more elements.
        while self._alive:
            # Under the covers polling is done with a non-blocking ioctl call and the timeout
            # (or blocking forever) is implemented with a hard loop, so there's nothing to be
            # gained (e.g. reduced power consumption) by using a timeout greater than 0.
            for (s, event) in poller.ipoll(0):
                # If event has bits other than POLLIN or POLLOUT then print it.
                if event & ~(select.POLLIN | select.POLLOUT):
                    self._print_select_event(event)
                slim_server.pump(s, event)
                dns.pump(s, event)

            slim_server.pump_expire()  # Expire inactive client sockets.
            self._schedule.run_pending()

        slim_server.shutdown(poller)
        dns.shutdown(poller)

        self._ap.active(False)

    def _create_slim_server(self, poller, essid):
        # See the captive portal notes in docs/captive-portal.md for why we redirect not-found
        # URLs and why we redirect them to an absolute URL (rather than a path like "/").
        # `essid` is used as the target host but any name could be used, e.g. "wifi-setup".
        config = SlimConfig(not_found_url="http://{}/".format(essid))

        slim_server = SlimServer(poller, config=config)

        # fmt: off
        slim_server.add_module(
            WebRouteModule([
                RegisteredRoute(HttpMethod.GET, "/api/access-points",
                                self._request_access_points),
                RegisteredRoute(HttpMethod.POST, "/api/access-point",
                                self._request_access_point),
                RegisteredRoute(HttpMethod.POST, "/api/alive",
                                self._request_alive)
            ]))
        # fmt: on

        root = self._get_relative("www")
        # fmt: off
        slim_server.add_module(
            FileserverModule(
                {
                    "html": "text/html",
                    "css": "text/css",
                    "js": "application/javascript",
                    "woff2": "font/woff2",
                    "ico": "image/x-icon",
                    "svg": "image/svg+xml"
                }, root))
        # fmt: on

        return slim_server

    # Find a file, given a path relative to the directory containing this `.py` file.
    @staticmethod
    def _get_relative(filename):
        return join(dirname(__file__), filename)

    @staticmethod
    def _create_dns(poller, addr):
        addr_bytes = MicroDNSSrv.ipV4StrToBytes(addr)

        def resolve(name):
            _logger.info("resolving %s", name)
            return addr_bytes

        return MicroDNSSrv(resolve, poller)

    def _request_access_points(self, request):
        # Tuples are of the form (SSID, BSSID, channel, RSSI, authmode, hidden).
        points = [(p[0], p[3], p[4]) for p in self._ap.scan()]
        request.Response.ReturnOkJSON(points)

    def _request_access_point(self, request):
        data = request.GetPostedURLEncodedForm()
        _logger.debug("connect request data %s", data)
        ssid = data.get("ssid", None)
        if not ssid:
            request.Response.ReturnBadRequest()
            return

        password = data.get("password", None)

        result = self._connect(ssid, password)
        if not result:
            request.Response.ReturnForbidden()
        else:
            request.Response.ReturnOkJSON({"message": result})

    def _request_alive(self, request):
        data = request.GetPostedURLEncodedForm()
        timeout = data.get("timeout", None)
        if not timeout:
            request.Response.ReturnBadRequest()
            return

        _logger.debug("timeout %s", timeout)
        timeout = int(timeout) + self._TOLERANCE
        if self._timeout_job:
            self._schedule.cancel_job(self._timeout_job)
        self._timeout_job = self._schedule.every(timeout).seconds.do(
            self._timed_out)

        request.Response.Return(self._NO_CONTENT)

    # If a client specifies a keep-alive period of Xs then they must ping again within Xs plus a fixed "tolerance".
    _TOLERANCE = 1
    _NO_CONTENT = 204

    def _timed_out(self):
        _logger.info("keep-alive timeout expired.")
        self._alive = False
        self._timeout_job = None
        return CancelJob  # Tell scheduler that we want one-shot behavior.

    _POLL_EVENTS = {
        select.POLLIN: "IN",
        select.POLLOUT: "OUT",
        select.POLLHUP: "HUP",
        select.POLLERR: "ERR",
    }

    def _print_select_event(self, event):
        mask = 1
        while event:
            if event & 1:
                _logger.info("event %s", self._POLL_EVENTS.get(mask, mask))
            event >>= 1
            mask <<= 1
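
Returning `schedule.CancelJob` from a job function unschedules it after its first run, which is how `_timed_out` gets its one-shot semantics. In isolation:

from schedule import CancelJob, Scheduler

scheduler = Scheduler()

def fire_once():
    print("timeout expired")
    return CancelJob  # the scheduler drops this job after it has run

scheduler.every(30).seconds.do(fire_once)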
Example #17
class Polling(AsyncPull, AsyncPullNowMixin):
    """
    Base class for polling plugins.

    You may specify duration literals such as 60 (60 seconds), 1m or 1h to realize
    periodic polling, or a cron expression (e.g. `*/1 * * * *` for every minute) to
    realize cron-like behaviour.
    """
    __REPR_FIELDS__ = ['interval', 'is_cron']

    def __init__(
        self, interval: Optional[DurationLiteral] = 60, instant_run: bool = False, **kwargs: Any
    ):
        super().__init__(**kwargs)

        self._assert_polling_compat()

        if interval is None:
            # No scheduled execution. Use endpoint `/trigger` of api to execute.
            self._poll_interval = None
            self.interval = None
            self.is_cron = False
        else:
            try:
                # Literals such as 60s, 1m, 1h, ...
                self._poll_interval = parse_duration_literal(interval)
                self.interval = interval
                self.is_cron = False
            except TypeError:
                # ... or a cron-like expression is valid
                from cronex import CronExpression  # type: ignore
                self._cron_interval = CronExpression(interval)
                self.interval = self._cron_interval
                self.is_cron = True

        self._is_running = False
        self._scheduler: Optional[Scheduler] = None
        self._instant_run = try_parse_bool(instant_run, False)

    def _assert_polling_compat(self) -> None:
        self._assert_abstract_compat((SyncPolling, AsyncPolling))
        self._assert_fun_compat('_poll')

    async def _pull(self) -> None:
        def _callback() -> None:
            loop = asyncio.get_event_loop()
            if loop.is_running():
                asyncio.ensure_future(self._run_schedule())

        self._scheduler = Scheduler()
        self._configure_scheduler(self._scheduler, _callback)

        if self._instant_run:
            self._scheduler.run_all()

        while not self.stopped:
            self._scheduler.run_pending()
            await self._sleep(0.5)

        while self._is_running:  # Keep the loop alive until the job is finished
            await asyncio.sleep(0.1)

    async def _pull_now(self) -> None:
        await self._run_now()

    async def _run_now(self) -> Payload:
        """Runs the poll right now. It will not run, if the last poll is still running."""
        if self._is_running:
            self.logger.warning("Polling job is still running. Skipping current run")
            return

        self._is_running = True
        try:
            payload = await self.poll()

            if payload is not None:
                self.notify(payload)

            return payload
        finally:
            self._is_running = False

    async def _run_schedule(self) -> None:
        try:
            if self.is_cron:
                dtime = datetime.now()
                if not self._cron_interval.check_trigger((
                        dtime.year, dtime.month, dtime.day,
                        dtime.hour, dtime.minute
                )):
                    return  # It is not the time for the cron to trigger

            await self._run_now()
        except StopPollingError:
            await self._stop()
        except Exception:  # pragma: no cover, pylint: disable=broad-except
            self.logger.exception("Polling of '%s' failed", self.name)

    def _configure_scheduler(self, scheduler: Scheduler, callback: Callable[[], None]) -> None:
        """
        Configures the scheduler. You have to differ between "normal" intervals and
        cron like expressions by checking `self.is_cron`.

        Override in subclasses to fit the behaviour to your needs.

        Args:
            scheduler (schedule.Scheduler): The actual scheduler.
            callback (callable): The callback to call when the time is right.

        Returns:
            None
        """
        if self.is_cron:
            # Scheduler always executes at the exact minute to check for cron triggering
            scheduler.every().minute.at(":00").do(callback)
        else:
            # Only activate when an interval is specified
            # If not the only way is to trigger the poll by the api `trigger` endpoint
            if self._poll_interval:
                # Scheduler executes every interval seconds to execute the poll
                scheduler.every(self._poll_interval).seconds.do(callback)

    async def poll(self) -> Payload:
        """Performs polling."""
        poll_fun = getattr(self, '_poll')
        if inspect.iscoroutinefunction(poll_fun):
            return await poll_fun()
        return await run_sync(poll_fun)
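
The cron gating relies on `cronex.CronExpression.check_trigger`, which takes a (year, month, day, hour, minute) tuple. Standalone, under the same assumption, it looks like this (`run_poll` is hypothetical):

from datetime import datetime
from cronex import CronExpression

cron = CronExpression("*/5 * * * *")  # every five minutes
now = datetime.now()
if cron.check_trigger((now.year, now.month, now.day, now.hour, now.minute)):
    run_poll()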
Example #18
class Updater(Thread):
    def __init__(self):
        Thread.__init__(self, name="Updater")
        self.logger = logging.getLogger(self.getName())
        print("Thread started {}: {}".format(self.__class__, "Updater"))
        self.communication_queue = deque(tuple(), 512)
        self.scheduler = Scheduler()
        self.scheduler.every(12).hours.do(self.go)
        # self.scheduler.every(30).minutes.do(self.upload_log)
        self.stopper = Event()
        self.sshkey = SSHManager()
        self.identifiers = set()
        self.temp_identifiers = set()
        self.setupmqtt()

    def mqtt_on_message(self, *args):
        message = args[-1]
        payload = message.payload.decode("utf-8").strip()
        self.logger.debug("topic: {} payload: {}".format(message.topic, payload))
        if message.topic == "rpi/{}/operation".format(SysUtil.get_machineid()):
            if payload == "UPDATECONF":
                self.go()
            if payload == "REBOOT":
                SysUtil.reboot()

    def mqtt_on_connect(self, client, *args):
        self.logger.debug("Subscribing to rpi/{}/operation".format(SysUtil.get_machineid()))
        self.mqtt.subscribe("rpi/{}/operation".format(SysUtil.get_machineid()), qos=1)

    def setupmqtt(self):
        self.mqtt = client.Client(client_id=client_id,
                                  clean_session=True,
                                  protocol=client.MQTTv311,
                                  transport="tcp")

        self.mqtt.on_message = self.mqtt_on_message
        self.mqtt.on_connect = self.mqtt_on_connect

        try:
            with open("mqttpassword") as f:
                self.mqtt.username_pw_set(username=SysUtil.get_hostname()+"-Updater",
                                          password=f.read().strip())
        except FileNotFoundError:
            auth = SSHManager().sign_message_PSS(datetime.datetime.now().replace(tzinfo=timezone).isoformat())
            if not auth:
                raise ValueError
            self.mqtt.username_pw_set(username=SysUtil.get_machineid(),
                                      password=auth)
        except:
            self.mqtt.username_pw_set(username=SysUtil.get_hostname()+"-Updater",
                                      password="******")

        self.mqtt.connect_async("10.9.0.1", port=1883)

        self.mqtt.loop_start()

    def updatemqtt(self, parameter: str, message: bytes):
        # update mqtt
        self.logger.debug("Updating mqtt")
        message = self.mqtt.publish(payload=message,
                                    topic="rpi/{}/status/{}".format(
                                        SysUtil.get_machineid(),
                                        parameter),
                                    qos=1)
        time.sleep(0.5)
        if not message.is_published():
            self.mqtt.loop_stop()
            self.mqtt.loop_start()

    def upload_logs(self):
        """
        uploads rotated logs to the server.
        :return:
        """
        isonow = SysUtil.get_isonow()
        validation_msg = isonow + "," + self.sshkey.sign_message(isonow)
        logs_fp = SysUtil.get_log_files()
        files = {l: open(l, 'rb') for l in logs_fp}
        a = requests.post("https://{}/raspberrypi{}/logs",
                          data={"sig_msg": isonow, "signature": validation_msg},
                          files=files)

        # clear log files if 200 returned
        if a.status_code == 200:
            SysUtil.clear_files(logs_fp)

    def add_to_identifiers(self, identifier: str):
        """
        adds an identifier to the set of identifiers.
        :param identifier: identifier to add
        :return:
        """
        self.logger.debug("Adding {} to list of permanent identifiers.".format(identifier))
        self.identifiers.add(identifier)

    def add_to_temp_identifiers(self, temp_identifier: str):
        """
        adds an identifier to the set of temporary identifiers. that may disappear
        :param temp_identifier: identifier to add
        :return:
        """
        self.logger.debug("Adding {} to list of transient identifiers.".format(temp_identifier))
        self.temp_identifiers.add(temp_identifier)

    def go(self):
        try:
            # try:
            #     with open("/etc/openvpn/client/login.conf", 'wb') as f:
            #         f.write(bytes(SysUtil.get_hostname(), "utf-8")+b"\n")
            #         f.write(self.sshkey.sign_message_PSS_b64(SysUtil.get_hostname()))
            #     r = requests.get("https://gist.githubusercontent.com/gdunstone/e2d009fd6169c1b675bf9be6277f13d2/raw/fe8796b70f1068c332a0e97d5d781659bca3b983/vpn.conf")
            #     if r.status_code == 200:
            #         with open("/etc/openvpn/client/vpn.conf", 'wb') as f:
            #             for chunk in r:
            #                 f.write(chunk)
            # except:
            #     self.logger.error("Couldnt write /etc/openvpn/client/login.conf")

            data = self.gather_data()
            data["signature"] = self.sshkey.sign_message(json.dumps(data, sort_keys=True))

            uri = api_endpoint.format(SysUtil.get_machineid())
            response = requests.patch(uri, json=data)
            # do backwards change if response is valid later.
            # safe_load avoids PyYAML's missing-Loader warning/error
            current_config = yaml.safe_load(open("/home/spc-eyepi/{}.yml".format(SysUtil.get_hostname()))) or dict()

            if response.status_code == 200:
                # do config modify/parse of command here.
                data = response.json()
                for key, value in data.copy().items():
                    if value == {}:
                        del data[str(key)]

                if "chamber" in data.keys():
                    chamberconf = current_config.get("chamber", {})
                    newchamberconf = data.get("chamber", dict()) or dict()
                    datafile_uri = newchamberconf.get("datafile_uri", None)
                    if chamberconf.get("datafile_md5") != newchamberconf.get("datafile_md5") and datafile_uri:
                        req = requests.get("https://traitcapture.org{}".format(datafile_uri))
                        if req.ok:
                            fn = "{}.csv".format(SysUtil.get_hostname())
                            with open(fn, 'w') as f:
                                f.write(req.text)
                            data['chamber']['datafile'] = fn
                        else:
                            self.logger.warning("Couldnt download new solarcalc file. {}".format(req.reason))

                thed = data.pop("cameras", [])
                data['cameras'] = {}
                for cam in thed:
                    cam['output_dir'] = "/home/images/{}".format(cam['identifier'])
                    data['cameras'][cam['identifier']] = cam

                if len(data) > 0:
                    SysUtil.write_global_config(data)
            else:
                self.logger.error("Unable to authenticate with the server.")

        except Exception as e:
            traceback.print_exc()
            self.logger.error("Error collecting data to post to server: {}".format(str(e)))
            self.logger.error(traceback.format_exc())

    def process_deque(self, cameras=None):
        if not cameras:
            cameras = dict()
        while len(self.communication_queue):
            item = self.communication_queue.pop()
            c = cameras.get(item['identifier'], None)
            if not c:
                cameras[item['identifier']] = item
                continue

            if item.get("last_capture", 0) > c.get("last_capture", 0):
                cameras[item['identifier']].update(item)

            if item.get("last_upload", 0) > c.get("last_upload", 0):
                cameras[item['identifier']].update(item)
        return cameras

    def gather_data(self):
        free_mb, total_mb = SysUtil.get_fs_space_mb()
        onion_address, cookie_auth, cookie_client = SysUtil.get_tor_host()

        # cameras = SysUtil.configs_from_identifiers(self.identifiers | self.temp_identifiers)
        self.logger.debug("Announcing for {}".format(str(list(self.identifiers | self.temp_identifiers))))
        conf = yaml.safe_load(open("{}.yml".format(SysUtil.get_hostname()))) or dict()
        cameras = conf.get("cameras", dict())

        camera_data = dict(
            meta=dict(
                version=SysUtil.get_version(),
                machine=SysUtil.get_machineid(),
                internal_ip=SysUtil.get_internal_ip(),
                external_ip=SysUtil.get_external_ip(),
                hostname=SysUtil.get_hostname(),
                onion_address=onion_address,
                client_cookie=cookie_auth,
                onion_cookie_client=cookie_client,
                free_space_mb=free_mb,
                total_space_mb=total_mb
            ),
            cameras=self.process_deque(cameras=cameras),
        )
        return camera_data

    def stop(self):
        self.stopper.set()

    def run(self):
        while not self.stopper.is_set():
            self.scheduler.run_pending()
            time.sleep(1)
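A minimal driver for this thread, assuming the module it lives in (SysUtil, SSHManager, the MQTT broker, and so on) imports cleanly; a sketch, not part of the original project:

import time

updater = Updater()    # wires up the scheduler (go() every 12 hours) and MQTT
updater.start()        # run() ticks run_pending() once per second
try:
    time.sleep(60)     # let the scheduler run for a minute
finally:
    updater.stop()     # sets the stopper Event
    updater.join()     # run() exits on its next loop iteration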
Ejemplo n.º 19
0
class Crontab:
    def __init__(self):
        self.scheduler = Scheduler()
        self.funcs_time_attrs = []
        self.timer_type_map = {
            's': 'seconds',
            'm': 'minutes',
            'h': 'hours',
            'd': 'days',
            'w': 'weeks',
            'mon': 'monday',
            'tue': 'tuesday',
            'wed': 'wednesday',
            'thu': 'thursday',
            'fri': 'friday',
            'sat': 'saturday',
            'sun': 'sunday'
        }

    def every(self, timer_value, timer_type, timer_concrete=None):
        """
        Decorate function change to time function.
        """
        def decorator(func):
            nonlocal timer_value
            nonlocal timer_type

            if not isinstance(timer_value, str):
                raise Exception("timer_value should be str!")
            # plain values become ints; range values such as '3-5' stay strings
            if '-' not in timer_value:
                timer_value = int(timer_value)

            timer_type_list = list(chain(*self.timer_type_map.items()))

            if timer_type in timer_type_list:
                if isinstance(timer_value, int) and timer_concrete is None:
                    func_time_dict = {func: timer_value}
                    if self.timer_type_map.get(timer_type):
                        attr_name = 'funcs_%s_timer' % self.timer_type_map[
                            timer_type]
                    else:
                        attr_name = 'funcs_%s_timer' % timer_type
                elif isinstance(timer_value,
                                int) and timer_concrete is not None:
                    func_time_dict = {func: [timer_value, timer_concrete]}
                    if self.timer_type_map.get(timer_type):
                        attr_name = 'funcs_%s_concrete_timer' % self.timer_type_map[
                            timer_type]
                    else:
                        attr_name = 'funcs_%s_concrete_timer' % timer_type
                elif isinstance(timer_value, str) and timer_concrete is None:
                    func_time_dict = {func: timer_value}
                    if self.timer_type_map.get(timer_type):
                        attr_name = 'funcs_%s_random_timer' % self.timer_type_map[
                            timer_type]
                    else:
                        attr_name = 'funcs_%s_random_timer' % timer_type
                elif isinstance(timer_value,
                                str) and timer_concrete is not None:
                    func_time_dict = {func: [timer_value, timer_concrete]}
                    if self.timer_type_map.get(timer_type):
                        attr_name = 'funcs_%s_random_concrete_timer' % self.timer_type_map[
                            timer_type]
                    else:
                        attr_name = 'funcs_%s_random_concrete_timer' % timer_type
                else:
                    raise Exception('parameter error!')

                if hasattr(self, attr_name):
                    attr_name_func_dict = getattr(self, attr_name)
                    attr_name_func_dict.update(func_time_dict)
                    setattr(self, attr_name, attr_name_func_dict)
                else:
                    setattr(self, attr_name, func_time_dict)
                self.funcs_time_attrs.append(attr_name)
            else:
                raise Exception('timer_type error!')

            # return the function unchanged so the decorated name stays callable
            return func

        return decorator

    def load_time_funcs(self, attr_name):
        """
        Load the time type but not specific exact time function.
        """
        for func, value in getattr(self, attr_name).items():
            time_type = attr_name.split('_')[1]
            getattr(self.scheduler.every(value), time_type).do(job_func=func)

    def load_concrete_time_funcs(self, attr_name):
        """
        Load the time type and specific exact time function.
        """
        for func, value in getattr(self, attr_name).items():
            time_type = attr_name.split('_')[1]
            getattr(self.scheduler.every(value[0]),
                    time_type).at(value[1]).do(job_func=func)

    def load_random_time_funcs(self, attr_name):
        """
        Load the time range but not specific exact time function.
        """
        for func, value in getattr(self, attr_name).items():
            v_list = value.split('-')
            left_value = int(v_list[0])
            right_value = int(v_list[-1])
            time_type = attr_name.split('_')[1]
            getattr(self.scheduler.every(left_value),
                    time_type).to(right_value).do(job_func=func)

    def load_random_concrete_time_funcs(self, attr_name):
        """
        Load the time range and specific exact time function.
        """
        for func, value in getattr(self, attr_name).items():
            # value is [range_string, concrete_time], e.g. ['3-5', '10:30'];
            # the original split the list itself, which raises AttributeError
            v_list = value[0].split('-')
            left_value = int(v_list[0])
            right_value = int(v_list[-1])
            time_type = attr_name.split('_')[1]
            getattr(self.scheduler.every(left_value),
                    time_type).to(right_value).at(value[1]).do(job_func=func)

    def load_scheduler_funcs(self):
        """
        Load all function decorated.
        """
        for attr_name in self.funcs_time_attrs:
            if attr_name.endswith('_random_concrete_timer'):
                self.load_random_concrete_time_funcs(attr_name)
            elif attr_name.endswith('_random_timer'):
                self.load_random_time_funcs(attr_name)
            elif attr_name.endswith('_concrete_timer'):
                self.load_concrete_time_funcs(attr_name)
            elif attr_name.endswith('_timer'):
                self.load_time_funcs(attr_name)

    def run_all_jobs(self):
        """
        Run all function once immediately.
        """
        self.load_scheduler_funcs()
        self.scheduler.run_all()

    def job_start(self):
        """
        Scheduler into pending.
        """
        while True:
            self.scheduler.run_pending()
            time.sleep(self.interval)

    def run(self, interval=0):
        """
        Run all tasks.
        """
        if not isinstance(interval, (int, float)):
            raise TypeError('interval should be int or float.')
        self.interval = interval
        self.load_scheduler_funcs()
        self.job_start()
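A usage sketch for this decorator API; the job functions below are made up for illustration:

crontab = Crontab()

@crontab.every('10', 's')           # every 10 seconds
def tick():
    print('tick')

@crontab.every('1', 'd', '10:30')   # every day at 10:30
def daily_report():
    print('daily report')

@crontab.every('3-5', 'm')          # every 3 to 5 minutes, randomized
def jitter():
    print('jittered job')

crontab.run(interval=1)             # loads all jobs, then blocks in run_pending()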
Ejemplo n.º 20
0
class DailyReportJob(object):
    def __init__(self, db_engine, smtp_configs):
        self.scheduler = Scheduler()
        self.timer = None

        self.db_engine = db_engine

        self.pool = MailSMTPPool()
        for server, user, password in smtp_configs:
            self.pool.add_resource(server, user, password)

        self.administration = False
        self.monitor = {0: False, 1: False}
        self.waiting = 0
        self.executor = ThreadPoolExecutor(max_workers=2)

    def start(self):
        x = datetime.datetime.now()
        # round up to the next quarter-hour; integer division and %02d keep
        # the 'HH:MM' format that schedule's .at() expects
        time_str = '%02d:%02d' % (x.hour, (x.minute // 15 + 1) * 15 % 60)
        self.scheduler.every().day.at(time_str).do(self.start_schedule)

        t = Thread(target=self.run_schedule)
        t.name = 'SchedRpt'
        t.daemon = True
        t.start()
        self.timer = t

    def run_schedule(self):
        while True:
            self.scheduler.run_pending()
            time.sleep(1)

    def start_schedule(self):
        logger.info('Schedule Email Report Job every 15 minutes ...')
        self.scheduler.every(15).minutes.do(self.renew_waiting)
        return CancelJob

    def renew_waiting(self):
        if self.pool.total == 0:
            self.administration = True

        if not self.administration:
            self.executor.submit(self.send_report, self.waiting)
            self.waiting = (self.waiting + 1) % 2

    def send_report(self, running):
        self.monitor[running] = False
        self.monitor[(running + 1) % 2] = True

        session_type = sessionmaker(bind=self.db_engine)
        session = session_type()

        current = int(time.time())
        try:
            for user in session.query(WebUser).filter(
                    WebUser.prefer > 0,
                    current >= WebUser.prefer).order_by(WebUser.prefer).all():
                if self.monitor[running]:
                    logger.debug('Another Round is started, stop ...')
                    break

                if user.email is not None and user.email != '':
                    logger.debug('Send Report for %s' % user.account)

                    info = u''
                    for site in site_helper:
                        _, _, job_model = site_helper[site]
                        for query in session.query(job_model).filter_by(
                                owner_id=user.id).all():
                            info += query.memo

                    if self.pool.send(user.email, info):
                        session.query(WebUser).filter_by(id=user.id).update({
                            'last':
                            int(time.time()),
                            'prefer':
                            WebUser.prefer + 24 * 3600
                        })
                        session.commit()
        finally:
            session.close()
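The start() method above rounds "now" up to the next quarter-hour mark before scheduling; a standalone sketch of that arithmetic (next_quarter_hour is a hypothetical helper, not part of the original class):

import datetime

def next_quarter_hour(now):
    # 9:07 -> '09:15'; note that 9:52 wraps to '09:00' because the hour is
    # left unchanged, mirroring DailyReportJob.start
    return '%02d:%02d' % (now.hour, (now.minute // 15 + 1) * 15 % 60)

print(next_quarter_hour(datetime.datetime(2023, 1, 1, 9, 7)))   # 09:15
print(next_quarter_hour(datetime.datetime(2023, 1, 1, 9, 52)))  # 09:00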
Ejemplo n.º 21
0
Archivo: trj.py Proyecto: simonvpe/trj
        )
            

credentials = Credentials(username, password)
model       = Model(config_login, config_repo, config_branch, config_filename, credentials)

cfg         = json.loads(model.config.data)
plugins     = cfg['plugins'].keys()
args        = cfg['plugins'].values()

scheduler = Scheduler()

for name,arg in zip(plugins, args):
    data_filename = os.path.join(model.uuid, name)
    data_file     = Datastore(model.data_repo, data_filename)
    plug_filename = name + ".py"
    plug_file     = Datastore(model.plugin_repo, plug_filename)
    plug = Plugin(plug_file, data_file, name)

    # Scheduled for running according to some interval
    schedule_string = arg.get('schedule', None)
    if schedule_string is not None:
        exec "scheduler.%s.do(plug.run, arg)" % schedule_string

    # Always run immediately
    plug.run(arg)

while True:
    scheduler.run_pending()
    sleep(1)
Ejemplo n.º 22
0
import time

from schedule import Scheduler

scheduler = Scheduler()
scheduler.every(1).second.do(lambda: print('Executed'))

try:
    while True:
        scheduler.run_pending()
        time.sleep(0.5)  # avoid a busy-wait between ticks
except KeyboardInterrupt:
    pass
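Because every Scheduler instance keeps its own job list, several of them can tick independently from one loop; a minimal sketch along the same lines:

import time
from schedule import Scheduler

fast = Scheduler()
slow = Scheduler()
fast.every(1).second.do(lambda: print('fast'))
slow.every(5).seconds.do(lambda: print('slow'))

try:
    while True:
        fast.run_pending()   # each instance only runs its own jobs
        slow.run_pending()
        time.sleep(0.5)
except KeyboardInterrupt:
    pass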
Ejemplo n.º 23
0
class DailyCheckinJob(object):
    check_point = '00:00'

    def __init__(self, db_engine, max_workers=100):
        # Mark if background jobs already running
        self.working = False

        self.administration = {}
        self.status = {}
        for site in site_helper:
            self.administration[site] = True
            self.status[site] = False

        self.db_engine = db_engine

        # Period Schedule
        self.timer = None
        self.scheduler = Scheduler()

        # Query necessary accounts for checkin jobs (Flow Control)
        self.commander = ThreadPoolExecutor(max_workers=2 * len(site_helper) + 2)
        # ThreadPool for checkin jobs running
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        # Exclude the thread for handle_process_queue and handle_result_queue
        self.batch = max_workers // len(site_helper)  # integer batch size per site

        self.process_queue = queue.Queue()  # Py3 stdlib module (assumes `import queue`)
        self.result_queue = queue.Queue()

    def start(self):
        if self.working:
            logger.debug('The checkin background jobs already started...')
            return

        minute = self.check_point
        logger.debug('Schedule every hour at %s...' % minute)
        self.scheduler.every().hour.at(minute).do(self.renew_waiting)

        t = Thread(target=self.run_schedule)
        t.name = 'SchedJob'
        t.daemon = True
        t.start()
        self.timer = t

        t = Thread(target=self.run_trigger)
        t.name = 'FirstRun'
        t.daemon = True
        t.start()

        logger.info('Started checkin jobs ...')
        self.working = True

    def run_schedule(self):
        while True:
            self.scheduler.run_pending()
            time.sleep(1)

    def run_trigger(self):
        self.commander.submit(self.handle_process_queue)
        self.commander.submit(self.handle_result_queue)

        logger.debug('Trigger First Retry ...')
        for site in site_helper:
            if not self.check_administration(site):
                self.commander.submit(self.produce, site, action='RETRY')
                self.administration[site] = False

    def renew_waiting(self):
        silence = random.randrange(5 * 60, 10 * 60)

        for site in site_helper:
            if not self.administration[site]:
                if (datetime.utcnow().hour + site_helper[site][0]) % 24 == 0:
                    logger.debug(
                        '[%s] Delay %s seconds to close session for today and start new session ...'
                        % (site, silence))
                    self.commander.submit(self.produce,
                                          site,
                                          action='NORMAL',
                                          delay=silence)
                else:
                    self.commander.submit(self.produce, site, action='RETRY')
            else:
                logger.debug(
                    '[%s] Under administration, skip loading data ...' % site)

    def produce(self, site, action='Normal', delay=0):
        action = action.upper()

        if action == 'NORMAL':
            self.status[site] = False  # Close another thread for today

            # Waiting for last piece of thread doing this job closed
            # Waiting for site to change another day's section
            time.sleep(delay)
        elif self.status[site]:
            logger.debug(
                '[%s] Another thread is working with retried accounts ...' %
                site)
            return

        session_type = sessionmaker(bind=self.db_engine)
        session = session_type()

        offset = 0
        try:
            self.status[site] = True
            while self.status[site]:
                timezone, _, job_model = site_helper[site]

                if action == 'NORMAL':
                    prepare = session.query(job_model).limit(
                        self.batch).offset(offset).all()
                elif action == 'RETRY':
                    current = int(time.time())
                    # integer division truncates to the day boundary in the
                    # site's local timezone, then shifts back to a UTC epoch
                    today_begin4checkin = (
                        ((current + timezone * 3600) //
                         (24 * 3600)) * 24 * 3600) - timezone * 3600
                    prepare = session.query(job_model).filter(
                        job_model.last_success < today_begin4checkin).limit(
                            self.batch).offset(offset).all()

                total = len(prepare)

                if total > 0:
                    logger.info('[%s] Batch read %s accounts ...' %
                                (site, total))
                    for user in prepare:
                        self.process_queue.put(
                            (site, user.account, user.cookie, user.passwd))

                if total < self.batch:
                    self.status[site] = False
                else:
                    offset += self.batch

                if offset != 0:
                    time.sleep(2)
        finally:
            session.close()
            self.status[site] = False
            logger.debug('[%s] Finish scanning records ...' % site)

    def checkin(self, site, account, cookie, password):
        days = None  # Clean result for each request
        expired = False

        _, job_class, _ = site_helper[site]
        request = job_class()
        try:
            if cookie is not None:
                logger.debug('[%s] Using cookie for %s' % (site, account))
                days = request.checkin(cookie)

                if days is None and request.result and 'error' not in request.result:
                    expired = True

            if request.result is None or expired is True:
                if password is not None:  # Try if password stored
                    logger.debug('[%s] Using password for %s' %
                                 (site, account))
                    resp = request.login(account, password)
                    if resp is None:
                        logger.debug('[%s] Login with password for %s' %
                                     (site, account))
                        days = request.checkin()

            if days is not None:
                cookie = request.dump_cookie()

            self.result_queue.put({
                'site': site,
                'account': account,
                'checkin': days,
                'expired': expired,
                'dump': cookie,
            })

            request.clear_cookie()
        except Exception as e:
            logger.debug(e)
            logger.error(
                '[%s] Error happened while processing user: %s, skip to next...'
                % (site, account))
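The RETRY branch above computes midnight in a site's fixed UTC-offset timezone with integer arithmetic on epoch seconds; a standalone sketch (day_begin_epoch is a hypothetical name for that expression):

import time

def day_begin_epoch(current, tz_hours):
    # shift into local time, truncate to a whole day, shift back to UTC
    day = 24 * 3600
    return ((current + tz_hours * 3600) // day) * day - tz_hours * 3600

now = int(time.time())
print(day_begin_epoch(now, 8))   # most recent local midnight for UTC+8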
Ejemplo n.º 24
0
# ----------------------------------------------------------------------

while True:
    for (s, event) in poller.ipoll(0):
        if s == sys.stdin:
            line = sys.stdin.readline()
            print(line)
            process(line)
            prompt()
#        elif s == server_socket:
#            if event == select.POLLIN:
#                client_socket, client_addr = server_socket.accept()
#                print("Received connection from {}".format(client_addr))
#                websocket_helper.server_handshake(client_socket)
#                ws = websocket.websocket(client_socket, True)
#                print(dir(ws))
#                # poller.register doesn't complain if you register ws but it fails when you call ipoll.
#                poller.register(client_socket, select.POLLIN)
#                clients[client_socket.fileno()] = ws
#            else:
#                print("Got {} event on server socket".format(event))
        elif isinstance(s, socket.socket) and s.fileno() in clients:
            ws = clients[s.fileno()]
            line = ws.readline().decode("utf-8")
            print(line)
            process(line)
        else:
            slim_server.pump(s, event)
    slim_server.pump_expire()
    _schedule.run_pending()
Ejemplo n.º 25
0
class Updater(Thread):
    def __init__(self):
        Thread.__init__(self, name="Updater")
        self.logger = logging.getLogger(self.getName())
        self.communication_queue = deque(tuple(), 512)
        self.scheduler = Scheduler()
        self.scheduler.every(60).seconds.do(self.go)
        # self.scheduler.every(30).minutes.do(self.upload_logs)
        self.stopper = Event()
        self.sshkey = SSHManager()
        self.identifiers = set()
        self.temp_identifiers = set()

    def upload_logs(self):
        """
        uploads rotated logs to the server.
        :return:
        """
        isonow = SysUtil.get_isonow()
        validation_msg = isonow + "," + self.sshkey.sign_message(isonow)
        logs_fp = SysUtil.get_log_files()
        files = {l: open(l, 'rb') for l in logs_fp}
        a = requests.post("https://{}/raspberrypi{}/logs",
                          data={"sig_msg": isonow, "signature": validation_msg},
                          files=files)

        # clear log files if 200 returned
        if a.status_code == 200:
            SysUtil.clear_files(logs_fp)

    def add_to_identifiers(self, identifier: str):
        """
        adds an identifier to the set of identifiers.
        :param identifier: identifier to add
        :return:
        """
        self.logger.debug("Adding {} to list of permanent identifiers.".format(identifier))
        self.identifiers.add(identifier)

    def add_to_temp_identifiers(self, temp_identifier: str):
        """
        adds an identifier to the set of temporary identifiers. that may disappear
        :param temp_identifier: identifier to add
        :return:
        """
        self.logger.debug("Adding {} to list of transient identifiers.".format(temp_identifier))
        self.temp_identifiers.add(temp_identifier)

    def go(self):
        try:
            data = self.gather_data()
            data["signature"] = self.sshkey.sign_message(json.dumps(data, sort_keys=True))
            uri = 'https://{}/api/camera/check-in/{}'.format(remote_server, SysUtil.get_machineid())
            response = requests.post(uri, json=data)
            # do backwards change if response is valid later.
            try:
                if response.status_code == 200:
                    # do config modify/parse of command here.
                    data = response.json()
                    for key, value in data.copy().items():
                        if value == {}:
                            del data[str(key)]
                    if len(data) > 0:
                        self.set_config_data(data)
                else:
                    self.logger.error("Unable to authenticate with the server.")
            except Exception as e:
                self.logger.error("Error getting data from config/status server: {}".format(str(e)))

        except Exception as e:
            self.logger.error("Error collecting data to post to server: {}".format(str(e)))

    def set_config_data(self, data: dict):
        for identifier, update_data in data.items():
            # don't rewrite empty values...
            if not len(update_data):
                continue

            if identifier == "meta":
                hostname = update_data.get("hostname", None)
                if hostname:
                    SysUtil.set_hostname(hostname)
                if update_data.get("update", False):
                    SysUtil.update_from_git()

            config = SysUtil.ensure_config(identifier)
            sections = set(config.sections()).intersection(set(update_data.keys()))
            for section in sections:
                update_section = update_data[section]
                options = set(config.options(section)).intersection(set(update_section.keys()))
                for option in options:
                    config.set(section, option, str(update_section[option]))

            SysUtil.write_config(config, identifier)

    def set_yaml_data(self, data):
        pass

    def process_deque(self, cameras=None):
        if not cameras:
            cameras = dict()
        while len(self.communication_queue):
            item = self.communication_queue.pop()
            c = cameras.get(item['identifier'], None)
            if not c:
                cameras[item['identifier']] = item
                continue

            if item.get("last_capture", 0) > c.get("last_capture", 0):
                cameras[item['identifier']].update(item)

            if item.get("last_upload", 0) > c.get("last_upload", 0):
                cameras[item['identifier']].update(item)
        return cameras

    def gather_data(self):
        free_mb, total_mb = SysUtil.get_fs_space_mb()
        onion_address, cookie_auth, cookie_client = SysUtil.get_tor_host()

        cameras = SysUtil.configs_from_identifiers(self.identifiers | self.temp_identifiers)
        self.logger.debug("Announcing for {}".format(str(list(self.identifiers | self.temp_identifiers))))

        camera_data = dict(
            meta=dict(
                version=SysUtil.get_version(),
                machine=SysUtil.get_machineid(),
                internal_ip=SysUtil.get_internal_ip(),
                external_ip=SysUtil.get_external_ip(),
                hostname=SysUtil.get_hostname(),
                onion_address=onion_address,
                client_cookie=cookie_auth,
                onion_cookie_client=cookie_client,
                free_space_mb=free_mb,
                total_space_mb=total_mb
            ),
            cameras=self.process_deque(cameras=cameras),
        )
        return camera_data

    def stop(self):
        self.stopper.set()

    def run(self):
        while not self.stopper.is_set():
            self.scheduler.run_pending()
            time.sleep(1)
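process_deque merges queued status items per camera identifier, keeping whichever entry carries the newer last_capture/last_upload; a small illustration with fake items (the instance is built via __new__ purely to skip the SSH/scheduler setup):

from collections import deque

u = Updater.__new__(Updater)
u.communication_queue = deque([
    {'identifier': 'cam1', 'last_capture': 100},
    {'identifier': 'cam1', 'last_capture': 200},
    {'identifier': 'cam2', 'last_upload': 50},
])
merged = u.process_deque()
print(merged['cam1']['last_capture'])   # 200: the newer capture wins
print(sorted(merged))                   # ['cam1', 'cam2']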
Ejemplo n.º 26
0
class MqttScheduler:

    schedulePath = 'schedule.json'

    def __init__(self):
        self.prevModified = 0
        self.lastModified = 0
        self.mqttclient = mqtt.Client()
        self.initMqtt()
        self.ss = Scheduler()

    def loop(self):
        while True:
            self.checkModified()
            self.ss.run_pending()
            time.sleep(1)

    def checkModified(self):
        self.lastModified = os.path.getmtime(MqttScheduler.schedulePath)
        if self.lastModified != self.prevModified:
            print('modified')
            print(self.lastModified)
            self.loadTasks()
            self.prevModified = self.lastModified

    def initMqtt(self):
        with open('private.json', 'r') as f:
            private = json.load(f)
        self.mqttclient.username_pw_set(private['mqtt_login'],
                                        private['mqtt_password'])
        self.mqttclient.connect(private['mqtt_host'], private['mqtt_port'], 60)
        self.mqttclient.loop_start()

    @staticmethod
    def getSchedule():
        with open(MqttScheduler.schedulePath, 'r') as f:
            tasks_json = json.load(f)
        return tasks_json

    @staticmethod
    def delTask(tag):
        tasklist = MqttScheduler.getSchedule()
        tasklist.pop(tag, None)
        MqttScheduler.saveSchedule(tasklist)

    @staticmethod
    def saveSchedule(tasks_json):
        with open(MqttScheduler.schedulePath, 'w') as f:
            json.dump(tasks_json, f)

    @staticmethod
    def addTask(name, value):
        tasks_json = MqttScheduler.getSchedule()
        tasks_json[name] = value
        MqttScheduler.saveSchedule(tasks_json)

    def loadTasks(self):
        tasklist = MqttScheduler.getSchedule()
        print('tasklist', tasklist)
        self.ss.clear()
        for task in tasklist:
            self.processTask(task, tasklist[task])

    def mqttpost(self, topic, msg):
        self.mqttclient.publish(topic, msg)
        conf.update_config(topic, json.loads(msg))

    def mqttpostWorkday(self, topic, msg):
        # Monday=0 ... Sunday=6, so weekdays are 0-4
        if datetime.datetime.today().weekday() < 5:
            self.mqttpost(topic, msg)

    def mqttpostWeekend(self, topic, msg):
        if datetime.datetime.today().weekday() >= 5:
            self.mqttpost(topic, msg)

    def processTask(self, tag, schedobj):
        if schedobj['type'] == 'daily':
            self.ss.every().day.at(schedobj['time']).do(
                self.mqttpost, schedobj['topic'], schedobj['msg'])
        elif schedobj['type'] == 'workday':
            self.ss.every().day.at(schedobj['time']).do(
                self.mqttpostWorkday, schedobj['topic'], schedobj['msg'])
        elif schedobj['type'] == 'weekend':
            self.ss.every().day.at(schedobj['time']).do(
                self.mqttpostWeekend, schedobj['topic'], schedobj['msg'])
        elif schedobj['type'] == 'hour':
            self.ss.every().hour.do(self.mqttpost, schedobj['topic'],
                                    schedobj['msg'])
        elif schedobj['type'] == 'minute':
            self.ss.every().minute.do(self.mqttpost, schedobj['topic'],
                                      schedobj['msg'])
        elif schedobj['type'] == 'second':
            self.ss.every().second.do(self.mqttpost, schedobj['topic'],
                                      schedobj['msg'])
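The schedule file that loadTasks consumes is a flat JSON object keyed by task name; a hedged sketch of seeding it through the static helpers (topics and payloads are made up):

MqttScheduler.saveSchedule({})   # create/reset schedule.json

MqttScheduler.addTask('lights_on', {
    'type': 'workday',           # daily / workday / weekend / hour / minute / second
    'time': '07:30',             # used by the day-based types
    'topic': 'home/lights',
    'msg': '{"state": "on"}',
})
MqttScheduler.addTask('heartbeat', {
    'type': 'minute',
    'topic': 'home/heartbeat',
    'msg': '{"alive": true}',
})
print(MqttScheduler.getSchedule())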
Ejemplo n.º 27
0
class Scheduler:
    """
    Class to schedule recurring jobs (i.e. like cron)

    :param logger: logger (optional)
    :type logger: logging.logger
    """
    def __init__(self, logger=None):
        """
        Initialises class
        """
        self.scheduler = Sched()
        self.logger = logger

    def schedule(self, job, *args, **kwargs):
        """
        Schedule job

        :type job: func
        :param job: job function

        :type scheduler_job_config: dict
        :param scheduler_job_config: configuration parameters for job, entries may be:

        * seconds (*int*) - seconds between job repeats
        * minutes (*int*) - minutes between job repeats
        * hours (*int*) - hours between job repeats
        * parallel (*bool*) - switch for running jobs on different threads (False if omitted)
        * name (*str*) - job name for logging (optional)

        :param args: args for job

        :param kwargs: kwargs for job
        """

        scheduler_job_config = kwargs.pop("scheduler_job_config")

        seconds = scheduler_job_config.get("seconds")
        minutes = scheduler_job_config.get("minutes")
        hours = scheduler_job_config.get("hours")
        parallel = scheduler_job_config.get("parallel")
        name = scheduler_job_config.get("name")

        if seconds is not None:
            self.scheduler.every(seconds).seconds.do(self.job_wrapper, job,
                                                     parallel, self.logger,
                                                     name, *args, **kwargs)
        elif minutes is not None:
            self.scheduler.every(minutes).minutes.do(self.job_wrapper, job,
                                                     parallel, self.logger,
                                                     name, *args, **kwargs)

        elif hours is not None:
            self.scheduler.every(hours).hours.do(self.job_wrapper, job,
                                                 parallel, self.logger, name,
                                                 *args, **kwargs)

    def get_scheduled_jobs(self):
        """
        Return scheduled jobs

        :return: scheduled jobs
        :rtype: list
        """

        return self.scheduler.jobs

    @staticmethod
    def job_wrapper(job,
                    parallel=False,
                    logger=None,
                    name=None,
                    *args,
                    **kwargs):
        """
        Wraps job function to provide logging, error handling and parallel processing when scheduled

        :type job: func
        :param job: function
        """

        if logger is not None:

            def with_logging(func, logger, name):
                @functools.wraps(func)
                def wrapper(*args, **kwargs):
                    logger.info("Started: " + name)

                    try:
                        msg = func(*args, **kwargs)

                        log_msg = "Completed: " + name
                        if isinstance(msg, str):
                            log_msg += " (" + msg + ")"

                        logger.info(log_msg)

                        return msg

                    except Exception as exception:

                        exception_type = type(exception).__name__
                        exception_value = exception.__str__()

                        logger.info("Failed: " + name + " - " +
                                    exception_type + ": " + exception_value)

                return wrapper

            job = with_logging(job, logger, name)

        if parallel:
            # hand the job to a worker thread; returning the AsyncResult
            # instead of calling .get() keeps the scheduler loop unblocked
            pool = ThreadPool(processes=1)
            return pool.apply_async(job, args, kwargs)
        else:
            return job(*args, **kwargs)

    def run(self, start_time=None):
        """
        Run scheduled jobs

        :type start_time: datetime.datetime
        :param start_time: time until which to delay running pending jobs
        """

        # todo - implement start_time feature

        while True:
            self.scheduler.run_pending()
            time.sleep(1)
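A usage sketch for this wrapper; the logger and the fetch_data job are stand-ins invented for the example:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger('jobs')

def fetch_data(url):
    return 'fetched ' + url

s = Scheduler(logger=log)
s.schedule(
    fetch_data,
    'https://example.com',       # positional args are forwarded to the job
    scheduler_job_config={
        'minutes': 5,            # repeat every 5 minutes
        'parallel': False,       # run on the scheduler thread
        'name': 'fetch-data',    # label used in the log lines
    },
)
print(s.get_scheduled_jobs())
s.run()   # blocks, ticking run_pending() once per second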