Ejemplo n.º 1
0
def main():
    """Entry point: schedule the rps_watcher subprocess every 10 minutes and
    babysit the spawned processes until the loop exits."""
    global procs
    global logger
    # Create the logger
    logger = gene_logger('log/cron.log')
    logger.log(10, f'{os.getpid()} : On main.')

    # Set up the schedule
    s = Scheduler()
    s.every(10).minutes.do(proc, args=['python', './rps_watcher.py'], sche=s)

    # Report the registered jobs
    for j in s.jobs:
        logger.log(10, f'{os.getpid()} : Jobs...{j}')

    # Job dispatch loop
    try:
        while True:
            s.run_pending()
            # Check the state of each spawned process.  Iterate over a copy:
            # finished processes are removed from `procs` inside the loop, and
            # mutating a list while iterating it skips elements.
            for p in list(procs):
                if p.poll() is not None:
                    logger.log(
                        10, f'{os.getpid()} : #{p.pid} has exited({p.returncode})')
                    procs.remove(p)
            time.sleep(1)
    finally:
        # Was dead code after `while True`; now actually runs on
        # KeyboardInterrupt or any other exit from the loop.
        logger.log(10, f'{os.getpid()} : On exit.')
Ejemplo n.º 2
0
def run_continuously():
    """Register every CELERYBEAT_SCHEDULE entry on a scheduler and start it."""
    sched = Scheduler()
    for entry in settings.CELERYBEAT_SCHEDULE.values():
        worker = TaskWorker.resolve_callable(entry['task'])
        period = entry['schedule'].total_seconds()
        sched.every(period).seconds.do(worker.apply_async)
    sched.run_continuously()
Ejemplo n.º 3
0
def get_scheduler():
    """
    Build a Scheduler from settings.ROBUST_SCHEDULE.

    :rtype scheduler.Scheduler
    """
    from .utils import TaskWrapper

    schedule_list = getattr(settings, 'ROBUST_SCHEDULE', None)
    if not schedule_list:
        raise RuntimeError("can't run beat with empty schedule")

    sched = Scheduler()

    for interval, task in schedule_list:
        task_cls = import_string(task)
        is_wrapper = isinstance(task_cls, type) and issubclass(task_cls, TaskWrapper)
        if not is_wrapper:
            raise RuntimeError('{} is not decorated with @task'.format(task))

        if isinstance(interval, datetime.timedelta):
            seconds = int(interval.total_seconds())
            # noinspection PyUnresolvedReferences
            sched.every(seconds).seconds.do(schedule_task, task, task_cls.tags)
        else:
            # Non-timedelta intervals are callables that configure the job.
            interval(sched).do(schedule_task, task, task_cls.tags)

    return sched
Ejemplo n.º 4
0
    def thread_task():
        """Run `task` every 30 minutes on a private scheduler (blocks forever)."""
        runner = Scheduler()
        runner.every(30).minutes.do(task)
        while True:
            runner.run_pending()
            time.sleep(1)
Ejemplo n.º 5
0
def add_and_run_job(func, args):
    """Schedule `func(args)` every 5 seconds and run the dispatch loop forever.

    Args:
        func: callable invoked by the scheduler.
        args: single positional argument passed to `func` on each run.
    """
    import time  # local import: keeps the fix self-contained in this block

    schedule = Scheduler()

    schedule.every(5).seconds.do(func, args)

    while True:
        schedule.run_pending()
        # Sleep between polls; the original spun in a 100%-CPU busy loop.
        time.sleep(1)
Ejemplo n.º 6
0
        def task():
            """Check IP availability once now, then every 10 minutes forever."""
            self.check_ip_availability_task()
            poller = Scheduler()
            poller.every(10).minutes.do(self.check_ip_availability_task)
            while True:
                poller.run_pending()
                time.sleep(1)
Ejemplo n.º 7
0
class Bot(DBot):
    """Discord bot with a background job runner and a per-guild config table."""

    def __init__(self, db, **options):
        super().__init__(**options)
        self.db = db
        self.schedule = Scheduler()
        self.jobs = []
        self.run_jobs = True

    async def close(self):
        """Stop the job loop, shut the bot down and close all DB handles."""
        print("Shutting down!")
        self.run_jobs = False
        await super().close()
        self.db.close_all()

    async def on_ready(self):
        print(f"Bot is ready! Logged in as {self.user}.")
        Thread(target=self.job_runner).start()

    def job_runner(self):
        """Poll pending jobs every 10 s until `run_jobs` is cleared."""
        print("Starting background timer runner.")
        while self.run_jobs:
            try:
                self.schedule.run_pending()
            except Exception as e:
                print(f"{type(e).__name__}: {e}")
            time.sleep(10)

    def register_job_daily(self, daytime, f):
        """Schedule `f` once per day at the given HH:MM string."""
        print(f"Registering job {f.__name__} to run every day at {daytime}")
        self.schedule.every().day.at(daytime).do(f)

    def register_job(self, timer, f):
        """Schedule `f` every `timer` seconds."""
        print(f"Registering job {f.__name__} to run every {timer} seconds")
        self.schedule.every(timer).seconds.do(f)

    def dbconf_get(self, guild_id, name, default=None):
        """Read one config value for a guild; `default` when the key is absent."""
        rows = self.db.get(guild_id).execute("SELECT value FROM config WHERE name = ?", (name,)).fetchall()
        if not rows:
            return default
        return str(rows[0][0])

    def dbconf_set(self, guild_id, name, value):
        """Insert or update a guild config value, skipping no-op writes."""
        current = self.dbconf_get(guild_id, name)

        if current is None:
            with self.db.get(guild_id) as db:
                db.execute("INSERT INTO config(name, value) VALUES(?, ?)", (name, value))
            return

        if str(current) == str(value):
            return

        with self.db.get(guild_id) as db:
            db.execute("UPDATE config SET value = ? WHERE name = ?", (value, name))
Ejemplo n.º 8
0
def start_scheduler():
    """Reset the database, then add a new article every 10 seconds."""
    # Database initialisation first: empty it, then re-insert the 100 titles.
    vider_BD()
    remplir_BD()

    # Run the scheduler asynchronously (background thread).
    sched = Scheduler()
    sched.every(10).seconds.do(addArticle)
    sched.run_continuously()
    print('ok')
Ejemplo n.º 9
0
def schedule_every_monday_at(process, str_time, run_at_start=True):
    """Run `process` every Monday at `str_time`; optionally fire once now."""
    weekly = Scheduler()
    weekly.every().monday.at(str_time).do(process)

    if run_at_start:
        # Execute the job immediately as well as on its weekly slot.
        weekly.run_all()

    while True:
        weekly.run_pending()
        time.sleep(1)
Ejemplo n.º 10
0
    def thread_task():
        """Every 30 minutes, restart the crawler unless it is already running."""
        def task():
            if runner.is_running:
                return
            print("============ 开始重新爬取 ===================")
            runner.crawl()

        periodic = Scheduler()
        periodic.every(30).minutes.do(task)
        while True:
            periodic.run_pending()
            time.sleep(1)
Ejemplo n.º 11
0
 def init_schedule(self, scheduler: schedule.Scheduler) -> tuple:
     """Register the periodic maintenance jobs and return their Job handles."""
     db_check = scheduler.every(30).minutes.do(
         self.unique_task_queue.push,
         self._check_db,
         priority=TaskPriorities.LOW,
     )
     backup = scheduler.every(2).hours.do(
         self.unique_task_queue.push,
         Signal.backup,
         priority=TaskPriorities.LOW,
     )
     return (db_check, backup)
Ejemplo n.º 12
0
class LocalController(Thread):
    """Thread that runs the watering cycle on a local, config-driven timetable."""

    def __init__(self):
        Thread.__init__(self, name='Local Timer')
        self.__stop = Event()
        # Days of the week and the daily start time come from the config file.
        self.__days, self.__start_time = parse_config()
        self.__scheduler = Scheduler()

    def stop(self):
        """Signal the run loop to exit and wait for the thread to finish."""
        if not self.__stop.is_set():
            self.__stop.set()
        self.join()

    def next_run(self):
        # Datetime of the next scheduled job (None when nothing is scheduled).
        return self.__scheduler.next_run

    def __run_cycle(self):
        # Kick off a full run cycle via the global zone-action dispatcher.
        state.run_zone_action((ZoneAction.RUN_CYCLE, 0))

    def __schedule_job(self):
        """Rebuild the schedule: weekly per-day jobs in production, 3-min in dev."""
        self.__scheduler.clear()
        if in_production():
            for day in self.__days:
                # Construct the weekly Job by hand so its start day can be set
                # dynamically from config (the fluent API would require a
                # hard-coded weekday attribute like `.monday`).
                job = Job(1, self.__scheduler)
                job.start_day = day.name.lower()
                job.unit = 'weeks'
                job.at(self.__start_time.strftime("%H:%M")).do(
                    self.__run_cycle)
        else:
            self.__scheduler.every(3).minutes.do(self.__run_cycle)
        logging.info('Next run scheduled for {0}.'.format(
            self.__scheduler.next_run))

    def control_mode_changed(self):
        """React to a controller-mode switch: only LOCAL mode keeps jobs queued."""
        mode = state.active_controller_mode()
        if mode is not ControllerMode.LOCAL:
            self.__scheduler.clear()
        elif mode is ControllerMode.LOCAL:
            self.__schedule_job()

    def run(self):
        """Main loop: dispatch due jobs once a second until stop() is called."""
        logging.info('Local cycle run controller started.')
        self.__schedule_job()
        while not self.__stop.is_set():
            # Jobs only fire while LOCAL mode is active; otherwise just idle.
            if state.active_controller_mode() is ControllerMode.LOCAL:
                self.__scheduler.run_pending()
            sleep(1)
        self.__scheduler.clear()
        logging.info('Local cycle run controller stopped.')
Ejemplo n.º 13
0
    def schedule_updates(self) -> threading.Event:
        """Start a background thread that runs update_all daily at 04:30.

        Returns:
            threading.Event: set it to make the background loop terminate.
        """
        scheduler = Scheduler()
        scheduler.every().day.at('04:30').do(self.update_all)

        stop_flag = threading.Event()

        class _Runner(threading.Thread):
            def run(self) -> None:
                while not stop_flag.is_set():
                    scheduler.run_pending()
                    time.sleep(1)

        _Runner().start()

        return stop_flag
Ejemplo n.º 14
0
 def init_schedule(self, scheduler: schedule.Scheduler) -> tuple:
     """Queue the periodic metric-collection jobs and return their handles."""
     push = self.unique_task_queue.push
     return (
         scheduler.every(10).seconds.do(
             push, self._save_cpu_temperature,
             priority=TaskPriorities.LOW,
         ),
         scheduler.every(5).minutes.do(
             push, self._save_weather_data,
             priority=TaskPriorities.LOW,
         ),
         scheduler.every(10).minutes.do(
             push, self._save_ram_usage,
             priority=TaskPriorities.LOW,
         ),
     )
Ejemplo n.º 15
0
class Cron:
    """Flask extension that maps "every N unit [at HH:MM]" strings onto a
    schedule.Scheduler and exposes a blocking `cron` CLI command to run it."""

    # Matches e.g. "every 5 minutes", "every day at 03:30".
    pattern = re.compile(r'every (\d+ )?(\w+)(?: at (\d\d:\d\d))?$')

    def __init__(self, app=None):
        self.app = None
        self.scheduler = Scheduler()
        self.stopped = True
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Register this extension and the `cron` CLI command on the app."""
        self.app = app
        app.extensions['cron'] = self
        app.cli.add_command(Command('cron', callback=self.run))

    def task(self, when):
        """Decorator: schedule the wrapped function per the `when` expression."""
        def decorator(func):
            match = self.pattern.match(when)
            interval = match.group(1)
            if interval is not None:
                job = self.scheduler.every(int(interval))
            else:
                job = self.scheduler.every()
            # The attribute access alone sets the job's unit (schedule's
            # `minutes`/`hours`/... are self-mutating properties), so the
            # discarded getattr result is intentional.
            getattr(job, match.group(2))
            time_str = match.group(3)
            if time_str is not None:
                job.at(time_str)
            job.do(func)
            return func
        return decorator

    def run(self):
        """Blocking loop; SIGINT/SIGTERM flip `stopped` and end it."""
        self.app.logger.info('Starting cron')
        self.stopped = False
        signal(SIGINT, self.stop)
        signal(SIGTERM, self.stop)
        while not self.stopped:
            self.scheduler.run_pending()
            # NOTE(review): idle_seconds is None when no jobs exist and can be
            # negative when a job is overdue — confirm at least one future job
            # is always registered before run() is invoked.
            sleep(self.scheduler.idle_seconds)
        self.app.logger.info('Terminating cron')

    def stop(self, signo=None, frame=None):
        # Signal-handler signature; extra args are ignored on direct calls.
        self.stopped = True
class ChatBot(object):
    """Chat bot whose trigger and poller handlers are found by introspection."""

    def __init__(self, broker=None):
        self.schedule = Scheduler()
        # Maps trigger keyword -> bound handler method.
        self.triggers = {}
        # Methods flagged with `every_minute`, run once a minute.
        self.pollers = []
        self.introspect()
        self.setup_pollers()
        self.broker = broker
        if self.broker is not None:
            self.username = self.broker.username
            self.messages = self.broker.messages

    def on_message(self, iteration_nbr, message):
        """Dispatch the first trigger whose keyword occurs in the message text.

        Returns the trigger's response (possibly None), or None when no
        trigger keyword matches.
        """
        self.iteration_nbr = iteration_nbr
        text = message['text'].lower()
        for trigger in self.triggers:
            if trigger in text:
                response = self.triggers[trigger]()
                if response is not None:
                    self.on_posted(self.broker.post(response)['message'])
                return response

    def on_posted(self, message):
        """Called with broker response to just posted message"""
        return

    def setup_pollers(self):
        # Each poller runs once a minute on the bot's private scheduler.
        for poller in self.pollers:
            self.schedule.every().minute.do(poller)

    def run_pending(self):
        """Execute any due scheduled jobs."""
        self.schedule.run_pending()

    def introspect(self):
        """Collect `on_*` methods marked as triggers or minute-pollers."""
        for name, method in inspect.getmembers(self,
                                               predicate=inspect.ismethod):
            if name.startswith('on_'):
                if getattr(method, 'is_trigger', False) is True:
                    # 'on_foo' registers under the keyword 'foo'.
                    event_name = name[3:]
                    self.triggers[event_name] = method
                if getattr(method, 'every_minute', False) is True:
                    self.pollers.append(method)
Ejemplo n.º 17
0
class Scheduler(object):
    """Thin wrapper around a Sched instance with a fixed polling interval."""

    def __init__(self, config):
        self._debug = config['debug']
        self._interval = config.get('interval', 10)
        self._sched = Sched()

    def _require_sched(self):
        # Defensive guard preserved from the original API contract.
        if self._sched is None:
            raise SchedulerException('required to create scheduler')

    def add(self, func, args, tag):
        """Schedule `func(args=args)` every `interval` seconds under `tag`."""
        self._require_sched()
        self._sched.every(self._interval).seconds.do(func, args=args).tag(tag)

    def run(self):
        """Execute any jobs that are due."""
        self._require_sched()
        self._sched.run_pending()

    def stop(self, tag=None):
        """Remove jobs tagged `tag` (all jobs when tag is None)."""
        self._require_sched()
        self._sched.clear(tag)
Ejemplo n.º 18
0
def test_exception_handling():
    # A plain scheduler propagates job exceptions, leaving the job looking
    # as though it never ran.
    plain = Scheduler()
    plain.every(1).hour.do(_failjob)

    with pytest.raises(Exception) as excinfo:
        plain.run_all()
    assert "I will always fail" in str(excinfo)
    assert plain.jobs[0].last_run is None
    assert plain.jobs[0].next_run > datetime.now()

    # SafeScheduler swallows the failure and still books the next execution.
    safe = SafeScheduler()
    safe.every(1).hour.do(_failjob)
    safe.run_all()

    assert safe.jobs[0].last_run < datetime.now()
    assert safe.jobs[0].next_run > datetime.now()
Ejemplo n.º 19
0
class ChatBot(object):
    """Introspection-driven chat bot: `on_*` methods become triggers/pollers."""

    def __init__(self, broker=None):
        self.schedule = Scheduler()
        self.triggers = {}
        self.pollers = []
        self.introspect()
        self.setup_pollers()
        self.broker = broker
        if self.broker is not None:
            self.username = self.broker.username
            self.messages = self.broker.messages

    def on_message(self, iteration_nbr, message):
        """Run the first trigger whose keyword occurs in the message text."""
        self.iteration_nbr = iteration_nbr
        text = message['text'].lower()
        for keyword, handler in self.triggers.items():
            if keyword not in text:
                continue
            response = handler()
            if response is not None:
                posted = self.broker.post(response)
                self.on_posted(posted['message'])
            return response

    def on_posted(self, message):
        """Hook invoked with the broker's response to a just-posted message."""
        return

    def setup_pollers(self):
        """Schedule every registered poller to run once a minute."""
        for poller in self.pollers:
            self.schedule.every().minute.do(poller)

    def run_pending(self):
        """Execute any due scheduled jobs."""
        self.schedule.run_pending()

    def introspect(self):
        """Find `on_*` methods flagged as triggers or every-minute pollers."""
        members = inspect.getmembers(self, predicate=inspect.ismethod)
        for name, method in members:
            if not name.startswith('on_'):
                continue
            if getattr(method, 'is_trigger', False) is True:
                self.triggers[name[3:]] = method
            if getattr(method, 'every_minute', False) is True:
                self.pollers.append(method)
Ejemplo n.º 20
0
def get_scheduler():
    """
    Build a Scheduler from settings.ROBUST_SCHEDULE (timedelta intervals).

    :rtype scheduler.Scheduler
    """
    from .utils import TaskWrapper

    entries = getattr(settings, 'ROBUST_SCHEDULE', None)
    if not entries:
        raise RuntimeError("can't run beat with empty schedule")

    sched = Scheduler()

    for interval, task in entries:
        task_cls = import_string(task)
        valid = isinstance(task_cls, type) and issubclass(task_cls, TaskWrapper)
        if not valid:
            raise RuntimeError('{} is not decorated with @task'.format(task))

        seconds = int(interval.total_seconds())
        # noinspection PyUnresolvedReferences
        sched.every(seconds).seconds.do(schedule_task, task, task_cls.tags)

    return sched
Ejemplo n.º 21
0
def start_scheduler():
    """Start the background jobs once, guarded by the SchedulerStarted flag."""
    now = datetime.now()
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")

    # NOTE(review): with Django-style managers, .get() raises DoesNotExist
    # rather than returning None when the row is missing — confirm GlobalFlag
    # behaves differently, otherwise the final else branch is unreachable.
    flag = GlobalFlag.objects.get(name="SchedulerStarted")

    if flag is not None:
        if not flag.active:
            scheduler = Scheduler()
            # NOTE(review): Scheduler() can never evaluate to None, so the
            # "problem creating" branch below is effectively dead code.
            if scheduler is not None:
                print("start_scheduler called at " + dt_string)
                scheduler.every(1).minutes.do(pulse_interlocks)
                scheduler.every().day.at("22:00").do(
                    daily_validate_transactions)
                scheduler.every(15).minutes.do(print_status)
                #end_run =
                scheduler.run_continuously()
                # Persist the flag so a second call becomes a no-op.
                flag.active = True
                flag.save()
            else:
                print("There was a problem creatings the Scheduler")
        else:
            print("The scheduler has already been started")
    else:
        print("No global flag named SchedulerStarted has been found")
Ejemplo n.º 22
0
class Server:
    """Reads tap jobs from config.json and drives them through a scheduler."""

    def __init__(self):
        self.scheduler = Scheduler()
        self.semaphore = threading.Semaphore()
        self.con = taps_control.TapsControl()
        self.read_config()

    def read_config(self):
        """Load config.json (next to this file) and queue each job entry."""
        config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   'config.json')
        with open(config_path, 'r') as f:
            data = json.load(f)
            for job in data.values():
                self.handle_job(job)

    def handle_job(self, job):
        """Register one job dict; `when` selects daily / interval / 2-day mode."""
        when = job['when']
        con = self.con
        if when == 'daily':
            logging.info('Queued a daily job on channel ' +
                         str(job['channel']))
            self.scheduler.every().day.at(job['start']).do(
                job_func, int(job['channel']), int(job['duration']), con)
        elif when == 'every':
            self.scheduler.every(int(job['interval'])).seconds.do(
                job_func, int(job['channel']), int(job['duration']), con)

        elif when == "other":
            self.scheduler.every(2).days.do(job_func, int(job['channel']),
                                            int(job['duration']), con)

    def run(self):
        """Dispatch due jobs forever, polling once a second."""
        import time  # local import: the enclosing module did not import time

        while True:
            self.scheduler.run_pending()
            # Sleep between polls; previously this busy-waited at 100% CPU.
            time.sleep(1)
Ejemplo n.º 23
0
    def _configure_scheduler(self, scheduler: Scheduler, callback: Callable[[], None]) -> None:
        """
        Configures the scheduler, distinguishing cron-like expressions
        (checked on the minute) from plain polling intervals via `self.is_cron`.

        Override in subclasses to fit the behaviour to your needs.

        Args:
            scheduler (schedule.Scheduler): The actual scheduler.
            callback (callable): The callback to call when the time is right.

        Returns:
            None
        """
        if self.is_cron:
            # Cron mode: fire exactly on the minute so cron matching is checked
            # once per minute.
            scheduler.every().minute.at(":00").do(callback)
            return
        if not self._poll_interval:
            # No interval configured: the poll can only be triggered through
            # the api `trigger` endpoint, so nothing is scheduled here.
            return
        # Interval mode: execute the poll every `_poll_interval` seconds.
        scheduler.every(self._poll_interval).seconds.do(callback)
Ejemplo n.º 24
0
 def init_schedule(self, scheduler: schedule.Scheduler) -> tuple:
     """Register the camera monitoring jobs and return their Job handles."""
     push = self.unique_task_queue.push
     return (
         scheduler.every(10).seconds.do(
             push, self._save_photo,
             priority=task_queue.TaskPriorities.MEDIUM,
         ),
         scheduler.every(30).seconds.do(
             push, self._check_video_stream,
             priority=task_queue.TaskPriorities.LOW,
         ),
         scheduler.every(10).minutes.do(
             push, self._update_camera_status,
             priority=task_queue.TaskPriorities.LOW,
         ),
         scheduler.every(10).seconds.do(
             push, self.check,
             priority=task_queue.TaskPriorities.MEDIUM,
         ),
     )
Ejemplo n.º 25
0
def create_schedule(script):
    """Build the background-job schedule for `script` (hourly + daily jobs)."""
    def wrap_job(func):
        @functools.wraps(func)
        def wrapper():
            logger.info('Running %s', func.__name__)
            func(script, None, None)

        return wrapper

    schedule = Scheduler()
    # Roughly hourly (randomised 55-65 minutes so workers don't align).
    for hourly_job in (run_merge_missing_mbids, run_update_lookup_stats):
        schedule.every(55).to(65).minutes.do(wrap_job(hourly_job))
    # Roughly daily (randomised 23-25 hours).
    for daily_job in (run_update_stats, run_update_user_agent_stats,
                      run_cleanup_perf_stats):
        schedule.every(23).to(25).hours.do(wrap_job(daily_job))
    return schedule
Ejemplo n.º 26
0
class CapSaveRecognTask(AbstractTask):
    """Periodic camera task: capture a frame, store it, run AI recognition."""

    def __init__(self, uuid):
        super().__init__("capcam")
        self.cam_uuid = uuid
        # Polling period (seconds) taken from the camera's configuration.
        self.success_interval = camctl.get_one(self.cam_uuid).interval
        self.schedule = Scheduler()

    def run(self):
        """Main loop: run __run on the success interval until stopped."""
        self.schedule.clear()
        self.schedule.every(self.success_interval).seconds.do(self.run_threaded, self.__run)
        while not self.stopped:
            try:
                self.schedule.run_pending()
                time.sleep(1)
            except Exception as err:
                logging.error("task(%s@%s) failed, %s", self.name, self.cam_uuid, str(err))
                # NOTE(review): fail_interval/stopped are presumably defined on
                # AbstractTask — confirm.
                time.sleep(self.fail_interval)

    def __run(self):
        """One capture cycle; on failure fall back to the fail interval."""
        try:
            cam = camctl.get_one(self.cam_uuid)
            # Only act inside the configured detection period/time window.
            if clkctl.check_period_filter(cam.detection_cycle.get('detection_period'), cam.detection_cycle.get('detection_time')):
                if_sto_img = camctl.if_sto_img(cam)
                saved_path, if_sto_db = bktctl.save(cam.frame(), self.cam_uuid, if_sto_img)
                function_list = rcgctl.period_function_filter(cam)
                model_list = rcgctl.ai_pointer_filter(function_list)
                logging.info(model_list)

                if len(model_list):
                    logging.info("task(%s@%s): start ai recognize, function is : %s, model is : %s", self.name, self.cam_uuid, str(function_list), str(model_list))
                    rcgctl.recognize(saved_path, self.cam_uuid, function_list, model_list)
                    logging.info("task(%s@%s): succ and next in: %s", self.name, self.cam_uuid, cam.interval)
                else:
                    logging.info("task(%s@%s): not in ai recognize cycle", self.name, self.cam_uuid)

                # Drop the image again when it should not be kept.
                if not if_sto_db:
                    bktctl.delete(saved_path)
            # The camera's interval changed in config: rebuild the schedule.
            if cam.interval != self.success_interval:
                self.success_interval = cam.interval
                self.schedule.clear()
                self.schedule.every(self.success_interval).seconds.do(self.run_threaded, self.__run)
        except Exception as err:
            logging.error("task(%s) failed", self.name)
            logging.exception(err)
            logging.info("task(%s@%s): fail and next in: %s", self.name, self.cam_uuid, self.fail_interval)
            # Back off: retry on the fail interval until a cycle succeeds.
            self.success_interval = self.fail_interval
            self.schedule.clear()
            self.schedule.every(self.fail_interval).seconds.do(self.run_threaded, self.__run)

    def run_threaded(self, func):
        """Run `func` on a fresh thread so the schedule loop never blocks."""
        job_thread = threading.Thread(target=func)
        job_thread.start()
Ejemplo n.º 27
0
def create_schedule(script):
    # type: (Script) -> Scheduler
    """Build the stats-maintenance schedule for `script`."""

    def wrap_job(func):
        # type: (Callable[[Script, Any, Any], None]) -> Callable[[], None]
        @functools.wraps(func)
        def wrapper():
            logger.info('Running %s', func.__name__)
            func(script, None, None)

        return wrapper

    sched = Scheduler()
    # Disabled: sched.every().minute.do(wrap_job(run_backfill_meta_created))
    sched.every(3).to(9).minutes.do(wrap_job(run_update_lookup_stats))
    # Disabled: sched.every(55).to(65).minutes.do(wrap_job(run_merge_missing_mbids))
    sched.every(15).to(30).minutes.do(wrap_job(run_cleanup_perf_stats))
    sched.every().day.at("00:10").do(wrap_job(run_update_stats))
    sched.every().day.at("00:10").do(wrap_job(run_update_user_agent_stats))
    return sched
Ejemplo n.º 28
0
def _get_schedule_from_settings(settings):
    """Build a Scheduler running `run` at each non-zero period configured
    on `settings` (hours, minutes and/or seconds)."""
    sched = Scheduler()

    hours = getattr(settings, 'UPDATE_TIME_HOUR', None)
    if hours:
        sched.every(hours).hours.do(run)
    minutes = getattr(settings, 'UPDATE_TIME_MIN', None)
    if minutes:
        sched.every(minutes).minutes.do(run)
    seconds = getattr(settings, 'UPDATE_TIME_SEC', None)
    if seconds:
        sched.every(seconds).seconds.do(run)

    return sched
Ejemplo n.º 29
0
 def init_schedule(self, scheduler: schedule.Scheduler) -> tuple:
     """Register the high-priority user-status check; return its Job handle."""
     status_job = scheduler.every(1).seconds.do(
         self.unique_task_queue.push,
         self._check_user_status,
         priority=TaskPriorities.HIGH,
     )
     return (status_job, )
Ejemplo n.º 30
0
def start_scheduler():
    """Run asis_di every 10 seconds on a background scheduler thread."""
    sched = Scheduler()
    sched.every(10).seconds.do(asis_di)
    sched.run_continuously()
Ejemplo n.º 31
0
class Updater(Thread):
    """Background thread that periodically syncs this device's config with a
    central server and reacts to MQTT operation commands (UPDATECONF/REBOOT)."""

    def __init__(self):
        Thread.__init__(self, name="Updater")
        self.logger = logging.getLogger(self.getName())
        print("Thread started {}: {}".format(self.__class__, "Updater"))
        # Bounded deque: camera status items pushed by other threads.
        self.communication_queue = deque(tuple(), 512)
        self.scheduler = Scheduler()
        # Full config sync twice a day.
        self.scheduler.every(12).hours.do(self.go)
        # self.scheduler.every(30).minutes.do(self.upload_log)
        self.stopper = Event()
        self.sshkey = SSHManager()
        self.identifiers = set()
        self.temp_identifiers = set()
        self.setupmqtt()

    def mqtt_on_message(self, *args):
        """Handle an incoming MQTT command for this machine's operation topic."""
        message = args[-1]
        payload = message.payload.decode("utf-8").strip()
        self.logger.debug("topic: {} payload: {}".format(message.topic, payload))
        if message.topic == "rpi/{}/operation".format(SysUtil.get_machineid()):
            if payload == "UPDATECONF":
                self.go()
            if payload == "REBOOT":
                SysUtil.reboot()

    def mqtt_on_connect(self, client, *args):
        """(Re)subscribe to this machine's operation topic on connect."""
        self.logger.debug("Subscribing to rpi/{}/operation".format(SysUtil.get_machineid()))
        self.mqtt.subscribe("rpi/{}/operation".format(SysUtil.get_machineid()), qos=1)

    def setupmqtt(self):
        """Create the MQTT client and authenticate.

        Credential fallback chain: local "mqttpassword" file, then an
        SSH-signed timestamp, then a hard-coded placeholder password.
        """
        self.mqtt = client.Client(client_id=client_id,
                                  clean_session=True,
                                  protocol=client.MQTTv311,
                                  transport="tcp")

        self.mqtt.on_message = self.mqtt_on_message
        self.mqtt.on_connect = self.mqtt_on_connect

        try:
            with open("mqttpassword") as f:
                self.mqtt.username_pw_set(username=SysUtil.get_hostname()+"-Updater",
                                          password=f.read().strip())
        except FileNotFoundError:
            auth = SSHManager().sign_message_PSS(datetime.datetime.now().replace(tzinfo=timezone).isoformat())
            if not auth:
                raise ValueError
            self.mqtt.username_pw_set(username=SysUtil.get_machineid(),
                                      password=auth)
        # NOTE(review): bare except also swallows the ValueError raised just
        # above, silently falling through to the placeholder password.
        except:
            self.mqtt.username_pw_set(username=SysUtil.get_hostname()+"-Updater",
                                      password="******")

        self.mqtt.connect_async("10.9.0.1", port=1883)

        self.mqtt.loop_start()

    def updatemqtt(self, parameter: str, message: bytes):
        """Publish a status value; restart the network loop if publish stalls."""
        # update mqtt
        self.logger.debug("Updating mqtt")
        message = self.mqtt.publish(payload=message,
                                    topic="rpi/{}/status/{}".format(
                                        SysUtil.get_machineid(),
                                        parameter),
                                    qos=1)
        time.sleep(0.5)
        if not message.is_published():
            self.mqtt.loop_stop()
            self.mqtt.loop_start()

    def upload_logs(self):
        """
        uploads rotated logs to the server.
        :return:
        """
        isonow = SysUtil.get_isonow()
        validation_msg = isonow + "," + self.sshkey.sign_message(isonow)
        logs_fp = SysUtil.get_log_files()
        files = {l: open(l, 'rb') for l in logs_fp}
        # NOTE(review): the URL template's {} placeholders are never filled in
        # (no .format call) — this posts to a literal "https://{}/..." URL.
        a = requests.post("https://{}/raspberrypi{}/logs",
                          data={"sig_msg": isonow, "signature": validation_msg},
                          files=files)

        # clear log files if 200 returned
        if a.status_code == 200:
            SysUtil.clear_files(logs_fp)

    def add_to_identifiers(self, identifier: str):
        """
        adds an identifier to the set of identifiers.
        :param identifier: identifier to add
        :return:
        """
        self.logger.debug("Adding {} to list of permanent identifiers.".format(identifier))
        self.identifiers.add(identifier)

    def add_to_temp_identifiers(self, temp_identifier: str):
        """
        adds an identifier to the set of temporary identifiers. that may disappear
        :param temp_identifier: identifier to add
        :return:
        """
        self.logger.debug("Adding {} to list of transient identifiers.".format(temp_identifier))
        self.temp_identifiers.add(temp_identifier)

    def go(self):
        """One sync cycle: sign and PATCH local state to the server, then apply
        any configuration the server sends back."""
        try:
            # try:
            #     with open("/etc/openvpn/client/login.conf", 'wb') as f:
            #         f.write(bytes(SysUtil.get_hostname(), "utf-8")+b"\n")
            #         f.write(self.sshkey.sign_message_PSS_b64(SysUtil.get_hostname()))
            #     r = requests.get("https://gist.githubusercontent.com/gdunstone/e2d009fd6169c1b675bf9be6277f13d2/raw/fe8796b70f1068c332a0e97d5d781659bca3b983/vpn.conf")
            #     if r.status_code == 200:
            #         with open("/etc/openvpn/client/vpn.conf", 'wb') as f:
            #             for chunk in r:
            #                 f.write(chunk)
            # except:
            #     self.logger.error("Couldnt write /etc/openvpn/client/login.conf")

            data = self.gather_data()
            data["signature"] = self.sshkey.sign_message(json.dumps(data, sort_keys=True))

            uri = api_endpoint.format(SysUtil.get_machineid())
            response = requests.patch(uri, json=data)
            # do backwards change if response is valid later.
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input and deprecated in PyYAML >= 5.1 — confirm the
            # file is trusted or switch to safe_load.
            current_config = yaml.load(open("/home/spc-eyepi/{}.yml".format(SysUtil.get_hostname()))) or dict()


            if response.status_code == 200:
                # do config modify/parse of command here.
                data = response.json()
                # Drop empty sections (iterate a copy: we delete while looping).
                for key, value in data.copy().items():
                    if value == {}:
                        del data[str(key)]

                if "chamber" in data.keys():
                    # Fetch a new solarcalc datafile only when its md5 changed.
                    chamberconf = current_config.get("chamber", {})
                    newchamberconf = data.get("chamber", dict()) or dict()
                    datafile_uri = newchamberconf.get("datafile_uri", None)
                    if chamberconf.get("datafile_md5") != newchamberconf.get("datafile_md5") and datafile_uri:
                        req = requests.get("https://traitcapture.org{}".format(datafile_uri))
                        if req.ok:
                            fn = "{}.csv".format(SysUtil.get_hostname())
                            with open(fn, 'w') as f:
                                f.write(req.text)
                            data['chamber']['datafile'] = fn
                        else:
                            self.logger.warning("Couldnt download new solarcalc file. {}".format(req.reason))

                # Re-key the camera list by identifier and set output dirs.
                thed = data.pop("cameras", [])
                data['cameras'] = {}
                for cam in thed:
                    cam['output_dir'] = "/home/images/{}".format(cam['identifier'])
                    data['cameras'][cam['identifier']] = cam

                if len(data) > 0:
                    SysUtil.write_global_config(data)
            else:
                self.logger.error("Unable to authenticate with the server.")

        except Exception as e:
            traceback.print_exc()
            self.logger.error("Error collecting data to post to server: {}".format(str(e)))
            self.logger.error(traceback.format_exc())

    def process_deque(self, cameras=None):
        """Merge queued camera status items into `cameras`, keeping the most
        recent capture/upload info per identifier."""
        if not cameras:
            cameras = dict()
        while len(self.communication_queue):
            item = self.communication_queue.pop()
            c = cameras.get(item['identifier'], None)
            if not c:
                cameras[item['identifier']] = item
                continue

            if item.get("last_capture", 0) > c.get("last_capture", 0):
                cameras[item['identifier']].update(item)

            if item.get("last_upload", 0) > c.get("last_upload", 0):
                cameras[item['identifier']].update(item)
        return cameras

    def gather_data(self):
        """Collect the machine/camera status payload sent to the server."""
        free_mb, total_mb = SysUtil.get_fs_space_mb()
        onion_address, cookie_auth, cookie_client = SysUtil.get_tor_host()

        # cameras = SysUtil.configs_from_identifiers(self.identifiers | self.temp_identifiers)
        self.logger.debug("Announcing for {}".format(str(list(self.identifiers | self.temp_identifiers))))
        # NOTE(review): yaml.load without Loader again — see go().
        conf = yaml.load(open("{}.yml".format(SysUtil.get_hostname()))) or dict()
        cameras = conf.get("cameras", dict())

        camera_data = dict(
            meta=dict(
                version=SysUtil.get_version(),
                machine=SysUtil.get_machineid(),
                internal_ip=SysUtil.get_internal_ip(),
                external_ip=SysUtil.get_external_ip(),
                hostname=SysUtil.get_hostname(),
                onion_address=onion_address,
                client_cookie=cookie_auth,
                onion_cookie_client=cookie_client,
                free_space_mb=free_mb,
                total_space_mb=total_mb
            ),
            cameras=self.process_deque(cameras=cameras),
        )
        return camera_data

    def stop(self):
        """Ask the run loop to terminate."""
        self.stopper.set()

    def run(self):
        """Dispatch scheduled jobs once a second until stop() is called."""
        # `True and` is redundant but preserved verbatim.
        while True and not self.stopper.is_set():
            self.scheduler.run_pending()
            time.sleep(1)
Ejemplo n.º 32
0
class DailyCheckinJob(object):
    """Runs daily check-in jobs for every site in ``site_helper``.

    An hourly schedule (``renew_waiting``) decides, per site, whether to
    start a fresh day's session or retry accounts that have not yet
    succeeded today.  Account batches are pushed onto ``process_queue``
    and outcomes come back through ``result_queue``.

    NOTE(review): ``check_administration``, ``handle_process_queue`` and
    ``handle_result_queue`` are referenced but not defined in this class;
    presumably supplied by a subclass or mixin -- confirm.
    """

    # Minutes-past-the-hour anchor for the hourly schedule.
    check_point = '00:00'

    def __init__(self, db_engine, max_workers=100):
        # Mark if background jobs already running
        self.working = False

        # administration[site] -> site is administratively paused;
        # status[site] -> a producer thread is scanning that site.
        self.administration = {}
        self.status = {}
        for site in site_helper:
            self.administration[site] = True
            self.status[site] = False

        self.db_engine = db_engine

        # Period Schedule
        self.timer = None
        self.scheduler = Scheduler()

        # Query necessary accounts for checkin jobs (Flow Control)
        self.commander = ThreadPoolExecutor(max_workers=2 * len(site_helper) +
                                            2)
        # ThreadPool for checkin jobs running
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        # Exclude the thread for handle_process_queue and handle_result_queue.
        # Floor division: batch is used as an integer LIMIT/OFFSET count
        # below (plain "/" yields a float on Python 3).
        self.batch = max_workers // len(site_helper)

        self.process_queue = Queue.Queue()
        self.result_queue = Queue.Queue()

    def start(self):
        """Start the scheduler thread and trigger the first retry pass."""
        if self.working:
            logger.debug('The checkin background jobs already started...')
            return

        minute = self.check_point
        logger.debug('Schedule every hour at %s...' % minute)
        self.scheduler.every().hour.at(minute).do(self.renew_waiting)

        t = Thread(target=self.run_schedule)
        t.name = 'SchedJob'
        t.daemon = True
        t.start()
        self.timer = t

        t = Thread(target=self.run_trigger)
        t.name = 'FirstRun'
        t.daemon = True
        t.start()

        logger.info('Started checkin jobs ...')
        self.working = True

    def run_schedule(self):
        """Scheduler pump loop (runs forever on a daemon thread)."""
        while True:
            self.scheduler.run_pending()
            time.sleep(1)

    def run_trigger(self):
        """Start the queue handlers, then retry today's unfinished accounts."""
        self.commander.submit(self.handle_process_queue)
        self.commander.submit(self.handle_result_queue)

        logger.debug('Trigger First Retry ...')
        for site in site_helper:
            if not self.check_administration(site):
                self.commander.submit(self.produce, site, action='RETRY')
                self.administration[site] = False

    def renew_waiting(self):
        """Hourly tick: start a new day's session at each site's local
        midnight, otherwise retry accounts not yet checked in today."""
        # Random delay so workers do not all hit the site simultaneously.
        silence = random.randrange(5 * 60, 10 * 60)

        for site in site_helper:
            if not self.administration[site]:
                # site_helper[site][0] is the site's timezone offset (hours).
                if (datetime.utcnow().hour + site_helper[site][0]) % 24 == 0:
                    logger.debug(
                        '[%s] Delay %s seconds to close session for today and start new session ...'
                        % (site, silence))
                    self.commander.submit(self.produce,
                                          site,
                                          action='NORMAL',
                                          delay=silence)
                else:
                    self.commander.submit(self.produce, site, action='RETRY')
            else:
                # BUG FIX: the '%s' placeholder was never substituted.
                logger.debug(
                    '[%s] Under administration, skip loading data ...' % site)

    def produce(self, site, action='Normal', delay=0):
        """Scan account records for *site* in batches onto process_queue.

        :param site: key into ``site_helper``.
        :param action: 'NORMAL' scans everything; 'RETRY' only accounts
            whose last success predates today's site-local midnight.
        :param delay: seconds to sleep before a NORMAL scan starts.
        """
        action = action.upper()

        if action == 'NORMAL':
            self.status[site] = False  # Close another thread for today

            # Waiting for last piece of thread doing this job closed
            # Waiting for site to change another day's section
            time.sleep(delay)
        elif self.status[site]:
            logger.debug(
                '[%s] Another thread is working with retried accounts ...' %
                site)
            return

        session_type = sessionmaker(bind=self.db_engine)
        session = session_type()

        offset = 0
        try:
            self.status[site] = True
            while self.status[site]:
                timezone, _, job_model = site_helper[site]

                # Guard: an unrecognised action must not leave `prepare`
                # unbound (previously a NameError at len(prepare)).
                prepare = []
                if action == 'NORMAL':
                    prepare = session.query(job_model).limit(
                        self.batch).offset(offset).all()
                elif action == 'RETRY':
                    current = int(time.time())
                    # Floor the timezone-shifted timestamp to the site's
                    # local midnight; integer division is essential here
                    # (true division on Python 3 breaks the day boundary).
                    today_begin4checkin = (
                        ((current + timezone * 3600) //
                         (24 * 3600)) * 24 * 3600) - timezone * 3600
                    prepare = session.query(job_model).filter(
                        job_model.last_success < today_begin4checkin).limit(
                            self.batch).offset(offset).all()

                total = len(prepare)

                if total > 0:
                    logger.info('[%s] Batch read %s accounts ...' %
                                (site, total))
                    for user in prepare:
                        self.process_queue.put(
                            (site, user.account, user.cookie, user.passwd))

                # A short batch means we reached the end of the table.
                if total < self.batch:
                    self.status[site] = False
                else:
                    offset += self.batch

                if offset != 0:
                    time.sleep(2)
        finally:
            session.close()
            self.status[site] = False
            logger.debug('[%s] Finish scanning records ...' % site)

    def checkin(self, site, account, cookie, password):
        """Perform one check-in, preferring the stored cookie and falling
        back to a password login; push the outcome onto result_queue."""
        days = None  # Clean result for each request
        expired = False

        _, job_class, _ = site_helper[site]
        request = job_class()
        try:
            if cookie is not None:
                logger.debug('[%s] Using cookie for %s' % (site, account))
                days = request.checkin(cookie)

                # No check-in result and no explicit error from the site:
                # treat the cookie as expired.  Guard against result being
                # None (previously crashed on `'error' not in None`).
                if days is None and request.result is not None \
                        and 'error' not in request.result:
                    expired = True

            if request.result is None or expired is True:
                if password is not None:  # Try if password stored
                    logger.debug('[%s] Using password for %s' %
                                 (site, account))
                    resp = request.login(account, password)
                    if resp is None:
                        logger.debug('[%s] Login with password for %s' %
                                     (site, account))
                        days = request.checkin()

            if days is not None:
                cookie = request.dump_cookie()

            self.result_queue.put({
                'site': site,
                'account': account,
                'checkin': days,
                'expired': expired,
                'dump': cookie,
            })

            request.clear_cookie()
        except Exception as e:  # BUG FIX: was Python-2 "except Exception, e"
            logger.debug(e)
            logger.error(
                '[%s] Error happened while processing user: %s, skip to next...'
                % (site, account))
class Updater(Thread):
    """Background thread that announces this device's status to the
    remote server every 60 seconds and applies any configuration the
    server sends back."""

    def __init__(self):
        Thread.__init__(self, name="Updater")
        self.logger = logging.getLogger(self.getName())
        # Bounded buffer of per-camera status dicts produced elsewhere.
        self.communication_queue = deque(tuple(), 512)
        self.scheduler = Scheduler()
        self.scheduler.every(60).seconds.do(self.go)
        # self.scheduler.every(30).minutes.do(self.upload_log)
        self.stopper = Event()
        self.sshkey = SSHManager()
        # Permanent and transient camera identifiers to announce for.
        self.identifiers = set()
        self.temp_identifiers = set()

    def upload_logs(self):
        """
        uploads rotated logs to the server.

        The request is signed with this device's SSH key; on HTTP 200
        the local log files are cleared.
        :return:
        """
        isonow = SysUtil.get_isonow()
        validation_msg = isonow + "," + self.sshkey.sign_message(isonow)
        logs_fp = SysUtil.get_log_files()
        files = {l: open(l, 'rb') for l in logs_fp}
        try:
            # BUG FIX: the URL placeholders were never filled in.
            # NOTE(review): assumes the same host/machine-id layout as
            # go() below -- confirm the server-side route.
            a = requests.post(
                "https://{}/raspberrypi{}/logs".format(
                    remote_server, SysUtil.get_machineid()),
                data={"sig_msg": isonow, "signature": validation_msg},
                files=files)
        finally:
            # requests does not close handles passed via `files=`.
            for handle in files.values():
                handle.close()

        # clear log files if 200 returned
        if a.status_code == 200:
            SysUtil.clear_files(logs_fp)

    def add_to_identifiers(self, identifier: str):
        """
        adds an identifier to the set of identifiers.
        :param identifier: identifier to add
        :return:
        """
        self.logger.debug("Adding {} to list of permanent identifiers.".format(identifier))
        self.identifiers.add(identifier)

    def add_to_temp_identifiers(self, temp_identifier: str):
        """
        adds an identifier to the set of temporary identifiers. that may disappear
        :param temp_identifier: identifier to add
        :return:
        """
        self.logger.debug("Adding {} to list of transient identifiers.".format(temp_identifier))
        self.temp_identifiers.add(temp_identifier)

    def go(self):
        """Perform one check-in: sign and POST the gathered status, then
        apply any non-empty configuration sections returned."""
        try:
            data = self.gather_data()
            data["signature"] = self.sshkey.sign_message(json.dumps(data, sort_keys=True))
            uri = 'https://{}/api/camera/check-in/{}'.format(remote_server, SysUtil.get_machineid())
            response = requests.post(uri, json=data)
            # do backwards change if response is valid later.
            try:
                if response.status_code == 200:
                    # do config modify/parse of command here.
                    data = response.json()
                    # Drop empty sections before applying the update.
                    for key, value in data.copy().items():
                        if value == {}:
                            del data[str(key)]
                    if len(data) > 0:
                        self.set_config_data(data)
                else:
                    self.logger.error("Unable to authenticate with the server.")
            except Exception as e:
                self.logger.error("Error getting data from config/status server: {}".format(str(e)))

        except Exception as e:
            self.logger.error("Error collecting data to post to server: {}".format(str(e)))

    def set_config_data(self, data: dict):
        """Apply server-sent config: per-identifier section/option values,
        plus special 'meta' actions (rename host, git self-update)."""
        for identifier, update_data in data.items():
            # dont rewrite empty...
            if not len(update_data):
                continue

            if identifier == "meta":
                hostname = update_data.get("hostname", None)
                if hostname:
                    SysUtil.set_hostname(hostname)
                if update_data.get("update", False):
                    SysUtil.update_from_git()

            # Only touch sections/options that already exist locally.
            config = SysUtil.ensure_config(identifier)
            sections = set(config.sections()).intersection(set(update_data.keys()))
            for section in sections:
                update_section = update_data[section]
                options = set(config.options(section)).intersection(set(update_section.keys()))
                for option in options:
                    config.set(section, option, str(update_section[option]))

            SysUtil.write_config(config, identifier)

    def set_yaml_data(self, data):
        # Placeholder: yaml-based config updates are not implemented yet.
        pass

    def process_deque(self, cameras=None):
        """Drain the communication queue, merging items into *cameras*.

        For a known identifier, the whole item overwrites the stored
        entry whenever its last_capture or last_upload stamp is newer.

        :param cameras: optional mapping to merge into (mutated in place).
        :return: the merged mapping.
        """
        if not cameras:
            cameras = dict()
        while len(self.communication_queue):
            item = self.communication_queue.pop()
            c = cameras.get(item['identifier'], None)
            if not c:
                cameras[item['identifier']] = item
                continue

            if item.get("last_capture", 0) > c.get("last_capture", 0):
                cameras[item['identifier']].update(item)

            if item.get("last_upload", 0) > c.get("last_upload", 0):
                cameras[item['identifier']].update(item)
        return cameras

    def gather_data(self):
        """Build the check-in payload: host facts plus the merged camera
        status section.

        :return: dict with ``meta`` and ``cameras`` sections.
        """
        free_mb, total_mb = SysUtil.get_fs_space_mb()
        onion_address, cookie_auth, cookie_client = SysUtil.get_tor_host()

        cameras = SysUtil.configs_from_identifiers(self.identifiers | self.temp_identifiers)
        self.logger.debug("Announcing for {}".format(str(list(self.identifiers | self.temp_identifiers))))

        camera_data = dict(
            meta=dict(
                version=SysUtil.get_version(),
                machine=SysUtil.get_machineid(),
                internal_ip=SysUtil.get_internal_ip(),
                external_ip=SysUtil.get_external_ip(),
                hostname=SysUtil.get_hostname(),
                onion_address=onion_address,
                client_cookie=cookie_auth,
                onion_cookie_client=cookie_client,
                free_space_mb=free_mb,
                total_space_mb=total_mb
            ),
            cameras=self.process_deque(cameras=cameras),
        )
        return camera_data

    def stop(self):
        """Signal the run() loop to exit on its next iteration."""
        self.stopper.set()

    def run(self):
        """Thread main loop: pump the scheduler once a second until
        stop() is signalled."""
        # Dropped the redundant `True and` from the loop condition.
        while not self.stopper.is_set():
            self.scheduler.run_pending()
            time.sleep(1)