示例#1
0
class Camera:
    """Grabs webcam frames on a background schedule, runs face detection on
    each frame, and buffers the detected faces in two parallel bounded queues
    (one "edit" variant, one "show" variant)."""

    def __init__(self, queue_len=10):
        # Bounded queues keep memory usage flat; oldest entries are dropped
        # in work_once() when the queues fill up.
        self.q_edit = Queue(maxsize=queue_len)
        self.q_show = Queue(maxsize=queue_len)
        self.camera = cv2.VideoCapture(0)
        self.classifier = Detector()
        self.sch = BackgroundScheduler()
        # Poll the camera twice a second.
        self.sch.add_job(self.work_once, 'interval', seconds=0.5)

    def work_once(self):
        """Capture one frame, detect a face, and push it onto both queues.

        Runs on the scheduler's worker thread, so it must never try to kill
        the process.
        """
        try:
            if not self.camera.isOpened():
                # BUG fix: the old code called exit(3) here. SystemExit is not
                # a subclass of Exception (so the handler below never caught
                # it), and raising it on a worker thread only kills that
                # thread, not the process. Report and skip this tick instead;
                # the next interval will retry.
                print('camera cannot open!')
                return
            ok, img = self.camera.read()
            if not ok:
                # Same rationale as above (old code: exit(2)).
                print('camera cannot be read!')
                return
            face_edit, face_show = self.classifier.get_face_by_array(img)
            if face_edit is None:
                # No face in this frame — nothing to enqueue.
                return None
            if self.q_edit.full():
                # Drop the oldest pair to make room; both queues are kept in
                # lock-step so edit/show entries stay paired.
                self.q_edit.get()
                self.q_show.get()
            self.q_edit.put(face_edit)
            self.q_show.put(face_show)
        except Exception as e:
            # Best-effort loop: log and keep the schedule alive.
            print(e)

    def start(self):
        """Start the periodic capture job."""
        self.sch.start()

    def pause(self):
        """Pause the periodic capture job."""
        self.sch.pause()

    def resume(self):
        """Resume a paused capture job."""
        self.sch.resume()

    def end(self):
        """Stop the scheduler and release the camera and any OpenCV windows."""
        self.sch.shutdown()
        self.camera.release()
        cv2.destroyAllWindows()

    def get_image(self):
        """Pop and return [face_edit, face_show] from the queues.

        Returns [None, None] when no face pair is buffered.
        """
        if self.q_edit.empty():
            return [None, None]
        else:
            rst = [self.q_edit.get(), self.q_show.get()]
            return rst

    def clear(self):
        """Discard all buffered face pairs."""
        self.q_edit.queue.clear()
        self.q_show.queue.clear()
def test3():
    """Demo: run a job on a 1-second interval, then pause, resume, inspect
    and finally remove all jobs."""
    start_time = time.time()
    scheduler = BackgroundScheduler()
    # Run my_job('123') every second; the explicit id lets us look it up later.
    scheduler.add_job(
        my_job, 'interval', args=('123', ), seconds=1,
        id='my_job_id')
    # start() returns immediately; jobs fire on a background thread.
    scheduler.start()
    print('运行到了这里1')
    while (scheduler.state):
        if time.time() - start_time > 5:
            print('暂停作业')
            scheduler.pause()
            break
        # BUG fix: the original loop busy-waited at 100% CPU; sleep briefly
        # between polls.
        time.sleep(0.1)
    print('恢复作业')
    if time.time() - start_time > 5:
        scheduler.resume()
    time.sleep(4)
    # get_jobs() returns all Job instances known to the scheduler.
    print('当前任务列表:{}'.format(
        scheduler.get_jobs()))
    scheduler.get_job('my_job_id')  # look up a single job by id

    scheduler.print_jobs()  # pretty-print the formatted job list

    print('移除作业')
    # scheduler.remove_job('my_job_id')  # would remove just this one job
    scheduler.remove_all_jobs()  # remove every job
示例#3
0
class Listener:
    """Abstract base for periodic listeners: `listen` is invoked on a
    background scheduler every `duration` seconds."""

    def __init__(self, **kwargs):
        super().__init__()
        # Polling period in seconds; defaults to 10 minutes.
        duration = kwargs.get('duration', 600)
        self.duration = duration
        scheduler = BackgroundScheduler()
        scheduler.add_job(self.listen, 'interval', seconds=duration)
        self.scheduler = scheduler

    @abstractmethod
    def listen(self):
        """Perform one listening pass; subclasses must implement this."""
        pass

    def start(self):
        """Begin the periodic listening job."""
        self.scheduler.start()

    def notify(self, *args, **kwargs):
        """Hook for subclasses to react to external events; no-op by default."""
        pass

    def shutdown(self):
        """Stop the scheduler permanently."""
        self.scheduler.shutdown()

    def pause(self):
        """Temporarily suspend the listening job."""
        self.scheduler.pause()
示例#4
0
class SerialReader:
    """Polls a serial port on a background scheduler and forwards any bytes
    read to a caller-supplied callback."""
    scheduler = None
    job = None

    def __init__(self, serial, callback, interval=0.1):
        self.active = True
        self.interval = interval
        self.serial = serial
        self.data = None
        self.callback = callback

    def start(self):
        """Create the scheduler and begin polling every `interval` seconds."""
        self.scheduler = BackgroundScheduler()
        self.job = self.scheduler.add_job(self.test,
                                          'interval',
                                          seconds=self.interval)
        self.scheduler.start()

    def pause(self):
        """Suspend polling (scheduler must have been started)."""
        self.scheduler.pause()

    def resume(self):
        """Resume a paused poller."""
        self.scheduler.resume()

    def test(self):
        """Poll once: if bytes are waiting, read them and invoke the callback."""
        if self.serial.in_waiting > 0:
            payload = self.serial.read()
            self.data = payload
            if payload is not None and len(payload) > 0:
                self.callback(payload)

    def stop(self):
        """Remove the polling job and shut the scheduler down."""
        self.scheduler.remove_all_jobs()
        self.scheduler.shutdown()
示例#5
0
def init_scheduler(server: ServerInterface):
    """(Re)create the module-level background scheduler, register its jobs via
    set_scheduler(), start it, and honor the auto-update config flag."""
    global sched
    # NOTE(review): if no module-level `sched` binding exists yet, `if sched:`
    # raises NameError — presumably a `sched = None` exists elsewhere in this
    # module; confirm.
    if sched:
        # Tear down any previous scheduler before replacing it.
        sched.remove_all_jobs()
        sched.shutdown()
        del sched
    sched = BackgroundScheduler()
    set_scheduler(server, True)
    sched.start()
    # When auto-update is disabled, keep the scheduler in a paused state so
    # jobs stay registered but do not fire.
    if not config['enableAutoUpdate']:
        sched.pause()
        debug_log('Scheduler paused')
class GatedScraper:
    """Rate-limited request queue for the Tadpoles API: queued requests are
    fired one at a time on a jittered interval, with a cooldown on 5xx
    responses."""

    def __init__(self, cookie, uid, interval=15):
        # `is None` rather than `== None` (PEP 8 identity comparison).
        if cookie is None:
            raise Exception('Cookie is required to be set')

        if uid is None:
            raise Exception('Tadpoles UID is required to be set')

        self.cookie = cookie
        self.uid = uid
        self.requests = []  # pending request descriptors, FIFO order

        self.sched = BackgroundScheduler()
        self.sched.start()
        # jitter spreads the firing times so requests do not look mechanical.
        self.sched.add_job(self.fire_job,
                           'interval',
                           seconds=interval,
                           jitter=5)

    def fire_job(self):
        """Pop and execute the oldest queued request, if any.

        On a 5xx response the request is re-queued and the scheduler backs
        off for 10 seconds.
        """
        if len(self.requests) == 0:
            return

        current_item = self.requests.pop(0)

        # A None URL is a sentinel: invoke the callback directly, no HTTP.
        if current_item['url'] is None:
            current_item['callback'](None, current_item['params'])
            return

        # Local import: urllib.error is not guaranteed to be imported at the
        # top of this file.
        from urllib.error import HTTPError

        cur_req = urllib.request.Request(current_item['url'])
        cur_req.add_header('cookie', self.cookie)
        cur_req.add_header('x-tadpoles-uid', self.uid)
        try:
            resp = urllib.request.urlopen(cur_req)
        except HTTPError as err:
            # BUG fix: urlopen() raises HTTPError for 4xx/5xx responses, so
            # the original `resp.getcode() >= 500` branch was unreachable.
            if err.code >= 500:
                # Server-side failure: put the request back at the front and
                # pause the whole queue briefly before retrying.
                self.requests.insert(0, current_item)
                self.sched.pause()
                time.sleep(10)
                self.sched.resume()
                return
            raise
        current_item['callback'](resp, current_item['params'])

    def add_job(self, url, callback, **params):
        """Queue a request; callback(response, params) fires when it runs."""
        to_append = {}
        to_append['url'] = url
        to_append['callback'] = callback
        to_append['params'] = params
        self.requests.append(to_append)

    def pause(self):
        """Suspend request firing."""
        self.sched.pause()

    def start(self):
        """Start the scheduler (already started by __init__)."""
        self.sched.start()
class Preferences:
    """Watches a YAML config file every 5 minutes and publishes the host's
    video list and on/off times onto two queues."""

    def __init__(self, config_path):
        self._logger = getLogger("raspberry.preferences")
        self.video_list = Queue()
        self.time_on_off = Queue()
        self.configuration_file = config_path
        self._scheduler = BackgroundScheduler()
        # Re-check the config file every 5 minutes.
        self._scheduler.add_job(self.update, "interval", seconds=300)
        self._logger.info("ready")

    def update(self):
        """Re-read the config file and enqueue the new preferences."""
        self._logger.info("Checking for new preferences")
        try:
            if not os.path.isfile(self.configuration_file):
                self._logger.error("%s does not exist!" %
                                   self.configuration_file)
                # BUG fix: the original fell through and tried to decode a
                # missing file anyway.
                return
            data = self.decode_file(self.configuration_file)
            if data is None:
                # decode_file already logged why (hostname not found / bad YAML).
                return
            video_list = data["videos"]
            times = data["time"]
            # "HH:MM-HH:MM" style range split into on/off halves.
            time_on, time_off = times.split("-")
            self.video_list.put(video_list)
            self.time_on_off.put((time_on, time_off))
        except Exception as e:
            self._logger.error("Error checking preferences: %s " % str(e))

    def start(self):
        """Do one immediate update, then start the periodic checks."""
        self.update()
        self._scheduler.start()

    def stop(self):
        """Pause the periodic checks (scheduler is kept alive)."""
        self._scheduler.pause()
        self._logger.info("Finished")

    def decode_file(self, path):
        """Parse the YAML file and return this host's section, or None."""
        data = {}
        with open(path, 'r') as stream:
            try:
                # BUG fix: yaml.load() without a Loader is deprecated and can
                # execute arbitrary constructors; safe_load only builds plain
                # Python objects.
                data = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
        hostname = gethostname()
        if hostname in data.keys():
            return data[hostname]
        else:
            self._logger.error(
                "Unable to find my name (%s) in the config file (%s) " %
                (hostname, path))
            return None
示例#8
0
class Scheduler(object):
    """Thin facade over a Redis-backed APScheduler BackgroundScheduler."""

    def __init__(self):
        self._scheduler = BackgroundScheduler(executors=executors,
                                              job_defaults=job_defaults)
        # Persist jobs and run times in Redis so they survive restarts.
        self._scheduler.add_jobstore('redis',
                                     jobs_key='crontpy.jobs',
                                     run_times_key='crontpy.run_times')

    @property
    def running(self):
        """Whether the underlying scheduler has been started."""
        return self._scheduler.running

    def start(self):
        """Start processing scheduled jobs."""
        self._scheduler.start()

    def shutdown(self, wait=True):
        """Stop the scheduler, optionally waiting for running jobs."""
        self._scheduler.shutdown(wait)

    def pause(self):
        """Pause all job processing."""
        self._scheduler.pause()

    def resume(self):
        """Resume paused job processing."""
        self._scheduler.resume()

    def get_jobs(self):
        """Return every scheduled job instance."""
        return self._scheduler.get_jobs()

    def get_job(self, jid):
        """Return the job with the given id, or None."""
        return self._scheduler.get_job(job_id=jid)

    def run_job(self, jid):
        """Execute the job's callable synchronously in the current thread."""
        job = self.get_job(jid)
        if not job:
            raise Exception('job id:{0} not found'.format(jid))
        job.func(*job.args, **job.kwargs)

    def resume_job(self, jid):
        """Resume one paused job."""
        self._scheduler.resume_job(job_id=jid)

    def pause_job(self, jid):
        """Pause one job without removing it."""
        self._scheduler.pause_job(job_id=jid)

    def modify_job(self, jid, **changes):
        """Apply attribute changes to one job and return it."""
        return self._scheduler.modify_job(job_id=jid, **changes)

    def delete_job(self, jid):
        """Permanently remove one job."""
        self._scheduler.remove_job(job_id=jid)
示例#9
0
def begin():
    """Start a scheduler that refreshes every indicator once a minute.

    Returns the scheduler so callers can pause/resume/shut it down later
    (the original dropped the only reference on return).
    """
    scheduler = BackgroundScheduler()
    for task in (update, update_aroon, update_atr, update_chaikin,
                 update_sma, update_ssl):
        scheduler.add_job(task, 'interval', minutes=1)
    scheduler.start()

    # NOTE(review): these market-hours checks run exactly once at startup,
    # not periodically — confirm that is intended. isocalendar()[2] is the
    # ISO weekday (5 = Friday, 7 = Sunday).
    date = datetime.now()
    if date.isocalendar()[2] == 5 and date.hour >= 13:
        print("closed")
        scheduler.pause()
    if date.isocalendar()[2] == 7 and date.hour >= 14:
        print("open")
        scheduler.resume()
    return scheduler
示例#10
0
    def job_event_handler(sched: BackgroundScheduler,
                          err_count: list[int | float],
                          event: JobExecutionEvent):
        # APScheduler event listener: counts job errors/missed runs within a
        # sliding 60-second window, builds a (Chinese) push message, and
        # pauses the whole scheduler after 3 failures in quick succession.
        # err_count is a shared 2-element list: [failure count, last-failure timestamp].
        # Reset the counter when the previous failure was more than 60s ago.
        if (time.time() - err_count[1]) > 60:
            err_count[0] = 0

        err_count[0] += 1
        err_count[1] = time.time()

        # NOTE(review): get_job() may return None (e.g. one-shot job already
        # removed); the except below relies on that raising AttributeError.
        job: apscheduler.job.Job = sched.get_job(event.job_id)
        now = datetime.datetime.now()
        now = now.strftime("%Y-%m-%d %H:%M:%S")

        try:
            if event.exception:
                # Job raised: include traceback and exception text.
                pushstr = "{}\n{}\n第{}次出现异常\n异常任务: {}\nTraceback (most recent call last):\n{}\n{}\n".format(
                    str(now),
                    str(get_self_dir()[2]),
                    str(err_count[0]),
                    str(job.name),
                    str(event.traceback),
                    str(event.exception),
                )
            else:
                # No exception on the event means the run was skipped/missed.
                pushstr = "{}\n{}\n第{}次出现异常\n异常任务: {}\n任务被跳过\n原定执行时间: \n{}\n".format(
                    str(now),
                    str(get_self_dir()[2]),
                    str(err_count[0]),
                    str(job.name),
                    str(event.scheduled_run_time),
                )
        except Exception:
            # Fallback message when the job (or its name) is unavailable.
            pushstr = "{}\n{}\n第{}次出现异常\n异常任务: 未知\n原定执行时间: \n{}\n".format(
                str(now),
                str(get_self_dir()[2]),
                str(err_count[0]),
                str(event.scheduled_run_time),
            )

        # Three failures inside the window: stop scheduling further runs.
        if err_count[0] >= 3:
            sched.pause()
            pushstr += "短时间内出现3次异常, 定时任务已暂停"

        print(pushstr)
        if "pushkey" in push_option:
            print(dayepao_push(pushstr, **push_option))
示例#11
0
def test2():
    """Demo: run a job every second, pause it after 5 seconds, then resume."""
    start_time = time.time()
    # daemonic=False keeps the scheduler thread non-daemonic.
    scheduler = BackgroundScheduler(daemonic = False)
    # Run my_job('123') every second; explicit id allows later lookup.
    scheduler.add_job(my_job, 'interval', args=('123',),seconds=1, id='my_job_id')
    scheduler.start()  # returns immediately; jobs fire on a background thread
    print('运行到了这里1')
    while (scheduler.state):
        if time.time() - start_time >5:
            print('暂停作业')
            scheduler.pause()
            break
        # BUG fix: the original loop busy-waited at 100% CPU; sleep briefly
        # between polls.
        time.sleep(0.1)
    print('恢复作业')
    if time.time() - start_time >5:
        scheduler.resume()  # resume the paused jobs
    time.sleep(4)
    print('再次暂停作业')
示例#12
0
class Repeat:
    """Runs a callable repeatedly on a background scheduler at a fixed
    interval, with pause/resume/quit controls."""

    def __init__(self, toDo, repeatTime):
        self.isQuit = False
        self.scheduler = BackgroundScheduler()
        self.job = None
        self.i = 1  # demo counter used by sayHi()
        self.addJob(toDo, repeatTime)

    def addJob(self, toDo, repeatTime=1800):
        """Register *toDo* to run every *repeatTime* seconds."""
        print("Job added every " + str(repeatTime))
        self.job = self.scheduler.add_job(toDo,
                                          'interval',
                                          seconds=repeatTime,
                                          max_instances=3)

    def startJobs(self):
        """Start the scheduler (and hence the registered job)."""
        print("Started job")
        self.scheduler.start()

    def quitJob(self):
        """Remove every job and shut the scheduler down without waiting."""
        self.scheduler.remove_all_jobs()
        self.scheduler.shutdown(wait=False)

    def sayHi(self):
        """Demo task: greets, and on the third call exercises pause/resume."""
        print("Hi")
        self.i += 1
        if self.i == 3:
            self.pauseJob()
            self.resumeJob()

    def resumeJob(self):
        """Resume a paused scheduler."""
        self.scheduler.resume()

    def pauseJob(self):
        """Pause the scheduler."""
        self.scheduler.pause()
        print("paused")


#   ------------------        usage:          ----------------------
# s = Repeat(some_callable, repeat_seconds)   # both arguments are required

# try:
#    s.startJobs()
# except (KeyboardInterrupt, SystemExit):
#    s.quitJob()
示例#13
0
class Routine:
    """Schedules a list of commands against cron expressions and POSTs each
    one to a local CGI endpoint when its trigger fires."""

    def __init__(self,
                 config,
                 routine_id,
                 desc,
                 num_cmd,
                 cmd_list=None,
                 crontab_list=None):
        # BUG fix: the original used mutable default arguments ([]), which
        # are shared across all calls. None sentinels avoid that.
        cmd_list = [] if cmd_list is None else cmd_list
        crontab_list = [] if crontab_list is None else crontab_list
        self.main_conf = config
        self.num_cmd = num_cmd
        self.routine_id = routine_id
        self.cmd_list, self.crontab_list = cmd_list, crontab_list
        self.sched = BackgroundScheduler()
        # cmd_list and crontab_list are assumed parallel (one cron spec per
        # command), as in the original code.
        for i in range(len(cmd_list)):
            self.sched.add_job(self.sendCommand,
                               CronTrigger.from_crontab(crontab_list[i]),
                               kwargs={'cmd': cmd_list[i]})

    def sendCommand(self, cmd):
        """POST *cmd* to the local picmd endpoint.

        Returns 1 on success and -1 on failure.
        """
        logger.info("Routine #" + str(self.routine_id + 1) +
                    ": sending command " + cmd)
        address = "http://localhost/cgi-bin/picmd"
        try:
            requests.post(address, data={'code': cmd})
        except Exception:
            # BUG fix: the original had `finally: return 1`, which overrode
            # this branch's `return -1` — failures were always reported as
            # success.
            return -1
        return 1

    def run(self):
        """Start the scheduler and mirror the global routine_status flag:
        pause when it goes to 0, resume when it returns to 1."""
        global routine_status
        self.sched.start()
        status = 1  # 1 = running, 0 = paused
        while True:
            if routine_status[self.routine_id] == 0 and status == 1:
                self.sched.pause()
                logger.info("Routine #" + str(self.routine_id + 1) + " paused")
                status = 0
            elif routine_status[self.routine_id] == 1 and status == 0:
                self.sched.resume()
                logger.info("Routine #" + str(self.routine_id + 1) +
                            " resumed")
                status = 1
            time.sleep(10)
示例#14
0
def test2():
    """Demo: run a job every second, pause it after 5 seconds, then resume."""
    start_time = time.time()
    # daemonic=False keeps the scheduler thread non-daemonic.
    scheduler = BackgroundScheduler(daemonic=False)
    # Run my_job('123') every second; the explicit id allows later lookup.
    scheduler.add_job(
        my_job, 'interval', args=('123', ), seconds=1,
        id='my_job_id')
    scheduler.start()  # returns immediately; jobs fire on a background thread
    print('运行到了这里1')
    while (scheduler.state):
        if time.time() - start_time > 5:
            print('暂停作业')
            scheduler.pause()
            break
        # BUG fix: the original loop busy-waited at 100% CPU; sleep briefly
        # between polls.
        time.sleep(0.1)
    print('恢复作业')
    if time.time() - start_time > 5:
        scheduler.resume()  # resume the paused jobs
    time.sleep(4)
    print('再次暂停作业')
示例#15
0
class Map_update:
    """Periodically generates a meteorological image and records it in the
    database via the module-level session."""

    def __init__(self) -> None:
        # NOTE(review): `standalone` looks like an APScheduler 2.x option —
        # confirm this APScheduler version accepts it.
        self.scheduler = BackgroundScheduler(standalone=True, coalesce=True)

    def sche_set(self, set_time):
        """Schedule insert_img to run every *set_time* seconds."""
        self.scheduler.add_job(self.insert_img, "interval", seconds=set_time)

    def sche_start(self):
        """Start the scheduler."""
        self.scheduler.start()

    def sche_shutdown(self):
        """Shut the scheduler down permanently."""
        self.scheduler.shutdown()

    def sche_pause(self):
        """Pause the scheduler."""
        self.scheduler.pause()

    def img_io(self, path):
        """Return the PNG-encoded bytes of the image at *path*."""
        buf = BytesIO()
        Image.open(path).save(buf, "png")
        return buf.getvalue()

    def insert_img(self):
        """Generate the current image and persist a Cloud row for it."""
        path, img_time = meteorological_img.main()
        # image_file = self.img_io(path)
        record = Cloud()
        record.img_name = os.path.basename(path)
        record.img_path = path
        record.img_time = img_time
        record.created_at = datetime.now()
        record.tag = "synthetic"
        record.zoom_level = 2
        session.add(record)
        session.commit()
        print(path)
        print("insert")
示例#16
0
class Scheduler(NamedLogger):
    """Scheduler class creates a default system scheduler with a default refresh
    time and utility functions to create and remove jobs. All jobs are identified
    by an id string."""
    __logname__ = "scheduler"

    def __init__(self, refresh=3600, debug=False):
        self.scheduler = BackgroundScheduler()
        self.refresh = refresh  # default job period in seconds
        startup_msg = "created scheduler with refresh period {}".format(refresh)
        self.logger = self.setup_logger(debug=debug, first_msg=startup_msg)

    def start(self):
        """Begin executing scheduled jobs."""
        self.logger.info("starting scheduler")
        self.scheduler.start()

    def stop(self):
        """Suspend job execution.

        NOTE(review): this pauses rather than shuts down, so jobs stay
        registered and a later start() is expected to fail on an
        already-started scheduler — confirm intended.
        """
        self.logger.info("stopping scheduler")
        self.scheduler.pause()

    def add_interval_job(self, func, job_id, job_args=None):
        """Adds periodic job with the default refresh period"""
        self.logger.info("adding job {} args {}".format(job_id, job_args))
        self.scheduler.add_job(func,
                               trigger="interval",
                               id=job_id,
                               seconds=self.refresh,
                               args=job_args)

    def remove_job(self, job_id):
        """Stops and removes a given job"""
        self.logger.info("removing job {}".format(job_id))
        try:
            self.scheduler.remove_job(job_id)
        except JobLookupError:
            self.logger.error("no such job {}".format(job_id))
示例#17
0
class AutoCyclingDisplayController(CyclableDisplayController):
    """
    Display controller that auto cycles through images.
    """
    def __init__(
            self,
            driver: DisplayDriver,
            image_store: ImageStore,
            identifier: Optional[str] = None,
            image_transformers: Sequence[ImageTransformer] = (),
            cycle_image_after_seconds: float = DEFAULT_SECONDS_BETWEEN_CYCLE):
        """
        Constructor.
        :param driver: see `CyclableDisplayController.__init__`
        :param image_store: see `CyclableDisplayController.__init__`
        :param identifier: see `CyclableDisplayController.__init__`
        :param image_transformers: see `CyclableDisplayController.__init__`
        :param cycle_image_after_seconds: the number of seconds before cycling on to the next image
        """
        super().__init__(driver, image_store, identifier, image_transformers)
        self.cycle_image_after_seconds = cycle_image_after_seconds
        self._scheduler = BackgroundScheduler()

    def start(self):
        """Begin auto-cycling: schedule display_next_image on an interval."""
        # Guard against double-start while the scheduler is actively running.
        # NOTE(review): after stop() the scheduler is paused, not stopped, so
        # state != STATE_RUNNING and this would call start() on an
        # already-started scheduler — confirm restart is supported.
        if self._scheduler.state != STATE_RUNNING:
            self._scheduler.start()
            self._scheduler.add_job(self.display_next_image,
                                    "interval",
                                    seconds=self.cycle_image_after_seconds)

    def stop(self):
        """Stop auto-cycling: drop the job and pause the scheduler."""
        self._scheduler.remove_all_jobs()
        try:
            self._scheduler.pause()
        except SchedulerNotRunningError:
            # stop() before start(): nothing to pause.
            pass
示例#18
0
def test3():
    """Demo: run a job on a 1-second interval, then pause, resume, inspect
    and finally remove all jobs."""
    start_time = time.time()
    scheduler = BackgroundScheduler()
    # Run my_job('123') every second; the explicit id allows later lookup.
    scheduler.add_job(my_job, 'interval', args=('123',),seconds=1, id='my_job_id')
    scheduler.start()  # returns immediately; jobs fire on a background thread
    print('运行到了这里1')
    while (scheduler.state):
        if time.time() - start_time >5:
            print('暂停作业')
            scheduler.pause()
            break
        # BUG fix: the original loop busy-waited at 100% CPU; sleep briefly
        # between polls.
        time.sleep(0.1)
    print('恢复作业')
    if time.time() - start_time >5:
        scheduler.resume()
    time.sleep(4)
    # get_jobs() returns every Job instance known to the scheduler.
    print('当前任务列表:{}'.format(scheduler.get_jobs()))
    scheduler.get_job('my_job_id')  # look up a single job by id

    scheduler.print_jobs()  # pretty-print the formatted job list

    print('移除作业')
    # scheduler.remove_job('my_job_id')  # would remove just this one job
    scheduler.remove_all_jobs()  # remove every job
class SpecificTimeReporter(object):
    """
    This class is used to get the real-time market data at specific time everyday from yahoo finance database,
    the accessed data will not be saved at local,
    please use this with my Stock Data Reader class
    """
    def __init__(self, function):
        """
        :param function: function of the arranged job, in this case,
        it should be the getCurrentMarketData function
        """
        self._scheduler = None
        self.function = function
        self.count = 1        # next job id to hand out (stringified)
        self._all_job = {}    # maps "HHMMSS" time string -> scheduler job id
        self.start()

    def start(self):
        """
        start the reporter
        :return: None
        """
        self._scheduler = BackgroundScheduler()
        self._scheduler.start()

    def convertInt2Time(self, hour, minute, second):
        """
        Return the zero-padded "HHMMSS" key for the given wall-clock time.
        :param hour: integer ranging from 0 to 23
        :param minute: integer ranging from 0 to 59
        :param second: integer ranging from 0 to 59
        :return: string format of time
        """
        # Idiom fix: format-spec zero padding replaces the manual
        # "0" + str(x) branches (identical output for the documented ranges).
        return "{:02d}{:02d}{:02d}".format(hour, minute, second)

    def addJob(self, hour, minute, second, *args):
        """
        add a reporter
        :param hour: integer ranging from 0 to 23
        :param minute: integer ranging from 0 to 59
        :param second: integer ranging from 0 to 59
        :param args: tickerList,like:["AAPL","IBM","JPM"]
        :return: None
        """
        timeString = self.convertInt2Time(hour, minute, second)

        if timeString not in self._all_job:
            self._all_job[timeString] = str(self.count)
            self._scheduler.add_job(self.function,
                                    trigger='cron',
                                    hour=hour,
                                    minute=minute,
                                    second=second,
                                    args=args,
                                    id=str(self.count))
            self.count = self.count + 1
        else:
            # The same wall-clock time is already registered: move that job.
            self._scheduler.reschedule_job(self._all_job[timeString],
                                           trigger='cron',
                                           hour=hour,
                                           minute=minute,
                                           second=second)

    def removeJob(self, hour, minute, second):
        """
        remove a reporter
        :param hour: integer ranging from 0 to 23
        :param minute: integer ranging from 0 to 59
        :param second: integer ranging from 0 to 59
        :return: None
        """
        timeString = self.convertInt2Time(hour, minute, second)
        if timeString not in self._all_job:
            warnings.warn("Job not found!")
        else:
            # BUG fix: the mapping entry must be dropped too; otherwise a
            # later addJob() for the same time tries to reschedule the
            # removed job id and raises JobLookupError.
            self._scheduler.remove_job(self._all_job.pop(timeString))

    def removeAllJobs(self):
        """
        remove all reporters
        :return: None
        """
        self._scheduler.remove_all_jobs()
        # BUG fix: keep the time->job-id mapping consistent with the
        # scheduler's (now empty) job list.
        self._all_job.clear()

    def pause(self):
        """
        pause all reporters
        :return: None
        """
        self._scheduler.pause()

    def resume(self):
        """
        resume the paused reporters
        :return: None
        """
        self._scheduler.resume()

    def getAllJobs(self):
        """
        print the information of all reporters
        :return: None
        """
        self._scheduler.print_jobs()

    def shutDown(self):
        """
        shut down all reporters
        :return: None
        """
        self._scheduler.shutdown()
class PeriodicReporter(object):
    """
    This class is used to periodically get real-time market data from Yahoo finance database,
    the accessed data will not be saved at local,
    please use this with my Stock Data Reader class
    """
    def __init__(self, function):
        """
        :param function: function of the arranged job, in this case,
        it should be the getCurrentMarketData function
        """
        self._scheduler = None
        self.function = function
        self.existingJob = False  # at most one interval job at a time
        self.start()

    def start(self):
        """
        method to start the reporter
        :return: None
        """
        self._scheduler = BackgroundScheduler()
        self._scheduler.start()

    def addJob(self, interval, *args):
        """
        add a reporter
        :param interval: the interval between two reports, 20 means 20 seconds, etc...
        :param args: tickerList,like:["AAPL","IBM","JPM"]
        :return: None
        """
        # Only one job may exist at a time; replace any current one.
        if self.existingJob:
            warnings.warn("Existing job will be removed!")
            self._scheduler.remove_all_jobs()
        self._scheduler.add_job(self.function,
                                trigger='interval',
                                seconds=interval,
                                args=args)
        self.existingJob = True

    def removeJob(self):
        """
        remove the current reporter
        :return: None
        """
        self._scheduler.remove_all_jobs()
        self.existingJob = False

    def pause(self):
        """
        pause the current reporter
        :return: None
        """
        self._scheduler.pause()

    def resume(self):
        """
        resume the paused reporter
        :return: None
        """
        self._scheduler.resume()

    def getJob(self):
        """
        print the details of the current reporter
        :return: None
        """
        self._scheduler.print_jobs()

    def shutDown(self):
        """
        shut down the reporter
        :return: None
        """
        self._scheduler.shutdown()
示例#21
0
class MainRunner(object):
    """Watches a config directory for Jenkins-produced *.json task files and
    keeps one periodically-scheduled runner per file, updating/removing the
    runner as files change."""
    workers = {}                  # config file path -> runner instance
    dirpath = '.'
    defaultOutputPath = 'output'

    class NoRunningFilter(logging.Filter):
        """Suppresses APScheduler's noisy 'Execution ...' log records."""
        def filter(self, record):
            return not record.msg.startswith('Execution')

    def __init__(self, dirpath='.'):
        '''
        local path for load config
        '''
        logger.info("Initialing Main Runner for Hasal agent")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading runner config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.load_dir(self.dirpath)

        # Watch the config folder so new/changed/deleted json files are
        # picked up at runtime.
        event_handler = JsonHandler(patterns=["*.json"], ignore_directories=True)
        event_handler.set_handler(oncreated=self.load, onmodified=self.load, ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

        my_filter = self.NoRunningFilter()
        logging.getLogger("apscheduler.scheduler").addFilter(my_filter)

    def load_dir(self, folder):
        '''
        load every *.json config directly inside folder (non-recursive)
        '''
        # BUG fix: generator.next() is Python-2-only; the next() builtin
        # works on both Python 2.6+ and 3.x.
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if 'json' in fname[-4:]:
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        given a json file, load and create a task run regularly
        '''
        logger.info(fp + " was loaded!")
        with open("agent.log", 'w+') as f:
            f.write(fp + " was loaded!")
        data = {}
        loaded = False
        # The file may still be being written; retry for up to ~10 seconds.
        for _ in range(10):
            try:
                with open(fp) as in_data:
                    data = json.load(in_data)
                    # default will load JOB_NAME parameter in Jenkins created json file
                    data['name'] = data.get('JOB_NAME', "Jenkins Job")
                    data['path'] = fp
                    loaded = True
                # BUG fix: without this break the file was re-read on every
                # one of the 10 iterations even after a successful load.
                break
            except ValueError as e:
                # BUG fix: Exception.message is Python-2-only; str(e) is portable.
                logger.warning(fp + " loaded failed: " + str(e))
                return None
            except Exception:
                logger.warning("File is not ready. Wait 1 second for another try.")
                time.sleep(1)

        if not loaded:
            logger.warning(fp + " is not ready for 10 seconds.")
            return None

        # load interval value from Jenkins created json file (default : 30 )
        interval = int(data.get('interval', 30))

        # load outputpath and defaultoutputpath from Jenkins created json file
        if 'output' in data:
            if 'defaultOutputPath' in data['output']:
                self.defaultOutputPath = data['output']['defaultOutputPath']
            if 'dirpath' in data['output']:
                data['output']['outputPath'] = os.path.join(self.defaultOutputPath, data['output']['dirpath'])
        else:
            data['output'] = {'outputPath': self.defaultOutputPath}

        if fp in self.workers:  # existing runner found
            logger.info("Update exisitng runner [%s]" % fp)
            runner = self.workers[fp]
            runner.update(**data)
            # //memo: Interval can't be modified
            self.scheduler.modify_job(job_id=fp,
                                      func=runner.run,
                                      name=runner.name
                                      )

        else:  # Create new
            logger.info("Create new runner [%s]" % fp)
            module_path = data.get('AGENT_MODULE_PATH', "hasalTask")
            object_name = data.get('AGENT_OBJECT_NAME', "HasalTask")
            try:
                runner_module = getattr(importlib.import_module(
                                        module_path), object_name)
            except Exception as e:
                logger.exception(e)
                return None

            runner = runner_module(**data)
            self.workers[fp] = runner
            self.scheduler.add_job(runner.run, 'interval',
                                   id=fp,
                                   name=runner.name,
                                   seconds=interval
                                   )
        return runner

    def list(self):
        '''
        to list all configs loaded
        format: [squence number] [minion name] [config_path] [status]
        '''
        # BUG fix: iterating a dict yields keys only, so the original tuple
        # unpacking raised ValueError; items() yields the (path, worker) pairs.
        for (fp, worker) in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        given file path, stop running instance if possible
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        # BUG fix: BackgroundScheduler.pause() pauses the whole scheduler and
        # accepts no job_id; pause_job() pauses just this file's job.
        self.scheduler.pause_job(fp)

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        # NOTE(review): shutdown() here will raise if the scheduler was
        # already stopped; relies on interpreter-exit suppression — confirm.
        self.unload_all()

    def get_config(self):
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass
示例#22
0
File: agent.py  Project: ypwalter/Hasal
class MainRunner(object):
    """
    Hasal agent main runner.

    Watches *dirpath* for ``*.json`` task configs; each config gets a
    ``HasalTask`` runner scheduled on a BackgroundScheduler at the configured
    interval.  Config create/modify/delete events are applied live via a
    watchdog Observer.
    """

    # Class-level defaults kept for backward compatibility; __init__ assigns a
    # per-instance workers dict so separate instances do not share state.
    workers = {}
    dirpath = '.'
    defaultOutputPath = 'output'

    def __init__(self, dirpath='.'):
        '''
        local path for load config
        '''
        logger.info("Initialing Main Runner for Hasal agent")
        # BUG FIX: give each instance its own registry instead of mutating the
        # shared class attribute.
        self.workers = {}
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading runner config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.load_dir(self.dirpath)

        # Watch the config folder so config changes are applied while running.
        event_handler = JsonHandler(patterns=["*.json"], ignore_directories=True)
        event_handler.set_handler(oncreated=self.load, onmodified=self.load, ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

    def load_dir(self, folder):
        """Load every JSON config file directly inside *folder* (non-recursive)."""
        # BUG FIX: generator.next() is Python-2 only; use the builtin next().
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            # endswith('.json') matches the "*.json" pattern the watchdog
            # handler uses, instead of the looser "'json' in fname[-4:]" test.
            if fname.endswith('.json'):
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        given a json file, load and create a task run regularly

        Returns the runner instance, or None when the file cannot be parsed
        or the runner class cannot be imported.
        '''
        logger.info(fp + " was loaded!")
        with open(fp) as in_data:
            try:
                data = json.load(in_data)
                data['name'] = "Jenkins Job"
                data['path'] = fp
            except ValueError as e:
                # BUG FIX: Exception.message was removed in Python 3.
                logger.warning(fp + " loaded failed: " + str(e))
                return None
        interval = 30
        if 'interval' in data:
            interval = int(data['interval'])

        if 'output' in data:
            if 'defaultOutputPath' in data['output']:
                self.defaultOutputPath = data['output']['defaultOutputPath']
            if 'dirpath' in data['output']:
                data['output']['outputPath'] = os.path.join(self.defaultOutputPath, data['output']['dirpath'])
        else:
            data['output'] = {'outputPath': self.defaultOutputPath}

        if fp in self.workers:  # existing runner found
            logger.info("Update exisitng runner [%s]" % fp)
            runner = self.workers[fp]
            runner.update(**data)
            # //memo: Interval can't be modified
            self.scheduler.modify_job(job_id=fp,
                                      func=runner.run,
                                      name=runner.name
                                      )

        else:  # Create new
            logger.info("Create new runner [%s]" % fp)
            module_path = "hasalTask"
            object_name = "HasalTask"
            try:
                runner_module = getattr(importlib.import_module(
                                        module_path), object_name)
            except Exception as e:
                logger.exception(e)
                return None

            runner = runner_module(**data)
            self.workers[fp] = runner
            self.scheduler.add_job(runner.run, 'interval',
                                   id=fp,
                                   name=runner.name,
                                   seconds=interval
                                   )
        return runner

    def list(self):
        '''
        to list all configs loaded
        format: [squence number] [minion name] [config_path] [status]
        '''
        # BUG FIX: iterate items(); iterating the dict yields keys only and
        # the (fp, worker) unpacking would raise ValueError.
        for fp, worker in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        given file path, stop running instance if possible

        Returns True when a runner was removed, False otherwise.
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        Placeholder for batch removal.
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances by shutting the scheduler down
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        # BUG FIX: BaseScheduler.pause() accepts no job_id; pausing a single
        # job is pause_job().
        self.scheduler.pause_job(job_id=fp)

    def resume(self, sn):
        '''No-op: resuming a paused runner is not supported yet.'''
        # not sure we can do this
        pass

    def __del__(self):
        '''Best-effort cleanup on garbage collection.'''
        self.unload_all()

    def get_config(self):
        '''Return the runtime configuration mapping (currently always empty).'''
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing.
        Not implemented yet.
        '''
        pass
示例#23
0
        currTimez = currTime.astimezone(local)
        deltaTime = nextTime - currTimez

        if iter % 2 == 0:
            lcd.clear()
            lcd.message = "Next pump in:\n" + str(deltaTime)
        else:
            lcd.clear()
            lcd.message = "PH: " + str(senPH) + "\n" + "EC: " + str(senEC)

        if sysError:
            print("System Paused!")
            lcd.clear()
            lcd.message = "System Paused!\nPress Select to Start"
            time.sleep(slp)
            scheduler.pause()
            while sysError:
                if lcd.select_button:
                    sysError = False
        else:
            scheduler.resume()

        #-------------------------------------------
        # Read Ultrasonic
        senUltra = readSenUltra()
        senPH = readSenPH()
        senEC = readSenEC()

        AutoLevelData()
        AutoLevelTest()
示例#24
0
class CronManager:
    # Thin wrapper around APScheduler's BackgroundScheduler for registering,
    # pausing, resuming, updating and removing cron jobs; jobs can optionally
    # be persisted in MongoDB so they survive process restarts.
    def __init__(self, use_mongo_db=True):
        # All jobs are scheduled in Shanghai local time.
        self.scheduler = BackgroundScheduler(
            timezone=timezone('Asia/Shanghai'))
        self.scheduler.configure()

        if use_mongo_db:
            # Persisted job store; replace_existing lets a job restored from
            # Mongo be overwritten when re-registered under the same id.
            self.job_store = MongoDBJobStore(database='apscheduler',
                                             collection='cronJob',
                                             client=db)
            self.scheduler.add_jobstore(self.job_store)
            self.is_replace_existing = True
        else:
            self.is_replace_existing = False

    def add_cron(self, cron_instance):
        """Register a Cron instance; supports 'interval' and 'date' triggers.

        Returns the cron job id on success; on any failure the error is
        logged and None is returned implicitly.
        """
        try:
            if not isinstance(cron_instance, Cron):
                raise TypeError('please add correct cron!')
            if cron_instance.trigger_type == 'interval':
                seconds = cron_instance.trigger_args.get('seconds')
                # Accept either an int or an int-convertible value.
                if not isinstance(
                        seconds,
                        int) and not common.can_convert_to_int(seconds):
                    raise TypeError('please set correct time interval')
                seconds = int(seconds)
                if seconds <= 0:
                    raise ValueError('please set interval > 0')
                job = self.scheduler.add_job(
                    func=cron_instance.cron_mission,
                    trigger=cron_instance.trigger_type,
                    seconds=seconds,
                    replace_existing=self.is_replace_existing,
                    coalesce=True,
                    id=cron_instance.get_cron_job_id(),
                    max_instances=5,
                    jitter=0)
            elif cron_instance.trigger_type == 'date':
                run_date = cron_instance.trigger_args.get('run_date')
                # TODO: validate the type of run_date
                job = self.scheduler.add_job(
                    func=cron_instance.cron_mission,
                    trigger=cron_instance.trigger_type,
                    run_date=run_date,
                    replace_existing=self.is_replace_existing,
                    coalesce=True,
                    id=cron_instance.get_cron_job_id())
            elif cron_instance.trigger_type == 'cron':
                raise TypeError('暂时不支持 trigger_type 等于 \'cron\'')

            return cron_instance.get_cron_job_id()
        except BaseException as e:
            with app.app_context():
                current_app.logger.error("add_cron failed. - %s" % str(e))

    def start(self, paused=False):
        # Start the scheduler thread; with paused=True jobs don't fire until
        # the scheduler is resumed.
        self.scheduler.start(paused=paused)

    def pause_cron(self, cron_id=None, pause_all=False):
        # Pause either the whole scheduler or a single job by id.
        if pause_all:
            self.scheduler.pause()
        elif cron_id:
            self.scheduler.pause_job(job_id=cron_id)

    def resume_cron(self, cron_id=None, resume_all=False):
        # Resume either the whole scheduler or a single job by id.
        if resume_all:
            self.scheduler.resume()
        elif cron_id:
            self.scheduler.resume_job(job_id=cron_id)

    def del_cron(self, cron_id=None, del_all=False):
        # Remove either every job or a single job by id.
        if del_all:
            self.scheduler.remove_all_jobs()
        elif cron_id:
            self.scheduler.remove_job(job_id=cron_id)

    def update_cron(self, cron_job_id, project_id, cron_info):
        """Update an existing job's trigger and rebuild its Cron argument.

        cron_info is a dict of display/notification settings; errors are
        logged rather than raised.
        """
        try:
            if not isinstance(cron_job_id, str):
                raise TypeError('cron_id must be str')

            if not isinstance(project_id, str):
                raise TypeError('project_id must be str')

            if not isinstance(cron_info, dict):
                raise TypeError('cron_info must be dict')

            trigger_type = cron_info.get('triggerType')
            interval = cron_info.get('interval')
            run_date = cron_info.get('runDate')
            test_suite_id_list = cron_info.get('testSuiteIdList')
            include_forbidden = cron_info.get('includeForbidden')
            test_env_id = cron_info.get('testEnvId')
            always_send_mail = cron_info.get('alwaysSendMail')
            alarm_mail_group_list = cron_info.get('alarmMailGroupList')
            enable_wxwork_notify = cron_info.get('enableWXWorkNotify')
            wxwork_api_key = cron_info.get('WXWorkAPIKey')
            wxwork_mention_mobile_list = cron_info.get(
                'WXWorkMentionMobileList')
            always_wxwork_notify = cron_info.get('alwaysWXWorkNotify')
            enable_ding_talk_notify = cron_info.get('enableDingTalkNotify')
            ding_talk_access_token = cron_info.get('DingTalkAccessToken')
            ding_talk_at_mobiles = cron_info.get('DingTalkAtMobiles')
            ding_talk_secret = cron_info.get('DingTalkSecret')
            always_ding_talk_notify = cron_info.get('alwaysDingTalkNotify')

            # NOTE(review): interval is range-checked via int(interval) but
            # passed to IntervalTrigger unconverted — confirm callers always
            # supply an int here.
            if trigger_type == 'interval' and int(interval) > 0:
                self.scheduler.modify_job(
                    job_id=cron_job_id,
                    trigger=IntervalTrigger(seconds=interval))
            elif trigger_type == 'date':
                self.scheduler.modify_job(
                    job_id=cron_job_id, trigger=DateTrigger(run_date=run_date))
            else:
                raise TypeError('更新定时任务触发器失败!')
            if run_date:
                cron = Cron(
                    cron_job_id=cron_job_id,
                    test_suite_id_list=test_suite_id_list,
                    project_id=project_id,
                    test_env_id=test_env_id,
                    include_forbidden=include_forbidden,
                    enable_wxwork_notify=enable_wxwork_notify,
                    wxwork_api_key=wxwork_api_key,
                    wxwork_mention_mobile_list=wxwork_mention_mobile_list,
                    always_wxwork_notify=always_wxwork_notify,
                    enable_ding_talk_notify=enable_ding_talk_notify,
                    ding_talk_access_token=ding_talk_access_token,
                    ding_talk_at_mobiles=ding_talk_at_mobiles,
                    ding_talk_secret=ding_talk_secret,
                    always_ding_talk_notify=always_ding_talk_notify,
                    always_send_mail=always_send_mail,
                    alarm_mail_group_list=alarm_mail_group_list,
                    trigger_type=trigger_type,  # no real effect on the timer when updating; only changes the displayed field
                    run_date=run_date)  # no real effect on the timer when updating; only changes the displayed field
            else:
                cron = Cron(
                    cron_job_id=cron_job_id,
                    test_suite_id_list=test_suite_id_list,
                    project_id=project_id,
                    include_forbidden=include_forbidden,
                    enable_wxwork_notify=enable_wxwork_notify,
                    wxwork_api_key=wxwork_api_key,
                    wxwork_mention_mobile_list=wxwork_mention_mobile_list,
                    always_wxwork_notify=always_wxwork_notify,
                    enable_ding_talk_notify=enable_ding_talk_notify,
                    ding_talk_access_token=ding_talk_access_token,
                    ding_talk_at_mobiles=ding_talk_at_mobiles,
                    ding_talk_secret=ding_talk_secret,
                    always_ding_talk_notify=always_ding_talk_notify,
                    test_env_id=test_env_id,
                    always_send_mail=always_send_mail,
                    alarm_mail_group_list=alarm_mail_group_list,
                    trigger_type=trigger_type,  # no real effect on the timer when updating; only changes the displayed field
                    seconds=interval)  # no real effect on the timer when updating; only changes the displayed field
            # Quirk: when updating the job we must modify args, not func.
            self.scheduler.modify_job(job_id=cron_job_id,
                                      coalesce=True,
                                      args=[cron])
        except BaseException as e:
            with app.app_context():
                current_app.logger.error("update_cron failed. - %s" % str(e))

    def shutdown(self, force_shutdown=False):
        # wait=False aborts running jobs; wait=True lets them finish first.
        if force_shutdown:
            self.scheduler.shutdown(wait=False)
        else:
            self.scheduler.shutdown(wait=True)

    def get_jobs(self):
        # Return the list of currently scheduled Job objects.
        return self.scheduler.get_jobs()
示例#25
0
class MainRunner(object):
    """
    Fuzzy-testing main runner.

    Watches *dirpath* for ``*.json`` task configs; each config's "type"
    field names the runner class to import, which is then scheduled on a
    BackgroundScheduler at the configured interval.  Config changes are
    applied live via a watchdog Observer.
    """

    # Class-level defaults kept for backward compatibility; __init__ assigns a
    # per-instance workers dict so separate instances do not share state.
    workers = {}
    dirpath = "."
    defaultOutputPath = "output"

    def __init__(self, dirpath="."):
        """
        local path for load config
        """
        logger.info("Initialing Main Runner for Fuzzy Testing")
        # BUG FIX: per-instance registry instead of the shared class attribute.
        self.workers = {}
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading runner config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        # BUG FIX: load from self.dirpath (the validated folder the observer
        # watches), not the raw argument, so both agree when dirpath is invalid.
        self.load_dir(self.dirpath)

        # Watch the config folder so config changes are applied while running.
        event_handler = JsonHandler(patterns=["*.json"], ignore_directories=True)
        event_handler.set_handler(oncreated=self.load, onmodified=self.load, ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

    def load_dir(self, folder):
        """Load every JSON config file directly inside *folder* (non-recursive)."""
        # BUG FIX: generator.next() is Python-2 only; use the builtin next().
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            # endswith(".json") matches the "*.json" pattern the watchdog
            # handler uses, instead of the looser '"json" in fname[-4:]' test.
            if fname.endswith(".json"):
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        """
        given a json file, load and create a task run regularly

        Returns the runner instance, or None when the file cannot be parsed,
        lacks a "type" attribute, or the runner class cannot be imported.
        """
        logger.info(fp + " was loaded!")
        with open(fp) as in_data:
            try:
                data = json.load(in_data)
                data["path"] = fp
            except ValueError as e:
                # BUG FIX: Exception.message was removed in Python 3.
                logger.warning(fp + " loaded failed: " + str(e))
                return None
            interval = 30
            if "interval" in data:
                interval = int(data["interval"])

            if "output" in data:
                if "defaultOutputPath" in data["output"]:
                    self.defaultOutputPath = data["output"]["defaultOutputPath"]
                if "dirpath" in data["output"]:
                    data["output"]["outputPath"] = os.path.join(self.defaultOutputPath, data["output"]["dirpath"])
            else:
                data["output"] = {"outputPath": self.defaultOutputPath}

            if "type" not in data:
                # BUG FIX: the original literal embedded a backslash line
                # continuation, injecting a run of spaces into the message.
                logger.error("Missing type attribute in your configuration file [%s]" % fp)
                return None

            if fp in self.workers:  # existing runner found
                logger.info("Update exisitng runner [%s]" % fp)
                runner = self.workers[fp]
                runner.update(**data)
                # //memo: Interval can't be modified
                self.scheduler.modify_job(job_id=fp, func=runner.run, name=runner.name)

            else:  # Create new
                logger.info("Create new runner [%s]" % fp)
                # "a.b.C" -> module "a.b", class name "C"
                module_path = data["type"][: data["type"].rfind(".")]
                object_name = data["type"][data["type"].rfind(".") + 1 :]
                try:
                    runner_module = getattr(importlib.import_module(module_path), object_name)
                except Exception as e:
                    logger.exception(e)
                    return None

                runner = runner_module(**data)
                self.workers[fp] = runner
                self.scheduler.add_job(runner.run, "interval", id=fp, name=runner.name, seconds=interval)
            return runner
        return None

    def list(self):
        """
        to list all configs loaded
        format: [squence number] [minion name] [config_path] [status]
        """
        # BUG FIX: iterate items(); iterating the dict yields keys only and
        # the (fp, worker) unpacking would raise ValueError.
        for fp, worker in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        """
        given file path, stop running instance if possible

        Returns True when a runner was removed, False otherwise.
        """
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        """
        Placeholder for batch removal.
        TODO:
        1. remove by start, end
        2. by directory(?)
        """
        pass

    def unload_all(self):
        """
        stop all running instances by shutting the scheduler down
        """
        self.scheduler.shutdown()

    def pause(self, fp):
        """
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        """
        # BUG FIX: BaseScheduler.pause() accepts no job_id; pausing a single
        # job is pause_job().
        self.scheduler.pause_job(job_id=fp)

    def resume(self, sn):
        """No-op: resuming a paused runner is not supported yet."""
        # not sure we can do this
        pass

    def __del__(self):
        """Best-effort cleanup on garbage collection."""
        self.unload_all()

    def get_config(self):
        """Return the runtime configuration mapping (currently always empty)."""
        conf = {}
        return conf

    def _wake(self):
        """
        For periodical minions, waking them according to timing.
        Not implemented yet.
        """
        pass
示例#26
0
class BLC(BanyanBase):
    """
    This is the banyan launcher client. It receives launch instructions
    from the server and then locally launches and manages those processes.
    """

    def __init__(self, subscriber_port='43125', publisher_port='43124',
                 back_plane_ip_address=None, topic=None):
        """
        :param back_plane_ip_address: address of backplane. This is a required
                                      parameter
        :param subscriber_port: backplane subscriber port
        :param publisher_port:  backplane publisher port
        :param topic: subscriber topic containing launch instructions. This is
                      a required parameter
        """

        # To use this class the backplane address is a required parameter.
        # BUG FIX: "not any(...)" only triggered when BOTH values were
        # missing; since both are required, all() is the correct check.
        if not all((back_plane_ip_address, topic)):
            print('You must specify both the backplane ip address and topic.')
            sys.exit(0)

        # start the logging process

        # get the home directory path
        home = expanduser("~")

        # set up the log file
        logging.basicConfig(filename=home + '/banyan_launcher.log', filemode='w',
                            level=logging.ERROR)

        # a popen process object
        self.proc = None

        # maintain a database of information pertaining to each launch item
        # this will be an array of dictionaries, with each row describing a single launched process
        self.launch_db = []

        # the keys defined for a given row are as follows:
        #
        # auto_restart - restart process if it dies
        # append_bp_address - the backplane ip address appended to
        #                     the command_string with -b option
        # command_string - the command used to launch the process
        # process - the value returned from popen after launching process
        # process_id - pid of launched process
        # spawn - spawn process in its own window
        # topic - used to publish to remote launcher
        # reply_topic - reply topic from remote launcher

        # call the parent class to attach this banyan component to the backplane
        super(BLC, self).__init__(back_plane_ip_address=back_plane_ip_address,
                                  subscriber_port=subscriber_port,
                                  publisher_port=publisher_port,
                                  process_name='Banyan Launch Client',
                                  loop_time=.1)

        print('Listening for ' + topic + ' messages.')

        # subscribe to the launch topic specified in the init parameter
        self.set_subscriber_topic(topic)

        # subscribe to the killall topic to exit this program via message
        self.set_subscriber_topic('killall')

        # start the background scheduler to periodically run check_processes and confirm
        self.scheduler = BackgroundScheduler()
        self.job = self.scheduler.add_job(self.check_local_processes, 'interval', seconds=.5)

        self.scheduler.start()

        try:
            # initial launching is complete, so just wait to receive incoming messages.
            self.receive_loop()
        except (KeyboardInterrupt, SystemExit):
            self.clean_up()

    def spawn_local(self, idx):
        """
        This method launches processes that are needed to run on this computer.
        :param idx: An index into launch_db
        """

        # get the launch entry in launch_db
        db_entry = self.launch_db[idx]

        # skip over the entry for the backplane.
        # there shouldn't be one for the client, but the code is
        # kept consist with the server.
        # launch the process either in its own window or just launch it.
        # differentiate between windows and other os's.
        if not db_entry['command_string'] == 'backplane':
            if sys.platform.startswith('win32'):
                if db_entry['spawn'] == 'yes':
                    self.proc = Popen(db_entry['command_string'],
                                      creationflags=subprocess.CREATE_NEW_CONSOLE)
                else:
                    command_list = db_entry['command_string']
                    self.proc = Popen(command_list, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
            else:
                if db_entry['spawn'] == 'yes':
                    self.proc = Popen(['xterm', '-e', db_entry['command_string']],
                                      stdin=subprocess.PIPE, stderr=subprocess.PIPE,
                                      stdout=subprocess.PIPE)
                else:
                    command_list = db_entry['command_string'].split(' ')
                    self.proc = Popen(command_list)

            # update the entry with the launch information
            db_entry['process'] = self.proc
            db_entry['process_id'] = self.proc.pid
            print('{:35} PID = {}'.format(db_entry['command_string'], str(self.proc.pid)))

            # allow a little time for the process to startup
            try:
                time.sleep(0.5)
            except (KeyboardInterrupt, SystemExit):
                # self.scheduler.shutdown()
                self.clean_up()
                sys.exit(0)

    def check_local_processes(self):
        """
        This method is called by the scheduler periodically.
        Check to make sure if a local process previously started is still running.
        If a process is dead, print that to the console and if the process
        has the restart flag set, then restart it.

        Affect the launch_db entry so that only one message is printed and the if
        the process to be restarted, it is restarted once.
        """

        if sys.platform.startswith('win32'):
            for x, record in enumerate(self.launch_db):

                # ignore backplane
                if not record['command_string'] == 'backplane':
                    # get a list of all pids running
                    pids = psutil.pids()

                    # check to see if the process in the launch_db is in this list
                    if record['process'].pid in pids:
                        proc = psutil.Process(record['process'].pid)

                        # get the status for this process
                        status = proc.status()

                    elif not record['process'].pid in pids:
                        status = psutil.STATUS_ZOMBIE

                    else:
                        status = None

                    if status == psutil.STATUS_ZOMBIE:
                        log_string = '{:35} PID = {} DIED'.format(record['command_string'], str(record['process'].pid))
                        print(log_string)

                        logger = logging.getLogger()
                        logger.error(log_string)

                        # reset its state and process, and process ID
                        record['process'] = None
                        record['process_id'] = None

                        # do we need to restart it?
                        if record['auto_restart'] == 'yes':
                            self.spawn_local(x)
        else:
            for x, record in enumerate(self.launch_db):
                if not record['command_string'] == 'backplane':
                    if record['process_id']:
                        pids = psutil.pids()
                        # check to see if the process in the launch_db is in this list

                        try:
                            if record['process'].pid in pids:
                                proc = psutil.Process(record['process'].pid)
                                status = proc.status()

                                # a sleeping process is healthy - nothing to do
                                if status == psutil.STATUS_SLEEPING:
                                    continue

                                if not status:
                                    status = psutil.STATUS_ZOMBIE

                                if status == psutil.STATUS_ZOMBIE:

                                    log_string = '{:35} PID = {} DIED'.format(record['command_string'],
                                                                              str(record['process'].pid))
                                    print(log_string)

                                    logger = logging.getLogger()
                                    logger.error(log_string)

                                    # reset its process, and process ID
                                    record['process'] = None
                                    record['process_id'] = None

                                    # do we need to restart it?
                                    if record['auto_restart'] == 'yes':
                                        self.spawn_local(x)
                        except AttributeError:
                            # record['process'] may already be None (cleared above)
                            pass

    def incoming_message_processing(self, topic, payload):
        """
        Messages are sent here from the receive_loop

        :param topic: Message Topic string
        :param payload: Message Data
        :return:
        """
        # make sure this is not a duplicate launch request

        if topic == 'killall':
            self.clean_up()

        else:
            # check to see if the process was already launched and ignore
            # if it was
            for idx, record in enumerate(self.launch_db):
                if record['launch_id'] == payload['launch_id']:
                    return
            self.launch_db.append(payload)
            self.spawn_local(len(self.launch_db) - 1)

            record = self.launch_db[-1]

            # send acknowledgement to server
            ack = {'command_string': record['command_string'],
                   'process_id': record['process_id'],
                   'launch_id': record['launch_id']}
            topic = record['reply_topic']
            self.publish_payload(ack, topic)

    def clean_up(self):
        """
        Graceful shutdown - all newly opened windows and associated processes
        are killed
        :return:
        """
        # pause the scheduler so check_local_processes does not try to
        # restart the processes we are about to kill
        self.scheduler.pause()
        for idx, record in enumerate(self.launch_db):
            if record['process']:
                print('{:35} PID = {} KILLED'.format(record['command_string'], str(record['process'].pid)))
                proc = psutil.Process(record['process'].pid)
                proc.kill()
                record['process'] = None
                record['process_id'] = None
        sys.exit(0)
示例#27
0
class APS_Schedule(object):
    """Thin wrapper around an APScheduler BackgroundScheduler that manages
    one-shot notification reminders."""

    def __init__(self):
        # thread pool for normal jobs, a small process pool for heavy ones
        executors = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(5),
        }
        job_defaults = {
            'coalesce': True,
            'max_instances': 5,
            'misfire_grace_time': 1000,
        }
        self.scheduler = BackgroundScheduler(executors=executors,
                                             job_defaults=job_defaults)
        self.scheduler.start()
        hotkey = 'Break' if os.name == 'nt' else 'C'
        print('Press Ctrl+{0} to exit'.format(hotkey))

    def add_execution_reminder(self, msg_title, msg_body, execution_time,
                               user_id, notif_type, type_id):
        """Schedule a one-shot notification at *execution_time*.

        Returns True on success, None on failure (logged).
        """
        try:
            self.scheduler.add_job(
                _send_notification,
                'date',
                run_date=execution_time,
                args=[msg_title, msg_body, user_id, notif_type, type_id],
                id=str(type_id),
                replace_existing=True)
        except Exception as e:
            print(str(e))
            logger.critical(
                "couldnt add the job from schedular with exception : {}".format(str(e)))
        else:
            return True

    def remove_execution_reminder(self, job_id, job_type):
        """Remove a scheduled job by id; *job_type* is currently unused."""
        try:
            print('before deleting the job.')
            job_del = self.scheduler.remove_job(str(job_id))
            print(job_del, 'after delete')
        except Exception as e:
            logger.critical(
                "couldnt remove the job from schedular with exception : {}".format(str(e)))
        else:
            return True

    def pause_jobs(self):
        """Pause the whole scheduler; True on success, None on failure."""
        try:
            self.scheduler.pause()
        except Exception as e:
            logger.error(
                "couldnt pause the schedular with exception : {}".format(str(e)))
        else:
            return True

    def resume_jobs(self):
        """Resume the whole scheduler; True on success, None on failure."""
        try:
            self.scheduler.resume()
        except Exception as e:
            logger.error(
                "couldnt resume the schedular with exception : {}".format(str(e)))
        else:
            return True

    def shutdown_schedular(self):
        """Shut the scheduler down; returns shutdown()'s result (None)."""
        try:
            return self.scheduler.shutdown()
        except Exception as e:
            logger.error(
                "couldnt shutdown the schedular with exception : {}".format(str(e)))
示例#28
0
class DataCollector:
    """Runs APScheduler-driven data-collection schedules.

    Each schedule has a template describing its trigger, a user-supplied
    ``source`` code block to execute, and ``works`` that are dispatched to
    Celery workers; collected data is also emitted to the event collector.
    """

    def __init__(self):
        self.logger = get_logger('data-collector')

        self.scheduler = BackgroundScheduler(timezone="Asia/Seoul")
        self.scheduler.start()
        # schedule_name -> schedule template dict (trigger/source/works/...)
        self.templates = dict()

        # schedule_name -> {'_gv': dict()} per-schedule variable store,
        # merged into the user function's arguments at run time
        self.__global_store = dict()

        self.job_broker = Celery('routine-jobs',
                                 broker=BROKER_URL,
                                 backend=CELERY_RESULT_BACKEND)

    # =========================================================================
    def add_job_schedules(self, schedule_templates: list):
        """Register every template, rejecting duplicate schedule names.

        :raises ExceptionScheduleReduplicated: when a name is already used.
        """
        for schedule_template in schedule_templates:
            schedule_name, trigger = operator.itemgetter(
                'schedule_name', 'trigger')(schedule_template)

            # schedule name can't be duplicated.
            schedule_names = [
                x['schedule_name'] for x in self.get_schedule_jobs()
            ]
            if schedule_name in schedule_names:
                msg = f'The schedule name \'{schedule_name}\' is already assigned.'
                self.logger.error(msg)
                raise ExceptionScheduleReduplicated(msg)

            self._add_job_schedule(schedule_name,
                                   trigger_type=trigger['type'],
                                   trigger_setting=trigger['setting'])

            # store the schedule template
            self.templates[schedule_name] = schedule_template
            self.__global_store[schedule_name] = {'_gv': dict()}

    # =========================================================================
    def _add_job_schedule(self, key, trigger_setting, trigger_type):
        """Add one scheduler job; a 'crontab' trigger is expanded to 'cron'."""
        if trigger_type == 'crontab' and 'crontab' in trigger_setting:
            # crontab_add_second is provided elsewhere in the class -- TODO confirm
            crontab = self.crontab_add_second(trigger_setting['crontab'])
            trigger_type = 'cron'
            trigger_setting = {**trigger_setting, **crontab}
            del trigger_setting['crontab']

        arguments = dict(func=self.request_data,
                         args=(key, ),
                         id=key,
                         trigger=trigger_type)
        arguments = {**arguments, **trigger_setting}

        # pause while mutating the job list so the new job cannot fire mid-add
        self.scheduler.pause()
        try:
            self.scheduler.add_job(**arguments)
        finally:
            self.scheduler.resume()

    # =========================================================================
    def remove_job_schedule(self, schedule_name: str):
        """Remove a schedule's job and forget its template and state."""
        self.get_schedule_job(schedule_name)
        self.scheduler.remove_job(schedule_name)
        try:
            del self.templates[schedule_name]
            del self.__global_store[schedule_name]
        except KeyError:
            # it should be failing to collect data. such as not connecting.
            pass

        return

    # =========================================================================
    def modify_job_schedule(self, schedule_name, trigger_type, trigger_args):
        """Reschedule an existing job and refresh its stored template."""
        if trigger_type == 'crontab' and 'crontab' in trigger_args:
            crontab = self.crontab_add_second(trigger_args['crontab'])
            trigger = 'cron'

            setting = {**trigger_args, **crontab}
            del setting['crontab']
        else:
            trigger = trigger_type
            setting = trigger_args

        job = self.scheduler.get_job(schedule_name)
        job.reschedule(trigger, **setting)
        self.templates[schedule_name]['trigger'] = dict(type=trigger_type,
                                                        setting=trigger_args)

    # =========================================================================
    @staticmethod
    def get_python_module(code, name):
        """Execute *code* inside a fresh module object named *name*.

        NOTE: exec() of template-supplied code -- only safe for trusted input.
        """
        module = types.ModuleType(name)
        exec(code, module.__dict__)
        return module

    # =========================================================================
    @staticmethod
    def insert_number_each_line(data: str):
        """Prefix every line of *data* with a 4-digit, 1-based line number."""
        result = list()
        data = data.split('\n')
        for (number, line) in enumerate(data):
            result.append(f'{number+1:04} {line}')
        return '\n'.join(result)

    # =========================================================================
    @staticmethod
    def filter_dict(dict_to_filter, thing_with_kwargs):
        """Keep only the keys matching *thing_with_kwargs*' named parameters."""
        sig = inspect.signature(thing_with_kwargs)
        filter_keys = [
            param.name for param in sig.parameters.values()
            if param.kind == param.POSITIONAL_OR_KEYWORD
        ]
        filtered_dict = {
            filter_key: dict_to_filter[filter_key]
            for filter_key in filter_keys
        }
        return filtered_dict

    # =========================================================================
    def _source(self, name, setting):
        """Run the schedule's user 'source' code and return its data."""
        source_type, code, arguments = operator.itemgetter(
            'type', 'code', 'arguments')(setting)
        module = DataCollector.get_python_module(code, name)
        try:
            _gv = self.__global_store[name]
            arguments = {**arguments, **_gv}
            filterd_arguments = DataCollector.filter_dict(
                arguments, module.main)
            data = module.main(**filterd_arguments)
        except Exception as e:
            # log the numbered source so user-code tracebacks are readable
            code = DataCollector.insert_number_each_line(code)
            self.logger.error(f'{e}\ncode: \n{code}')
            raise
        return data

    # =========================================================================
    def request_data(self, schedule_name):
        """Collect data for one schedule and fan it out (Celery + events)."""
        # The membership guard must run BEFORE indexing self.templates;
        # previously the lookup came first, so the KeyError escaped unlogged
        # and the guard below was dead code.
        if schedule_name not in self.templates:
            msg = f'The template "{schedule_name}" ' \
                  f'is not in the main template store'
            self.logger.error(msg)
            raise KeyError(msg)
        schedule = self.templates[schedule_name]

        # checking use flag
        if not schedule['use']:
            self.logger.info(f'{schedule_name} is disabled.')
            return

        # source
        data = self._source(schedule_name, schedule['source'])
        if data is None:
            message = f'[{schedule_name}] The user function returned None.'
            self.logger.warning(message)

        # works
        # calling function for each works with arguments via celery
        for work in schedule['works']:
            work_type, arguments = operator.itemgetter('type',
                                                       'arguments')(work)
            self.job_broker.send_task(work_type,
                                      args=(data, ),
                                      kwargs=arguments)

        # event emitting
        data = json.dumps(data)
        event = {
            'name': schedule_name,
            'event': {
                'type': 'data-collector',
                'schedule_name': schedule_name
            },
            'data': data
        }
        try:
            self.emit_event(schedule_name, event)
        except (urllib3.exceptions.MaxRetryError,
                requests.exceptions.ConnectionError) as e:
            self.logger.error(f'Connection Error: Failed to emit events.')
        except Exception as e:
            import traceback
            traceback.print_exc()
        return

    # =========================================================================
    def emit_event(self, name: str, event: dict):
        """POST *event* to the event collector; raise on non-200 responses."""
        with requests.Session() as s:
            api = EVENT_COLLECTOR_URL + '/api/v1/events/emit'
            response = s.post(api, json=event)
            if response.status_code != 200:
                raise Exception(f'code: {response.status_code}\n'
                                f'messages: [{name}] - {response.reason}')
            self.logger.info(f'[{name}] emitted a event.')

    # NOTE: later duplicate stub definitions of remove_job_schedule and
    # modify_job_schedule (which referenced a nonexistent self.data) used to
    # shadow the real implementations above; they have been removed.

    # =========================================================================
    def get_schedule_jobs(self):
        """Return stored templates annotated with each job's next run time."""
        jobs = self.scheduler.get_jobs()
        if not jobs:
            return jobs
        result = list()
        for job in jobs:
            schedule_name = job.id
            next_run_time = job.next_run_time
            template_data = self.templates[schedule_name]
            template_data['next_run_time'] = next_run_time
            result.append(template_data)
        return result
示例#29
0
class MainRunner(object):
    """Watches a config folder and schedules Hasal agent tasks from *.json files."""

    # class-level defaults kept for backward compatibility; mutable state is
    # (re)initialised per instance in __init__ so instances don't share dicts
    workers = {}
    dirpath = '.'
    defaultOutputPath = 'output'

    def __init__(self, dirpath='.'):
        '''
        local path for load config
        '''
        logger.info("Initialing Main Runner for Hasal agent")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading runner config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")

        # per-instance dict: the shared class attribute was a mutable-default bug
        self.workers = {}

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.load_dir(self.dirpath)

        # reload/remove configs automatically when json files change on disk
        event_handler = JsonHandler(patterns=["*.json"],
                                    ignore_directories=True)
        event_handler.set_handler(oncreated=self.load,
                                  onmodified=self.load,
                                  ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

    def load_dir(self, folder):
        """Load every *.json config found directly inside *folder*."""
        # next() builtin works on Python 2 and 3; generator.next() was Py2-only
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if 'json' in fname[-4:]:
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        given a json file, load and create a task run regularly
        '''
        logger.info(fp + " was loaded!")
        with open("agent.log", 'w+') as f:
            # NOTE(review): 'w+' truncates the log on every load -- 'a' may be intended
            f.write(fp + " was loaded!")
        data = {}
        with open(fp) as in_data:
            try:
                data = json.load(in_data)
                data['name'] = "Jenkins Job"
                data['path'] = fp
            except ValueError as e:
                # str(e): exceptions lost the .message attribute in Python 3
                logger.warning(fp + " loaded failed: " + str(e))
                return None
        interval = 30
        if 'interval' in data:
            interval = int(data['interval'])

        if 'output' in data:
            if 'defaultOutputPath' in data['output']:
                self.defaultOutputPath = data['output']['defaultOutputPath']
            if 'dirpath' in data['output']:
                data['output']['outputPath'] = os.path.join(
                    self.defaultOutputPath, data['output']['dirpath'])
        else:
            data['output'] = {'outputPath': self.defaultOutputPath}

        if fp in self.workers:  # existing runner found
            logger.info("Update exisitng runner [%s]" % fp)
            runner = self.workers[fp]
            runner.update(**data)
            # //memo: Interval can't be modified
            self.scheduler.modify_job(job_id=fp,
                                      func=runner.run,
                                      name=runner.name)

        else:  # Create new
            logger.info("Create new runner [%s]" % fp)
            module_path = "hasalTask"
            object_name = "HasalTask"
            try:
                runner_module = getattr(importlib.import_module(module_path),
                                        object_name)
            except Exception as e:
                logger.exception(e)
                return None

            runner = runner_module(**data)
            self.workers[fp] = runner
            self.scheduler.add_job(runner.run,
                                   'interval',
                                   id=fp,
                                   name=runner.name,
                                   seconds=interval)
        return runner

    def list(self):
        '''
        to list all configs loaded
        format: [squence number] [minion name] [config_path] [status]
        '''
        # iterate items(): iterating the dict alone yields only keys and the
        # (fp, worker) unpacking raised ValueError
        for (fp, worker) in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        given file path, stop running instance if possible
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        # BackgroundScheduler.pause() pauses the whole scheduler and accepts no
        # job_id keyword; pausing a single job requires pause_job()
        self.scheduler.pause_job(fp)

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        self.unload_all()

    def get_config(self):
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass
示例#30
0
class Boss(object):
    """Watches a config folder and schedules minion collectors from *.json files."""

    # class-level defaults kept for backward compatibility; mutable state is
    # (re)initialised per instance in __init__ so instances don't share dicts
    workers = {}
    dirpath = '.'
    output = None

    def __init__(self, dirpath='.', output='output'):
        '''
        local path for load config
        '''
        logger.info("Initialing BOSS")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading job config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")
        self.output = output
        logger.info("Setup output folder: " + output)
        if not os.path.isdir(output):
            logger.info("target directory "
                        + output
                        + " doesn't exist, creating..")
            os.makedirs(output)

        # per-instance dict: the shared class attribute was a mutable-default bug
        self.workers = {}

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()

        self.load_dir(dirpath)

        # reload/remove configs automatically when json files change on disk
        event_handler = JsonHandler(patterns=["*.json"],
                                    ignore_directories=True)
        event_handler.set_handler(oncreated=self.load,
                                  onmodified=self.load,
                                  ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

    def load_dir(self, folder):
        """Load every *.json config found directly inside *folder*."""
        # next() builtin works on Python 2 and 3; generator.next() was Py2-only
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if 'json' in fname[-4:]:
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        given a json file, load and create a task run regularly
        '''
        logger.info(fp + " was loaded!")
        with open(fp) as in_data:
            try:
                data = json.load(in_data)
                data['path'] = fp
            except ValueError as e:
                # str(e): exceptions lost the .message attribute in Python 3
                logger.warning(fp + " loaded failed: " + str(e))
                return None
            interval = 30
            if 'interval' in data:
                interval = int(data['interval'])
            if self.output:
                # TODO: test case for no 'output' key in data
                if 'output' not in data:
                    data['output'] = {}
                output = data['output']
                if 'dirpath' in output:
                    output['dirpath'] = os.path.join(self.output, output['dirpath'])
                else:
                    output['dirpath'] = self.output
                if 'type' not in data:
                    logger.error("Missing type attribute in \
                                    your configruation file [%s]" % fp)
                    return None
            if fp in self.workers:  # existing minion found
                logger.info("Update exisitng minion [%s]" % fp)
                minion = self.workers[fp]
                minion.update(**data)
                # //memo: Interval can't be modified
                self.scheduler.modify_job(job_id=fp,
                                          func=minion.collect,
                                          name=minion.name+'_'+minion.serial
                                          )

            else:  # Create new
                logger.info("Create new minion [%s]" % fp)
                # 'type' holds the dotted path of the minion class to import
                module_path = data['type'][:data['type'].rfind(".")]
                object_name = data['type'][data['type'].rfind(".") + 1:]
                try:
                    minion_module = getattr(importlib.import_module(
                                            module_path), object_name)
                except Exception as e:
                    logger.exception(e)
                    return None

                minion = minion_module(**data)
                self.workers[fp] = minion
                self.scheduler.add_job(minion.collect, 'interval',
                                       id=fp,
                                       name=minion.name + '_' + minion.serial,
                                       seconds=interval
                                       )
            return minion
        return None

    def list(self):
        '''
        to list all configs loaded
        format: [squence number] [minion name] [config_path] [status]
        '''
        # iterate items(): iterating the dict alone yields only keys and the
        # (fp, worker) unpacking raised ValueError
        for (fp, worker) in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        given file path, stop running instance if possible
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        # BackgroundScheduler.pause() pauses the whole scheduler and accepts no
        # job_id keyword; pausing a single job requires pause_job()
        self.scheduler.pause_job(fp)

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        self.unload_all()

    def get_config(self):
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass
示例#31
0
class Manager:
    """Runs trading algos against a broker, either live or in backtest."""

    timezone = pytz.timezone('America/New_York')

    def __init__(self, data):
        self.data = data
        self.algos = []
        self.broker = None
        self.logger = Logger()
        self.datetime = None
        self.scheduler = BackgroundScheduler()
        self.scheduler.configure(timezone=Manager.timezone)
        # algo -> scheduler job handle; also holds the 'logger' job in live mode
        self.jobs = {}

    def init_broker(self, backtest=False, **kwargs):
        """Create the broker and attach it to every registered algo."""
        if backtest:
            self.broker = BacktestBroker(**kwargs)
        else:
            self.broker = AlpacaBroker(**kwargs)
        for algo in self.algos:
            algo.set_broker(self.broker)

    def add_algo(self, algo, live=False):
        """Register an algo; when *live*, schedule its trigger immediately."""
        if algo.data is None:
            algo.set_data_source(self.data)
        if algo.broker is None:
            algo.set_broker(self.broker)
        self.algos.append(algo)
        if live:
            trigger = convert_trigger_timezone(algo.trigger, Manager.timezone)
            job = self.scheduler.add_job(self.run_algo_live,
                                         trigger,
                                         kwargs={'algo': algo})
            self.jobs[algo] = job

    def stop_algo(self, algo):
        """Unschedule an algo's live job."""
        job = self.jobs[algo]
        job.remove()

    def remove_algo(self, algo):
        """Stop and deregister an algo."""
        self.stop_algo(algo)
        # self.algos is a list: `del self.algos[algo]` raised TypeError
        # (list indices must be integers); remove by value instead
        self.algos.remove(algo)
        # drop the stale job handle as well
        self.jobs.pop(algo, None)

    def log_state(self):
        """Record portfolio value and the SPY benchmark at self.datetime."""
        value = self.broker.get_value(self.datetime)['value']
        benchmark_value = self.data.quote("SPY", self.datetime)
        self.logger.append(value, self.datetime)
        self.logger.append(benchmark_value, self.datetime, benchmark=True)

    def run_algo_live(self, algo):
        """Scheduler callback: run one algo at the current wall-clock time."""
        self.datetime = datetime.datetime.now().astimezone(Manager.timezone)
        algo.run_wrapper(time=self.datetime, update=True)

    def backtest(self,
                 start=datetime.datetime(2021, 3, 1),
                 end=None,
                 log_schedule=None):
        """Replay all algos from *start* to *end* and report metrics.

        *start*/*end* may be ints, interpreted as trading-day offsets from
        the other endpoint. *end* defaults to "now", *log_schedule* to
        09:30 on weekdays.
        """
        if end is None:
            # evaluated per call; a datetime.now() default argument would be
            # frozen at class-definition time
            end = datetime.datetime.now()
        if log_schedule is None:
            # None sentinel avoids a shared mutable default argument
            log_schedule = [{
                "minute": "30",
                "hour": "9",
                "day_of_week": "mon-fri"
            }]
        self.init_broker(backtest=True, data=self.data)
        log_trigger = build_trigger(log_schedule)
        algo_trigger = OrTrigger([algo.trigger for algo in self.algos])
        trigger = OrTrigger([algo_trigger, log_trigger])
        logging = isinstance(log_schedule, dict) or len(log_schedule) > 0
        if isinstance(start, int) and not isinstance(end, int):
            start = trading_day_offset(end, -start)
        if not isinstance(start, int) and isinstance(end, int):
            end = trading_day_offset(start, end)
        self.datetime = start
        while self.datetime < end:
            for algo in self.algos:
                if equals_runtime(algo.trigger, self.datetime):
                    print(self.datetime)
                    time = Manager.timezone.localize(self.datetime)
                    self.broker.check_limit_orders(time=time)
                    algo.run_wrapper(time=time, update=False)
            if logging and equals_runtime(log_trigger, self.datetime):
                self.log_state()
            self.datetime = next_runtime(trigger, self.datetime)
        metrics = self.logger.metrics()
        for metric, value in metrics.items():
            print("{metric}: {value:.3f}".format(metric=metric, value=value))
        self.logger.report()
        return metrics

    def run(self, paper=False, log_schedule=None):
        """Schedule every algo live, then drop into an interactive shell."""
        if log_schedule is None:
            # None sentinel avoids a shared mutable default argument
            log_schedule = [{
                "minute": "30",
                "hour": "9",
                "day_of_week": "mon-fri"
            }]
        self.init_broker(backtest=False, paper=paper)
        self.datetime = datetime.datetime.now().astimezone(Manager.timezone)
        for algo in self.algos:
            trigger = convert_trigger_timezone(algo.trigger, Manager.timezone)
            job = self.scheduler.add_job(self.run_algo_live,
                                         trigger,
                                         kwargs={'algo': algo})
            self.jobs[algo] = job
        log_trigger = convert_trigger_timezone(build_trigger(log_schedule),
                                               Manager.timezone)
        job = self.scheduler.add_job(self.log_state, log_trigger)
        self.jobs['logger'] = job
        self.start()
        self.interact()

    def start(self):
        self.scheduler.start()

    def stop(self):
        """Unschedule everything and shut the scheduler down."""
        for algo in self.jobs.keys():
            self.stop_algo(algo)
        self.scheduler.shutdown(wait=True)

    def pause(self):
        self.scheduler.pause()

    def resume(self):
        self.scheduler.resume()

    def interact(self):
        code.interact(local=locals())
示例#32
0
#message= "Server is up!"
#textmessage(message)
# Cron triggers below schedule the program's recurring jobs.
# NOTE: the server running on AWS restarts randomly in the middle of the night;
# a textmessage() boot notification used to live here but was removed because
# it fired every night. Despite the resets no scheduled call has been missed.
activate()
# three independent background schedulers, all pinned to US Central time
sched = BackgroundScheduler(daemon=True, timezone="America/Chicago")
skd = BackgroundScheduler(daemon=True, timezone="America/Chicago")
weatherschd = BackgroundScheduler(daemon=True, timezone="America/Chicago")
# weathercheck: every 3 hours; raincheck: daily at 10; update: minute 20 of each hour
trigger = OrTrigger([ CronTrigger(hour='0,3,6,9,12,15,18,21')])
raintrigger = OrTrigger([ CronTrigger(hour='10')])
checktrigger = OrTrigger([ CronTrigger(minute='20')])
skd.add_job(update, checktrigger)
weatherschd.add_job(weathercheck, trigger)
sched.add_job(raincheck,raintrigger)
sched.start()
weatherschd.start()
skd.start()
# the update job starts paused -- presumably resumed elsewhere; TODO confirm
skd.pause()
print("Current Time =", current_time)



# Listen for errors: register the handler for every default HTTP exception
for code in default_exceptions:
    app.errorhandler(code)(errorhandler)

# shut the cron scheduler down cleanly on interpreter exit
# NOTE(review): `cron` is not defined in this snippet -- defined elsewhere? confirm
atexit.register(lambda: cron.shutdown(wait=False))

示例#33
0
class CronManager:
    """Wraps a BackgroundScheduler whose jobs are optionally persisted to MongoDB."""

    def __init__(self, use_mongo_db=True):
        self.scheduler = BackgroundScheduler(timezone=shanghai_tz)
        self.scheduler.configure()

        if use_mongo_db:
            # persist jobs so they survive process restarts
            self.job_store = MongoDBJobStore(database='apscheduler',
                                             collection='cronTab',
                                             client=db)
            self.scheduler.add_jobstore(self.job_store)
            self.is_replace_existing = True
        else:
            self.is_replace_existing = False

    def add_cron(self, cron_instance):
        """Add a Cron's mission as a scheduler job and return the cron id.

        :raises TypeError: on a non-Cron argument or an invalid interval.
        """
        if not isinstance(cron_instance, Cron):
            raise TypeError('please add correct cron!')

        if cron_instance.trigger_type == 'interval':
            seconds = cron_instance.trigger_args.get('seconds')
            if not isinstance(seconds,
                              int) and not common.can_convert_to_int(seconds):
                raise TypeError('请输入合法的时间间隔!')
            seconds = int(seconds)
            if seconds <= 0:
                raise TypeError('请输入大于0的时间间隔!')
            # NOTE: pass the bound method itself as func; no args are added
            # here -- the mission object carries its own state
            job = self.scheduler.add_job(
                func=cron_instance.cron_mission,
                trigger=cron_instance.trigger_type,
                seconds=seconds,
                replace_existing=self.is_replace_existing,
                coalesce=True,
                id=cron_instance.get_id(),
                max_instances=5,
                jitter=0)
        elif cron_instance.trigger_type == 'date':
            run_date = cron_instance.trigger_args.get('run_date')
            # TODO validate run_date's type
            job = self.scheduler.add_job(
                func=cron_instance.cron_mission,
                trigger=cron_instance.trigger_type,
                run_date=run_date,
                replace_existing=self.is_replace_existing,
                coalesce=True,
                id=cron_instance.get_id())
        elif cron_instance.trigger_type == 'cron':
            raise TypeError('暂时不支持 trigger_type 等于 \'cron\'')

        return cron_instance.get_id()

    def start(self, paused=False):
        """Start the scheduler, optionally in the paused state."""
        self.scheduler.start(paused=paused)

    def pause_cron(self, cron_id=None, pause_all=False):
        """Pause one job by id, or the whole scheduler when *pause_all*."""
        if pause_all:
            self.scheduler.pause()
        elif cron_id:
            self.scheduler.pause_job(job_id=cron_id)

    def resume_cron(self, cron_id=None, resume_all=False):
        """Resume one job by id, or the whole scheduler when *resume_all*."""
        if resume_all:
            self.scheduler.resume()
        elif cron_id:
            self.scheduler.resume_job(job_id=cron_id)

    def del_cron(self, cron_id=None, del_all=False):
        """Remove one job by id, or every job when *del_all*."""
        if del_all:
            self.scheduler.remove_all_jobs()
        elif cron_id:
            self.scheduler.remove_job(job_id=cron_id)

    def update_cron(self, cron_id, cron_info):
        """Update an existing job's trigger and its Cron payload.

        :param cron_id: id of the scheduled job to modify.
        :param cron_info: dict of camelCase fields describing the new cron.
        :raises TypeError: on bad argument types or any update failure.
        """
        if not isinstance(cron_id, str):
            raise TypeError('cron_id must be str')

        if not isinstance(cron_info, dict):
            raise TypeError('cron_info must be dict')

        trigger_type = cron_info.get('triggerType')
        interval = cron_info.get('interval')
        run_date = cron_info.get('runDate')
        test_case_suite_id_list = cron_info.get('testCaseSuiteIdList')
        is_execute_forbiddened_case = cron_info.get('isExecuteForbiddenedCase')
        test_case_id_list = cron_info.get('testCaseIdList')
        test_domain = cron_info.get('testDomain')
        global_vars_id = cron_info.get('globalVarsId')
        alarm_mail_list = cron_info.get('alarmMailList')
        is_ding_ding_notify = cron_info.get('isDingDingNotify')
        ding_ding_access_token = cron_info.get('dingdingAccessToken')
        ding_ding_notify_strategy = cron_info.get('dingdingNotifyStrategy')
        is_enterprise_wechat_notify = cron_info.get('isEnterpriseWechatNotify')
        enterprise_wechat_access_token = cron_info.get(
            'enterpriseWechatAccessToken')
        enterprise_wechat_notify_strategy = cron_info.get(
            'enterpriseWechatNotifyStrategy')
        cron_name = cron_info.get('name')

        try:
            if trigger_type == 'interval' and int(interval) > 0:
                # cast to int: the old code validated int(interval) but then
                # passed the raw (possibly str) value to IntervalTrigger,
                # which fails inside timedelta()
                self.scheduler.modify_job(
                    job_id=cron_id,
                    trigger=IntervalTrigger(seconds=int(interval)))
            elif trigger_type == 'date':
                # TODO validate run_date's type
                self.scheduler.modify_job(
                    job_id=cron_id, trigger=DateTrigger(run_date=run_date))
            else:
                raise TypeError('更新定时任务触发器失败!')

            # shared Cron kwargs; trigger_type / seconds / run_date only
            # refresh displayed fields here and do not change the real trigger
            cron_kwargs = dict(
                test_case_suite_id_list=test_case_suite_id_list,
                is_execute_forbiddened_case=is_execute_forbiddened_case,
                test_domain=test_domain,
                global_vars_id=global_vars_id,
                alarm_mail_list=alarm_mail_list,
                is_ding_ding_notify=is_ding_ding_notify,
                ding_ding_access_token=ding_ding_access_token,
                ding_ding_notify_strategy=ding_ding_notify_strategy,
                is_enterprise_wechat_notify=is_enterprise_wechat_notify,
                enterprise_wechat_access_token=enterprise_wechat_access_token,
                enterprise_wechat_notify_strategy=enterprise_wechat_notify_strategy,
                trigger_type=trigger_type,
                test_case_id_list=test_case_id_list,
                cron_name=cron_name)
            if run_date:
                cron = Cron(run_date=run_date, **cron_kwargs)
            else:
                cron = Cron(seconds=interval, **cron_kwargs)
            # the job must be updated via args (not func) so the stored job
            # picks up the new Cron payload
            self.scheduler.modify_job(job_id=cron_id,
                                      coalesce=True,
                                      args=[cron])

        except Exception as e:
            # narrowed from BaseException so KeyboardInterrupt/SystemExit
            # still propagate; chain the cause for easier debugging
            raise TypeError('更新定时任务失败: %s' % e) from e

    def shutdown(self, force_shutdown=False):
        """Shut the scheduler down; *force_shutdown* skips waiting for jobs."""
        if force_shutdown:
            self.scheduler.shutdown(wait=False)
        else:
            self.scheduler.shutdown(wait=True)

    def get_crons(self):
        """Return all currently scheduled jobs."""
        return self.scheduler.get_jobs()
示例#34
0
class Timer_handler:
    """Schedule automatic open/close events and persist their settings.

    Two cron jobs on a ``BackgroundScheduler`` invoke :meth:`exec_motion`
    at the configured open/close times.  The times and the auto-enable
    flags are exposed as properties whose setters reconfigure the
    scheduler, pickle the new state to ``timer_data.pkl`` and notify any
    registered observers.
    """

    ############################### constructor ###############################
    def __init__(self, state_handler, logger, path):
        # While boInit is True the property setters skip persisting and
        # notifying observers (we are restoring saved state, not reacting
        # to a user-initiated change).
        self.boInit = True
        self._open_time = None
        self._close_time = None
        self._auto_open = None
        self._auto_close = None

        self.state_handler = state_handler
        self.scheduler = BackgroundScheduler()
        self.observers = []
        self.log = logger
        self.settings_file = path + "timer_data.pkl"
        # Read the most recent settings saved to file, if available.
        try:
            with open(self.settings_file, "rb") as infile:
                self.open_time = pickle.load(infile)
                self.log.info("internal: read <open_time> from file: "
                      + str(self.open_time))
                self.close_time = pickle.load(infile)
                self.log.info("internal: read <close_time> from file: "
                      + str(self.close_time))
                self.auto_open = pickle.load(infile)
                self.log.info("internal: read <auto_open> from file: "
                      + str(self.auto_open))
                self.auto_close = pickle.load(infile)
                self.log.info("internal: read <auto_close> from file: "
                      + str(self.auto_close))
                self.boInit = False
        except Exception:
            self.log.error("internal error: timer state read from file: ",
                            exc_info=True)
            # Fall back to defaults when the settings file is missing or
            # unreadable.
            self.open_time = time(hour=7, minute=0)
            self.close_time = time(hour=18, minute=30)
            self.auto_open = False
            # NOTE: boInit is cleared *before* auto_close is assigned, so
            # setting auto_close below already persists and notifies —
            # this preserves the original initialisation order.
            self.boInit = False
            self.auto_close = False
            self.log.info("internal: set timer to default settings")
        self.scheduler.start()

    ########################### property definition ###########################
    # Attributes are defined as properties because changing them requires
    # some effort (i.e. changing the scheduler settings accordingly) rather
    # than changing them directly.
    @property
    def open_time(self):
        return self._open_time

    @property
    def close_time(self):
        return self._close_time

    @property
    def auto_open(self):
        return self._auto_open

    @property
    def auto_close(self):
        return self._auto_close

    @auto_open.setter
    def auto_open(self, value):
        # Only react to a genuine boolean change.
        if (isinstance(value, bool)
            and value is not None
            and (self.auto_open is None or value != self.auto_open)):
            self._auto_open = value
            if self.scheduler.running:
                self.scheduler.pause()
            # Enabling/disabling maps onto resuming/pausing the open job.
            if self.auto_open:
                self.scheduler.resume_job(job_id=self.open_job.id)
            else:
                self.scheduler.pause_job(job_id=self.open_job.id)
            self.log.info("internal: set <auto_open> to " + str(self.auto_open))
            if self.scheduler.running:
                self.scheduler.resume()
            self.save_state()
            self.notify_observers("auto-open")

    @auto_close.setter
    def auto_close(self, value):
        # Only react to a genuine boolean change.
        if (isinstance(value, bool)
            and value is not None
            and (self.auto_close is None or value != self.auto_close)):
            self._auto_close = value
            if self.scheduler.running:
                self.scheduler.pause()
            # Enabling/disabling maps onto resuming/pausing the close job.
            if self.auto_close:
                self.scheduler.resume_job(job_id=self.close_job.id)
            else:
                self.scheduler.pause_job(job_id=self.close_job.id)
            self.log.info("internal: set <auto_close> to " +
                            str(self.auto_close))
            if self.scheduler.running:
                self.scheduler.resume()
            self.save_state()
            self.notify_observers("auto-close")

    @open_time.setter
    def open_time(self, value):
        # Only react to a genuine datetime.time change; re-registers the
        # cron job at the new hour/minute (replace_existing keeps id stable).
        if (isinstance(value, time)
            and value is not None
            and (self.open_time is None or value != self.open_time)):
            self._open_time = value
            if self.scheduler.running:
                self.scheduler.pause()
            self.open_job = self.scheduler.add_job(self.exec_motion
                                                  ,trigger="cron"
                                                  ,args=["opening"]
                                                  ,id="jopen"
                                                  ,name="open_job"
                                                  ,max_instances=1
                                                  ,replace_existing=True
                                                  ,hour=self.open_time.hour
                                                  ,minute=self.open_time.minute
                                                  )
            self.log.info("internal: set <open_time> to " + str(self.open_time))
            if self.scheduler.running:
                self.scheduler.resume()
            self.save_state()
            self.notify_observers("time-open")

    @close_time.setter
    def close_time(self, value):
        # Only react to a genuine datetime.time change; re-registers the
        # cron job at the new hour/minute (replace_existing keeps id stable).
        if (isinstance(value, time)
            and value is not None
            and (self.close_time is None or value != self.close_time)):
            self._close_time = value
            if self.scheduler.running:
                self.scheduler.pause()
            self.close_job = self.scheduler.add_job(self.exec_motion
                                                    ,trigger="cron"
                                                    ,args=["closing"]
                                                    ,id="jclose"
                                                    ,name="close_job"
                                                    ,max_instances=1
                                                    ,replace_existing=True
                                                    ,hour=self.close_time.hour
                                                    ,minute=self.close_time.minute
                                                    )
            self.log.info("internal: set <close_time> to " +
                            str(self.close_time))
            if self.scheduler.running:
                self.scheduler.resume()
            self.save_state()
            self.notify_observers("time-close")

    ############################## other methods ##############################
    def register_observer(self, callback):
        """Register a callback and immediately send it the current state."""
        self.log.info("internal: registering timer observer")
        self.observers.append(callback)
        self.update_observer(callback)

    # Update all observers of the object, i.e. notify them about values
    # of all properties of the object.
    def update_all_observers(self):
        [ self.update_observer(callback) for callback in self.observers ]

    # Update the observer given by callback, i.e. notify it about the values
    # of all properties of the object.
    def update_observer(self, callback):
        self.notify_observers("auto-open", [callback])
        self.notify_observers("auto-close", [callback])
        self.notify_observers("time-open", [callback])
        self.notify_observers("time-close", [callback])

    # Notify the given observers about changes encoded in the update
    # parameter, which must have the form "<cat>-<subcat>" with
    # cat in {"auto", "time"} and subcat in {"open", "close"}.
    # If the observers parameter is omitted, all registered observers are
    # notified.
    def notify_observers(self, update, observers=None):
        if not self.boInit:
            update_vals = {"time-open":   [self.open_time.hour
                                          ,self.open_time.minute]
                           ,"time-close": [self.close_time.hour
                                          ,self.close_time.minute]
                           ,"auto-close": [self.auto_close]
                           ,"auto-open":  [self.auto_open]
                           }
            # Append the current value(s) to the update key, colon-separated,
            # e.g. "time-open-7:30".
            val_str = ""
            for val in update_vals[update]:
                val_str = val_str + ":" + str(val)
            update = update + "-" + val_str[1:]
            if observers is None:
                observers = self.observers
            self.log.info("internal: Calling " + str(len(observers)) +
                            " timer observers")
            [ callback(update) for callback in observers ]

    def exec_motion(self, arg):
        """Scheduler callback: forward the scheduled event to the state handler."""
        self.log.info("internal: scheduled event - " + arg)
        self.state_handler.handle_event(arg)

    def save_state(self):
        """Pickle the four timer settings to the settings file."""
        if not self.boInit:
            try:
                with open(self.settings_file, "wb") as outfile:
                    pickle.dump(self.open_time, outfile)
                    pickle.dump(self.close_time, outfile)
                    pickle.dump(self.auto_open, outfile)
                    pickle.dump(self.auto_close, outfile)
                    self.log.info("internal: successfully saved timer setting" +
                                    " to file")
            except Exception:
                # BUG FIX: was exec_info=True, an unknown keyword that made
                # Logger.error() itself raise TypeError inside this handler.
                self.log.error("internal error: write timer setting to file: ",
                                exc_info=True)

    def __del__(self):
        self.scheduler.shutdown()
示例#35
0
#######################   Supporting Functions   ####################################
# Module-level scheduler singleton and UI state; created once at import time.
sched = None
MONITOR_BUTTON = None

print(sched)
if sched is None:

    print('Starting Proces ...')
    # daemon=True so the scheduler thread dies with the main process
    sched = BackgroundScheduler(daemon=True)
    sched.add_job(sensor,
                  'interval',
                  seconds=int(get_current_interval()),
                  id='my_job_id')
    # Start paused, then resume only when monitoring is wanted.
    sched.start()
    sched.pause()

    # NOTE(review): resuming when the status is 'stop' looks inverted —
    # confirm the intended semantics of get_monitor_status().
    if get_monitor_status() == 'stop':
        sched.resume()

else:
    print('Process already created ...')

@app.route("/on_off")
def on_off():

    global MONITOR_BUTTON
示例#36
0
def main():
    global subs
    global player
    global bridge
    global SRT_FILENAME, AUDIO_FILENAME, MAX_BRIGHTNESS, TICK_TIME, HUE_IP_ADDRESS
    parser = argparse.ArgumentParser(description="LushRoom sound and light command-line player")
    # group = parser.add_mutually_exclusive_group()
    # group.add_argument("-v", "--verbose", action="store_true")
    # group.add_argument("-q", "--quiet", action="store_true")
    parser.add_argument("-s","--srt", default=SRT_FILENAME, help=".srt file name for lighting events")
    parser.add_argument("-a","--audio", default=AUDIO_FILENAME, help="audio file for sound stream")
    parser.add_argument("-b","--brightness", default=MAX_BRIGHTNESS, help="maximum brightness")
    parser.add_argument("-t","--time", default=TICK_TIME, help="time between events")
    parser.add_argument("--hue", default=HUE_IP_ADDRESS, help="Philips Hue bridge IP address")

    args = parser.parse_args()

    print(args)

    if PLAY_AUDIO:
        player = vlc.MediaPlayer(AUDIO_FILENAME)
        event_manager = player.event_manager()
        event_manager.event_attach(vlc.EventType.MediaPlayerEndReached, end_callback)

    if PLAY_HUE:
        # b = Bridge('lushroom-hue.local')
        bridge = Bridge(HUE_IP_ADDRESS, config_file_path="/media/usb/python_hue")
        # If the app is not registered and the button is not pressed, press the button and call connect() (this only needs to be run a single time)
        bridge.connect()
        # Get the bridge state (This returns the full dictionary that you can explore)
        bridge.get_api()
        lights = bridge.lights
        # Print light names
        for l in lights:
            print(l.name)
            #print(dir(l))
        # Set brightness of each light to 10
        for l in lights:
            l.brightness = 1

        # Get a dictionary with the light name as the key
        light_names = bridge.get_light_objects('name')
        print("Light names:", light_names)

    subs = srtopen(SRT_FILENAME)

    print("Number of lighting events",len(subs))

    scheduler = BackgroundScheduler()
    scheduler.add_job(tick, 'interval', seconds=TICK_TIME)
    # scheduler.start(paused=True)
    if PLAY_AUDIO:
        player.play()
    scheduler.start(paused=False)

    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            sleep(0.01)
            try:
                if keyboard.is_pressed('p'): # pause
                    scheduler.pause()
                    player.pause()
                elif keyboard.is_pressed('r'): # resume
                    scheduler.resume()
                    player.play()
                # elif keyboard.is_pressed('s'): # stop
                #     scheduler.shutdown()
                #     player.stop()
                #     exit(0)
            except:
                pass
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        scheduler.shutdown()
        player.stop()