Example #1
class PeriodGather(object):
    def __init__(self, db): 
        self.__db = db
        self.__scheduler = BlockingScheduler()
        self.__scheduler.add_job(self.gather, 'cron', day_of_week='mon-fri', hour=16, minute=30)

    def start(self):
        self.__scheduler.start()

    def gather(self):
        _logger.info('period gather stock basic and history data, begin.....')
        try:
            StockBasicCollector(self.__db).collect()
            stock_list = self.__get_stock_list()
            for stock in stock_list:
                HistDataCollector(stock, self.__db).collect()
        except Exception as e:
            _logger.exception(e)
        _logger.info('period gather stock basic and history data, end.....')
    
    def __get_stock_list(self):
        collection = Collection(Constants.BASIC_COLLECTION, self.__db)
        stock_infos = collection.find()
        stock_list = []
        for stock_info in stock_infos:
            stock_list.append(stock_info['code'])
        return stock_list

    def stop(self):
        if self.__scheduler:
            self.__scheduler.shutdown()
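
For reference, a minimal usage sketch (assuming a pymongo database handle and the collector classes above; the connection string and database name are made up):

from pymongo import MongoClient

db = MongoClient('mongodb://localhost:27017')['stock']   # hypothetical database
gatherer = PeriodGather(db)
try:
    gatherer.start()   # BlockingScheduler.start() blocks this thread until shutdown
except (KeyboardInterrupt, SystemExit):
    gatherer.stop()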
Example #2
class PeriodicRetrievalManager(RetrievalManager):
    """
    Manages the periodic retrieval of updates.
    """
    def __init__(self, retrieval_period: TimeDeltaInSecondsT, update_mapper: UpdateMapper,
                 logger: Logger=PythonLoggingLogger()):
        """
        Constructor.
        :param retrieval_period: the period that dictates the frequency at which data is retrieved
        :param update_mapper: the object through which updates can be retrieved from the source
        :param logger: log recorder
        """
        super().__init__(update_mapper, logger)
        self._retrieval_period = retrieval_period
        self._running = False
        self._state_lock = Lock()
        self._updates_since = None  # type: datetime

        self._scheduler = BlockingScheduler()
        self._scheduler.add_job(self._do_periodic_retrieval, "interval", seconds=self._retrieval_period, coalesce=True,
                                max_instances=1, next_run_time=datetime.now())

    def run(self, updates_since: datetime=datetime.min):
        self._updates_since = localise_to_utc(updates_since)

        with self._state_lock:
            if self._running:
                raise RuntimeError("Already running")
            self._running = True
        self._scheduler.start()

    def start(self, updates_since: datetime=datetime.min):
        """
        Starts the periodic retriever in a new thread. Cannot start if already running.
        :param updates_since: the time from which to get updates from (defaults to getting all updates).
        """
        Thread(target=self.run, args=(updates_since, )).start()

    def stop(self):
        """
        Stops the periodic retriever.
        """
        with self._state_lock:
            if self._running:
                self._scheduler.shutdown(wait=False)
                self._running = False
                logging.debug("Stopped periodic retrieval manger")

    def _do_periodic_retrieval(self):
        assert self._updates_since is not None
        updates = self._do_retrieval(self._updates_since)

        if len(updates) > 0:
            # Next time, get all updates since the most recent that was received last time
            self._updates_since = updates.get_most_recent()[0].timestamp
        else:
            # No updates were received, so `self._updates_since` is left unchanged. There is no
            # risk of fetching duplicates next time, because the previously queried range
            # contained no updates.
            pass
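
Because BlockingScheduler.start() blocks its calling thread, `run()` never returns until shutdown; that is why `start()` above wraps it in a Thread. A hedged usage sketch (`my_update_mapper` is a stand-in for a real UpdateMapper):

manager = PeriodicRetrievalManager(retrieval_period=30, update_mapper=my_update_mapper)
manager.start(updates_since=datetime(2021, 1, 1))   # returns immediately; retrieval runs on its own thread
# ... later ...
manager.stop()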
Example #3
def cronjob():
    scheduler = BlockingScheduler()
    print "*******"
    scheduler.add_job(checkupdate,'cron', second='0', hour='2',minute='0')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
Example #4
def startCrawlerTask():
    from apscheduler.schedulers.blocking import BlockingScheduler
    scheduler = BlockingScheduler()
    scheduler.add_job(crawlerTask, 'cron', hour='8', minute='15', second='0')
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
Example #5
def startschedule():
    scheduler = BlockingScheduler()
    scheduler.add_job(test,'cron', second='*/3', hour='*')    
    scheduler.add_job(dotraveljobs,'cron', second='*/3', hour='*')
    scheduler.add_job(doindexjobs,'cron', second='*/3', hour='*')
    scheduler.add_job(docontentjobs,'cron', second='*/3', hour='*')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()     
Example #6
class Worker(threading.Thread):
    def __init__(self, hours=0, minutes=0, seconds=0):
        super(Worker, self).__init__()
        if not (hours or minutes or seconds):
            raise ValueError('Hours, minutes and seconds cannot be all equal to 0')

        self.scheduler = BlockingScheduler()
        self.job = self.scheduler.add_job(run, 'interval', hours=hours, minutes=minutes, seconds=seconds)

    def run(self):
        self.scheduler.start()

    def stop(self, *args):
        self.scheduler.shutdown()
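
Note that inside `__init__` the unqualified `run` passed to add_job resolves to a module-level function, not the `run` method below it. A minimal sketch of driving this Worker (the module-level `run` job is an assumption):

def run():
    print('periodic work')

worker = Worker(seconds=10)
worker.start()   # Thread.start() -> Worker.run() -> BlockingScheduler.start() on the worker thread
# ... later, from the main thread ...
worker.stop()    # shutdown() unblocks the scheduler and lets the thread exit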
Example #7
class AnalysisTask(object):

	def __init__(self,parent,cellList,interval_min=1):
		# cellList is a list of CellItem
		super(AnalysisTask, self).__init__()
		self.parent = parent
		self.cellList = cellList
		self.stage = parent.stage
		self.interval_min = interval_min
		self.taskOn = True
		self.scheduler = BlockingScheduler()

	def start(self):
		self.t = threading.Thread(target=self.analyseJob,args=(None,))
		self.t.start()

	def stop(self):
		self.taskOn = False
		if self.scheduler.running:
			self.scheduler.shutdown()
			print("Scheduler stopped")

		# self.t.join()

	def analyseJob(self,s):
		self.scheduler.add_job(self.analyseList, 'interval', minutes=self.interval_min)
		self.analyseList()
		self.scheduler.start()

	def analyseList(self):
		analysis_start_time = time.time()
		print("Starting cell analysis")
		for cellItem in self.cellList:
			self.analyseCell(cellItem)
			if not self.taskOn:
				print("AnalysisTask stopped")
				return 
		now = time.time()
		next_analysis_time = analysis_start_time + 60*self.interval_min
		waiting_time = next_analysis_time - now
		next_analysis_time_str = time.strftime("%d %b %Y %H:%M:%S",time.localtime(next_analysis_time))
		print("Next analysis in {} min (on {})".format(waiting_time/60,next_analysis_time_str))

	def analyseCell(self,cellItem):
		print("Going to cell {}".format(cellItem.cellName))
		self.stage.moveAbsolute(cellItem.position,wait=True)
		assert self.stage.position == cellItem.position
		self.parent.saveData(cellItem.cellName)
Example #8
    def build_cube_from_names_or_file(buildType, cube_name=None, names_file=None, start_time=None, end_time=None, timed_build=None, crontab_options=None):
        cube_name_list = []
        cube_name_list_from_file = []

        if names_file is not None:
            cube_name_list_from_file = CSVReader.get_cube_names_from_csv(names_file)
        if cube_name is not None:
            cube_name_list = cube_name.split(',')

        cube_name_list += cube_name_list_from_file
        print("building cubes for", cube_name_list)

        if timed_build is not None and crontab_options is not None:
            scheduler = BlockingScheduler()

            if timed_build == 'i' and crontab_options is not None:
                scheduler.add_job(ClientJob.build, 'interval', hours=int(crontab_options),
                                  args=[cube_name_list, buildType, start_time, end_time])
                try:
                    scheduler.start()
                except (KeyboardInterrupt, SystemExit):
                    scheduler.shutdown()
            elif timed_build == 't' and crontab_options is not None:
                time_list = crontab_options.split(',')

                if len(time_list) == 6:
                    scheduler.add_job(ClientJob.build, 'date',
                                      run_date=datetime.datetime(int(time_list[0]), int(time_list[1]),
                                                                 int(time_list[2]), int(time_list[3]),
                                                                 int(time_list[4]), int(time_list[5])),
                                      args=[cube_name_list, buildType, start_time, end_time])
                    try:
                        scheduler.start()
                    except (KeyboardInterrupt, SystemExit):
                        scheduler.shutdown()
                else:
                    print('Bad command line!')
            else:
                print('Bad command line!')
        else:
            status = ClientJob.build(cube_name_list, buildType, start_time, end_time)
            if not status:
                sys.exit(1)
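
A hedged example invocation, assuming the helper is exposed at module level (the build type and cube names are made up):

build_cube_from_names_or_file('FULL', cube_name='sales,traffic',
                              timed_build='i', crontab_options='6')   # rebuild both cubes every 6 hours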
Example #9
def transferfile(results):
    src_file_path = results['file_abs_path']
    dest_file_path = "~/example-copy"
    save_json(results, src_file_path)
    auth = get_access_token()
    src = "mattbest#NICHO-LENO5"     # source endpoint
    dst = "mattbest#Oba-Nicho5-Dell"   # destination endpoint
    # authenticate using access token
    api = api_client.TransferAPIClient(
        username=auth.username,
        goauth=auth.token
    )
    # activate endpoints
    status, message, data = api.endpoint_autoactivate(src)
    status, message, data = api.endpoint_autoactivate(dst)
    # get submission id
    code, reason, result = api.transfer_submission_id()
    submission_id = result["value"]
    # designate endpoints(1) and items(2) for transfer(3)
    def transfer(id, source, dest):
        t = Transfer(id, source, dest)
        t.add_item(src_file_path, dest_file_path)
        status, reason, result = api.transfer(t)
        os._exit(1)
    ### SCHEDULE TRANSFER JOB ###
    scheduler = BlockingScheduler()
    # run one minute from now; timedelta avoids overflow when now.minute == 59
    # (assumes `from datetime import datetime, timedelta` among the imports)
    run_date = (datetime.now() + timedelta(minutes=1)).replace(second=0, microsecond=0)
    scheduler.add_job(lambda:
        transfer(submission_id, src, dst),
        'date',
        run_date=run_date
    )
    # returns 0 in the child, pid of the child in the parent
    if os.fork():
        sys.exit()
    scheduler.start()
    scheduler.shutdown()
Example #10
class StockJob:
    def __init__(self):
        self.scheduler = BlockingScheduler()
        self.pool = None
        if (platform.system() == 'Windows'):
            self.stockDeal = Trader_gxzq(no=TDX_USER,pwd=TDX_PWD,dimpwd=TDX_DIMPWD)
        self.isopen = False
        self.localData = LocalData.getInstance()
        self.stockData = StockData.getInstance()
        self.tdxData = TdxData.getInstance()
        self.mailUtil = MailUtil.getInstance()


    def add_interval_job(self,fun,days=1,start='2020-05-07 20:56:00'):
        # starting from start_date (default 2020-05-07 20:56:00), run once every `days` days
        self.scheduler.add_job(fun, 'interval', days=days, start_date=start)
    '''
    cron: fires periodically at specific times:
        year: 4-digit year
        month: month (1-12)
        day: day of the month (1-31)
        week: ISO week (1-53)
        day_of_week: day of the week (0-6 or mon,tue,wed,thu,fri,sat,sun)
        hour: hour (0-23)
        minute: minute (0-59)
        second: second (0-59)
        start_date: earliest run time
        end_date: latest run time
        timezone: timezone for the schedule
    '''
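    # Hypothetical example calls for the two helpers above (times made up):
    #   self.add_cron_job(self.download, day_of_week='mon-fri', hour='16', minute='35')
    #   self.add_interval_job(self.update_codes, days=7, start='2020-05-08 08:00:00')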
    def add_cron_job(self,fun,month='*',day='*',day_of_week='*',hour='*',minute='*'):
        self.scheduler.add_job(func=fun, trigger='cron', month=month, day=day,day_of_week=day_of_week, hour=hour, minute=minute)
    def add_date_job(self,fun,date):
        self.scheduler.add_job(fun, 'date', run_date=date)
    def start(self):
        self.scheduler.start()
    def stop(self):
        self.scheduler.shutdown()

    # Backtest
    def test(self,default=True):
        config.putByKey('TDX_CATEGORY','6')
        if default:  # by default, backtesting only runs on trading days
            now = time.strftime("%Y%m%d", time.localtime())
            if not DEBUG and self.stockData.isOpen(now) <= 0:
                return
        # only backtest on trading days; otherwise do nothing
        codes = self.localData.codes(type=1)
        codes = codes[codes['zs'].eq(0)] # backtest individual stocks only
        for index, code in codes.iterrows():
            logging.debug('%s update started, current index %s, %s remaining'%(code['code'],index,len(codes)-index-1))
            try:
                g57 = StockTest2(code['code'], 0, 'Test2')
                g57.run()
            except Exception as e:
                print(e)
        # after backtesting completes, set the stock pool
        self.pool = self.localData.result_report()

    # Download data
    def download(self):
        config.putByKey('TDX_CATEGORY', '6')
        codes = self.localData.codes()
        codes = codes[codes['zs'].eq(0)]
        for index,code in codes.iterrows():
            try:
                self.localData.data(code['code'],code['zs'])
                time.sleep(0.2) # rate limit: 60 requests per minute
            except Exception as e:
                print(e)
    # Update stock codes and sectors, and initialize watched shareholders
    def update_codes(self):
        self.stockData.updateStocks()
        # self.tdxData.updateBk()
        self.localData.init_gdwarn()
        # self.localData.update_stock_pool()
    # Update sectors
    def update_bks(self):
        self.tdxData.updateBk()
    # Update shareholders
    def update_gds(self):
        self.tdxData.updateGDs()
    # Update dividends
    def update_fhs(self):
        self.tdxData.updateFhYears()
    # Open the trading client
    def open_tdx(self):
        now = time.strftime("%Y%m%d", time.localtime())
        if not DEBUG and self.stockData.isOpen(now) <= 0:
            return
        logging.debug('trading client logging in....')
        self.stockDeal.login()
        time.sleep(10)
        self.stockDeal.click_tree_item('买入')
        # after a successful login, pause briefly before fetching position info
        time.sleep(3)
        results, orders = self.stockDeal.orders()
        self.les_money = float(results['可用'])
        # buyable stocks live in a database table (a holding whose monthly line turns to sell is dropped
        # from the watchlist; on close, fills are merged back into the table; if the table is empty,
        # candidates are picked at random from this month's tradable stocks)
        # fetch: how many stocks can be bought, the buy pool, the amount per stock, and the sell pool
        nums,buy_data,level_money,seal_data = self.localData.get_buy_stocks(self.les_money)
        self.level_money = level_money
        self.seal_data = seal_data
        self.buy_pools = {} # keep the buy/sell pools in {code: num} form
        if len(buy_data)>0:
            for i in range(nums):
                self.buy_pools[buy_data[i]]=0
        # look up the monthly backtest records for the current positions and cache their trade state
        self.seal_pools = {}  # stocks available to sell
        try:
            for i in range(len(orders)):
                self.seal_pools[orders[i][0]]=orders[i][4]
            self.isopen = True
            logging.debug('trading client login succeeded.\navailable funds %s, buy pool %s, positions %s'%(self.les_money,str(self.buy_pools),str(self.seal_pools)))
        except Exception as e:
            logging.debug('failed to fetch positions: %s'%e)
    # Close the trading client
    def close_tdx(self):
        now = time.strftime("%Y%m%d", time.localtime())
        if not DEBUG and self.stockData.isOpen(now) <= 0:
            return
        logging.debug('trading client closing....')
        try:
            # today's fills; if there are any, email a notification
            dayorders = self.stockDeal.day_orders()
            # close the trading client
            self.stockDeal.destory()
            # update the fills table from today's trades
            self.localData.update_stock_pool(dayorders,handle=1)
            self.mailUtil.sendEmail([MAIL_USER], "Today's fills", self.localData.get_order_msg(dayorders))
            logging.debug("trading client closed. today's fill details:\n%s"%("\n".join('%s' % id for id in dayorders)))
        except Exception as e:
            pass
        self.isopen = False
    # Per-stock alert trading
    def update_datas(self):
        now = time.strftime("%Y%m%d", time.localtime())
        if not DEBUG and self.stockData.isOpen(now) <= 0:
            return
        if not DEBUG and not self.localData.is_deal_time():
            return
        if not self.isopen:
            self.close_tdx()
            self.open_tdx()
        # set up initial data
        config.putByKey('TDX_CATEGORY','9')
        pools = {}
        pools.update(self.buy_pools)
        pools.update(self.seal_pools)
        pools_data = []
        for code,num in pools.items():
            # refresh the data
            df = self.localData.data(code,0)
            current_price = df.tail(1)['close'].values[0]
            # backtest
            g57 = StockTest3(code, 0, 'Test3',data=df)
            order,result,msgs = g57.run()
            # parse the backtest result: date, type, price, lots, turnover, fees deducted
            # base = self.localData.get_base(code, 0)
            arr = order.split('\t')
            if arr[0] != now: # skip anything that is not today's trade
                continue
            # the lot count needs to be recalculated
            if arr[1] == '1':  # buy
                self.stockDeal.click_toolbar('买入')
                logging.debug('buy: %s'%order)
                if len(self.buy_pools)>0:# trade only while the monthly line is in a buy state
                    num = int(self.level_money[code] / current_price / 100) * 100
                    # by order id
                    # self.stockDeal.buy(code=code,type=base['type'],price=current_price,num=num)
                    self.buy_pools.pop(code,'404') # remove from the buy pool after buying
                    pools_data.append({'code': code, 'state': 1, 'num': num})
                    logging.info('buy succeeded: code %s, price %s, quantity %s' % (code,current_price,num))
                else:
                    logging.error('buy failed: buy pool %s, pending code %s, price %s'%(self.buy_pools,code,current_price))
                    self.mailUtil.sendEmail([MAIL_USER], 'Trade failed, please check', 'buy failed %s'%order)
            elif arr[1] == '0':  # sell
                self.stockDeal.click_toolbar('卖出')
                logging.debug('sell: %s' % order)
                num = self.seal_pools.get(code,0)
                if self.seal_pools.get(code,0) == 0:
                    err_msg = 'sell failed: code %s, current price %s, quantity %s' % (code,current_price,num)
                    logging.error(err_msg)
                    self.mailUtil.sendEmail([MAIL_USER], 'Trade failed, please check', err_msg)
                    continue
                # self.stockDeal.seal(code=code,type=base['type'],price=current_price,num=num)
                pools_data.append({'code': code, 'state': 0, 'num': 0})
                self.seal_pools.pop(code,'404')
                logging.info('sell succeeded: code %s, price %s, quantity %s' % (code, current_price, num))
                # if the monthly line is still in a buy state, add the stock back into the buy pool
                if self.seal_data.get(code,0)>0:
                    self.buy_pools[code] = self.stockDeal.get_money(current_price,num)
                else:
                    logging.error('sell pool %s does not contain %s'%(str(self.seal_data),code))

    @classmethod
    def getInstance(cls):
        if not hasattr(StockJob, "_instance"):
            StockJob._instance = StockJob()
        return StockJob._instance
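
A hedged sketch of wiring these jobs together (the schedule times are illustrative, not from the original):

job = StockJob.getInstance()
job.add_cron_job(job.open_tdx, day_of_week='mon-fri', hour='9', minute='20')
job.add_cron_job(job.update_datas, day_of_week='mon-fri', hour='9-15', minute='*/5')
job.add_cron_job(job.close_tdx, day_of_week='mon-fri', hour='15', minute='5')
job.start()   # BlockingScheduler: blocks until stop() or an interrupt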
Example #11
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Process BF2 manually or \
                                     through scheduler.')
    parser.add_argument("-s", "--sched", action="store_true", dest="sched",
                        default=False, help="use scheduler to run")
    parser.add_argument("-m", "--man", action="store_true", dest="man",
                        default=False, help="run manually")
    parser.add_argument("-r", "--repo", dest="repo",
                        default="/Users/Christina/Projects/BF2",
                        help="repo working directory")

    args = parser.parse_args()

    if os.path.isfile('.git/config.lock'):
        os.remove('.git/config.lock')
    repo = Repo(args.repo)
    config = repo.config_writer()
    config.set_value("user", "email", "*****@*****.**")
    config.set_value("user", "name", "cmh2166")
    index = repo.index

    if not args.sched and not args.man:
        parser.print_help()
        exit()
    elif args.man:
        main()
    elif args.sched:
        sched.start()
        sched.shutdown(wait=True)
Example #12
class MyScheduler:
    def __init__(self):
        self._scheduler = BlockingScheduler()
        self._hue = HueWrapper()
        self._enviro = EnviroWrapper()
        self._kasa = KasaWrapper()
        self._bright = 0
        self._init()
        self._jobs_list = [
            self._manage_heater,
            self._manage_lights,
        ]
        self._jobs_cycle = itertools.cycle(self._jobs_list)

        self.heater_on_for = 0
        self.heater_off_for = 0

        self.switch_on = False

    def _init(self):
        logging.basicConfig(level=logging.INFO)
        logging.getLogger("apscheduler.scheduler").addFilter(MuteFilter())

        self._hue.connect()

        self._scheduler.add_job(func=self._get_sunset_sunrise, trigger=at_midnight)
        self._scheduler.add_job(func=self._get_sunset_sunrise)

        self._scheduler.add_job(self._manage_next, trigger=every_thirty_seconds)
        self._scheduler.add_job(self._manage_all)

    def start(self):
        self._scheduler.print_jobs()
        self._scheduler.start()

    def stop(self):
        self._scheduler.shutdown()
        # if self._hue:
        #     self._hue.off()

    def _manage_all(self):
        for job in self._jobs_list:
            job()
            time.sleep(5)

    def _manage_next(self):
        job = next(self._jobs_cycle)
        job()

    def _manage_lights(self):
        if self._hue.is_on:
            self._hue.do_whatever()

    def _manage_heater(self):
        now = datetime.datetime.now()
        weekday = now.weekday()
        hour = now.hour
        mins = now.minute
        month = now.month

        monday = 0
        friday = 4
        on_holiday = False
        in_work_hours = not on_holiday \
                        and monday <= weekday <= friday \
                        and 8 <= hour <= 16 \
                        and (hour != 16 or mins <= 30)

        is_spring = 3 <= month <= 5
        is_summer = 6 <= month <= 8
        is_autumn = 9 <= month <= 11
        is_winter = month == 12 or month <= 2

        is_morning = 0 <= hour <= 12
        # is_early_morning = 0 <= hour <= 8

        logging.info('weekday: {} hour: {} in_work_hours: {}'.format(weekday, hour, in_work_hours))

        temperature = self._enviro.get_temperature()
        logging.info('temperature: {}'.format(temperature))

        if is_spring:
            target_temperature = 18.0
        elif is_summer:
            target_temperature = 10.0
        elif is_autumn:
            target_temperature = 18.0
        elif is_winter:
            target_temperature = 18.0
        else:
            target_temperature = 18.0

        if is_winter:
            # if is_early_morning:
            #     target_temperature += 1
            if is_morning:
                target_temperature += 1

        is_on = self._kasa.is_on()
        logging.info('is_on: {0}'.format(is_on))
        if is_on:
            self.heater_on_for += 1
            self.heater_off_for = 0
        else:
            self.heater_on_for = 0
            self.heater_off_for += 1

        logging.info('heater_on_for: {0}'.format(self.heater_on_for))
        logging.info('heater_off_for: {0}'.format(self.heater_off_for))

        cooler_thx = temperature > target_temperature or self.heater_on_for > 5
        warmer_plz = in_work_hours and temperature < target_temperature - 2

        if cooler_thx:
            logging.info('cooler_thx')
            self.switch_on = False
        elif warmer_plz:
            logging.info('warmer_plz')
            if self.heater_off_for > 1:
                logging.info('Duty cycle on')
                self.switch_on = True

        logging.info('switch_on: {0}'.format(self.switch_on))

        if self.switch_on:
            logging.info('Switching heater on')
            self._kasa.switch_on()
        elif cooler_thx:
            logging.info('Switching heater off')
            self._kasa.switch_off()

    def _get_sunset_sunrise(self):
        a = Astral()
        leeds = a['Leeds']
        today = datetime.date.today()
        self._today_sun_data = leeds.sun(date=today, local=True)
        self.timezone = leeds.timezone
        logging.info(pprint.pformat(self._today_sun_data))

        self.dawn = self._today_sun_data['dawn']
        self.sunrise = self._today_sun_data['sunrise']
        self.sunset = self._today_sun_data['sunset']
        self.dusk = self._today_sun_data['dusk']

        at_dawn = _get_cron_trigger_for_datetime(self.dawn)
        at_sunrise = _get_cron_trigger_for_datetime(self.sunrise)
        at_sunset = _get_cron_trigger_for_datetime(self.sunset)
        at_dusk = _get_cron_trigger_for_datetime(self.dusk)

        during_sunrise = IntervalTrigger(seconds=5, start_date=self.dawn, end_date=self.sunrise)
        during_sunset = IntervalTrigger(seconds=5, start_date=self.sunset, end_date=self.dusk)

        self._scheduler.add_job(func=self._at_dawn, trigger=at_dawn)
        self._scheduler.add_job(func=self._during_sunrise, trigger=during_sunrise)
        self._scheduler.add_job(func=self._at_sunrise, trigger=at_sunrise)

        self._scheduler.add_job(func=self._at_sunset, trigger=at_sunset)
        self._scheduler.add_job(func=self._during_sunset, trigger=during_sunset)
        self._scheduler.add_job(func=self._at_dusk, trigger=at_dusk)

        now = datetime.datetime.now(tz)
        if now <= self.dawn:
            day_factor = 0.0
        elif self.dawn < now <= self.sunrise:
            day_factor = colour_helper.get_day_factor(self.dawn, now, self.sunrise, True)
        elif self.sunrise < now <= self.sunset:
            day_factor = 1.0
        elif self.sunset < now <= self.dusk:
            day_factor = colour_helper.get_day_factor(self.sunset, now, self.dusk, False)
        elif now < self.dusk:
            day_factor = 0.0
        else:
            day_factor = 0.25

        self._set_day_factor(day_factor)

    def _at_dawn(self):
        day_factor = 0.0
        self._set_day_factor(day_factor)
        logging.info('dawn')

    def _at_sunrise(self):
        day_factor = 1.0
        self._set_day_factor(day_factor)
        logging.info('sunrise')

    def _at_sunset(self):
        day_factor = 1.0
        self._set_day_factor(day_factor)
        logging.info('sunset')

    def _at_dusk(self):
        day_factor = 0.0
        self._set_day_factor(day_factor)
        logging.info('dusk')

    def _during_sunrise(self):
        day_factor = colour_helper.get_day_factor(self.dawn, datetime.datetime.now(tz), self.sunrise, True)
        self._set_day_factor(day_factor)

    def _during_sunset(self):
        day_factor = colour_helper.get_day_factor(self.sunset, datetime.datetime.now(tz), self.dusk, False)
        self._set_day_factor(day_factor)

    def _set_day_factor(self, day_factor):
        logging.info('day factor: {}'.format(day_factor))
        colour_helper.set_day_factor(day_factor)
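
`_get_cron_trigger_for_datetime`, `at_midnight`, `every_thirty_seconds`, `colour_helper` and `tz` come from module scope and are not shown. A plausible sketch of the trigger helper (an assumption, not the original code):

from apscheduler.triggers.cron import CronTrigger

def _get_cron_trigger_for_datetime(dt):
    # fire once a day at the wall-clock time of the given datetime
    return CronTrigger(hour=dt.hour, minute=dt.minute, second=dt.second)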
Example #13
class Controller:
    def __init__(self, configs):
        self.main_loop_lock = False
        self.scheduler = None
        self.configs = configs
        # self.installer = None
        self.agent_holder = None

        self.need_download = False
        self.need_install = False
        self.need_start = False
        self.need_version_recheck = False

        self.download_path = None
        self.checksum = None

        self.ssl_context = ssl.create_default_context()

        self.recycle_queue = Queue.Queue()
        # self.block_count = 0

    def start(self):
        LOG.debug('controller started')

        recycle_thread = ProcessRecycle(self.recycle_queue)
        recycle_thread.start()

        self.scheduler = None
        if os.path.isfile('/etc/timezone'):
            with open('/etc/timezone') as f:  # read-only; 'a+' would start reading at EOF
                tz = f.read().strip()
                self.scheduler = BlockingScheduler(timezone=tz)
        if not self.scheduler:
            self.scheduler = BlockingScheduler()
        self.scheduler.add_job(self.update_check,
                               trigger='interval',
                               seconds=self.configs['update_check_period'],
                               next_run_time=datetime.datetime.now())
        self.scheduler.add_job(self.main_job,
                               trigger='cron',
                               minute='*/%d' %
                               self.configs['main_loop_period_minute'],
                               second='0')
        self.scheduler.start()
        LOG.debug('controller stopped')

    def stop(self):
        self.stop_agent()
        if self.scheduler:
            self.scheduler.shutdown()

    def main_job(self):
        if self.main_loop_lock:
            # if self.block_count >= 3:
            #     self.terminate_installer()
            #     self.block_count = 0
            return
        self.main_loop_lock = True
        # self.block_count = 0
        self.health_check()
        if self.need_version_recheck:
            self.version_recheck()

        try:
            if self.need_download:
                LOG.info('********************* download')
                if self.download_agent():
                    self.need_download = False
                    self.need_install = True

            if self.need_install:
                LOG.info('********************* install')

                old_folder = self.get_latest_install_folder()

                update = self.need_install_check()
                # reinstall current agent package
                if not update:
                    self.stop_agent()
                    self.uninstall_agent(old_folder)

                package_path = self.get_latest_install_package()

                if package_path:
                    agent_dir = self.install_agent(package_path)

                    # new agent installed, remove old agent folder
                    if update:
                        self.stop_agent()
                        self.uninstall_agent(old_folder)

                    self.start_agent(agent_dir)
                    self.need_install = False
                    self.need_start = False

            elif self.need_start:
                LOG.info('********************* restart')
                self.need_start = False

                self.stop_agent()

                agent_dir = self.get_latest_install_folder()
                if not agent_dir:
                    package_path = self.get_latest_install_package()
                    if package_path:
                        LOG.debug('********************* restart install')
                        agent_dir = self.install_agent(package_path)
                    else:
                        return

                LOG.debug('********************* restart start')
                self.start_agent(agent_dir)

        except Exception as e:
            LOG.exception(e)
        finally:
            self.main_loop_lock = False

    def install_agent(self, package_path):
        checksum = utils.get_checksum(package_path, self.get_checksum_dir())
        if not utils.check_file(package_path, checksum):
            LOG.debug("check downloaded package failed, removing %s",
                      package_path)
            os.remove(package_path)
            utils.remove_checksum(package_path, self.get_checksum_dir())
            return None

        dirname = utils.get_file_name_without_extension(package_path)
        agent_dir = '%s/%s' % (self.get_install_dir(), dirname)

        r = os.system('mkdir -p %s && tar xzf %s -C %s --strip-components 1' %
                      (agent_dir, package_path, agent_dir))
        if r != 0:
            LOG.debug('decompress failed, %d', r)
            os.remove(package_path)
            os.rmdir(agent_dir)
            return None
        else:
            LOG.debug('decompress success')

        installer = Installer(agent_dir, self.recycle_queue)
        installer.install()
        return agent_dir

    def uninstall_agent(self, agent_dir=None):
        # if self.installer is None:
        #     if agent_dir and os.path.isdir(agent_dir):
        #         self.installer = Installer(agent_dir, self.recycle_queue)
        #     else:
        #         return
        # self.installer.uninstall()
        # self.installer = None
        if agent_dir and os.path.isdir(agent_dir):
            installer = Installer(agent_dir, self.recycle_queue)
            installer.uninstall()

    # def terminate_installer(self):
    #     if self.installer:
    #         self.installer.terminate()
    #         self.installer = None

    def start_agent(self, agent_dir):
        if agent_dir:
            self.agent_holder = AgentHolder(agent_dir, self.recycle_queue)
            agent_process_name = self.agent_holder.start()
            self.save_process_name_to_disk(agent_process_name)
            # self.agent_abnormal = False

    def stop_agent(self):
        if self.agent_holder is None:
            agent_process_name = self.get_process_name_from_disk()
            if agent_process_name:
                os.popen('killall -g %s' % agent_process_name)
                self.save_process_name_to_disk()
        else:
            self.agent_holder.stop()
            self.agent_holder = None
            self.save_process_name_to_disk()

    def health_check(self):
        if self.agent_holder:
            LOG.info('.............. last check was %d seconds ago' %
                     (int(time.time()) - self.agent_holder.last_check_time))
            if int(time.time()
                   ) - self.agent_holder.last_check_time > self.configs[
                       'health_check_threshold']:
                LOG.info('.............. check health, failed')
                self.need_install = True
            else:
                self.need_start = False
        else:
            LOG.info('.............. agent_holder is None')
            self.need_start = True

    def update_check(self):
        if self.check_new_version():
            self.need_download = True
        else:
            self.need_download = False

        if self.need_install_check():
            LOG.info('a newer version has been downloaded; it needs to be installed')
            self.need_install = True

    def need_install_check(self):
        download_version = int(
            utils.get_version_from_path(self.get_latest_install_package()))
        install_version = int(
            utils.get_version_from_path(self.get_latest_install_folder()))
        if download_version > install_version:
            return True
        else:
            return False

    def version_recheck(self):
        self.need_version_recheck = False
        if self.check_new_version():
            self.need_download = True
        else:
            self.need_download = False

    def check_new_version(self):
        try:
            url = '%s%s' % (self.configs['update_server_url'],
                            self.configs['update_info_path'])
            req = urllib2.Request(url)
            response = urllib2.urlopen(req,
                                       timeout=3,
                                       context=self.ssl_context)
            response_json = json.loads(response.read())
            new_version = response_json['version']
            new_path = response_json['path']
            checksum = response_json['checksum']
            # print new_version
            # print new_path

            version = int(
                utils.get_version_from_path(self.get_latest_install_package()))
            if new_version > version:
                self.download_path = new_path
                self.checksum = checksum
                LOG.info('check new version: new version valid')
                return True
            else:
                LOG.info('check new version: it is the latest version')
                return False
        except Exception as e:
            self.need_version_recheck = True
            LOG.exception('check new version failed, err: %s', e)
            return False

    def download_agent(self):
        if self.download_path and self.checksum:
            url = '%s/%s' % (self.configs['update_server_url'],
                             self.download_path)
            f = urllib2.urlopen(url, timeout=300, context=self.ssl_context)
            package_name = utils.get_file_name(url)
            package_path = '%s/%s' % (self.get_download_dir(), package_name)
            with open(package_path, 'wb') as target:
                target.write(f.read())

            if not utils.check_file(package_path, self.checksum):
                os.remove(package_path)
                LOG.debug('checksum failed, removing %s', package_name)
                return False
            else:
                utils.save_checksum(self.checksum, package_path,
                                    self.get_checksum_dir())
                LOG.debug('download %s success', package_name)

        self.download_path = None
        self.checksum = None
        return True

    def get_install_dir(self):
        path = os.path.join(self.configs['home_dir'], 'install_folders')
        if not os.path.isdir(path):
            os.mkdir(path)
        return path

    def get_download_dir(self):
        path = os.path.join(self.configs['home_dir'], 'download')
        if not os.path.isdir(path):
            os.mkdir(path)
        return path

    def get_checksum_dir(self):
        path = os.path.join(self.configs['home_dir'], 'checksum')
        if not os.path.isdir(path):
            os.mkdir(path)
        return path

    def get_latest_install_folder(self):
        latest_version = 0
        latest_folder_path = None

        if not os.path.isdir(self.get_install_dir()):
            return None

        for folder in os.listdir(self.get_install_dir()):
            version = utils.get_version(folder)
            if version.isdigit() and int(version) > latest_version:
                latest_version = int(version)
                latest_folder_path = folder

        if latest_folder_path:
            return '%s/%s' % (self.get_install_dir(), latest_folder_path)
        else:
            return None

    def get_latest_install_package(self):
        latest_version = 0
        latest_package = None

        if not os.path.isdir(self.get_download_dir()):
            return None

        for package in os.listdir(self.get_download_dir()):
            version = utils.get_version(package)
            if version.isdigit() and int(version) > latest_version:
                latest_version = int(version)
                latest_package = package

        if latest_package:
            return '%s/%s' % (self.get_download_dir(), latest_package)
        else:
            return None

    # for debug
    def get_tmp_dir(self):
        return os.path.join(self.configs['home_dir'], 'tmp')

    def save_process_name_to_disk(self, process_name=''):
        with open('%s/process_name' % self.configs['home_dir'], 'w+') as f:
            f.truncate()
            f.write(process_name)

    def get_process_name_from_disk(self):
        with open('%s/process_name' % self.configs['home_dir'], 'a+') as f:
            f.seek(0)  # 'a+' opens positioned at EOF; rewind before reading
            return f.read()
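
For context, a hedged sketch of driving this controller (the config keys are the ones the class reads; the values are illustrative):

configs = {
    'home_dir': '/var/lib/agent',
    'update_check_period': 300,        # seconds between update checks
    'main_loop_period_minute': 5,      # main_job cron period
    'health_check_threshold': 120,     # seconds without a heartbeat before reinstalling
    'update_server_url': 'https://updates.example.com',
    'update_info_path': '/agent/latest.json',
}
controller = Controller(configs)
try:
    controller.start()   # blocks inside BlockingScheduler.start()
except (KeyboardInterrupt, SystemExit):
    controller.stop()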
Example #14
class scheduler():

    """This Class provide the main scheduler, it consumes the config
    and schedules the drivers to run for their respective metrics"""

    def __init__(self, loggingLevel=logging.ERROR):

        self.logger = logging.getLogger('sensipy')
        self.logger.setLevel(loggingLevel)
        fh = logging.FileHandler('error.log')
        fh.setLevel(logging.ERROR)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        self.logger.addHandler(fh)
        self.logger.addHandler(ch)

        self.sender = Sender()
        executors = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(5)
        }
        job_defaults = {
            'coalesce': False,
            'max_instances': 3
        }
        self.queue = Queue(connection=Redis())
        self.scheduler = BlockingScheduler(executors=executors, job_defaults=job_defaults)
        self.configFile = 'config.json'

    def start(self):

        """Runs all the initialization methods, then starts the
        scheduler in standalone (blocking) mode"""

        self.loadConf()
        self.loadDrivers()
        self.loadFeeds()
        self.runScheduler()
        self.scheduler.print_jobs()
        self.printConf("test")
        print("scheduler starting")
        self.scheduler.start()  # blocks here until shutdown() is called

    def stop(self):
        print("test")
        self.scheduler.shutdown()

    def loadDrivers(self):

        """Iterates through the Driver instances stored in the config,
        and loads corresponding instances into the driver dict"""

        self.sources = {}
        for source in self.config['sources']:
            sourceConf = self.config['sources'][source]
            baseClass = sourceConf['baseClass']
            self.logger.debug("Loading: " + source +
                              " instance of: " + baseClass)
            sourceArgs = sourceConf['source-config']
            self.sources[source] = {}
            try:
                print(baseClass)
                tempModule = import_module('sources.' + baseClass)
                """tempModule = __import__('sources.' + baseClass,
                                        globals(), locals(), [baseClass], -1)
                                        """
                self.sources[source]['source'] = getattr(tempModule, str(
                    baseClass))(sourceArgs)
            except Exception as e:
                self.logger.error("exception: " + str(e))
        return None

    def loadConf(self):

        """Retrieves config from file specified in __init__"""

        with open(self.configFile) as f:
            self.config = json.load(f)

    def printConf(self, args):
        print(args)
        print(self.config)

    def loadFeeds(self):

        """Sets up each metric in it's corresponding driver instance nice name
        """

        metrics = self.config['metrics']
        for metric in metrics:
            metricConf = self.config['metrics'][metric]
            metricConf['name'] = metric
            source = metricConf['source']['driver']
            if 'metrics' not in self.sources[source['name']]:
                self.sources[source['name']]['metrics'] = []

            self.sources[source['name']]['metrics'].append(metricConf)

    def runScheduler(self):

        """Sets up base scheduler interval for each configured
        driver instance"""

        for source in self.sources:
            intervals = [
                int(self.sources[source]['metrics'][x]['interval']) for x
                in range(0, len(self.sources[source]['metrics']))]
            sourceInterval = self.gcd(intervals)
            self.sources[source]['sourceInterval'] = sourceInterval
            self.logger.debug(self.sources[source]['metrics'])

            self.scheduler.add_job(
                self.getDriverData, 'interval', args=[
                    self.sources[source]['metrics']],
                seconds=sourceInterval)

    def getDriverData(self, metricSet):

        """Gets data from a single driver instance, on the intervals in
        each metrics config, data is put on the queue with all information
        needed to send to service"""

        sourceNiceName = metricSet[0]['source']['driver']['name']
        source = self.sources[sourceNiceName]
        # advance a running counter by the base polling interval on every call
        # (the class only ever populates self.sources, so self.drivers would not exist)
        if 'sourceCounter' not in source:
            source['sourceCounter'] = source['sourceInterval']
        else:
            source['sourceCounter'] += source['sourceInterval']
        for metric in metricSet:
            count = source['sourceCounter']
            metricInterval = int(metric['interval'])
            if count % metricInterval == 0:
                metricId = metric['id']
                value = source['source'].getData(metric)
                dt = datetime.utcnow()
                self.queue.enqueue(
                    self.sender.send_metric, metricId, value, dt)

    def gcd(self, nums):

        """Recursively computes Greatest Common Divisor for a list of
        numbers, used to compute the base scheduler interval for a
        given set of metric intervals"""

        if len(nums) == 1:
            return nums[0]
        if len(nums) == 0:
            return None
        if len(nums) >= 2:
            a = nums[-1:][0]
            b = nums[-2:-1][0]
            while b:
                a, b = b, a % b
            nums = nums[:-2]
            nums.append(a)
            return self.gcd(nums)
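    # For example, gcd([30, 45, 60]) returns 15: the source is then polled every
    # 15 seconds, and each metric fires whenever the shared counter is a multiple
    # of its own interval (see getDriverData above).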

    def showConf(self):

        """Debug method to ensure config is loading correctly, and to
        pretty print a config to clean up one with miffed formatting"""

        return json.dumps(
            self.config, sort_keys=True, indent=4, separators=(',', ': '))
Example #15
class DakokuManager(object):
    def __init__(self, config_path, schedule_path):
        self.config_path = config_path
        self.schedule_path = schedule_path
        cfg = self._load_config()
        try:
            human_mode_min = cfg["human_mode"]
        except KeyError:
            human_mode_min = 0
        try:
            self.log_dir = cfg["log_dir"]
            file_handler = logging.FileHandler(os.path.join(self.log_dir, "dakoku.log"), 'a+')
            file_handler.level = LEVEL
            log.addHandler(file_handler)
            log.info("saving log to %s", self.log_dir)
        except Exception:
            self.log_dir = None

        sched = self._load_schedule()
        start_date = dt.datetime.strptime(sched["valid"]["start"], '%Y-%m-%d').replace(tzinfo=pytz.timezone('Asia/Tokyo'))
        end_date = dt.datetime.strptime(sched["valid"]["end"], '%Y-%m-%d').replace(tzinfo=pytz.timezone('Asia/Tokyo'))
        log.info("dakoku is valid for %s - %s", start_date, end_date)
        holidays = []
        if "api_key" in cfg:
            try:
                holidays = self._get_holidays(cfg["api_key"], start_date, end_date)
                log.info("holidays are skipped: %s", holidays)
            except Exception as e:
                log.error("failed to import holidays: %s", e)
        else:
            log.warning("No api_key specified. No holiday is registered!")
        self.worker = DakokuWorker(cfg["host"], cfg["user"], cfg["pass"], holidays, self.log_dir)
        self.register(sched["working"], start_date, end_date, holidays, human_mode_min)

    def _load_config(self):
        log.debug("loading config from %s", self.config_path)
        with open(self.config_path, 'r') as f:
            cfg = json.load(f)
        return cfg

    def _load_schedule(self):
        log.debug("loading schedule from %s", self.schedule_path)
        with open(self.schedule_path, 'r') as f:
            cfg = json.load(f)
        return cfg

    def _get_holidays(self, api_key, start_date, end_date):
        srv = Google(serviceName='calendar',
                     version='v3',
                     developerKey=api_key)
        calendar_id = '*****@*****.**'
        cal = srv.events().list(calendarId=calendar_id,
                                maxResults=30,
                                orderBy="startTime",
                                singleEvents=True,
                                timeMin=start_date.strftime('%Y-%m-%dT%H:%M:%SZ'),
                                timeMax=end_date.strftime('%Y-%m-%dT%H:%M:%SZ')).execute()
        return [dt.datetime.strptime(e["start"]["date"], '%Y-%m-%d').replace(tzinfo=pytz.timezone("Asia/Tokyo")) for e in cal["items"]]

    def register(self, working, start_date, end_date, holidays, human_mode_min=0):
        self.scheduler = Scheduler(timezone=pytz.timezone('Asia/Tokyo'), logger=log)
        today = dt.date.today()
        for w in working:
            # schedule shukkin
            h, m = map(int, w["from"].split(':'))
            fromtime = dt.time(h,m,tzinfo=pytz.timezone('Asia/Tokyo'))
            d = dt.datetime.combine(today, fromtime) - dt.timedelta(minutes=human_mode_min)
            trigger = CronTrigger(day_of_week=w["dayOfWeek"],
                                  hour=d.hour, minute=d.minute,
                                  start_date=start_date,
                                  end_date=end_date,
                                  timezone=pytz.timezone('Asia/Tokyo'))
            self.scheduler.add_job(dispatch_after(human_mode_min * 60,
                                                  self.worker.work_start),
                                   trigger)
            # schedule taikin
            h, m = map(int, w["till"].split(':'))
            tilltime = dt.time(h,m,tzinfo=pytz.timezone('Asia/Tokyo'))
            trigger = CronTrigger(day_of_week=w["dayOfWeek"],
                                  hour=tilltime.hour, minute=tilltime.minute,
                                  start_date=start_date,
                                  end_date=end_date,
                                  timezone=pytz.timezone('Asia/Tokyo'))
            self.scheduler.add_job(dispatch_after(human_mode_min * 60,
                                                  self.worker.work_end),
                                   trigger)
        self.scheduler.print_jobs()

    def start(self):
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.shutdown()
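
`dispatch_after` is not shown in the snippet. Since each job is scheduled `human_mode_min` minutes early, a plausible sketch (an assumption) delays the real callback by a random, human-looking offset:

import random
import threading

def dispatch_after(max_delay_sec, fn):
    def _job():
        # punch in/out after a random 0..max_delay_sec delay
        threading.Timer(random.uniform(0, max_delay_sec), fn).start()
    return _job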
Example #16
    ]

    for s in servers:
        url = s + today + '.txt'
        logger.info(url)
        try:
            html = requests.get(url).content.splitlines()
        except Exception:
            logger.error('http get failed.')
            continue

        logger.info('total: ' + str(len(html)))

        hashes = set()
        for h in html:
            h = h.strip().lower()
            if not h:
                continue
            hashes.add(h)
        if hashes:
            save(hashes)

    logger.info('over')


atexit.register(lambda: sched.shutdown(wait=False))


if __name__ == '__main__':
    sched.start()
Example #17
# -*- coding: utf-8 -*-
"""
Created on Thu Jun  3 17:03:38 2021

@author: S. Hiram Rubio
Main script
"""

# Run the code
if __name__ == "__main__":
    # Libs
    from apscheduler.schedulers.blocking import BlockingScheduler
    from BTVis import generate_graph
    # Generate the graph once up front
    generate_graph(FLAG=True, DELETE=False)
    # Scheduler: creation and job initialization
    scheduler = BlockingScheduler()
    job = scheduler.add_job(lambda: generate_graph(DELETE=False),
                            'interval',
                            hours=2.22)
    scheduler.print_jobs()
    try:
        scheduler.start()  # blocks here until interrupted
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
Example #18
class JobScheduler(object):
    def __init__(self, every=30, unit='second'):
        self.mongo = mongopool.get()
        self.cursor = self.mongo.get_database('apscheduler').get_collection(
            'jobs')
        self.every = every
        self.unit = unit
        self.scheduler = BlockingScheduler(logger=logger)
        self.scheduler.configure(jobstores=jobstores,
                                 executors=executors,
                                 job_defaults=job_defaults,
                                 timezone=pytz.timezone('Asia/Saigon'))
        self._set_trigger(every, unit)

    def _set_trigger(self, every, unit):
        now = datetime.now().astimezone(pytz.timezone('Asia/Saigon'))
        if unit == 'second':
            self.trigger = CronTrigger(second='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'minute':
            self.trigger = CronTrigger(minute='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'hour':
            self.trigger = CronTrigger(hour='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'day':
            self.trigger = CronTrigger(day='*/{}'.format(every),
                                       start_date=now)
        else:
            raise ValueError('Unknown time unit')

    def add_jobstore(self, jobstore, alias):
        self.scheduler.add_jobstore(jobstore, alias)

    def add_executor(self, executor, alias):
        self.scheduler.add_executor(executor, alias)

    def add_job(self,
                job_fn,
                id='id1',
                name='job1',
                jobstore='default',
                executor='default',
                args=None,
                kwargs=None):
        now = datetime.now().astimezone(pytz.timezone('Asia/Saigon'))
        history = list(self.cursor.find({'_id': id}))
        if history:
            #TODO: process missing jobs
            self.cursor.delete_one({'_id': id})
        next_run_time = self.trigger.get_next_fire_time(None, now)
        if kwargs:
            kwargs['run_time'] = next_run_time
        else:
            kwargs = {'run_time': next_run_time}

        self.scheduler.add_job(job_fn,
                               trigger=self.trigger,
                               next_run_time=next_run_time,
                               id=id,
                               name=name,
                               jobstore=jobstore,
                               executor=executor,
                               args=args,
                               kwargs=kwargs)

    def remove_job(self, id, jobstore='default'):
        self.scheduler.remove_job(job_id=id, jobstore=jobstore)

    def callback(self, callback_fn, mark=EVENT_ALL):
        self.scheduler.add_listener(callback_fn, mark)

    def start(self):
        mongopool.put(self.mongo)
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.shutdown()
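
A hedged usage sketch (`jobstores`, `executors`, `job_defaults` and `mongopool` are module-level objects in the original; note that add_job injects the next run time into the job's kwargs as `run_time`):

def ping(run_time=None):
    print('job fired; was scheduled for', run_time)

js = JobScheduler(every=5, unit='minute')
js.add_job(ping, id='ping', name='ping-job')
js.start()   # blocks; returns the pooled Mongo connection first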
Example #19
    if options.password == '' or options.username == '':
        print('Please enter a username and password')
        exit(1)
    '''

    # login
    jd.login_by_QR()
    good_data = jd.good_detail(options.good, options.area)
    #print(good_data)
    # timed task
    sched = BlockingScheduler()

    if len(good_data['link']) > 10 and jd.pre_add_cart(good_data):
        # quick purchase: the item was pre-added to the cart
        print("Pre-add succeeded! Running quick purchase...")
        sched.add_job(lite_Q,'date', run_date=options.time)
    else:
        # could not pre-add to the cart; fall back to the regular purchase flow
        print("Pre-add failed! Running regular purchase...")
        sched.add_job(reg_Q,'date', run_date=options.time, args=[good_data])
    #sched.print_jobs()
    print('Press Ctrl+{0} to exit'.format('Pause(Break)' if os.name == 'nt' else 'C'))
    try:
        sched.start()
    except (KeyboardInterrupt, SystemExit):
        sched.shutdown(wait=False)
        print('Exit The Job!')
    else:
        sched.shutdown(wait=True)
    
Example #20
    'default': ThreadPoolExecutor(10),
    'processpool': ProcessPoolExecutor(3),
}


def my_job():
    cur_day = datetime.now().strftime('%Y-%m-%d')
    cur_time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    report_path = '../Fanli/Report/' + cur_day + '/'
    path = '../Fanli/'
    result = os.system('nosetests ' + path + ' --with-html  --html-report=' +
                       report_path + 'fanli_report_' + cur_time + '.html ')
    if result != 0:
        files = os.listdir(report_path)
        file = report_path + files[-1]
        mail.send_mail(file, 'getResouces API fail')


def create_json():
    CreateFile().create_file('getResource')


s = BlockingScheduler(executors=executors)
s.add_job(my_job, 'interval', seconds=5)
s.add_job(create_json, 'interval', seconds=5)

try:
    s.start()
except (SystemExit, KeyboardInterrupt):
    s.shutdown()
Example #21
class ApsPlanner(BasePlanner):
    """
    Planner implementing scheduling using the |APS|_. Scheduling sets the :any:`APS Job <apscheduler.job.Job>` as links' job.

    .. |APS| replace:: Advanced Python Scheduler
    .. _APS: https://apscheduler.readthedocs.io/en/stable/index.html
    .. _configuring-scheduler: https://apscheduler.readthedocs.io/en/stable/userguide.html#configuring-the-scheduler

    """
    def __init__(self,
                 links: Union[Link, List[Link]] = None,
                 threads: int = 30,
                 executors_override: dict = None,
                 job_defaults_override: dict = None,
                 ignore_exceptions: bool = False,
                 catch_exceptions: bool = None):
        """

        :type links: :any:`Link` or list[:any:`Link`]
        :param links: Links that should be added and scheduled.
            |default| :code:`None`

        :type threads: int
        :param threads: Number of threads available for job execution. Each link will be run on a separate thread job.
            |default| :code:`30`

        :type executors_override: dict
        :param executors_override: Overrides for executors option of `APS configuration <configuring-scheduler_>`__
            |default| :code:`None`

        :type job_defaults_override: dict
        :param job_defaults_override: Overrides for job_defaults option of `APS configuration <configuring-scheduler_>`__
            |default| :code:`None`

        :type ignore_exceptions: bool
        :param ignore_exceptions: Whether exceptions should be ignored or halt the planner.
            |default| :code:`False`

        :type catch_exceptions: bool
        :param catch_exceptions: (Deprecated since 0.2.0; use ignore_exceptions.) Whether exceptions should be ignored or halt the planner.
            |default| :code:`None`
        """

        self._threads = threads
        self._ignore_exceptions = ignore_exceptions
        if catch_exceptions is not None:  # pragma: no cover
            self._ignore_exceptions = catch_exceptions
            warnings.warn(
                '\'catch_exceptions\' was renamed to \'ignore_exceptions\' in version 0.2.0 and will be permanently changed in version 1.0.0',
                DeprecationWarning)

        if executors_override is None:
            executors_override = {}
        if job_defaults_override is None:
            job_defaults_override = {}

        executors = {
            'default': ThreadPoolExecutor(threads),
            **executors_override
        }
        job_defaults = {
            'coalesce': False,
            'max_instances': threads,
            **job_defaults_override
        }

        self._scheduler = BlockingScheduler(executors=executors,
                                            job_defaults=job_defaults,
                                            timezone='UTC')
        # self._scheduler = BackgroundScheduler(executors=executors, job_defaults=job_defaults, timezone=utc)
        self._scheduler.add_listener(self._on_exception, EVENT_JOB_ERROR)

        super().__init__(links)

    def _on_exception(self, event):
        if event.code == EVENT_JOB_ERROR:
            try:
                # It would be amazing if we could print the entire Link, but APS serialises Link.transfer to a string and that's all we have from Job's perspective.
                extra_info = f'\n\nRaised when executing {self._scheduler.get_job(event.job_id)}'
                exception_message = str(event.exception) + f'{extra_info}'
                traceback = event.exception.__traceback__

                try:
                    raise type(event.exception)(
                        exception_message).with_traceback(traceback)
                except TypeError:
                    # Some custom exceptions don't accept a single message argument and
                    # raise on construction; fall back to a generic Exception instead.
                    raise Exception(exception_message).with_traceback(
                        traceback) from None
            except Exception as e:
                _LOGGER.exception(e)

            if not self._ignore_exceptions and self.running:
                self.shutdown(wait=False)

    def _schedule(self, link: Link):
        """
        Schedule a link. Sets :any:`APS Job <apscheduler.job.Job>` as this link's job.

        :type link: :any:`Link`
        :param link: Link to be scheduled
        """

        job = self._scheduler.add_job(
            link.transfer,
            trigger=IntervalTrigger(seconds=link.interval.total_seconds()))
        link.set_job(job)

    def _unschedule(self, link: Link):
        """
        Unschedule a link.

        :type link: :any:`Link`
        :param link: Link to be unscheduled
        """
        if link.job is not None:
            link.job.remove()
            link.set_job(None)

    def start(self):
        """
        Start this planner. Calls :any:`APS Scheduler.start() <apscheduler.schedulers.base.BaseScheduler.start>`

        See :ref:`Start and Shutdown <start_shutdown>` to learn more about starting and shutdown.
        """
        super().start()

    def _start_planner(self):
        self._scheduler.start()

    def pause(self):
        """
        Pause this planner. Calls :any:`APScheduler.pause() <apscheduler.schedulers.base.BaseScheduler.pause>`
        """
        _LOGGER.info('Pausing %s' % str(self))
        self._scheduler.pause()

    def resume(self):
        """
        Resume this planner. Calls :any:`APScheduler.resume() <apscheduler.schedulers.base.BaseScheduler.resume>`
        """
        _LOGGER.info('Resuming %s' % str(self))
        self._scheduler.resume()

    def shutdown(self, wait: bool = True):
        """
        Shutdown this planner. Calls :any:`APScheduler.shutdown() <apscheduler.schedulers.base.BaseScheduler.shutdown>`

        See :ref:`Start and Shutdown <start_shutdown>` to learn more about starting and shutdown.

        :type wait: bool
        :param wait: Whether to wait until all currently executing jobs have finished.
            |default| :code:`True`
        """
        super().shutdown(wait)

    def _shutdown_planner(self, wait: bool = True):
        """
        Shutdown this planner. Calls :any:`APScheduler.shutdown() <apscheduler.schedulers.base.BaseScheduler.shutdown>`

        :type wait: bool
        :param wait: Whether to wait until all currently executing jobs have finished.
            |default| :code:`True`
        """
        self._scheduler.shutdown(wait=wait)

    def purge(self):
        """
        Unschedule and clear all links. It can be used while planner is running. APS automatically removes jobs, so we only clear the links.
        """
        for link in self.links:
            try:
                link.job.remove()
            except JobLookupError:
                pass  # APS already removed jobs if shutdown was called before purge, otherwise let's do it ourselves
            link.set_job(None)

        self._links = []

    @property
    def running(self):
        """
        Whether this planner is currently running. Changed by calls to :any:`start` and :any:`shutdown`.


        :return: State of this planner
        :rtype: bool
        """
        return self._scheduler.state == STATE_RUNNING

    def __repr__(self):
        return 'ApsPlanner(threads:%s)' % (self._threads)
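# Note: the exception handling in _on_exception() re-raises the job's exception
# with extra context appended to its message, falling back to a generic
# Exception for classes whose constructors reject a single message argument.
# That pattern in isolation:
def reraise_with_context(exc, extra):
    message = str(exc) + extra
    tb = exc.__traceback__
    try:
        raise type(exc)(message).with_traceback(tb)
    except TypeError:
        # constructor rejected the message; wrap in a plain Exception instead
        raise Exception(message).with_traceback(tb) from None


try:
    raise ValueError('boom')
except ValueError as e:
    try:
        reraise_with_context(e, ' (raised while executing some job)')
    except Exception as wrapped:
        print(type(wrapped).__name__, wrapped)  # ValueError boom (raised ...)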
Example #22
0
class SendScheduler(threading.Thread):
    def __init__(self):
        super().__init__()  # initialise the Thread base class
        self._running = True
        self.scheduler = BlockingScheduler()

    def get_transfer_latency(self, send_files_list, band_list):
        sum_latency = len(band_list) - 1
        last_latency = len(send_files_list[-1]) * BUFFER_SIZE / band_list[-1]
        return sum_latency + last_latency

    def send_job(self, sock, send_file, current_band, times):
        sock.send(send_file)
        print("[Client][{}kB/s] sending file packet {}...".format(
            current_band / 1000, times),
              end="")
        sent_size = len(send_file)
        print(len(send_file))

        # print(send_file)
        size_pack = sock.recv(4)
        recv_size, = struct.unpack("!I", size_pack)
        while sent_size != recv_size:
            print("times: {}|sent_size:{}|recv_size:{}".format(
                times, sent_size, recv_size))
            sock.send(send_file)
            print("[Client][{}kB/s] sending file packet {}...".format(
                current_band / 1000, times),
                  end="")
            sent_size = len(send_file)
            print(len(send_file))
            # print(send_file)
            size_pack = sock.recv(4)
            recv_size, = struct.unpack("!I", size_pack)

    def run(self, sock, send_files_list, band_list):
        if len(send_files_list) != len(band_list):
            print(
                "[Send Scheduler] Send ERROR: The send files fail to match the bandwidth list."
            )
            return
        else:
            band_num = len(band_list)

        i = 0
        j = 0
        for i in range(band_num):
            current_band = band_list[i]
            packet_num = math.ceil(current_band / BUFFER_SIZE)
            if len(send_files_list[i]) != packet_num and i != band_num - 1:
                print("[Send Scheduler]: Send ERROR: Wrong packet numbers.")
                print("i: {}|len: {}|packet_num:{}|band_num: {}".format(
                    i, len(send_files_list[i]), packet_num, band_num))
                return
            latency_delta = 1 / len(send_files_list[i])
            print("Cur_band: {}|packt_num:{}|latency_delta:{}".format(
                current_band, packet_num, latency_delta))
            send_files = send_files_list[i]
            count = 0
            for send_file in send_files:
                self.scheduler.add_job(
                    func=self.send_job,
                    args=(
                        sock,
                        send_file,
                        current_band,
                        j,
                    ),
                    next_run_time=datetime.datetime.now() +
                    datetime.timedelta(seconds=latency_delta * count))
                j += 1
                count += 1
        try:
            self.scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            self.scheduler.shutdown()

    def terminate(self):
        self.scheduler.shutdown()
        print("[Client] Send scheduler terminated.")
        self._running = False
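# Note: BlockingScheduler.start() blocks its calling thread, so wrapping it in
# a Thread (as SendScheduler does) effectively turns it into a background
# scheduler. A minimal standalone sketch of that arrangement:
import time
from threading import Thread

from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler()
scheduler.add_job(lambda: print('tick'), 'interval', seconds=1)

worker = Thread(target=scheduler.start, daemon=True)
worker.start()

time.sleep(3)  # main thread stays free for other work
scheduler.shutdown(wait=False)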
Example #23
0
    if args.action == 'add':
        add_char()

    elif args.action == 'list':
        list_char()

    elif args.action == 'remove':
        rm_char()

    elif args.action == 'start':
        km = KeyManager(key_store_path)
        if len(km.keys) == 0:
            print('no keys, add one with the add command')
            exit(3)
        for k in km.keys:
            iteration[tuple(k)] = 0  # init 24 hour counter for every key

        notify_store = StorageDictList(notify_store_path)
        scheduler = BlockingScheduler(timezone=utc)
        scheduler.add_job(do_stuff, 'interval', hours=1)
        try:
            print('starting scheduler...')
            scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            scheduler.shutdown(wait=False)
            print('scheduler stopped')
    elif args.action == 'test':
        test_mail()
    else:
        print('unknown action {act}'.format(act=args.action))
Example #24
0
def cron_trigger():
    global SCHEDULER
    SCHEDULER.add_job(func=alarm,
                      args=['cron'],
                      trigger='cron',
                      second='*/5',
                      id='cron_job')


# # Interval execution
# def interval_trigger():
#     global SCHEDULER
#     SCHEDULER.add_job(func=alarm, args=['interval'], trigger='interval', seconds=5, id='interval_job')
#
#
# # One-off execution
# def date_trigger():
#     global SCHEDULER
#     SCHEDULER.add_job(func=alarm, args=['date'], trigger='date', run_date=datetime.now(), id='date_job')

SCHEDULER = BlockingScheduler()
if __name__ == '__main__':
    cron_trigger()
    # interval_trigger()
    # date_trigger()

    try:
        SCHEDULER.start()
    except (KeyboardInterrupt, SystemExit):
        SCHEDULER.shutdown()
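# Note: the helpers above (one active, two commented out) cover APScheduler's
# three trigger types. A consolidated sketch, with alarm as a stand-in job:
from datetime import datetime

from apscheduler.schedulers.blocking import BlockingScheduler


def alarm(source):
    print('alarm fired via', source)


scheduler = BlockingScheduler()
scheduler.add_job(alarm, trigger='cron', args=['cron'], second='*/5', id='cron_job')
scheduler.add_job(alarm, trigger='interval', args=['interval'], seconds=5, id='interval_job')
scheduler.add_job(alarm, trigger='date', args=['date'], run_date=datetime.now(), id='date_job')
scheduler.start()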
Example #25
0
class Heating(object):
  def __init__(self):
    with open('config.json') as json_data:
      self.config = json.load(json_data)
      json_data.close()
    logger.debug('Configuration: ' + str(self.config))

    self.processing_lock = threading.Lock()
    self.calendar_lock = threading.Lock()
    self.relay_lock = threading.Lock()

    self.heating_trigger = None
    self.preheat_trigger = None
    self.event_trigger = None
    #Sensible defaults
    self.events = None
    self.desired_temp = self.config['heating_settings']['minimum_temperature']
    self.current_temp = None
    self.proportional_time = 0
    self.time_on = None
    self.time_off = None
    self.event_sync_id = None

    self.relays = None
    self.relays_heating = None
    self.relays_preheat = None

    self.http_server = None
    self.temp_sensors = {}
    self.sched = None

    self.outside_temp = None
    self.outside_apparent_temp = None

  def start(self):
    logger.info('Starting')
    self.credentials = self.get_credentials()

    self.darksky_details = self.get_darksky_details()

    logger.debug('Setting up scheduler and error handler')
    self.sched = BlockingScheduler()
    self.sched.add_listener(self.scheduler_listener, EVENT_JOB_ERROR | EVENT_JOB_MAX_INSTANCES)

    logger.debug('Searching for temperature sensors')
    try:
      self.find_temp_sensors()
    except NoTagsFoundException:
      pass

    logger.debug('Searching for relay')
    #self.relay = BTRelay.find_relay()
    #self.relay = USBRelay.find_relay()
    self.relays = USBMultipleRelays.find_relays()
    for relay in self.relays._relays:
      if relay.port_numbers == tuple(self.config['relays']['heating']):
        self.relays_heating = relay
        logger.info('Found heating relay at ' + str(relay.port_numbers))
      elif relay.port_numbers == tuple(self.config['relays']['preheat']):
        self.relays_preheat = relay
        logger.info('Found preheat relay at ' + str(relay.port_numbers))
    if self.relays_heating is None:
      raise Exception('No heating relay found')

    logger.debug('Creating scheduler jobs')
    #Get new events every X minutes
    self.sched.add_job(self.get_next_event, trigger = 'cron', \
        next_run_time = pytz.utc.localize(datetime.datetime.utcnow()), hour = '*/' + str(self.config['calendar_settings']['update_calendar_interval_hours']), minute = 0)

    self.sched.add_job(self.update_outside_temperature, trigger = 'cron', \
        next_run_time = pytz.utc.localize(datetime.datetime.utcnow()), hour = '*', minute = '*/15')

    #Scan for new devices every minute
    self.sched.add_job(self.find_temp_sensors, trigger = 'cron', \
        next_run_time = pytz.utc.localize(datetime.datetime.utcnow()), hour = '*', minute = '*')

    HttpHandler.heating = self
    logger.debug('Starting HTTP server')
    self.http_server = ThreadedHTTPServer(('localhost', 8080), HttpHandler)
    http_server_thread = threading.Thread(target=self.http_server.serve_forever)
    http_server_thread.daemon = True # don't hang on exit
    http_server_thread.start()

    logger.debug('Starting scheduler')
    try:
      self.sched.start()
    except Exception as e:
      logger.error('Error in scheduler: ' + str(e))
      self.http_server.shutdown()
      self.sched.shutdown(wait = False)

  def scheduler_listener(self, event):
    if event.exception is not None or event.code == EVENT_JOB_MAX_INSTANCES:
      logger.error('Error in scheduled event: ' + str(event))
      logger.debug(type(event.exception))
      if not isinstance(event.exception, NoTemperatureException) and not isinstance(event.exception, NoTagsFoundException):
        logger.error('Killing all the things')
        raise Exception(str(event))
        #self.http_server.shutdown()
        #self.sched.shutdown(wait = False)
        #exit(1)

  def find_temp_sensors(self):
    self.temp_sensors = TempSensor.find_temp_sensors(self.temp_sensors)
    for sensor in list(self.temp_sensors.values()):
      if sensor.temp_job_id is None:
        logger.info('Setting scheduler job for ' + sensor.mac)
        #Get a new temperature every minute
        sensor.temp_job_id = self.sched.add_job(self.get_temperature, trigger = 'interval', \
          start_date = datetime.datetime.now(), seconds = self.config['heating_settings']['update_temperature_interval_seconds'], \
          name = sensor.mac + ' temperature job', args = (sensor,))

  def heating_on(self, proportion):
    self.time_on = pytz.utc.localize(datetime.datetime.utcnow())
    self.time_off = None
    self.proportional_time = proportion
    logger.debug('Getting relay lock')
    self.relay_lock.acquire()
    logger.debug('Got relay lock')
    self.relays_heating.on()
    logger.debug('Releasing relay lock')
    self.relay_lock.release()
    self.set_heating_trigger(proportion, True)

  def heating_off(self, proportion):
    self.time_off = pytz.utc.localize(datetime.datetime.utcnow())
    self.time_on = None
    logger.debug('Getting relay lock')
    self.relay_lock.acquire()
    logger.debug('Got relay lock')
    self.relays_heating.off()
    logger.debug('Releasing relay lock')
    self.relay_lock.release()
    self.set_heating_trigger(proportion, False)

  def preheat_on(self, time_off):
    logger.debug('Getting relay lock')
    self.relay_lock.acquire()
    logger.debug('Got relay lock')
    self.relays_preheat.on()
    logger.debug('Releasing relay lock')
    self.relay_lock.release()
    self.set_preheat_trigger(time_off)

  def preheat_off(self):
    logger.debug('Getting relay lock')
    self.relay_lock.acquire()
    logger.debug('Got relay lock')
    self.relays_preheat.off()
    logger.debug('Releasing relay lock')
    self.relay_lock.release()

  def check_relay_states(self):
    logger.debug('Checking states ' + str(self.relays.all_status()))
    idx = 0
    for s in self.relays.all_status():
      idx += 1
      if s == 0:
        self.relays.one_off(idx)
      else:
        self.relays.one_on(idx)

  def set_heating_trigger(self, proportion, on):
    self.proportional_time = proportion
    if self.heating_trigger is not None:
      try:
        self.heating_trigger.remove()
      except JobLookupError as e:
        pass
      self.heating_trigger = None

    if on:
      if proportion < self.config['heating_settings']['proportional_heating_interval_minutes']:
        run_date = self.time_on + datetime.timedelta(0,self.proportional_time * 60)
        logger.info('New proportional time: ' + str(proportion) + '/' + str(self.config['heating_settings']['proportional_heating_interval_minutes']) +\
          ' mins - will turn off at ' + str(run_date.astimezone(get_localzone())))
        self.heating_trigger = self.sched.add_job(\
          self.process, trigger='date', run_date=run_date, name='Proportional off at ' + str(run_date.astimezone(get_localzone())))
    else:
      if proportion > 0:
        if self.time_off is None:
          self.time_off = pytz.utc.localize(datetime.datetime.utcnow())
        run_date = self.time_off + datetime.timedelta(0,(self.config['heating_settings']['proportional_heating_interval_minutes'] - self.proportional_time) * 60)
        logger.info('New proportional time: ' + str(proportion) + '/' + str(self.config['heating_settings']['proportional_heating_interval_minutes']) +\
          ' mins - will turn on at ' + str(run_date.astimezone(get_localzone())))
        self.heating_trigger = self.sched.add_job(\
          self.process, trigger='date', run_date=run_date, name='Proportional on at ' + str(run_date.astimezone(get_localzone())))

  def set_preheat_trigger(self, time_off):
    if self.preheat_trigger is not None:
      try:
        self.preheat_trigger.remove()
      except JobLookupError as e:
        pass
      self.preheat_trigger = None
    logger.info('Preheat off at ' + str(time_off.astimezone(get_localzone())))
    self.preheat_trigger = self.sched.add_job(\
      self.process, trigger='date', run_date=time_off, name='Preheat off at ' + str(time_off.astimezone(get_localzone())))


  def get_temperature(self, sensor):
    try:
      sensor.get_ambient_temp()
    except NoTemperatureException as e:
      logger.warning('Removing sensor ' + sensor.mac + ' from sensors list due to disconnection')
      try:
        sensor.temp_job_id.remove()
      except JobLookupError as e:
        pass
      del self.temp_sensors[sensor.mac]

    self.update_current_temp()

  def update_current_temp(self):
    temps = []
    for mac, sensor in self.temp_sensors.items():
      if sensor.amb_temp is not None:
        temps.append(sensor.amb_temp)

    if not temps:
      raise NoTemperatureException()
    #self.current_temp = sum(temps) / float(len(temps))
    self.current_temp = min(temps)
    logger.info('Overall temperature is now ' + str(self.current_temp) + ' from ' + str(temps))

    self.process()

  def get_next_event(self):
    self.calendar_lock.acquire()
    http = self.credentials.authorize(httplib2.Http(timeout=self.config['calendar_settings']['calendar_timeout_seconds']))
    service = discovery.build('calendar', 'v3', http=http)

    now = datetime.datetime.utcnow().isoformat() + 'Z'
    logger.debug('Getting the next event')
    try:
      eventsResult = service.events().list(
        calendarId=self.config['calendar_settings']['calendar_id'], timeMin=now, maxResults=3, singleEvents=True, orderBy='startTime').execute()
      events = eventsResult.get('items', [])
      self.event_sync_id = str(uuid.uuid4())
      logger.debug('Sending request: ' + str({'id':self.event_sync_id, \
              'type':'web_hook', \
              'address':'https://www.steev.me.uk/heating/events', \
              'expiration':(int(time.time())+(self.config['calendar_settings']['update_calendar_interval_hours']*60*60))*1000 \
             }))
      hook_response = service.events().watch(calendarId=self.config['calendar_settings']['calendar_id'], \
        body={'id':self.event_sync_id, \
              'type':'web_hook', \
              'address':'https://www.steev.me.uk/heating/events', \
              'expiration':(int(time.time())+(self.config['calendar_settings']['update_calendar_interval_hours']*60*60))*1000 \
             })\
        .execute()
      if hook_response is not None:
        logger.debug('Got response' + str(hook_response) + ' from web_hook call')
    except HttpError as e:
      logger.error('HttpError, resp = ' + str(e.resp) + '; content = ' + str(e.content))
      logger.exception(e)
      self.calendar_lock.release()
      return
    except Exception as e:
      logger.exception(e)
      self.calendar_lock.release()
      return

    parsed_events = []
    if events:
      counter = 0
      for event in events:
        counter += 1
        start = event['start'].get('dateTime', event['start'].get('date'))
        start_date = parser.parse(start)
        end = event['end'].get('dateTime', event['end'].get('date'))
        end_date = parser.parse(end)

        try:
          desired_temp = float(event['summary'])
        except ValueError:
          if event['summary'].lower() == 'on':
            desired_temp = 'On'
          elif event['summary'].lower() == 'preheat':
            desired_temp = 'Preheat'
          else:
            # Unrecognised summary; skip the event rather than hit an
            # unbound desired_temp below
            continue

        logger.info('Event ' + str(counter) + ' is ' + str(start_date.astimezone(get_localzone())) + \
          ' to ' + str(end_date.astimezone(get_localzone())) + ': ' + str(desired_temp))
        parsed_events.append({'start_date': start_date, 'end_date': end_date, 'desired_temp': desired_temp})
        if counter == 1:
          #Set a schedule to get the one after this
          if self.event_trigger is not None:
            try:
              self.event_trigger.remove()
            except JobLookupError as e:
              pass
            self.event_trigger = None

          self.event_trigger = self.sched.add_job(self.get_next_event, \
            trigger='date', run_date=end_date, name='Event end at ' + str(end_date.astimezone(get_localzone())))

          #Tell the processing that this is a new event so it resets the proportion to start again
          if self.events is None or start_date != self.events[0]['start_date'] or end_date != self.events[0]['end_date'] or desired_temp != self.events[0]['desired_temp']:
            logger.info('New event starting, resetting time off.')
            self.time_off = None

      self.events = parsed_events
    else:
      self.events = None

    self.calendar_lock.release()
    self.process()

  def update_outside_temperature(self):
    try:
      logger.info('Getting new outside temperature')
      with urllib.request.urlopen('https://api.darksky.net/forecast/' + self.darksky_details['api_key'] + '/' + self.darksky_details['latlong'] + '?exclude=[minutely,hourly,daily]&units=si') as darksky_url:
        data = json.loads(darksky_url.read().decode())
      logger.debug(str(data))

      if data['currently']:
        if data['currently']['apparentTemperature']:
          self.outside_apparent_temp = data['currently']['apparentTemperature']
          logger.info('Got outside apparent temperature: ' + str(self.outside_apparent_temp))
        if data['currently']['temperature']:
          self.outside_temp = data['currently']['temperature']
          logger.info('Got outside temperature: ' + str(self.outside_temp))
    except Exception as e:
      pass

  def process(self):
    logger.debug('Processing')
    #Main calculations. Figure out whether the heating needs to be on or not.
    if self.current_temp is None:
      return

    self.processing_lock.acquire()

    current_time = pytz.utc.localize(datetime.datetime.utcnow())
    current_temp = self.current_temp
    time_due_on  = None
    have_temp_event = False
    forced_on = False
    have_preheat = False

    if current_temp < self.config['heating_settings']['minimum_temperature']:
      #If we're below the minimum allowed temperature, turn on at full blast.
      logger.info('Temperature is below minimum, turning on')
      self.desired_temp = str(self.config['heating_settings']['minimum_temperature'])
      self.heating_on(self.config['heating_settings']['proportional_heating_interval_minutes'])

    elif self.events is not None:
      #Find preheat events
      index = -1
      while index < 3:
        index += 1

        if index >= len(self.events):
          break

        if self.events[index]['desired_temp'] == 'Preheat':
          if self.events[index]['start_date'] < current_time and not self.events[index]['end_date'] < current_time:
            have_preheat = True
            if not(self.relays_preheat._status):
              logger.info('Preheat on')
              self.preheat_on(self.events[index]['end_date'])
            break

      if (not have_preheat) and self.relays_preheat._status:
        self.preheat_off()

      #Find normal events
      index = -1
      next_time = None

      while index < 3:
        index += 1

        if index >= len(self.events):
          break

        if self.events[index]['desired_temp'] == 'Preheat':
          continue
        elif self.events[index]['desired_temp'] == 'On':
          if self.events[index]['start_date'] < current_time and not self.events[index]['end_date'] < current_time:
            forced_on = True
            if not(self.relays_heating._status):
              logger.info('Heating forced on')
              self.heating_on(self.config['heating_settings']['proportional_heating_interval_minutes'])
        else:
          have_temp_event = True
          break

    if have_temp_event:
      next_time =     self.events[index]['start_date']
      next_time_end = self.events[index]['end_date']
      next_temp =     self.events[index]['desired_temp']

      logger.debug('Processing data: ' + str(next_time.astimezone(get_localzone())) + \
        ' to ' + str(next_time_end.astimezone(get_localzone())) + ', ' + str(next_temp))

      self.desired_temp = str(next_temp)

      if next_time_end < current_time:
        #If the last event ended in the past, off.
        logger.warning('Event end time is in the past.')
        self.heating_off(0)

      elif not forced_on:
        temp_diff = next_temp - current_temp
        new_proportional_time = None
        if next_time < current_time:
          time_due_on = next_time
          logger.info('Currently in an event starting at ' + str(next_time.astimezone(get_localzone())) + \
            ' ending at ' + str(next_time_end.astimezone(get_localzone())) + ' temp diff is ' + str(temp_diff))

        #Check all events for warm-up temperature
        for event in self.events:
          if event['desired_temp'] == 'On' or event['desired_temp'] == 'Preheat':
            continue

          event_next_time = event['start_date']
          if event_next_time > current_time:
            event_desired_temp = event['desired_temp']
            event_temp_diff = event_desired_temp - current_temp
            logger.debug('Future event starting at ' + str(event_next_time.astimezone(get_localzone())) + \
              ' temp difference is ' + str(event_temp_diff))
            if event_temp_diff > 0:
              #Start X minutes earlier for each degree the heating is below the desired temp, plus Y minutes.
              event_time_due_on = event_next_time - datetime.timedelta(0,(event_temp_diff * self.config['heating_settings']['minutes_per_degree'] * 60) + (self.config['heating_settings']['effect_delay_minutes'] * 60))
              logger.debug('Future event needs warm up, due on at ' + str(event_time_due_on.astimezone(get_localzone())))
              if time_due_on is None or event_time_due_on < time_due_on or event_time_due_on < current_time:
                time_due_on = event_time_due_on
                next_temp = event_desired_temp
                temp_diff = event_temp_diff
                logger.debug('Future event starting at ' + str(event_next_time.astimezone(get_localzone())) + \
                  ' warm-up, now due on at ' + str(time_due_on.astimezone(get_localzone())))
                #Full blast until 0.3 degrees difference
                if event_temp_diff > 0.3:
                  new_proportional_time = 30
              elif time_due_on is None or event_next_time < time_due_on:
                time_due_on = event_next_time

        if time_due_on is not None and time_due_on < next_time:
          logger.info('Before an event starting at ' + str(next_time.astimezone(get_localzone())) +\
            ' temp diff is ' + str(temp_diff) + ' now due on at ' + str(time_due_on.astimezone(get_localzone())))

        if time_due_on is not None and time_due_on <= current_time:
          if temp_diff < 0:
            logger.info('Current temperature ' + str(current_temp) + ' is higher than the desired temperature ' + str(next_temp))
            self.heating_off(0)
          else:
            if new_proportional_time is None:
              #Calculate the proportional amount of time the heating needs to be on to reach the desired temperature
              new_proportional_time = temp_diff * self.config['heating_settings']['proportional_heating_interval_minutes'] / 2

            if new_proportional_time < self.config['heating_settings']['minimum_active_period_minutes']: #Minimum time boiler can be on to be worthwhile
              new_proportional_time = self.config['heating_settings']['minimum_active_period_minutes']
            elif new_proportional_time > self.config['heating_settings']['proportional_heating_interval_minutes']:
              new_proportional_time = self.config['heating_settings']['proportional_heating_interval_minutes']

            #Are we currently on or off?
            if not(self.relays_heating._status) or self.time_on is None: #Off
              if self.time_off is None:
                time_due_on = next_time
                new_time_due_on = next_time
              elif new_proportional_time <= self.proportional_time:
                #Need to be on for less time - turn on in a bit
                time_due_on = self.time_off + datetime.timedelta(0,(self.config['heating_settings']['proportional_heating_interval_minutes'] * 60) - (self.proportional_time * 60))
                new_time_due_on = self.time_off + datetime.timedelta(0,(self.config['heating_settings']['proportional_heating_interval_minutes'] * 60) - (new_proportional_time * 60))
              else:
                #Need to be on for more time - turn on now
                time_due_on = self.time_off + datetime.timedelta(0,(self.config['heating_settings']['proportional_heating_interval_minutes'] * 60) - (self.proportional_time * 60))
                new_time_due_on = current_time

              if new_time_due_on <= current_time:
                logger.info('Heating is off, due on at ' + str(new_time_due_on.astimezone(get_localzone())) +'; Turning on')
                self.heating_on(new_proportional_time)
              else:
                if new_proportional_time != self.proportional_time:
                  logger.info('Changing time next due on.')
                  self.set_heating_trigger(new_proportional_time, self.relays_heating._status)
                if time_due_on != new_time_due_on:
                  logger.info('Heating was off, due on at ' + str(time_due_on.astimezone(get_localzone())) +\
                                 '. Now due on at ' + str(new_time_due_on.astimezone(get_localzone())))
            else: #On
              time_due_off = self.time_on + datetime.timedelta(0,self.proportional_time * 60)
              if new_proportional_time < self.config['heating_settings']['proportional_heating_interval_minutes']:
                #Must have a time_on at this point
                new_time_due_off = self.time_on + datetime.timedelta(0,new_proportional_time * 60)
              else:
                new_time_due_off = next_time_end

              if new_time_due_off < current_time:
                logger.info('Heating was on, due off at ' + str(time_due_off.astimezone(get_localzone())) +'; Turning off')
                self.heating_off(new_proportional_time)
              else:
                if new_proportional_time != self.proportional_time:
                  logger.info('Changing time next due off.')
                  self.set_heating_trigger(new_proportional_time, self.relays_heating._status)
                if new_time_due_off != time_due_off:
                  logger.info('Heating was on, due off at ' + str(time_due_off.astimezone(get_localzone())) +\
                               '. Now due off at ' + str(new_time_due_off.astimezone(get_localzone())))

    else:
      self.desired_temp = str(self.config['heating_settings']['minimum_temperature'])
      #If we don't have an event yet, warn and ensure relay is off
      logger.info('No events available')
      if self.relays_heating._status:
        logger.debug('Heating off')
        self.heating_off(0)
      if have_preheat:
        logger.info('Preheat but no normal event available.')
      else:
        if self.relays_preheat._status:
          logger.debug('Preheat off')
          self.preheat_off()

    self.check_relay_states()
    self.processing_lock.release()

  def get_credentials(self):
    '''Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.
    '''
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    logger.debug('Getting credentials from ' + credential_dir)
    if not os.path.exists(credential_dir):
      os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'calendar-heating.json')

    store = oauth2client.file.Storage(credential_path)
    credentials = store.get()
    parser = argparse.ArgumentParser(parents=[tools.argparser])
    flags = parser.parse_args()
    if not credentials or credentials.invalid:
      flow = client.flow_from_clientsecrets('client_secret.json', 'https://www.googleapis.com/auth/calendar.readonly')
      flow.user_agent = 'Heating'
      credentials = tools.run_flow(flow, store, flags)
      logger.info('Storing credentials to ' + credential_path)
    return credentials

  def get_darksky_details(self):
    with open('darksky_details.json') as json_data:
      details = json.load(json_data)
      json_data.close()
    logger.debug('DarkSky details: ' + str(details))
    return details
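# Note: scheduler_listener above reacts both to job errors and to jobs skipped
# because max_instances was reached. A standalone sketch of that listener
# wiring (the max-instances event carries no exception, hence the getattr):
from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_MAX_INSTANCES
from apscheduler.schedulers.blocking import BlockingScheduler


def listener(event):
    if getattr(event, 'exception', None) is not None or event.code == EVENT_JOB_MAX_INSTANCES:
        print('scheduled job failed:', event)


sched = BlockingScheduler()
sched.add_listener(listener, EVENT_JOB_ERROR | EVENT_JOB_MAX_INSTANCES)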
Example #26
0
    def my_listener(event):
        if event.exception:
            print('Scheduled job failed: ' + str(event))
            # sms.send_wrong_sms()
            sched.shutdown()
            # time.sleep(5)
            # sched.start()
        else:
            pass

    # Send the routine SMS
    def send_sms():
        sms.send_normal_sms()

    # sched.add_job(func=my_email, trigger='interval', seconds=60)
    sched.add_job(func=my_bd, trigger='interval', seconds=150)
    # sched.add_job(func=send_sms, trigger='cron', hour=15, minute=00)
    sched.add_listener(my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    sched._logger = logging
    sched.start()
except Exception as e:
    # sms.send_wrong_sms()
    nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    newsql = "insert into tab_send_email (address_to,mail_subject,mail_text) values('*****@*****.**','定时任务出现问题'+'" + \
        nowtime + "','" + str(e) + "')"
    tools.warning(str(e))
    ms.ExecNonQuery(newsql)
    sched.shutdown()
    time.sleep(5)
    sched.start()
Example #27
0
class FreezerScheduler(object):
    def __init__(self, apiclient, interval, job_path):
        # config_manager
        self.client = apiclient
        self.freezerc_executable = spawn.find_executable('freezerc')
        if self.freezerc_executable is None:
            # Needed in the case of a non-activated virtualenv
            self.freezerc_executable = spawn.find_executable(
                'freezerc', path=':'.join(sys.path))
        self.job_path = job_path
        self._client = None
        self.lock = threading.Lock()
        self.execution_lock = threading.Lock()
        job_defaults = {
            'coalesce': True,
            'max_instances': 1
        }
        self.scheduler = BlockingScheduler(job_defaults=job_defaults)
        if self.client:
            self.scheduler.add_job(self.poll, 'interval',
                                   seconds=interval, id='api_poll')

        self.add_job = self.scheduler.add_job
        self.remove_job = self.scheduler.remove_job
        self.jobs = {}

    def get_jobs(self):
        if self.client:
            job_doc_list = utils.get_active_jobs_from_api(self.client)
            try:
                utils.save_jobs_to_disk(job_doc_list, self.job_path)
            except Exception as e:
                logging.error('Unable to save jobs to {0}. '
                              '{1}'.format(self.job_path, e))
            return job_doc_list
        else:
            return utils.get_jobs_from_disk(self.job_path)

    def start_session(self, session_id, job_id, session_tag):
        if self.client:
            return self.client.sessions.start_session(session_id,
                                                      job_id,
                                                      session_tag)
        else:
            raise Exception("Unable to start session: api not in use.")

    def end_session(self, session_id, job_id, session_tag, result):
        if self.client:
            return self.client.sessions.end_session(session_id,
                                                    job_id,
                                                    session_tag,
                                                    result)
        else:
            raise Exception("Unable to end session: api not in use.")

    def upload_metadata(self, metadata_doc):
        if self.client:
            self.client.backups.create(metadata_doc)

    def start(self):
        utils.do_register(self.client)
        self.poll()
        self.scheduler.start()

    def update_job(self, job_id, job_doc):
        if self.client:
            try:
                return self.client.jobs.update(job_id, job_doc)
            except Exception as e:
                logging.error("[*] Job update error: {0}".format(e))

    def update_job_status(self, job_id, status):
        doc = {'job_schedule': {'status': status}}
        self.update_job(job_id, doc)

    def is_scheduled(self, job_id):
        return self.scheduler.get_job(job_id) is not None

    def create_job(self, job_doc):
        job = Job.create(self, self.freezerc_executable, job_doc)
        if job:
            self.jobs[job.id] = job
            logging.info("Created job {0}".format(job.id))
        return job

    def poll(self):
        try:
            work_job_doc_list = self.get_jobs()
        except Exception as e:
            logging.error("[*] Unable to get jobs: {0}".format(e))
            return

        work_job_id_list = []

        # create job if necessary, then let it process its events
        for job_doc in work_job_doc_list:
            job_id = job_doc['job_id']
            work_job_id_list.append(job_id)
            job = self.jobs.get(job_id, None) or self.create_job(job_doc)
            if job:
                job.process_event(job_doc)

        # request removal of any job that has been removed in the api
        for job_id, job in self.jobs.items():
            if job_id not in work_job_id_list:
                job.remove()

        remove_list = [job_id for job_id, job in self.jobs.items()
                       if job.can_be_removed()]

        for k in remove_list:
            self.jobs.pop(k)

    def stop(self):
        try:
            self.scheduler.shutdown(wait=False)
        except Exception:
            pass

    def reload(self):
        logging.warning("reload not supported")
Example #28
0
        name = name.replace(' ', '%20')

        insertPrice(price, name, currency)

        return {'name': name, 'price': price, 'currency': currency}

    elif req.status_code == 400:

        print('400: Resource not found')

        return {}


# SCHEDULER ############################################################

sched = BlockingScheduler()

# @sched.scheduled_job('interval', minutes=1)
# def interval_job():
#     scrapBookDepository()


@sched.scheduled_job('cron', day_of_week='mon-sun', hour=15)
def cron_job():
    scrapBookDepository()


atexit.register(lambda: sched.shutdown())

sched.start()
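# Note: shutting down a scheduler that has already stopped raises
# SchedulerNotRunningError, so the atexit hook above is safer with a guard
# (a sketch, not part of the original):
import atexit

from apscheduler.schedulers.blocking import BlockingScheduler

sched = BlockingScheduler()


def safe_shutdown():
    if sched.running:
        sched.shutdown(wait=False)


atexit.register(safe_shutdown)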
Example #29
0
            status = cachet.PERFORMANCE_ISSUES
        elif first == 'FAILED' and second == 'SUCCESS':
            status = cachet.PARTIAL_OUTAGE
        else:
            status = cachet.MAJOR_OUTAGE

        monitors.append(CachetComponent(id='synthetics-{}'.format(monitor_id),
                                        name=monitor_name[monitor_id],
                                        status=status))
    return monitors


def update():
    fetch_snitches()
    fetch_newrelic()
    fetch_synthetics()
    if DMS_PING_URL:
        requests.get(DMS_PING_URL)


if __name__ == '__main__':
    from apscheduler.schedulers.blocking import BlockingScheduler
    scheduler = BlockingScheduler()

    scheduler.add_job(update, 'interval', minutes=2, max_instances=1, coalesce=True)

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
Example #30
0
        # else:
        #     forecast.forecast(res.keys()[0][0])


if __name__ == '__main__':
    jobstores = {'default': MemoryJobStore()}
    executors = {'default': ThreadPoolExecutor(20)}
    job_defaults = {'coalesce': False, 'max_instances': 1}
    logger.info("Starting preprocessor")
    scheduler = BlockingScheduler(jobstores=jobstores,
                                  executors=executors,
                                  job_defaults=job_defaults,
                                  timezone=utc)
    for query_key in queries_config:
        kwArgs = {
            "query": queries_config[query_key],
            "rawDataDB": rawDataDB,
            "preprocessOutputDB": preprocessOutputDB
        }
        logger.debug(kwArgs)
        logger.info("Adding job to preprocess %s", query_key)
        scheduler.add_job(loadAndPreprocess,
                          'interval',
                          name=query_key,
                          kwargs=kwArgs,
                          seconds=int(default_config['query_mean_interval']))
    try:
        scheduler.start()
    finally:
        scheduler.shutdown()
Example #31
0
class ScrapeScheduler(Executor):
    def __init__(self):
        Executor.register(ScrapeScheduler)
        self._logger = logging.getLogger(type(self).__name__)
        self._scheduler = None
        self._executor = None
        self._cache = None  # expected to be injected before register_cache is called
        self._job_list = list()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._executor.shutdown()

    def register_cache(self, cache_name):
        self._cache.register_catelog(cache_name)

    def register_job(self, job: BaseJob):
        '''
        Default to run every minute
        :param job:
        :return:
        '''
        self._job_list.append(job)

    def _create_executors(self):
        max_instances = len(
            [job for job in self._job_list if job.schedule_type != 'one-off'])
        max_instances = LogicUtil.if_else_default(max_instances, 1,
                                                  lambda x: x > 0)
        job_defaults = {'max_instances': max_instances}
        self._scheduler = BlockingScheduler(job_defaults=job_defaults)
        self._executor = TaskExecutor()

    def _add_hooks(self):
        _self = self

        def shutdown_hook(event):
            e = event.exception
            if e:
                _self._logger.error(
                    f'{Formatter.get_timestamp()} - Scheduler crashed!, {type(e)} - {e}'
                )
                if isinstance(e, KeyboardInterrupt):
                    if _self._scheduler is not None:
                        _self._scheduler.remove_all_jobs()
                        _self._scheduler.shutdown()
                    if _self._executor is not None:
                        _self._executor.shutdown()

        self._scheduler.add_listener(shutdown_hook, EVENT_JOB_ERROR)

    def _add_registered_jobs(self):
        for job in self._job_list:
            if job.schedule_type == "sec":
                self._scheduler.add_job(job.run,
                                        'interval',
                                        id=type(job).__name__,
                                        seconds=job.sec)
            elif job.schedule_type == "cron":
                self._scheduler.add_job(job.run,
                                        CronTrigger.from_crontab(job.cron),
                                        id=type(job).__name__)
            elif job.schedule_type == "one-off":
                self._scheduler.add_job(job.run, id=type(job).__name__)

    def start(self):
        # Lazy init
        self._create_executors()
        self._add_hooks()
        self._add_registered_jobs()

        self._scheduler.start()

    def shutdown(self):
        self._scheduler.shutdown()
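# Note: for cron-type jobs the class above relies on CronTrigger.from_crontab,
# which parses a standard five-field crontab expression:
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.cron import CronTrigger

scheduler = BlockingScheduler()
trigger = CronTrigger.from_crontab('*/5 * * * *')  # minute hour day month weekday
scheduler.add_job(lambda: print('cron tick'), trigger, id='demo')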
Example #32
0
class DakokuManager(object):
    def __init__(self, config_path, schedule_path):
        self.config_path = config_path
        self.schedule_path = schedule_path
        cfg = self._load_config()
        try:
            human_mode_min = cfg["human_mode"]
        except KeyError:
            human_mode_min = 0
        try:
            self.log_dir = cfg["log_dir"]
            file_handler = logging.FileHandler(os.path.join(self.log_dir, "dakoku.log"), 'a+')
            file_handler.setLevel(LEVEL)
            log.addHandler(file_handler)
            log.info("saving log to %s", self.log_dir)
        except Exception:
            self.log_dir = None
        self.api_key = cfg["api_key"]

        sched = self._load_schedule()
        # pytz zones must be attached with localize(); replace(tzinfo=...) would
        # silently use the zone's historical LMT offset
        tokyo = pytz.timezone('Asia/Tokyo')
        start_date = tokyo.localize(dt.datetime.strptime(sched["valid"]["start"], '%Y-%m-%d'))
        end_date = tokyo.localize(dt.datetime.strptime(sched["valid"]["end"], '%Y-%m-%d'))
        log.info("dakoku is valid for %s - %s", start_date, end_date)
        holidays = self._get_holidays(start_date, end_date)
        self.worker = DakokuWorker(cfg["host"], cfg["user"], cfg["pass"], holidays, self.log_dir)
        self.register(sched["working"], start_date, end_date, holidays, human_mode_min)

    def _load_config(self):
        log.debug("loading config from %s", self.config_path)
        with open(self.config_path, 'r') as f:
            cfg = json.load(f)
        return cfg

    def _load_schedule(self):
        log.debug("loading schedule from %s", self.schedule_path)
        with open(self.schedule_path, 'r') as f:
            cfg = json.load(f)
        return cfg
        
    def _get_holidays(self, start_date, end_date):
        holidays = japanese_holiday.getholidays(
            str(self.api_key),  # this function does not allow unicode
            japanese_holiday.HOLIDAY_TYPE_OFFICIAL_JA,
            start_date.strftime('%Y-%m-%d'),
            end_date.strftime('%Y-%m-%d'),
        )
        ret = []
        for hd in holidays:
            date_str = hd['start']['date']
            ret.append(pytz.timezone('Asia/Tokyo').localize(dt.datetime.strptime(date_str, '%Y-%m-%d')))
        return ret

    def register(self, working, start_date, end_date, holidays, human_mode_min=0):
        self.scheduler = Scheduler(timezone=pytz.timezone('Asia/Tokyo'), logger=log)
        today = dt.date.today()
        for w in working:
            # schedule shukkin
            h, m = map(int, w["from"].split(':'))
            fromtime = dt.time(h,m,tzinfo=pytz.timezone('Asia/Tokyo'))
            d = dt.datetime.combine(today, fromtime) - dt.timedelta(minutes=human_mode_min)
            trigger = CronTrigger(day_of_week=w["dayOfWeek"],
                                  hour=d.hour, minute=d.minute,
                                  start_date=start_date,
                                  end_date=end_date,
                                  timezone=pytz.timezone('Asia/Tokyo'))
            self.scheduler.add_job(dispatch_after(human_mode_min * 60,
                                                  self.worker.work_start),
                                   trigger)
            # schedule taikin
            h, m = map(int, w["till"].split(':'))
            tilltime = dt.time(h,m,tzinfo=pytz.timezone('Asia/Tokyo'))
            trigger = CronTrigger(day_of_week=w["dayOfWeek"],
                                  hour=tilltime.hour, minute=tilltime.minute,
                                  start_date=start_date,
                                  end_date=end_date,
                                  timezone=pytz.timezone('Asia/Tokyo'))
            self.scheduler.add_job(dispatch_after(human_mode_min * 60,
                                                  self.worker.work_end),
                                   trigger)
        self.scheduler.print_jobs()

    def start(self):
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.shutdown()
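# Note: the CronTriggers above are bounded by start_date/end_date so the
# clock-in/clock-out jobs only fire inside the valid period. The same bounded
# trigger in isolation (dates are illustrative):
import datetime as dt

import pytz
from apscheduler.triggers.cron import CronTrigger

tokyo = pytz.timezone('Asia/Tokyo')
start = tokyo.localize(dt.datetime(2024, 1, 1))
end = tokyo.localize(dt.datetime(2024, 12, 31))
trigger = CronTrigger(day_of_week='mon-fri', hour=9, minute=0,
                      start_date=start, end_date=end, timezone=tokyo)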
Example #33
0
    def plan_task(self,
                  func,
                  trigger="cron",
                  args=None,
                  year=None,
                  month=None,
                  week="*",
                  day_of_week='*',
                  day=None,
                  hour=None,
                  minute=None,
                  second='0'):
        """Add a timer/scheduled task.

        func: the function to run

        args: arguments for the function, e.g. (1,)

        year: year

        month: month

        day: day of month

        hour: hour

        minute: minute

        second: second
        """
        scheduler = BlockingScheduler()
        if trigger == 'cron':
            scheduler.add_job(func,
                              trigger,
                              args=args,
                              year=year,
                              month=month,
                              week=week,
                              day_of_week=day_of_week,
                              day=day,
                              hour=hour,
                              minute=minute,
                              second=second)
        elif trigger == 'interval':
            if day:
                scheduler.add_job(func,
                                  trigger,
                                  args=args,
                                  days=int(day))
            elif hour:
                scheduler.add_job(func,
                                  trigger,
                                  args=args,
                                  hours=int(hour))
            elif minute:
                scheduler.add_job(func,
                                  trigger,
                                  args=args,
                                  minutes=int(minute))
            elif second:
                scheduler.add_job(func,
                                  trigger,
                                  args=args,
                                  seconds=int(second))
        try:
            scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            scheduler.shutdown()
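# Note: the helper above maps a single day/hour/minute/second value onto the
# interval trigger, but APScheduler also accepts these units combined:
from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler()
# every 1 day, 2 hours and 30 minutes
scheduler.add_job(lambda: print('interval tick'), 'interval',
                  days=1, hours=2, minutes=30)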
Example #34
0
class JobManage():
    def __init__(self):
        jobstores = {'default': MemoryJobStore()}
        executors = {
            'default': ThreadPoolExecutor(50)
            #             'processpool': ProcessPoolExecutor(3)
        }
        job_defaults = {'coalesce': False, 'max_instances': 50}
        self.sched = BlockingScheduler(jobstores=jobstores,
                                       executors=executors,
                                       job_defaults=job_defaults)
        self.addError()
        self.addJobExecuted()

    def addJob(self, func, jobId=None, cron=None, args=None, kwargs=None):
        '''
        Only cron-style schedules are supported:
            *  *  *  *  *  command
            minute  hour  day  month  weekday  command

            field 1: minute, 0-59 (every minute is * or */1)
            field 2: hour, 0-23 (0 is midnight)
            field 3: day of month, 1-31
            field 4: month, 1-12
            field 5: day of week, 0-6 (0 is Sunday)
            field 6: the command to run
        '''
        if cron is None:
            raise Exception("cron cannot be Null")

        (minute, hour, day, month, week) = cron.split(" ")
        # The fifth cron field is the weekday, which maps to APScheduler's
        # day_of_week (its `week` kwarg means week of the year).
        self.sched.add_job(func,
                           trigger='cron',
                           id=jobId,
                           hour=hour,
                           minute=minute,
                           day=day,
                           month=month,
                           day_of_week=week,
                           args=args or [],
                           kwargs=kwargs or {})

    def removeJob(self, jobId):
        self.sched.remove_job(jobId)

    def start(self):
        self.sched.start()

    def shutdown(self):
        self.sched.shutdown()

    def printJobs(self):
        self.sched.print_jobs()

    def getJobs(self):
        return self.sched.get_jobs()

    def addError(self, func=None):
        if func is None:
            func = self.listener
        self.sched.add_listener(func, EVENT_JOB_ERROR)

    def addJobExecuted(self, func=None):
        if func is None:
            func = self.listener
        self.sched.add_listener(func, EVENT_JOB_EXECUTED)

    def listener(self, event):
        if event.exception:
            log.error("Job [%s] failed: %s" % (event.job_id, event.traceback))
        else:
            log.debug("Job [%s] finished at %s" % (event.job_id, getNow()))


# jobManage = JobManage()
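# Hypothetical usage of the wrapper above (assumes the snippet's log/getNow
# helpers are in scope; the cron string follows the five-field format
# documented in addJob):
jobManage = JobManage()
jobManage.addJob(lambda: print('hourly tick'), jobId='tick', cron='0 * * * *')
jobManage.start()  # BlockingScheduler: blocks the calling thread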