Example #1
def cron_job():
    sched = BlockingScheduler()
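    # 'cron' with hour='7' fires the job once a day at 07:00; the explicit id
    # lets the job be looked up or replaced later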
    sched.add_job(job, 'cron', hour='7', id='ZongYiCidSupSpider')
    sched.start()
Example #2
def scheduler(self):
    scheduler = BlockingScheduler()
    scheduler.add_job(self.sendwx, 'cron', hour=8, minute=30)
    # scheduler.add_job(self.sendwx, 'interval', seconds=10)
    scheduler.start()
    print("init")
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
    initYears()


def initYears():
    session = Session(engine)
    for year in range(1900, 2018):
        session.add(MovieYearPage(year))
    session.commit()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--init", action="store_true")
    args = parser.parse_args()

    if args.init:
        init()
        sys.exit()

    logging.getLogger().addHandler(logging.StreamHandler())
    search_movies()

    scheduler = BlockingScheduler(
        {'apscheduler.timezone': os.environ['TIMEZONE']})
    scheduler.add_job(movie_fetcher, 'interval', seconds=1)
    scheduler.add_job(search_movies, 'interval', seconds=15)
    scheduler.start()
Example #4
# (reconstructed preamble: the snippet was cut here; the imports and the STATES
# list below are inferred from the MARKOV row comments and the code that follows)
import numpy as np
from twython import Twython
from apscheduler.schedulers.blocking import BlockingScheduler

STATES = ['<START>', 'dé', 'ma', 'allora', 'certo', 'però', 'comunque',
          'boia', '<EOM>']

MARKOV = [
    [0, 1 / 7, 1 / 7, 1 / 7, 1 / 7, 1 / 7, 1 / 7, 1 / 7, 0],  # <START>
    [0, 0, 0.7 / 7, 0.8 / 7, 0.7 / 7, 0.4 / 7, 0.7 / 7, 0.7 / 7, 3 / 7],  # dé
    [0, 6 / 25, 0, 6 / 25, 3 / 25, 6 / 25, 0, 4 / 25, 0],  # ma
    [0, 1.4 / 4, 0, 0, 0, 1.2 / 4, 0, 1.2 / 4, 0.2 / 4],  # allora
    [0, 11 / 25, 2 / 25, 2 / 25, 0, 5 / 25, 0, 3 / 25, 2 / 25],  # certo
    [0, 14 / 20, 0, 2 / 20, 2 / 20, 0, 0, 2 / 20, 0],  # però
    [0, 12 / 25, 0, 1 / 25, 1 / 25, 1 / 25, 0, 10 / 25, 0],  # comunque
    [0, 1 / 7, 0.7 / 7, 0.6 / 7, 0.6 / 7, 0.6 / 7, 0.5 / 7, 0, 3 / 7],  # boia
]

TWITTER_API = Twython(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN,
                      ACCESS_TOKEN_SECRET)
SCHEDULER = BlockingScheduler()


def generate_next_token(last_token):
    row_index = STATES.index(last_token)
    next_token = np.random.choice(STATES, 1, p=MARKOV[row_index])
    return next_token[0]
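# Note: np.random.choice requires each probability vector to sum to 1, and
# every row of MARKOV above is normalized by its denominator accordingly.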


def generate_status():
    generated_sequence = []
    last_token = '<START>'
    while True:
        next_token = generate_next_token(last_token)
        if next_token == '<EOM>':
            # (reconstructed) the snippet was truncated here: assemble and
            # return the status text from the generated tokens
            status = ' '.join(generated_sequence)
            return status
        generated_sequence.append(next_token)
        last_token = next_token
Example #5
def scheduler(self):
    from apscheduler.schedulers.blocking import BlockingScheduler
    return BlockingScheduler()
Example #6
def doJob():
    scheduler = BlockingScheduler()
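    # three cron jobs: daily at 09:01, 23:30 and 15:20; the ids make the jobs
    # addressable later (e.g. for removal)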
    scheduler.add_job(startJob, 'cron', hour=9, minute=1, id='job1')
    scheduler.add_job(killJob, 'cron', hour=23, minute=30, id='job2')
    scheduler.add_job(processDayInfo, 'cron', hour=15, minute=20, id='job3')
    scheduler.start()
Example #7
# -*- coding: utf-8 -*-
from apscheduler.schedulers.blocking import BlockingScheduler
import os
import TextTweet
import GetTweet

# Twitter API secret keys
CK, CKS, AT, ATS = os.environ["CONSUMER_KEY"], os.environ[
    "CONSUMER_SECRET"], os.environ["ACCESS_TOKEN_KEY"], os.environ[
        "ACCESS_TOKEN_SECRET"]

twische = BlockingScheduler()
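# scheduled_job registers the decorated function below as an interval job at definition time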


@twische.scheduled_job('interval', minutes=30)
def timed_tweet():
    # tweet once every 30 minutes
    TextTweet.puttweet()


def collect_tweet():
    # fetch tweets
    GetTweet.gettweet(CK, CKS, AT, ATS)

    # read the tweets saved in data.txt
    f = open("data.txt", encoding="utf-8")
    text = f.read()
    f.close()

    # build the chain and save it to the DB
    chain = PrepareChain(text)
Example #8
def __init__(self):
    self.scdl = BlockingScheduler()
Example #9
def startScheduler():
    sched = BlockingScheduler()
    sched.add_job(sentimentAnalysis, 'interval', hours=int(HOURS))
    sched.start()
Example #10
def my_scheduler(runtime):
    sched = BlockingScheduler()  # create the scheduler object
    sched.add_job(my_job, 'interval', seconds=runtime)  # run my_job every `runtime` seconds
    sched.start()
Example #11
yesterday = str(yesterday).split(' ')[0]
file_name = r"./timed_task-{}.log".format(str(datetime.now()).split(' ')[0])
logging.basicConfig(
    level=logging.DEBUG,
    format=LOG_FORMAT,
    datefmt=DATE_FORMAT,  # with a filename argument, output goes to the file instead of the console
)
headle = logging.FileHandler(filename=file_name, encoding='utf-8')
logger = logging.getLogger()
logger.addHandler(headle)

executors = {
    # 'default': ThreadPoolExecutor(20),  # thread pool
    'processpool': ProcessPoolExecutor(11)  # process pool; the two can be enabled together or separately
}
sched = BlockingScheduler(executors=executors)
sched_two = BackgroundScheduler(executors=executors)
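# BlockingScheduler keeps the main thread busy in start(), while
# BackgroundScheduler runs its jobs without blocking the caller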


def zhihu():
    os.system('python ./../zhihu/zhihu.py')
    print('Zhihu crawler task started......')


def xiaohongshu():
    os.system('python ./../xiaohongshu/xiaohongshu_selenium.py')
    print('Xiaohongshu crawler task started......')


def dianping():
    os.system('python ./../dazhongdianping/dianping.py')
Example #12
import time

ms = sql.MSSQL()
logging.basicConfig()
logging.getLogger('apscheduler').setLevel(logging.WARNING)


# All scheduled jobs
try:

    job_defaults = {
        'coalesce': True,  # run accumulated missed executions only once
        'max_instances': 10,  # allow up to 10 concurrent instances per job
        'misfire_grace_time': 600  # 600-second grace period for misfired jobs
    }
    sched = BlockingScheduler(job_defaults=job_defaults)
    # Triggers to choose from: 'date' runs a one-off job, 'interval' a
    # recurring job, and 'cron' a cron-style scheduled job

    # Send email
    def my_email():
        es.mail()

    # Check order placement
    def my_bd():
        bandao.bd()

    def my_listener(event):
        if event.exception:
            print('A job raised an error.')
            # sms.send_wrong_sms()
            sched.shutdown()
Example #13
def __init__(self):
    self.scheduler = BlockingScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
    self.spider = DailySpider()
Example #14
import json
import os

import constatnts  # (sic: module name as spelled in the source)
from pytz import utc
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.executors.pool import ThreadPoolExecutor
from HealthCalculator import HealthCalculator

if __name__ == '__main__':
    configs = os.getenv('configs')
    if configs is None:
        print('Exiting due to missing configs')
        exit()

    try:
        configs_json = json.loads(configs)
    except Exception:  # fall back to an empty dict on invalid JSON
        configs_json = dict()

    tenant_id = configs_json.get('tenant', 'xtenant')

    SCHEDULER_INTERVAL = constatnts.SCHEDULER_INTERVAL  # in seconds
    executors = {'default': ThreadPoolExecutor()}

    healthCalculator = HealthCalculator(tenant_id)
    healthCalculator.start()

    app_scheduler = BlockingScheduler(executors=executors, timezone=utc)
    app_scheduler.add_job(healthCalculator.start,
                          'interval',
                          seconds=SCHEDULER_INTERVAL,
                          id='health collector scheduler')
    app_scheduler.start()
Example #15
def main():
    scheduler = BlockingScheduler()
    scheduler.add_job(check_in, 'interval', seconds=4)
    scheduler.start()
Example #16
def cron_job():
    sched = BlockingScheduler()
    sched.add_job(job, 'cron', hour='12', minute='10', id='GetVidSpider')
    sched.start()
Example #17
def heartBeat():  # (reconstructed signature; the original body was truncated)
    # dataMonitor.heartBeat('demoName_heartbeat')
    pass


def start_spider(spider_name):
    command = "scrapy crawl " + spider_name
    out_bytes = subprocess.check_output(command, shell=True)
    print('end')


def start():
    start_spider('demoName_detail')


timeSpace = 10 * 60
heartTime = 1 * 60  # heartbeat interval in seconds
scheduler = BlockingScheduler(daemonic=False)
scheduler.add_job(heartBeat, 'interval', seconds=heartTime)
# run once immediately first
scheduler.add_job(start, 'date')
# then run the crawl at intervals, starting one interval from now
scheduler.add_job(start,
                  'interval',
                  seconds=timeSpace,
                  start_date=datetime.datetime.now() +
                  datetime.timedelta(seconds=timeSpace))
scheduler.start()

# demo
# def timerr():
#     print 1111
# scheduler = BlockingScheduler(daemonic=False)
Example #18
from etutorservice.logic.class_create_task import ClassCreateTaskManager
from etutorservice.logic.coach_invite import CoachInviteManager
from etutorservice.logic.temporary_substitute_coach_task import TemporarySubstituteCoachTaskManager
from etutorservice.logic.correct_coach_status import CorrectCoachStatusTaskManager
from etutorservice.logic.class_coach import ClassCoachManager
from etutorservice.logic.monitor import MonitorManager
from etutorservice.logic.continue_class import ContinueClassManager
from etutorservice.logic.service_reservation import ServiceReservationManager
from etutorservice.logic.business import SaleCardManager

logger = logging.getLogger(__name__)

scheduler = BlockingScheduler({
    'apscheduler.executors.default': {
        'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
        'max_workers': '20'
    },
    'apscheduler.timezone': 'Asia/Shanghai'
})
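# Configuration is passed as a dict of 'apscheduler.'-prefixed options: a
# 20-worker thread pool executor and an explicit Asia/Shanghai timezone.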

mail_sender = MailSender()


def _notify_scheduler(event):
    if event.code == EVENT_SCHEDULER_START:
        content = 'scheduler start now %s' % get_now()
    elif event.code == EVENT_SCHEDULER_SHUTDOWN:
        content = 'scheduler shutdown now %s' % get_now()
    else:
        content = 'unknown'
    mail_sender.send_email(config.data['system_admin_email_list'],
                           content)  # (reconstructed) the call was truncated in the source
Example #19
    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait (DEFAULT: 0).
            days: number of days to wait (DEFAULT: 0).
            hours: number of hours to wait (DEFAULT: 0).
            minutes: number of minutes to wait (DEFAULT: 0).
            seconds: number of seconds to wait (DEFAULT: 0).
            mailto: The scheduler will send an email to `mailto` every `remindme_s` seconds.
                (DEFAULT: None i.e. not used).
            verbose: (int) verbosity level. (DEFAULT: 0)
            use_dynamic_manager: "yes" if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. (DEFAULT: "no")
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue. (DEFAULT: 200)
            remindme_s: The scheduler will send an email to the user specified by `mailto` every `remindme_s` seconds.
                (int, DEFAULT: 1 day).
            max_num_pyexcs: The scheduler will exit if the number of python exceptions is > max_num_pyexcs
                (int, DEFAULT: 0)
            max_num_abierrs: The scheduler will exit if the number of errored tasks is > max_num_abierrs
                (int, DEFAULT: 0)
            safety_ratio: The scheduler will exit if the number of jobs launched becomes greater than
               `safety_ratio` * total_number_of_tasks_in_flow. (int, DEFAULT: 5)
            max_nlaunches: Maximum number of tasks launched in a single iteration of the scheduler.
                (DEFAULT: -1 i.e. no limit)
            debug: Debug level. Use 0 for production (int, DEFAULT: 0)
            fix_qcritical: "yes" if the launcher should try to fix QCritical Errors (DEFAULT: "yes")
            rmflow: If "yes", the scheduler will remove the flow directory if the calculation
                completed successfully. (DEFAULT: "no")
            killjobs_if_errors: "yes" if the scheduler should try to kill all the running jobs
                before exiting due to an error. (DEFAULT: "yes")
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )
        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = as_bool(
            kwargs.pop("use_dynamic_manager", False))
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.max_ncores_used = kwargs.pop("max_ncores_used", None)
        self.contact_resource_manager = as_bool(
            kwargs.pop("contact_resource_manager", False))

        self.remindme_s = float(kwargs.pop("remindme_s", 1 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        #self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)
        self.fix_qcritical = as_bool(kwargs.pop("fix_qcritical", True))
        self.rmflow = as_bool(kwargs.pop("rmflow", False))
        self.killjobs_if_errors = as_bool(
            kwargs.pop("killjobs_if_errors", True))

        self.customer_service_dir = kwargs.pop("customer_service_dir", None)
        if self.customer_service_dir is not None:
            self.customer_service_dir = Directory(self.customer_service_dir)
            self._validate_customer_service()

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if not has_apscheduler:
            raise RuntimeError("Install apscheduler with pip")

        if has_sched_v3:
            logger.warning("Using scheduler v>=3.0.0")
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = deque(maxlen=100)
Example #20
from apscheduler.schedulers.blocking import BlockingScheduler

def job():
    print('job 3s')

if __name__ == '__main__':
    sched = BlockingScheduler(timezone='Asia/Shanghai')
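    # with an interval trigger the first run fires one interval (3 s) after start()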
    sched.add_job(job, 'interval', id='3_second_job', seconds=3)
    sched.start()
Example #21
def doJob():
    scheduler = BlockingScheduler()
    scheduler.add_job(processJudge, 'interval', seconds=60, id='job1')
    scheduler.start()
Example #22
from apscheduler.schedulers.blocking import BlockingScheduler
from scrapy import cmdline

base = BlockingScheduler()


def fun_min():
    cmdline.execute('scrapy crawl xinxi'.split())


# NOTE: scrapy's cmdline.execute() exits the process once the crawl finishes,
# so this immediate call prevents the scheduler below from ever starting; a
# subprocess call would be needed for the daily job to work as intended.
fun_min()
base.add_job(fun_min, 'interval', days=1)
base.start()
Example #23
def run():
    schedule = BlockingScheduler()
    schedule.add_job(main, 'interval', hours=1)
    schedule.start()
Example #24
from datetime import datetime
import os
from apscheduler.schedulers.blocking import BlockingScheduler


def tick():  # define the job: print the current time
    print("Tick time is: %s" % datetime.now())


if __name__ == "__main__":  # main entry point
    scheduler = BlockingScheduler()  # no arguments: defaults to the in-memory job store
    scheduler.add_job(tick, 'interval', seconds=5)
    # print how to exit (available triggers are date, cron and interval)
    print("Press Ctrl+{0} to exit".format('Break' if os.name == 'nt' else 'C'))
    try:
        scheduler.start()  # start the scheduler
    except (KeyboardInterrupt, SystemExit):
        pass
Example #25
def main():
    scheduler = BlockingScheduler()
    scheduler.add_job(randomize_vpn, 'interval', hours=1)
    scheduler.start()
Example #26
# what is going on ... whew!
def handlePacket(data):
    print('In handlePacket:', data['id'])
    if data['id'] == 'tx_status':
        print(data['deliver_status'].hex())
    elif data['id'] == 'rx':
        print(data['rf_data'])
    else:
        print('Unimplemented frame type')


# Create XBee library API object, which spawns a new thread
zb = ZigBee(ser, callback=message_received)

# NOTE: BlockingScheduler.start() blocks the calling thread, so the job must be
# added before start(), and the polling loop below would only run with a
# BackgroundScheduler; add_interval_job was the APScheduler 2.x API, 3.x uses add_job
sendsched = BlockingScheduler()

# every 30 seconds send a house query packet to the XBee network
sendsched.add_job(sendQueryPacket, 'interval', seconds=30)
sendsched.start()

# Do other stuff in the main thread
while True:
    try:
        time.sleep(0.1)
        if packets.qsize() > 0:
            # got a packet from recv thread
            # See, the receive thread gets them
            # puts them on a queue and here is
            # where I pick them off to use
            newPacket = packets.get_nowait()
Example #27
    try:  # (reconstructed) the opening of this function was truncated in the source
        polo.fitBalance()
        polo.sendMailBalance(polo.getSummary())
        polo.savePoloniexBalanceToCsv()
    except Exception:  # ignore balance/email failures and continue
        pass

    # --- Write log
    for coinIndex in range(len(coins)):
        pp = ppList[coinIndex]
        writeBotLog(pp.getSummary())
    writeBotLog(polo.getSummary())

    # --- Back test optimization
    for coinIndex in range(len(coins)):
        pp = ppList[coinIndex]
        pp.backTestOptimization(pp.appreciationRate_,
                                pp.quantizer(pp.appreciationRate_))


def writeBotLog(logStr):
    fileName = __file__.split(".py")[0] + ".log"
    with open(fileName, "a") as f:
        f.write(logStr)


if __name__ == "__main__":
    sc = BlockingScheduler(timezone="UTC")
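    # run the routine daily at 00:01 UTC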
    sc.add_job(botRoutine, "cron", hour=0, minute=1)
    sc.start()
Example #28
class Constants:
    db = {}
    URL_GOV = 'https://data.gov.il/api/3/action/datastore_search?' \
              'resource_id=8a21d39d-91e3-40db-aca1-f73f7ab1df69&limit=100000000'
    SCHEDULER: BlockingScheduler = BlockingScheduler()
Example #29
from apscheduler.schedulers.blocking import BlockingScheduler
from django.core.management import call_command
from django.core.management.base import BaseCommand

scheduler = BlockingScheduler()


@scheduler.scheduled_job('interval', minutes=5)
def manage_task_payments_and_progress():
    # Distribute task payments to participants
    call_command('tunga_distribute_task_payments')

    # Update periodic task progress events
    call_command('tunga_manage_task_progress')


@scheduler.scheduled_job('interval', minutes=10)
def send_message_emails():
    # Send new message emails for conversations
    call_command('tunga_send_message_emails')

    # Send new activity emails for tasks
    call_command('tunga_send_task_activity_emails')

    # Send new message emails for customer support conversations
    call_command('tunga_send_customer_emails')


class Command(BaseCommand):

    def handle(self, *args, **options):
        scheduler.start()  # (reconstructed) the method body was truncated in the source
Example #30
def run(self):
    scheduler = BlockingScheduler()
    scheduler.add_job(publish_datas,
                      'interval',
                      seconds=getattr(project_conf, "report_interval", 30))
    scheduler.start()