Example #1
class TalosCollectorCron(object):
    """docstring for talosCollectorCron"""
    def __init__(self, redis_conn, config_path):
        super(TalosCollectorCron, self).__init__()
        self.redis_conn  = redis_conn
        self.config_path = config_path
        self.scheduler = BlockingScheduler()
        with open(config_path, 'r') as json_config:
            self.jsons = json.load(json_config)

    def myjob(self,c):
        # get the date
        # get the HOST information
        # analyzer
        if c['enable']:
            tmp = os.popen(c['cmd']).readlines()
            data = {}
            data['content']  = tmp
            data['analyzer'] = c['analyzer']
            data['host']     = socket.gethostname()
            data['date']     = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            data['param']    = c['param']
            self.redis_conn.publish('talos:q:cmd', json.dumps(data))

    def start(self):
        for c in self.jsons:
            cron = c['time'].split(' ')
            job = self.scheduler.add_job(self.myjob,args=[c],trigger='cron', year=cron[5], month=cron[4], day=cron[3], hour=cron[2], minute=cron[1], second=cron[0])
        print('TalosCollectorCron Start..')
        self.scheduler.start()
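For reference, a minimal sketch of one config entry in the shape this class expects, shown as the parsed Python structure (field names come from the code above; all values are illustrative). The time field is split on spaces and read as second minute hour day month year.

example_config = [
    {
        "enable": True,              # skip the entry when False
        "cmd": "df -h",              # shell command whose output is collected (illustrative)
        "analyzer": "disk_usage",    # analyzer name forwarded with the payload (illustrative)
        "param": {},
        "time": "0 0 2 * * *"        # second minute hour day month year -> 02:00:00 daily
    }
]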
Example #2
def test5():
    """Run a task at a scheduled time, then shut down the scheduler."""
    sched = BlockingScheduler()
    sched.add_job(my_job, 'date', run_date=datetime(2016, 8, 16, 12, 34, 5), args=('123',), id='my_job_id')
    # add_job's second argument is the trigger, which controls how the job is
    # scheduled; it can be date, interval or cron.
    sched.start()
    print('scheduled task')
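The comment above names the three built-in trigger types; here is a minimal side-by-side sketch (my_job stands in for any callable):

from datetime import datetime

from apscheduler.schedulers.blocking import BlockingScheduler

def my_job():
    print('running')

sched = BlockingScheduler()
# date: run exactly once at a fixed point in time
sched.add_job(my_job, 'date', run_date=datetime(2030, 1, 1, 12, 0, 0))
# interval: run repeatedly with a fixed delay between runs
sched.add_job(my_job, 'interval', seconds=10)
# cron: run whenever the current time matches the field constraints
sched.add_job(my_job, 'cron', hour=7, minute=30)
sched.start()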
Example #3
def test1():
    """Run a task periodically."""
    start_time = time.time()
    sched = BlockingScheduler()
    sched.add_job(my_job, 'interval', args=('123',), seconds=1, id='my_job_id')  # run my_job every second; args are passed to my_job; id is optional
    sched.start()  # blocks here; the scheduler keeps running, but Ctrl+C can terminate it
    print('not reached while the scheduler is running')
Example #4
def main(argv):
	if len(argv) > 1:
		#initialize some variables
		pass
	scheduler = BlockingScheduler()
	scheduler.add_job(link, "interval", hours=1, id="link_job")
	scheduler.start()
Example #5
    def task_schedule(self):
        scheduler = BlockingScheduler()
        try:
            scheduler.add_job(self._get_task, 'interval', seconds=30)
            scheduler.start()
        except Exception as e:
            print(e)
Example #6
def go_sched():

	sched = BlockingScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)

	startTime = datetime.datetime.now()+datetime.timedelta(seconds=10)
	scheduleJobs(sched, startTime)
	sched.start()
Example #7
class BGPTableDownload(basesinfonierspout.BaseSinfonierSpout):

    def __init__(self):

        basesinfonierspout.BaseSinfonierSpout.__init__(self)

    def useropen(self):
        
        self.interval = int(self.getParam("frequency"))        
        
        self.sched = BlockingScheduler()
        self.sched.add_job(self.job, "interval", seconds=self.interval, id="bgptable")
        self.sched.start()

    def usernextTuple(self):

        pass
        
    def job(self):
        
        query = "http://bgp.potaroo.net/v6/as2.0/bgptable.txt"
        self.log(query)
        headers = {
            "User-Agent" : "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0"
        }
        r = requests.get(query, headers=headers)
        self.emit()
Example #8
class CrawlScheduler(object):

    def __init__(self, crawler):
        self.crawler = crawler
        self.scheduler = BlockingScheduler()

    def start(self):
        logging.info('=============================================')
        logging.info('[{0}] Start crawling from Instagram...'.format(datetime.datetime.now()))
        crawling_start_time = time.time()
        self.crawler.crawl()
        crawling_end_time = time.time()
        time_spent = int(crawling_end_time - crawling_start_time)
        logging.info('Time spent: {0}min {1}s'.format(time_spent // 60, time_spent % 60))
        logging.info('=============================================')

    @staticmethod
    def get_nearest_start_time():
        # round up to the next 15-minute boundary (60 * 15 = 900 seconds)
        nearest_start_timestamp = int(time.time() / (60 * 15) + 1) * 60 * 15
        return datetime.datetime.fromtimestamp(nearest_start_timestamp)

    def start_scheduler(self, should_continue=False):
        # Config logging and alarm.
        logging.basicConfig(filename=self.crawler.get_crawl_log(), level=logging.DEBUG)

        scheduler_start_time = self.get_nearest_start_time()
        redis_client.set(self.crawler.get_redis_end_time_key(), str(scheduler_start_time))
        if not should_continue:
            redis_client.set(self.crawler.get_redis_start_time_key(),
                             str(scheduler_start_time - datetime.timedelta(minutes=14, seconds=59)))
        self.scheduler.add_job(self.start, 'interval', start_date=scheduler_start_time, minutes=15, misfire_grace_time=600)
        self.scheduler.start()
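As a quick check of the rounding arithmetic in get_nearest_start_time (the helper rounds up to the next 15-minute boundary; the timestamp is illustrative):

import datetime

ts = 1609495620  # 2021-01-01 10:07:00 UTC (illustrative)
rounded = int(ts / (60 * 15) + 1) * 60 * 15
print(datetime.datetime.utcfromtimestamp(rounded))  # 2021-01-01 10:15:00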
Example #9
    def run(self):
        setup_logging()
        log = logging.getLogger('hermes_cms.service.runner')

        while True:
            try:
                config = Registry().get(self.config_file)
            # pylint: disable=broad-except
            except Exception as e:
                log.exception(e)
                continue  # config would be unbound below; retry the loop

            module_name = config['jobs'][self.name]['module_name']
            class_name = config['jobs'][self.name]['class_name']

            mod = __import__(module_name, fromlist=[class_name])
            service_class = getattr(mod, class_name)

            job_class = service_class(self.name, self.region, config)

            seconds = int(config['jobs'][self.name]['frequency'])

            scheduler = BlockingScheduler()
            scheduler.add_job(job_class.do_action, IntervalTrigger(seconds=seconds))
            log.info('Starting Scheduled job %s', self.name)
            scheduler.start()
Example #10
def run():
    sched = BlockingScheduler()
    sched.add_job(main.run, "cron", hour="7,11,17")

    try:
        sched.start()
    except KeyboardInterrupt:
        pass
Example #11
    def task_schedule(self):
        scheduler = BlockingScheduler()
        try:
            scheduler.add_job(self._get_task, 'cron', day='1-31', hour=self.sche_time[0],
                              minute=self.sche_time[1], second=self.sche_time[2])
            scheduler.start()
        except Exception as e:
            print(e)
Example #12
def main():
    """Run tick() at the interval of every ten seconds."""
    scheduler = BlockingScheduler(timezone=utc)
    scheduler.add_job(tick, 'interval', seconds=10)
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #13
def main():
    scheduler = BlockingScheduler()
    scheduler.add_job(kick_off_script, 'interval', seconds=60)
    print('Press Ctrl+C to exit')

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #14
def main():
    sched = BlockingScheduler()
    sched.add_job(spider.spider, 'interval', seconds=21600)  # pass the function itself, not its result; 21600 s = 6 h
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        sched.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #15
    def run(self):
        scheduler = BlockingScheduler()
        cron_jobs = [NotificationJob(), TransferJob()]

        for cron_job in cron_jobs:
            trigger = cron_job.trigger()
            scheduler.add_job(cron_job.run, **trigger )
        logger.info('running CronJobTaskRunner')
        scheduler.start()
Example #16
def schedJobs(funcToRun):
    logging.basicConfig()
    scheduler = BlockingScheduler()
    datenow = datetime.datetime.now()
    print("main scheduler started for jobs @" + str(datenow))

#    rs = scheduler.add_job(subtwo, 'interval', id="MainTaskid", name="maintask", start_date=datetime.datetime.now(), seconds=3, jobstore='default')
    rs = scheduler.add_job(funcToRun, trigger="interval", id="mainSchedJobID", name="mainSchedJob", jobstore='default', executor='default', replace_existing=False, minutes=mainSchedJobsInterval)
    print("Running Tasks")
    scheduler.start()
Example #17
def bob_job():

    me, password = email_login()
    sched = BlockingScheduler()

    @sched.scheduled_job('cron', day_of_week='mon,tue,wed,thu,fri', hour=17)
    def scheduled_job():
        job.run(me, password)

    sched.start()    
Example #18
def test7():
    """Run a task periodically; terminate it with Ctrl+C."""
    scheduler = BlockingScheduler()
    scheduler.add_job(tick, 'interval', seconds=1)
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        scheduler.start(paused=False) # the code below does not run while the scheduler is active; if paused is True, jobs do not run until the scheduler is resumed
    except (KeyboardInterrupt, SystemExit):
        pass
    print('ok')
Example #19
def main():
    ceilometer = create_ceilomenter_client()
    resources = ceilometer.resources.list()
    for i in resources:
        # print('\n')
        print(i.resource_id)
    # Run this job at a fixed interval, with parameter 'test'
    sched = BlockingScheduler()
    sched.add_job(my_job, 'interval', seconds=5, args=['test'])
    sched.start()
Example #20
def schedule_task():
    
    sched = BlockingScheduler()
    @sched.scheduled_job('interval', hours=1)
    def timed_job():
        notifications = session.query(Notification).all()
        for n in notifications:
            send_text(n)
        print("ran job")
    sched.start()
Example #21
    def run(self):
        self.setup_logging()

        scheduler = BlockingScheduler()
        weather = Weather(scheduler, zip=self._args['zip'], station=self._args['station'])
        dimmer = Dimmer(scheduler)
        display = Display(weather, dimmer)

        display.start()
        scheduler.start()
Example #22
def main():
	scheduler = BlockingScheduler()
	routine = Routine()
	job = scheduler.add_job(routine.check_fresh_dd, 'interval', minutes=1)
	try:
		scheduler.start()
	except (KeyboardInterrupt, SystemExit):
		pass

# if __name__ == "__main__":
# 	main()
Example #23
def main(generate_once, minutes):
    generate()

    if not generate_once:
        print('Starting schedule, every {} minutes'.format(minutes))
        scheduler = BlockingScheduler()
        scheduler.add_job(generate, 'interval', minutes=minutes)
        try:
            scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            pass
Example #24
def main():

    sched = BlockingScheduler()

    @sched.scheduled_job('cron', day_of_week='mon,tue,wed,thu,fri,sat', hour=23)
    def scheduled_job():
        print('[INFO] Job started.')
        get_busystock_earnings()
        download_SPX()
        print('[INFO] Job ended.')

    sched.start()    
Example #25
def main():
    repo_slugs = ['start-jsk/jsk_apc']
    gh_repos_handler = GitHubReposHandler(repo_slugs)
    scheduler = BlockingScheduler(logger=logger)
    scheduler.add_job(gh_repos_handler.send_empty_pr,
                      trigger='interval', minutes=5)
    scheduler.add_job(gh_repos_handler.close_ci_success_empty_pr,
                      trigger='interval', minutes=5)
    scheduler.print_jobs()
    scheduler.start()
Example #26
def startTask():
    from apscheduler.schedulers.blocking import BlockingScheduler
    scheduler = BlockingScheduler()
    scheduler.add_job(crawlerTask, 'cron', second='0',minute='15', hour='8')
    scheduler.add_job(pushTask, 'cron', second='0',minute='30', hour='8')
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
Example #27
    def scheduler(self):
        """Runs the daemon scheduler

        """
        self.write_pid(str(os.getpid()))
        if self.setproctitle:
            import setproctitle
            setproctitle.setproctitle('mymongo_scheduler')
        sched = BlockingScheduler()
        try:
            sched.add_job(self.dummy_sched, 'interval', minutes=1)
            sched.start()
        except Exception as e:
            self.logger.error('Cannot start scheduler. Error: ' + str(e))
Example #28
class schedulecontrol:
    def __init__(self):
        self.scheduler = BackgroundScheduler()
        self.oncescheduler=BlockingScheduler()
        self.scheduler.start()
    def start(self):
        self.oncescheduler.start()
    def addschedule(self, event=None, day_of_week='0-6', hour='11', minute='57', second='0', id='', type='cron', run_date='', args=None):
        if id == '':
            id = str(time.strftime("%Y-%m-%d %X", time.localtime()))
        if type == 'date':
            if run_date == '':
                self.oncescheduler.add_job(event, args=args)
            else:
                self.oncescheduler.add_job(event, 'date', run_date=run_date, args=args)
        elif type == 'back':
            # run a cron job on the blocking scheduler
            self.oncescheduler.add_job(event, 'cron', day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)
        else:
            self.scheduler.add_job(event, type, day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)
    def removeschedule(self,id):
        self.scheduler.remove_job(id)
Example #29
def main():
    """main
    """
    print(arrScripts)
    logging.basicConfig()
    objSche = BlockingScheduler()
    for (k, v) in arrScripts.items(): 
        #ThreadNum(v)
        o = getObj(v['classname'])
        f = getattr(o(), v['method'])
        objSche.add_job(f, 'cron', month=v['cron_month'], day=v['cron_day'], 
            hour=v['cron_hour'], minute=v['cron_minute'], kwargs=v)

    objSche.start()
Example #30
def main(args):
    run_flag = args[1]
    bs = BlockingScheduler()
    bs.add_executor(ThreadPoolExecutor(5))
    system = SystemStatsReader(conf)
    elastic = ElasticStatsReader(conf)
    if run_flag == 'sys':
        add_job_to_scheduler(bs, run_system_only, [system])
    elif run_flag == 'elastic':
        add_job_to_scheduler(bs, run_elastic_only, [elastic])
    elif run_flag is None:
        add_job_to_scheduler(bs, run_system_only, [system])
        add_job_to_scheduler(bs, run_elastic_only, [elastic])
    bs.start()
Example #31
# encoding: utf-8
import requests
import ssl
import time
import random
from proxiesUtil import Proxies
import logging
import configparser
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.blocking import BlockingScheduler

job_defaults = {'coalesce': False, 'max_instances': 3}
schedulerBack = BackgroundScheduler(job_defaults=job_defaults)
schedulerBlock = BlockingScheduler(job_defaults=job_defaults)
job1 = None
job2 = None
job3 = None

config = configparser.ConfigParser()
config.read("config.ini")

logging.basicConfig(format="%(asctime)s %(name)s %(levelname)s %(message)s",
                    filename='log',
                    level=logging.INFO,
                    datefmt='%Y-%m-%d %H:%M:%S')

ssl._create_default_https_context = ssl._create_unverified_context
# article IDs
articleIds = [
    '54948224', '105194108', '105048964', '104820919', '104845505', '90290535',
]

Example #32
############################
# run main function
##############################
class common:
    path='./'

def my_job():
    oldstdout = sys.stdout
    sys.stdout = open('currentjob.txt', 'w+')
    main(common.path)
    sys.stdout.flush()
    sys.stdout = oldstdout

common.path=os.getcwd()
scheduler = BlockingScheduler()
scheduler.add_job(my_job, 'interval', minutes=10,
                  next_run_time=datetime.datetime.now())
scheduler.start()
Example #33
def autoBackup():
    print("Start backing up the database.")
    storeInfo = checkFolder() + dbName + '.sql'
    try:
        #tmpTime=time.strftime("%Y-%m-%d")
        tmp = os.system(
            "mysqldump -u {0} -p{1} --databases {2} --result-file={3}".format(
                dbuser, dbpasswd, dbName, storeInfo))
        #print tmp
    except Exception as e:
        traceback.print_stack()
        currentTime = time.strftime("%Y-%m-%d %H:%M:%S")
        print("An error occurred when trying to back up the database....{0}\n\n".format(
            currentTime))
    else:
        currentTime = time.strftime("%Y-%m-%d %H:%M:%S")
        print("Backup completed!!!=====>{0}".format(currentTime))
        print("Next backup will start after " + interval + " minutes.")
        print('Press Ctrl+{0} to exit\n\n'.format('Break' if os.name ==
                                                  'nt' else 'C'))


if __name__ == '__main__':
    scheduler = BlockingScheduler()
    scheduler.add_job(autoBackup, 'interval', minutes=int(interval))
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #34
from bot import SynthweetixBot
from config import ConfigType, ConfigFactory

if __name__ == '__main__':
    # Configuration
    app_settings = os.getenv('CONFIGURATION', default='development')
    type_ = ConfigType.reverse_lookup(app_settings)
    cfactory = ConfigFactory()
    config = cfactory.get(type_)

    # Logging
    logging.basicConfig(format='[%(asctime)s] [%(levelname)s] %(message)s',
                        level=config.LOG_LEVEL)

    logging.info(f'Initializing Synthweetix in {app_settings} environment')
    bot = SynthweetixBot(
        config.TWITTER_CONSUMER_KEY, config.TWITTER_CONSUMER_SECRET,
        config.TWITTER_ACCESS_TOKEN, config.TWITTER_ACCESS_SECRET,
        config.ETHERSCAN_API_KEY, config.TRADE_VALUE_THRESHOLD,
        config.EYE_CATCHER_THRESHOLD)

    # Run once on startup
    bot.execute()

    # Run the bot periodically
    if config.TRIGGER is not None:  # In case the bot is deployed as a Heroku or Docker cron job.
        scheduler = BlockingScheduler()
        scheduler.add_job(bot.execute, config.TRIGGER)
        scheduler.start()
Example #35
def run():
    main()
    sched = BlockingScheduler()
    sched.add_job(main, 'interval', minutes=5)
    sched.start()
Example #36
                                    entry['link'][33:]

                        try:
                            cursor.execute(add_entry, entry)
                            conn.commit()
                            index.add(str(cursor.lastrowid), Simhash(features))
                        except Exception as e:
                            print('Exception when add entry: {}'.format(e))
                    except Exception as e:
                        print("Unexpected Error: {}".format(e))
            except Exception as e:
                print("Unexpected Error: {}".format(e))
        # print(d['feed']['title'])
    elapsed = time.perf_counter() - start  # time.clock() was removed in Python 3.8
    print('time used: ' + str(elapsed))

    # close the Cursor and the Connection:
    cursor.close()


restore_simhash()
sched = BlockingScheduler()
sched.add_job(crawl, 'interval', minutes=30, max_instances=5)
sched.start()

# restore_simhash()
# crawl()

conn.close()  # only reached after the scheduler shuts down

Example #37
from dotenv import load_dotenv

# fetch the environment variables
load_dotenv()

import pylast
from pylast import PERIOD_1MONTH
import requests
from jsonpath_ng import jsonpath
from jsonpath_ng.ext import parse
import spotipy
from spotipy.oauth2 import SpotifyOAuth
from apscheduler.schedulers.blocking import BlockingScheduler

# create a new scheduler
sched = BlockingScheduler()

import logging
logging.basicConfig(level=os.getenv('LOGLEVEL', 'INFO'))

# General settings

# Studio Brussel streams
status = 'http://icecast.vrtcdn.be/status-json.xsl'
streams = ['bruut', 'tijdloze', 'hooray', 'belgisch']

LASTFM_API_KEY = os.getenv('LASTFM_API_KEY')
LASTFM_API_SECRET = os.getenv('LASTFM_API_SECRET')
LASTFM_USERNAME, LASTFM_PASSWORD = {}, {}

scope = 'playlist-modify-public'
Example #38
"""
 https://finviz.com/help/screener.ashx
"""

from apscheduler.schedulers.blocking import BlockingScheduler
import logging
import time
import datetime
import random
import pandas as pd

from hsstock.service.mysql_service import MysqlService
from hsstock.utils.fiv_crawler import Crawler
from hsstock.utils.app_logging import setup_logging

sched = BlockingScheduler()


@sched.scheduled_job('cron', day_of_week='mon-fri', hour='16', minute='39')
def scheduled_job():
    logging.info('This job is run every weekday at 5pm')
    crawler = Crawler('##')
    storeservice = MysqlService()

    pos = 0
    while pos < 7101:
        print(pos)
        url = 'http://finviz.com/screener.ashx?v=152&r=' + str(
            pos
        ) + '&c=0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70'
Example #39
            dataClean.renameFiles(todayStr)
            dataClean.appendData(today, todayStr)

        else:
            logging.info('sftp connection failed')

    except Exception as e:
        traceback.print_exc()
        logging.error(str(e))
    finally:
        if sftp != 'default':
            sftp.close()


if __name__ == "__main__":
    scheduler = BlockingScheduler()
    scheduler.add_job(scanTask,
                      'cron',
                      day='*/1',
                      hour='9',
                      minute='5',
                      second='0')

    try:
        print('start')
        scheduler.start()
        #scanTask()
        print('end')
    except Exception as e:
        scheduler.shutdown()
        traceback.print_exc()
Example #40
def main():
    config.parse_args()
    executors = {
        'default': ThreadPoolExecutor(10),
        'processpool': ProcessPoolExecutor(3)
    }
    job_defaults = {
        'coalesce': True,
        'max_instances': 2,
        'misfire_grace_time': 3600
    }
    scheduler = BlockingScheduler(executors=executors,
                                  job_defaults=job_defaults,
                                  timezone="UTC")
    #    scheduler.add_executor('processpool')
    scheduler.add_jobstore('sqlalchemy', url=CONF.database.connection)
    print(CONF.database.connection)
    scheduler.add_job(tick, 'interval', seconds=10, id="abcdefg")
    #        scheduler.add_job(tick, 'cron', day='2,7,10,15',id="bill_generation")
    scheduler.get_job("get_instance_by_hour") or scheduler.add_job(
        collectInstance, 'cron', hour='*', id="get_instance_by_hour")
    scheduler.get_job("get_disk_by_hour") or scheduler.add_job(
        collectDisk, 'cron', hour='*', id="get_disk_by_hour")
    scheduler.get_job("get_snapshot_by_hour") or scheduler.add_job(
        collectSnapshot, 'cron', hour='*', id="get_snapshot_by_hour")
    scheduler.get_job("get_router_by_hour") or scheduler.add_job(
        collectRouter, 'cron', hour='*', id="get_router_by_hour")
    scheduler.get_job("get_ip_by_hour") or scheduler.add_job(
        collectIp, 'cron', hour='*', id="get_ip_by_hour")
    scheduler.get_job("get_image_by_hour") or scheduler.add_job(
        collectImage, 'cron', hour='*', id="get_image_by_hour")
    scheduler.get_job("get_vpn_by_hour") or scheduler.add_job(
        collectVpn, 'cron', hour='*', id="get_vpn_by_hour")
    scheduler.get_job("send_data_msg") or scheduler.add_job(
        send_data_msg, 'cron', minute='*/2', id="send_data_msg")
    #        print help(scheduler)
    scheduler.start()
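The job_defaults dict above drives APScheduler's misfire handling; a minimal sketch of what each key means:

from apscheduler.schedulers.blocking import BlockingScheduler

job_defaults = {
    'coalesce': True,            # collapse a backlog of missed runs into one run
    'max_instances': 2,          # how many instances of a job may run concurrently
    'misfire_grace_time': 3600,  # seconds a run may start late before it is skipped
}
scheduler = BlockingScheduler(job_defaults=job_defaults, timezone="UTC")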
Example #41
        for line in compare_results:
            line = int(line) + 1
            result = linecache.getline(results_file, line).strip()
            send_msg = send_msg + "{}\n".format(result)
        send_msg = msg + send_msg
        file_list = [results_file]
        print(send_msg)
        log(send_msg)
        try:
            send_mail(send_name, send_subject, send_msg, file_list)
        except Exception as e:
            print('send_mail failed')
            log('send_mail failed')
            print(e)
            log(e)
    else:
        print('all status codes in {} are 200; not sending mail'.format(code_file))
        log('all status codes in {} are 200; not sending mail'.format(code_file))


if __name__ == '__main__':
    init()                  # initialize
    makedir(results_dir)    # create the results directory and switch the working directory
    scheduler = BlockingScheduler()
    # scheduler.add_job(url_test, 'cron', day_of_week='*', hour='*')
    scheduler.add_job(compare_sendmail_loop, 'cron', day_of_week='*', hour='*', minute='*')
    print(scheduler.get_jobs())
    scheduler.start()
    # main test:
    # compare_sendmail_loop()
Example #42
    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait
            days: number of days to wait
            hours: number of hours to wait
            minutes: number of minutes to wait
            seconds: number of seconds to wait
            verbose: (int) verbosity level
            max_njobs_inque: Limit on the number of jobs that can be present in the queue
            use_dynamic_manager: True if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. Default: False
            max_nlaunches: Maximum number of tasks launched by rapidfire (default -1 i.e. no limit)
            fix_qcritical: True if the launcher should try to fix QCritical Errors (default: True)
            rmflow: If set to True, the scheduler will remove the flow directory if the calculation
                completed successfully. Default: False
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )

        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = kwargs.pop("use_dynamic_manager", False)
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.max_ncores_used = kwargs.pop("max_ncores_used", None)
        self.contact_resource_manager = as_bool(kwargs.pop("contact_resource_manager", False))

        self.remindme_s = float(kwargs.pop("remindme_s", 4 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        #self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)
        self.fix_qcritical = kwargs.pop("fix_qcritical", True)
        self.rmflow = kwargs.pop("rmflow", False)

        self.customer_service_dir = kwargs.pop("customer_service_dir", None)
        if self.customer_service_dir is not None:
            self.customer_service_dir = Directory(self.customer_service_dir)
            self._validate_customer_service()

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if has_sched_v3:
            logger.warning("Using scheduler v>=3.0.0")
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = deque(maxlen=100)
Example #43
CONSUMER_KEY = ""
CONSUMER_SECRET = ""
ACCESS_TOKEN = ""
ACCESS_TOKEN_SECRET = ""


# This function will tweet on Twitter
def publish_Status_On_Twitter(Twitter_Status):
    TW = Twitter(auth=OAuth(ACCESS_TOKEN, ACCESS_TOKEN_SECRET, CONSUMER_KEY,
                            CONSUMER_SECRET))
    TW.statuses.update(status=Twitter_Status)


def tweet_if_internet():
    try:
        publish_Status_On_Twitter("Internet Service is up: " +
                                  datetime.now().strftime("%Y-%m-%d %H:%M"))
    except:
        pass


# Tweet on script startup
publish_Status_On_Twitter("Internet Service is up: " +
                          datetime.now().strftime("%Y-%m-%d %H:%M"))

scheduler = BlockingScheduler()

# Schedule a tweet every 30 minutes
scheduler.add_job(tweet_if_internet, 'interval', minutes=30)
scheduler.start()
Example #44
from apscheduler.schedulers.blocking import BlockingScheduler

from main import cronjob

scheduler = BlockingScheduler()
#scheduler.add_job(cronjob, 'cron', hour=0, minute=00)
scheduler.add_job(cronjob, 'interval', seconds=30)

scheduler.start()
Example #45
                and not float(tck_usdt['bidPrice']) == 0
            ]

            # open new positions only if bid price and ask price differ less than 0.4%
            for ticker in tickers_usdt:
                if ticker['symbol'] == buy_signal[1]:
                    diff = (float(ticker['askPrice']) /
                            float(ticker['bidPrice']) - 1) * 100
                    if diff < 0.4:
                        market_order = MarketOrder(
                            symbol=buy_signal[1],
                            acc_name="admin",
                            symbol_info=buy_dict[
                                buy_signal[1]].symbol.symbol_info,
                            amount=binance_account.value_per_trade,
                            stoploss=0.1,
                            type="BUY")
                        market_order.run_order()

        except BinanceAPIException as e:
            print(e.message)


if __name__ == '__main__':
    # current config for 15m timeframe only
    scheduler = BlockingScheduler()
    scheduler.add_job(session, 'cron', minute=0, second=5)
    scheduler.add_job(session, 'cron', minute=15, second=5)
    scheduler.add_job(session, 'cron', minute=30, second=5)
    scheduler.add_job(session, 'cron', minute=45, second=5)
    scheduler.start()
Example #46
from apscheduler.schedulers.blocking import BlockingScheduler
from stock import get_stock_alert

sched = BlockingScheduler()


# @sched.scheduled_job('interval', minutes=3)
# @sched.scheduled_job('interval', day_of_week='mon-fri')
@sched.scheduled_job('cron', day_of_week='mon-sun', hour='4-11')
def scheduled_job():
    get_stock_alert()
    print('This job runs Monday to Sunday, every hour from 4 to 11')


sched.start()
Example #47
if __name__ == '__main__':
    consumer_thread = threading.Thread(target=saveHistory,
                                       args=(historyQueue, ),
                                       daemon=True)
    consumer_thread.start()
    for index in range(20):  # 20 crawler threads
        consumer_thread = threading.Thread(target=consumer,
                                           args=(
                                               queue,
                                               result_queue,
                                           ),
                                           daemon=True)
        consumer_thread.start()
        threads.append(consumer_thread)

    for index in range(5):  # 5 storage threads
        consumer_thread2 = threading.Thread(target=RealtimeTradeConsumer,
                                            args=(result_queue, ),
                                            daemon=True)
        consumer_thread2.start()
        threads.append(consumer_thread2)

    scheduler = BlockingScheduler()
    # start the job
    scheduler.add_job(startJob, 'cron', hour=16, minute=57)
    # scheduler.add_job(func=startJob, trigger='cron', month='1-12', day='*', hour='9-17', minute='*')
    # create a new record table
    scheduler.add_job(ceartDb, 'cron', hour=2, minute=0)
    scheduler.start()
Example #48
import hashlib
import requests
import os
import glob

from block import Block
from config import *
import utils

import apscheduler
from apscheduler.schedulers.blocking import BlockingScheduler

#if we're running mine.py, we don't want it in the background
#because the script would return after starting. So we want the
#BlockingScheduler to run the code.
sched = BlockingScheduler(standalone=True)

import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

def mine_for_block(chain=None, rounds=STANDARD_ROUNDS, start_nonce=0, timestamp=None):
  if not chain:
    chain = sync.sync_local() #gather last node

  prev_block = chain.most_recent_block()
  return mine_from_prev_block(prev_block, rounds=rounds, start_nonce=start_nonce, timestamp=timestamp)

def mine_from_prev_block(prev_block, rounds=STANDARD_ROUNDS, start_nonce=0, timestamp=None):
  #create new block with correct
  new_block = utils.create_new_block_from_prev(prev_block=prev_block, timestamp=timestamp)
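The comment in Example #48 explains why a BlockingScheduler is used there; for contrast, a minimal sketch of the background variant, which returns control to the caller (heartbeat is a placeholder job):

import time

from apscheduler.schedulers.background import BackgroundScheduler

def heartbeat():
    print('still alive')

sched = BackgroundScheduler()
sched.add_job(heartbeat, 'interval', seconds=5)
sched.start()  # returns immediately; jobs fire on a background thread

try:
    while True:  # the main thread must stay alive for jobs to keep firing
        time.sleep(1)
except (KeyboardInterrupt, SystemExit):
    sched.shutdown()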
Example #49
from voluptuous import (
    Invalid,
    MultipleInvalid,
    Optional,
    Or,
    Range,
    Required,
    Schema,
    Url,
)

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logging_level = os.environ.get("LOGGING_LEVEL", "ERROR")
logging_format = "%(asctime)s [level=%(levelname)s] [thread=%(threadName)s] [module=%(module)s] [line=%(lineno)d]: %(message)s"
logging.basicConfig(level=logging_level, format=logging_format)
log = logging.getLogger(__name__)
scheduler = BlockingScheduler(timezone="UTC")
monitors = {}


def batches(iterator, batch_size: int):
    """ Yields lists of max batch_size from given iterator"""
    while True:
        batch = list(islice(iterator, batch_size))
        if not batch:
            break
        yield batch


def sanitize_labels(labels: dict):
    """Given prometheus metric sample labels, returns labels dict suitable for Prometheus format"""
    new_labels = ""
Example #50
from apscheduler.schedulers.blocking import BlockingScheduler


def a():
    print("A")

def b():
    print("B")

def c():
    print("C")

scheduler3 = BlockingScheduler()
s1 = scheduler3.add_job(a, 'cron', day_of_week='1-5', hour=17, minute=19)
s1.remove()
s2 = scheduler3.add_job(b, 'cron', day_of_week='1-5', hour=16, minute=20)
s2.resume()
s3 = scheduler3.add_job(c, 'cron', day_of_week='1-5', hour=16, minute=21)
s3.remove()


scheduler3.start()
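Beyond remove() and resume() shown above, the Job handle returned by add_job supports a few more operations; a minimal sketch (the job id and trigger values are illustrative):

from apscheduler.schedulers.blocking import BlockingScheduler

def report():
    print('report')

sched = BlockingScheduler()
job = sched.add_job(report, 'interval', minutes=10, id='report_job')

job.pause()                     # keep the job but stop scheduling runs
job.resume()                    # start scheduling runs again
job.modify(max_instances=2)     # change job attributes in place
job.reschedule('cron', hour=8)  # replace the trigger entirely
sched.start()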
Example #51
# -*- coding: utf-8 -*-
from cqhttp import CQHttp
# import _thread
import time
from modian import newOrder
from weibo import Weibo
from koudai48 import Koudai
from setting import groupid, md_interval, kd_interval, wb_interval
from CQLog import INFO, WARN
# import the BlockingScheduler from apscheduler
from apscheduler.schedulers.blocking import BlockingScheduler

# same settings as group.py
bot = CQHttp(api_root='http://127.0.0.1:5700/')
# instantiate BlockingScheduler
sched = BlockingScheduler()

global weibo_id_array
global firstcheck_weibo

weibo_id_array = []
firstcheck_weibo = True

# initialize the polling intervals
interval_md = md_interval()
interval_wb = wb_interval()
interval_kd = kd_interval()

# get the CoolQ version
version_dict = bot.get_version_info()
version = version_dict['coolq_edition']
Example #52
import numpy as np
from apscheduler.schedulers.blocking import BlockingScheduler

config = configparser.ConfigParser()
config.read('config.ini')

bot = telegram.Bot(token=(config['TELEGRAM']['ACCESS_TOKEN']))

url = 'https://apod.nasa.gov/apod/astropix.html'
url_zh = 'http://sprite.phys.ncku.edu.tw/astrolab/mirrors/apod/apod.html'

def job():
    id = np.loadtxt('id.txt')
    id = np.unique(id)
    pic = getapod.get_pic(url)
    exp = getapod.get_exp(url_zh,'zh')
    bot.send_message(chat_id='@APOD_hans', text = pic)
    bot.send_message(chat_id='@APOD_hans', text= exp , parse_mode='Markdown')
    time.sleep(1)
    
    for w in id:
        time.sleep(1)
        bot.send_message(chat_id=w, text = pic)
        bot.send_message(chat_id=w, text= exp , parse_mode='Markdown')
    

scheduler = BlockingScheduler()
scheduler.add_job(job,'cron', day_of_week='0-6', hour=18, minute=10)
scheduler.start()

Example #53
from notify.notificacao import Alerta
from apscheduler.schedulers.blocking import BlockingScheduler
from datetime import datetime as dt
import json


def realiza_busca():
    inicio = dt.now().strftime('%Y-%m-%d %H:%M')
    print('starting search \ntime: {}'.format(dt.now().strftime('%d/%m/%Y %H:%M:%S')))
    with open('configuracao.json', 'rb') as arquivo:
        configuracao = json.load(arquivo)
        for item_configuracao in configuracao['itens']:
            crawler = Crawler(item_configuracao['nome'], item_configuracao['ordenar_consulta'])
            pagina = crawler.obter_pagina()
            if pagina is not None:
                lista = Parser.separar_dados(pagina)
                if len(lista) > 0:
                    bd = Conexao(r'./persist/bd_info_buscape.db')
                    for item in lista.items():
                        bd.inserir_bd(item[1]['produto'].lower(), float(item[1]['valor'].replace('.', '')), item[1]['link'], item_configuracao['nome'], float(item_configuracao['preco_alvo']))
        alerta = Alerta(configuracao['email']['usuario'], configuracao['email']['senha'], bd)
        alerta.valida_envio_notificacao(configuracao, inicio)
        bd.desconectar_bd()
        print('search finished \ntime: {} \n'.format(dt.now().strftime('%d/%m/%Y %H:%M:%S')))

if __name__ == '__main__':
    print('starting monitoring activity {} \n'.format(dt.now().strftime('%d/%m/%Y %H:%M:%S')))
    agendador = BlockingScheduler()
    agendador.add_job(realiza_busca, 'interval', hours=1)
    agendador.start()
Example #54
from datetime import datetime, timedelta

import django

from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler()
django.setup(set_prefix=False)


@scheduler.scheduled_job('interval', minutes=1)
def delete_hour_old_endpoints():
    print('THIS RUNS EVERY MINUTE TO DELETE HOUR OLD ENDPOINTS\n')
    from endpoints import models as endpoints_models
    endpoints_models.Endpoint.objects.filter(created_at__lte=datetime.now() -
                                             timedelta(minutes=60)).delete()


scheduler.start()
Example #55
         sys.exit(-1)
 if test:
     try:
         logger.info('Now testing submit.')
         for user in conf['users']:
             submit(username=user['username'],
                    password=user['password'],
                    send_mail_instance=send_mail_instances[0])
     except Exception:
         logger.exception('Exception raised when testing submit, exit...')
         sys.exit(-1)
 # retry after 20s when job raised exceptions
 # Refer to https://www.cnblogs.com/quijote/p/4385774.html
 scheduler = BlockingScheduler(jobstore_retry_interval=20,
                               job_defaults={
                                   'coalesce': True,
                                   'max_instances': len(conf['users']) * 2,
                                   'misfire_grace_time': 60 * 60 * 9
                               })
 for i, user in enumerate(conf['users']):
     trigger1 = CronTrigger(hour=user['hour'], minute=user['minute'])
     scheduler.add_job(submit,
                       trigger1,
                       kwargs={
                           'username': user['username'],
                           'password': user['password'],
                           'send_mail_instance': send_mail_instances[i]
                       })
     trigger2 = CronTrigger(hour=user['hour'] +
                            ((user['minute'] + 5) // 60),
                            minute=((user['minute'] + 5) % 60))
     scheduler.add_job(submit,
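The trigger2 arithmetic above shifts each user's cron schedule five minutes later, carrying any overflow into the hour; a worked example with illustrative values:

from apscheduler.triggers.cron import CronTrigger

hour, minute = 9, 58                                       # a user scheduled at 09:58
trigger1 = CronTrigger(hour=hour, minute=minute)
trigger2 = CronTrigger(hour=hour + ((minute + 5) // 60),   # 9 + 1 = 10
                       minute=(minute + 5) % 60)           # 63 % 60 = 3, i.e. 10:03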
Example #56
class WorkInfoMigration:
    def __init__(self, cfg):
        self.cfg = cfg
        self.WORKINFO_REPO = {}

        self._initConfig()

    def _initConfig(self):
        self.systemName = self.cfg.get('MODULE_CONF', 'TACS_SYSTEM_NAME')
        self.workInfoBaseDir = self.cfg.get('MODULE_CONF', 'TACS_WORKINFO_RAW')

        self.auditLogTempDir = self.cfg.get('MODULE_CONF',
                                            'TACS_AUDITLOG_TEMP')
        self.auditLogBaseDir = self.cfg.get('MODULE_CONF',
                                            'TACS_AUDITLOG_PATH')
        self.receivedWorkCode = self.cfg.get('MODULE_CONF',
                                             'RECEIVED_WORK_CODE')

        self.tangoWmWorkInfoUrl = self.cfg.get('MODULE_CONF',
                                               'TANGO_WM_WORKINFO_URL')
        self.tangoWmEqpInfoUrl = self.cfg.get('MODULE_CONF',
                                              'TANGO_WM_EQPINFO_URL')
        self.xAuthToken = self.cfg.get('MODULE_CONF', 'TANGO_WM_X_AUTH_TOKEN')

        self.host = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_HOST')
        self.port = int(self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_PORT'))
        self.user = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_USER')
        self.passwd = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_PASSWD')

        self.scheduleInterval = self.cfg.get('MODULE_CONF',
                                             'SCHEDULE_INTERVAL_MIN')

        self.stdoutSleepTime = int(
            self.cfg.get('MODULE_CONF', 'STDOUT_SLEEP_TIME'))
        self.migrationPath = self.cfg.get('MODULE_CONF',
                                          'COLLECT_MIGRATION_PATH')

        self.headers = {
            'x-auth-token': self.xAuthToken,
            'Content-Type': 'application/json; charset=utf-8'
        }
        self.migration = False

        self.errFilePath = self.cfg.get('MODULE_CONF', 'ERROR_FILE_PATH')
        self.searchStartDate = None
        self.searchEndDate = None
        self.migrationProcFlag = False

    def _stdout(self, msg):
        sys.stdout.write('stdout' + msg + '\n')
        sys.stdout.flush()
        __LOG__.Trace('stdout: %s' % msg)

    def _executeScheduler(self):
        try:
            __LOG__.Trace('scheduler process start')
            fileNameList = self._observFileNameList()

            # request workInfo
            workIdList = self._lookupWorkInfo()

            # request eqpInfo by workId
            self._lookupEqpInfo(workIdList)
        except:
            __LOG__.Exception()

    def _observFileNameList(self):
        self._mkdirs(self.errFilePath)
        fileNameList = os.listdir(self.errFilePath)

        # drop names that do not parse as dates
        for oneFileName in list(fileNameList):
            try:
                dparser.parse(oneFileName)
            except ValueError:
                fileNameList.remove(oneFileName)
        return fileNameList

    def _lookupWorkInfo(self, fromDate=None, toDate=None, migration=False):
        self.searchStartDate = fromDate
        self.searchEndDate = toDate

        if not migration:
            searchEndDateObj = datetime.now()
            #searchStartDateObj  = datetime(searchEndDateObj.year, searchEndDateObj.month, searchEndDateObj.day, searchEndDateObj.hour, (searchEndDateObj.minute - int(self.scheduleInterval)))
            searchStartDateObj = searchEndDateObj - timedelta(minutes=1)

            self.searchStartDate = searchStartDateObj.strftime('%Y%m%d%H%M')
            self.searchEndDate = searchEndDateObj.strftime('%Y%m%d%H%M')

        __LOG__.Trace('lookup workInfo from({}) ~ to({})'.format(
            self.searchStartDate, self.searchEndDate))

        url = self.tangoWmWorkInfoUrl.format(self.systemName,
                                             self.searchStartDate,
                                             self.searchEndDate)
        __LOG__.Trace('request workInfo url: {}'.format(url))

        rawDict = self._requestGet(url)
        return self._loadWorkInfo(rawDict)

    def _lookupEqpInfo(self, workIdList):
        if not workIdList:
            __LOG__.Trace('workIdList is empty')
        else:
            logDictList = list()
            yyyyMMdd = None
            eventDate = None

            for oneWorkId in workIdList:
                url = self.tangoWmEqpInfoUrl.format(self.systemName, oneWorkId)
                __LOG__.Trace('request eqpInfo url: {}'.format(url))

                rawDict = self._requestGet(url)
                logDict, yyyyMMdd, eventDate = self._loadEqpInfo(
                    oneWorkId, rawDict, logDictList)
                logDictList.append(logDict)
            if rawDict:
                self._writeTacsHistoryFile(yyyyMMdd, eventDate, logDictList)
            else:
                __LOG__.Trace('eqpInfo Dict is None {}'.format(rawDict))

    def _requestGet(self, url, verify=False):
        rawDict = None
        response = None

        try:
            response = requests.get(url=url,
                                    headers=self.headers,
                                    verify=verify)

            if response is not None and response.status_code == 200:
                rawDict = response.json()
            else:
                __LOG__.Trace(
                    '!!! Exception !!! requestGet failed. statusCode: {}'.
                    format(response.status_code))
                self.createDateFile('{}_{}'.format(self.searchStartDate,
                                                   self.searchEndDate))
                pass

        except:
            __LOG__.Exception()
            self.createDateFile('{}_{}'.format(self.searchStartDate,
                                               self.searchEndDate))
            pass

        return rawDict

    def _loadWorkInfo(self, rawDict):
        if rawDict:
            __LOG__.Trace('workInfo rawData: {}'.format(rawDict))
            workIdList = []

            if type(rawDict['workInfo']) is list:
                for oneWorkInfo in rawDict['workInfo']:
                    workId = oneWorkInfo['workId']
                    __LOG__.Trace('workId: {}'.format(workId))
                    if workId is None or not workId:
                        __LOG__.Trace('invalid workId({})'.format(workId))
                        continue

                    workIdList.append(workId)

                    wrapper = {}
                    wrapper['workInfo'] = oneWorkInfo

                    workEvntDate = datetime.now().strftime('%Y%m%d%H%M%S')
                    wrapper['workInfo']['workEvntDate'] = workEvntDate

                    self.WORKINFO_REPO[workId] = wrapper
                __LOG__.Trace('WORKINFO_REPO: {}'.format(self.WORKINFO_REPO))
            else:
                __LOG__.Trace('Unsupported type: {}'.format(
                    type(rawDict['workInfo'])))
                pass

            return workIdList
        else:
            __LOG__.Trace('workInfo rawData is None')
            return None

    def _loadEqpInfo(self, oneWorkId, rawDict, logDictList):
        logDict = dict()
        yyyyMMdd = None
        eventDate = None

        if rawDict:
            __LOG__.Trace('eqpInfo rawData: {}'.format(rawDict))
            if 'eqpInfo' in rawDict and type(rawDict['eqpInfo']) is list:
                scriptFileList = []
                wrapper = self.WORKINFO_REPO[oneWorkId]
                if wrapper:
                    wrapper['eqpInfo'] = rawDict['eqpInfo']
                    for oneEqpInfoDict in rawDict['eqpInfo']:
                        if 'scriptInfo' in oneEqpInfoDict:
                            scriptInfoList = oneEqpInfoDict['scriptInfo']

                            if scriptInfoList:
                                for oneScriptInfoDict in scriptInfoList:
                                    filePathname = oneScriptInfoDict[
                                        'atchdPathFileNm']
                                    if filePathname:
                                        remoteFilepath, remoteFilename = os.path.split(
                                            filePathname)
                                        __LOG__.Trace(
                                            'remoteFilepath({}), remoteFilename({})'
                                            .format(remoteFilepath,
                                                    remoteFilename))
                                        scriptFileDict = {}
                                        scriptFileDict[
                                            'remoteFilepath'] = remoteFilepath
                                        scriptFileDict[
                                            'remoteFilename'] = remoteFilename

                                        scriptFileList.append(scriptFileDict)
                                    else:
                                        __LOG__.Trace(
                                            'workId({})/eqpNm({}) atchdPathFileNm({}) is invalid'
                                            .format(oneWorkId,
                                                    oneEqpInfoDict['eqpNm'],
                                                    filePathname))
                                        pass
                            else:
                                __LOG__.Trace(
                                    'workId({})/eqpNm({}) scriptInfoList({}) is invalid'
                                    .format(oneWorkId, oneEqpInfoDict['eqpNm'],
                                            scriptInfoList))
                        else:
                            __LOG__.Trace(
                                'workId({})/eqpNm({}) scriptInfo does not exist in eqpInfo'
                                .format(oneWorkId, oneEqpInfoDict['eqpNm']))
                            pass
                else:
                    __LOG__.Trace(
                        'no registered workId({}) in WORKINFO_REPO'.format(
                            oneWorkId))
                    return

                __LOG__.Trace('scriptFileList: {}'.format(scriptFileList))
                eventDate = wrapper['workInfo']['workEvntDate']
                yyyyMMdd = datetime.strptime(eventDate,
                                             '%Y%m%d%H%M%S').strftime('%Y%m%d')
                __LOG__.Trace('eventDate({}), yyyyMMdd({})'.format(
                    eventDate, yyyyMMdd))
                self._getScriptFiles(yyyyMMdd, oneWorkId, scriptFileList)

                logDict = self._writeTangoWorkFile(yyyyMMdd, eventDate,
                                                   oneWorkId, wrapper)

                self._removeCompleteWorkInfo(oneWorkId)
            else:
                __LOG__.Trace(
                    'Unsupported type: {}'.format(type(rawDict.get('eqpInfo'))))
        else:
            __LOG__.Trace(
                'workId({}), eqpInfo rawData is None'.format(oneWorkId))
            pass

        return logDict, yyyyMMdd, eventDate

    def _getScriptFiles(self, yyyyMMdd, workId, scriptFileList):
        if not scriptFileList:
            __LOG__.Trace('scriptFileList({}) is empty'.format(scriptFileList))
            return

        try:
            tacsWorkInfoPath = os.path.join(self.workInfoBaseDir, yyyyMMdd,
                                            workId)
            self._mkdirs(tacsWorkInfoPath)

            sftpClient = SFTPClient.SftpClient(self.host, self.port, self.user,
                                               self.passwd)
            for oneScriptFileDict in scriptFileList:
                remoteFilepath = oneScriptFileDict['remoteFilepath']
                remoteFilename = oneScriptFileDict['remoteFilename']

                sftpClient.download(remoteFilepath, remoteFilename,
                                    tacsWorkInfoPath)
                __LOG__.Trace(
                    'scriptFile from({}) -> to({}) download succeed'.format(
                        os.path.join(remoteFilepath, remoteFilename),
                        os.path.join(tacsWorkInfoPath, remoteFilename)))

            sftpClient.close()
        except Exception as ex:
            __LOG__.Trace('scriptFile download proccess failed {}'.format(ex))
            self._removeCompleteWorkInfo(workId)
            raise ex

    def _writeTangoWorkFile(self, yyyyMMdd, eventDate, workId, wrapper):
        logDict = {}
        try:
            tacsWorkInfoPath = os.path.join(self.workInfoBaseDir, yyyyMMdd,
                                            workId)
            self._mkdirs(tacsWorkInfoPath)

            contents = json.dumps(wrapper, ensure_ascii=False)
            __LOG__.Trace('contents: {}'.format(contents))
            createFilePath = os.path.join(
                tacsWorkInfoPath, '{}_{}_META.json'.format(eventDate, workId))
            self._createFile(createFilePath, contents)
            logDict['tacsLnkgRst'] = 'OK'

            if self.migration:
                __LOG__.Trace([
                    'mf', '30000', 'put', 'dbl',
                    'stdoutfile://{}'.format(createFilePath)
                ])
                subprocess.call([
                    'mf', '30000',
                    'put,dbl,stdoutfile://{}'.format(createFilePath)
                ])
            else:
                time.sleep(self.stdoutSleepTime)
                self._stdout('file://{}'.format(createFilePath))
        except Exception as ex:
            __LOG__.Trace('workFile write process failed {}'.format(ex))
            logDict['tacsLnkgRst'] = 'FAIL'
            logDict['tacsLnkgRsn'] = ex.args
            self._removeCompleteWorkInfo(workId)
            raise ex
        finally:
            logDict['evntTypCd'] = self.receivedWorkCode
            logDict['evntDate'] = eventDate
            logDict['workId'] = workId
            logDict['lnkgEqpIp'] = ''

        return logDict


#			self._writeTacsHistoryFile(yyyyMMdd, eventDate, logDict)

    def _writeTacsHistoryFile(self, yyyyMMdd, eventDate, logDictList):
        if logDictList:
            __LOG__.Trace('received workInfo history: {}'.format(logDictList))
            try:
                tacsHistoryTempPath = os.path.join(
                    self.auditLogTempDir,
                    'AUDIT_{}'.format(self.receivedWorkCode))
                self._mkdirs(tacsHistoryTempPath)
                contentList = list()

                for oneLogDict in logDictList:
                    content = json.dumps(oneLogDict, ensure_ascii=False)
                    contentList.append(content)

                contents = '\n'.join(contentList)

                __LOG__.Trace('contents: {}'.format(contents))

                tacsHistoryFilename = self._getTacsHistoryFilename(
                    yyyyMMdd, eventDate)
                __LOG__.Trace(
                    'tacsHistoryFilename: {}'.format(tacsHistoryFilename))
                self._createFile(
                    os.path.join(tacsHistoryTempPath, tacsHistoryFilename),
                    contents)

                tacsHistoryPath = os.path.join(
                    self.auditLogBaseDir,
                    'AUDIT_{}'.format(self.receivedWorkCode))
                self._mkdirs(tacsHistoryPath)

                shutil.move(
                    os.path.join(tacsHistoryTempPath, tacsHistoryFilename),
                    os.path.join(tacsHistoryPath, tacsHistoryFilename))
                __LOG__.Trace(
                    'tacsHistory file move from {} -> to {} succeed'.format(
                        os.path.join(tacsHistoryTempPath, tacsHistoryFilename),
                        os.path.join(tacsHistoryPath, tacsHistoryFilename)))
            except Exception as ex:
                __LOG__.Trace('tacsHistory {} load process failed {}'.format(
                    logDictList, ex))
        else:
            __LOG__.Trace(
                'received workInfo history({}) is invalid'.format(logDictList))

    def _mkdirs(self, directory):
        __LOG__.Trace('{} isExists: {}'.format(directory,
                                               os.path.exists(directory)))
        if not os.path.exists(directory):
            __LOG__.Trace('create directories {}'.format(directory))
            os.makedirs(directory)

    def _createFile(self, filePath, contents):
        f = None
        try:
            f = open(filePath, 'w')
            f.write(contents)
            __LOG__.Trace('{} file is created'.format(filePath))
        except Exception as ex:
            __LOG__.Trace('{} to file process failed {}'.format(contents, ex))
            raise ex
        finally:
            if f:
                f.close()

    def _getTacsHistoryFilename(self, yyyyMMdd, eventDate):
        HHmm = datetime.strptime(eventDate, '%Y%m%d%H%M%S').strftime('%H%M')
        tacsHistoryFilename = '{}_{}_{}.audit'.format(yyyyMMdd, HHmm,
                                                      uuid.uuid4())
        return tacsHistoryFilename

    def _removeCompleteWorkInfo(self, workId):
        if workId in self.WORKINFO_REPO:
            del self.WORKINFO_REPO[workId]
            __LOG__.Trace('workId({}), WORKINFO_REPO: {}'.format(
                workId, self.WORKINFO_REPO))

    def shutdown(self):
        try:
            if self.scheduler:
                # self.scheduler.remove_job('workInfo_scheduler')
                self.scheduler.shutdown()
                __LOG__.Trace('scheduler is terminated')
            else:
                __LOG__.Trace('scheduler is None')
        except Exception as ex:
            __LOG__.Trace('shutdown failed {}'.format(ex))

    def run(self):

        self.scheduler = BlockingScheduler()
        self.scheduler.add_job(self._executeScheduler,
                               'cron',
                               minute='*/{}'.format(self.scheduleInterval),
                               second='0',
                               id='workInfo_scheduler',
                               max_instances=2)
        self.scheduler.start()
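        # Note on the trigger above (a sketch, assuming self.scheduleInterval
        # is 5): minute='*/5' with second='0' fires at :00, :05, :10, ...,
        # and max_instances=2 permits one overlapping run to start if the
        # previous execution has not finished yet.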
Example #57
0
class StockJob:
    def __init__(self):
        self.scheduler = BlockingScheduler()
        self.pool = None
        if platform.system() == 'Windows':
            self.stockDeal = Trader_gxzq(no=TDX_USER, pwd=TDX_PWD, dimpwd=TDX_DIMPWD)
        self.isopen = False
        self.localData = LocalData.getInstance()
        self.stockData = StockData.getInstance()
        self.tdxData = TdxData.getInstance()
        self.mailUtil = MailUtil.getInstance()


    def add_interval_job(self, fun, days=1, start='2020-05-07 20:56:00'):
        # starting from start_date (default 2020-05-07 20:56:00), run once
        # every `days` days
        self.scheduler.add_job(fun, 'interval', days=days, start_date=start)
    '''
    cron: fires periodically at specific times:
        year: 4-digit year
        month: month (1-12)
        day: day of month (1-31)
        week: ISO week (1-53)
        day_of_week: day of the week (0-6 or mon,tue,wed,thu,fri,sat,sun)
        hour: hour (0-23)
        minute: minute (0-59)
        second: second (0-59)
        start_date: earliest possible trigger time
        end_date: latest possible trigger time
        timezone: time zone used to evaluate the trigger
    '''
    def add_cron_job(self, fun, month='*', day='*', day_of_week='*', hour='*', minute='*'):
        self.scheduler.add_job(func=fun, trigger='cron', month=month, day=day,
                               day_of_week=day_of_week, hour=hour, minute=minute)
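    # A hypothetical usage sketch of the helper above (check_signals is an
    # assumed job function, not part of this example): run it at 09:31 on
    # weekdays.
    #   job = StockJob.getInstance()
    #   job.add_cron_job(check_signals, day_of_week='mon-fri', hour=9, minute=31)
    #   job.start()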
    def add_date_job(self, fun, date):
        self.scheduler.add_job(fun, 'date', run_date=date)

    def start(self):
        self.scheduler.start()

    def stop(self):
        self.scheduler.shutdown()

    # Backtest
    def test(self, default=True):
        config.putByKey('TDX_CATEGORY', '6')
        if default:  # by default, backtesting is only allowed on trading days
            now = time.strftime("%Y%m%d", time.localtime())
            if not DEBUG and self.stockData.isOpen(now) <= 0:
                return
        # only backtest on trading days; otherwise do nothing
        codes = self.localData.codes(type=1)
        codes = codes[codes['zs'].eq(0)]  # only backtest individual stocks
        for index, code in codes.iterrows():
            logging.debug('%s update started, current index %s, %s remaining' % (code['code'], index, len(codes) - index - 1))
            try:
                g57 = StockTest2(code['code'], 0, 'Test2')
                g57.run()
            except Exception as e:
                logging.error(e)
        # after backtesting completes, set the stock pool
        self.pool = self.localData.result_report()

    # Download data
    def download(self):
        config.putByKey('TDX_CATEGORY', '6')
        codes = self.localData.codes()
        codes = codes[codes['zs'].eq(0)]
        for index, code in codes.iterrows():
            try:
                self.localData.data(code['code'], code['zs'])
                time.sleep(0.2)  # throttle: the API allows 60 requests per minute
            except Exception as e:
                logging.error(e)
    # Update stock codes and sectors, and initialize watched shareholders
    def update_codes(self):
        self.stockData.updateStocks()
        # self.tdxData.updateBk()
        self.localData.init_gdwarn()
        # self.localData.update_stock_pool()

    # Update sectors
    def update_bks(self):
        self.tdxData.updateBk()

    # Update shareholders
    def update_gds(self):
        self.tdxData.updateGDs()

    # Update dividends
    def update_fhs(self):
        self.tdxData.updateFhYears()
    # Open the trading client
    def open_tdx(self):
        now = time.strftime("%Y%m%d", time.localtime())
        if not DEBUG and self.stockData.isOpen(now) <= 0:
            return
        logging.debug('Trading client logging in...')
        self.stockDeal.login()
        time.sleep(10)
        self.stockDeal.click_tree_item('买入')
        # after a successful login, sleep a few seconds before fetching positions
        time.sleep(3)
        results, orders = self.stockDeal.orders()
        self.les_money = float(results['可用'])
        # Buyable stocks are kept in a database table (a stock is dropped from
        # the watch list when its monthly line turns to sell; on close, traded
        # stocks are merged back into the table, and if the table is empty a
        # random pick is made from this month's tradable stocks).
        # Fetch: number of buyable slots, the buy pool, the per-stock budget,
        # and the sellable pool.
        nums, buy_data, level_money, seal_data = self.localData.get_buy_stocks(self.les_money)
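        # A hypothetical shape of the values returned above (illustrative
        # only, not taken from this example):
        #   nums=2, buy_data=['600000', '000001'],
        #   level_money={'600000': 5000.0, '000001': 5000.0},
        #   seal_data={'600519': 1}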
        self.level_money = level_money
        self.seal_data = seal_data
        self.buy_pools = {}  # keep the buy pool as a {code: num} mapping
        if len(buy_data) > 0:
            for i in range(nums):
                self.buy_pools[buy_data[i]] = 0
        # look up the monthly backtest records for current positions and cache
        # their trade state
        self.seal_pools = {}  # stocks available to sell
        try:
            for i in range(len(orders)):
                self.seal_pools[orders[i][0]] = orders[i][4]
            self.isopen = True
            logging.debug('Login succeeded.\nAvailable funds %s, buy pool %s, positions %s' % (self.les_money, str(self.buy_pools), str(self.seal_pools)))
        except Exception as e:
            logging.debug('Failed to fetch positions: %s' % e)
    # Close the trading client
    def close_tdx(self):
        now = time.strftime("%Y%m%d", time.localtime())
        if not DEBUG and self.stockData.isOpen(now) <= 0:
            return
        logging.debug('Trading client closing...')
        try:
            # today's fills; send an email notification if there are any
            dayorders = self.stockDeal.day_orders()
            # close the trading client
            self.stockDeal.destory()
            # update the trade table with today's fills
            self.localData.update_stock_pool(dayorders, handle=1)
            self.mailUtil.sendEmail([MAIL_USER], "Today's fills", self.localData.get_order_msg(dayorders))
            logging.debug('Client closed. Fill details for today:\n%s' % ("\n".join('%s' % id for id in dayorders)))
        except Exception as e:
            logging.error('close_tdx failed: %s' % e)
        self.isopen = False
    # Per-stock alert trading
    def update_datas(self):
        now = time.strftime("%Y%m%d", time.localtime())
        if not DEBUG and self.stockData.isOpen(now) <= 0:
            return
        if not DEBUG and not self.localData.is_deal_time():
            return
        if not self.isopen:
            self.close_tdx()
            self.open_tdx()
        # set initial data
        config.putByKey('TDX_CATEGORY', '9')
        pools = {}
        pools.update(self.buy_pools)
        pools.update(self.seal_pools)
        pools_data = []
        for code, num in pools.items():
            # refresh data
            df = self.localData.data(code, 0)
            current_price = df.tail(1)['close'].values[0]
            # backtest
            g57 = StockTest3(code, 0, 'Test3', data=df)
            order, result, msgs = g57.run()
            # parse the backtest result: date, type, price, lots, turnover, fees
            # base = self.localData.get_base(code, 0)
            arr = order.split('\t')
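            # A hypothetical example of the tab-separated `order` string
            # parsed above (values are illustrative only):
            #   '20200507\t1\t12.34\t5\t6170.0\t5.0'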
            if arr[0] != now:  # only act on orders dated today
                continue
            # the lot size must be recalculated
            if arr[1] == '1':  # buy
                self.stockDeal.click_toolbar('买入')
                logging.debug('Buy: %s' % order)
                if len(self.buy_pools) > 0:  # trade only while the monthly line is in a buy state
                    num = int(self.level_money[code] / current_price / 100) * 100
                    # place the order by code
                    # self.stockDeal.buy(code=code, type=base['type'], price=current_price, num=num)
                    self.buy_pools.pop(code, '404')  # remove from the buy pool after buying
                    pools_data.append({'code': code, 'state': 1, 'num': num})
                    logging.info('Buy succeeded: code %s, price %s, quantity %s' % (code, current_price, num))
                else:
                    logging.error('Buy failed: buy pool %s, pending code %s, price %s' % (self.buy_pools, code, current_price))
                    self.mailUtil.sendEmail([MAIL_USER], 'Trade failed, please check', 'Buy failed: %s' % order)
            elif arr[1] == '0':  # sell
                self.stockDeal.click_toolbar('卖出')
                logging.debug('Sell: %s' % order)
                num = self.seal_pools.get(code, 0)
                if num == 0:
                    err_msg = 'Sell failed: code %s, current price %s, quantity %s' % (code, current_price, num)
                    logging.error(err_msg)
                    self.mailUtil.sendEmail([MAIL_USER], 'Trade failed, please check', err_msg)
                    continue
                # self.stockDeal.seal(code=code, type=base['type'], price=current_price, num=num)
                pools_data.append({'code': code, 'state': 0, 'num': 0})
                self.seal_pools.pop(code, '404')
                logging.info('Sell succeeded: code %s, price %s, quantity %s' % (code, current_price, num))
                # if the monthly line is in a buy state, add it back to the buy pool
                if self.seal_data.get(code, 0) > 0:
                    self.buy_pools[code] = self.stockDeal.get_money(current_price, num)
                else:
                    logging.error('Sell pool %s does not contain %s' % (str(self.seal_data), code))

    @classmethod
    def getInstance(cls):
        if not hasattr(StockJob, "_instance"):
            StockJob._instance = StockJob()
        return StockJob._instance
Example #58
0
def send_job():
    (session, uid) = login(username, password)
    msg = ""
    # pick the greeting prefix by hour; combined with "安!世界" below this
    # yields "早安/午安/晚安!世界" ("Good morning/afternoon/night! World"),
    # so the Chinese fragments are kept as functional message content
    if datetime.now().hour == 8:
        msg = "早"
    elif datetime.now().hour == 12:
        msg = "午"
    elif datetime.now().hour == 23:
        msg = "晚"
    (text, url_pic) = get_data()
    message = msg + "安!世界\n" + text
    send_wb(session, message, url_pic)
    session.close()

if __name__ == '__main__':
    print("name:", sys.argv[0])
    for i in range(1, len(sys.argv)):
        print("parameter", i, sys.argv[i])
    scheduler = BlockingScheduler()
    # fires at 08:00 and 23:00 (minute and second default to 0); the cron
    # list must not contain spaces, so "8, 23" is corrected to "8,23"
    scheduler.add_job(send_job, 'cron', hour="8,23")
    scheduler.add_job(send_job, 'cron', hour="12", minute="25")
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass

        
Example #59
0
# -*- coding: UTF-8 -*-
import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from red import redGrab
from sign_in import signIn

if __name__ == '__main__':

    cookie = "isg=BOLiWhcNyxiqXdZyVZYAuhHmOWxEM-ZN8YS37Sx7DdUr_4F5FMCpWvTsKf2odF7l; UTUSER=13427833; ut_ubt_ssid=esdsjykvbd6j51tpnazd6pgcxf9err7f_2019-07-10; tzyy=0e652fd568ee60d48e3d6a94b4971d3b; _utrace=57b668bd1b7ebe88b473406d60dd537d_2018-12-14; cna=ffeZFOshGXQCAbfvpoparfGj; track_id=1544751458|54c1a55c86c1a9ace6ea3e04d283945bd0c259ad9676626eac|6fce7a0c2f3ea42754a659127f065306; ubt_ssid=aucakq7qtf2n8v98iy5cs62nslkrn77b_2018-12-14; perf_ssid=91ocoxg4sesq2m126eqhernl65dsm1lc_2018-12-14; USERID=13427833; SID=bZm0fK8PjJCoKoaqmUI4lQigt63YMIEiXBSA"
    scheduler = BlockingScheduler()
    red_grab = redGrab()  # renamed so the instance no longer shadows the redGrab class
    sign = signIn()
    # grab red packets one second before 09:00, 14:00 and 17:00; the three
    # identical jobs are consolidated into one cron expression
    scheduler.add_job(func=red_grab.sendRed,
                      args=(cookie, ),
                      trigger='cron',
                      hour='8,13,16',
                      minute=59,
                      second=59)
    scheduler.start()
Example #60
0
            --executor-memory 5g \
            --executor-cores 5 \
            --num-executors 10 \
            --conf "spark.executor.memoryOverhead=1g" \
            --conf "spark.pyspark.python=/opt/anaconda3/bin/python" \
            --conf "spark.driver.maxResultSize=23g" \
            --conf "spark.kryoserializer.buffer.max=1024m" \
            --py-files dpd.zip  \
            .{script}.py >./log/{d}.log 2>&1 &
            """
    sp.Popen(submit, shell=True)  # re-indented: this call belongs inside the enclosing submit() function

def my_job():
    today = dt.now()
    day_of_month = today.day
    day_of_week = today.weekday()
    hour = today.hour
    if hour == 3:
        submit("daily1")
    if day_of_week == 0 and hour == 10:
        submit("daily2")
    if day_of_month == 1 and hour == 10:
        submit("daily3")
    if day_of_week == 6 and hour == 3:
        submit1("daily4")

if __name__=="__main__":
    # submit("weekly")
    # submit("monthly")
    scheduler=BlockingScheduler()
    scheduler.add_job(func=my_job,trigger="cron",hour="*/1")
    scheduler.start()
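
# Note: the hourly dispatcher above could equivalently be registered as
# separate cron jobs (a sketch, assuming the same submit() helper; both
# APScheduler's day_of_week and Python's weekday() number Monday as 0):
#   scheduler.add_job(lambda: submit("daily1"), "cron", hour=3)
#   scheduler.add_job(lambda: submit("daily2"), "cron", day_of_week=0, hour=10)
#   scheduler.add_job(lambda: submit("daily3"), "cron", day=1, hour=10)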