Example #1
def get_mongo_job_stores():
    from pytz import utc
    from apscheduler.schedulers.tornado import TornadoScheduler
    from apscheduler.jobstores.mongodb import MongoDBJobStore
    from apscheduler.executors.pool import ProcessPoolExecutor
    from ops.apscheduler.db_context import get_mongo_client

    client = get_mongo_client()

    jobstores = {
        'mongo': MongoDBJobStore(collection='job',
                                 database='apscheduler',
                                 client=client),
        'default': MongoDBJobStore(collection='job',
                                   database='apscheduler2',
                                   client=client),
    }
    executors = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        },
        'processpool': ProcessPoolExecutor(max_workers=5)
    }
    job_defaults = {'coalesce': False, 'max_instances': 3}
    scheduler = TornadoScheduler()
    scheduler.configure(jobstores=jobstores,
                        executors=executors,
                        job_defaults=job_defaults,
                        timezone=utc)

    return scheduler
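A minimal usage sketch (an assumption, not part of the original): the returned TornadoScheduler still has to be started, and it schedules its wakeups on Tornado's IOLoop, so the loop must be running as well.

import tornado.ioloop

scheduler = get_mongo_job_stores()
scheduler.start()  # registers wakeup callbacks on the current IOLoop
tornado.ioloop.IOLoop.current().start()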
class ProjectManagerServer():
    def __init__(self):
        self._jobs = []
        tornado.options.parse_command_line()
        application = tornado.web.Application(
                [
                    # --- * PROJECT * ---
                    (r'/', ListProjectsHandler),
                    (r'/project/(.+)', ProjectDetailsHandler), # project id

                    # --- * JOB * ---
                    (r'/exec/([^/]+)/([^/]+)', ExecuteProjectJobHandler), # project id, job id
                    # View job configuration context. Args: job id
                    (r'/job/([^/]+)/configuration', JobConfigurationHandler),

                    # --- * CONFIGURATION * ---
                    # Change branch 
                    (r'/change/branch', ChangeBranchHandler),
                    ],
                template_path = os.path.join(os.path.dirname(__file__), 'templates'),
                static_path = os.path.join(os.path.dirname(__file__), 'static')
        )
        self.http_server = tornado.httpserver.HTTPServer(application)

        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }

        logger.info("Initializing scheduler (pid: {}, port: '{}')...".format(os.getpid(), settings.HTTP_PORT))
        self.scheduler = TornadoScheduler()
        self.scheduler.configure(executors = executors)
        self.scheduler.start()

    # @classmethod
    def start(self):
        signal.signal(signal.SIGINT, self._signal_handler)
        self.http_server.listen(settings.HTTP_PORT)
        tornado.ioloop.IOLoop.current().start()

    def _signal_handler(self, signal_type, frame):
        if signal_type == signal.SIGINT:
            logger.info('SIGINT')
        else:
            logger.warning('Unknown signal')

        self.terminate()

    # @classmethod
    def terminate(self):
        logger.info('Stopping console...')
        self.scheduler.shutdown()
        tornado.ioloop.IOLoop.current().stop()
        sys.exit(0)
Example #3
def get_scheduler(store_path=None, log_file=None):
    # Note: store_path is accepted but currently unused; jobs live in Redis below.
    if store_path is None:
        store_path = r'jobstore.sqlite'
    if log_file is None:
        log_file = r'logger.log'
    scheduler = TornadoScheduler({'apscheduler.timezone': 'Asia/Shanghai'})
    jobstores = {'default': RedisJobStore(host='10.134.103.241', port=6379)}
    executors = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(5)
    }
    job_defaults = {'coalesce': False, 'max_instances': 1}
    scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
    # # Event logging
    # scheduler.add_listener(
    #     lambda event: event_listener(event, scheduler),
    #     EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_ADDED | EVENT_JOB_SUBMITTED | EVENT_JOB_REMOVED
    # )
    # Customize logging
    scheduler._logger = modify_logger(scheduler._logger, log_file=log_file)
    return scheduler
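modify_logger is referenced but not shown in this listing. A plausible sketch, assuming it merely attaches a file handler to the scheduler's logger (the body below is hypothetical):

import logging
from logging.handlers import RotatingFileHandler

def modify_logger(logger, log_file='logger.log'):
    # Hypothetical helper: route the scheduler's log records to a rotating file.
    handler = RotatingFileHandler(log_file, maxBytes=10 * 1024 * 1024, backupCount=3)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger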
class ScheduleServer():
    def __init__(self):
        tornado.options.parse_command_line()
        application = tornado.web.Application(
                [
                    (r'/', IndexPageHandler),

                    # --- PROJECT ---  #
                    # List of project summary.
                    (r'/api/v0/projects', apih.ListProjectSummaryHandler),
                    # Details of a project. Args: project id
                    (r'/api/v0/project/([^/]+)/details', apih.ProjectDetailsHandler),
                    # List of details of resources. Args: project id
                    (r'/api/v0/project/([^/]+)/resource/details', apih.ProjectResourceDetailsHandler),
                    # List of language status. Args: project id 
                    (r'/api/v0/project/([^/]+)/translation/status', apih.ProjectTranslationStatusHandler),

                    # --- JOB --- #
                    # List of jobs.
                    (r'/api/v0/jobs', apih.ListJobSummaryHandler),
                    # Summary of a job. Args: job id
                    (r'/api/v0/job/([^/]+)', apih.JobSummaryHandler),
                    # Execute a job. Args: job id
                    (r'/api/v0/job/([^/]+)/exec', apih.JobExecutionHandler),
                    # Details of a job. Args: job id
                    (r'/api/v0/job/([^/]+)/details', apih.JobDetailsHandler),
                    # List of resource file path and slug. Args: job id 
                    (r'/api/v0/job/([^/]+)/resource/slugs', apih.JobResourceSlugsHandler),
                    # List of resource name (in translation platform) and slug. Args: job id
                    (r'/api/v0/job/([^/]+)/translation/slugs', apih.JobTranslationSlugsHandler),
                    # Sync status (only for ResourceUploaderJob or TranslationUploaderJob). Args: job id
                    (r'/api/v0/job/([^/]+)/sync/status', apih.JobSyncStatusHandler),
                    # Job execution status. Args: job id
                    (r'/api/v0/job/([^/]+)/exec/status', apih.JobExecStatusHandler),

                    # maybe /job/([^/]+)/log/context/3 (limit = 3) might be useful

                    # --- CONFIGURATION --- #
                    # not in use now, but kept for a while
                    # (r'/api/v0/config/([^/]+)/([^/]+)', apih.ConfigurationHandler), # job id, 'key' in config file
                    # Contents of project configuration file. Args: project id
                    # (r'/api/v0/config/project/([^/]+)', apih.ProjectConfigurationHandler),
                    # Contents of job configuration file. Args: job id
                    # (r'/api/v0/config/job/([^/]+)', apih.JobConfigurationHandler),
                    # Contents of resource configuration file. Args: resource configuration file name
                    (r'/api/v0/config/resource/([^/]+)', apih.ResourceConfigurationHandler),
                    # Contents of translation configuration file. Args: translation configuration file name
                    (r'/api/v0/config/translation/([^/]+)', apih.TranslationConfigurationHandler),

                    #--- LOG (JOB EXECUTION LOG) --- #
                    # Log context. Args: log path
                    (r'/api/v0/log/([^/]+)/context', apih.LogContextHandler),

                    # --- LOCAL FILES --- #
                    # List local repositories (git repository).
                    (r'/api/v0/local/repositories', apih.ListLocalRepositoriesHandler),
                    # List branches of specified local git repository. Args: repository name
                    (r'/api/v0/local/repository/([^/]+)/branches', apih.ListLocalRepositoryBranchesHandler),
                    # List local files under specified directory. Args: repository name, relative path in the repository
                    (r'/api/v0/local/repository/([^/]+)/files/([^/]+)', apih.ListLocalRepositoryFilesHandler),

                    # --- RESOURCE REPOSITORY --- #
                    # List of repositories. Args: platform name
                    #(r'/api/v0/resource/([^/]+)/repositories', apih.ListResourceRepositoriessHandler),

                    #--- TRANSLATION REPOSITORY --- #
                    # List of projects. Args: platform name
                    (r'/api/v0/translation/([^/]+)/projects', apih.ListTranslationProjectsHandler),
                    # Project details. Args: platform name, project id
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/details', apih.TranslationProjectDetailsHandler),
                    # Resource details. Args: platform name, project id, resource id
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/details', apih.TranslationResourceDetailsHandler),
                    # All strings for a language. Args: platform name, project id, resource id, language code
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/translation/([^/]+)/strings', apih.TranslationTranslationStringsHandler),
                    # Details for a translation string. Args: platform name, project id, resource id, source string id
                    # (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/translation/([^/]+)/details', apih.TranslationTranslationStringDetailsHandler),
                    # Details for a source string. Args: platform name, project id, resource id, source string id (key)
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/source/([^/]+)/details', apih.TranslationSourceStringDetailsHandler)
                ],
                template_path = os.path.join(os.path.dirname(__file__), 'templates'),
                static_path = os.path.join(os.path.dirname(__file__), 'static')
        )
        self.http_server = tornado.httpserver.HTTPServer(application)

        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }

        logger.info("Initializing scheduler (pid: {}, port: '{}')...".format(os.getpid(), settings.HTTP_PORT))
        self.scheduler = TornadoScheduler()
        self.scheduler.configure(executors = executors)
        self._restore_jobs()
        self.scheduler.start()
        logger.info(self.scheduler.print_jobs())

    def _restore_jobs(self):
        global job
        configs = job.get_configuration(status='active')
        total = 0
        for c in configs:
            o = SchedulerJob(c)
            self.scheduler.add_job(o.execute, 'cron',
                                   month=c.month, day=c.day, day_of_week=c.day_of_week,
                                   hour=c.hour, minute=c.minute,
                                   name=c.name, id=c.id, misfire_grace_time=600)
            total += 1
        logger.info("Restored '{}' jobs.".format(total))

    # @classmethod
    def start(self):
        signal.signal(signal.SIGINT, self._signal_handler)
        self.http_server.listen(settings.HTTP_PORT)
        tornado.ioloop.IOLoop.current().start()

    def _signal_handler(self, signal_type, frame):
        if signal_type == signal.SIGINT:
            logger.info('SIGINT')
        else:
            logger.warning('Unknown signal')
        self.terminate()

    # @classmethod
    def terminate(self):
        logger.info('Stopping scheduler...')
        self.scheduler.shutdown()
        tornado.ioloop.IOLoop.current().stop()
        sys.exit(0)
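A minimal entry point for this class might look like the following sketch (the original module's __main__ block is not shown, so this is an assumption):

if __name__ == '__main__':
    server = ScheduleServer()
    server.start()  # blocks in the Tornado IOLoop until SIGINT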
Example #6
import os
from datetime import datetime, timedelta

import tornado.ioloop
import tornado.web
from pytz import utc
from apscheduler.schedulers.tornado import TornadoScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor

# The jobstore/executor definitions were missing from the snippet; these are
# assumed from the "example.sqlite" hint in the print statement below.
jobstores = {'default': SQLAlchemyJobStore(url='sqlite:///example.sqlite')}
executors = {
    'default': ThreadPoolExecutor(20),
    'processpool': ProcessPoolExecutor(5)
}
job_defaults = {'coalesce': False, 'max_instances': 3}


def tick():
    # Assumed job body: the original snippet does not define tick().
    print('Tick! The time is: {}'.format(datetime.now()))


class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("Hello World")


def make_app():
    return tornado.web.Application([
        (r"/", MainHandler),
    ])


if __name__ == '__main__':
    scheduler = TornadoScheduler()
    scheduler.configure(jobstores=jobstores,
                        executors=executors,
                        job_defaults=job_defaults,
                        timezone=utc)
    # alarm_time = datetime.now() + timedelta(seconds=10)
    scheduler.add_job(tick, 'interval', seconds=10)
    # scheduler.add_job(sensorTimerA, 'interval', seconds=19)
    print('To clear the alarms, delete the example.sqlite file.')
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    scheduler.start()
    app = make_app()
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
Example #7
from datetime import datetime

import tornado.ioloop
import tornado.web
from pytz import utc
from apscheduler.schedulers.tornado import TornadoScheduler
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.executors.tornado import TornadoExecutor


def tick():
    # Assumed job body: the snippet's tick() definition was cut off.
    print('Tick! The time is: {}'.format(datetime.now()))


class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("Hello World")


def make_app():
    # Reconstructed: the snippet started mid-way through this function.
    return tornado.web.Application([
        (r"/", MainHandler),
    ])


if __name__ == "__main__":

    scheduler = TornadoScheduler()

    jobstores = {
        'default': MemoryJobStore()
    }
    executors = {
        'default': TornadoExecutor(max_workers=5)
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 1
    }

    scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()

    app = make_app()
    app.listen(8888)

    try:
        tornado.ioloop.IOLoop.current().start()
    except (KeyboardInterrupt, SystemExit):
        pass
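One possible refinement (an assumption, not part of the original): also stop the scheduler when the IOLoop exits, so executor threads do not linger.

try:
    tornado.ioloop.IOLoop.current().start()
except (KeyboardInterrupt, SystemExit):
    pass
finally:
    scheduler.shutdown(wait=False)  # release APScheduler's executor threads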
class SystemManagerServer():
    def __init__(self):
        self._jobs = []
        tornado.options.parse_command_line()
        application = tornado.web.Application(
                [
                    (r'/', IndexHandler),
                    # --- * CONFIGURATION * ---
                    # Context of resource configuration file. Args: resource configuration filename
                    (r'/config/resource/([^/]+)', ResourceConfigHandler),
                    # Context of translation configuration file. Args: translation configuration filename
                    (r'/config/translation/([^/]+)', TranslationConfigHandler),

                    # --- * DASHBOARD * ---
                    # Dashboard.
                    (r'/dashboard', DashboardHandler),

                    # --- * JOB * ---
                    # List of jobs.
                    (r'/jobs', ListJobsHandler),
                    # List resource slugs for resources in a project. Args: job id
                    (r'/job/([^/]+)/check/slugs', CheckSlugsHandler),
                    # Details of a job. Args: job id
                    (r'/job/([^/]+)/details', JobDetailsHandler),
                    # --- * LOG * ---
                    # Context of the most recent log for a job. Args: job id
                    (r'/log/([^/]+)/context', LogContextHandler),

                    # --- * PROJECT * ---
                    # List of projects.
                    (r'/projects', ListProjectsHandler),
                    # Details of a project. Args: project id
                    (r'/project/([^/]+)/details', ProjectDetailsHandler),

                    # --- * TRANSLATION PLATFORM * ---
                    # List of projects in the translation platform (e.g. Transifex). Args: translation platform name
                    (r'/translation/([^/]+)/projects', ListTranslationProjects),
                    # Details of a project in the translation platform. Args: translation platform name, project slug
                    (r'/translation/([^/]+)/project/([^/]+)/details', TranslationProjectDetails),
                    # List of all translation strings for a resource of a language. Args: translation platform name, project slug, resource slug, language code
                    (r'/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/translation/([^/]+)/strings', TranslationProjectTranslationStrings),
                    # Details of a source string. Args: translation platform name, project slug, resource slug
                    (r'/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/source/strings', TranslationProjectSourceStringDetails)
                ],
                template_path = os.path.join(os.path.dirname(__file__), 'templates'),
                static_path = os.path.join(os.path.dirname(__file__), 'static')
        )
        self.http_server = tornado.httpserver.HTTPServer(application)

        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }

        logger.info("Initializing scheduler (pid: {}, port: '{}')...".format(os.getpid(), settings.HTTP_PORT))
        self.scheduler = TornadoScheduler()
        self.scheduler.configure(executors = executors)
        self.scheduler.start()

    # @classmethod
    def start(self):
        signal.signal(signal.SIGINT, self._signal_handler)
        self.http_server.listen(settings.HTTP_PORT)
        tornado.ioloop.IOLoop.current().start()

    def _signal_handler(self, signal_type, frame):
        if signal_type == signal.SIGINT:
            logger.info('SIGINT')
        else:
            logger.warning('Unknown signal')

        self.terminate()

    # @classmethod
    def terminate(self):
        logger.info('Stopping console...')
        self.scheduler.shutdown()
        tornado.ioloop.IOLoop.current().stop()
        sys.exit(0)
class ExperimentScheduler():
        def __init__(self, Experiment):
                executors = {
                    'default': ThreadPoolExecutor(20),
                    'processpool': ProcessPoolExecutor(5)
                }
                self.CommandDictionary = {
                        'TurnIgnitionOn': TurnIgnitionOn,
                        'TurnIgnitionOff': TurnIgnitionOff,
                        'SetAxleBasedVehicleSpeed': SetAxleBasedVehicleSpeed,
                        'SetBrakePressure': SetBrakePressure,
                        'EndExperiment' : EndExperiment
                }
                self.scheduler = TornadoScheduler()
                self.scheduler.configure(executors=executors)
                self.experiment = Experiment
                try:
                    self.experimentname = Experiment['ExperimentName']
                except KeyError:
                    raise KeyError('ExperimentName not defined')
                try:
                    self.end_time = Experiment['Endtime']
                except KeyError:
                    raise KeyError('Endtime not defined')
                #self.SSS_loop = Experiment.setdefault('SSS_loop', [])
                self.SSS_once = Experiment.setdefault('SSS_once', [])
                #self.CAN_loop = Experiment.setdefault('CAN_loop', [])
                self.CAN_once = Experiment.setdefault('CAN_once', [])
                self.CAN_utils = Experiment.setdefault('CAN_utils', [])
                self.seed_key = Experiment.setdefault('seed_key', [])
                relativetime = datetime.now()
                self.CAN0 = CanTools('can0')
                self.CAN1 = CanTools('can1')
                #self.SSS_loop_jobs = [self.add_SSS_loop(job, relativetime) for job in self.SSS_loop]
                self.SSS_once_jobs = [self.add_SSS_once(job, relativetime) for job in self.SSS_once]
                #self.CAN_loop_jobs = [self.add_CAN_loop(job, relativetime) for job in self.CAN_loop]
                self.CAN_once_jobs = [self.add_CAN_once(job, relativetime) for job in self.CAN_once]
                self.CAN_utils_jobs = [self.add_CAN_util(job, relativetime) for job in self.CAN_utils]
                self.seed_key_jobs = [self.add_seed_key(job, relativetime) for job in self.seed_key]
                if 'EndExperiment' not in [job[1] for job in self.SSS_once]:
                    print('EndExperiment command missing from SSS_once')
                    raise tornado.web.HTTPError(400)
                else:
                    end_delay = float([job[0] for job in self.SSS_once
                                       if job[1] == 'EndExperiment'][0]) + 40.0
                    self.endexperiment = arrow.get(timedelta(seconds=end_delay) + relativetime).timestamp
                self.log_post = json.dumps({'experimentname':self.experimentname, 'endtime':self.endexperiment})
                print('finished initializing')
 #       def add_SSS_loop(self, SSS_loop, time):
 #               #Jobs are coroutines. interval must be set here
 #               try:
 #                   serialfunction = SSS_loop[1]
 #                   args = SSS_loop[-1]
 #                   JOBCOROUTINE = self.CommandDictionary[serialfunction]
 #               except KeyError:
 #                   raise ValueError('Not supported SSS function -- please add')
 #               return self.scheduler.add_job(JOBCOROUTINE, 'interval', args=args, seconds=SSS_loop[2], start_date=time+timedelta(seconds=30)+timedelta(seconds=SSS_loop[0]), end_date=time+timedelta(seconds=30)+timedelta(seconds=SSS_loop[0])+timedelta(seconds=SSS_loop[3]))
        def add_SSS_once(self, SSS_once, time):
                try:
                    serialfunction = SSS_once[1]
                    args = SSS_once[-1]
                    JOBCOROUTINE = self.CommandDictionary[serialfunction]
                except KeyError:
                    raise ValueError('Unsupported SSS function -- please add')
                delay = timedelta(seconds=SSS_once[0]) + timedelta(seconds=30)
                run_date = time + delay
                return self.scheduler.add_job(JOBCOROUTINE, 'date', args=args, run_date=run_date)
#        def add_CAN_loop(self, CAN_loop, time):
#                try:
#                    serialfunction = CAN_loop[1]
#                    JOBCOROUTINE = self.CommandDictionary[serialfunction]
#                except KeyError:
#                    raise ValueError('Not supported function call -- please add')
#                return self.scheduler.add_job(JOBCOROUTINE, 'interval', seconds=0)
        def add_CAN_once(self, CAN_once, time):
                try:
                    CAN_message = bytes.fromhex(CAN_once[1].replace(" ", ""))
                    # Wrap the message in a list so add_job passes it as a single argument.
                    args = [CAN_message]
                except (IndexError, ValueError):
                    raise ValueError('Malformed CAN message -- expected a hex string')
                delay = timedelta(seconds=CAN_once[0]) + timedelta(seconds=30)
                run_date = time + delay
                if CAN_once[-1][0] == 'can0':
                    return self.scheduler.add_job(self.CAN0.sendRawMessage, 'date',
                                                  args=args, run_date=run_date)
                elif CAN_once[-1][0] == 'can1':
                    return self.scheduler.add_job(self.CAN1.sendRawMessage, 'date',
                                                  args=args, run_date=run_date)
                else:
                    raise ValueError('CAN interface must be set as can0 or can1')

        def add_CAN_util(self, CAN_util, time):
                utilities = {
                    'cangen': CanUtils.cangen
                }
                try:
                    util = CAN_util[1]
                    args = CAN_util[-1]
                    JOBCOROUTINE = utilities[util]
                except KeyError:
                    raise ValueError('Unsupported utility -- please add')
                delay = timedelta(seconds=CAN_util[0]) + timedelta(seconds=30)
                run_date = time + delay
                return self.scheduler.add_job(JOBCOROUTINE, 'date', args=args, run_date=run_date)
        def add_seed_key(self, seed_key, time):
                delay = timedelta(seconds=seed_key[0]) + timedelta(seconds=30)
                run_date = time + delay
                if seed_key[1] == 1:
                    print('vin update added')
                    return self.scheduler.add_job(self.CAN1.vinUpdate, 'date', args=[seed_key[2]], run_date=run_date)
                if seed_key[1] == 2:
                    print('gov. speed update added')
                    return self.scheduler.add_job(self.CAN1.govSpeedUpdate, 'date', args=[seed_key[2]], run_date=run_date)
        def start(self):
                self.scheduler.add_job(self.shutdown, 'date', run_date=datetime.now()+timedelta(seconds=30))
                InitializeTruck()
                self.scheduler.start(paused=False)
        @gen.coroutine
        def shutdown(self):
                global PROCESS_LIST
                # Busy-wait until every scheduled job has run to completion.
                while len(self.scheduler.get_jobs()) != 0:
                    pass
                print('shutting down scheduler')
                self.scheduler.shutdown(wait=False)
                for p in PROCESS_LIST:
                    p.kill()
        def getJobDict(self):
                # Only the job lists actually created in __init__ are reported;
                # the *_loop variants are commented out above.
                jobDict = {}
                jobDict.update({'SSS_once_jobs': self.SSS_once_jobs})
                jobDict.update({'CAN_once_jobs': self.CAN_once_jobs})
                jobDict.update({'CAN_utils_jobs': self.CAN_utils_jobs})
                jobDict.update({'seed_key_jobs': self.seed_key_jobs})
                return jobDict
 
        def handle_response(self, response):
                if response.error:
                    print("Error: %s" % response.error)
                else:
                    print(response.body)
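From the constructor above, the Experiment dictionary carries a name, an end time, and lists of timed commands. A hypothetical instance (every field value below is an illustrative assumption) might look like:

# Each SSS_once entry is assumed to be (delay_seconds, command_name, args);
# each CAN_once entry (delay_seconds, hex_payload, (interface,)).
experiment = {
    'ExperimentName': 'brake_test_01',
    'Endtime': 120,
    'SSS_once': [
        (0.0, 'TurnIgnitionOn', []),
        (60.0, 'SetBrakePressure', [4.5]),
        (90.0, 'EndExperiment', []),
    ],
    'CAN_once': [
        (10.0, '18 FE F1 00 AA BB CC DD', ('can0',)),
    ],
}

scheduler = ExperimentScheduler(experiment)
scheduler.start()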