Example 1
    def __init__(self, application, request, **kwargs):
        super().__init__(application, request, **kwargs)
        conf = configparser.ConfigParser()
        conf.read("web.ini")
        ip = conf.get("redis", "ip")
        port = conf.getint("redis", "port")
        timeout = conf.getint("redis", "timeout")
        self.db = SchedulerDb(ip, port, timeout)
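
The snippet assumes that web.ini sits in the working directory and contains a [redis] section with ip, port and timeout keys. A minimal sketch of the expected shape, with placeholder values (the real host and limits are project-specific):

import configparser

conf = configparser.ConfigParser()
# Hypothetical web.ini contents matching the keys read above.
conf.read_string("""
[redis]
ip = 127.0.0.1
port = 6379
timeout = 5
""")
print(conf.getint("redis", "port"))  # 6379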
Example 2
    def __init__(self, conf):
        self.db = SchedulerDb(host=conf.get("redis", "host"),
                              port=conf.getint("redis", "port"),
                              timeout=conf.getint("redis", "timeout"))

        self.conn = pymysql.Connect(host=conf.get("mysql", "host"),
                                    port=conf.getint("mysql", "port"),
                                    user=conf.get("mysql", "user"),
                                    passwd=conf.get("mysql", "passwd"),
                                    db=conf.get("mysql", "db"),
                                    charset='utf8')
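
This constructor expects an already-parsed ConfigParser holding both a [redis] and a [mysql] section. A usage sketch under that assumption, with placeholder credentials and the ConfigureDb class name taken from Example 7:

import configparser

conf = configparser.ConfigParser()
# Hypothetical ini contents; credentials are placeholders, not project values.
conf.read_string("""
[redis]
host = 127.0.0.1
port = 6379
timeout = 5

[mysql]
host = 127.0.0.1
port = 3306
user = root
passwd = secret
db = scheduler
""")
configure_db = ConfigureDb(conf)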
Example 3
    def __init__(self):
        conf = configparser.ConfigParser()
        conf.read("../agent.ini")
        ip = conf.get("redis", "ip")
        port = conf.getint("redis", "port")
        timeout = conf.getint("redis", "timeout")
        self.invoker_id = self._get_invoker_id()
        self.max_tasks = conf.getint("invoker", "max_tasks")
        self.live_seconds = conf.getint("invoker", "live_seconds")
        self.db = SchedulerDb(ip, port, timeout)
        logging.config.fileConfig("../logger.ini")
        self.logger = logging.getLogger("main")
        executors = {
            'default': {'type': 'processpool', 'max_workers': self.max_tasks + 1}
        }
        # configure the scheduler with the process-pool executor defined above
        self.blockScheduler = BlockingScheduler(executors=executors)
        self.jobs = {}
        self.lock = threading.Lock()
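
Because this constructor reads ../agent.ini, that file needs a [redis] section plus an [invoker] section whose max_tasks and live_seconds drive the executor pool size and the heartbeat interval used later in Example 6. A sketch of the expected keys (all values are placeholders):

import configparser

conf = configparser.ConfigParser()
# Hypothetical ../agent.ini contents; adjust the values to the deployment.
conf.read_string("""
[redis]
ip = 127.0.0.1
port = 6379
timeout = 5

[invoker]
max_tasks = 4
live_seconds = 60
""")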
Example 4
    def __init__(self, application, request, **kwargs):
        super().__init__(application, request, **kwargs)
        conf = configparser.ConfigParser()
        conf.read("web.ini")
        ip = conf.get("redis", "host")
        port = conf.getint("redis", "port")
        timeout = conf.getint("redis", "timeout")
        self.db = SchedulerDb(ip, port, timeout)

        mysql_host = conf.get("mysql", "host")
        mysql_port = conf.getint("mysql", "port")
        mysql_passwd = conf.get("mysql", "passwd")
        mysql_user = conf.get("mysql", "user")
        mysql_db = conf.get("mysql", "db")
        self.configurationDb = ConfigureDb(mysql_host, mysql_port, mysql_user,
                                           mysql_passwd, mysql_db)
Example 5
class AppHandler(tornado.web.RequestHandler):
    def __init__(self, application, request, **kwargs):
        super().__init__(application, request, **kwargs)
        conf = configparser.ConfigParser()
        conf.read("web.ini")
        ip = conf.get("redis", "ip")
        port = conf.getint("redis", "port")
        timeout = conf.getint("redis", "timeout")
        self.db = SchedulerDb(ip, port, timeout)

    def get(self, *args, **kwargs):
        title = "netto configure web"
        environments = self.db.query_all_environments()
        if len(environments) > 0:
            cur_env = environments[0]
        else:
            cur_env = TaskEnvironment("netto", [])
        self.render("app.html", title=title, environments=environments, cur_env=cur_env)
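
AppHandler is a regular tornado.web.RequestHandler, so it can be mounted in an Application like any other handler. A minimal wiring sketch, assuming app.html lives in a templates/ directory and port 8888 is free (both are placeholders, not taken from the original project):

import tornado.ioloop
import tornado.web

def make_app():
    # Route "/" to the handler above; template_path is an assumption.
    return tornado.web.Application([(r"/", AppHandler)], template_path="templates")

if __name__ == "__main__":
    make_app().listen(8888)
    tornado.ioloop.IOLoop.current().start()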
Example 6
class Scheduler:
    def __init__(self):
        conf = configparser.ConfigParser()
        conf.read("../agent.ini")
        ip = conf.get("redis", "ip")
        port = conf.getint("redis", "port")
        timeout = conf.getint("redis", "timeout")
        self.invoker_id = self._get_invoker_id()
        self.max_tasks = conf.getint("invoker", "max_tasks")
        self.live_seconds = conf.getint("invoker", "live_seconds")
        self.db = SchedulerDb(ip, port, timeout)
        logging.config.fileConfig("../logger.ini")
        self.logger = logging.getLogger("main")
        executors = {
            'default': {'type': 'processpool', 'max_workers': self.max_tasks + 1}
        }
        # configure the scheduler with the process-pool executor defined above
        self.blockScheduler = BlockingScheduler(executors=executors)
        self.jobs = {}
        self.lock = threading.Lock()

    @staticmethod
    def _get_invoker_id():
        hostname = socket.gethostname()
        pid = os.getpid()
        return hostname + "-" + str(pid)

    def task_invoke(self, task_instance, task_param):
        if task_param.cmd.startswith('http'):
            executor = HttpExecutor(self.db, task_instance, task_param)
            executor.execute()
        else:
            pass

    def break_heart(self):
        """
        invoker每隔一段时间就心跳一下,看看是否有新任务,是否有任务需要更新
        :param bs:
        :return:
        """
        # 先看看参数是否有变化的把调度重启或者关闭
        try:
            self.lock.acquire()
            self.refresh_local_invoker()
            self.refresh_other_invokers()
            if len(self.jobs) >= self.max_tasks:
                return

            task_instances, task_params = self.db.query_waiting_run_tasks(self.invoker_id,
                                                                          self.max_tasks - len(self.jobs),
                                                                          True)
            if len(task_instances) == 0:
                return
            for i in range(len(task_instances)):
                task_instance = task_instances[i]
                task_param = task_params[i]
                if task_instance.id not in self.jobs.keys():
                    self.logger.info("分配了新任务%s", task_instance.id)
                    job = self.blockScheduler.add_job(self.task_invoke,
                                                      next_run_time=(
                                                          datetime.datetime.now() + datetime.timedelta(seconds=2)),
                                                      args=[task_instance, task_param], id=task_instance.id)
                    self.jobs[job.id] = job
                    self.db.lock_invoker_instance(self.invoker_id, task_instance.id, self.live_seconds)
                else:
                    self.logger.error("%s任务已经在运行", task_instance.id)
        finally:
            self.lock.release()

    def refresh_local_invoker(self):
        """
        调度的参数是否发生变化,如有需要重启调度
        :param bs:
        :return:
        """

        self.db.update_invoker_time(self.invoker_id, self.jobs.keys(), self.live_seconds)
        self.logger.info("%s心跳更新成功!", self.invoker_id)
        # 看看是否有需要停止的任务再自己这里,释放掉
        stop_tasks = self.db.query_need_stop_tasks(self.invoker_id)
        for stop_task in stop_tasks:
            if stop_task in self.jobs.keys():
                try:
                    job = self.jobs[stop_task]
                    task_instance = job.args[0]
                    task_instance.status = 'off'
                    job.pause()
                    job.remove()
                except Exception as e:
                    self.logger.error(e)
                    self.jobs.pop(stop_task)
                    try:
                        self.blockScheduler.remove_job(stop_task)
                    except Exception as e1:
                        self.logger.error(e1)

            self.logger.info("人工停止了任务%s", stop_task)
            self.db.unlock_invoker_instance(self.invoker_id, stop_task, self.live_seconds)

        # Check whether any tasks need to be restarted because their parameters changed
        c_jobs = copy.copy(self.jobs)
        for key in c_jobs.keys():
            if key not in self.jobs.keys():
                continue
            job = self.jobs[key]
            task_instance = job.args[0]
            old_task_param = job.args[1]
            # Check whether the parameters have changed; if so, stop the job and re-run the task
            new_task_param = self.db.query_task_param(task_instance.task_param_id)
            if not new_task_param.has_diff(old_task_param):
                continue

            try:
                task_instance.status = 'off'
                job.pause()
                job.remove()
            except Exception as e:
                self.logger.error(e)
                self.jobs.pop(key)
                try:
                    self.blockScheduler.remove_job(key)
                except Exception as e1:
                    self.logger.error(e1)
            self.logger.info("参数变化停止了任务%s", task_instance.id)
            self.db.unlock_invoker_instance(self.invoker_id, task_instance.id, self.live_seconds)
            self.db.add_task_waiting_run(task_instance.id)

    def refresh_other_invokers(self):
        """
        遍历所有的invoker,判断invoker是否超过存活期
        :return:
        """
        invokers = self.db.query_all_invokers()
        for invoker_id in invokers.keys():
            if not self.db.invoker_is_live(self.invoker_id):
                task_instance_list = self.db.query_invoker_tasks(self.invoker_id)
                for task_instance_id in task_instance_list:
                    self.db.add_task_waiting_run(task_instance_id)

    def main(self):
        try:
            self.db.register_invoker(self.invoker_id, self.max_tasks, self.live_seconds)
            self.blockScheduler.add_listener(self._job_listener,
                                             events.EVENT_JOB_ERROR | events.EVENT_JOB_MISSED)

            self.blockScheduler.add_job(self.break_heart, "interval", seconds=self.live_seconds / 2,
                                        id="break_heart")
            self.logger.info("开始启动调度...")
            self.blockScheduler.start()
            self.logger.info("启动调度成功!")
        except KeyboardInterrupt as e:
            self.logger.info(e)
            self.blockScheduler.shutdown()

    def _job_listener(self, ev):
        """
        监听job的事件,job完成后再发起下次调用,对于异常也要处理
        :param ev:
        :return:
        """
        if ev.code == events.EVENT_JOB_ERROR:
            self.logger.error(ev.exception)
            self.logger.error(ev.traceback)
        else:
            pass
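
Scheduler.main() registers the invoker, installs the job listener, schedules the heartbeat and then blocks inside BlockingScheduler.start(), so it doubles as the process entry point. A minimal launch sketch, assuming the script is run from a directory where ../agent.ini and ../logger.ini resolve:

if __name__ == "__main__":
    # Blocks until KeyboardInterrupt, which main() turns into a clean shutdown.
    Scheduler().main()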
Example 7
class ConfigureDb:
    def __init__(self, conf):
        self.db = SchedulerDb(host=conf.get("redis", "host"),
                              port=conf.getint("redis", "port"),
                              timeout=conf.getint("redis", "timeout"))

        self.conn = pymysql.Connect(host=conf.get("mysql", "host"),
                                    port=conf.getint("mysql", "port"),
                                    user=conf.get("mysql", "user"),
                                    passwd=conf.get("mysql", "passwd"),
                                    db=conf.get("mysql", "db"),
                                    charset='utf8')

    def query_tasks_info(self, env):
        return self.db.query_tasks_info(env)

    def query_task_param(self, param_id):
        return self.db.query_task_param(param_id)

    def query_all_environments(self):
        # create the cursor before the try block so the finally clause never
        # references an unbound name if cursor creation fails
        cursor = self.conn.cursor()
        try:
            sql = "select s_app,s_group,owners,last_time from system_env"
            cursor.execute(sql)
            environments = []
            for row in cursor.fetchall():
                env = TaskEnvironment(row[0], json.loads(row[2]), row[1])
                environments.append(env)
            return environments
        finally:
            cursor.close()
            self.conn.close()

    def save_task_param(self, param):
        cursor = self.conn.cursor()
        try:
            sql = "replace into task_param(md5_id,s_app,s_group,cmd,invoke_count," \
                  "cron_express,sleep_seconds,timeout_seconds,fetch_count,data_retry_count," \
                  "retry_after_seconds,execute_thread_count,execute_count,self_defined," \
                  "status,last_time) " \
                  "values" \
                  "('%s','%s','%s','%s'," \
                  "'%d','%s','%d','%d'," \
                  "'%d','%d','%d','%d','%d','%s','%s',now())"
            data = (param.id, param.app, param.group, param.cmd,
                    param.get_invoke_args()['invoke_count'],
                    param.get_invoke_args()['cron_express'],
                    param.get_invoke_args()['sleep_seconds'],
                    param.get_invoke_args()['timeout_seconds'],
                    param.get_service_args()['fetch_count'],
                    param.get_service_args()['data_retry_count'],
                    param.get_service_args()['retry_after_seconds'],
                    param.get_service_args()['execute_thread_count'],
                    param.get_service_args()['execute_count'],
                    param.get_service_args()['self_defined'], param.status)
            cursor.execute(sql % data)
            self.conn.commit()
            print('Inserted', cursor.rowcount, 'row(s) successfully')
        finally:
            cursor.close()
            self.conn.close()
        return self.db.save_task_param(param)

    def stop_task_param(self, param_id):
        return self.db.stop_task_param(param_id)

    def start_task_param(self, param_id):
        return self.db.start_task_param(param_id)
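
A usage sketch for ConfigureDb, assuming an ini file that provides the [redis] and [mysql] sections this constructor reads (as sketched after Example 2); note that query_all_environments() closes the MySQL connection in its finally block, so each instance is only good for one such query:

import configparser

conf = configparser.ConfigParser()
conf.read("web.ini")  # hypothetical path; must contain [redis] and [mysql] sections

configure_db = ConfigureDb(conf)
for env in configure_db.query_all_environments():
    print(env)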