Beispiel #1
0
def main():
    """Receiver: poll the Redis run queue and dispatch each instance id."""
    logging.info("taskflow receiver is running")
    queue = RedisDB()
    while True:
        payload = queue.pop_run_queue()
        # An empty queue yields None; only dispatch real entries.
        if payload is not None:
            message_process(int(payload))
        time.sleep(1)
Beispiel #2
0
def main():
    """Scheduler: poll cron-based schedules and enqueue due task instances.

    For each active schedule, computes the next trigger time with croniter,
    creates the appropriate instance rows ("workflow" parent plus first
    "module" step, or a standalone "module"), pushes the runnable instance
    onto the Redis run queue, and updates the schedule's trigger bookkeeping.
    """
    taskflowdb = TaskFlowDB()
    redisdb = RedisDB()
    while True:
        # Fetch currently active, valid schedules.
        data = taskflowdb.get_sched_cron()
        if not data:
            time.sleep(30)
            continue
        for item in data:
            try:
                # Truncate "now" to whole seconds for cron arithmetic
                # (replaces the old strptime(strftime(...)) round-trip).
                trigger_time = datetime.datetime.now().replace(microsecond=0)
                cron = croniter(item["cron_sched"], trigger_time)
                args_python_code = item["args_python_code"]
                args_json = get_arguments(args_python_code, trigger_time, True)

                # No parent task by default.
                parent_id = 0
                source_id = item["id"]
                source_type = "schedule"
                if item["task_type"] == "workflow":
                    # Create the parent (workflow) instance first.
                    instance_id = taskflowdb.create_instance(
                        item["task_name"], source_id, source_type, parent_id,
                        "workflow", item["task_name"], args_json, 'running')
                    wf = WorkflowSpec(item["task_name"], taskflowdb,
                                      instance_id, parent_id)
                    step_name = wf.get_step_name(wf.begin_step)
                    module_name = wf.steps[step_name].get("module")
                    args_json = wf.get_step_parameters(step_name, True)
                    parent_id = instance_id
                elif item["task_type"] == "module":
                    module_name = item["task_name"]
                    step_name = module_name
                else:
                    raise ValueError("task_type is invalid")
                # Create the runnable (module) instance.
                instance_id = taskflowdb.create_instance(
                    step_name, source_id, source_type, parent_id, "module",
                    module_name, args_json, 'running')

                trigger_next_time = cron.get_next(datetime.datetime)
                redisdb.push_run_queue(instance_id)
                taskflowdb.update_sched("start",
                                        item["id"],
                                        'running',
                                        trigger_last_time=trigger_time,
                                        trigger_next_time=trigger_next_time)
                # Fixed "shcedid" typo; lazy %-args instead of eager formatting.
                logging.info("schedid=%s is running", item["id"])
            except Exception:
                # Narrowed from bare `except:` so SystemExit/KeyboardInterrupt
                # can still terminate the daemon; log and continue.
                logging.error(traceback.format_exc())
        time.sleep(3)
def main():
    """Producer: turn submitted task forms into runnable task instances.

    Polls pending task forms, creates the corresponding instance rows
    ("workflow" parent plus first "module" step, or a standalone "module"),
    pushes the runnable instance onto the Redis run queue, and marks the
    form as running.
    """
    logging.info("taskflow producer is running")
    taskflowdb = TaskFlowDB()
    redisdb = RedisDB()
    while True:
        data = taskflowdb.get_undo_taskforms()
        if not data:
            time.sleep(30)
            continue
        for item in data:
            try:
                source_id = item["id"]
                source_type = "form"
                # No parent task by default.
                parent_id = 0
                if item["task_type"] == "workflow":
                    # Create the parent (workflow) instance first.
                    instance_id = taskflowdb.create_instance(
                        item["task_name"], source_id, source_type, parent_id,
                        "workflow", item["task_name"], item["args_json"],
                        'running')
                    wf = WorkflowSpec(item["task_name"], taskflowdb,
                                      instance_id, parent_id)
                    step_name = wf.get_step_name(wf.begin_step)
                    module_name = wf.steps[step_name].get("module")
                    args_json = wf.get_step_parameters(step_name, True)
                    parent_id = instance_id
                elif item["task_type"] == "module":
                    module_name = item["task_name"]
                    step_name = module_name
                    args_json = item["args_json"]
                else:
                    raise ValueError("task_type is invalid")
                # Create the runnable (module) instance and enqueue it.
                instance_id = taskflowdb.create_instance(
                    step_name, source_id, source_type, parent_id, "module",
                    module_name, args_json, 'running')
                redisdb.push_run_queue(instance_id)
                taskflowdb.save_taskform_status(item["id"], 'running')
            except Exception:
                # Narrowed from bare `except:`; log and continue with the
                # next form so one bad record cannot kill the daemon.
                logging.error(traceback.format_exc())
        time.sleep(3)
def main():
    """Checker: move due entries from the check queue back to the run queue."""
    logging.info("taskflow checker is running")
    redisdb = RedisDB()
    while True:
        entries = redisdb.fetch_check_queue()
        if not entries:
            # Nothing due yet; back off for a long interval.
            time.sleep(300)
            continue
        for instance_id, _score in entries:
            redisdb.push_run_queue(instance_id)
            redisdb.del_check_queue(instance_id)
            time.sleep(3)
Beispiel #5
0
def main(instance_id: int):
    """
    Run a single task instance (which must be of type "module"), persist the
    result, and — when the instance belongs to a workflow — enqueue the next
    step according to the workflow's on-success / on-failure transitions.
    """
    try:
        taskflowdb = TaskFlowDB()
        # Load the instance's base record.
        instance_data = taskflowdb.get_instance(instance_id)
        if "module" != instance_data["task_type"]:
            logging.error("当前运行的不是模块!")
            raise ValueError("id %s is not module" % instance_id)
        module_name = instance_data["task_name"]
        # Dynamically import the module to execute (modules.<task_name>).
        inner_func = importlib.import_module("modules.%s" % module_name)
        inner_func_main = getattr(inner_func, "main")
        # Introspect which parameters the module's main() accepts.
        inner_func_main_full_arg_spec = inspect.getfullargspec(inner_func_main)
        inner_func_main_argument_list = inner_func_main_full_arg_spec.args
        # Build the keyword arguments for the module call.
        # Arguments produced at run time.
        inner_func_kwargs = {}
        # Map stored args onto the declared parameters (handles aliased input).
        input_arguments = json.loads(instance_data["args_json"])
        for arg_name in inner_func_main_argument_list:
            if arg_name in input_arguments:
                arg_value = input_arguments.get(arg_name)
                inner_func_kwargs[arg_name] = arg_value
        if inner_func_main_full_arg_spec.varkw:
            # Module accepts **kwargs: pass the whole instance record through.
            inner_func_kwargs["sys_instance"] = instance_data
        # Close the DB connection for now: connection resources are scarce.
        taskflowdb.close()
        # Run the module; fold its return value into (success, message, data).
        success = True
        message = ""
        return_data = {}
        run_result = None
        try:
            logging.info("----------run module: %s start----------" % module_name)
            run_result = inner_func_main(**inner_func_kwargs)
            logging.info("----------run module: %s finish----------" % module_name)
            if run_result is not None:
                # Accepted return shapes: bool, or tuple of
                # (success[, message[, return_data]]).
                if type(run_result) is bool:
                    success = run_result
                elif type(run_result) is tuple:
                    len_ret = len(run_result)
                    if len_ret > 0:
                        success = bool(run_result[0])
                    if len_ret > 1:
                        message = str(run_result[1])
                    if len_ret > 2:
                        return_data = dict(run_result[2])
        except:
            success = False
            message = traceback.format_exc()
            logging.error("run module err \n %s", message)
        redisdb = RedisDB()
        # "check_" modules poll a condition: None result means "not done yet".
        if str(module_name).startswith("check_"):
            if run_result is None:
                check_interval = inner_func_kwargs.get("check_interval", 300)
                check_maxcount = inner_func_kwargs.get("check_maxcount", 0)
                times = redisdb.get_check_times(instance_id)
                # Re-check bookkeeping: retry until max count is exceeded.
                if check_maxcount and times > check_maxcount:
                    redisdb.del_check(instance_id)
                else:
                    # Schedule another check and stop here (status stays running).
                    redisdb.set_check(instance_id, times + 1, check_interval)
                    return
            else:
                redisdb.del_check(instance_id)
        result_status = 'success' if success else 'failure'
        # Re-open the DB connection (closed above before the module ran).
        taskflowdb = TaskFlowDB()
        # Persist the instance result.
        result_json = json.dumps(return_data, cls=CustomJSONEncoder)
        taskflowdb.save_instance_status(instance_id, result_status, result_message=message, result_json=result_json)
        # Handle the execution result.
        # parent_id > 0 means this instance is a step inside a workflow.
        source_id = instance_data["source_id"]
        source_type = instance_data["source_type"]
        parent_id = instance_data["parent_id"]
        if parent_id > 0:
            parent_instance = taskflowdb.get_instance(parent_id)
            workflow_name = parent_instance["task_name"]
            wf = WorkflowSpec(workflow_name, taskflowdb, instance_id, parent_id)
            cur_step_name = instance_data["name"]
            end_step_name = wf.get_step_name(wf.end_step)
            if cur_step_name == end_step_name:
                # Final step reached: propagate status to the originating source.
                update_source_task_status(taskflowdb, source_type, source_id, result_status)
                return
            cur_step = wf.steps[cur_step_name]
            if success:
                # Optionally pause the workflow after a successful step.
                success_pause = cur_step.get("on-success-pause", False)
                if success_pause:
                    update_source_task_status(taskflowdb, source_type, source_id, 'pause')
                    return
                next_step_name = wf.get_step_name(cur_step.get("on-success"))
                if not next_step_name:
                    update_source_task_status(taskflowdb, source_type, source_id, result_status)
                    return
            else:
                # On failure, retry the same step up to on-failure-retry times.
                retry_count = int(cur_step.get("on-failure-retry", 0))
                run_count = instance_data.get("retry_count", 0)
                if retry_count > 0 and run_count <= retry_count:
                    redisdb.push_run_queue(instance_id)
                    taskflowdb.save_instance_status(parent_id, result_status, retry_count=run_count + 1,
                                                    result_message=message)
                    return
                taskflowdb.save_instance_status(parent_id, result_status, result_message=message)
                next_step_name = wf.get_step_name(cur_step.get("on-failure"))
                if not next_step_name:
                    update_source_task_status(taskflowdb, source_type, source_id, result_status)
                    return
            # Compute the next step's module and parameters, then enqueue it.
            next_module_name = wf.steps[next_step_name].get("module")
            next_step_args_json = wf.get_step_parameters(next_step_name, True)
            next_instance_id = taskflowdb.create_instance(next_step_name, source_id, source_type, parent_id,
                                                          "module", next_module_name, next_step_args_json, 'running')

            redisdb.push_run_queue(next_instance_id)
        else:
            # Standalone module: propagate status straight to the source.
            update_source_task_status(taskflowdb, source_type, source_id, result_status)
    except:
        logging.error("task run err \n %s", traceback.format_exc())
Beispiel #6
0
 def setUp(self) -> None:
     """Create a fresh RedisDB connection before each test."""
     self.db = RedisDB()
Beispiel #7
0
class TestRedisDB(unittest.TestCase):
    """Integration tests for the RedisDB run/check queue helpers."""

    def setUp(self) -> None:
        # Fresh connection for every test.
        self.db = RedisDB()

    def test_run_queue(self):
        # A pushed id comes back from a subsequent pop.
        pushed_id = 1
        self.db.push_run_queue(pushed_id)
        popped = self.db.pop_run_queue()
        self.assertEqual(int(popped), pushed_id)

    def test_check_queue(self):
        inst = 1
        run_times = 1
        interval_secs = 5
        self.db.set_check(inst, run_times, interval_secs)
        # The stored check counter must match what we set.
        stored = self.db.get_check_times(inst)
        self.assertEqual(int(stored), run_times)
        # Wait past the interval so the entry becomes due.
        time.sleep(10)
        due = self.db.fetch_check_queue()
        self.assertTrue(len(due) > 0)
        print('test check queue data:', type(due[0]), due[0])
        # Clean up both the queue entry and the counter.
        self.db.del_check_queue(inst)
        self.db.del_check(inst)