def generate(self, sub_graph: AISubGraph, project_desc: ProjectDesc) -> AbstractJob:
    """
    Generate a local Flink job from an AI sub graph.

    :param sub_graph: Sub graph generated from AI nodes.
    :param project_desc: Description of the project.
    :return: Base job object (a configured LocalFlinkJob).
    """
    # The execution mode of the job context follows the sub graph config:
    # anything that is not BATCH runs as STREAM.
    if sub_graph.config.exec_mode == ExecutionMode.BATCH:
        mode = ExecutionMode.BATCH
    else:
        mode = ExecutionMode.STREAM
    flink_context = JobContext(mode)
    flink_context.project_config = project_desc.project_config

    local_flink_job_config: LocalFlinkJobConfig = sub_graph.config
    job = LocalFlinkJob(ai_graph=sub_graph,
                        job_context=flink_context,
                        job_config=local_flink_job_config)

    # Fall back to inferring the language type from the graph when the
    # job config leaves it unset.
    if job.job_config.language_type is None:
        job.job_config.language_type = self.get_language_type(sub_graph)

    # Jar-based (JVM) entry point.
    job.job_config.main_class = version.main_class
    job.job_config.jar_path = version.jar_path
    # Python entry point.
    job.job_config.py_entry_file = version.py_main_file
    return job
def generate(self, sub_graph: AISubGraph, project_desc: ProjectDesc) -> VVPJob:
    """
    Generate a VVP job from an AI sub graph.

    :param sub_graph: Sub graph generated from AI nodes.
    :param project_desc: Description of the project.
    :return: The configured VVPJob instance.
    """
    # Non-batch sub graphs run in streaming mode.
    exec_mode = sub_graph.config.exec_mode
    flink_context = JobContext(
        ExecutionMode.BATCH if exec_mode == ExecutionMode.BATCH else ExecutionMode.STREAM)
    flink_context.project_config = project_desc.project_config

    job_config: VVPJobConfig = sub_graph.config
    return VVPJob(job_context=flink_context, job_config=job_config)
def generate(self, sub_graph: AISubGraph, project_desc: ProjectDesc) -> KubernetesPythonJob:
    """
    Generate a Kubernetes python job from an AI sub graph.

    :param sub_graph: Sub graph generated from AI nodes.
    :param project_desc: Description of the project.
    :return: The configured KubernetesPythonJob instance.
    """
    # Choose the serialized run function and context mode together, so the
    # function always matches the execution mode of the job context.
    is_batch = sub_graph.config.exec_mode == ExecutionMode.BATCH
    if is_batch:
        serialized_func = serialize(batch_run_func)
        context = JobContext(ExecutionMode.BATCH)
    else:
        serialized_func = serialize(stream_run_func)
        context = JobContext(ExecutionMode.STREAM)
    context.project_config = project_desc.project_config

    # Translate the AI sub graph into an executable run graph.
    run_graph: RunGraph = self.build_run_graph(sub_graph, context)
    job_config: KubernetesPythonJobConfig = sub_graph.config
    return KubernetesPythonJob(run_graph=run_graph,
                               run_func=serialized_func,
                               job_context=context,
                               job_config=job_config)