def run_workflow():
    build_workflow()
    af.set_project_config_file(project_path + '/project.yaml')
    res = af.run(project_path,
                 dag_id='repeated_dag_example',
                 scheduler_type=SchedulerType.AIFLOW)
    af.wait_workflow_execution_finished(res)
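# Usage sketch (assumptions: `project_path` and `build_workflow()` are
# defined elsewhere in this module; neither is shown in this snippet):
if __name__ == '__main__':
    run_workflow()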
def test_deploy_airflow(self):
    airflow_path = af.project_config().get_airflow_deploy_path()
    if not os.path.exists(airflow_path):
        os.makedirs(airflow_path)
    with af.config(LocalPythonJobConfig(job_name="simple")):
        op = af.user_define_operation(
            af.PythonObjectExecutor(SimpleExecutor()))
    res = af.run(test_util.get_project_path())
    af.wait_workflow_execution_finished(res)
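# A minimal sketch of the SimpleExecutor referenced above, hypothetical and
# for illustration only. It assumes the python_ai_flow Executor interface,
# in which execute() receives a FunctionContext plus a list of inputs and
# returns a list of outputs; verify the exact base class and signature
# against your ai_flow version.
class SimpleExecutor(Executor):

    def execute(self, function_context, input_list):
        # No real work: just prove the job ran.
        print('simple job executed')
        return []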
def test_batch_train_component_with_an_output(self):
    input_example_meta = af.register_example(
        name='batch_train_example',
        support_type=ExampleSupportType.EXAMPLE_BATCH)
    model_meta = af.register_model(model_name='mnist_model',
                                   model_type=ModelType.SAVED_MODEL)
    example_meta = af.register_example(
        name='output_example',
        support_type=ExampleSupportType.EXAMPLE_BATCH,
        data_type='numpy',
        data_format='npz',
        batch_uri=os.path.abspath(
            os.path.dirname(__file__) + '/numpy_output.npz'))
    with af.config(
            af.BaseJobConfig(platform='local',
                             engine='python',
                             job_name='batch_train')):
        input_example = af.read_example(
            example_info=input_example_meta,
            executor=PythonObjectExecutor(
                python_object=ReadBatchExample()))
        train_channel = af.train(
            input_data_list=[input_example],
            executor=PythonObjectExecutor(
                python_object=TrainBatchMnistModelWithOutput()),
            model_info=model_meta,
            output_num=1)
        af.write_example(input_data=train_channel,
                         example_info=example_meta)
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
def test_stream_train_component(self):
    # Register the batch and stream inputs under distinct names; the original
    # registered 'stream_train_example' twice, which looks like a copy-paste slip.
    batch_input_example_meta = af.register_example(
        name='batch_train_example',
        support_type=ExampleSupportType.EXAMPLE_BOTH)
    model_meta = af.register_model(model_name='mnist_model',
                                   model_type=ModelType.SAVED_MODEL)
    stream_input_example_meta = af.register_example(
        name='stream_train_example',
        support_type=ExampleSupportType.EXAMPLE_BOTH)
    with af.config(
            af.BaseJobConfig(platform='local',
                             engine='python',
                             job_name='stream_train')):
        batch_input_example = af.read_example(
            example_info=batch_input_example_meta,
            executor=PythonObjectExecutor(
                python_object=ReadBatchExample()))
        batch_train = af.train(input_data_list=[batch_input_example],
                               executor=PythonObjectExecutor(
                                   python_object=TrainBatchMnistModel()),
                               model_info=model_meta)
        stream_input_example = af.read_example(
            example_info=stream_input_example_meta,
            executor=PythonObjectExecutor(
                python_object=ReadStreamExample()))
        stream_train = af.train(input_data_list=[stream_input_example],
                                executor=PythonObjectExecutor(
                                    python_object=TrainStreamMnistModel()),
                                model_info=model_meta)
    af.stop_before_control_dependency(stream_train, batch_train)
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
def run_flink_job():
    with af.global_config_file(test_util.get_job_config_file()):
        with af.config('vvp_job'):
            faf.vvp_job()
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    print(res)
def test_stream_transform_component(self):
    file = get_file_dir(__file__) + '/test1.csv'
    input_example_meta = af.register_example(
        name='test_example',
        support_type=ExampleSupportType.EXAMPLE_BOTH,
        stream_uri=file)
    output_file = get_file_dir(
        __file__) + "/output_transform_stream_test1.csv"
    output_example_meta = af.register_example(
        name='test_example_output',
        support_type=ExampleSupportType.EXAMPLE_BOTH,
        stream_uri=output_file)
    with af.config(
            af.BaseJobConfig(platform='local',
                             engine='python',
                             job_name='stream_transform')):
        input_example = af.read_example(
            example_info=input_example_meta,
            executor=PythonObjectExecutor(
                python_object=ReadStreamExample()))
        transform_example = af.transform(
            input_data_list=[input_example],
            executor=PythonObjectExecutor(
                python_object=TransformStreamData()))
        af.write_example(input_data=transform_example,
                         example_info=output_example_meta.name,
                         executor=PythonObjectExecutor(
                             python_object=WriteStreamExample()))
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
def test_read_example_with_numpy_npy(self):
    npy_name = 'test.npy'
    np.save(file=npy_name, arr=np.arange(10))
    input_example_meta = af.register_example(
        name='input_numpy_example',
        data_type='numpy',
        data_format='npy',
        support_type=ExampleSupportType.EXAMPLE_BATCH,
        batch_uri=os.path.abspath(
            os.path.dirname(__file__) + "/" + npy_name))
    output_example_meta = af.register_example(
        name='output_numpy_example',
        data_type='numpy',
        data_format='npy',
        support_type=ExampleSupportType.EXAMPLE_BATCH,
        batch_uri=os.path.abspath(
            os.path.dirname(__file__) + '/numpy_output.npy'))
    with af.config(
            af.BaseJobConfig(platform='local',
                             engine='python',
                             job_name='test_npy')):
        example_channel = af.read_example(example_info=input_example_meta)
        af.write_example(input_data=example_channel,
                         example_info=output_example_meta)
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
def test_read_example_with_pandas(self):
    input_example_meta = af.register_example(
        name='input_pandas_example',
        data_type='pandas',
        data_format='csv',
        support_type=ExampleSupportType.EXAMPLE_BATCH,
        batch_uri=os.path.abspath(
            os.path.dirname(__file__) + '/test1.csv'))
    output_example_meta = af.register_example(
        name='output_pandas_example',
        data_type='pandas',
        data_format='csv',
        support_type=ExampleSupportType.EXAMPLE_BATCH,
        batch_uri=os.path.abspath(
            os.path.dirname(__file__) + '/pandas_output.csv'))
    with af.config(
            af.BaseJobConfig(platform='local',
                             engine='python',
                             job_name='test_csv')):
        example_channel = af.read_example(example_info=input_example_meta)
        af.write_example(input_data=example_channel,
                         example_info=output_example_meta)
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
def run_flink_spec_job():
    with af.global_config_file(test_util.get_job_config_file()):
        with af.config('vvp_spec_job'):
            faf.vvp_job()
    workflow_id = af.run(test_util.get_project_path(),
                         dag_id='wordcount_vvp_python',
                         scheduler_type=af.SchedulerType.AIFLOW)
    res = af.wait_workflow_execution_finished(workflow_id)
    print(res)
def test_project_register(self):
    print(sys._getframe().f_code.co_name)
    TestProject.build_ai_graph(1)
    af.register_example(name="a",
                        support_type=af.ExampleSupportType.EXAMPLE_BOTH)
    w_id = af.submit_ai_flow()
    res = af.wait_workflow_execution_finished(w_id)
    self.assertEqual(0, res)
    e_meta = af.get_example_by_name("a")
    self.assertEqual("a", e_meta.name)
def test_stream_evaluate_component(self):
    input_example_meta = af.register_example(
        name='batch_train_example',
        support_type=ExampleSupportType.EXAMPLE_BATCH)
    model_meta = af.register_model(model_name='mnist_model',
                                   model_type=ModelType.SAVED_MODEL)
    stream_evaluate_example_meta = af.register_example(
        name='stream_evaluate_example',
        support_type=ExampleSupportType.EXAMPLE_STREAM)
    stream_output_file = get_file_dir(__file__) + '/stream_evaluate'
    evaluate_output = af.register_artifact(name='stream_evaluate',
                                           stream_uri=stream_output_file)
    stream_evaluate_result_example_meta = af.register_example(
        name='stream_evaluate_result_example',
        support_type=ExampleSupportType.EXAMPLE_STREAM,
        stream_uri=stream_output_file)
    if os.path.exists(stream_output_file):
        os.remove(stream_output_file)
    with af.config(
            af.BaseJobConfig(platform='local',
                             engine='python',
                             job_name='stream_evaluate')):
        input_example = af.read_example(
            example_info=input_example_meta,
            executor=PythonObjectExecutor(
                python_object=ReadBatchExample()))
        batch_train = af.train(input_data_list=[input_example],
                               executor=PythonObjectExecutor(
                                   python_object=TrainBatchMnistModel()),
                               model_info=model_meta)
        stream_evaluate_example = af.read_example(
            example_info=stream_evaluate_example_meta,
            executor=PythonObjectExecutor(
                python_object=ReadStreamExample()))
        stream_evaluate = af.evaluate(
            input_data_list=[stream_evaluate_example],
            model_info=model_meta,
            executor=PythonObjectExecutor(
                python_object=EvaluateStreamMnistModel()),
            output_num=1)
        af.write_example(input_data=stream_evaluate,
                         example_info=stream_evaluate_result_example_meta,
                         executor=PythonObjectExecutor(
                             python_object=WriteStreamExample()))
    af.stop_before_control_dependency(stream_evaluate, batch_train)
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
def test_run_pyflink_job(self):
    project_path = os.path.dirname(__file__) + '/../'
    af.set_project_config_file(project_path + "project.yaml")
    input_file = get_parent_dir(
        get_file_dir(__file__)) + '/resources/word_count.txt'
    output_file = get_file_dir(__file__) + "/word_count_output.csv"
    if os.path.exists(output_file):
        os.remove(output_file)
    example_1 = af.create_example(
        name="example_1",
        support_type=af.ExampleSupportType.EXAMPLE_BOTH,
        batch_uri=input_file,
        stream_uri=input_file,
        data_format="csv")
    example_2 = af.create_example(
        name="example_2",
        support_type=af.ExampleSupportType.EXAMPLE_BOTH,
        batch_uri=output_file,
        stream_uri=output_file,
        data_format="csv")
    flink_config = faf.LocalFlinkJobConfig()
    flink_config.local_mode = 'cluster'
    # Machine-specific Flink installation path; adjust for your environment.
    flink_config.flink_home = '/Users/chenwuchao/soft/apache/flink-1.11.0/'
    flink_config.set_table_env_create_func(TableEnvCreator())
    with af.config(flink_config):
        input_example = af.read_example(
            example_info=example_1,
            executor=faf.flink_executor.FlinkPythonExecutor(
                python_object=Source()))
        processed = af.transform(
            input_data_list=[input_example],
            executor=faf.flink_executor.FlinkPythonExecutor(
                python_object=Transformer()))
        af.write_example(input_data=processed,
                         example_info=example_2,
                         executor=faf.flink_executor.FlinkPythonExecutor(
                             python_object=Sink()))
    workflow_id = af.run(project_path)
    res = af.wait_workflow_execution_finished(workflow_id)
def test_read_example_with_numpy_npz(self):
    npz_name = 'test.npz'
    np.savez(npz_name, np.arange(10), np.sin(np.arange(10)))
    input_example_meta = af.register_example(
        name='input_numpy_example',
        data_type='numpy',
        data_format='npz',
        support_type=ExampleSupportType.EXAMPLE_BATCH,
        batch_uri=os.path.abspath(
            os.path.dirname(__file__) + "/" + npz_name))
    output_example_meta_first = af.register_example(
        name='output_numpy_example_1',
        data_type='numpy',
        data_format='npz',
        support_type=ExampleSupportType.EXAMPLE_BATCH,
        batch_uri=os.path.abspath(
            os.path.dirname(__file__) + '/numpy_output_1.npz'))
    output_example_meta_second = af.register_example(
        name='output_numpy_example_2',
        data_type='numpy',
        data_format='npz',
        support_type=ExampleSupportType.EXAMPLE_BATCH,
        batch_uri=os.path.abspath(
            os.path.dirname(__file__) + '/numpy_output_2.npz'))
    with af.config(
            af.BaseJobConfig(platform='local',
                             engine='python',
                             job_name='test_npz')):
        example_channel = af.read_example(example_info=input_example_meta)
        transform_channel = af.transform(
            input_data_list=[example_channel],
            executor=PythonObjectExecutor(
                python_object=TransformTrainData()),
            output_num=2)
        af.write_example(input_data=transform_channel[0],
                         example_info=output_example_meta_first)
        af.write_example(input_data=transform_channel[1],
                         example_info=output_example_meta_second)
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
def test_batch_predict_component(self):
    input_example_meta = af.register_example(
        name='input_train_example',
        support_type=ExampleSupportType.EXAMPLE_BOTH)
    model_meta = af.register_model(model_name='mnist_model',
                                   model_type=ModelType.SAVED_MODEL)
    batch_output_file = get_file_dir(__file__) + '/batch_predict'
    evaluate_output = af.register_artifact(name='batch_evaluate',
                                           batch_uri=batch_output_file)
    output_example_meta = af.register_example(
        name='output_result_example',
        support_type=ExampleSupportType.EXAMPLE_BATCH,
        data_type='numpy',
        data_format='txt',
        batch_uri=batch_output_file)
    if os.path.exists(batch_output_file):
        os.remove(batch_output_file)
    with af.config(
            af.BaseJobConfig(platform='local',
                             engine='python',
                             job_name='batch_predict')):
        batch_example = af.read_example(
            example_info=input_example_meta,
            executor=PythonObjectExecutor(
                python_object=ReadBatchExample()))
        batch_train = af.train(input_data_list=[batch_example],
                               executor=PythonObjectExecutor(
                                   python_object=TrainBatchMnistModel()),
                               model_info=model_meta)
        batch_predict = af.predict(
            input_data_list=[batch_example],
            model_info=model_meta,
            executor=PythonObjectExecutor(
                python_object=PredictBatchMnistModel()),
            output_num=1)
        af.write_example(input_data=batch_predict,
                         example_info=output_example_meta)
    af.stop_before_control_dependency(batch_predict, batch_train)
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
def test_batch_train_component(self):
    input_example_meta = af.register_example(
        name='batch_train_example',
        support_type=ExampleSupportType.EXAMPLE_BATCH)
    model_meta = af.register_model(model_name='mnist_model',
                                   model_type=ModelType.SAVED_MODEL)
    with af.config(
            af.BaseJobConfig(platform='local',
                             engine='python',
                             job_name='batch_train')):
        input_example = af.read_example(
            example_info=input_example_meta,
            executor=PythonObjectExecutor(
                python_object=ReadBatchExample()))
        af.train(input_data_list=[input_example],
                 executor=PythonObjectExecutor(
                     python_object=TrainBatchMnistModel()),
                 model_info=model_meta)
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
def test_batch_model_validate(self):
    input_example_meta = af.register_example(
        name='batch_train_example',
        support_type=ExampleSupportType.EXAMPLE_BOTH)
    model_meta = af.register_model(model_name='mnist_model',
                                   model_type=ModelType.SAVED_MODEL)
    with af.config(
            af.BaseJobConfig(platform='local',
                             engine='python',
                             job_name='evaluate')):
        input_example = af.read_example(
            example_info=input_example_meta,
            executor=PythonObjectExecutor(
                python_object=ReadBatchExample()))
        batch_train = af.train(input_data_list=[input_example],
                               executor=PythonObjectExecutor(
                                   python_object=TrainBatchMnistModel()),
                               model_info=model_meta)
        model_validate = af.model_validate(
            input_data_list=[input_example],
            model_info=model_meta,
            executor=PythonObjectExecutor(
                python_object=BatchModelValidate()),
            output_num=0)
    af.stop_before_control_dependency(model_validate, batch_train)
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
def test_run_pyflink_job(self):
    input_file = get_parent_dir(
        get_file_dir(__file__)) + '/resources/word_count.txt'
    output_file = get_file_dir(__file__) + "/word_count_output.csv"
    if os.path.exists(output_file):
        os.remove(output_file)
    example_1 = af.create_example(
        name="example_1",
        support_type=af.ExampleSupportType.EXAMPLE_BOTH,
        batch_uri=input_file,
        stream_uri=input_file,
        data_format="csv")
    example_2 = af.create_example(
        name="example_2",
        support_type=af.ExampleSupportType.EXAMPLE_BOTH,
        batch_uri=output_file,
        stream_uri=output_file,
        data_format="csv")
    flink_config = faf.LocalFlinkJobConfig()
    flink_config.local_mode = 'python'
    flink_config.set_table_env_create_func(TableEnvCreator())
    with af.config(flink_config):
        input_example = af.read_example(
            example_info=example_1,
            executor=faf.flink_executor.FlinkPythonExecutor(
                python_object=Source()))
        processed = af.transform(
            input_data_list=[input_example],
            executor=faf.flink_executor.FlinkPythonExecutor(
                python_object=Transformer()))
        af.write_example(input_data=processed,
                         example_info=example_2,
                         executor=faf.flink_executor.FlinkPythonExecutor(
                             python_object=Sink()))
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
def run_workflow():
    """
    Run the user-defined workflow definition.
    """
    train_example_meta, label_example_meta, test_example_meta, \
        test_output_example_meta, train_model_meta = prepare_workflow()

    python_job_config_0 = BaseJobConfig(job_name='read_train',
                                        platform='local',
                                        engine='python')
    python_job_config_1 = BaseJobConfig(job_name='train',
                                        platform='local',
                                        engine='python')
    flink_job_config_2 = LocalFlinkJobConfig()
    flink_job_config_2.job_name = 'test'
    flink_job_config_2.local_mode = 'python'
    flink_job_config_2.flink_home = os.environ['FLINK_HOME']
    flink_job_config_2.set_table_env_create_func(MyStreamTableEnvCreator())

    with af.config(python_job_config_0):
        python_job_0_read_train_data = af.read_example(
            example_info=train_example_meta,
            executor=PythonObjectExecutor(python_object=ReadTrainCsvExample()))
        python_job_0_read_label_data = af.read_example(
            example_info=label_example_meta,
            executor=PythonObjectExecutor(python_object=ReadLabelCsvExample()))
        write_train_data_example = af.register_example(
            name='write_train_data',
            support_type=ExampleSupportType.EXAMPLE_BATCH,
            data_type='pandas',
            data_format='csv',
            batch_uri='/tmp/write_train_data.csv')
        python_job_0_write_train_result = af.write_example(
            input_data=python_job_0_read_train_data,
            example_info=write_train_data_example,
            executor=PythonObjectExecutor(python_object=WriteTrainCsvExample()))

    with af.config(python_job_config_1):
        python_job_1_train_model = af.train(
            name='trainer_0',
            input_data_list=[python_job_0_read_train_data,
                             python_job_0_read_label_data],
            executor=PythonObjectExecutor(python_object=TrainModel()),
            model_info=train_model_meta)

    with af.config(flink_job_config_2):
        flink_job_2_read_test_data = af.read_example(
            example_info=test_example_meta,
            executor=FlinkPythonExecutor(python_object=ReadTestCsvExample()))
        flink_job_2_predict_test_data = af.transform(
            input_data_list=[flink_job_2_read_test_data],
            executor=FlinkPythonExecutor(python_object=PredictTestLabelExecutor()))
        write_result = af.write_example(
            input_data=flink_job_2_predict_test_data,
            example_info=test_output_example_meta,
            executor=FlinkPythonExecutor(python_object=WritePredictTestExample()))

    af.stop_before_control_dependency(python_job_1_train_model,
                                      python_job_0_write_train_result)
    af.stop_before_control_dependency(write_result, python_job_1_train_model)
    workflow_id = af.run(get_project_path() + '/')
    res = af.wait_workflow_execution_finished(workflow_id)
    sys.exit(res)
def test_run_project(self):
    print(sys._getframe().f_code.co_name)
    TestProject.build_ai_graph(1)
    workflow_id = af.submit_ai_flow()
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
def run_workflow(root_dir_path, project_yaml_path):
    ai_flow.set_project_config_file(project_yaml_path)
    res = ai_flow.run(root_dir_path,
                      dag_id='hello_world_example',
                      scheduler_type=ai_flow.SchedulerType.AIFLOW)
    ai_flow.wait_workflow_execution_finished(res)
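# Usage sketch with hypothetical paths; the real project layout is not shown
# in this snippet:
if __name__ == '__main__':
    import os
    root = os.path.dirname(os.path.abspath(__file__))
    run_workflow(root, root + '/project.yaml')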
def run_workflow():
    """
    Run the user-defined workflow definition.
    """
    train_data_file, predict_result_directory, merge_predict_result_path, \
        first_test_data_file, first_result_data_file = collect_data_file()
    # Prepare workflow: Example & Model metadata registration.
    train_example_meta, predict_result_meta, merge_data_meta, first_test_example_meta, second_test_example_meta, \
        first_result_example_meta, second_result_example_meta, train_model_meta = \
        prepare_workflow(train_data_file=train_data_file,
                         predict_result_directory=predict_result_directory,
                         merge_predict_result_path=merge_predict_result_path,
                         first_test_data_file=first_test_data_file,
                         first_result_data_file=first_result_data_file)

    # Save proxima indexes under the following index path.
    index_path = '{}/codes/{}/'.format(os.environ['ENV_HOME'],
                                       os.environ['TASK_ID']) + 'test.index'

    # Set Python job configs for training, serving startup, and result merging.
    python_job_config_0 = BaseJobConfig(platform='local',
                                        engine='python',
                                        job_name='train')
    python_job_config_1 = BaseJobConfig(platform='local',
                                        engine='python',
                                        job_name='start_cluster_serving')
    python_job_config_2 = BaseJobConfig(platform='local',
                                        engine='python',
                                        job_name='merge_predict_result')

    # Set Flink job config to predict with cluster serving.
    global_job_config_1 = LocalFlinkJobConfig()
    global_job_config_1.local_mode = 'cluster'
    global_job_config_1.flink_home = os.environ['FLINK_HOME']
    global_job_config_1.job_name = 'cluster_serving'
    global_job_config_1.set_table_env_create_func(StreamTableEnvCreatorBuildIndex())

    # Set Flink job config to build index.
    global_job_config_2 = LocalFlinkJobConfig()
    global_job_config_2.local_mode = 'cluster'
    global_job_config_2.flink_home = os.environ['FLINK_HOME']
    global_job_config_2.job_name = 'build_index'
    global_job_config_2.set_table_env_create_func(StreamTableEnvCreator())

    # Set Flink job config to find sick.
    global_job_config_3 = LocalFlinkJobConfig()
    global_job_config_3.local_mode = 'cluster'
    global_job_config_3.flink_home = os.environ['FLINK_HOME']
    global_job_config_3.job_name = 'find_sick'
    global_job_config_3.set_table_env_create_func(StreamTableEnvCreator())

    # Set Flink job config to online cluster.
    global_job_config_4 = LocalFlinkJobConfig()
    global_job_config_4.local_mode = 'cluster'
    global_job_config_4.flink_home = os.environ['FLINK_HOME']
    global_job_config_4.job_name = 'online_cluster'
    global_job_config_4.set_table_env_create_func(StreamTableEnvCreator())

    with af.config(python_job_config_0):
        # Under the first job config, construct the first job, which trains
        # an auto_encoder model.
        python_job_0_read_train_example = af.read_example(
            example_info=train_example_meta,
            executor=PythonObjectExecutor(python_object=ReadCsvExample()))
        python_job_0_train_model = af.train(
            input_data_list=[python_job_0_read_train_example],
            executor=PythonObjectExecutor(python_object=TrainAutoEncoder()),
            model_info=train_model_meta,
            name='trainer_0')

    with af.config(python_job_config_1):
        python_job_1_cluster_serving_channel = af.cluster_serving(
            model_info=train_model_meta, parallelism=2)
        # python_job_1_cluster_serving_channel = af.cluster_serving(
        #     model_info=train_model_meta, parallelism=16)

    with af.config(global_job_config_1):
        flink_job_0_read_train_example = af.read_example(
            example_info=train_example_meta,
            executor=FlinkPythonExecutor(python_object=ReadTrainExample()))
        flink_job_0_predict_model = af.predict(
            input_data_list=[flink_job_0_read_train_example],
            model_info=train_model_meta,
            executor=FlinkPythonExecutor(
                python_object=PredictAutoEncoderWithTrain()))
        flink_job_0_write_predict_data = af.write_example(
            input_data=flink_job_0_predict_model,
            example_info=predict_result_meta,
            executor=FlinkPythonExecutor(python_object=WritePredictResult()))

    with af.config(python_job_config_2):
        python_job_2_merge_train_data_file = af.user_define_operation(
            executor=PythonObjectExecutor(python_object=MergePredictResult()))

    with af.config(global_job_config_2):
        flink_job_1_read_train_example = af.read_example(
            example_info=merge_data_meta,
            executor=FlinkPythonExecutor(python_object=ReadMergeExample()))
        flink_job_1_build_index_channel = af.transform(
            [flink_job_1_read_train_example],
            executor=FlinkPythonExecutor(
                python_object=BuildIndexExecutor(index_path, FloatDataType(), 128)))

    with af.config(global_job_config_3):
        flink_job_2_read_history_example = af.read_example(
            example_info=first_test_example_meta,
            executor=FlinkPythonExecutor(python_object=ReadPredictExample()))
        flink_job_2_predict_model = af.predict(
            input_data_list=[flink_job_2_read_history_example],
            model_info=train_model_meta,
            executor=FlinkPythonExecutor(python_object=PredictAutoEncoder()))
        flink_job_2_transformed_data = af.transform(
            [flink_job_2_predict_model],
            executor=FlinkPythonExecutor(
                python_object=SearchExecutor(index_path, FloatDataType(), 2)))
        flink_job_2_read_train_example = af.read_example(
            example_info=train_example_meta,
            executor=FlinkPythonExecutor(python_object=ReadTrainExample()))
        flink_job_2_join_channel = af.transform(
            input_data_list=[flink_job_2_transformed_data,
                             flink_job_2_read_train_example],
            executor=FlinkPythonExecutor(python_object=FindHistory()))
        flink_job_2_write_result = af.write_example(
            input_data=flink_job_2_join_channel,
            example_info=first_result_example_meta,
            executor=FlinkPythonExecutor(python_object=SearchSink()))

    with af.config(global_job_config_4):
        flink_job_3_read_online_example = af.read_example(
            example_info=second_test_example_meta,
            executor=FlinkPythonExecutor(
                python_object=ReadOnlinePredictExample()))
        flink_job_3_predict_model = af.predict(
            input_data_list=[flink_job_3_read_online_example],
            model_info=train_model_meta,
            executor=FlinkPythonExecutor(python_object=OnlinePredictAutoEncoder()))
        flink_job_3_transformed_data = af.transform(
            [flink_job_3_predict_model],
            executor=FlinkPythonExecutor(
                python_object=SearchExecutor3(index_path, FloatDataType(), 2)))
        af.write_example(
            input_data=flink_job_3_transformed_data,
            example_info=second_result_example_meta,
            executor=FlinkPythonExecutor(python_object=WriteSecondResult()))

    af.stop_before_control_dependency(python_job_1_cluster_serving_channel,
                                      python_job_0_train_model)
    af.stop_before_control_dependency(flink_job_0_read_train_example,
                                      python_job_1_cluster_serving_channel)
    af.stop_before_control_dependency(python_job_2_merge_train_data_file,
                                      flink_job_0_read_train_example)
    af.stop_before_control_dependency(flink_job_1_build_index_channel,
                                      python_job_2_merge_train_data_file)
    af.stop_before_control_dependency(flink_job_2_read_history_example,
                                      flink_job_1_build_index_channel)
    af.stop_before_control_dependency(flink_job_3_read_online_example,
                                      flink_job_2_write_result)
    workflow_id = af.run(get_project_path() + '/')
    res = af.wait_workflow_execution_finished(workflow_id)
    sys.exit(res)
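# Reading the control dependencies above (a hedged interpretation, based on
# how stop_before_control_dependency(downstream, upstream) is used throughout
# these snippets: the job owning the first channel starts only after the job
# owning the second channel stops), the pipeline runs:
# train -> start_cluster_serving -> cluster_serving predict ->
# merge_predict_result -> build_index -> find_sick, with online_cluster
# starting once find_sick has written its results.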