Example #1
def tearDown(self) -> None:
    print('tearDown')
    # Drop and recreate all metadata tables so the SQLite-backed store is
    # empty again after each test.
    store = SqlAlchemyStore(_SQLITE_DB_URI)
    base.metadata.drop_all(store.db_engine)
    base.metadata.create_all(store.db_engine)
    # Clear the in-memory default graph and verify that no jobs remain.
    af.default_graph().clear_graph()
    res = client.list_job(page_size=10, offset=0)
    self.assertIsNone(res)
Example #2
def setUp(self):
    # Reset the AIFlow master database and the Airflow metadata tables
    # (jobs, DAGs, serialized DAGs, runs, task executions and messages)
    # before each test, then clear the in-memory default graph.
    master._clear_db()
    db_utils.clear_db_jobs()
    db_utils.clear_db_dags()
    db_utils.clear_db_serialized_dags()
    db_utils.clear_db_runs()
    db_utils.clear_db_task_execution()
    db_utils.clear_db_message()
    af.default_graph().clear_graph()
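
These setUp/tearDown fixtures presumably live inside a unittest.TestCase. A minimal sketch of how the second fixture could be wrapped (the class name and the placeholder test are assumptions, not part of the original example):

import unittest

class WorkflowDbTest(unittest.TestCase):  # hypothetical class name
    def setUp(self):
        # Same reset sequence as the fixture above, trimmed to the master
        # database, the job tables and the default graph.
        master._clear_db()
        db_utils.clear_db_jobs()
        af.default_graph().clear_graph()

    def test_placeholder(self):
        # A real test would build a graph here with the af API.
        self.assertIsNotNone(af.default_graph())

if __name__ == '__main__':
    unittest.main()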
Example #3
def build_ai_graph() -> AIGraph:
    with af.engine('cmd_line'):
        # Three command-line operations that each echo a message and sleep.
        p_list = []
        for i in range(3):
            p = af.user_define_operation(
                executor=CmdExecutor(cmd_line="echo 'hello_{}' && sleep 3".format(i)))
            p_list.append(p)
        # Add stop-before control dependencies linking the first operation
        # with the other two.
        af.stop_before_control_dependency(p_list[0], p_list[1])
        af.stop_before_control_dependency(p_list[0], p_list[2])

    return af.default_graph()
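
A minimal sketch of how a graph builder like this could be consumed, reusing only calls that appear elsewhere on this page (whether test_util.get_project_path() applies to this particular example is an assumption):

def compile_cmd_graph():
    # Build the three-operation command-line graph defined above, compile
    # it into a workflow, and print each resulting job definition.
    build_ai_graph()
    workflow = af.compile_workflow(project_path=test_util.get_project_path())
    for job in workflow.jobs.values():
        print(job)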
def setUp(self):
    # Reset the shared master database and the default graph before each
    # TestProject case.
    TestProject.master._clear_db()
    af.default_graph().clear_graph()
def run_flink_predict_job():
    input_file = "/test1.csv"
    output_file = "/output_test2.csv"
    # Define the input and output examples; both support batch and stream
    # execution and use the CSV format.
    example_1 = af.create_example(
        name="example_1",
        support_type=af.ExampleSupportType.EXAMPLE_BOTH,
        batch_uri=input_file,
        stream_uri=input_file,
        data_format="csv")

    example_2 = af.create_example(
        name="example_2",
        support_type=af.ExampleSupportType.EXAMPLE_BOTH,
        batch_uri=output_file,
        stream_uri=output_file,
        data_format="csv")
    # Configure the job to run with a local Flink setup.
    flink_config = faf.LocalFlinkJobConfig()
    flink_config.flink_home = ''
    with af.config(flink_config):
        # DDL and table name for the input table, handed to the Flink
        # executor; the same properties are reused in stream mode.
        batch_args_1: Properties = {}
        ddl = """CREATE TABLE input_table (a INT, b INT, c INT) WITH ('connector' = 'filesystem',
                        'path' = 'INPUT',
                        'format' = 'csv'
                        )"""
        table_name = "input_table"
        batch_args_1['ddl'] = ddl
        batch_args_1['table_name'] = table_name

        stream_args_1 = batch_args_1

        # Same pattern for the output table.
        batch_args_2: Properties = {}
        ddl = """CREATE TABLE output_table (aa INT, cc INT) WITH ('connector' = 'filesystem',
                        'path' = 'OUTPUT',
                        'format' = 'csv'
                        )"""
        table_name = "output_table"
        batch_args_2['ddl'] = ddl
        batch_args_2['table_name'] = table_name
        stream_args_2 = batch_args_2

        # Read the input example, run the Java predict executor against a
        # fixed model version, and write the result to the output example.
        input_example = af.read_example(example_info=example_1,
                                        exec_args=ExecuteArgs(
                                            batch_properties=batch_args_1,
                                            stream_properties=stream_args_1))
        model_meta = af.ModelMeta(name="test", model_type="saved_model")
        model_version = af.ModelVersionMeta(version="11111",
                                            model_path="./tmp/saved_model/",
                                            model_metric="./tmp/saved_model/",
                                            model_id=0)
        processed = af.predict(
            input_data_list=[input_example],
            model_info=model_meta,
            model_version_info=model_version,
            executor=faf.flink_executor.FlinkJavaExecutor(
                java_class="com.apache.flink.ai.flow.TestPredict"))

        af.write_example(input_data=processed,
                         example_info=example_2,
                         exec_args=ExecuteArgs(
                             batch_properties=batch_args_2,
                             stream_properties=stream_args_2))

    # Grab the default graph, compile the project into a workflow, and
    # print the first compiled job definition.
    g = af.default_graph()
    workflow = af.compile_workflow(project_path=test_util.get_project_path())
    print(dumps(list(workflow.jobs.values())[0]))
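
A minimal usage sketch, assuming the function is driven the same way as the test fixtures above (the __main__ guard is an assumption):

if __name__ == '__main__':
    af.default_graph().clear_graph()  # start from an empty graph, as in the setUp examples
    run_flink_predict_job()           # builds the graph, compiles it and prints the Flink job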