Example #1
def test_stream_predict_component(self):
    batch_example_meta = af.register_example(
        name='batch_train_example',
        support_type=ExampleSupportType.EXAMPLE_BOTH)
    model_meta = af.register_model(model_name='mnist_model',
                                   model_type=ModelType.SAVED_MODEL)
    stream_predict_example_meta = af.register_example(
        name='stream_predict_example',
        support_type=ExampleSupportType.EXAMPLE_STREAM)
    stream_output_file = get_file_dir(__file__) + '/stream_predict'
    evaluate_output = af.register_artifact(name='stream_evaluate',
                                           batch_uri=stream_output_file)
    stream_predict_result_example_meta = af.register_example(
        name='stream_result_example',
        support_type=ExampleSupportType.EXAMPLE_STREAM,
        stream_uri=stream_output_file)
    if os.path.exists(stream_output_file):
        os.remove(stream_output_file)
    with af.config(
            af.BaseJobConfig(platform='local',
                             engine='python',
                             job_name='stream_predict')):
        batch_example = af.read_example(
            example_info=batch_example_meta,
            executor=PythonObjectExecutor(
                python_object=ReadBatchExample()))
        stream_predict_example = af.read_example(
            example_info=stream_predict_example_meta,
            executor=PythonObjectExecutor(
                python_object=ReadStreamExample()))
        batch_train = af.train(input_data_list=[batch_example],
                               executor=PythonObjectExecutor(
                                   python_object=TrainBatchMnistModel()),
                               model_info=model_meta)
        stream_predict = af.predict(
            input_data_list=[stream_predict_example],
            model_info=model_meta,
            executor=PythonObjectExecutor(
                python_object=PredictStreamMnistModel()),
            output_num=1)
        af.write_example(input_data=stream_predict,
                         example_info=stream_predict_result_example_meta,
                         executor=PythonObjectExecutor(
                             python_object=WriteStreamExample()))
    af.stop_before_control_dependency(stream_predict, batch_train)
    workflow_id = af.run(test_util.get_project_path())
    res = af.wait_workflow_execution_finished(workflow_id)
    self.assertEqual(0, res)
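Each `PythonObjectExecutor` above wraps a plain Python object that implements the executor contract. As a rough illustration, here is a minimal sketch of what `ReadBatchExample` might look like; the `Executor` import path, the `execute(self, function_context, input_list)` hook, and the `.npz` layout of the MNIST file are all assumptions, not part of the original test:

from typing import List

import numpy as np

from python_ai_flow import Executor  # assumption: old-style executor base class


class ReadBatchExample(Executor):

    def execute(self, function_context, input_list: List) -> List:
        # Load the batch training set registered as 'batch_train_example'.
        # Assumption: the example meta is reachable through the function
        # context and its batch_uri points at an .npz archive with
        # 'x_train'/'y_train' arrays.
        example_meta = function_context.node_spec.example_meta  # assumption
        with np.load(example_meta.batch_uri) as data:
            return [[data['x_train'], data['y_train']]]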
Example #2
def run_project(project_root_path):
    af.set_project_config_file(project_root_path + "/project.yaml")
    project_name = af.project_config().get_project_name()
    artifact_prefix = project_name + "."

    validate_trigger = af.external_trigger(name='validate')
    push_trigger = af.external_trigger(name='push')

    with af.global_config_file(project_root_path + '/resources/workflow_config.yaml'):
        with af.config('train_job'):
            train_example = af.register_example(name=artifact_prefix + 'train_example',
                                                support_type=ExampleSupportType.EXAMPLE_STREAM,
                                                stream_uri=EXAMPLE_URI.format('train'))
            train_read_example = af.read_example(example_info=train_example,
                                                 executor=PythonObjectExecutor(python_object=TrainExampleReader()))
            train_transform = af.transform(input_data_list=[train_read_example],
                                           executor=PythonObjectExecutor(python_object=TrainExampleTransformer()))
            train_model = af.register_model(model_name=artifact_prefix + 'logistic-regression',
                                            model_type=ModelType.SAVED_MODEL,
                                            model_desc='logistic regression model')
            train_channel = af.train(input_data_list=[train_transform],
                                     executor=PythonObjectExecutor(python_object=ModelTrainer()),
                                     model_info=train_model)
        with af.config('validate_job'):
            validate_example = af.register_example(name=artifact_prefix + 'validate_example',
                                                   support_type=ExampleSupportType.EXAMPLE_STREAM,
                                                   stream_uri=EXAMPLE_URI.format('evaluate'),
                                                   data_format='npz')
            validate_read_example = af.read_example(example_info=validate_example,
                                                    executor=PythonObjectExecutor(
                                                        python_object=ValidateExampleReader()))
            validate_transform = af.transform(input_data_list=[validate_read_example],
                                              executor=PythonObjectExecutor(python_object=ValidateTransformer()))
            validate_artifact_name = artifact_prefix + 'validate_artifact'
            validate_artifact = af.register_artifact(name=validate_artifact_name,
                                                     stream_uri=get_file_dir(__file__) + '/validate_result')
            validate_channel = af.model_validate(input_data_list=[validate_transform],
                                                 model_info=train_model,
                                                 executor=PythonObjectExecutor(
                                                     python_object=ModelValidator(validate_artifact_name)),
                                                 )
        with af.config('push_job'):
            # Push model to serving
            # Register metadata of pushed model
            push_model_artifact_name = artifact_prefix + 'push_model_artifact'
            push_model_artifact = af.register_artifact(name=push_model_artifact_name,
                                                       stream_uri=get_file_dir(__file__) + '/pushed_model')
            push_channel = af.push_model(model_info=train_model,
                                         executor=PythonObjectExecutor(
                                             python_object=ModelPusher(push_model_artifact_name)))

        with af.config('predict_job'):
            predict_example = af.register_example(name=artifact_prefix + 'predict_example',
                                                  support_type=ExampleSupportType.EXAMPLE_STREAM,
                                                  stream_uri=EXAMPLE_URI.format('predict'))
            predict_read_example = af.read_example(example_info=predict_example,
                                                   executor=PythonObjectExecutor(python_object=PredictExampleReader()))
            predict_transform = af.transform(input_data_list=[predict_read_example],
                                             executor=PythonObjectExecutor(python_object=PredictTransformer()))
            predict_channel = af.predict(input_data_list=[predict_transform],
                                         model_info=train_model,
                                         executor=PythonObjectExecutor(python_object=ModelPredictor()))

            write_example = af.register_example(name=artifact_prefix + 'export_example',
                                                support_type=ExampleSupportType.EXAMPLE_STREAM,
                                                stream_uri=get_file_dir(__file__) + '/predict_result')
            af.write_example(input_data=predict_channel,
                             example_info=write_example,
                             executor=PythonObjectExecutor(python_object=ExampleWriter()))

        af.model_version_control_dependency(src=validate_channel,
                                            model_version_event_type=ModelVersionEventType.MODEL_GENERATED,
                                            dependency=validate_trigger, model_name=train_model.name)

        af.model_version_control_dependency(src=push_channel,
                                            model_version_event_type=ModelVersionEventType.MODEL_VALIDATED,
                                            dependency=push_trigger, model_name=train_model.name)
    # Deploy the workflow to Airflow and run it
    transform_dag = project_name
    af.deploy_to_airflow(project_root_path, dag_id=transform_dag)
    af.run(project_path=project_root_path,
           dag_id=transform_dag,
           scheduler_type=SchedulerType.AIRFLOW)
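A hypothetical entry point for `run_project`; it assumes this script sits in the project root next to project.yaml and the resources/ directory:

import os

if __name__ == '__main__':
    # Assumption: this file lives in the project root referenced above.
    run_project(os.path.dirname(os.path.abspath(__file__)))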
Example #3
def run_workflow():
    # Init project
    af.init_ai_flow_context()

    artifact_prefix = af.current_project_config().get_project_name() + "."
    # Training of model
    with af.job_config('train'):
        # Register metadata of the training data (dataset) and read it (i.e. the training dataset)
        train_dataset = af.register_dataset(name=artifact_prefix + 'train_dataset',
                                            uri=DATASET_URI.format('train'))
        train_read_dataset = af.read_dataset(dataset_info=train_dataset,
                                             read_dataset_processor=DatasetReader())

        # Register model metadata and train model
        train_model = af.register_model(model_name=artifact_prefix + 'KNN',
                                        model_desc='KNN model')
        train_channel = af.train(input=[train_read_dataset],
                                 training_processor=ModelTrainer(),
                                 model_info=train_model)

    # Validation of model
    with af.job_config('validate'):
        # Read validation dataset
        validate_dataset = af.register_dataset(name=artifact_prefix + 'validate_dataset',
                                               uri=DATASET_URI.format('test'))
        # Validate model before it is used to predict
        validate_read_dataset = af.read_dataset(dataset_info=validate_dataset,
                                                read_dataset_processor=ValidateDatasetReader())
        validate_artifact_name = artifact_prefix + 'validate_artifact'
        validate_artifact = af.register_artifact(name=validate_artifact_name,
                                                 uri=get_file_dir(__file__) + '/validate_result')
        validate_channel = af.model_validate(input=[validate_read_dataset],
                                             model_info=train_model,
                                             model_validation_processor=ModelValidator(validate_artifact_name))

    # Prediction(Inference) using flink
    with af.job_config('predict'):
        # Read test data and do prediction
        predict_dataset = af.register_dataset(name=artifact_prefix + 'predict_dataset',
                                              uri=DATASET_URI.format('test'))
        predict_read_dataset = af.read_dataset(dataset_info=predict_dataset,
                                               read_dataset_processor=Source())
        predict_channel = af.predict(input=[predict_read_dataset],
                                     model_info=train_model,
                                     prediction_processor=Predictor())
        # Save prediction result
        write_dataset = af.register_dataset(name=artifact_prefix + 'write_dataset',
                                            uri=get_file_dir(__file__) + '/predict_result.csv')
        af.write_dataset(input=predict_channel,
                         dataset_info=write_dataset,
                         write_dataset_processor=Sink())

    # Define relation graph connected by control edge: train -> validate -> predict
    af.action_on_model_version_event(job_name='validate',
                                     model_version_event_type=ModelVersionEventType.MODEL_GENERATED,
                                     model_name=train_model.name)
    af.action_on_model_version_event(job_name='predict',
                                     model_version_event_type=ModelVersionEventType.MODEL_VALIDATED,
                                     model_name=train_model.name)
    # Submit workflow
    af.workflow_operation.submit_workflow(af.current_workflow_config().workflow_name)
    # Run workflow
    af.workflow_operation.start_new_workflow_execution(af.current_workflow_config().workflow_name)
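The processors passed above (`DatasetReader`, `ModelTrainer`, and so on) are defined elsewhere in the project. As a sketch of the shape such a processor takes, here is what `DatasetReader` might look like; the `PythonProcessor` import path, the 'dataset' key in `execution_context.config`, and the CSV layout are assumptions, not confirmed by the snippet:

from typing import List

import pandas as pd

from ai_flow_plugins.job_plugins.python.python_processor import ExecutionContext, PythonProcessor


class DatasetReader(PythonProcessor):

    def process(self, execution_context: ExecutionContext, input_list: List) -> List:
        # Look up the dataset registered as 'train_dataset' above (assumption:
        # the dataset meta is carried in the execution context config).
        dataset_meta = execution_context.config.get('dataset')
        # Assumption: the URI points at a headerless CSV whose last column is the label.
        frame = pd.read_csv(dataset_meta.uri, header=None)
        return [[frame.iloc[:, :-1].values, frame.iloc[:, -1].values]]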
Example #4
def run_project(project_root_path):

    af.set_project_config_file(project_root_path + "/project.yaml")
    project_name = af.project_config().get_project_name()
    artifact_prefix = project_name + "."

    validate_trigger = af.external_trigger(name='validate')
    push_trigger = af.external_trigger(name='push')

    with af.global_config_file(project_root_path + '/resources/workflow_config.yaml'):
        # The train job is configured as a periodic job, which means it runs
        # every `interval` seconds (interval is defined in workflow_config.yaml)
        with af.config('train_job'):
            # Register metadata of the raw training data (example) and read it (i.e. the training dataset)
            train_example = af.register_example(name=artifact_prefix + 'train_example',
                                                support_type=ExampleSupportType.EXAMPLE_BATCH,
                                                batch_uri=EXAMPLE_URI.format('train'))
            train_read_example = af.read_example(example_info=train_example,
                                                 executor=PythonObjectExecutor(python_object=ExampleReader()))

            # Transform (preprocess) the example data
            train_transform = af.transform(input_data_list=[train_read_example],
                                           executor=PythonObjectExecutor(python_object=ExampleTransformer()))

            # Register model metadata and train model
            train_model = af.register_model(model_name=artifact_prefix + 'logistic-regression',
                                            model_type=ModelType.SAVED_MODEL,
                                            model_desc='logistic regression model')
            train_channel = af.train(input_data_list=[train_transform],
                                     executor=PythonObjectExecutor(python_object=ModelTrainer()),
                                     model_info=train_model)
        with af.config('validate_job'):
            # Validation of model
            # Read validation dataset and validate model before it is used to predict

            validate_example = af.register_example(name=artifact_prefix + 'validate_example',
                                                   support_type=ExampleSupportType.EXAMPLE_BATCH,
                                                   batch_uri=EXAMPLE_URI.format('evaluate'))
            validate_read_example = af.read_example(example_info=validate_example,
                                                    executor=PythonObjectExecutor(
                                                        python_object=ValidateExampleReader()))
            validate_transform = af.transform(input_data_list=[validate_read_example],
                                              executor=PythonObjectExecutor(python_object=ValidateTransformer()))
            validate_artifact_name = artifact_prefix + 'validate_artifact'
            validate_artifact = af.register_artifact(name=validate_artifact_name,
                                                     batch_uri=get_file_dir(__file__) + '/validate_result')
            validate_channel = af.model_validate(input_data_list=[validate_transform],
                                                 model_info=train_model,
                                                 executor=PythonObjectExecutor(
                                                     python_object=ModelValidator(validate_artifact_name)))
        with af.config('push_job'):
            # Push model to serving
            # Register metadata of pushed model
            push_model_artifact_name = artifact_prefix + 'push_model_artifact'
            push_model_artifact = af.register_artifact(name=push_model_artifact_name,
                                                       batch_uri=get_file_dir(__file__) + '/pushed_model')
            push_channel = af.push_model(model_info=train_model,
                                         executor=PythonObjectExecutor(
                                            python_object=ModelPusher(push_model_artifact_name)))

        with af.config('predict_job'):
            # Prediction(Inference)
            predict_example = af.register_example(name=artifact_prefix + 'predict_example',
                                                  support_type=ExampleSupportType.EXAMPLE_STREAM,
                                                  stream_uri=EXAMPLE_URI.format('predict'))
            predict_read_example = af.read_example(example_info=predict_example,
                                                   executor=PythonObjectExecutor(python_object=PredictExampleReader()))
            predict_transform = af.transform(input_data_list=[predict_read_example],
                                             executor=PythonObjectExecutor(python_object=PredictTransformer()))
            predict_channel = af.predict(input_data_list=[predict_transform],
                                         model_info=train_model,
                                         executor=PythonObjectExecutor(python_object=ModelPredictor()))
            # Save prediction result
            write_example = af.register_example(name=artifact_prefix + 'write_example',
                                                support_type=ExampleSupportType.EXAMPLE_STREAM,
                                                stream_uri=get_file_dir(__file__) + '/predict_result')
            af.write_example(input_data=predict_channel,
                             example_info=write_example,
                             executor=PythonObjectExecutor(python_object=ExampleWriter()))

        # Define the relation graph connected by control edges:
        # once a round of training is done, the validator is launched, and
        # the pusher is launched if the new model is better.
        # Prediction starts once the first round of training is done, and
        # whenever the pusher deploys a new model, the predictor switches to the latest deployed model.
        af.model_version_control_dependency(src=validate_channel,
                                            model_version_event_type=ModelVersionEventType.MODEL_GENERATED,
                                            dependency=validate_trigger, model_name=train_model.name)
        af.model_version_control_dependency(src=push_channel,
                                            model_version_event_type=ModelVersionEventType.MODEL_VALIDATED,
                                            dependency=push_trigger, model_name=train_model.name)

    # Deploy the workflow to Airflow and run it
    transform_dag = project_name
    af.deploy_to_airflow(project_root_path, dag_id=transform_dag)
    af.run(project_path=project_root_path,
           dag_id=transform_dag,
           scheduler_type=SchedulerType.AIRFLOW)
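The `ModelTrainer` executor that `train_job` relies on is defined elsewhere. A minimal sketch of one possible implementation follows; the `Executor` base class, the `function_context.node_spec.output_model` attribute, and the exact `af.register_model_version` signature are assumptions:

import os
import uuid
from typing import List

import ai_flow as af
from joblib import dump
from sklearn.linear_model import LogisticRegression

from python_ai_flow import Executor  # assumption: old-style executor base class


class ModelTrainer(Executor):

    def execute(self, function_context, input_list: List) -> List:
        # input_list[0] holds the transformed training set as [features, labels].
        x_train, y_train = input_list[0][0], input_list[0][1]
        model = LogisticRegression(max_iter=1000)
        model.fit(x_train, y_train)
        # Persist the model and register a new version; registering is what
        # emits the MODEL_GENERATED event that wakes up validate_job.
        model_path = os.path.join('/tmp/models', str(uuid.uuid4()))
        os.makedirs(os.path.dirname(model_path), exist_ok=True)
        dump(model, model_path)
        af.register_model_version(model=function_context.node_spec.output_model,  # assumption
                                  model_path=model_path)
        return []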
Example #5
def run_workflow():
    af.init_ai_flow_context()

    artifact_prefix = af.current_project_config().get_project_name() + "."

    # The train job is configured as a periodic job, which means it runs
    # every `interval` seconds (interval is defined in workflow_config.yaml)
    with af.job_config('train'):
        # Register metadata of the raw training data (dataset) and read it (i.e. the training dataset)
        train_dataset = af.register_dataset(name=artifact_prefix +
                                            'train_dataset',
                                            uri=DATASET_URI.format('train'))
        train_read_dataset = af.read_dataset(
            dataset_info=train_dataset, read_dataset_processor=DatasetReader())

        # Transform (preprocess) the dataset
        train_transform = af.transform(
            input=[train_read_dataset],
            transform_processor=DatasetTransformer())

        # Register model metadata and train model
        train_model = af.register_model(model_name=artifact_prefix +
                                        'logistic-regression',
                                        model_desc='logistic regression model')
        train_channel = af.train(input=[train_transform],
                                 training_processor=ModelTrainer(),
                                 model_info=train_model)
    with af.job_config('validate'):
        # Validation of model
        # Read validation dataset and validate model before it is used to predict

        validate_dataset = af.register_dataset(
            name=artifact_prefix + 'validate_dataset',
            uri=DATASET_URI.format('evaluate'))
        validate_read_dataset = af.read_dataset(
            dataset_info=validate_dataset,
            read_dataset_processor=ValidateDatasetReader())
        validate_transform = af.transform(
            input=[validate_read_dataset],
            transform_processor=ValidateTransformer())
        validate_artifact_name = artifact_prefix + 'validate_artifact'
        validate_artifact = af.register_artifact(name=validate_artifact_name,
                                                 uri=get_file_dir(__file__) +
                                                 '/validate_result')
        validate_channel = af.model_validate(
            input=[validate_transform],
            model_info=train_model,
            model_validation_processor=ModelValidator(validate_artifact_name))
    with af.job_config('push'):
        # Push model to serving
        # Register metadata of pushed model
        push_model_artifact_name = artifact_prefix + 'push_model_artifact'
        push_model_artifact = af.register_artifact(
            name=push_model_artifact_name,
            uri=get_file_dir(__file__) + '/pushed_model')
        af.push_model(
            model_info=train_model,
            pushing_model_processor=ModelPusher(push_model_artifact_name))

    with af.job_config('predict'):
        # Prediction(Inference)
        predict_dataset = af.register_dataset(
            name=artifact_prefix + 'predict_dataset',
            uri=DATASET_URI.format('predict'))
        predict_read_dataset = af.read_dataset(
            dataset_info=predict_dataset,
            read_dataset_processor=PredictDatasetReader())
        predict_transform = af.transform(
            input=[predict_read_dataset],
            transform_processor=PredictTransformer())
        predict_channel = af.predict(input=[predict_transform],
                                     model_info=train_model,
                                     prediction_processor=ModelPredictor())
        # Save prediction result
        write_dataset = af.register_dataset(
            name=artifact_prefix + 'write_dataset',
            uri=get_file_dir(__file__) + '/predict_result')
        af.write_dataset(input=predict_channel,
                         dataset_info=write_dataset,
                         write_dataset_processor=DatasetWriter())

    # Define the relation graph connected by control edges:
    # once a round of training is done, the validator is launched, and
    # the pusher is launched if the new model is better.
    # Prediction starts once the first round of training is done, and
    # whenever the pusher deploys a new model, the predictor switches to the latest deployed model.
    af.action_on_model_version_event(
        job_name='validate',
        model_version_event_type=ModelVersionEventType.MODEL_GENERATED,
        model_name=train_model.name)
    af.action_on_model_version_event(
        job_name='push',
        model_version_event_type=ModelVersionEventType.MODEL_VALIDATED,
        model_name=train_model.name)

    # Run workflow
    af.workflow_operation.submit_workflow(
        af.current_workflow_config().workflow_name)
    af.workflow_operation.start_new_workflow_execution(
        af.current_workflow_config().workflow_name)
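The `push` job above deploys whichever model version was just validated. A sketch of what `ModelPusher` might do; the `af.get_artifact_by_name` and `af.get_latest_validated_model_version` helpers, the 'model_info' config key, and the single-file model layout are all assumptions:

import shutil
from typing import List

import ai_flow as af
from ai_flow_plugins.job_plugins.python.python_processor import ExecutionContext, PythonProcessor


class ModelPusher(PythonProcessor):

    def __init__(self, artifact_name: str):
        super().__init__()
        self.artifact_name = artifact_name

    def process(self, execution_context: ExecutionContext, input_list: List) -> List:
        # Serving directory registered above as 'push_model_artifact'
        # (assumption: artifact metadata can be looked up by name).
        deploy_dir = af.get_artifact_by_name(self.artifact_name).uri
        model_meta = execution_context.config.get('model_info')  # assumption
        # Copy the latest validated version into the serving directory
        # (assumption: the saved model is a single file).
        version = af.get_latest_validated_model_version(model_meta.name)
        shutil.copy(version.model_path, deploy_dir)
        return []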
Example #6
def run_flink_predict_job():
    input_file = "/test1.csv"
    output_file = "/output_test2.csv"
    example_1 = af.create_example(
        name="example_1",
        support_type=af.ExampleSupportType.EXAMPLE_BOTH,
        batch_uri=input_file,
        stream_uri=input_file,
        data_format="csv")

    example_2 = af.create_example(
        name="example_2",
        support_type=af.ExampleSupportType.EXAMPLE_BOTH,
        batch_uri=output_file,
        stream_uri=output_file,
        data_format="csv")
    flink_config = faf.LocalFlinkJobConfig()
    flink_config.flink_home = ''
    with af.config(flink_config):
        batch_args_1: Properties = {}
        ddl = """CREATE TABLE input_table (a INT, b INT, c INT) WITH ('connector' = 'filesystem',
                        'path' = 'INPUT',
                        'format' = 'csv'
                        )"""
        table_name = "input_table"
        batch_args_1['ddl'] = ddl
        batch_args_1['table_name'] = table_name

        stream_args_1 = batch_args_1

        batch_args_2: Properties = {}
        ddl = """CREATE TABLE output_table (aa INT, cc INT) WITH ('connector' = 'filesystem',
                        'path' = 'OUTPUT',
                        'format' = 'csv'
                        )"""
        table_name = "output_table"
        batch_args_2['ddl'] = ddl
        batch_args_2['table_name'] = table_name
        stream_args_2 = batch_args_2

        input_example = af.read_example(example_info=example_1,
                                        exec_args=ExecuteArgs(
                                            batch_properties=batch_args_1,
                                            stream_properties=stream_args_1))
        model_meta = af.ModelMeta(name="test", model_type="saved_model")
        model_version = af.ModelVersionMeta(version="11111",
                                            model_path="./tmp/saved_model/",
                                            model_metric="./tmp/saved_model/",
                                            model_id=0)
        processed = af.predict(
            input_data_list=[input_example],
            model_info=model_meta,
            model_version_info=model_version,
            executor=faf.flink_executor.FlinkJavaExecutor(
                java_class="com.apache.flink.ai.flow.TestPredict"))

        af.write_example(input_data=processed,
                         example_info=example_2,
                         exec_args=ExecuteArgs(
                             batch_properties=batch_args_2,
                             stream_properties=stream_args_2))

    # Compile the workflow and print the first job's spec as JSON.
    g = af.default_graph()
    workflow = af.compile_workflow(project_path=test_util.get_project_path())
    print(dumps(list(workflow.jobs.values())[0]))
Example #7
def run_workflow():
    af.init_ai_flow_context()

    artifact_prefix = af.current_project_config().get_project_name() + "."

    with af.job_config('train'):
        # Register metadata of the raw training data (dataset) and read it (i.e. the training dataset)
        train_dataset = af.register_dataset(name=artifact_prefix +
                                            'train_dataset',
                                            uri=DATASET_URI.format('train'))
        train_read_dataset = af.read_dataset(
            dataset_info=train_dataset,
            read_dataset_processor=TrainDatasetReader())
        train_transform = af.transform(
            input=[train_read_dataset],
            transform_processor=TrainDatasetTransformer())
        train_model = af.register_model(model_name=artifact_prefix +
                                        'logistic-regression',
                                        model_desc='logistic regression model')
        train_channel = af.train(input=[train_transform],
                                 training_processor=ModelTrainer(),
                                 model_info=train_model)
    with af.job_config('validate'):
        validate_dataset = af.register_dataset(
            name=artifact_prefix + 'validate_dataset',
            uri=DATASET_URI.format('evaluate'))
        validate_read_dataset = af.read_dataset(
            dataset_info=validate_dataset,
            read_dataset_processor=ValidateDatasetReader())
        validate_transform = af.transform(
            input=[validate_read_dataset],
            transform_processor=ValidateTransformer())
        validate_artifact_name = artifact_prefix + 'validate_artifact'
        validate_artifact = af.register_artifact(name=validate_artifact_name,
                                                 uri=get_file_dir(__file__) +
                                                 '/validate_result')
        validate_channel = af.model_validate(
            input=[validate_transform],
            model_info=train_model,
            model_validation_processor=ModelValidator(validate_artifact_name))
    with af.job_config('push'):
        # Push model to serving
        # Register metadata of pushed model
        push_model_artifact_name = artifact_prefix + 'push_model_artifact'
        push_model_artifact = af.register_artifact(
            name=push_model_artifact_name,
            uri=get_file_dir(__file__) + '/pushed_model')
        af.push_model(
            model_info=train_model,
            pushing_model_processor=ModelPusher(push_model_artifact_name))
    with af.job_config('predict'):
        predict_dataset = af.register_dataset(
            name=artifact_prefix + 'predict_dataset',
            uri=DATASET_URI.format('predict'))
        predict_read_dataset = af.read_dataset(
            dataset_info=predict_dataset,
            read_dataset_processor=PredictDatasetReader())
        predict_transform = af.transform(
            input=[predict_read_dataset],
            transform_processor=PredictTransformer())
        predict_channel = af.predict(input=[predict_transform],
                                     model_info=train_model,
                                     prediction_processor=ModelPredictor())
        write_dataset = af.register_dataset(
            name=artifact_prefix + 'export_dataset',
            uri=get_file_dir(__file__) + '/predict_result')
        af.write_dataset(input=predict_channel,
                         dataset_info=write_dataset,
                         write_dataset_processor=DatasetWriter())

    af.action_on_model_version_event(
        job_name='validate',
        model_version_event_type=ModelVersionEventType.MODEL_GENERATED,
        model_name=train_model.name)
    af.action_on_model_version_event(
        job_name='push',
        model_version_event_type=ModelVersionEventType.MODEL_VALIDATED,
        model_name=train_model.name)

    # Run workflow
    af.workflow_operation.submit_workflow(
        af.current_workflow_config().workflow_name)
    af.workflow_operation.start_new_workflow_execution(
        af.current_workflow_config().workflow_name)
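The MODEL_GENERATED edge above only fires the `validate` job; it is `ModelValidator` that decides whether to promote the new version. A minimal sketch under stated assumptions (the `af.get_latest_generated_model_version`, `af.get_artifact_by_name` and `af.update_model_version` helpers, the `ModelVersionStage` import path, and the 'model_info' config key are unverified here):

from typing import List

import ai_flow as af
from ai_flow.model_center.entity.model_version_stage import ModelVersionStage
from ai_flow_plugins.job_plugins.python.python_processor import ExecutionContext, PythonProcessor
from joblib import load
from sklearn.metrics import accuracy_score


class ModelValidator(PythonProcessor):

    def __init__(self, artifact_name: str):
        super().__init__()
        self.artifact_name = artifact_name

    def process(self, execution_context: ExecutionContext, input_list: List) -> List:
        x_validate, y_validate = input_list[0][0], input_list[0][1]
        model_meta = execution_context.config.get('model_info')  # assumption
        new_version = af.get_latest_generated_model_version(model_meta.name)
        score = accuracy_score(y_validate, load(new_version.model_path).predict(x_validate))
        # Append the score to the registered validate_artifact for inspection.
        with open(af.get_artifact_by_name(self.artifact_name).uri, 'a') as f:
            f.write('{} {}\n'.format(new_version.version, score))
        # Promoting the version emits MODEL_VALIDATED, which wakes up the push job.
        af.update_model_version(model_name=model_meta.name,
                                model_version=new_version.version,
                                current_stage=ModelVersionStage.VALIDATED)
        return []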
Example #8
def run_workflow():
    af.init_ai_flow_context()
    artifact_prefix = af.current_project_config().get_project_name() + "."
    with af.job_config('train'):
        # Training of model
        # Register metadata of the raw training data (dataset) and read it (i.e. the training dataset)
        train_dataset = af.register_dataset(name=artifact_prefix +
                                            'train_dataset',
                                            uri=DATASET_URI.format('train'))
        train_read_dataset = af.read_dataset(
            dataset_info=train_dataset, read_dataset_processor=DatasetReader())

        # Transform (preprocess) the dataset
        train_transform = af.transform(
            input=[train_read_dataset],
            transform_processor=DatasetTransformer())

        # Register model metadata and train model
        train_model = af.register_model(model_name=artifact_prefix +
                                        'logistic-regression',
                                        model_desc='logistic regression model')
        train_channel = af.train(input=[train_transform],
                                 training_processor=ModelTrainer(),
                                 model_info=train_model)

    with af.job_config('evaluate'):
        # Evaluation of model
        evaluate_dataset = af.register_dataset(
            name=artifact_prefix + 'evaluate_dataset',
            uri=DATASET_URI.format('evaluate'))
        evaluate_read_dataset = af.read_dataset(
            dataset_info=evaluate_dataset,
            read_dataset_processor=EvaluateDatasetReader())
        evaluate_transform = af.transform(
            input=[evaluate_read_dataset],
            transform_processor=EvaluateTransformer())
        # Register disk path used to save evaluate result
        evaluate_artifact_name = artifact_prefix + 'evaluate_artifact'
        evaluate_artifact = af.register_artifact(name=evaluate_artifact_name,
                                                 uri=get_file_dir(__file__) +
                                                 '/evaluate_result')
        # Evaluate model
        evaluate_channel = af.evaluate(
            input=[evaluate_transform],
            model_info=train_model,
            evaluation_processor=ModelEvaluator(evaluate_artifact_name))

    with af.job_config('validate'):
        # Validation of model
        # Read validation dataset and validate model before it is used to predict

        validate_dataset = af.register_dataset(
            name=artifact_prefix + 'validate_dataset',
            uri=DATASET_URI.format('evaluate'))
        validate_read_dataset = af.read_dataset(
            dataset_info=validate_dataset,
            read_dataset_processor=ValidateDatasetReader())
        validate_transform = af.transform(
            input=[validate_read_dataset],
            transform_processor=ValidateTransformer())
        validate_artifact_name = artifact_prefix + 'validate_artifact'
        validate_artifact = af.register_artifact(name=validate_artifact_name,
                                                 uri=get_file_dir(__file__) +
                                                 '/validate_result')
        validate_channel = af.model_validate(
            input=[validate_transform],
            model_info=train_model,
            model_validation_processor=ModelValidator(validate_artifact_name))
    with af.job_config('push'):
        # Push model to serving
        # Register metadata of pushed model
        push_model_artifact_name = artifact_prefix + 'push_model_artifact'
        push_model_artifact = af.register_artifact(
            name=push_model_artifact_name,
            uri=get_file_dir(__file__) + '/pushed_model')
        af.push_model(
            model_info=train_model,
            pushing_model_processor=ModelPusher(push_model_artifact_name))

    with af.job_config('predict'):
        # Prediction(Inference)
        predict_dataset = af.register_dataset(
            name=artifact_prefix + 'predict_dataset',
            uri=DATASET_URI.format('predict'))
        predict_read_dataset = af.read_dataset(
            dataset_info=predict_dataset,
            read_dataset_processor=PredictDatasetReader())
        predict_transform = af.transform(
            input=[predict_read_dataset],
            transform_processor=PredictTransformer())
        predict_channel = af.predict(input=[predict_transform],
                                     model_info=train_model,
                                     prediction_processor=ModelPredictor())
        # Save prediction result
        write_dataset = af.register_dataset(
            name=artifact_prefix + 'write_dataset',
            uri=get_file_dir(__file__) + '/predict_result')
        af.write_dataset(input=predict_channel,
                         dataset_info=write_dataset,
                         write_dataset_processor=DatasetWriter())

    # Define the relation graph connected by control edges: train -> evaluate -> validate -> push -> predict
    af.action_on_job_status('evaluate', 'train')
    af.action_on_job_status('validate', 'evaluate')
    af.action_on_job_status('push', 'validate')
    af.action_on_job_status('predict', 'push')

    # Run workflow
    af.workflow_operation.submit_workflow(
        af.current_workflow_config().workflow_name)
    af.workflow_operation.start_new_workflow_execution(
        af.current_workflow_config().workflow_name)
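The two-argument `af.action_on_job_status(downstream, upstream)` calls above chain the jobs by status. Presumably they are shorthand for an explicit form along these lines; the `upstream_job_status` and `action` parameter names and the `Status`/`JobAction` import paths are assumptions:

from ai_flow.workflow.control_edge import JobAction
from ai_flow.workflow.status import Status

# Start 'evaluate' once 'train' reaches FINISHED; the other three edges follow the same pattern.
af.action_on_job_status(job_name='evaluate',
                        upstream_job_name='train',
                        upstream_job_status=Status.FINISHED,
                        action=JobAction.START)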
Example #9
def run_workflow():
    """
    Run the user-defined workflow definition.
    """
    train_data_file, predict_result_directory, merge_predict_result_path, \
    first_test_data_file, first_result_data_file = collect_data_file()
    # Prepare workflow: Example & Model Metadata registration.
    train_example_meta, predict_result_meta, merge_data_meta, first_test_example_meta, second_test_example_meta, \
    first_result_example_meta, second_result_example_meta, train_model_meta = \
        prepare_workflow(train_data_file=train_data_file,
                         predict_result_directory=predict_result_directory,
                         merge_predict_result_path=merge_predict_result_path,
                         first_test_data_file=first_test_data_file,
                         first_result_data_file=first_result_data_file)

    # Save proxima indexes under the following index path.
    index_path = '{}/codes/{}/'.format(os.environ['ENV_HOME'], os.environ['TASK_ID']) + 'test.index'

    # Set Python job config to train model.
    python_job_config_0 = BaseJobConfig(platform='local', engine='python', job_name='train')

    python_job_config_1 = BaseJobConfig(platform='local', engine='python', job_name='start_cluster_serving')

    python_job_config_2 = BaseJobConfig(platform='local', engine='python', job_name='merge_predict_result')

    # Set Flink job config to predict with cluster serving
    global_job_config_1 = LocalFlinkJobConfig()
    global_job_config_1.local_mode = 'cluster'
    global_job_config_1.flink_home = os.environ['FLINK_HOME']
    global_job_config_1.job_name = 'cluster_serving'
    global_job_config_1.set_table_env_create_func(StreamTableEnvCreatorBuildIndex())

    # Set Flink job config to build index.
    global_job_config_2 = LocalFlinkJobConfig()
    global_job_config_2.local_mode = 'cluster'
    global_job_config_2.flink_home = os.environ['FLINK_HOME']
    global_job_config_2.job_name = 'build_index'
    global_job_config_2.set_table_env_create_func(StreamTableEnvCreator())

    # Set Flink job config to find sick records.
    global_job_config_3 = LocalFlinkJobConfig()
    global_job_config_3.local_mode = 'cluster'
    global_job_config_3.flink_home = os.environ['FLINK_HOME']
    global_job_config_3.job_name = 'find_sick'
    global_job_config_3.set_table_env_create_func(StreamTableEnvCreator())

    # Set Flink job config for the online cluster.
    global_job_config_4 = LocalFlinkJobConfig()
    global_job_config_4.local_mode = 'cluster'
    global_job_config_4.flink_home = os.environ['FLINK_HOME']
    global_job_config_4.job_name = 'online_cluster'
    global_job_config_4.set_table_env_create_func(StreamTableEnvCreator())

    with af.config(python_job_config_0):
        # Under the first job config, we construct the first job, which trains an auto-encoder model.
        python_job_0_read_train_example = af.read_example(example_info=train_example_meta,
                                                          executor=PythonObjectExecutor(python_object=ReadCsvExample()))
        python_job_0_train_model = af.train(input_data_list=[python_job_0_read_train_example],
                                            executor=PythonObjectExecutor(python_object=TrainAutoEncoder()),
                                            model_info=train_model_meta,
                                            name='trainer_0')

    with af.config(python_job_config_1):
        python_job_1_cluster_serving_channel = af.cluster_serving(model_info=train_model_meta, parallelism=2)
        # python_job_1_cluster_serving_channel = af.cluster_serving(model_info=train_model_meta, parallelism=16)

    with af.config(global_job_config_1):
        flink_job_0_read_train_example = af.read_example(example_info=train_example_meta,
                                                         executor=FlinkPythonExecutor(python_object=ReadTrainExample()))
        flink_job_0_predict_model = af.predict(input_data_list=[flink_job_0_read_train_example],
                                               model_info=train_model_meta,
                                               executor=FlinkPythonExecutor(
                                                   python_object=PredictAutoEncoderWithTrain()))
        flink_job_0_write_predict_data = af.write_example(input_data=flink_job_0_predict_model,
                                                          example_info=predict_result_meta,
                                                          executor=FlinkPythonExecutor(
                                                              python_object=WritePredictResult()))

    with af.config(python_job_config_2):
        python_job_2_merge_train_data_file = af.user_define_operation(executor=PythonObjectExecutor(
            python_object=MergePredictResult()))

    with af.config(global_job_config_2):
        flink_job_1_read_train_example = af.read_example(example_info=merge_data_meta,
                                                         executor=FlinkPythonExecutor(python_object=ReadMergeExample()))
        flink_job_1_build_index_channel = af.transform([flink_job_1_read_train_example],
                                                       executor=FlinkPythonExecutor(
                                                           python_object=BuildIndexExecutor(index_path, FloatDataType(),
                                                                                            128)))

    with af.config(global_job_config_3):
        flink_job_2_read_history_example = af.read_example(example_info=first_test_example_meta,
                                                           executor=FlinkPythonExecutor(
                                                               python_object=ReadPredictExample()))
        flink_job_2_predict_model = af.predict(input_data_list=[flink_job_2_read_history_example],
                                               model_info=train_model_meta,
                                               executor=FlinkPythonExecutor(python_object=PredictAutoEncoder()))
        flink_job_2_transformed_data = af.transform([flink_job_2_predict_model],
                                                    executor=FlinkPythonExecutor(
                                                        python_object=SearchExecutor(index_path, FloatDataType(), 2)))
        flink_job_2_read_train_example = af.read_example(example_info=train_example_meta,
                                                         executor=FlinkPythonExecutor(python_object=ReadTrainExample()))
        flink_job_2_join_channel = af.transform(
            input_data_list=[flink_job_2_transformed_data, flink_job_2_read_train_example],
            executor=FlinkPythonExecutor(python_object=FindHistory()))
        flink_job_2_write_result = af.write_example(input_data=flink_job_2_join_channel,
                                                    example_info=first_result_example_meta,
                                                    executor=FlinkPythonExecutor(python_object=SearchSink()))

    with af.config(global_job_config_4):
        flink_job_3_read_online_example = af.read_example(example_info=second_test_example_meta,
                                                          executor=FlinkPythonExecutor(
                                                              python_object=ReadOnlinePredictExample()))
        flink_job_3_predict_model = af.predict(input_data_list=[flink_job_3_read_online_example],
                                               model_info=train_model_meta,
                                               executor=FlinkPythonExecutor(python_object=OnlinePredictAutoEncoder()))
        flink_job_3_transformed_data = af.transform([flink_job_3_predict_model],
                                                    executor=FlinkPythonExecutor(
                                                        python_object=SearchExecutor3(index_path, FloatDataType(), 2)))
        af.write_example(input_data=flink_job_3_transformed_data,
                         example_info=second_result_example_meta,
                         executor=FlinkPythonExecutor(python_object=WriteSecondResult()))

    af.stop_before_control_dependency(python_job_1_cluster_serving_channel, python_job_0_train_model)
    af.stop_before_control_dependency(flink_job_0_read_train_example, python_job_1_cluster_serving_channel)
    af.stop_before_control_dependency(python_job_2_merge_train_data_file, flink_job_0_read_train_example)
    af.stop_before_control_dependency(flink_job_1_build_index_channel, python_job_2_merge_train_data_file)
    af.stop_before_control_dependency(flink_job_2_read_history_example, flink_job_1_build_index_channel)
    af.stop_before_control_dependency(flink_job_3_read_online_example, flink_job_2_write_result)
    workflow_id = af.run(get_project_path()+'/')
    res = af.wait_workflow_execution_finished(workflow_id)
    sys.exit(res)
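A hypothetical entry point for the function above; it reads FLINK_HOME, ENV_HOME and TASK_ID from the environment, so failing fast when they are missing avoids a KeyError halfway through job setup:

import os

if __name__ == '__main__':
    for var in ('FLINK_HOME', 'ENV_HOME', 'TASK_ID'):
        # run_workflow() dereferences these via os.environ[...], so check up front.
        assert var in os.environ, '{} must be set'.format(var)
    run_workflow()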