Example #1
0
def get_existing_monitor_pipeline(workflow_arn):
    """
    Dummy implementation that returns the existing data-monitoring pipeline.

    Builds a placeholder Workflow (empty step chain) and attaches it to the
    state machine identified by *workflow_arn*.
    """
    placeholder = Workflow(
        name="data_pipeline_name",
        definition=Chain([]),
        role="workflow_execution_role",
    )
    return placeholder.attach(workflow_arn)
Example #2
0
def get_existing_training_pipeline(workflow_arn):
    """
    Dummy implementation that returns the existing training pipeline.

    Builds a placeholder Workflow (empty step chain) and attaches it to the
    state machine identified by *workflow_arn*.
    """
    placeholder = Workflow(
        name="training_pipeline_name",
        definition=Chain([]),
        role="workflow_execution_role",
    )
    return placeholder.attach(workflow_arn)
def get_existing_inference_pipeline(workflow_arn):
    """
    Dummy implementation to get the existing inference pipeline.

    Builds a placeholder Workflow (empty step chain) and attaches it to the
    state machine identified by *workflow_arn*.

    TODO: This could be a good PR for the SDK.
    """
    inference_pipeline = Workflow(
        name="inference_pipeline_name",
        definition=Chain([]),
        role="workflow_execution_role",
    )

    return inference_pipeline.attach(workflow_arn)
Example #4
0
def test_attach_existing_workflow(client):
    """Attaching to an existing state machine restores its name, role, and ARN."""
    attached = Workflow.attach(state_machine_arn, client)
    assert attached.name == state_machine_name
    assert attached.role == role_arn
    assert attached.state_machine_arn == state_machine_arn
Example #5
0
                                                model_name=job_name,
                                                initial_instance_count=1,
                                                instance_type='ml.m5.large')

endpoint_step = steps.EndpointStep(
    "Create or Update Endpoint",
    endpoint_name=execution_input['EndpointName'],
    endpoint_config_name=job_name,
    update=update_endpoint)

# Full pipeline: train -> register model -> endpoint config -> endpoint.
workflow_definition = steps.Chain(
    [training_step, model_step, endpoint_config_step, endpoint_step])

# Update the workflow that is already created

workflow = Workflow.attach(workflow_arn)
workflow.update(definition=workflow_definition)
print('Workflow updated: {}'.format(workflow_arn))

# Give Step Functions time to apply the updated definition before executing
time.sleep(5)

execution = workflow.execute(inputs=execution_params)
stepfunction_arn = execution.execution_arn
# Fixed typo in the log message: 'exectuted' -> 'executed'
print('Workflow executed: {}'.format(stepfunction_arn))

# Export environment variables

# exist_ok avoids the check-then-create race of the previous
# `if not os.path.exists(...)` guard.
os.makedirs('cloud_formation', exist_ok=True)
Example #6
0
workflow_definition = steps.Chain([
    etl_step,
    training_step,
    model_step,
    lambda_step,
    check_accuracy_step
])

# Prefer updating the existing workflow so the Step Functions run history
# is preserved; fall back to creating a brand-new workflow if attaching
# or updating fails. (Deleting and recreating would lose the history.)
try:
    state_machine_arn = (
        'arn:aws:states:ap-southeast-2:' + account_id
        + ':stateMachine:' + workflow_name
    )
    workflow = Workflow.attach(state_machine_arn=state_machine_arn)
    workflow.update(
        definition=workflow_definition,
        role=workflow_execution_role
    )
# Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
# swallowed; any SDK/service error still triggers the create path.
except Exception:
    workflow = Workflow(
        name=workflow_name,
        definition=workflow_definition,
        role=workflow_execution_role,
        execution_input=execution_input
    )
    workflow.create()


# Helper wrappers (per the SDK documentation): execute a state machine by ARN.
def execute(state_machine_arn: str, execution_input: Union[dict, None]):
    """Execute the state machine identified by *state_machine_arn* with the given input."""
    return Workflow.attach(state_machine_arn).execute(inputs=execution_input)
def _execute(state_machine_arn: str):
    """Execute the state machine identified by *state_machine_arn* with no input."""
    return Workflow.attach(state_machine_arn).execute()