def main():
    """Create and execute a Step Functions workflow that trains two GAN
    models (projection discriminator and WGAN-GP) in parallel on SageMaker.

    Side effects: registers a new Step Functions state machine and starts
    an execution; launches two SageMaker training jobs.
    """
    stepfunctions.set_stream_logger(level=logging.INFO)

    # NOTE(review): "backet" looks like a typo for "bucket", but this is
    # presumably the real S3 bucket name — confirm before renaming.
    bucket = 's3://pixiv-image-backet'
    sagemaker_execution_role = 'arn:aws:iam::829044821271:role/service-role/AmazonSageMaker-ExecutionRole-20200412T194702'
    workflow_execution_role = 'arn:aws:iam::829044821271:role/StepFunctionsWorkflowExecutionRole'

    # Projection-discriminator GAN trainer.
    estimator1 = PyTorch(entry_point='train.py',
                         source_dir='projection_discriminator',
                         role=sagemaker_execution_role,
                         framework_version='1.4.0',
                         train_instance_count=2,
                         train_instance_type='ml.m5.2xlarge',
                         hyperparameters={
                             'train_epoch': 1,
                         })

    # WGAN-GP trainer.
    estimator2 = PyTorch(entry_point='train.py',
                         source_dir='wgan_gp',
                         role=sagemaker_execution_role,
                         framework_version='1.4.0',
                         train_instance_count=2,
                         train_instance_type='ml.m5.2xlarge',
                         hyperparameters={
                             'train_epoch': 1,
                         })

    training_step1 = steps.TrainingStep(state_id='Train Step1',
                                        estimator=estimator1,
                                        data={
                                            'training': bucket,
                                        },
                                        job_name='PD-Train-{0}'.format(
                                            uuid.uuid4()))

    # Fix: this step previously reused the 'PD-Train' prefix (copy-paste
    # from step 1); name it after the WGAN-GP model so the two concurrent
    # training jobs are distinguishable in the SageMaker console.
    training_step2 = steps.TrainingStep(state_id='Train Step2',
                                        estimator=estimator2,
                                        data={
                                            'training': bucket,
                                        },
                                        job_name='WGAN-GP-Train-{0}'.format(
                                            uuid.uuid4()))

    # Run both training steps as parallel branches of a single state.
    parallel_state = steps.Parallel(state_id='Parallel')
    parallel_state.add_branch(training_step1)
    parallel_state.add_branch(training_step2)

    workflow_definition = steps.Chain([parallel_state])
    workflow = Workflow(
        name='MyTraining-{0}'.format(uuid.uuid4()),
        definition=workflow_definition,
        role=workflow_execution_role,
    )
    workflow.create()
    workflow.execute()
def main():
    """Register a Step Functions workflow that chains two AWS Batch jobs:
    face clipping followed by tag extraction.

    Side effects: reads three local JSON config files and creates the
    state machine (the workflow is created but not executed here).
    """
    stepfunctions.set_stream_logger(level=logging.INFO)

    workflow_execution_role = 'arn:aws:iam::829044821271:role/StepFunctionsWorkflowExecutionRole'

    def _read_json(path):
        # Parse one JSON config file produced by the batch setup scripts.
        with open(path, 'r') as handle:
            return json.load(handle)

    # Load job name
    stepfunctions_name = _read_json('./stepfunctions_name.json')
    face_clip_name = _read_json('./face_clip/aws_batch/batch_names.json')
    tag_extraction_name = _read_json('./tag_extraction/aws_batch/batch_names.json')

    def _batch_step(state_id, names):
        # Build a Batch submit-job step from one job-names mapping.
        return steps.BatchSubmitJobStep(
            state_id=state_id,
            parameters={
                'JobDefinition': names['jobDefinition'],
                'JobName': names['job'],
                'JobQueue': names['jobQueue'],
            },
        )

    # Define steps
    face_clip_step = _batch_step('Face Clip Step', face_clip_name)
    tag_extraction_step = _batch_step('Tag Extraction Step', tag_extraction_name)

    # Define workflow: the two batch jobs run sequentially.
    workflow = Workflow(
        name=stepfunctions_name['workflow'],
        definition=steps.Chain([face_clip_step, tag_extraction_step]),
        role=workflow_execution_role,
    )
    workflow.create()
# Stdlib imports — fix: logging, os, and yaml were used below
# (logging.INFO, os.environ, yaml.safe_load) but never imported.
import logging
import os

import yaml

from sagemaker import get_execution_role
from sagemaker.estimator import Estimator
from sagemaker.inputs import TrainingInput
from sagemaker.processing import Processor
from sagemaker.processing import ProcessingInput, ProcessingOutput
import stepfunctions
from stepfunctions.inputs import ExecutionInput
from stepfunctions.workflow import Workflow
from stepfunctions.steps import (
    Chain,
    ProcessingStep,
    TrainingStep,
)

stepfunctions.set_stream_logger(level=logging.INFO)

# Path of the YAML file describing the pipeline configuration.
config_name = 'flow.yaml'


def get_parameters():
    """Load workflow parameters from the YAML config file.

    Reads region/role/job settings from ``flow.yaml`` and the workflow
    name from the ``SFN_WORKFLOW_NAME`` environment variable (raises
    ``KeyError`` if that variable is unset).
    """
    params = {}
    with open(config_name) as file:
        config = yaml.safe_load(file)
    params['region'] = config['config']['region']
    params['sagemaker-role-arn'] = config['config']['sagemaker-role-arn']
    # Workflow name comes from the environment, not the YAML file.
    params['sfn-workflow-name'] = os.environ['SFN_WORKFLOW_NAME']
    params['sfn-role-arn'] = config['config']['sfn-role-arn']
    params['job-name-prefix'] = config['config']['job-name-prefix']
    params['secretsmanager-arn'] = config['config']['secretsmanager-arn']
    params['mlflow-server-uri'] = config['experiments']['mlflow-server-uri']
    params['experiment-name'] = config['experiments']['experiment-name']
    # NOTE(review): no `return params` is visible in this chunk — the
    # function may continue past the visible source; if it truly ends
    # here, a `return params` is missing. Confirm against the full file.