# NOTE: the decorator is assumed here -- the body uses yield/defer.returnValue,
# which only works inside a twisted.internet.defer.inlineCallbacks coroutine.
@defer.inlineCallbacks
def startCluster(batchState, *args, **kwargs):
    """Start a cluster through the clusters webservice, record the resulting
    task name on batchState, and return that task name to the caller."""
    taskName = yield clusters_client.startCluster(*args, **kwargs)
    _setQueue(taskName, batchState)
    defer.returnValue(taskName)
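# Usage sketch (illustrative, not from the original source): callers yield the
# wrapper from another inlineCallbacks coroutine and receive the task name once
# the webservice call resolves. The argument values below are assumptions; they
# mirror the positional call made by the STARTING stage in _run further down.
@defer.inlineCallbacks
def _exampleStartCluster(batchState):
    taskName = yield startCluster(batchState,
                                  'localhost',          # webservice host
                                  'example-cluster',    # cluster name
                                  'guest',              # user name
                                  2,                    # exec nodes
                                  0,                    # data nodes
                                  'example-credential', # cluster credential
                                  {})                   # instance-type options
    defer.returnValue(taskName)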
# inlineCallbacks assumed here as well; the body is a yield-based coroutine.
@defer.inlineCallbacks
def _run(state, batchState):
    """Drive a clovr_wrapper pipeline through its states.

    The wrapper walks a checkpointed sequence (PRESTART -> STARTING -> PRERUN ->
    RUN_PIPELINE -> RUNNING_PIPELINE -> POSTRUN -> COMPLETED); every stage records
    its progress in batchState so an interrupted run resumes where it left off.
    """
    if 'state' not in batchState:
        _log(batchState, 'First time running, creating pipeline state information')
        batchState['pipeline_config'] = yield _applyActions(state.innerPipelineConfig(),
                                                            batchState['actions'])
        batchState['pipeline_state'] = PRESTART_STATE

        # We need to create a fake, local, pipeline for metrics to work
        batchState['pipeline_name'] = pipeline_misc.checksumInput(batchState['pipeline_config'])
        batchState['pipeline_config']['pipeline.PIPELINE_NAME'] = batchState['pipeline_name']
        batchState['pipeline_config']['pipeline.PIPELINE_WRAPPER_NAME'] = batchState['pipeline_name']

        _log(batchState, 'Pipeline named ' + batchState['pipeline_name'])

        pipeline = yield pipelines_client.createPipeline(host='localhost',
                                                         clusterName='local',
                                                         userName='******',
                                                         pipelineName=batchState['pipeline_name'],
                                                         protocol='clovr_wrapper',
                                                         queue='pipeline.q',
                                                         config=batchState['pipeline_config'],
                                                         parentPipeline=state.parentPipeline())

        batchState['clovr_wrapper_task_name'] = pipeline['task_name']

        _log(batchState, 'Setting number of tasks to 6 (number in a standard clovr_wrapper)')
        yield _updateTask(batchState,
                          lambda t: t.update(completedTasks=0, numTasks=6))

        state.updateBatchState()
    else:
        _log(batchState, 'Pipeline run before, loading pipeline information')
        pipeline = yield pipelines_client.pipelineList('localhost',
                                                       'local',
                                                       'guest',
                                                       batchState['pipeline_name'],
                                                       detail=True)

    batchState['state'] = RUNNING_STATE
    yield _updateTask(batchState,
                      lambda t: t.setState(tasks.task.TASK_RUNNING))

    pipelineConfigFile = os.path.join(TMP_DIR,
                                      'pipeline_configs',
                                      global_state.make_ref() + '.conf')

    _log(batchState, 'Creating ergatis configuration')
    _writeErgatisConfig(batchState['pipeline_config'], pipelineConfigFile)

    if batchState['pipeline_state'] == PRESTART_STATE:
        _log(batchState, 'Pipeline is in PRESTART state')
        yield state.prerunQueue.addWithDeferred(workflow_runner.run,
                                                state.workflowConfig(),
                                                batchState['pipeline_config']['pipeline.PRESTART_TEMPLATE_XML'],
                                                pipelineConfigFile,
                                                TMP_DIR)
        yield _updateTask(batchState,
                          lambda t: t.addMessage(tasks.task.MSG_SILENT,
                                                 'Completed prestart').progress())
        batchState['pipeline_state'] = STARTING_STATE
        state.updateBatchState()

    if batchState['pipeline_state'] == STARTING_STATE:
        _log(batchState, 'Pipeline is in STARTING state')
        clusterTask = yield clusters_client.startCluster(
            'localhost',
            batchState['pipeline_config']['cluster.CLUSTER_NAME'],
            'guest',
            int(batchState['pipeline_config']['cluster.EXEC_NODES']),
            0,
            batchState['pipeline_config']['cluster.CLUSTER_CREDENTIAL'],
            {'cluster.master_type': batchState['pipeline_config']['cluster.MASTER_INSTANCE_TYPE'],
             'cluster.master_bid_price': batchState['pipeline_config']['cluster.MASTER_BID_PRICE'],
             'cluster.exec_type': batchState['pipeline_config']['cluster.EXEC_INSTANCE_TYPE'],
             'cluster.exec_bid_price': batchState['pipeline_config']['cluster.EXEC_BID_PRICE']})

        taskState = yield tasks.blockOnTask('localhost', 'local', clusterTask)
        if taskState != tasks.task.TASK_COMPLETED:
            raise TaskError(clusterTask)

        yield _updateTask(batchState,
                          lambda t: t.addMessage(tasks.task.MSG_SILENT,
                                                 'Completed start').progress())
        batchState['pipeline_state'] = PRERUN_STATE
        state.updateBatchState()

    if batchState['pipeline_state'] == PRERUN_STATE:
        _log(batchState, 'Pipeline is in PRERUN state')
        yield state.prerunQueue.addWithDeferred(workflow_runner.run,
                                                state.workflowConfig(),
                                                batchState['pipeline_config']['pipeline.PRERUN_TEMPLATE_XML'],
                                                pipelineConfigFile,
                                                TMP_DIR)
        yield _updateTask(batchState,
                          lambda t: t.addMessage(tasks.task.MSG_SILENT,
                                                 'Completed prerun').progress())
        batchState['pipeline_state'] = RUN_PIPELINE_STATE
        state.updateBatchState()

    if batchState['pipeline_state'] == RUN_PIPELINE_STATE:
        _log(batchState, 'Pipeline is in RUN_PIPELINE state')
        pipeline = yield pipelines_client.runPipeline(host='localhost',
                                                      clusterName=batchState['pipeline_config']['cluster.CLUSTER_NAME'],
                                                      userName='******',
                                                      parentPipeline=batchState['pipeline_name'],
                                                      bareRun=True,
                                                      queue=state.innerPipelineQueue(),
                                                      config=batchState['pipeline_config'],
                                                      overwrite=True)
        batchState['pipeline_task'] = pipeline['task_name']

        yield _updateTask(batchState,
                          lambda t: t.addMessage(tasks.task.MSG_SILENT,
                                                 'Completed run pipeline').progress())
        batchState['pipeline_state'] = RUNNING_PIPELINE_STATE
        state.updateBatchState()

    if batchState['pipeline_state'] == RUNNING_PIPELINE_STATE:
        _log(batchState, 'Pipeline is in RUNNING_PIPELINE state')
        _monitorPipeline(batchState)
        yield _waitForPipeline(batchState)

        yield _updateTask(batchState,
                          lambda t: t.addMessage(tasks.task.MSG_SILENT,
                                                 'Completed running pipeline').progress())
        batchState['pipeline_state'] = POSTRUN_STATE
        state.updateBatchState()

    if batchState['pipeline_state'] == POSTRUN_STATE:
        _log(batchState, 'Pipeline is in POSTRUN state')
        yield state.postrunQueue.addWithDeferred(workflow_runner.run,
                                                 state.workflowConfig(),
                                                 batchState['pipeline_config']['pipeline.POSTRUN_TEMPLATE_XML'],
                                                 pipelineConfigFile,
                                                 TMP_DIR)
        yield _updateTask(batchState,
                          lambda t: t.addMessage(tasks.task.MSG_SILENT,
                                                 'Completed postrun').progress())
        batchState['pipeline_state'] = COMPLETED_STATE
        batchState['state'] = COMPLETED_STATE
        state.updateBatchState()

    yield _updateTask(batchState,
                      lambda t: t.setState(tasks.task.TASK_COMPLETED))

    _log(batchState, 'Pipeline finished successfully')
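# Illustrative sketch (not part of the original module): the chain of
# `if batchState['pipeline_state'] == ...` blocks in _run forms a checkpointed
# state machine. Each stage does its work, advances 'pipeline_state', and
# persists it via state.updateBatchState(), so a wrapper restarted after a
# failure falls through the stages already marked done and resumes at the first
# unfinished one. The self-contained toy below shows that control flow with
# plain functions; the stage names and the in-memory dict are assumptions made
# only for this example.
def _toyRun(batchState):
    # Each block mirrors one stage of _run: do the work, then record the checkpoint.
    if batchState['pipeline_state'] == 'prestart':
        print('running prestart work')
        batchState['pipeline_state'] = 'starting'

    if batchState['pipeline_state'] == 'starting':
        print('starting the cluster')
        batchState['pipeline_state'] = 'prerun'

    if batchState['pipeline_state'] == 'prerun':
        print('running prerun work')
        batchState['pipeline_state'] = 'completed'


if __name__ == '__main__':
    # Simulate resuming after a crash during the 'starting' stage: the prestart
    # block is skipped because its checkpoint was already recorded.
    _toyRun({'pipeline_state': 'starting'})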