def _submit(runConfig):
    if registry.has_run(runConfig.runid):
        msg = "Runid %s does already exist" % runConfig.runid
        return SubmissionResponse(runConfig.runid, SUBM_ALREADY_SUBMITTED, msg)
        
    try:
        pipeline_exec = PipelineExecution(runConfig, server_model.config)
        logger.debug("PipelineExecution object for runid %s created."%runConfig.runid)
        pipeline_exec.initialize()
        logger.debug("PipelineExecution object for runid %s initialized."%runConfig.runid)
        registry.add_run(runConfig.runid, pipeline_exec)
        logger.debug("PipelineExecution object for runid %s registered."%runConfig.runid)
        _ = pipeline_exec.start()
        msg = "Pipeline execution for runid %s started."%runConfig.runid
        logger.info(msg)
    except ConfigurationError as ce:
        msg = 'Submission failed with exception %s.' % str(ce)
        # format_exc() captures the traceback of the exception being handled.
        stacktrace = 'Stacktrace: \n%s' % traceback.format_exc()
        logger.warn(msg)
        return SubmissionResponse(runConfig.runid, SUBM_FAILED, msg, stacktrace)
    except Exception as e:
        msg = 'Submission failed with exception %s.' % str(e)
        stacktrace = 'Stacktrace: \n%s' % traceback.format_exc()
        logger.error(msg)
        return SubmissionResponse(runConfig.runid, SUBM_ERROR, msg, stacktrace)
    try:
        history.add_entry("Runid %s submitted to the system" % runConfig.runid, datetime.datetime.now())
    except Exception as e:
        logger.warn("Runid %s could not be appended to the history."%runConfig.runid)
        msg=msg+"\n But not entry could be added the server history."

    return SubmissionResponse(runConfig.runid, SUBM_EXECUTING, msg)
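
For orientation, a minimal caller sketch follows; handle_submission is invented for illustration and the SubmissionResponse is treated as opaque, since only its constructor arguments appear in this listing.

def handle_submission(runConfig):
    # Illustrative only: submit the run and log the outcome. All other names
    # are taken from the listing above; this wrapper is not part of the server.
    response = _submit(runConfig)
    logger.info("Submission of runid %s answered with %r" %
                (runConfig.runid, response))
    return response
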
def main():
    args = parse_cmd_args()
    config = load_configuration(args.config)
    data = load_inputs(args.data)
    inputdataPaths = {k: v for k, v in data.iteritems()
                      if k not in (WORKDIR, LOGDIR)}
    data = {
        RUNID: str(uuid4()),
        WORKDIR: data[WORKDIR],
        LOGDIR: data[LOGDIR] if LOGDIR in data else LOGDIR,
        PIPELINE_SCRIPT: args.pipeline,
        PIPELINE_DIR: config[PIPELINE_DIR],
        PKG_REPOSITORY: config[PKG_REPOSITORY],
        INPUTDATA_PATHS: inputdataPaths,
        CREDENTIALS: config[CREDENTIALS],
    }
           
    runConfig = RunConfiguration(data)
    config = RunServerConfiguration(config)
    pipeline_exec = PipelineExecution(runConfig, config)
    pipeline_exec.initialize()
    d = pipeline_exec.start()
            
    COL_NAMES = ('TICK', 'MODEL PATH', 'PID', 'STATUS', 'DURATION', 'OUTPUT DIR')
    COL_WIDTH = (20, 80, 10, 20, 10, 150)

    def print_report(result):
        # Print a fixed-width header row, then one line per executed node.
        print "".join(COL_NAMES[i].ljust(COL_WIDTH[i])
                      for i in range(len(COL_WIDTH)))
        for entries in pipeline_exec.report:
            line = []
            for key in ['tick', 'path', 'pid', 'status', 'time', 'workdir']:
                if key == 'time':
                    line.append("%.2f" % entries[key])
                else:
                    line.append(entries[key])

            print "".join(line[i].ljust(COL_WIDTH[i])
                          for i in range(len(line)))
        return result
        return result

    def print_output_dict(result):
        if pipeline_exec.outputs:
            for portname, datapath in pipeline_exec.outputs.iteritems():
                print ''.join([portname.ljust(20), '-->'.ljust(5), datapath.ljust(80)])
        return result
    
    def visualize(result):
        g = pipeline_exec.traverser.get_graph()
        visualizer.visualize_graph(g)
        return result

    def failed_output(reason):
        print reason
        
    def stop_reactor(outputs):
        reactor.stop()

    d.addBoth(print_report)
    d.addBoth(print_output_dict)
    d.addErrback(failed_output)
    d.addBoth(visualize)
    d.addCallback(stop_reactor)
    from twisted.internet import reactor
    reactor.run()
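
main() hands control to Twisted: pipeline_exec.start() returns a Deferred, the report/output/visualize callbacks are chained onto it, and reactor.run() blocks until stop_reactor fires. The standalone sketch below shows the same addBoth/addErrback/addCallback pattern; make_result is a hypothetical stand-in for pipeline_exec.start() and only assumes that Twisted is installed.

from twisted.internet import defer, reactor

def make_result():
    # Stand-in for pipeline_exec.start(): a Deferred that fires shortly after
    # the reactor starts.
    d = defer.Deferred()
    reactor.callLater(0.1, d.callback, {'c': 'test_exec/c.xml'})
    return d

def report(result):
    # addBoth callback: runs on success or failure and must pass the result on.
    print 'result: %r' % (result,)
    return result

def failed(reason):
    # addErrback handler: only reached if an earlier step left a Failure.
    print 'failed: %s' % reason.getErrorMessage()

def stop(_):
    reactor.stop()

d = make_result()
d.addBoth(report)
d.addErrback(failed)
d.addCallback(stop)
reactor.run()
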
Example #3
    def test_initialize(self):
        execution = PipelineExecution(self.inputs, self.config)
        execution.initialize()
        self.assertTrue(execution.pipelineDir in sys.path)
        self.assertTrue(execution.pipeline and isfunction(execution.pipeline))
        self.assertIsInstance(execution.dataflow, Graph)
        self.assertEquals(1, len(execution.dataflow.get_all_ticks()))
        self.assertTrue(execution.data[CONTEXT])
        self.assertTrue(execution.traverser)
    def test_start_error(self):
        node_callbacks.DRM = drm_access
        execution = PipelineExecution(self.inputs, self.config)
        execution.initialize()
        test_executor = TestCmdExecutor(self.config, "1", False, 0, False,
                                        JOB_ERROR)
        execution.callbacks._cmd_executor = test_executor
        execution.start()
        self.assertFalse(execution.outputs)
        self.assertEquals(1, len(execution.report))
        self.assertEquals(JOB_ERROR, execution.report[0]['status'])
        self.assertEquals('1', execution.report[0]['tick'])
        self.assertEquals('1', execution.report[0]['pid'])
        self.assertEquals('test_exec', execution.report[0]['path'])
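
The test methods in this listing are clipped from a TestCase and rely on self.inputs and self.config being prepared in setUp(). A hypothetical skeleton is sketched below; the dict mirrors the one built in main() above, but every value is a placeholder invented for illustration, and the real fixture lives in the project's test module.

import unittest

class PipelineExecutionTest(unittest.TestCase):

    def setUp(self):
        # Hypothetical fixture data; paths and names are placeholders only.
        data = {
            RUNID: "1",
            WORKDIR: "/tmp/testrun",
            LOGDIR: "/tmp/testrun/log",
            PIPELINE_SCRIPT: "testpipeline.py",
            PIPELINE_DIR: "/tmp/pipelines",
            PKG_REPOSITORY: "/tmp/packages",
            INPUTDATA_PATHS: {},
            CREDENTIALS: {},
        }
        self.inputs = RunConfiguration(data)
        self.config = RunServerConfiguration({
            PIPELINE_DIR: "/tmp/pipelines",
            PKG_REPOSITORY: "/tmp/packages",
            CREDENTIALS: {},
        })

if __name__ == '__main__':
    unittest.main()
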
Example #6
    def test_start_success(self):
        execution = PipelineExecution(self.inputs, self.config)
        execution.initialize()
        test_executor = TestCmdExecutor(self.config, "1", False, 0, False,
                                        JOB_COMPLETED)
        execution.callbacks._cmd_executor = test_executor
        execution.start()
        self.assertTrue(execution.outputs)
        self.assertEquals('test_exec/c.xml', execution.outputs['c'])
        self.assertEquals(1, len(execution.report))
        self.assertEquals(JOB_COMPLETED, execution.report[0]['status'])
        self.assertEquals('1', execution.report[0]['tick'])
        self.assertEquals('1', execution.report[0]['pid'])
        self.assertEquals('test_exec', execution.report[0]['path'])

        result_dict = PipelineExecution.todict(execution)
        self.assertEquals("1", result_dict[RUNID])
        self.assertTrue(result_dict[CONFIG])
        self.assertEquals("COMPLETED", result_dict[STATUS])
        self.assertTrue(result_dict[SUBMITTED])
        self.assertEquals("testpipeline", result_dict[PIPELINE]['name'])
        self.assertTrue(result_dict[REPORT])
        self.assertTrue(result_dict[INPUTS])
        self.assertTrue(result_dict[OUTPUTS])
        self.assertTrue(result_dict[OUTPUTS][WORKDIR])
Example #8
    def test_start_error_drm2(self):
        node_callbacks.DRM = drm_access2
        execution = PipelineExecution(self.inputs, self.config)
        execution.initialize()
        test_executor = TestCmdExecutor2(self.config, "1", False, 0, False,
                                         JOB_ERROR)
        execution.callbacks._cmd_executor = test_executor
        execution.start()
        self.assertFalse(execution.outputs)
        self.assertEquals(1, len(execution.report))
        self.assertEquals(JOB_ERROR, execution.report[0]['status'])
        self.assertEquals('1', execution.report[0]['tick'])
        self.assertEquals('1', execution.report[0]['pid'])
        self.assertEquals('test_exec', execution.report[0]['path'])