# Round-trip serialization test: serialize each example workflow to JSON,
# read it back, and check that the restored workflow matches the original.
workflows.append(("multiple_transfer",
                  tr_wf_examples.multiple_simple_example()))
workflows.append(("special_command_transfer",
                  tr_wf_examples.special_command()))
workflows.append(("special_transfer",
                  tr_wf_examples.special_transfer()))
workflows.append(("multiple_srp",
                  srp_wf_examples.multiple_simple_example()))
workflows.append(("special_command_srp",
                  srp_wf_examples.special_command()))

ret_value = 0
for workflow_name, workflow in workflows:
    print "--------------------------------------------------------------"
    print workflow_name
    file_path = os.path.join(directory, "json_" + workflow_name + ".wf")
    Helper.serialize(file_path, workflow)
    new_workflow = Helper.unserialize(file_path)
    if not new_workflow.attributs_equal(workflow):
        print "FAILED !!"
        ret_value = 1
    else:
        print "OK"
    try:
        os.remove(file_path)
    except OSError:
        # os.remove raises OSError (not IOError) when the file is missing
        pass

if ret_value == 0:
    print "\nAll tests ran successfully."
else:
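
# A minimal, self-contained round-trip sketch of the Helper.serialize /
# Helper.unserialize API exercised above (illustration only: the Job and
# Workflow built here are hypothetical, not one of the test cases):
#
#     from soma_workflow.client import Helper, Job, Workflow
#
#     job = Job(command=["echo", "hello"], name="hello")
#     wf = Workflow(jobs=[job], dependencies=[], name="demo")
#     Helper.serialize("/tmp/demo.wf", wf)
#     restored = Helper.unserialize("/tmp/demo.wf")
#     assert restored.attributs_equal(wf)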
logger.info("epd_to_deploy " + repr(options.epd_to_deploy)) logger.info("untar_directory " + repr(options.untar_directory)) sch = MPIScheduler(comm, interval=1, nb_attempt_per_job=options.nb_attempt_per_job) config.disable_queue_limits() workflow_engine = ConfiguredWorkflowEngine(database_server, sch, config) if options.workflow_file and os.path.exists(options.workflow_file): workflow_file = options.workflow_file logger.info(" ") logger.info("******* submission of worklfow **********") logger.info("workflow file: " + repr(workflow_file)) workflow = Helper.unserialize(workflow_file) workflow_engine.submit_workflow(workflow, expiration_date=None, name=None, queue=None) if options.wf_id_to_restart != None: workflow_id = options.wf_id_to_restart logger.info(" ") logger.info("******* restart worklfow **********") logger.info("workflow if: " + repr(workflow_id)) workflow_engine.stop_workflow(workflow_id) workflow_engine.restart_workflow(workflow_id, queue=None) while not workflow_engine.engine_loop.are_jobs_and_workflow_done(): time.sleep(2) for slave in range(1, comm.size):