def __init__(self):

    self.set_argparse()
    self._set_rmq()

    self.am = entk.AppManager(hostname=self.rmq_hostname, port=self.rmq_port)
    self.p  = entk.Pipeline()
    self.s  = entk.Stage()
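A minimal sketch (not part of the source) of how the single Pipeline and Stage created above are typically populated and executed, assuming a hypothetical echo Task and a local resource description:

    t = entk.Task()
    t.executable = '/bin/echo'                    # assumed task, for illustration only
    t.arguments  = ['hello from EnTK']

    self.s.add_tasks(t)                           # the Stage holds the Task
    self.p.add_stages(self.s)                     # the Pipeline holds the Stage

    self.am.resource_desc = {'resource': 'local.localhost',  # assumed local run
                             'walltime': 10,
                             'cpus'    : 1}
    self.am.workflow = [self.p]                   # assign the workflow and execute
    self.am.run()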
def __init__(self, name, resource, walltime, cpus, gpus=0,
             project=None, queue=None):

    self._res_dict = {'resource': resource,
                      'walltime': walltime,
                      'cpus'    : cpus,
                      'gpus'    : gpus}
    if project:
        self._res_dict['project'] = project
    if queue:
        self._res_dict['queue'] = queue

    # Local runs connect via ssh; remote resources use gsissh.
    if 'local.localhost' in resource:
        self._res_dict['schema'] = 'ssh'
    else:
        self._res_dict['schema'] = 'gsissh'

    rmq_endpoint = os.environ.get('RMQ_ENDPOINT', None)
    rmq_port     = os.environ.get('RMQ_PORT', None)
    if rmq_endpoint is None or rmq_port is None:
        raise RuntimeError('RabbitMQ endpoint and/or port is not set')

    self._app_manager = re.AppManager(port=int(rmq_port),
                                      hostname=rmq_endpoint,
                                      name=name,
                                      autoterminate=False,
                                      write_workflow=False)
    self._app_manager.resource_desc = self._res_dict

    self._logger = ru.Logger(name='iceberg-middleware', level='DEBUG')
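A hedged usage sketch, assuming the constructor above belongs to a class here called IcebergMiddleware (a hypothetical name) and that the RabbitMQ settings are exported before instantiation; the resource label and project values are placeholders:

    os.environ['RMQ_ENDPOINT'] = 'rmq.example.org'    # hypothetical endpoint
    os.environ['RMQ_PORT']     = '5672'

    mw = IcebergMiddleware(name='test-run',
                           resource='xsede.bridges2',  # assumed resource label
                           walltime=60, cpus=64, gpus=4,
                           project='ABC123', queue='GPU')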
def __init__(self):

    self.set_argparse()
    self._set_rmq()

    self.am = entk.AppManager(hostname=self.rmq_hostname,
                              port=self.rmq_port,
                              username=self.rmq_username,
                              password=self.rmq_password)

    self.pipelines = []
    self.p1 = entk.Pipeline()
    self.p2 = entk.Pipeline()

    self.s1 = entk.Stage()
    self.s2 = entk.Stage()
    self.s3 = entk.Stage()
    self.s4 = entk.Stage()
    self.s5 = entk.Stage()
    self.s6 = entk.Stage()
    self.s7 = entk.Stage()
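A hypothetical wiring sketch (not in the source) of how the two Pipelines and seven Stages created above could be assembled into one workflow, assuming stages s1-s3 belong to the first pipeline and s4-s7 to the second:

    self.p1.add_stages([self.s1, self.s2, self.s3])
    self.p2.add_stages([self.s4, self.s5, self.s6, self.s7])

    self.pipelines.append(self.p1)
    self.pipelines.append(self.p2)

    self.am.workflow = self.pipelines
    self.am.run()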
if not check_environment():
    raise RuntimeError("ERROR: Incorrect environment set up.")

pdesc = {'resource': cfg['pdesc']['resource'],
         'queue'   : cfg['pdesc']['queue'],
         'schema'  : cfg['pdesc']['schema'],
         'walltime': cfg['pdesc']['walltime'],
         'cpus'    : cfg['pdesc']['cpus_node'] * 4 * cfg['pdesc']['nodes'],
         'gpus'    : 6 * cfg['pdesc']['nodes'],
         'project' : cfg['pdesc']['project']}

appman = entk.AppManager(hostname=os.environ.get('RMQ_HOSTNAME'),
                         port=int(os.environ.get('RMQ_PORT')),
                         username=os.environ.get('RMQ_USERNAME'),
                         password=os.environ.get('RMQ_PASSWORD'),
                         autoterminate=False)
appman.resource_desc = pdesc

for wf in cfg['workflows']:

    if wf == 'ml1':
        reporter.header('Submit ML1')
        counter = 1
        ml1_run(appman, cfg_ml1, reporter, counter)
        reporter.header('ML1 done')

    elif wf == 'wf1':
        reporter.header('Submit S1')
        wf1_run(appman, cfg_wf1, reporter)
        reporter.header('S1 done')
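An illustrative (assumed) shape of the configuration consumed above; the resource label, node counts and project ID are placeholders, not values from the source:

    cfg = {'pdesc'    : {'resource' : 'ornl.summit',   # hypothetical resource label
                         'queue'    : 'batch',
                         'schema'   : 'local',
                         'walltime' : 120,
                         'cpus_node': 42,
                         'nodes'    : 2,
                         'project'  : 'XYZ000'},
           'workflows': ['ml1', 'wf1']}

With these placeholder values the pilot request becomes 42 * 4 * 2 = 336 cpus and 6 * 2 = 12 gpus, following the multipliers used to build pdesc.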
# ------------------------------------------------------------------------------
#
if __name__ == '__main__':

    # Create a dictionary describing the mandatory keys:
    # resource, walltime and cpus ('project' is also needed on remote resources).
    # 'local.localhost' executes the workflow locally.
    res_dict = {'resource': 'local.localhost',
                'walltime': 15,
                'cpus'    : 2}

    # Create the Application Manager
    appman = re.AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    pipes = list()
    pipes.append(generate_pipeline(True))
    pipes.append(generate_pipeline(False))
    pipes.append(generate_pipeline(False))
    pipes.append(generate_pipeline(False))

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = pipes

    def tmp():
        while True:
            print([p.state for p in pipes])
            time.sleep(1)
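The generate_pipeline() calls above are not defined in this excerpt; a minimal sketch of what such a function might look like, assuming a single echo Task per Pipeline:

    def generate_pipeline(first):

        # Hypothetical sketch: one Stage with one Task; the boolean only
        # changes the message, mirroring how the calls above differ.
        t = re.Task()
        t.executable = '/bin/echo'
        t.arguments  = ['first pipeline' if first else 'follow-up pipeline']

        s = re.Stage()
        s.add_tasks(t)

        p = re.Pipeline()
        p.add_stages(s)
        return p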
                      timesteps, basename)

print('replica sandbox:', replica_sandbox)

replica           = list()
replica_pipelines = list()
for rid in range(replicas):
    print(rid)
    replica    = Replica()
    r_pipeline = replica.replica_pipeline(rid, cycle, replica_cores,
                                          md_executable, timesteps,
                                          replica_sandbox)
    replica_pipelines.append(r_pipeline)

appman = re.AppManager(autoterminate=False, port=32769)
appman.resource_desc = {"resource": 'local.localhost',
                        "walltime": 30,
                        "cpus"    : 4}

# Run the system-setup pipeline first, then the replica pipelines.
appman.workflow = set([system])
appman.run()

appman.workflow = set(replica_pipelines)
appman.run()

appman.resource_terminate()

# ------------------------------------------------------------------------------
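Note that resource_terminate() is the older shutdown call; in more recent radical.entk releases (an assumption about the installed version) the equivalent final line would be:

    appman.terminate()   # replaces appman.resource_terminate() in newer EnTK releases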