def add(self, basepath, relay, *confs):
    """Create a 'jobrun' worker and record it on self.worker.

    Opens a 'jobrun' data provider under basepath, optionally registers it
    at a relay, applies the given configuration commands, and appends the
    tuple (provider, relay_url, relay_id) to self.worker.

    relay: falsy -> no registration; the literal True -> use the built-in
    local relay 'scdc:///rel'; any other truthy value -> taken as the relay
    address, with this worker announced via its TCP URL.
    """
    provider = scdc.dataprov_open(basepath, "jobrun", "uname -a; sleep", os.getcwd())

    relay_url = None
    relay_id = None
    if relay:
        print("registering at relay")
        if relay == True:
            # local relay: worker is reachable through plain scdc URLs
            relay_url = "scdc:///rel"
            worker_url = "scdc://"
        else:
            # remote relay: announce this host's fully qualified TCP URL
            relay_url = relay
            worker_url = "scdc+tcp://" + socket.getfqdn()
        worker_url += "/" + basepath
        relay_id = socket.gethostname() + "_" + basepath
        command = relay_url + "/CONFIG put relay " + relay_id + " " + worker_url
        if not scdc.dataset_cmd(None, command, None, None):
            print("error: registering at relay failed! (cmd: '" + command + "')")
            # registration failed: drop the relay URL (relay_id is kept, as before)
            relay_url = None

    # apply per-worker configuration commands, skipping empty entries
    for conf in confs:
        if conf == "":
            continue
        print("configuration command '" + conf + "'")
        command = "scdc:///" + basepath + "/CONFIG " + conf
        if not scdc.dataset_cmd(None, command, None, None):
            print("error: configuration command failed! (cmd: '" + command + "')")

    self.worker.append((provider, relay_url, relay_id))
# NOTE(review): whitespace-mangled chunk — an entire relay-server demo script is
# collapsed onto one physical line, so it is not valid Python as it stands.
# Its logical content: the tail of a signal handler (`interrupt = (sig != 0)`;
# cancel the TCP nodeport — the enclosing `def` header is outside this chunk),
# a `printmsg` helper prefixing "jobrun_rel: ", then top-level setup: init scdc
# logging, scdc.init(), open a "jobrun_relay" data provider at basepath "rel",
# open a TCP nodeport (max 2 connections), install the signal handler for
# SIGABRT/SIGTERM/SIGINT, and run the nodeport loop until cancelled.
# NOTE(review): in this collapsed form the inline `# running` comment swallows
# the trailing nodeport_stop/nodeport_close calls — verify against the original
# multi-line file before relying on the shutdown path.
interrupt = (sig != 0) scdc.nodeport_cancel(np_tcp, interrupt) def printmsg(s): print("jobrun_rel: " + s) scdc.log_init("log_FILE", sys.stdout, sys.stderr) printmsg("scdc init") scdc.init() basepath = "rel" dp_rel = scdc.dataprov_open(basepath, "jobrun_relay") np_tcp = scdc.nodeport_open("tcp:max_connections:", 2) signal.signal(signal.SIGABRT, sighandler) signal.signal(signal.SIGTERM, sighandler) signal.signal(signal.SIGINT, sighandler) scdc.nodeport_start(np_tcp, scdc.NODEPORT_START_LOOP_UNTIL_CANCEL) # running scdc.nodeport_stop(np_tcp) scdc.nodeport_close(np_tcp)
# NOTE(review): whitespace-mangled chunk — a minimal jobrun-server demo script
# collapsed onto one physical line; not valid Python as it stands.
# Its logical content: the tail of a signal handler (cancel the TCP nodeport;
# the enclosing `def` header is outside this chunk), a `printmsg` helper
# prefixing "minimal_srv: ", then top-level setup: scdc.init(), open a "jobrun"
# data provider at basepath "jobR" running "uname -a; <cwd>/minimal.sh" with
# workdir "<cwd>/minimal.work", send three CONFIG commands to it
# (max_parallel_jobs=2, show_output=1, xterm=1), open a TCP nodeport (max 2
# connections) and install the signal handler for SIGABRT/SIGTERM/SIGINT.
# NOTE(review): the chunk ends right after the signal.signal calls — the
# nodeport start/stop presumably follows outside this view.
interrupt = (sig != 0) scdc.nodeport_cancel(np_tcp, interrupt) def printmsg(s): print("minimal_srv: " + s) printmsg("scdc init") scdc.init() basepath = "jobR" runcmd = "uname -a; " + os.getcwd() + "/minimal.sh" workdir = os.getcwd() + "/minimal.work" dp_job = scdc.dataprov_open(basepath, "jobrun", runcmd, workdir) # configure local jobrun data provider scdc.dataset_cmd(None, "scdc:///" + basepath + "/CONFIG put max_parallel_jobs 2", None, None) scdc.dataset_cmd(None, "scdc:///" + basepath + "/CONFIG put show_output 1", None, None) scdc.dataset_cmd(None, "scdc:///" + basepath + "/CONFIG put xterm 1", None, None) np_tcp = scdc.nodeport_open("tcp:max_connections:", 2) signal.signal(signal.SIGABRT, sighandler) signal.signal(signal.SIGTERM, sighandler) signal.signal(signal.SIGINT, sighandler)
# NOTE(review): whitespace-mangled chunk — a handler-based jobrun server demo
# script collapsed onto one physical line; not valid Python as it stands.
# Its logical content: `hostname`/`relay` toggles (both None by default, with
# commented-out alternatives "localhost"/"rel"), scdc log init, scdc.init(),
# open a "jobrun:handler:jobcmd" data provider at basepath "jobR" driven by a
# `jobrun_handler` callback (defined elsewhere in the file) with job command
# template "run hosts: $NODES$:$NPROCS$". If no hostname is set but a relay
# name is, a local "jobrun_relay" provider is opened and this basepath is
# registered at it via a CONFIG put-relay command.
# NOTE(review): the final `if hostname:` only shows `url = "scdc+tcp://" +
# hostname` — its body clearly continues past this chunk; do not reformat
# without the missing continuation.
hostname = None #hostname = "localhost" relay = None #relay = "rel" scdc.log_init("log_FILE", sys.stdout, sys.stderr) printmsg("scdc init") scdc.init() basepath = "jobR" #dp_job = scdc.dataprov_open(basepath, "jobrun:system:jobcmd:workdir", "uname -a; " + os.getcwd() + "/mpirun.sh -hosts $NODES$:$NPROCS$ $PARAMS$", os.getcwd()) dp_job = scdc.dataprov_open(basepath, "jobrun:handler:jobcmd", jobrun_handler, True, "run hosts: $NODES$:$NPROCS$") # configure local jobrun data provider #scdc.dataset_cmd(None, "scdc:///" + basepath + "/CONFIG put cores 3", None, None) #scdc.dataset_cmd(None, "scdc:///" + basepath + "/CONFIG put xterm 1", None, None) if hostname is None and relay: dp_rel = scdc.dataprov_open(relay, "jobrun_relay") rel = "scdc:///" + relay printmsg("registering '" + basepath + "' at relay: '" + rel + "'") cmd = rel + "/CONFIG put relay " + basepath + " " + "scdc:///" + basepath scdc.dataset_cmd(None, cmd, None, None) if hostname: url = "scdc+tcp://" + hostname
# NOTE(review): whitespace-mangled chunk — a storage demo script collapsed onto
# one physical line; not valid Python as it stands.
# Its logical content: build the filesystem store path from the
# MERGE_SCDC_REPO_PATH env var (suffix "store/") and MySQL credentials from
# MERGE_SCDC_MYSQL_CREDENTIALS, choose between the two backends via the
# `dbstore` flag (False -> filesystem), derive input/output file names from
# `target`, scdc.init(), open an "fs" provider at "storeFS" and a "mysql"
# provider at "storeDB", then enter a `while True:` loop (used only so `break`
# can abort on error) that opens the selected dataset URI and reports success
# or failure.
# NOTE(review): the loop body is truncated at the success print — the rest of
# the demo (dataset commands, break, cleanup) lies outside this chunk.
STORE_FS_PATH = os.getenv("MERGE_SCDC_REPO_PATH", "") + "store/" STORE_MYSQL_CREDENTIALS = os.getenv("MERGE_SCDC_MYSQL_CREDENTIALS", "") dbstore = False target = "store_test" input_file = target + ".in" output_file = target + ".out" print("SCDC storage demo for Python"); scdc.init() dpFS = scdc.dataprov_open("storeFS", "fs", STORE_FS_PATH) dpDB = scdc.dataprov_open("storeDB", "mysql", STORE_MYSQL_CREDENTIALS) uri = "scdc:/storeDB" if dbstore else "scdc:/storeFS" # the loop is only used to be able to break if an error occurs while True: # open dataset ds = scdc.dataset_open(uri) if not ds: print("ERROR: open dataset failed!") break else: print("open dataset '" + uri + "': OK")
def simpat_init():
    """Initialize the simpat benchmark data provider.

    Opens a 'bench' data provider under the module-level base path
    `simpat_base`, stores the handle in the module-level `simpat_dp`,
    and returns it.

    Fix: the original assigned the handle to a dead *local* `simpat_dp`
    and discarded it, so the only reference needed to use/close the
    provider later was lost. NOTE(review): declaring it global assumes
    `simpat_dp` is meant to be module state (the usual pattern in these
    demos) — confirm against the rest of the file. Returning the handle
    is backward-compatible (callers ignoring the return are unaffected).
    """
    global simpat_dp
    simpat_dp = scdc.dataprov_open(simpat_base, "bench")
    return simpat_dp
# NOTE(review): whitespace-mangled chunk — the tail of a hook demo script
# collapsed onto one physical line; not valid Python as it stands.
# Its logical content: the `else:` branch of a command handler (traces an
# unknown server command — the runtime string itself has the typo "unkown" —
# and returns True; the enclosing `def` header is outside this chunk), then
# top-level setup: install the signal handler for SIGABRT/SIGTERM/SIGINT,
# trace startup, scdc.init(), open a "hook:id" data provider at "hookdemo"
# backed by hookdemo.hook with id 0x2501, conditionally open TCP and stream
# nodeports, then start/stop/close both.
# NOTE(review): the nodeport opens are guarded by `NODEPORT_TCP != None` /
# `NODEPORT_STREAM != None`, but the start/stop/close calls are unconditional —
# if either constant is None, `np_tcp`/`np_stream` is never bound and this
# raises NameError. Confirm both constants are always set, or guard the
# start/stop/close calls too.
else: Z_TRACE("unkown server command: '" + cmd + "'") return True signal.signal(signal.SIGABRT, sighandler) signal.signal(signal.SIGTERM, sighandler) signal.signal(signal.SIGINT, sighandler) Z_TRACE("start hook demo") Z_TRACE() scdc.init() dp_hook = scdc.dataprov_open("hookdemo", "hook:id", hookdemo.hook, 0x2501) if NODEPORT_TCP != None: np_tcp = scdc.nodeport_open("tcp:max_connections", 2) if NODEPORT_STREAM != None: np_stream = scdc.nodeport_open("stream:cmd_handler", cmd_handler, None) scdc.nodeport_start(np_tcp, NODEPORT_TCP) scdc.nodeport_start(np_stream, NODEPORT_STREAM) scdc.nodeport_stop(np_stream) scdc.nodeport_stop(np_tcp) scdc.nodeport_close(np_stream) scdc.nodeport_close(np_tcp)
# NOTE(review): whitespace-mangled chunk — a jobrun-server demo script
# collapsed onto one physical line; not valid Python as it stands.
# Its logical content: the tail of a signal handler (cancel the TCP nodeport;
# both the `def` header and the `interrupt` assignment are outside this chunk),
# a `printmsg` helper prefixing "jobrun_srv: ", then top-level setup: scdc log
# init, scdc.init(), open a "jobrun" data provider at basepath "jobR" running
# "uname -a; <cwd>/mpirun.sh" with the current directory as workdir, and — if
# `relay` is set — register this worker at the relay via a CONFIG put-relay
# command using this host's fully qualified TCP URL, reporting OK/FAILED.
# NOTE(review): `relay` is read but not assigned anywhere in this chunk —
# presumably defined earlier in the original file; verify before reuse.
scdc.nodeport_cancel(np_tcp, interrupt) def printmsg(s): print("jobrun_srv: " + s) scdc.log_init("log_FILE", sys.stdout, sys.stderr) printmsg("scdc init") scdc.init() basepath = "jobR" dp_job = scdc.dataprov_open(basepath, "jobrun", "uname -a; " + os.getcwd() + "/mpirun.sh", os.getcwd()) # configure local jobrun data provider #scdc.dataset_cmd(None, "scdc:///" + basepath + "/CONFIG put max_parallel_jobs 3", None, None) #scdc.dataset_cmd(None, "scdc:///" + basepath + "/CONFIG put xterm 1", None, None) if relay: printmsg("registering at relay") url = "scdc+tcp://" + socket.getfqdn() + "/" + basepath cmd = relay + "/CONFIG put relay " + basepath + " " + url relay_ret = scdc.dataset_cmd(None, cmd, None, None) if relay_ret: printmsg("registering at relay OK!") else: printmsg("registering at relay FAILED! (cmd: '" + cmd + "')")
def __init__(self):
    """Set up the relay: report startup, then open the 'jobrun_relay'
    data provider under base path 'rel' and keep its handle."""
    print("init_relay:")
    # handle is retained on the instance so the provider can be used/closed later
    self.dp_relay = scdc.dataprov_open("rel", "jobrun_relay")