def kvs_watch_wrapper(key, value, arg, errnum):
    """C callback trampoline for KVS watch.

    Unpacks the ``(callback, real_arg)`` pair stashed in *arg* via an ffi
    handle, converts the C key/value into Python objects, and invokes the
    user callback.

    :returns: the callback's return value, or 0 when it returns None
        (the C layer expects an integer status)
    """
    (callback, real_arg) = ffi.from_handle(arg)
    if errnum == errno.ENOENT:
        # Key does not exist (or was removed): report the value as None
        value = None
    else:
        value = json.loads(ffi.string(value).decode("utf-8"))
    # Decode the key to str for consistency with the decoded value;
    # previously it was passed through as bytes on Python 3.
    key = ffi.string(key).decode("utf-8")
    ret = callback(key, value, real_arg, errnum)
    return ret if ret is not None else 0
def id_encode(jobid, encoding="f58"):
    """
    returns: Jobid encoded in encoding
    :rtype str
    """
    size = 128
    cbuf = ffi.new("char[]", size)
    # The wrapper raises on error, so no status check is needed here.
    RAW.id_encode(int(jobid), encoding, cbuf, size)
    encoded = ffi.string(cbuf, size)
    return encoded.decode("utf-8")
def get_key_direct(flux_handle, key):
    """Synchronously look up *key* in the KVS and return its decoded value.

    :param flux_handle: handle to the Flux broker
    :param key: KVS key to look up
    :returns: the JSON-decoded value, or None when the value is NULL
    :raises EnvironmentError: on lookup failure (raised by the wrapper)
    """
    valp = ffi.new("char *[1]")
    future = RAW.flux_kvs_lookup(flux_handle, None, 0, key)
    try:
        RAW.flux_kvs_lookup_get(future, valp)
        if valp[0] == ffi.NULL:
            return None
        # Decode before the future is destroyed: the C string it points
        # to is owned by the future.
        return json.loads(ffi.string(valp[0]).decode("utf-8"))
    finally:
        # Always destroy the future; the original leaked it on the NULL
        # path and whenever json.loads raised.
        RAW.flux_future_destroy(future)
def get_key_direct(flux_handle, key):
    """Synchronously look up *key* in the KVS and return its decoded value.

    :param flux_handle: handle to the Flux broker
    :param key: KVS key to look up
    :returns: the JSON-decoded value, or None when the value is NULL
    :raises EnvironmentError: on lookup failure (raised by the wrapper)
    """
    valp = ffi.new("char *[1]")
    future = RAW.flux_kvs_lookup(flux_handle, None, 0, key)
    try:
        RAW.flux_kvs_lookup_get(future, valp)
        if valp[0] == ffi.NULL:
            return None
        # Decode before the future is destroyed: the C string it points
        # to is owned by the future.
        return json.loads(ffi.string(valp[0]).decode("utf-8"))
    finally:
        # Always destroy the future; the original leaked it on the NULL
        # path and whenever json.loads raised.
        RAW.flux_future_destroy(future)
def get_dict(self):
    """Get the raw "result" dictionary for the job

    Return the underlying "result" payload from ``flux_job_result(3)``
    as a dictionary.
    """
    payload = ffi.new("char *[1]")
    RAW.result_get(self.handle, payload)
    if payload[0] != ffi.NULL:
        return json.loads(ffi.string(payload[0]).decode("utf-8"))
    return None
def job_kvs_guest(flux_handle, jobid):
    """
    :returns: The KVS guest directory of the given job
    :rtype: KVSDir
    """
    path_len = 1024
    buf = ffi.new("char[]", path_len)
    RAW.kvs_guest_key(buf, path_len, jobid, "")
    # ffi.string() returns bytes; decode so the key is a str, consistent
    # with how the other helpers in these bindings handle KVS strings.
    kvs_key = ffi.string(buf, path_len).decode("utf-8")
    return flux.kvs.get_dir(flux_handle, kvs_key)
def kz_stream_handler(kz_handle, arg):
    """kz ready callback: drain all available data from the kz stream.

    Reads chunks until a zero-length read (EOF), writing each chunk to
    *stream*. When *prefix* is set, the prefix is written before every
    line of the chunk. EAGAIN is tolerated (best-effort retry).
    """
    del kz_handle  # unused
    (stream, prefix, handle) = ffi.from_handle(arg)
    buf = ffi.new("char *[1]")
    while True:
        try:
            count = RAW.get(handle, buf)
            if count == 0:
                break
            if prefix is None:
                generic_write(stream, ffi.string(buf[0]))
            else:
                # Bug fix: write each LINE after its prefix. The original
                # ignored the loop variable and rewrote the entire buffer
                # once per line, duplicating output.
                for line in ffi.string(buf[0]).splitlines(True):
                    generic_write(stream, prefix)
                    generic_write(stream, line)
        except EnvironmentError as err:
            if err.errno == errno.EAGAIN:
                pass
            else:
                raise err
def kz_stream_handler(kz_handle, arg):
    """kz ready callback: drain all available data from the kz stream.

    Reads chunks until a zero-length read (EOF), writing each chunk to
    *stream*. When *prefix* is set, the prefix is written before every
    line of the chunk. EAGAIN is tolerated (best-effort retry).
    """
    del kz_handle  # unused
    (stream, prefix, handle) = ffi.from_handle(arg)
    buf = ffi.new("char *[1]")
    while True:
        try:
            count = RAW.get(handle, buf)
            if count == 0:
                break
            if prefix is None:
                generic_write(stream, ffi.string(buf[0]))
            else:
                # Bug fix: write each LINE after its prefix. The original
                # ignored the loop variable and rewrote the entire buffer
                # once per line, duplicating output.
                for line in ffi.string(buf[0]).splitlines(True):
                    generic_write(stream, prefix)
                    generic_write(stream, line)
        except EnvironmentError as err:
            if err.errno == errno.EAGAIN:
                pass
            else:
                raise err
def watch_once(flux_handle, key):
    """
    Watches the selected key until the next change, then returns
    the updated value of the key
    """
    if isdir(flux_handle, key):
        # Fetch the directory at *key* -- the original called
        # get_dir(flux_handle) and thus always fetched the root
        # directory regardless of which key was being watched.
        directory = get_dir(flux_handle, key)
        # The wrapper automatically unpacks directory's handle
        RAW.flux_kvs_watch_once_dir(flux_handle, directory)
        return directory
    out_json_str = ffi.new("char *[1]")
    RAW.flux_kvs_watch_once(flux_handle, key, out_json_str)
    if out_json_str[0] == ffi.NULL:
        return None
    return json.loads(ffi.string(out_json_str[0]).decode("utf-8"))
def get_event(self, autoreset=True):
    """
    Return the next event from a JobEventWatchFuture, or None
    if the event stream has terminated. The future is auto-reset
    unless autoreset=False, so a subsequent call to get_event()
    will try to fetch the next event and thus may block.
    """
    entry = ffi.new("char *[1]")
    try:
        RAW.event_watch_get(self.pimpl, entry)
    except OSError as exc:
        if exc.errno != errno.ENODATA:
            # re-raise all other exceptions
            raise
        # End of the event stream: no cancel needed any more.
        self.needs_cancel = False
        return None
    event = EventLogEvent(ffi.string(entry[0]).decode("utf-8"))
    if autoreset is True:
        self.reset()
    return event
def wait_get_status(future):
    """Get job status from a Future returned by job.wait_async()

    Process a response to a Flux job wait request. This method blocks
    until the response is received, then decodes the result to obtain
    the job status.

    :param future: a Flux future object returned by job.wait_async()
    :type future: Future
    :returns: job status, a tuple of: Job ID (int), success (bool),
        and an error (string) if success=False
    :rtype: tuple
    """
    if future is None or future == ffi.NULL:
        raise EnvironmentError(errno.EINVAL, "future must not be None/NULL")
    future.wait_for()  # ensure the future is fulfilled
    success = ffi.new("bool[1]")
    errstr = ffi.new("const char *[1]")
    jobid = ffi.new("flux_jobid_t[1]")
    RAW.wait_get_id(future, jobid)
    RAW.wait_get_status(future, success, errstr)
    # The error string is only set on failure; ffi.string(NULL) would
    # raise. Decode to str when present so callers get text, not bytes.
    if errstr[0] == ffi.NULL:
        errmsg = None
    else:
        errmsg = ffi.string(errstr[0]).decode("utf-8")
    return JobWaitResult(int(jobid[0]), bool(success[0]), errmsg)
def jsc_notify_wrapper(jcb, arg, errnum):
    """C callback trampoline for JSC notifications.

    Decodes the JCB string (when non-NULL), recovers the user callback
    and its argument from the ffi handle, and invokes the callback.
    Returns 0 when the callback returns None, since the C layer expects
    an integer status.
    """
    (callback, real_arg) = ffi.from_handle(arg)
    if jcb != ffi.NULL:
        jcb = ffi.string(jcb).decode("utf-8")
    ret = callback(jcb, real_arg, errnum)
    if ret is None:
        return 0
    return ret
def query_jcb(flux_handle, jobid, key):
    """Query the JCB attribute *key* for *jobid*.

    Returns the JSON-decoded JCB payload, or None when the query
    yields a NULL string.
    """
    out = ffi.new("char *[1]")
    RAW.query_jcb(flux_handle, jobid, key, out)
    if out[0] != ffi.NULL:
        return json.loads(ffi.string(out[0]).decode("utf-8"))
    return None
import json
import re
import sys

import six

from flux.wrapper import Wrapper
from _flux._core import ffi, lib

# NOTE(review): `six` is not used in this visible chunk -- presumably
# used elsewhere in the file; confirm before removing.

MOD = sys.modules[__name__]

# Inject enum/define names matching ^JSC_[A-Z_]+$ into module
# (each JSC_* constant from the C library becomes a module-level str,
# decoded from its C string value).
PATTERN = re.compile("^JSC_[A-Z_]+")
for k in dir(lib):
    if PATTERN.match(k):
        v = ffi.string(getattr(lib, k)).decode("ascii")
        setattr(MOD, k, v)


class JSCWrapper(Wrapper):
    """
    Generic JSC wrapper
    """

    def __init__(self):
        """Set up the wrapper interface for functions prefixed with jsc_"""
        super(JSCWrapper, self).__init__(ffi, lib, prefixes=["jsc_"])


# Module-level singleton through which all jsc_* C functions are called.
RAW = JSCWrapper()