def exec_graph(ctx, call_id=None, host=None, port=None, headless=False,
               output_file=None):
    """ Get the execution graph for the given call ID """
    # Default to the in-cluster invoke endpoint unless the caller overrides
    k8s_host, k8s_port = get_invoke_host_port()
    host = host if host else k8s_host
    port = port if port else k8s_port

    # Fall back on the last recorded call ID when none is given
    if not call_id:
        with open(LAST_CALL_ID_FILE) as fh:
            # Strip whitespace so a trailing newline in the file does not
            # corrupt the call ID sent to the server
            call_id = fh.read().strip()

        if not call_id:
            print("No call ID provided and no last call ID found")
            exit(1)

    json_str = exec_graph_call_impl(None, None, call_id, host, port,
                                    quiet=True)

    graph = parse_exec_graph_json(json_str)
    plot_exec_graph(graph, headless=headless, output_file=output_file)
def status(ctx, call_id=None):
    """ Get the status of an async function call """
    invoke_host, invoke_port = get_invoke_host_port()
    resolved_call_id = get_call_id(call_id)

    status_call_impl(None, None, resolved_call_id, invoke_host, invoke_port,
                     quiet=False)
def invoke(ctx, func, native=False, iface=None, np=8):
    """ Invoke one of the ParRes Kernels functions """
    if func not in PRK_CMDLINE:
        print("Invalid PRK function {}".format(func))
        return 1

    cmdline = PRK_CMDLINE[func]

    # Some kernels constrain the number of MPI processes they accept
    if func == "random":
        if not is_power_of_two(np):
            print("Must have a power of two number of processes for random")
            exit(1)
    elif func == "sparse":
        if SPARSE_GRID_SIZE % np != 0:
            print(
                "To run sparse, grid size must be a multiple of --np (currently grid_size={} and np={})"
                .format(SPARSE_GRID_SIZE, np))
            exit(1)

    if not native:
        # Run through the Faasm invoke endpoint
        host, port = get_invoke_host_port()
        cmd_out = invoke_impl(FAASM_USER, func, cmdline=cmdline, host=host,
                              port=port, mpi_world_size=np)
    else:
        # Run the natively-compiled executable under MPI
        executable = PRK_NATIVE_EXECUTABLES[func]
        raw_out = mpi_run(executable, iface=iface, cmdline=cmdline, np=np)
        cmd_out = raw_out.decode()
        print(cmd_out)

    _parse_prk_out(func, cmd_out)
def flush(ctx, user, function):
    """ Flush workers """
    invoke_host, invoke_port = get_invoke_host_port()

    # Fall back on localhost defaults when no endpoint is configured
    if not invoke_host:
        invoke_host = "127.0.0.1"
    if not invoke_port:
        invoke_port = 8080

    flush_call_impl(invoke_host, invoke_port, user, function)
def status(ctx, call_id, host=None, port=None):
    """ Get the status of an async function call """
    # Caller-supplied host/port win over the configured k8s endpoint
    default_host, default_port = get_invoke_host_port()
    host = host or default_host
    port = port or default_port

    status_call_impl(None, None, call_id, host, port, quiet=False)
def all(ctx):
    """ Flush functions, state and shared files from all workers """
    host, port = get_invoke_host_port()
    url = "http://{}:{}".format(host, port)

    msg = {"type": FAABRIC_MSG_TYPE_FLUSH}
    knative_headers = get_knative_headers()

    return do_post(url, msg, headers=knative_headers, quiet=False, json=True)
def exec_graph(ctx, call_id=None, headless=False, output_file=None):
    """ Get the execution graph for the given call ID """
    host, port = get_invoke_host_port()
    call_id = get_call_id(call_id)

    json_str = exec_graph_call_impl(call_id, host, port, quiet=True)
    graph = parse_exec_graph_json(json_str)

    # Only forward output_file when one was given, so the plotting helper's
    # own default applies otherwise
    plot_kwargs = {"headless": headless}
    if output_file:
        plot_kwargs["output_file"] = output_file

    plot_exec_graph(graph, **plot_kwargs)
def __init__(self, threads=4, total_connections=20, delay_ms=0,
             duration_secs=10):
    """ Set up a wrk-based benchmark run against the invoke endpoint """
    super().__init__(None)

    # Location of the wrk binary shipped with the Faasm tools
    self.wrk_bin = join(FAASM_HOME, "tools", "wrk")

    # Load parameters
    self.threads = threads
    self.total_connections = total_connections
    self.delay_ms = delay_ms
    self.duration_secs = duration_secs

    # Target URL is the configured invoke endpoint
    self.host, self.port = get_invoke_host_port()
    self.url = "http://{}:{}".format(self.host, self.port)

    self.wrk_output = "/tmp/wrk_results.txt"
def _do_deploy_knative_native(func_name, image_name, replicas):
    """ Deploy a native function to knative with the given image/replicas """
    faasm_config = get_faasm_config()
    if not faasm_config.has_section("Faasm"):
        print("Must have faasm config set up with Faasm section")
        return 1

    # Host and port required for chaining native functions
    invoke_host, invoke_port = get_invoke_host_port()

    _deploy_knative_fn(
        func_name,
        image_name,
        replicas,
        1,
        NATIVE_WORKER_ANNOTATIONS,
        extra_env={
            "COLD_START_DELAY_MS": "1000",
            "FAASM_INVOKE_HOST": invoke_host,
            # Env var values must be strings; the port may be returned as an
            # int (cf. the string "1000" above)
            "FAASM_INVOKE_PORT": str(invoke_port),
        },
    )
def function(ctx):
    """ Flush functions, state and shared files from all workers """
    invoke_host, invoke_port = get_invoke_host_port()
    flush_call_impl(invoke_host, invoke_port)
def invoke_impl(
    user,
    func,
    input=None,
    py=False,
    asynch=False,
    knative=True,
    poll=False,
    cmdline=None,
    mpi_world_size=None,
    debug=False,
    sgx=False,
):
    """ Invoke the given user/function against the configured endpoint.

    Returns the result of _async_invoke when asynch (or poll) is set,
    otherwise the result of the synchronous POST.
    """
    host, port = get_invoke_host_port()

    # Polling always requires asynch
    if poll:
        asynch = True

    # Create URL, omitting the port when it is the HTTP default. Compare as
    # a string so an integer port of 80 is also recognised.
    url = "http://{}".format(host)
    if str(port) != "80":
        url += ":{}".format(port)

    # Python functions are routed through the Python runtime function
    if py:
        msg = {
            "user": PYTHON_USER,
            "function": PYTHON_FUNC,
            "async": asynch,
            "py_user": user,
            "py_func": func,
            "python": True,
        }
    else:
        msg = {
            "user": user,
            "function": func,
            "async": asynch,
        }

    # Optional message fields
    if sgx:
        msg["sgx"] = sgx

    if input:
        msg["input_data"] = input

    if cmdline:
        msg["cmdline"] = cmdline

    if mpi_world_size:
        msg["mpi_world_size"] = mpi_world_size

    # Knative must pass custom headers
    headers = get_knative_headers() if knative else {}

    if asynch:
        return _async_invoke(url, msg, headers=headers, poll=poll,
                             host=host, port=port)
    else:
        return do_post(url, msg, headers=headers, json=True, debug=debug)
def invoke_impl(
    user,
    func,
    host=None,
    port=None,
    input=None,
    py=False,
    asynch=False,
    knative=True,
    poll=False,
    cmdline=None,
    mpi_world_size=None,
    debug=False,
    poll_interval_ms=1000,
):
    """ Invoke the given user/function.

    Returns the call ID when asynch without polling, a (success, output)
    tuple when polling, and the raw POST result for synchronous calls.
    """
    # Provider-specific stuff
    if knative:
        host, port = get_invoke_host_port(host, port)

    # Defaults
    host = host if host else "127.0.0.1"
    port = port if port else 8080

    # Polling always requires asynch
    if poll:
        asynch = True

    # Create URL, omitting the port when it is the HTTP default. Compare as
    # a string so an integer port of 80 is also recognised.
    url = "http://{}".format(host)
    if str(port) != "80":
        url += ":{}".format(port)

    # Python functions are routed through the Python runtime function
    if py:
        msg = {
            "user": PYTHON_USER,
            "function": PYTHON_FUNC,
            "async": asynch,
            "py_user": user,
            "py_func": func,
            "python": True,
        }
    else:
        msg = {
            "user": user,
            "function": func,
            "async": asynch,
        }

    # Optional message fields
    if input:
        msg["input_data"] = input

    if cmdline:
        msg["cmdline"] = cmdline

    if mpi_world_size:
        msg["mpi_world_size"] = mpi_world_size

    # Knative must pass custom headers
    if knative:
        headers = _get_knative_headers("worker")
    else:
        headers = {}

    if asynch:
        # Submit initial asynch call
        asynch_result = do_post(url, msg, headers=headers, quiet=True,
                                json=True)
        try:
            call_id = int(asynch_result)
        except ValueError:
            raise RuntimeError(
                "Could not parse async response to int: {}".format(
                    asynch_result))

        if not poll:
            # Return the call ID if we're not polling
            return call_id

        if not knative:
            raise RuntimeError("Poll only supported for knative")

        print("\n---- Polling {} ----".format(call_id))

        # Poll status until the call has finished. Fix: also stop on
        # failure - previously the loop only exited on success, so a failed
        # call was polled forever
        result = None
        output = None
        count = 0
        while result not in (STATUS_SUCCESS, STATUS_FAILED):
            count += 1

            interval = float(poll_interval_ms) / 1000
            sleep(interval)

            result, output = status_call_impl(user, func, call_id, host,
                                              port, quiet=True)
            print("\nPOLL {} - {}".format(count, result))

        print("\n---- Finished {} ----\n".format(call_id))
        print(output)

        if result == STATUS_SUCCESS:
            prefix = "SUCCESS:"
            success = True
        else:
            prefix = "FAILED:"
            success = False

        output = output.replace(prefix, "")
        return success, output
    else:
        if knative:
            return do_post(url, msg, headers=headers, json=True, debug=debug)
        else:
            raise RuntimeError("Must specify knative")
def invoke_impl(user,
                func,
                host=None,
                port=None,
                input=None,
                py=False,
                asynch=False,
                knative=True,
                native=False,
                ibm=False,
                poll=False,
                cmdline=None,
                mpi_world_size=None,
                debug=False,
                poll_interval_ms=1000):
    """ Invoke the given user/function against knative or IBM Cloud.

    Returns the call ID when asynch without polling, a (success, output)
    tuple when polling, and the raw POST result for synchronous calls.
    """
    faasm_config = get_faasm_config()

    # Provider-specific stuff
    if ibm:
        host = faasm_config["IBM"]["k8s_subdomain"]
        port = 8080
    elif knative:
        host, port = get_invoke_host_port()

    # Defaults
    host = host if host else "127.0.0.1"
    port = port if port else 8080

    # Polling always requires asynch
    if poll:
        asynch = True

    # Create URL, omitting the port when it is the HTTP default
    url = "http://{}".format(host)
    if not port == "80":
        url += ":{}".format(port)

    # Python functions are routed through the Python runtime function
    if py:
        msg = {
            "user": PYTHON_USER,
            "function": PYTHON_FUNC,
            "async": asynch,
            "py_user": user,
            "py_func": func,
            "python": True,
        }
    else:
        msg = {
            "user": user,
            "function": func,
            "async": asynch,
        }

    # Optional message fields
    if input:
        msg["input_data"] = input

    if cmdline:
        msg["cmdline"] = cmdline

    if mpi_world_size:
        msg["mpi_world_size"] = mpi_world_size

    # IBM-specific message format
    if ibm:
        # Fix: reuse the config loaded above instead of re-reading it
        msg.update({
            "IBM_API_KEY": faasm_config["IBM"]["api_key"],
            "REDIS_QUEUE_HOST": faasm_config["IBM"]["redis_host_public"],
            "REDIS_STATE_HOST": faasm_config["IBM"]["redis_host_public"],
        })

        # Message needs to be nested
        msg = {
            "value": msg,
        }

        # IBM must call init first
        do_post("http://{}:{}/init/".format(host, port), msg, json=True)

    # Knative must pass custom headers
    if knative and native:
        if py:
            headers = _get_knative_headers("python")
        else:
            headers = _get_knative_headers(func)
    elif knative:
        headers = _get_knative_headers("worker")
    else:
        headers = {}

    if asynch:
        # Submit initial asynch call
        asynch_result = do_post(url, msg, headers=headers, quiet=True,
                                json=True)
        try:
            call_id = int(asynch_result)
        except ValueError:
            raise RuntimeError(
                "Could not parse async response to int: {}".format(
                    asynch_result))

        if not poll:
            # Return the call ID if we're not polling
            return call_id

        if not knative:
            raise RuntimeError("Poll only supported for knative")

        print("\n---- Polling {} ----".format(call_id))

        # Poll status until the call has finished. Fix: also stop on
        # failure - previously the loop only exited on success, so a failed
        # call was polled forever
        result = None
        output = None
        count = 0
        while result not in (STATUS_SUCCESS, STATUS_FAILED):
            count += 1

            interval = float(poll_interval_ms) / 1000
            sleep(interval)

            result, output = status_call_impl(user, func, call_id, host,
                                              port, quiet=True,
                                              native=native)
            print("\nPOLL {} - {}".format(count, result))

        print("\n---- Finished {} ----\n".format(call_id))
        print(output)

        if result == STATUS_SUCCESS:
            prefix = "SUCCESS:"
            success = True
        else:
            prefix = "FAILED:"
            success = False

        output = output.replace(prefix, "")
        return success, output
    else:
        if ibm or knative:
            return do_post(url, msg, headers=headers, json=True, debug=debug)
        else:
            raise RuntimeError("Must specify knative or ibm")
def mapping(ctx, download=False):
    """ Run genomics mapping using Faasm """
    read_idxs, _ = get_reads_from_dir()

    start_time = time()

    # Iterate through and make the calls to the worker
    call_ids = list()
    for read_idx in read_idxs:
        print("Mapping read chunk {}".format(read_idx))

        call_id = invoke_impl(
            "gene",
            "mapper",
            input="{}".format(read_idx),
            asynch=True,
            poll=False,
        )
        call_ids.append(call_id)

    # Poll for completion of each read
    completed_read_idxs = list()
    host, port = get_invoke_host_port()
    print("Polling workers...")

    while len(completed_read_idxs) < len(read_idxs):
        for i, read_idx in enumerate(read_idxs):
            # Fix: skip chunks already seen to finish, otherwise they are
            # re-polled (and re-downloaded) on every pass of the outer loop
            if read_idx in completed_read_idxs:
                continue

            sleep(1)

            # See whether this call is still running
            call_id = call_ids[i]
            result, output = status_call_impl("gene", "mapper", call_id,
                                              host, port, quiet=True)
            if result == STATUS_RUNNING:
                continue

            # Check for success or failure
            if result == STATUS_SUCCESS:
                print("Read chunk {} completed.".format(read_idx))

                # Download the results of this read
                if download:
                    print("Downloading output for read chunk {}.".format(
                        read_idx))
                    state_key = "output_read_{}".format(read_idx)

                    if not exists(GENOMICS_OUTPUT_DIR):
                        makedirs(GENOMICS_OUTPUT_DIR)

                    output_file = join(GENOMICS_OUTPUT_DIR, state_key)

                    # Fix: keep the upload endpoint in its own variables so
                    # the invoke host/port used for polling (and the final
                    # exec_graph call) are not clobbered
                    upload_host, upload_port = get_upload_host_port(None,
                                                                    None)
                    download_binary_state("gene", state_key, output_file,
                                          host=upload_host,
                                          port=upload_port)
            elif result == STATUS_FAILED:
                # Fix: the failure output was previously passed as extra
                # print() arguments instead of being formatted in
                print("Read chunk {} failed: {}".format(read_idx, output))

            # This chunk is done (success or failure)
            completed_read_idxs.append(read_idx)

    # Render the execution graph for each call
    for call_id in call_ids:
        exec_graph(
            ctx,
            call_id=call_id,
            host=host,
            headless=True,
            output_file="/tmp/exec_graph_{}.png".format(call_id),
        )

    print("-----------------------------------------")
    print("FAASM MAPPING COMPLETE")
    print("Time: {:.2f}s".format(time() - start_time))
    print("-----------------------------------------")
knative=True, native=False, ibm=False, poll=False, cmdline=None, mpi_world_size=None, debug=False, poll_interval_ms=1000): faasm_config = get_faasm_config() # Provider-specific stuff if ibm: host = faasm_config["IBM"]["k8s_subdomain"] port = 8080 elif knative: host, port = get_invoke_host_port() # Defaults host = host if host else "127.0.0.1" port = port if port else 8080 # Polling always requires async if poll: async = True # Create URL and message url = "http://{}".format(host) if not port == "80": url += ":{}".format(port) if py: