def update_job_cores(self, provider, job_key, index=0, received_bn=0) -> int:
    """Update job cores."""
    self.set_job_received_block_number(received_bn)
    try:
        event_filter = self._eBlocBroker.events.LogJob.createFilter(
            argument_filters={"provider": str(provider)},
            fromBlock=int(self.job_info["received_block_number"]),
            toBlock=self.to_block,
        )
        for logged_job in event_filter.get_all_entries():
            if logged_job.args["jobKey"] == job_key and logged_job.args["index"] == int(index):
                self.job_info["received_block_number"] = received_bn = int(logged_job["blockNumber"])
                self.job_info.update({"core": logged_job.args["core"]})
                self.job_info.update({"run_time": logged_job.args["runTime"]})
                self.job_info.update({"cloudStorageID": logged_job.args["cloudStorageID"]})
                self.job_info.update({"cacheType": logged_job.args["cacheType"]})
                break
        else:
            log(f"E: failed to find job({job_key}) to update")

        return received_bn
    except Exception as e:
        print_tb(f"E: Failed to update_job_cores.\n{e}")
        raise e
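# illustrative usage sketch (not from the original source): `update_job_cores`
# is expected to be called on a contract wrapper whose `job_info` dict was
# first populated via `get_job_info`; `provider_address` and `job_key` below
# are placeholders.
#
#   received_bn = ebb.update_job_cores(provider_address, job_key, index=0)
#   log(ebb.job_info["core"], ebb.job_info["run_time"])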
def _main(given_bn):
    lock = None
    try:
        is_driver_on(process_count=1, is_print=False)
        try:
            lock = zc.lockfile.LockFile(env.DRIVER_LOCKFILE, content_template=str(pid))
        except PermissionError:
            print_tb("E: PermissionError is generated for the locked file")
            give_rwe_access(env.WHOAMI, "/tmp/run")
            lock = zc.lockfile.LockFile(env.DRIVER_LOCKFILE, content_template=str(pid))

        run_driver(given_bn)
    except HandlerException:
        pass
    except QuietExit as e:
        log(e, is_err=True)
    except zc.lockfile.LockError:
        log(f"E: Driver cannot lock the file {env.DRIVER_LOCKFILE}, the pid file is in use")
    except Terminate as e:
        terminate(str(e), lock)
    except Exception as e:
        print_tb(e)
        breakpoint()  # DEBUG: end of program reached or CTRL-C pressed
    finally:
        with suppress(Exception):
            if lock:
                lock.close()
def eudat_submit(job: Job, is_pass=False, required_confs=1):
    log("==> Submitting source code through [blue]EUDAT[/blue]")
    Ebb = cfg.Ebb
    requester = Ebb.w3.toChecksumAddress(job.requester_addr)
    oc_client = "059ab6ba-4030-48bb-b81b-12115f531296"
    connect()
    try:
        job.check_account_status(requester)
    except Exception as e:
        print_tb(e)
        raise e

    login(oc_client, env.LOG_PATH.joinpath(".eudat_client.txt"), env.OC_CLIENT)
    if len(sys.argv) == 3:
        provider = str(sys.argv[1])
        tar_hash = sys.argv[2]
        log(f"==> provided_hash={tar_hash}")
    else:
        provider = Ebb.w3.toChecksumAddress(job.provider_addr)

    job.folders_to_share = job.paths
    check_link_folders(job.data_paths, job.registered_data_files, is_pass=is_pass)
    return submit(provider, requester, job, required_confs=required_confs)
def main():
    received_block_number = 0
    job_id = 0
    if len(sys.argv) > 3:
        provider = str(sys.argv[1])
        job_key = str(sys.argv[2])
        index = int(sys.argv[3])
        if len(sys.argv) == 5:
            job_id = int(sys.argv[4])

        if len(sys.argv) == 6:
            received_block_number = int(sys.argv[5])
    else:
        log("E: Provide <provider, job_key, index, and job_id> as arguments")
        sys.exit(1)

    try:
        Ebb = cfg.Ebb
        Ebb.get_job_info(provider, job_key, index, job_id, received_block_number, is_log_print=True)
    except Exception as e:
        raise e
def process_payment_tx(self):
    try:
        tx_hash = eblocbroker_function_call(
            lambda: Ebb.process_payment(
                self.job_key,
                self.index,
                self.job_id,
                self.elapsed_time,
                self.result_ipfs_hash,
                self.storage_ids,
                self.end_time_stamp,
                self.data_transfer_in,
                self.data_transfer_out,
                self.job_info["core"],
                self.job_info["run_time"],
                self.received_block_number,
            ),
            max_retries=10,
        )
    except Exception as e:
        print_tb(e)
        sys.exit(1)

    log(f"==> process_payment {self.job_key} {self.index}")
    return tx_hash
def _register_provider(self, *args, **kwargs):
    """Register provider."""
    if is_byte_str_zero(env.PROVIDER_ID):
        log(f"E: PROVIDER_ID={env.PROVIDER_ID} is not valid, change it in [{c.pink}]~/.ebloc-broker/.env")
        raise QuietExit

    if self.does_provider_exist(env.PROVIDER_ID):
        log(
            f"warning: Provider {env.PROVIDER_ID} is already registered.\n"
            "Please call the [blue]update_provider_info.py[/blue] or "
            "[blue]update_provider_prices.py[/blue] script for an update."
        )
        raise QuietExit

    if kwargs["commitment_blk"] < cfg.BLOCK_DURATION_1_HOUR:
        raise Exception(f"E: Commitment block number should be greater than {cfg.BLOCK_DURATION_1_HOUR}")

    if len(kwargs["federation_cloud_id"]) >= 128:
        raise Exception("E: federation_cloud_id should be less than 128 characters")

    if len(kwargs["email"]) >= 128:
        raise Exception("E: e-mail should be less than 128 characters")

    try:
        tx = self.register_provider(*args)
        return self.tx_id(tx)
    except Exception as e:
        raise e
def _remove(path: str, is_verbose=False):
    """Remove file or folders based on its type.

    __ https://stackoverflow.com/a/10840586/2402577
    """
    try:
        if path == "/":
            raise ValueError("E: Attempting to remove root(/)")

        if os.path.isfile(path):
            with suppress(FileNotFoundError):
                os.remove(path)
        elif os.path.isdir(path):
            # deletes a directory and all its contents
            shutil.rmtree(path)
        else:
            if is_verbose:
                log(f"warning: {WHERE(1)} Nothing is removed, following path does not exist:\n[magenta]{path}")

            return

        if is_verbose:
            log(f"==> {WHERE(1)} remove following path:\n[magenta]{path}")
    except OSError as e:
        # suppress the exception if it is a file-not-found error;
        # otherwise, re-raise the exception
        if e.errno != errno.ENOENT:
            print_tb(e)
            raise e
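# illustrative usage sketch (paths are placeholders):
#
#   _remove("/tmp/run/result.tar.gz")                    # removes a single file
#   _remove("/tmp/run/job_artifacts", is_verbose=True)   # recursively removes a directory, logging the path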
def add_bloxberg_config(fname):
    bloxberg_config = {
        "name": "bloxberg (Bloxberg)",
        "id": "bloxberg",
        "chainid": 8995,
        "host": "https://core.bloxberg.org",
        "explorer": "https://blockexplorer.bloxberg.org/api",
    }
    config, ind, bsi = ruamel.yaml.util.load_yaml_guess_indent(open(fname))
    data = config
    is_bloxberg_added = False
    for config in data["live"]:
        if config["name"] == "Ethereum":
            for network in config["networks"]:
                if "bloxberg" in network["name"]:
                    is_bloxberg_added = True
                    if json.loads(json.dumps(network)) == bloxberg_config:
                        log(f"## bloxberg config is already added into {fname}")
                    else:
                        network["name"] = bloxberg_config["name"]
                        network["id"] = bloxberg_config["id"]
                        network["chainid"] = bloxberg_config["chainid"]
                        network["host"] = bloxberg_config["host"]
                        network["explorer"] = bloxberg_config["explorer"]

            if not is_bloxberg_added:
                config["networks"].append(bloxberg_config)

    yaml = ruamel.yaml.YAML()
    yaml.indent(mapping=ind, sequence=ind, offset=bsi)
    with open(fname, "w") as fp:
        yaml.dump(data, fp)
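# illustrative usage sketch: the target path assumes Brownie's default network
# configuration file, whose top-level `live:` list matches the structure this
# function walks.
#
#   from os.path import expanduser
#   add_bloxberg_config(expanduser("~/.brownie/network-config.yaml"))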
def get_provider_info(address):
    """Return provider info.

    :param str address: Ethereum address of the provider
    """
    t1.join()
    log(Ebb.get_provider_info(address))
def deposit_storage(eth_address, is_provider=False):
    """Deposit storage balance.

    :param str eth_address: Ethereum address of the provider
    :param bool is_provider: Checks if the caller is the provider
    """
    from_block = Ebb.get_deployed_block_number()
    if is_provider:
        event_filter = Ebb._eBlocBroker.events.LogJob.createFilter(
            fromBlock=int(from_block),
            argument_filters={"provider": eth_address},
            toBlock="latest",
        )
    else:  # should be owner of the job
        event_filter = Ebb._eBlocBroker.events.LogJob.createFilter(
            fromBlock=int(from_block),
            argument_filters={"owner": eth_address},
            toBlock="latest",
        )

    for job in event_filter.get_all_entries():
        job_info = job.args
        flag_check = []
        deposit = 0  # guards against an unbound name when the provider branch never runs
        for idx, code_hash in enumerate(job_info["sourceCodeHash"]):
            main_cloud_storage_id = job_info["cloudStorageID"][idx]
            if main_cloud_storage_id in (StorageID.IPFS, StorageID.IPFS_GPG):
                _hash = bytes32_to_ipfs(code_hash)
                _type = "ipfs_hash"
            else:
                _hash = cfg.w3.toText(code_hash)
                _type = "md5sum"

            log(br(f"{idx}, {_type}"), "bold cyan", end="")
            if len(code_hash) <= 32:
                log(f" {_hash} bytes={code_hash}", "bold")
            else:
                log(f" {_hash}\n\t{code_hash}", "bold")

            provider = Ebb.w3.toChecksumAddress(job_info["provider"])
            if is_provider and eth_address.lower() == provider.lower():
                data_owner = Ebb.w3.toChecksumAddress(job_info["owner"])
                deposit, output = Ebb.get_storage_info(provider, data_owner, code_hash)
                flag_check.append(output[3])
                log(f"deposit={deposit}, {output}", "bold")

        if deposit > 0 and not any(flag_check):
            is_verified_list = [True, True]
            tx = Ebb._data_received(
                job_info["jobKey"],
                job_info["index"],
                job_info["sourceCodeHash"],
                job_info["cacheType"],
                is_verified_list,
            )
            get_tx_status(Ebb.tx_id(tx))
        else:
            log("warning: all data files are already verified")
def balance():
    from broker._utils._log import log

    try:
        balance = cfg.Ebb.get_balance(args.eth_address)
        log(f"## balance={balance}")
    except Exception as e:
        print_tb(e)
def main():
    data_price = 1
    commitment_blk_dur = 600
    for code_hash in hashes:
        with suppress(Exception):
            _register_data(code_hash, data_price, commitment_blk_dur)

    log(f"## registered {len(hashes)} data files {ok()}")
def balance(eth_address):
    """Return the balance of the given Ethereum address.

    :param str eth_address: Ethereum address of the provider
    """
    t1.join()
    balance = Ebb.get_balance(eth_address)
    log(f"## balance={balance}")
def find_all(self, sort_str=""):
    """Find all records."""
    if not sort_str:
        cursor = self.collection.find({})
    else:
        cursor = self.collection.find({}).sort(sort_str)

    for document in cursor:
        log(document)
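# illustrative usage sketch (`ebb_mongo` stands for any instance whose
# `collection` attribute is a pymongo collection):
#
#   ebb_mongo.find_all()             # log every document in natural order
#   ebb_mongo.find_all("timestamp")  # log documents sorted by the `timestamp` key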
def process_logged_job(self, idx):
    """Process logged job one by one."""
    self.received_block = []
    self.storage_duration = []
    wait_until_idle_core_available()
    self.is_provider_received_job = True
    console_ruler(idx, character="-")
    # sourceCodeHash = binascii.hexlify(logged_job.args['sourceCodeHash'][0]).decode("utf-8")[0:32]
    job_key = self.logged_job.args["jobKey"]
    index = self.logged_job.args["index"]
    self.job_block_number = self.logged_job["blockNumber"]
    self.cloud_storage_id = self.logged_job.args["cloudStorageID"]
    log(f"## job_key=[magenta]{job_key}[/magenta] | index={index}")
    log(
        f"received_block_number={self.job_block_number} \n"
        f"transactionHash={self.logged_job['transactionHash'].hex()} | "
        f"log_index={self.logged_job['logIndex']} \n"
        f"provider={self.logged_job.args['provider']} \n"
        f"received={self.logged_job.args['received']}",
        "bold yellow",
    )
    if self.logged_job["blockNumber"] > self.latest_block_number:
        self.latest_block_number = self.logged_job["blockNumber"]

    try:
        run([env.BASH_SCRIPTS_PATH / "is_str_valid.sh", job_key])
    except:
        logging.error("E: Filename contains an invalid character")
        return

    try:
        job_id = 0  # main job_id
        self.job_info = eblocbroker_function_call(
            partial(self.Ebb.get_job_info, env.PROVIDER_ID, job_key, index, job_id, self.job_block_number),
            max_retries=10,
        )
        cfg.Ebb.get_job_code_hashes(env.PROVIDER_ID, job_key, index, self.job_block_number)
        self.requester_id = self.job_info["job_owner"]
        self.job_info.update({"received_block": self.received_block})
        self.job_info.update({"storage_duration": self.storage_duration})
        self.job_info.update({"cacheType": self.logged_job.args["cacheType"]})
        cfg.Ebb.analyze_data(job_key, env.PROVIDER_ID)
        self.job_infos.append(self.job_info)
        log(f"==> requester={self.requester_id}")
        log("==> [yellow]job_info:", "bold")
        log(self.job_info)
    except Exception as e:
        print_tb(e)
        return

    for job in range(1, len(self.job_info["core"])):
        with suppress(Exception):
            # if a workflow is given then add its jobs into the list
            self.job_infos.append(
                self.Ebb.get_job_info(env.PROVIDER_ID, job_key, index, job, self.job_block_number)
            )

    self.check_requested_job()
def countdown(seconds: int, is_silent=False):
    if not is_silent:
        log(f"## sleep_time={seconds} seconds")

    while seconds:
        mins, secs = divmod(seconds, 60)
        timer = f"sleeping for {mins:02d}:{secs:02d}"
        print(f" * {_date()} | {timer}", end="\r")
        time.sleep(1)
        seconds -= 1
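# illustrative usage sketch:
#
#   countdown(90)                  # logs sleep_time, then prints "sleeping for 01:30" ... in place
#   countdown(15, is_silent=True)  # same wait without the initial log line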
def is_process_on(process_name, name="", process_count=0, port=None, is_print=True) -> bool:
    """Check whether the process runs on the background.

    https://stackoverflow.com/a/6482230/2402577
    """
    if not name:
        name = process_name

    p1 = Popen(["ps", "auxww"], stdout=PIPE)
    p2 = Popen(
        ["grep", "-v", "-e", "flycheck_", "-e", "grep", "-e", "emacsclient"],
        stdin=p1.stdout,
        stdout=PIPE,
    )
    p1.stdout.close()  # type: ignore
    p3 = Popen(["grep", "-E", process_name], stdin=p2.stdout, stdout=PIPE)
    p2.stdout.close()  # type: ignore
    output = p3.communicate()[0].decode("utf-8").strip().splitlines()
    pids = []
    for line in output:
        fields = line.strip().split()
        # unlike awk, array indices start at 0; field 1 holds the pid
        pids.append(fields[1])

    if len(pids) > process_count:
        if port:
            # find processes based on the port they listen on and match their pid
            # https://stackoverflow.com/a/5043907/2402577
            p1 = Popen(["lsof", "-i", f"tcp:{port}"], stdout=PIPE)
            p2 = Popen(["grep", "LISTEN"], stdin=p1.stdout, stdout=PIPE)
            out = p2.communicate()[0].decode("utf-8").strip()
            running_pid = out.strip().split()[1]
            if running_pid in pids:
                if is_print:
                    log(f"==> [green]{name}[/green] is already running on the background, its pid={running_pid}")

                return True
        else:
            if is_print:
                log(f"==> [green]{name}[/green] is already running on the background")

            return True

    name = name.replace("\\", "").replace(">", "").replace("<", "")
    if is_print:
        print_tb(f"[bold green]{name}[/bold green] is not running on the background {WHERE(1)}")

    return False
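# illustrative usage sketch (process and port values are placeholders):
#
#   if not is_process_on("mongod", name="MongoDB", port=27017):
#       ...  # start the daemon before continuing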
def _ipfs_add_folder(self, folder_path):
    try:
        self.result_ipfs_hash = cfg.ipfs.add(folder_path)
        log(f"==> result_ipfs_hash={self.result_ipfs_hash}")
        cfg.ipfs.pin(self.result_ipfs_hash)
        data_transfer_out = cfg.ipfs.get_cumulative_size(self.result_ipfs_hash)
    except Exception as e:
        print_tb(e)
        raise e

    data_transfer_out = byte_to_mb(data_transfer_out)
    self.data_transfer_out += data_transfer_out
def is_gpg_published(gpg_fingerprint):
    try:
        run(["gpg", "--list-keys", gpg_fingerprint])
    except Exception as e:
        log("## running: gpg --verbose --keyserver hkps://keyserver.ubuntu.com --send-keys <key_id>")
        try:
            run([
                "gpg",
                "--verbose",
                "--keyserver",
                "hkps://keyserver.ubuntu.com",
                "--send-keys",
                gpg_fingerprint,
            ])
        except:
            raise Exception from e
def get_file_size(self, fn, folder_name):
    # accept_given_shares()
    try:
        log(f"## trying to get {fn} info from EUDAT")
        #: DAV/Properties/getcontentlength is the number of bytes of a resource
        info = config.oc.file_info(fn)
        return info.get_size()
    except Exception as e:
        log(f"warning: {e}")
        if "HTTP error: 404" in str(e):
            try:
                _folder_fn = folder_name
                _list = fnmatch.filter(os.listdir(env.OWNCLOUD_PATH), f"{_folder_fn} *")
                for _dir in _list:
                    shutil.move(f"{env.OWNCLOUD_PATH}/{_dir}", f"{env.OWNCLOUD_PATH}/{_folder_fn}")

                info = config.oc.file_info(fn)
                return info.get_size()
            except Exception as err:  # separate name keeps `e` in scope below
                log(f"E: {err}")

            _list = config.oc.list(".")
            for path in _list:
                if folder_name in path.get_name() and folder_name != path.get_name():
                    config.oc.move(path.get_name(), folder_name)
                    info = config.oc.file_info(fn)
                    return info.get_size()

        log(str(e))
        raise Exception("E: failed all the attempts to get file info at EUDAT") from e
def total_size_to_download(self):
    data_transfer_in_to_download = 0  # total size to download in bytes
    for idx, source_code_hash_text in enumerate(self.code_hashes_to_process):
        if self.cloudStorageID[idx] != StorageID.NONE:
            folder_name = source_code_hash_text
            if folder_name not in self.is_cached:
                data_transfer_in_to_download += self.get_file_size(
                    f"/{folder_name}/{folder_name}.tar.gz", folder_name
                )

    self.data_transfer_in_to_download_mb = bytes_to_mb(data_transfer_in_to_download)
    log(
        f"## Total size to download {data_transfer_in_to_download} bytes == "
        f"{self.data_transfer_in_to_download_mb} MB"
    )
def run(self) -> bool:
    self.start_time = time.time()
    if cfg.IS_THREADING_ENABLED:
        self.thread_log_setup()
        log(f"## Keep track from: tail -f {self.drivers_log_path}")

    try:
        log(f" * log_path={self.drivers_log_path}")
        self._run()
        # self.thread_log_setup()
        return True
    except Exception as e:
        print_tb(f"{self.job_key}_{self.index} {e}")
        sys.exit(1)
    finally:
        time.sleep(0.25)
def register_requester(yaml_fn):
    """Register requester.

    :param str yaml_fn: Full file path of the YAML file that contains the requester info
    """
    t1.join()
    try:
        tx_hash = Ebb.register_requester(yaml_fn)
        if tx_hash:
            get_tx_status(tx_hash)
        else:
            log()
    except QuietExit:
        pass
    except Exception as e:
        print_tb(e)
def test_submit_job_gas():
    global provider
    global requester
    mine(1)
    mine(5)
    provider = accounts[0]
    requester = accounts[1]
    register_provider(100)
    register_requester(requester)
    start_time = 10
    completion_time = 20
    cores = [127]
    index = 0
    submit_receipt(index, cores, start_time, completion_time, elapsed_time=1)
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    start_time = 10
    completion_time = 25
    cores = [1]
    index = 1
    submit_receipt(index, cores, start_time, completion_time, elapsed_time=1)
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    start_time = 11
    completion_time = 25
    cores = [1]
    index = 2
    tx = submit_receipt(index, cores, start_time, completion_time, elapsed_time=1)
    gas_base = int(tx.__dict__["gas_used"])
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    start_time = 8
    completion_time = 9
    cores = [65]
    index = 3
    tx = submit_receipt(index, cores, start_time, completion_time, elapsed_time=1)
    gas_end = int(tx.__dict__["gas_used"])
    check_list()
    log(f"==> gas_cost_for_iteration={gas_end - gas_base}")
def handler(signum, frame):
    """Register a handler for the timeout.

    Example error: Signal handler called with signum=14
    frame=<frame at 0x7f2fb1cece40, file '/usr/lib/python3.8/threading.py', line 1027

    __ https://docs.python.org/3/library/signal.html#example
    """
    if any(x in str(frame) for x in ["subprocess.py", "ssl.py", "log_job", "connection.py", "threading.py", "utils.py"]):
        pass
    else:
        log(f"E: Signal handler called with signum={signum} frame={frame}")
        traceback.print_stack()
        raise HandlerException("Forever is over, end of time")
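# illustrative wiring sketch: signum=14 is SIGALRM, so the handler pairs with
# `signal.alarm` to bound a blocking call (`long_running_call` is a placeholder):
#
#   import signal
#   signal.signal(signal.SIGALRM, handler)
#   signal.alarm(60)  # handler() raises HandlerException after 60 seconds
#   try:
#       long_running_call()
#   finally:
#       signal.alarm(0)  # cancel the pending alarm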
def _squeue():
    try:
        squeue_output = run(["squeue"])
        if "squeue: error:" in str(squeue_output):
            raise Exception("squeue: error")
    except Exception as e:
        raise Terminate(
            "warning: SLURM is not running on the background. Please run:\n"
            "sudo ./broker/bash_scripts/run_slurm.sh"
        ) from e

    # get the real info under the header, which starts after the first line
    if len(f"{squeue_output}\n".split("\n", 1)[1]) > 0:
        # checks whether squeue printed more than the header line
        log("view information about jobs located in the Slurm scheduling queue:", "bold yellow")
        log(f"{squeue_output} {ok()}", "bold")
def main(args):
    given_bn = 0
    try:
        if args.bn:
            given_bn = args.bn
        elif args.latest:
            given_bn = cfg.Ebb.get_block_number()

        if args.is_thread is False:
            cfg.IS_THREADING_ENABLED = False

        console_ruler("provider session starts")
        log(f" * {datetime.now().strftime('%Y-%m-%d %H:%M')}")
        with launch_ipdb_on_exception():  # if an exception is raised, launch ipdb
            _main(given_bn)
    except KeyboardInterrupt:
        sys.exit(1)
def about():
    from os.path import expanduser

    from broker._utils._log import log

    with open(expanduser("~/.ebloc-broker/cfg.yaml"), "r") as f:
        flag = True
        indent = 2
        for line in f:
            if flag:
                if " " in line[:2]:
                    flag = False

                if " " in line[:4]:
                    indent = 4

            if "cfg" not in line and " " * indent in line[:indent]:
                line = line[indent:]

            log(line.rstrip(), "bold")
def process_logged_jobs(self):
    """Process logged jobs."""
    self.is_cached = {}
    self.latest_block_number = 0
    self.is_provider_received_job = False
    for idx, logged_job in enumerate(self.logged_jobs_to_process):
        self.job_infos = []
        self.logged_job = logged_job
        self.requester_id = None
        try:
            self.process_logged_job(idx)
            self.sent_job_to_storage_class()
        except JobException as e:
            log(str(e))
        except Exception as e:
            print_tb(e)
            log(str(e))
            breakpoint()  # DEBUG
def test_test3():
    global provider
    global requester
    provider = accounts[0]
    requester = accounts[1]
    register_provider(100)
    register_requester(requester)
    start_time = 10
    completion_time = 20
    cores = [1]
    index = 0
    submit_receipt(index, cores, start_time, completion_time, elapsed_time=1)
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    start_time = 27
    completion_time = 35
    cores = [1]
    index = 1
    submit_receipt(index, cores, start_time, completion_time, elapsed_time=1)
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    start_time = 30
    completion_time = 45
    cores = [1]
    index = 2
    tx = submit_receipt(index, cores, start_time, completion_time, elapsed_time=1)
    gas_base = int(tx.__dict__["gas_used"])
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    start_time = 34
    completion_time = 51
    cores = [1]
    index = 3
    tx = submit_receipt(index, cores, start_time, completion_time, elapsed_time=1)
    gas_end = int(tx.__dict__["gas_used"])
    log(f"==> gas_cost_for_iteration={gas_end - gas_base}")