def check_link_folders(folders_to_share, registered_data_files, is_pass=False):
    """List registered data files and hard-link the given folders.

    Registered data files are only echoed to the log; folders in
    *folders_to_share* are linked under ``<LINK_PATH>/base/data_link`` and
    then verified to exist.  When no folders are linked but registered data
    files were listed, the user is asked to confirm the linked paths in
    run.sh (skipped when *is_pass* is set).
    """
    has_output = False
    if registered_data_files:
        has_output = True
        for entry in registered_data_files:
            name = entry.decode("utf-8") if isinstance(entry, bytes) else entry
            log(f"[bold green] * {name}[/bold green] => [bold yellow]../data_link/{name}[/bold yellow]")

    if folders_to_share:
        has_output = True
        destination = env.LINK_PATH / "base" / "data_link"
        check_linked_data(folders_to_share, destination, is_pass)
        for folder in folders_to_share:
            if not os.path.isdir(folder):
                log(f"E: {folder} path does not exist")
    elif has_output:
        print("")
        if not is_pass:
            question_yes_no(
                "#> Would you like to continue with linked folder path in your run.sh?\n"
                "If no, please feel free to update your run.sh file and continue",
                is_exit=True,
            )
def check_linked_data(paths_from, path_to, is_pass=False):
    """Generate folder as hard linked of the given folder paths.

    :param paths_from: folder paths to create hard links from
    :param path_to: destination folder that receives the hard links
    :param is_pass: when True, skip the interactive confirmation prompt
    """
    # fail fast: validate the source folders *before* touching the
    # filesystem, so a bad path does not leave partially-created links
    # behind (the original checked existence only after linking)
    for folder in paths_from:
        if not os.path.isdir(folder):
            log(f"E: {folder} path does not exist")
            sys.exit(1)

    mkdir(path_to)
    link = Link(paths_from, path_to)
    link.link_folders(paths_from)
    log()
    for key, value in link.data_map.items():
        test_with_small_data(value)
        log(f"[bold green] * {key}[/bold green] => [bold yellow]../data_link/{value}[/bold yellow]")

    if not is_pass:
        print("")
        question_yes_no(
            "#> Would you like to continue with linked folder path in your run.sh?\n"
            "If no, please feel free to update your run.sh file and continue",
            is_exit=True,
        )
def swarm_connect(self, ipfs_id: str):
    """Swarm connect into the ipfs node.

    :param ipfs_id: multiaddr of the peer to connect to
    :raises IpfsNotConnected: when the local ipfs daemon is not running
    :raises QuietExit: when the user declines to continue after a
        dial-to-self failure
    """
    if not is_ipfs_on():
        raise IpfsNotConnected

    # TODO: check is valid IPFS id
    try:
        log(f" * trying to connect into {ipfs_id}")
        # NOTE(review): ipfs binary path is hard-coded — confirm it matches
        # the install location on all target hosts
        cmd = ["/usr/local/bin/ipfs", "swarm", "connect", ipfs_id]
        p, output, e = popen_communicate(cmd)
        if p.returncode != 0:
            log()
            # strip rich-markup artifacts and the "e: " prefix from stderr
            e = e.replace("[/", "/").replace("]", "").replace("e: ", "").rstrip()
            if "failure: dial to self attempted" in e:
                log(f"E: {e}")
                if not cfg.IS_FULL_TEST and not question_yes_no("#> Would you like to continue?"):
                    raise QuietExit
            else:
                log("E: connection into provider's IPFS node via swarm is not accomplished")
                raise Exception(e)
        else:
            log(f"{output} {ok()}")
    except Exception as e:
        print_tb(e)
        raise  # bare re-raise keeps the original traceback (was: `raise e`)
def check_before_submit(self, provider, _from, provider_info, key, job):
    """Check job's conditions before submitting.

    :param provider: provider's address the job is submitted to
    :param _from: requester's address
    :param provider_info: dict carrying the provider's registered info
    :param key: storage key of the submitted source code
    :param job: job description object
    :raises Exception: when any precondition on the job fails
    :raises QuietExit: when the user declines to continue after a failed
        swarm connect
    """
    self.is_provider_valid(provider)
    self.is_requester_valid(_from)
    main_storage_id = job.storage_ids[0]
    is_use_ipfs = False
    for storage_id in job.storage_ids:
        if storage_id > 4:
            raise Exception(
                "Wrong storage_ids value is given. Please provide from 0 to 4")

        if storage_id in [StorageID.IPFS, StorageID.IPFS_GPG]:
            # keep scanning (no break): every storage_id must still pass
            # the range check above
            is_use_ipfs = True

    if not job.code_hashes:
        raise Exception("source_code_hash list is empty")

    if len(key) >= 64:
        raise Exception(
            "Length of key is greater than 64, please provide lesser")

    key_len = 46  # length of an IPFS CIDv0 hash
    if len(key) != key_len and main_storage_id in [
            StorageID.IPFS, StorageID.IPFS_GPG
    ]:
        raise Exception(
            f"E: key's length does not match with its original length, it should be {key_len}. "
            f"Please check your key length, given key={key}")

    key_len = 33  # length of a gdrive key
    if len(key) != key_len and main_storage_id == StorageID.GDRIVE:
        raise Exception(
            f"E: key's length does not match with its original length, it should be {key_len}. "
            "Please check your key length")

    for idx, core in enumerate(job.cores):
        if core > provider_info["available_core_num"]:
            # fixed message: the original printed {core} twice instead of
            # the provider's available core number
            raise Exception(
                f"Requested core={core} is greater than the provider's core number "
                f"({provider_info['available_core_num']})")

        if job.run_time[idx] == 0:
            raise Exception(
                f"run_time{br(idx)} is provided as 0. Please provide non-zero value"
            )

    for core_min in job.run_time:
        if core_min > 1440:
            raise Exception(
                "E: run_time provided greater than 1440. Please provide smaller value"
            )

    for cache_type in job.cache_types:
        if cache_type > 1:
            # cache_type should be {0: private, 1: public}
            raise Exception(
                f"E: cache_type ({cache_type}) provided greater than 1. "
                "Please provide smaller value")

    if is_use_ipfs:
        if not is_ipfs_on():
            sys.exit()

        try:
            cfg.ipfs.swarm_connect(provider_info["ipfs_id"])
        except Exception as e:
            log(f"E: {e}")
            if not cfg.IS_FULL_TEST and not question_yes_no(
                    "#> Would you like to continue?"):
                raise QuietExit from e

    for idx, source_code_hash in enumerate(job.code_hashes_str):
        if source_code_hash == "":
            raise Exception(
                f"source_code_hash{br(idx)} should not be empty string")

    requester_info = self.get_requester_info(_from)
    gpg_fingerprint = get_gpg_fingerprint(env.GMAIL).upper()
    if requester_info["gpg_fingerprint"].upper() != gpg_fingerprint:
        raise Exception(
            f"E: gpg_fingerprint does not match {requester_info['gpg_fingerprint'].upper()} "
            f"with registered gpg_fingerprint={gpg_fingerprint}")

    # raises if the gpg key is not published; the original wrapped this in
    # a no-op `try/except Exception as e: raise e`
    is_gpg_published(gpg_fingerprint)
def run_driver(given_bn):
    """Run the main driver script for eblocbroker on the background.

    :param given_bn: block number to start reading events from; when 0 the
        saved ``block_continue`` value (or the deployed block number) is used
    :raises Terminate: on missing environment configuration or an invalid
        deployed/read block number
    :raises QuietExit: when the provider's ORCID is not verified
    """
    # dummy sudo command to get the password when session starts for only to
    # create users and submit the slurm job under another user
    run(["sudo", "printf", "hello"])
    kill_process_by_name("gpg-agent")
    config.logging = setup_logger(_log.DRIVER_LOG)
    # driver_cancel_process = None
    try:
        from broker.imports import connect

        connect()
        Ebb: "Contract.Contract" = cfg.Ebb
        driver = Driver()
    except Exception as e:
        raise Terminate from e

    if not env.PROVIDER_ID:
        raise Terminate(f"PROVIDER_ID is None in {env.LOG_PATH}/.env")

    if not env.WHOAMI or not env.EBLOCPATH or not env.PROVIDER_ID:
        raise Terminate(f"Please run: {env.BASH_SCRIPTS_PATH}/folder_setup.sh")

    if not env.SLURMUSER:
        raise Terminate(f"SLURMUSER is not set in {env.LOG_PATH}/.env")

    # the original wrapped this in a no-op `try/except Exception as e: raise e`
    deployed_block_number = Ebb.get_deployed_block_number()
    if not env.config["block_continue"]:
        env.config["block_continue"] = deployed_block_number

    if given_bn > 0:
        block_number_saved = int(given_bn)
    else:
        block_number_saved = env.config["block_continue"]
        if not isinstance(env.config["block_continue"], int):
            log("E: block_continue variable is empty or contains an invalid character")
            if not question_yes_no("#> Would you like to read from the contract's deployed block number?"):
                terminate()

            block_number_saved = deployed_block_number
            if deployed_block_number:
                env.config["block_continue"] = deployed_block_number
            else:
                raise Terminate(f"deployed_block_number={deployed_block_number} is invalid")

    _tools(block_number_saved)
    try:
        Ebb.is_contract_exists()
    except Exception:  # was a bare `except:` — would also swallow SystemExit/KeyboardInterrupt
        terminate(
            "Contract address does not exist on the blockchain, is the blockchain sync?\n"
            f"block_number={Ebb.get_block_number()}",
            is_traceback=False,
        )

    if cfg.IS_THREADING_ENABLED:
        log(f"## is_threading={cfg.IS_THREADING_ENABLED}")

    Ebb.is_eth_account_locked(env.PROVIDER_ID)
    log(f"==> whoami={env.WHOAMI}")
    log(f"==> log_file={_log.DRIVER_LOG}")
    log(f"==> rootdir={os.getcwd()}")
    log(f"==> is_web3_connected={Ebb.is_web3_connected()}")
    if not Ebb.does_provider_exist(env.PROVIDER_ID):
        # updated since cluster is not registered
        env.config["block_continue"] = Ebb.get_block_number()
        terminate(
            textwrap.fill(
                f"Your Ethereum address {env.PROVIDER_ID} "
                "does not match with any provider in eBlocBroker. Please register your "
                "provider using your Ethereum Address in to the eBlocBroker. You can "
                "use eblocbroker/register_provider.py script to register your provider."
            ),
            is_traceback=False,
        )

    if not Ebb.is_orcid_verified(env.PROVIDER_ID):
        raise QuietExit(f"provider's ({env.PROVIDER_ID}) ORCID is not verified")

    blk_read = block_number_saved
    balance_temp = Ebb.get_balance(env.PROVIDER_ID)
    eth_balance = Ebb.eth_balance(env.PROVIDER_ID)
    log(f"==> deployed_block_number={deployed_block_number}")
    log(f"==> account_balance={eth_balance} gwei | {cfg.w3.fromWei(eth_balance, 'ether')} eth")
    log(f"==> Ebb_balance={balance_temp}")
    # main event loop: poll for new logged jobs and process them
    while True:
        wait_until_idle_core_available()
        time.sleep(0.2)
        if not str(blk_read).isdigit():
            raise Terminate(f"block_read_from={blk_read}")

        balance = Ebb.get_balance(env.PROVIDER_ID)
        if cfg.IS_THREADING_ENABLED:
            _squeue()

        console_ruler()
        if isinstance(balance, int):
            value = int(balance) - int(balance_temp)
            if value > 0:
                log(f"==> Since Driver started provider_gained_wei={value}")

        current_bn = Ebb.get_block_number()
        log(f" * {get_date()} waiting new job to come since block_number={blk_read}")
        log(f"==> current_block={current_bn} | sync_from={blk_read}")
        flag = True
        while current_bn < int(blk_read):
            # wait for the chain head to reach the block we want to read from
            current_bn = Ebb.get_block_number()
            if flag:
                log(f"## Waiting block number to be updated, it remains constant at {current_bn}")

            flag = False
            time.sleep(2)

        log(f"#> [bold yellow]Passed incremented block number... Watching from block_number=[cyan]{blk_read}")
        blk_read = str(blk_read)  # reading events' block number has been updated
        slurm.pending_jobs_check()
        try:
            driver.logged_jobs_to_process = Ebb.run_log_job(blk_read, env.PROVIDER_ID)
            driver.process_logged_jobs()
            if len(driver.logged_jobs_to_process) > 0 and driver.latest_block_number > 0:
                # updates the latest read block number
                blk_read = driver.latest_block_number + 1
                env.config["block_continue"] = blk_read
            if not driver.is_provider_received_job:
                blk_read = env.config["block_continue"] = current_bn
        except Exception as e:
            log()
            log(f"E: {e}")
            if "Filter not found" in str(e) or "Read timed out" in str(e):
                # HTTPSConnectionPool(host='core.bloxberg.org', port=443): Read timed out. (read timeout=10)
                log("## sleeping for 60 seconds...", end="")
                time.sleep(60)
                log(ok())
            else:
                print_tb(e)
def register_requester(self, yaml_fn, is_question=True):
    """Register or update requester into smart contract.

    :param yaml_fn: path of the yaml file keeping the requester's info
    :param is_question: when True, ask for confirmation before updating an
        already-registered requester
    :raises QuietExit: when the yaml file is missing, the account is
        invalid, or the stored info already matches the new info
    """
    yaml_fn = os.path.expanduser(yaml_fn)
    try:
        run_ipfs_daemon()
        client = ipfshttpclient.connect("/ip4/127.0.0.1/tcp/5001/http")
    except Exception as e:
        log("E: Run ipfs daemon to detect your ipfs_id")
        print_tb(e)
        sys.exit(1)

    if not os.path.exists(yaml_fn):
        log(f"E: yaml_fn({yaml_fn}) does not exist")
        raise QuietExit

    args = Yaml(yaml_fn)
    ipfs_id = cfg.ipfs.get_ipfs_id(client)
    email = env.GMAIL
    gpg_fingerprint = get_gpg_fingerprint(email)
    # raises if the gpg key is not published; the original wrapped this in
    # a no-op `try/except Exception as e: raise e`
    is_gpg_published(gpg_fingerprint)
    account = args["config"]["account"].lower()
    email = args["config"]["email"]
    federation_cloud_id = args["config"]["federation_cloud_id"]
    # if env.IS_BLOXBERG:
    #     account = self.brownie_load_account().address
    log(f"==> registering {account} as requester")
    if is_byte_str_zero(account):
        log(f"E: account={account} is not valid, change it in [{c.pink}]~/.ebloc-broker/cfg.yaml")
        raise QuietExit

    if len(federation_cloud_id) >= 128:
        raise Exception("E: federation_cloud_id is more than 128")

    if len(email) >= 128:
        raise Exception("E: email is more than 128")

    if len(gpg_fingerprint) != 40:
        raise Exception("E: gpg_fingerprint should be 40 characters")

    if self.does_requester_exist(account):
        log(f"warning: requester {account} is already registered")
        # use `self` instead of the module-level `Ebb` for consistency with
        # the other calls in this method
        requester_info = self.get_requester_info(account)
        if (requester_info["email"] == email
                and requester_info["gpg_fingerprint"] == gpg_fingerprint
                and requester_info["ipfs_id"] == ipfs_id
                and requester_info["f_id"] == federation_cloud_id):
            log(requester_info)
            log("## Same requester information is provided, nothing to do")
            raise QuietExit

        log("==> [bold yellow]registered_requester_info:")
        log(requester_info)
        _requester_info = {
            "email": email,
            "federation_cloud_id": federation_cloud_id,
            "gpg_fingerprint": gpg_fingerprint,
            "ipfs_id": ipfs_id,
        }
        log("==> [bold yellow]new_requester_info:")
        log(_requester_info)
        if is_question and not question_yes_no("#> Would you like to update requester info?"):
            return

    try:
        tx = self._register_requester(account, gpg_fingerprint, email, federation_cloud_id, ipfs_id)
        return self.tx_id(tx)
    except Exception as e:
        print_tb(e)
        raise  # bare re-raise keeps the original traceback (was: `raise e`)