def subprocess_call(cmd, attempt=1, sleep_time=1):
    """Run the given command via subprocess, retrying up to `attempt` times."""
    error_msg = ""
    cmd = list(map(str, cmd))  # arguments should always be of type: str
    for count in range(attempt):
        try:
            p, output, error_msg = popen_communicate(cmd)
            if p.returncode != 0:
                if count == 0:
                    _cmd = " ".join(cmd)
                    log(f"\n$ {_cmd}", "bold red")
                    log(f"{error_msg} ", "bold", end="")
                    log(WHERE())

                if attempt > 1 and count + 1 != attempt:
                    log(f"{br(f'attempt={count}')} ", end="")
                    time.sleep(sleep_time)
            else:
                return output
        except Exception as e:
            # https://stackoverflow.com/a/1156048/2402577
            for line in traceback.format_stack():
                log(line.strip())

            raise e

    raise Exception(error_msg)

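# A minimal usage sketch for subprocess_call (illustrative only): non-str
# arguments such as Path or int are safe to pass since the command list is
# str-mapped internally; a failing command is retried `attempt` times before
# the last captured error message is raised.
def _example_subprocess_call():
    output = subprocess_call(["uname", "-a"], attempt=3, sleep_time=2)
    log(output)
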
def question_yes_no(message, is_exit=False):
    if "[Y/n]:" not in message:
        message = f"{message} [Y/n]: "

    log(text=message, end="", flush=True)
    getch = _Getch()
    while True:
        choice = getch().lower()
        if choice in set(["yes", "y", "ye", "ys", "yy", "yey"]):
            log(choice)
            return True
        elif choice in set(["no", "n", "nn"]) or choice in ["\x04", "\x03"]:
            if is_exit:
                log()
                _exit()  # terminate(is_traceback=False)
            else:
                return False  # sys.exit(1)
        else:
            log()
            log(
                f"#> Please respond with [bold green]{br('y')}es[/bold green] or "
                f"[bold green]{br('n')}o[/bold green]: ",
                end="",
            )

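# Usage sketch (illustrative only): with is_exit=True a `n` answer terminates
# the program via _exit(); with the default is_exit=False the caller branches
# on the returned bool instead.
def _example_question_yes_no():
    if question_yes_no("#> Would you like to remove the file?"):
        log("removing...")
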
def pre_submit(storage_ids, provider_address):
    is_pass = True
    required_confs = 0
    yaml_fn = Path.home() / "ebloc-broker" / "broker" / "test_setup" / "nas" / "job_nas.yaml"
    yaml_cfg = Yaml(yaml_fn)
    yaml_cfg["config"]["provider_address"] = provider_address
    for storage_id in storage_ids:
        yaml_cfg["config"]["source_code"]["storage_id"] = storage_id
        benchmark_name = create_nas_job_script(is_small=True)
        submit_base = SubmitBase(yaml_cfg.path)
        tx_hash = submit_base.submit(is_pass, required_confs)
        if required_confs >= 1:
            tx_receipt = get_tx_status(tx_hash, is_silent=True)
            if tx_receipt["status"] == 1:
                processed_logs = Ebb._eBlocBroker.events.LogJob().processReceipt(tx_receipt, errors=DISCARD)
                try:
                    if processed_logs:
                        job_result = vars(processed_logs[0].args)
                        job_result["tx_hash"] = tx_hash
                        job_result["submitted_job_kind"] = f"nas_{benchmark_name}"
                        log(job_result)
                except IndexError:
                    log(f"E: Tx({tx_hash}) is reverted")

def _submit_job(self, required_confs, requester, job_price, *args) -> "TransactionReceipt":
    self.gas_price = GAS_PRICE
    for _ in range(self.max_retries):
        self.ops = {
            "gas": self.gas,
            "gas_price": f"{self.gas_price} gwei",
            "from": requester,
            "allow_revert": True,
            "value": self.w3.toWei(job_price, "wei"),
            "required_confs": required_confs,
        }
        try:
            return self.timeout("submitJob", *args)
        except ValueError as e:
            log(f"E: {e}")
            if "Execution reverted" in str(e):
                raise e

            if "Transaction cost exceeds current gas limit" in str(e):
                self.gas -= 10000
        except KeyboardInterrupt as e:
            if "Awaiting Transaction in the mempool" in str(e):
                log("warning: Timeout Awaiting Transaction in the mempool")
                self.gas_price *= 1.13

    raise Exception("No valid Tx receipt is generated")

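# Retry arithmetic for _submit_job (illustrative numbers; GAS_PRICE is
# assumed here to start at 1 gwei): each mempool timeout escalates the bid
# by 13%, so after three timeouts the gas price is 1 * 1.13**3 ≈ 1.44 gwei,
# while a "Transaction cost exceeds current gas limit" error keeps the
# price and lowers the gas limit by 10000 before the next attempt.
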
def get_requester_info(self, requester):
    """Return requester information."""
    try:
        requester = self.w3.toChecksumAddress(requester)
        if not self.does_requester_exist(requester):
            log(
                f"E: Requester({requester}) is not registered.\n"
                "Please try again with a registered Ethereum address as the requester.\n"
                "You can register your requester using: [blue]./broker/eblocbroker_scripts/register_requester.py",
            )
            raise QuietExit

        block_read_from, orc_id = self._get_requester_info(requester)
        event_filter = self._eBlocBroker.events.LogRequester.createFilter(
            fromBlock=int(block_read_from), toBlock=int(block_read_from) + 1
        )
        gpg_fingerprint = event_filter.get_all_entries()[0].args["gpgFingerprint"].rstrip(b"\x00").hex()[24:].upper()
        requester_info = {
            "address": requester.lower(),
            "block_read_from": block_read_from,
            "email": event_filter.get_all_entries()[0].args["email"],
            "gpg_fingerprint": gpg_fingerprint,
            "ipfs_id": event_filter.get_all_entries()[0].args["ipfsID"],
            "f_id": event_filter.get_all_entries()[0].args["fID"],
            "is_orcid_verified": self.is_orcid_verified(requester),
        }
        if not is_byte_str_zero(orc_id):
            requester_info["orc_id"] = orc_id.decode("utf-8").replace("\x00", "")

        return requester_info
    except Exception as e:
        print_tb(e)
        raise e

def get_data_info(self, provider) -> None:
    pre_check_data(provider)
    try:
        prices_set_block_numbers = self.get_provider_prices_blocks(provider)
        event_filter = self._eBlocBroker.events.LogRegisterData.createFilter(
            fromBlock=int(prices_set_block_numbers[0]),
            toBlock="latest",
            argument_filters={"provider": provider},
        )
        provider_data = {}
        for entry in event_filter.get_all_entries():
            registered_data_hash = entry.args["registeredDataHash"]
            with suppress(Exception):  # ignores removed data hashes
                (price, commitment_block_duration) = cfg.Ebb.get_registered_data_prices(
                    provider, registered_data_hash, 0
                )
                provider_data[registered_data_hash] = {
                    "commitment_block_duration": commitment_block_duration,
                    "price": price,
                    "registered_block_number": entry["blockNumber"],
                }

        for k, v in provider_data.items():
            log(f" * registered_data_hash={k.decode('utf-8')}")
            log(f"\t{v}")
    except Exception as e:
        raise e

def _update_data_price():
    Ebb = cfg.Ebb
    if not Ebb.does_provider_exist(env.PROVIDER_ID):
        log(f"warning: Provider {env.PROVIDER_ID} is not registered.\n")
        raise QuietExit

    source_code_hash = "b6aaf03752dc68d625fc57b451faa2bf"
    new_data_price = 21
    commitment_block_duration = 600
    source_code_hash_bytes = cfg.w3.toBytes(text=source_code_hash)
    try:
        (price, _commitment_block_duration) = cfg.Ebb.get_registered_data_prices(
            env.PROVIDER_ID, source_code_hash_bytes, 0
        )
        if price == new_data_price and _commitment_block_duration == commitment_block_duration:
            log(f"## data([green]{source_code_hash}[/green]) is already registered with the given values")
            raise QuietExit
    except:
        raise QuietExit

    try:
        tx = Ebb.update_data_price(source_code_hash_bytes, new_data_price, commitment_block_duration)
        get_tx_status(Ebb.tx_id(tx))
    except QuietExit:
        pass
    except Exception as e:
        print_tb(e)

def swarm_connect(self, ipfs_id: str):
    """Connect into the given IPFS node via swarm."""
    if not is_ipfs_on():
        raise IpfsNotConnected

    # TODO: check whether the given IPFS id is valid
    try:
        log(f" * trying to connect into {ipfs_id}")
        cmd = ["/usr/local/bin/ipfs", "swarm", "connect", ipfs_id]
        p, output, e = popen_communicate(cmd)
        if p.returncode != 0:
            log()
            e = e.replace("[/", "/").replace("]", "").replace("e: ", "").rstrip()
            if "failure: dial to self attempted" in e:
                log(f"E: {e}")
                if not cfg.IS_FULL_TEST and not question_yes_no("#> Would you like to continue?"):
                    raise QuietExit
            else:
                log("E: swarm connection into the provider's IPFS node could not be established")
                raise Exception(e)
        else:
            log(f"{output} {ok()}")
    except Exception as e:
        print_tb(e)
        raise e

def main():
    Ebb = cfg.Ebb
    if len(sys.argv) == 2:
        address = str(sys.argv[1])
        print(Ebb.get_balance(address))
    else:
        log("E: Provide an address as an argument")

def check_link_folders(folders_to_share, registered_data_files, is_pass=False):
    is_continue = False
    if registered_data_files:
        is_continue = True
        for data_file in registered_data_files:
            if isinstance(data_file, bytes):
                data_file = data_file.decode("utf-8")

            log(f"[bold green] * {data_file}[/bold green] => [bold yellow]../data_link/{data_file}[/bold yellow]")

    if folders_to_share:
        is_continue = True
        path_to = env.LINK_PATH / "base" / "data_link"
        check_linked_data(folders_to_share, path_to, is_pass)
        for folder in folders_to_share:
            if not os.path.isdir(folder):
                log(f"E: {folder} path does not exist")
    else:
        if is_continue:
            print("")
            if not is_pass:
                question_yes_no(
                    "#> Would you like to continue with the linked folder path in your run.sh?\n"
                    "If no, please feel free to update your run.sh file and continue",
                    is_exit=True,
                )

def log_loop(event_filter, poll_interval: int = 2):
    """Return the triggered job event.

    SIGALRM(14) Term Timer signal from alarm(2).

    Note: This is by design; see PEP 475, and the documentation
    <https://docs.python.org/3.5/library/time.html#time.sleep>.
    If you make your signal handler raise an exception, it will interrupt
    the sleep() call most of the time. But if the signal happens to be
    received just before the sleep() call is about to be entered, the
    handler will only be run when the underlying OS sleep() call returns
    10 s later.
    """
    sleep_duration = 0
    while True:
        block_num = cfg.Ebb.get_block_number()
        since_time = datetime.timedelta(seconds=sleep_duration)
        sys.stdout.write(
            f"\r{Style.GREENB}##{Style.END} {Style.B}[{Style.E}"
            f"{Style.YELLOWB}block_num{Style.END}={Style.CYANB}{block_num}{Style.END}{Style.B}]{Style.E} "
            f"waiting for job events since {Style.CYANB}{since_time}{Style.END} "
        )
        sys.stdout.flush()
        logged_jobs = event_filter.get_new_entries()
        if len(logged_jobs) > 0:
            log()
            return logged_jobs

        sleep_duration += poll_interval
        with suppress(Exception):  # may end up in handler() at _utils/tools.py
            time.sleep(poll_interval)

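# Usage sketch for log_loop (hedged): a typical call site builds a LogJob
# event filter starting from the latest block and blocks until at least one
# entry arrives. The filter construction below mirrors the event-filter style
# used in get_requester_info() above; it is an assumption for illustration,
# not the exact call site.
def _example_log_loop():
    event_filter = cfg.Ebb._eBlocBroker.events.LogJob.createFilter(fromBlock="latest")
    logged_jobs = log_loop(event_filter, poll_interval=2)
    for job in logged_jobs:
        log(vars(job.args))
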
def is_geth_on():
    """Check whether geth is running in the background."""
    process_name = f"geth@{env.RPC_PORT}"
    if not is_process_on(process_name, "Geth", process_count=0):
        log(f"E: geth is not running in the background, {process_name}. Please run:")
        log("sudo ~/eBlocPOA/server.sh", "bold yellow")
        raise QuietExit

def mine(block_number):
    """Mine the given number of blocks in brownie testing.

    You can only advance the time by whole seconds.

    __ https://stackoverflow.com/a/775075/2402577
    __ https://stackoverflow.com/a/775095/2402577
    """
    if block_number == cfg.BLOCK_DURATION_1_HOUR:
        log(f"## mining for {cfg.BLOCK_DURATION_1_HOUR} blocks...")

    seconds = block_number * cfg.BLOCK_DURATION
    height = w3.eth.blockNumber
    timestamp_temp = w3.eth.getBlock(height)["timestamp"]
    timedelta = cfg.BLOCK_DURATION * block_number
    config.chain.mine(blocks=int(block_number), timedelta=timedelta)
    timestamp_after = w3.eth.getBlock(w3.eth.blockNumber)["timestamp"]
    log(
        f"==> Mined {block_number} empty blocks | {datetime.timedelta(seconds=seconds)} | "
        f"{height} => {w3.eth.blockNumber} | "
        f"{timestamp_temp} => {timestamp_after} diff={timestamp_after - timestamp_temp}",
        "bold",
    )
    assert w3.eth.blockNumber == height + block_number and (timestamp_after - timestamp_temp) + 1 >= timedelta

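# Worked example (illustrative; cfg.BLOCK_DURATION is assumed to be 15 s):
# mine(240) advances the chain by 240 blocks and timedelta = 15 * 240 = 3600 s,
# i.e. one hour of chain time, which the closing assert then verifies.
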
def sleep_timer(sleep_duration):
    log(f"## Sleeping for {sleep_duration} seconds, called from {WHERE(1)}")
    for remaining in range(sleep_duration, 0, -1):
        sys.stdout.write("\r")
        sys.stdout.write("{:1d} seconds remaining...".format(remaining))
        sys.stdout.flush()
        time.sleep(1)

    sys.stdout.write("\rsleeping is done \n")

def balances():
    """Print the balance of all users located under ~/.brownie/accounts."""
    for account in users:
        _account = account.lower().replace("0x", "")
        fname = Path(expanduser("~/.brownie/accounts")) / _account
        print(fname)
        account = Ebb.brownie_load_account(str(fname), "alper")
        log(Ebb._get_balance(account))

def set_storage_cost(self):
    """Calculate the storage and cache costs."""
    self.storage_cost = 0
    self.cache_cost = 0
    self.data_transfer_in_sum = 0
    for idx, source_code_hash in enumerate(self.job.code_hashes):
        if self.is_brownie:
            ds = self.create_data_storage(source_code_hash)
        else:
            ds = self.create_data_storage(self.job.code_hashes_str[idx])

        if ds.received_block + ds.storage_duration < self.w3.eth.block_number:
            # storage time is completed
            ds.received_storage_deposit = 0

        try:
            _source_code_hash = source_code_hash.decode("utf-8")
        except:
            _source_code_hash = bytes32_to_ipfs(source_code_hash)

        log(f"==> is_private{br(_source_code_hash, 'blue')}={ds.is_private}")
        # print(received_block + storage_duration >= self.w3.eth.block_number)
        # if ds.received_storage_deposit > 0 or
        if (
            ds.received_storage_deposit > 0
            and ds.received_block + ds.storage_duration >= self.w3.eth.block_number
        ) or (
            ds.received_block + ds.storage_duration >= self.w3.eth.block_number
            and not ds.is_private
            and ds.is_verified_used
        ):
            log(f"==> For {bytes32_to_ipfs(source_code_hash)} cost of storage is not paid")
        else:
            if self.job.data_prices_set_block_numbers[idx] > 0:
                # if true, the registered data's price should be considered for storage
                output = self.ebb.getRegisteredDataPrice(
                    self.job.provider,
                    source_code_hash,
                    self.job.data_prices_set_block_numbers[idx],
                )
                data_price = output[0]
                self.storage_cost += data_price
                break

            # if not ds.received_storage_deposit and (received_block + storage_duration < w3.eth.block_number):
            if not ds.received_storage_deposit:
                self.data_transfer_in_sum += self.job.data_transfer_ins[idx]
                if self.job.storage_hours[idx] > 0:
                    self.storage_cost += (
                        self.price_storage * self.job.data_transfer_ins[idx] * self.job.storage_hours[idx]
                    )
                else:
                    self.cache_cost += self.price_cache * self.job.data_transfer_ins[idx]

    self.data_transfer_in_cost = self.price_data_transfer * self.data_transfer_in_sum
    self.data_transfer_out_cost = self.price_data_transfer * self.job.data_transfer_out
    self.data_transfer_cost = self.data_transfer_in_cost + self.data_transfer_out_cost

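# Worked example for the unpaid-storage branch above (illustrative numbers):
# with price_storage=1, price_cache=1, a 100 MB input (data_transfer_ins[idx]=100)
# and storage_hours[idx]=2, the charge is storage_cost += 1 * 100 * 2 = 200;
# with storage_hours[idx]=0 the same input is billed to the cache instead:
# cache_cost += 1 * 100 = 100.
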
def bytes32_to_ipfs(bytes_array):
    """Convert bytes_array into IPFS hash format."""
    if isinstance(bytes_array, bytes):
        merge = Qm + bytes_array
        return base58.b58encode(merge).decode("utf-8")
    else:
        log(f"bytes_array={bytes_array} is not a bytes instance")
        return bytes_array

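# Round-trip sketch (hedged): an IPFS CIDv0 hash is the base58 encoding of a
# multihash whose first two bytes are 0x12 0x20 (sha2-256, 32-byte digest);
# the `Qm` constant above is assumed to hold exactly those two prefix bytes,
# so the inverse conversion just strips them after base58-decoding.
def _example_bytes32_to_ipfs():
    ipfs_hash = "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG"
    as_bytes32 = base58.b58decode(ipfs_hash)[2:]  # drop the 0x12 0x20 prefix
    assert bytes32_to_ipfs(as_bytes32) == ipfs_hash
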
def cost(self, provider, requester):
    """Calculate the cost related to the given job."""
    log("==> Entered into the cost calculation...")
    self.provider = provider
    self.requester = requester
    self.check()
    jp = JobPrices(self)
    jp.set_computational_cost()
    jp.set_storage_cost()  # it seems to break here DELETE
    jp.set_job_price()
    return jp.job_price, jp.cost

def print_before_submit(self):
    for idx, source_code_hash in enumerate(self.code_hashes_str):
        print_temp = {
            "path": self.paths[idx],
            "source_code_hash": source_code_hash,
            "folder_size_mb": self.data_transfer_ins[idx],
            "storage_ids": StorageID(self.storage_ids[idx]).name,
            "cache_type": CacheType(self.cache_types[idx]).name,
        }
        log(print_temp)

    log()

def run_stdout_to_file(cmd, path, mode="w") -> None:
    """Run the command and pipe its output into the given file."""
    p, output, error = popen_communicate(cmd, stdout_fn=path, mode=mode)
    if p.returncode != 0 or (isinstance(error, str) and "error:" in error):
        _cmd = " ".join(cmd)
        log(f"\n{_cmd}", "red")
        raise Exception(f"E: scontrol error:\n{output}")

    # log(f"## writing into path({path}) is completed")
    run(["sed", "-i", "s/[ \t]*$//", path])  # remove trailing whitespaces with sed

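# Usage sketch (illustrative only; the output file name is hypothetical):
# capture `scontrol show job` output into a file, after which the sed pass
# strips the trailing whitespace that Slurm tools tend to emit.
def _example_run_stdout_to_file():
    run_stdout_to_file(["scontrol", "show", "job"], "/tmp/scontrol_job.out")
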
def connect_into_eblocbroker() -> None:
    """Connect into the ebloc-broker contract on the given blockchain."""
    if config.ebb:
        return

    if not cfg.w3:
        connect_into_web3()

    if not env.EBLOCPATH:
        log("E: EBLOCPATH variable is empty")
        raise QuietExit

    try:
        abi_file = env.EBLOCPATH / "broker" / "eblocbroker_scripts" / "abi.json"
        abi = read_json(abi_file, is_dict=False)
    except Exception as e:
        raise Exception(f"E: could not read the abi.json file: {abi_file}") from e

    try:
        if env.IS_BLOXBERG:
            if not cfg.IS_BROWNIE_TEST:
                from brownie import network, project

                try:
                    network.connect("bloxberg")
                except Exception as e:
                    print_tb(e)
                    add_bloxberg_into_network_config.main()
                    # network.connect("bloxberg")
                    try:
                        log(
                            "warning: [green]bloxberg[/green] key is added into the "
                            "[magenta]~/.brownie/network-config.yaml[/magenta] yaml file. Please try again."
                        )
                        network.connect("bloxberg")
                    except KeyError:
                        sys.exit(1)

                project = project.load(env.CONTRACT_PROJECT_PATH)
                config.ebb = project.eBlocBroker.at(env.CONTRACT_ADDRESS)
                config.ebb.contract_address = cfg.w3.toChecksumAddress(env.CONTRACT_ADDRESS)
                #: for the contract's events
                config._eBlocBroker = cfg.w3.eth.contract(env.CONTRACT_ADDRESS, abi=abi)
        elif env.IS_EBLOCPOA:
            config.ebb = cfg.w3.eth.contract(env.CONTRACT_ADDRESS, abi=abi)
            config._eBlocBroker = config.ebb
            config.ebb.contract_address = cfg.w3.toChecksumAddress(env.CONTRACT_ADDRESS)
    except Exception as e:
        print_tb(e)
        raise e

def is_contract_exists(self) -> bool:
    try:
        contract = self._get_contract_yaml()
    except Exception as e:
        raise e

    contract_address = self.w3.toChecksumAddress(contract["address"])
    if self.w3.eth.get_code(contract_address) == "0x" or self.w3.eth.get_code(contract_address) == b"":
        raise Exception("Empty contract")

    log(f"==> contract_address={contract_address.lower()}")
    return True

def set_provider_info(self):
    """Set provider info into variables."""
    if self.Ebb.does_provider_exist(self.job.provider):
        *_, provider_price_info = self.Ebb._get_provider_info(self.job.provider, 0)
    else:
        log(f"E: {self.job.provider} does not exist as a provider")
        raise QuietExit

    self.price_core_min = provider_price_info[2]
    self.price_data_transfer = provider_price_info[3]
    self.price_storage = provider_price_info[4]
    self.price_cache = provider_price_info[5]

def main():
    provider = env.PROVIDER_ID
    pre_check_data(provider)
    Ebb = cfg.Ebb
    data_hash = b"f13d75bc60898f0823566347e380a34d"
    try:
        if is_data_registered(provider, data_hash):
            tx = Ebb.remove_registered_data(data_hash)
            get_tx_status(Ebb.tx_id(tx))
        else:
            log(f"## data({data_hash}) is already deleted or not registered")
    except Exception as e:
        print_tb(e)

def check_account_status(self, _from):
    try:
        if isinstance(_from, int):
            _from = self.Ebb.account_id_to_address(_from)

        if not env.IS_BLOXBERG and is_geth_account_locked(_from):
            log(f"E: Account({_from}) is locked")
            raise QuietExit

        if not self.Ebb.does_requester_exist(_from):
            log(f"E: Requester's Ethereum address {_from} is not registered")
            sys.exit(1)

        *_, orcid = self.Ebb.get_requester_info(_from)
        if not self.Ebb.is_orcid_verified(_from):
            if orcid != empty_bytes32:
                log(f"E: Requester({_from})'s orcid: {orcid.decode('UTF')} is not verified")
            else:
                log(f"E: Requester({_from})'s orcid is not registered")

            raise QuietExit
    except QuietExit:
        sys.exit(1)
    except:
        print_tb()
        sys.exit(1)

def is_gzip_file_empty(filename):
    """Check whether the given gzip file is empty or not.

    cmd: gzip -l filename.gz | awk 'NR==2 {print $2}'
    """
    p1 = Popen(["gzip", "-l", filename], stdout=PIPE, env={"LC_ALL": "C"})
    p2 = Popen(["awk", "NR==2 {print $2}"], stdin=p1.stdout, stdout=PIPE)
    p1.stdout.close()
    size = p2.communicate()[0].decode("utf-8").strip()
    if bool(int(size)):
        return False

    log(f"==> Created gzip file is empty:\n    [magenta]{filename}[/magenta]")
    return True

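# Self-check sketch (illustrative only): gzip-compressing zero bytes yields a
# valid archive whose uncompressed-size column reads 0, which the function
# reports as empty.
def _example_is_gzip_file_empty():
    import gzip

    fn = "/tmp/_empty.gz"
    with gzip.open(fn, "wb"):
        pass  # write nothing

    assert is_gzip_file_empty(fn)
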
def main():
    Ebb = cfg.Ebb
    is_write_to_file = False
    if len(sys.argv) == 2:
        if sys.argv[1] in ("1", "True", "true"):
            is_write_to_file = True

    try:
        output = Ebb.get_block_number()
        if is_write_to_file:
            env.config["block_continue"] = output
        else:
            log(f"block_number={output}", "bold")
    except Exception as e:
        print_tb(e)

def decrypt_using_gpg(self, gpg_file, extract_target=None):
    """Decrypt the compressed file using gpg.

    This function is specific for using on driver.ipfs to decrypt the tar file,
    specific for "tar.gz" file types.

    cmd:
    gpg --verbose --output={tar_file} --pinentry-mode loopback \
        --passphrase-file=f"{env.LOG_PATH}/gpg_pass.txt" \
        --decrypt {gpg_file_link}
    """
    if not os.path.isfile(f"{gpg_file}.gpg"):
        os.symlink(gpg_file, f"{gpg_file}.gpg")

    gpg_file_link = f"{gpg_file}.gpg"
    tar_file = f"{gpg_file}.tar.gz"
    cmd = [
        "gpg",
        "--verbose",
        "--batch",
        "--yes",
        f"--output={tar_file}",
        "--pinentry-mode",
        "loopback",
        f"--passphrase-file={env.GPG_PASS_FILE}",
        "--decrypt",
        gpg_file_link,
    ]
    try:
        run(cmd)
        log(f"==> GPG decrypt {ok()}")
        _remove(gpg_file)
        os.unlink(gpg_file_link)
    except Exception as e:
        print_tb(e)
        raise e
    # finally:
    #     os.unlink(gpg_file_link)

    if extract_target:
        try:
            untar(tar_file, extract_target)
        except Exception as e:
            raise Exception("E: Could not extract the given tar file") from e
        finally:
            cmd = None
            _remove(f"{extract_target}/.git")
            _remove(tar_file)

def is_provider_info_match(self, email, ipfs_id, gpg_fingerprint, f_id):
    try:
        provider_info = self.get_provider_info(env.PROVIDER_ID)
        if (
            provider_info["gpg_fingerprint"] == gpg_fingerprint.upper()
            and provider_info["email"] == email
            and provider_info["f_id"] == f_id
            and provider_info["ipfs_id"] == ipfs_id
        ):
            log(provider_info)
            raise QuietExit("warning: Given information is the same as the cluster's saved info. Nothing to do.")

        tx = self._update_provider_info(f"0x{gpg_fingerprint}", email, f_id, ipfs_id)
        return self.tx_id(tx)
    except Exception as e:
        raise e

def is_internet_on(host="8.8.8.8", port=53, timeout=3) -> bool:
    """Check whether the internet connection is online.

    Host: 8.8.8.8 (google-public-dns-a.google.com)
    OpenPort: 53/tcp
    Service: domain (DNS/TCP)

    __ https://stackoverflow.com/a/33117579/2402577
    """
    try:
        socket.setdefaulttimeout(timeout)
        socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
        return True
    except socket.error as e:
        log(f"E: {e}")
        return False

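# Usage sketch (hedged): probe Google's public DNS first and fall back to
# Cloudflare's 1.1.1.1 before concluding that the connection is down.
def _example_is_internet_on():
    if not is_internet_on() and not is_internet_on(host="1.1.1.1"):
        log("E: no internet connection")
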