def main(*args):
    env = ENV_BASE()
    from brownie import network, project

    _args = make_tuple(str(args))
    network.connect("bloxberg")
    project = project.load(env.CONTRACT_PROJECT_PATH)
    ebb = project.eBlocBroker.at(env.CONTRACT_ADDRESS)
    provider = cfg.w3.toChecksumAddress(_args[0])
    job_requester = cfg.w3.toChecksumAddress(_args[1])
    try:
        source_code_hash = ipfs_to_bytes32(_args[2])
    except Exception:
        source_code_hash = _args[2].encode("utf-8")

    try:
        output = ebb.getStorageDeposit(provider, job_requester, source_code_hash)
        print(output)
    except Exception:
        print("0")  # if it is the first submitted job for the user

    try:
        output = ebb.getStorageInfo(provider, source_code_hash)
        print(output)
    except Exception:
        print("(0, 0, False, False)")

    sys.exit(0)
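# Usage sketch (hypothetical invocation; the script name and argument passing are
# assumptions based on main(*args) and make_tuple(str(args)) above — brownie hands
# the extra CLI tokens to the function as strings):
#
#   brownie run <this_script> main <provider_address> <job_requester_address> \
#       <source_code_hash>
#
# The third argument is first interpreted as an IPFS hash (via ipfs_to_bytes32);
# if the conversion fails, it is used as a UTF-8 encoded byte string instead.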
def get_storage_info(self, provider, requester, code_hash):
    """Return the received storage deposit and storage info for the corresponding source code hash."""
    if isinstance(code_hash, str):
        with suppress(Exception):
            code_hash = ipfs_to_bytes32(code_hash)

    if self.eBlocBroker is not None:
        if env.IS_BLOXBERG:
            return self.eBlocBroker.getStorageInfo(provider, requester, code_hash)
        else:
            return self.eBlocBroker.functions.getStorageInfo(provider, requester, code_hash).call()
    else:
        raise Exception("Contract object's eBlocBroker variable is None")
def get_job_storage_duration(self, provider, requester, code_hash):
    """Return the job's storage duration."""
    if not isinstance(provider, (Account, LocalAccount)):
        provider = self.w3.toChecksumAddress(provider)

    if isinstance(code_hash, str):
        with suppress(Exception):
            code_hash = ipfs_to_bytes32(code_hash)

    if self.eBlocBroker is not None:
        if env.IS_BLOXBERG:
            return self.eBlocBroker.getStorageInfo(provider, requester, code_hash)
        else:
            return self.eBlocBroker.functions.getStorageInfo(provider, requester, code_hash).call()
    else:
        raise Exception("Contract object's eBlocBroker variable is None")
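# Return-value sketch (hedged, inferred from how the tests below unpack
# getStorageInfo: the first element is the received deposit, the last is the
# data storage struct):
#
#   deposit, *_ = Ebb.get_storage_info(provider, requester, code_hash)
#   *_, storage_info = Ebb.get_storage_info(provider, requester, code_hash)
#   received_block, storage_duration, is_private, is_verified_used = storage_info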
def test_workflow():
    job = Job()
    provider = accounts[0]
    requester = accounts[1]
    register_provider()
    register_requester(requester)
    job_key = "QmQv4AAL8DZNxZeK3jfJGJi63v1msLMZGan7vSsCDXzZud"
    code_hash = ipfs_to_bytes32(job_key)
    with brownie.reverts():  # updating the price of unregistered data should revert
        ebb.updataDataPrice(code_hash, 20, 100, {"from": provider})

    ebb.registerData(code_hash, 20, cfg.BLOCK_DURATION_1_HOUR, {"from": provider})
    ebb.removeRegisteredData(code_hash, {"from": provider})  # submitJob should fail if the data is not removed
    code_hash1 = "0x68b8d8218e730fc2957bcb12119cb204"  # web3.toBytes(hexstr=ipfs_to_bytes32("QmWmyoMoctfbAaiEs2G46gpeUmhqFRDW6KWo64y5r581Ve"))
    ebb.registerData(code_hash1, 20, cfg.BLOCK_DURATION_1_HOUR, {"from": provider})
    mine(6)
    with brownie.reverts():  # registering already-registered data should revert
        ebb.registerData(code_hash1, 20, 1000, {"from": provider})

    ebb.updataDataPrice(code_hash1, 250, cfg.BLOCK_DURATION_1_HOUR + 1, {"from": provider})
    data_block_numbers = ebb.getRegisteredDataBlockNumbers(provider, code_hash1)
    log(f"get_registered_data_block_numbers={data_block_numbers[1]}", "bold")
    get_block_number()
    data_prices = ebb.getRegisteredDataPrice(provider, code_hash1, 0)
    log(f"register_data_price={data_prices}", "bold")
    assert data_prices[0] == 20
    res = ebb.getRegisteredDataPrice(provider, code_hash1, data_block_numbers[1])
    log(f"register_data_price={res}", "bold")
    assert res[0] == 250
    mine(cfg.BLOCK_DURATION_1_HOUR - 9)
    res = ebb.getRegisteredDataPrice(provider, code_hash1, 0)
    log(f"register_data_price={res}", "bold")
    assert res[0] == 20
    mine(1)
    res = ebb.getRegisteredDataPrice(provider, code_hash1, 0)
    log(f"register_data_price={res}", "bold")
    assert res[0] == 250
    job.code_hashes = [code_hash, code_hash1]  # hashes of the data files
    job.storage_hours = [0, 0]
    job.data_transfer_ins = [100, 0]
    job.data_transfer_out = 100
    # job.data_prices_set_block_numbers = [0, 253]  # TODO: check whether this block number (e.g., 253) exists
    job.data_prices_set_block_numbers = [0, data_block_numbers[1]]
    check_price_keys(job.data_prices_set_block_numbers, provider, code_hash1)
    job.cores = [2, 4, 2]
    job.run_time = [10, 15, 20]
    job.storage_ids = [StorageID.IPFS.value, StorageID.NONE.value]
    job.cache_types = [CacheType.PUBLIC.value, CacheType.PUBLIC.value]
    args = [
        provider,
        ebb.getProviderSetBlockNumbers(accounts[0])[-1],
        job.storage_ids,
        job.cache_types,
        job.data_prices_set_block_numbers,
        job.cores,
        job.run_time,
        job.data_transfer_out,
    ]
    job_price, _cost = job.cost(provider, requester)
    tx = ebb.submitJob(  # first submit
        job_key,
        job.data_transfer_ins,
        args,
        job.storage_hours,
        job.code_hashes,
        {"from": requester, "value": web3.toWei(job_price, "wei")},
    )
    for idx in range(0, 3):
        log(ebb.getJobInfo(provider, job_key, 0, idx))

    console_ruler(character="-=")
    assert (
        tx.events["LogRegisteredDataRequestToUse"][0]["registeredDataHash"]
        == "0x0000000000000000000000000000000068b8d8218e730fc2957bcb12119cb204"
    ), "registered data should be used"
    with brownie.reverts():
        log(ebb.getJobInfo(provider, job_key, 1, 2))
        log(ebb.getJobInfo(provider, job_key, 0, 3))

    # setJobStatus for the workflow:
    index = 0
    job_id = 0
    start_time = 10
    tx = ebb.setJobStatusRunning(job_key, index, job_id, start_time, {"from": accounts[0]})
    index = 0
    job_id = 1
    start_time = 20
    tx = ebb.setJobStatusRunning(job_key, index, job_id, start_time, {"from": accounts[0]})
    # processPayment for the workflow:
    index = 0
    job_id = 0
    execution_time = 10
    data_transfer = [100, 0]
    end_time = 20
    result_ipfs_hash = ipfs_to_bytes32("QmWmyoMoctfbAaiEs2G46gpeUmhqFRDW6KWo64y5r581Ve")
    received_sums = []
    refunded_sums = []
    received_sum = 0
    refunded_sum = 0
    args = [index, job_id, end_time, data_transfer[0], data_transfer[1], job.cores, job.run_time, False]
    tx = ebb.processPayment(job_key, args, execution_time, result_ipfs_hash, {"from": accounts[0]})
    # log(tx.events['LogProcessPayment'])
    received_sums.append(tx.events["LogProcessPayment"]["receivedWei"])
    refunded_sums.append(tx.events["LogProcessPayment"]["refundedWei"])
    received_sum += tx.events["LogProcessPayment"]["receivedWei"]
    refunded_sum += tx.events["LogProcessPayment"]["refundedWei"]
    log(f"received_sum={received_sum} | refunded_sum={refunded_sum} | job_price={job_price}")
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    index = 0
    job_id = 1
    execution_time = 15
    data_transfer = [0, 0]
    end_time = 39
    result_ipfs_hash = ipfs_to_bytes32("QmWmyoMoctfbAaiEs2G46gpeUmhqFRDW6KWo64y5r581Ve")
    args = [index, job_id, end_time, data_transfer[0], data_transfer[1], job.cores, job.run_time, False]
    tx = ebb.processPayment(job_key, args, execution_time, result_ipfs_hash, {"from": accounts[0]})
    received_sums.append(tx.events["LogProcessPayment"]["receivedWei"])
    refunded_sums.append(tx.events["LogProcessPayment"]["refundedWei"])
    received_sum += tx.events["LogProcessPayment"]["receivedWei"]
    refunded_sum += tx.events["LogProcessPayment"]["refundedWei"]
    log(f"received_sum={received_sum} | refunded_sum={refunded_sum} | job_price={job_price}")
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    index = 0
    job_id = 2
    execution_time = 20
    data_transfer = [0, 100]
    end_time = 39
    result_ipfs_hash = ipfs_to_bytes32("QmWmyoMoctfbAaiEs2G46gpeUmhqFRDW6KWo64y5r581Ve")
    with brownie.reverts():  # processPayment should revert since setJobStatusRunning is not called for job_id=2
        args = [
            index,
            job_id,
            end_time,
            data_transfer[0],
            data_transfer[1],
            job.cores,
            job.run_time,
            False,
        ]
        tx = ebb.processPayment(job_key, args, execution_time, result_ipfs_hash, {"from": accounts[0]})

    index = 0
    job_id = 2
    start_time = 20
    tx = ebb.setJobStatusRunning(job_key, index, job_id, start_time, {"from": accounts[0]})
    args = [index, job_id, end_time, data_transfer[0], data_transfer[1], job.cores, job.run_time, True]
    tx = ebb.processPayment(job_key, args, execution_time, result_ipfs_hash, {"from": accounts[0]})
    # log(tx.events['LogProcessPayment'])
    received_sums.append(tx.events["LogProcessPayment"]["receivedWei"])
    refunded_sums.append(tx.events["LogProcessPayment"]["refundedWei"])
    received_sum += tx.events["LogProcessPayment"]["receivedWei"]
    refunded_sum += tx.events["LogProcessPayment"]["refundedWei"]
    log(f"received_sum={received_sum} | refunded_sum={refunded_sum} | job_price={job_price}")
    log(received_sums)
    log(refunded_sums)
    assert job_price - _cost["storage"] == received_sum + refunded_sum
    withdraw(accounts[0], received_sum)
    withdraw(requester, refunded_sum)
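# Settlement invariant exercised by the final assert above: everything the
# requester paid, apart from the storage deposits (which are held separately
# until their deadline), ends up either with the provider or refunded:
#
#   job_price == _cost["storage"] + received_sum + refunded_sum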
def test_multiple_data():
    job = Job()
    provider = accounts[0]
    requester = accounts[1]
    requester_1 = accounts[2]
    register_provider()
    register_requester(requester)
    register_requester(requester_1)
    job_key = "QmQv4AAL8DZNxZeK3jfJGJi63v1msLMZGan7vSsCDXzZud"
    job.code_hashes.append(ipfs_to_bytes32(job_key))
    job_key_2 = "QmVqtWxuBdZQdLnLce6XCBMuqoazAcbmuxoJHQbfbuqDu2"
    job.code_hashes.append(ipfs_to_bytes32(job_key_2))
    job.data_transfer_ins = [100, 100]
    job.data_transfer_out = 100
    job.storage_hours = [1, 1]
    job.data_prices_set_block_numbers = [0, 0]  # provider's registered data won't be used
    job.cores = [2]
    job.run_time = [10]
    provider_price_block_number = ebb.getProviderSetBlockNumbers(accounts[0])[-1]
    job.storage_ids = [StorageID.EUDAT.value, StorageID.IPFS.value]
    job.cache_types = [CacheType.PRIVATE.value, CacheType.PUBLIC.value]
    args = [
        provider,
        provider_price_block_number,
        job.storage_ids,
        job.cache_types,
        job.data_prices_set_block_numbers,
        job.cores,
        job.run_time,
        job.data_transfer_out,
    ]
    job_price, _cost = job.cost(provider, requester)
    # first time the job is submitted with the data files
    tx = ebb.submitJob(
        job_key,
        job.data_transfer_ins,
        args,
        job.storage_hours,
        job.code_hashes,
        {"from": requester, "value": web3.toWei(job_price, "wei")},
    )
    log(f"==> job_index={tx.events['LogJob']['index']}")
    log(tx.events["LogJob"]["jobKey"])
    assert _cost["storage"] == 200, "since the data is not verified yet, cost of storage should be 200"
    # the same user submits a job again with the same data files
    job_price, _cost = job.cost(provider, requester)
    assert _cost["storage"] == 0, "since cost of storage is already paid by the user, it should be 0"
    # a different user submits a job with the same data files
    job_price, _cost = job.cost(provider, requester_1)
    log(f"==> cost={_cost}")
    assert _cost["storage"] == 200, "since the data is not verified yet, cost of storage should be 200"
    # the cluster verifies the given data files for the related job
    index = 0
    is_verified_list = [True, True]
    tx = ebb.dataReceived(
        job_key,
        index,
        job.code_hashes,
        job.cache_types,
        is_verified_list,
        {"from": provider, "gas": 4500000},
    )
    # the same user submits a job again with the same data files
    job_price, _cost = job.cost(provider, requester)
    assert _cost["storage"] == 0, "since the data is verified, cost of storage should be 0"
    # a different user submits a job with the same data files
    job_price, _cost = job.cost(provider, requester_1)
    assert _cost["storage"] == 100, "since data1 is verified and public, only data0's storage (100) should be paid"
    # ds = scripts.DataStorage(provider, code_hashes[1], True)
    job_price, _cost = job.cost(provider, requester)
    assert _cost["storage"] == 0, "since storage is paid on the first job submission, it should be 0"
    assert _cost["data_transfer"] == job.data_transfer_out, "cost of data_transfer should cover only data_transfer_out"
    tx = ebb.submitJob(
        job_key,
        job.data_transfer_ins,
        args,
        job.storage_hours,
        job.code_hashes,
        {"from": requester, "value": web3.toWei(job_price, "wei")},
    )
    log(f"job_index={tx.events['LogJob']['index']}", "bold")
    # ===== provider side =====
    index = 0
    job_id = 0
    start_time = get_block_timestamp()
    execution_time = 10
    result_ipfs_hash = "0xabcd"
    tx = ebb.setJobStatusRunning(job_key, index, job_id, start_time, {"from": accounts[0]})
    mine(60 * execution_time / cfg.BLOCK_DURATION)
    end_time = start_time + 60 * execution_time
    block_timestamp = get_block_timestamp()
    assert end_time <= block_timestamp, f"end_time is ahead of the block timestamp, difference={block_timestamp - end_time}"
    args = [
        index,
        job_id,
        end_time,
        sum(job.data_transfer_ins),
        job.data_transfer_out,
        job.cores,
        job.run_time,
        False,
    ]
    tx = ebb.processPayment(job_key, args, execution_time, result_ipfs_hash, {"from": accounts[0]})
    received_sum = tx.events["LogProcessPayment"]["receivedWei"]
    refunded_sum = tx.events["LogProcessPayment"]["refundedWei"]
    log(f"received_sum={received_sum} refunded_sum={refunded_sum}", "bold")
    assert received_sum == 320 and refunded_sum == 0
    withdraw(accounts[0], received_sum)
    withdraw(requester, refunded_sum)
    data_transfer_in = 0  # already requested on index==0
    data_transfer_out = 100
    data_transfer = [data_transfer_in, data_transfer_out]
    index = 1
    job_id = 0
    start_time = get_block_timestamp()
    execution_time = 10
    result_ipfs_hash = "0xabcd"
    tx = ebb.setJobStatusRunning(job_key, index, job_id, start_time, {"from": accounts[0]})
    mine(60 * execution_time / cfg.BLOCK_DURATION)
    end_time = start_time + 60 * execution_time
    args = [index, job_id, end_time, data_transfer[0], data_transfer[1], job.cores, job.run_time, False]
    tx = ebb.processPayment(job_key, args, execution_time, result_ipfs_hash, {"from": accounts[0]})
    # log(tx.events['LogProcessPayment'])
    received_sum = tx.events["LogProcessPayment"]["receivedWei"]
    refunded_sum = tx.events["LogProcessPayment"]["refundedWei"]
    log(f"received_sum={received_sum} refunded_sum={refunded_sum}", "bold")
    assert received_sum == 120 and refunded_sum == 0
    withdraw(accounts[0], received_sum)
    withdraw(requester, refunded_sum)
def test_storage_refund():
    job = Job()
    provider = accounts[0]
    requester = accounts[1]
    register_provider()
    register_requester(requester)
    job_key = "QmQv4AAL8DZNxZeK3jfJGJi63v1msLMZGan7vSsCDXzZud"
    job.code_hashes.append(ipfs_to_bytes32(job_key))
    job.storage_hours.append(1)
    job_key_2 = "QmVqtWxuBdZQdLnLce6XCBMuqoazAcbmuxoJHQbfbuqDu2"
    job.code_hashes.append(ipfs_to_bytes32(job_key_2))
    job.storage_hours.append(1)
    job.data_transfer_ins = [100, 100]
    job.data_transfer_out = 100
    job.data_prices_set_block_numbers = [0, 0]  # provider's registered data won't be used
    job.cores = [2]
    job.run_time = [10]
    job.provider_price_block_number = ebb.getProviderSetBlockNumbers(accounts[0])[-1]
    job.storage_ids = [StorageID.EUDAT.value, StorageID.IPFS.value]
    job.cache_types = [CacheType.PRIVATE.value, CacheType.PUBLIC.value]
    job_price, _cost = job.cost(provider, requester)
    job_price += 1  # 1 wei extra is paid for the test
    args = [
        provider,
        job.provider_price_block_number,
        job.storage_ids,
        job.cache_types,
        job.data_prices_set_block_numbers,
        job.cores,
        job.run_time,
        job.data_transfer_out,
    ]
    tx = ebb.submitJob(
        job_key,
        job.data_transfer_ins,
        args,
        job.storage_hours,
        job.code_hashes,
        {"from": requester, "value": web3.toWei(job_price, "wei")},
    )
    refunded = tx.events["LogJob"]["refunded"]
    log(f"==> job_index={tx.events['LogJob']['index']}")
    log(f"refunded={refunded}", "bold")
    log(tx.events["LogJob"]["jobKey"])
    assert requester == tx.events["LogJob"]["owner"]
    withdraw(requester, refunded)  # check that the 1 wei extra payment is refunded
    index = 0
    job_id = 0
    tx = ebb.refund(provider, job_key, index, job_id, job.cores, job.run_time, {"from": provider})
    log(ebb.getJobInfo(provider, job_key, index, job_id))
    refunded_wei = tx.events["LogRefundRequest"]["refundedWei"]
    log(f"refunded_wei={refunded_wei}", "bold")
    withdraw(requester, refunded_wei)
    with brownie.reverts():  # VM Exception while processing transaction: invalid opcode
        ebb.getJobInfo(provider, job_key, 5, job_id)

    storage_cost_sum = 0
    for code_hash in job.code_hashes:
        _storage_cost_sum, *_ = ebb.getStorageInfo(provider, requester, code_hash)
        storage_cost_sum += _storage_cost_sum

    assert _cost["storage"] == storage_cost_sum
    assert _cost["computational"] + _cost["data_transfer"] + _cost["cache"] == refunded_wei
    mine(cfg.BLOCK_DURATION_1_HOUR)
    tx = ebb.refundStorageDeposit(provider, requester, job.code_hashes[0], {"from": requester, "gas": 4500000})
    refunded_wei = tx.events["LogDepositStorage"]["payment"]
    log(f"refunded_wei={refunded_wei}", "bold")
    withdraw(requester, refunded_wei)
    with brownie.reverts():  # refunding the same storage deposit twice should revert
        tx = ebb.refundStorageDeposit(provider, requester, job.code_hashes[0], {"from": requester, "gas": 4500000})

    tx = ebb.refundStorageDeposit(provider, requester, job.code_hashes[1], {"from": requester, "gas": 4500000})
    refunded_wei = tx.events["LogDepositStorage"]["payment"]
    paid_address = tx.events["LogDepositStorage"]["paidAddress"]
    withdraw(requester, refunded_wei)
    with brownie.reverts():
        tx = ebb.refundStorageDeposit(provider, requester, job.code_hashes[0], {"from": requester, "gas": 4500000})

    assert requester == paid_address
    assert ebb.balanceOf(provider) == 0
    console_ruler("same job submitted after full refund", color="blue")
    tx = ebb.submitJob(
        job_key,
        job.data_transfer_ins,
        args,
        job.storage_hours,
        job.code_hashes,
        {"from": requester, "value": web3.toWei(job_price, "wei")},
    )
    log(f"job_index={tx.events['LogJob']['index']}", "bold")
    log(tx.events["LogJob"]["jobKey"])
    index = 1
    job_id = 0
    tx = ebb.refund(provider, job_key, index, job_id, job.cores, job.run_time, {"from": provider})
    log(ebb.getJobInfo(provider, job_key, index, job_id))
    refunded_wei = tx.events["LogRefundRequest"]["refundedWei"]
    log(f"refunded_wei={refunded_wei}", "bold")
    assert _cost["computational"] + _cost["data_transfer"] + _cost["cache"] == refunded_wei
    storage_cost_sum = 0
    storage_payment = []
    for code_hash in job.code_hashes:
        deposit, *_ = ebb.getStorageInfo(provider, requester, code_hash)
        storage_payment.append(deposit)

    job.is_verified = [True, True]
    ebb.dataReceived(  # called by the provider
        job_key,
        index,
        job.code_hashes,
        job.cache_types,
        job.is_verified,
        {"from": provider, "gas": 4500000},
    )
    for code_hash in job.code_hashes:
        *_, output = ebb.getStorageInfo(provider, cfg.ZERO_ADDRESS, code_hash)
        log(output, "bold")

    with brownie.reverts():  # refundStorageDeposit should revert, because the data is already used by the provider
        for code_hash in job.code_hashes:
            tx = ebb.refundStorageDeposit(provider, requester, code_hash, {"from": requester, "gas": 4500000})

    tx = ebb.depositStorage(requester, job.code_hashes[0], {"from": provider, "gas": 4500000})
    mine(cfg.BLOCK_DURATION_1_HOUR)
    # after the deadline (1 hour) to store the data passes, the provider can collect the deposits
    for idx, code_hash in enumerate(job.code_hashes):
        tx = ebb.depositStorage(requester, code_hash, {"from": provider, "gas": 4500000})
        amount = tx.events["LogDepositStorage"]["payment"]
        withdraw(provider, amount)
        assert storage_payment[idx] == amount
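# Storage-deposit lifecycle covered by this test, as the assertions read: before
# the paid storage_hours deadline the requester may reclaim an unused deposit via
# refundStorageDeposit; once the provider reports the data with dataReceived,
# refunds revert; and after the deadline (mine(cfg.BLOCK_DURATION_1_HOUR)) the
# provider collects each deposit via depositStorage.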
def test_submit_job():
    job = Job()
    provider = accounts[0]
    requester = accounts[1]
    register_provider()
    register_requester(requester)
    fname = f"{cwd}/files/test.txt"
    # fname = f"{cwd}/files/test_.txt"
    log(f"==> registered_provider_addresses={ebb.getProviders()}")
    provider_price_info = ebb.getProviderInfo(accounts[0], 0)
    # block_read_from = provider_price_info[0]
    _provider_price_info = provider_price_info[1]
    available_core = _provider_price_info[0]
    # commitmentBlockDuration = _provider_price_info[1]
    price_core_min = _provider_price_info[2]
    # price_data_transfer = _provider_price_info[3]
    # price_storage = _provider_price_info[4]
    # price_cache = _provider_price_info[5]
    log(f"provider_available_core={available_core}")
    log(f"provider_price_core_min={price_core_min}")
    log(provider_price_info)
    job_price_sum = 0
    job_id = 0
    index = 0
    with open(fname) as f:
        for line in f:
            arguments = line.rstrip("\n").split(" ")
            storage_hour = 1
            core_min = int(arguments[1]) - int(arguments[0])
            core = int(arguments[2])
            job.cores = [core]
            job.run_time = [core_min]
            # time.sleep(1)
            # rpc.mine(int(arguments[0]))
            job_key = "QmQv4AAL8DZNxZeK3jfJGJi63v1msLMZGan7vSsCDXzZud"
            data_key = "QmQv4AAL8DZNxZeK3jfJGJi63v1msLMZGan7vSsCDXzZud"
            code_hash = ipfs_to_bytes32(data_key)
            # log("Client balance before: " + str(web3.eth.balanceOf(account)))
            # log("Contract balance before: " + str(web3.eth.balanceOf(accounts[0])))
            job.code_hashes = [code_hash]
            job.storage_hours = [storage_hour]
            job.data_transfer_ins = [100]
            job.data_transfer_out = 100
            job.data_prices_set_block_numbers = [0]
            job.storage_ids = [StorageID.IPFS.value]
            job.cache_types = [CacheType.PUBLIC.value]
            args = [
                provider,
                ebb.getProviderSetBlockNumbers(accounts[0])[-1],
                job.storage_ids,
                job.cache_types,
                job.data_prices_set_block_numbers,
                job.cores,
                job.run_time,
                job.data_transfer_out,
            ]
            # log(code_hashes[0])
            job_price, _cost = job.cost(provider, requester)
            job_price_sum += job_price
            data_transfer_ins = [100]
            job_key = job.storage_hours[0]
            tx = ebb.submitJob(
                job_key,
                data_transfer_ins,
                args,
                job.storage_hours,
                job.code_hashes,
                {"from": requester, "value": web3.toWei(job_price, "wei")},
            )
            # log('submitJob => gas_used=' + str(tx.__dict__['gas_used']) + ' | block_number=' + str(tx.block_number))
            log(f"job_index={tx.events['LogJob']['index']}", "bold")
            # log("Contract balance after: " + str(web3.eth.balanceOf(accounts[0])))
            # log("Client balance after: " + str(web3.eth.balanceOf(accounts[8])))
            log(ebb.getJobInfo(provider, job_key, index, job_id))
            index += 1

    log(f"total_paid={job_price_sum}")
    # log(block_read_from)
    # rpc.mine(100)
    # log(web3.eth.blockNumber)
    job_id = 0
    with open(fname) as f:
        for index, line in enumerate(f):
            arguments = line.rstrip("\n").split(" ")
            tx = ebb.setJobStatusRunning(job_key, index, job_id, int(arguments[0]), {"from": accounts[0]})
            if index == 0:
                with brownie.reverts():  # setting the same job to running again should revert
                    tx = ebb.setJobStatusRunning(job_key, index, job_id, int(arguments[0]) + 1, {"from": accounts[0]})

    console_ruler()
    result_ipfs_hash = ipfs_to_bytes32("QmWmyoMoctfbAaiEs2G46gpeUmhqFRDW6KWo64y5r581Ve")
    with open(fname) as f:
        for index, line in enumerate(f):
            arguments = line.rstrip("\n").split(" ")
            if index == 0:
                data_transfer_in_sum = 90
                job.data_transfer_out = 100
            else:
                data_transfer_in_sum = 0
                job.data_transfer_out = 100

            core_min = int(arguments[1]) - int(arguments[0])
            core = int(arguments[2])
            job.cores = [core]
            job.run_time = [core_min]
            log(f"contract_balance={ebb.getContractBalance()}", "bold")
            job_id = 0
            execution_time = int(arguments[1]) - int(arguments[0])
            end_time = int(arguments[1])
            args = [
                index,
                job_id,
                end_time,
                data_transfer_in_sum,
                job.data_transfer_out,
                job.cores,
                job.run_time,
                True,
            ]
            tx = ebb.processPayment(job_key, args, execution_time, result_ipfs_hash, {"from": accounts[0]})
            received = tx.events["LogProcessPayment"]["receivedWei"]
            refunded = tx.events["LogProcessPayment"]["refundedWei"]
            withdraw(accounts[0], received)
            withdraw(requester, refunded)
            log(f"received={received} | refunded={refunded}", "bold")

    log(f"contract_balance={ebb.getContractBalance()}", "bold")
    for idx in range(0, ebb.getProviderReceiptSize(provider)):
        # print the finalized version of the linked list
        log(ebb.getProviderReceiptNode(provider, idx))

    console_ruler()
    log(f"==> storage_duration for job={job_key}")
    *_, job_storage_info = ebb.getStorageInfo(provider, cfg.ZERO_ADDRESS, code_hash)
    ds = DataStorage(job_storage_info)
    log(
        f"received_block_number={ds.received_block} | "
        f"storage_duration(block numbers)={ds.storage_duration} | "
        f"is_private={ds.is_private} | "
        f"is_verified_used={ds.is_verified_used}"
    )
    received_storage_deposit, *_ = ebb.getStorageInfo(provider, requester, code_hash)
    log(f"received_storage_deposit={received_storage_deposit}")
    console_ruler("DONE")
def submit_ipfs(job: Job, is_pass=False, required_confs=1):
    Ebb = cfg.Ebb
    requester = Ebb.w3.toChecksumAddress(job.requester_addr)
    provider = Ebb.w3.toChecksumAddress(job.provider_addr)
    pre_check(job, requester)
    log("==> Attempting to submit a job")
    main_storage_id = job.storage_ids[0]
    job.folders_to_share = job.paths
    check_link_folders(job.data_paths, job.registered_data_files, is_pass=is_pass)
    if main_storage_id == StorageID.IPFS:
        log("==> Submitting source code through [blue]IPFS[/blue]")
    elif main_storage_id == StorageID.IPFS_GPG:
        log("==> Submitting source code through [blue]IPFS_GPG[/blue]")
    else:
        log("E: Please provide IPFS or IPFS_GPG storage type for the source code")
        sys.exit(1)

    targets = []
    try:
        provider_info = Ebb.get_provider_info(provider)
    except Exception as e:
        print_tb(e)
        sys.exit(1)

    for idx, folder in enumerate(job.folders_to_share):
        if isinstance(folder, Path):
            target = folder
            if job.storage_ids[idx] == StorageID.IPFS_GPG:
                provider_gpg_fingerprint = provider_info["gpg_fingerprint"]
                if not provider_gpg_fingerprint:
                    log("E: Provider did not register any GPG fingerprint")
                    sys.exit(1)

                log(f"==> provider_gpg_fingerprint={provider_gpg_fingerprint}")
                try:
                    # target is updated
                    target = cfg.ipfs.gpg_encrypt(provider_gpg_fingerprint, target)
                    log(f"==> gpg_file={target}")
                except Exception as e:
                    print_tb(e)
                    sys.exit(1)

            try:
                ipfs_hash = cfg.ipfs.add(target)
                # ipfs_hash = ipfs.add(folder, True)  # True includes .git/
                run(["ipfs", "refs", ipfs_hash])
            except Exception as e:
                print_tb(e)
                sys.exit(1)

            if idx == 0:
                key = ipfs_hash

            job.code_hashes.append(ipfs_to_bytes32(ipfs_hash))
            job.code_hashes_str.append(ipfs_hash)
            log(f"==> ipfs_hash={ipfs_hash} | md5sum={generate_md5sum(target)}")
            if main_storage_id == StorageID.IPFS_GPG:
                # the created gpg file will be removed since it is already in ipfs
                targets.append(target)
        else:
            code_hash = folder
            if isinstance(code_hash, bytes):
                job.code_hashes.append(code_hash)
                job.code_hashes_str.append(code_hash.decode("utf-8"))

            # TODO: if it is ipfs
            # if isinstance(code_hash, bytes):
            #     code_hash = code_hash.decode("utf-8")
            # if len(code_hash) == 32:
            #     value = cfg.w3.toBytes(text=code_hash)
            #     job.code_hashes.append(value)
            #     job.code_hashes_str.append(value.decode("utf-8"))
            # else:
            #     job.code_hashes.append(ipfs_to_bytes32(code_hash))
            #     job.code_hashes_str.append(code_hash)

        # if idx != len(job.folders_to_share) - 1:
        #     log("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-", "cyan")

    # requester inputs for testing purposes
    job.price, *_ = job.cost(provider, requester)
    try:
        tx_hash = Ebb.submit_job(provider, key, job, requester=requester, required_confs=required_confs)
        if required_confs >= 1:
            tx_receipt = get_tx_status(tx_hash)
            if tx_receipt["status"] == 1:
                processed_logs = Ebb._eBlocBroker.events.LogJob().processReceipt(tx_receipt, errors=DISCARD)
                try:
                    if processed_logs:
                        log("job_info:", "bold yellow")
                        log(vars(processed_logs[0].args))

                    for target in targets:
                        if ".tar.gz.gpg" in str(target):
                            _remove(target)
                except IndexError:
                    log(f"E: Tx={tx_hash} is reverted")
        else:
            pass
    except QuietExit:
        pass
    except Exception as e:
        print_tb(e)

    return tx_hash
def process_payment(
    self,
    job_key,
    index,
    job_id,
    elapsed_time,
    result_ipfs_hash,
    cloud_storage_ids,
    end_time,
    data_transfer_in,
    data_transfer_out,
    core,
    run_time,
    received_block_number=0,
):
    """Process the payment for a completed job."""
    log(
        f"~/ebloc-broker/broker/eblocbroker_scripts/process_payment.py {job_key} {index} {job_id} {elapsed_time}"
        f" {result_ipfs_hash} '{cloud_storage_ids}' {end_time} {data_transfer_in} {data_transfer_out} '{core}'"
        f" '{run_time}'",
        "bold blue",
    )
    for cloud_storage_id in cloud_storage_ids:
        if len(result_ipfs_hash) != 46 and cloud_storage_id in (StorageID.IPFS, StorageID.IPFS_GPG):
            raise Exception("Result ipfs hash's length does not match its original length, check your job_key")

    self.get_job_info(env.PROVIDER_ID, job_key, index, job_id, received_block_number, is_print=False)
    if self.job_info["stateCode"] == state.code["COMPLETED"]:
        log("warning: job is completed and has already been paid")
        sys.exit(1)

    try:
        if result_ipfs_hash == b"" or not result_ipfs_hash:
            result_ipfs_hash = ""
        else:
            result_ipfs_hash = ipfs_to_bytes32(result_ipfs_hash)

        final_job = True  # True only for the final job
        args = [
            int(index),
            int(job_id),
            int(end_time),
            int(data_transfer_in),
            int(data_transfer_out),
            core,
            run_time,
            final_job,
        ]
        tx = self._process_payment(job_key, args, int(elapsed_time), result_ipfs_hash)
    except Exception as e:
        print_tb(e)
        raise e

    return self.tx_id(tx)
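# Minimal call sketch (values are made up; `Ebb` is assumed to be an instance of
# the wrapper class that defines process_payment above):
#
#   tx_hash = Ebb.process_payment(
#       job_key="QmQv4AAL8DZNxZeK3jfJGJi63v1msLMZGan7vSsCDXzZud",
#       index=0,
#       job_id=0,
#       elapsed_time=10,
#       result_ipfs_hash="QmWmyoMoctfbAaiEs2G46gpeUmhqFRDW6KWo64y5r581Ve",
#       cloud_storage_ids=[StorageID.IPFS],
#       end_time=1659312000,
#       data_transfer_in=100,
#       data_transfer_out=100,
#       core=[1],
#       run_time=[10],
#   )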