def _update_data_price():
    Ebb = cfg.Ebb
    if not Ebb.does_provider_exist(env.PROVIDER_ID):
        log(f"warning: Provider {env.PROVIDER_ID} is not registered.\n")
        raise QuietExit

    source_code_hash = "b6aaf03752dc68d625fc57b451faa2bf"
    new_data_price = 21
    commitment_block_duration = 600
    source_code_hash_bytes = cfg.w3.toBytes(text=source_code_hash)
    try:
    (price, _commitment_block_duration) = Ebb.get_registered_data_prices(
            env.PROVIDER_ID, source_code_hash_bytes, 0
        )
        if price == new_data_price and _commitment_block_duration == commitment_block_duration:
            log(f"## data([green]{source_code_hash}[/green]) already registerered with the given values")
            raise QuietExit
    except QuietExit:
        raise
    except Exception as e:
        print_tb(e)
        raise QuietExit from e

    try:
        tx = Ebb.update_data_price(source_code_hash_bytes, new_data_price, commitment_block_duration)
        get_tx_status(Ebb.tx_id(tx))
    except QuietExit:
        pass
    except Exception as e:
        print_tb(e)
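
A hedged sketch of the QuietExit exception used throughout these examples: the class name comes from the code above, while the body is an assumption about its role as a "terminate silently" control-flow signal.

class QuietExit(Exception):
    """Raised to abort the current code path quietly, without printing a traceback."""

Callers catch it near the entry point (as in Example #10) and simply pass, so the script exits without noise.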
Example #2
    def git_diff_patch_and_upload(self, source: Path, name, storage_class,
                                  is_job_key):
        if is_job_key:
            log(f"==> base_patch={self.patch_folder}")
            log(f"==> sourcecode_patch={name}")
        else:
            log(f"==> datafile_patch={name}")

        try:
            if storage_class is Ipfs or storage_class is IpfsGPG:
                target_path = self.patch_folder_ipfs
            else:
                target_path = self.patch_folder

            self.patch_upload_name, self.patch_file, is_file_empty = _git.diff_patch(
                source, name, self.index, target_path)
            if not is_file_empty:
                try:
                    storage_class.upload(self, name, is_job_key)
                except Exception as e:
                    print_tb(e)
                    raise e
        except Exception as e:
            raise Exception(
                "E: Problem on the git_diff_patch_and_upload() function"
            ) from e
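
A minimal sketch of what _git.diff_patch plausibly does, namely writing the repository's git diff into a patch file under target_path and reporting whether it came out empty. The signature mirrors the call above; the file-naming scheme and the body are assumptions, not the project's actual implementation.

import subprocess
from pathlib import Path

def diff_patch(source: Path, name: str, index: int, target_path: Path):
    patch_upload_name = f"{name}_{index}.diff.patch"  # hypothetical naming scheme
    patch_file = target_path / patch_upload_name
    with open(patch_file, "w") as f:
        # dump the working-tree changes, including binary files, as a patch
        subprocess.run(["git", "diff", "HEAD", "--binary"], cwd=source, stdout=f, check=True)

    is_file_empty = patch_file.stat().st_size == 0
    return patch_upload_name, patch_file, is_file_empty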
Example #3
def is_contract_exists():
    try:
        Ebb = cfg.Ebb
    except Exception as e:
        print_tb(e)
        raise e

    log(f"==> is_contract_exists={Ebb.is_contract_exists()}", "bold")
Example #4
    def get_data_init(self, key, _id, is_job_key=False):
        try:
            cmd = ["gdrive", "info", "--bytes", key, "-c", env.GDRIVE_METADATA]
            _p, gdrive_output, *_ = popen_communicate(cmd)
            if _p.returncode != 0:
                raise Exception(gdrive_output)
        except Exception as e:
            raise e

        mime_type = gdrive.get_file_info(gdrive_output, _type="Mime")
        folder_name = gdrive.get_file_info(gdrive_output, _type="Name")
        log(f"==> mime_type=[magenta]{mime_type}")
        if is_job_key:
            # key for the sourceCode tar.gz file is obtained
            try:
                self.data_transfer_in_to_download, self.job_key_list, key = gdrive.size(
                    key,
                    mime_type,
                    folder_name,
                    gdrive_output,
                    self.results_folder_prev,
                    self.code_hashes,
                    self.job_infos[0]["is_cached"],
                )
            except Exception as e:
                print_tb(e)
                raise e

        return mime_type, folder_name
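
A minimal sketch of how gdrive.get_file_info could extract a field such as Mime or Name, assuming the gdrive CLI prints one "Key: value" pair per line; the parsing logic is an assumption, not the project's actual implementation.

def get_file_info(gdrive_output: str, _type: str) -> str:
    # scan lines of the form "Mime: application/x-gzip" for the requested key
    for line in gdrive_output.splitlines():
        if line.startswith(f"{_type}:"):
            return line.split(":", 1)[1].strip()

    raise ValueError(f"{_type} not found in the gdrive output")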
Example #5
def eudat_submit(job: Job, is_pass=False, required_confs=1):
    log("==> Submitting source code through [blue]EUDAT[/blue]")
    Ebb = cfg.Ebb
    requester = Ebb.w3.toChecksumAddress(job.requester_addr)
    oc_client = "059ab6ba-4030-48bb-b81b-12115f531296"
    connect()
    try:
        job.check_account_status(requester)
    except Exception as e:
        print_tb(e)
        raise e

    login(oc_client, env.LOG_PATH.joinpath(".eudat_client.txt"), env.OC_CLIENT)
    if len(sys.argv) == 3:
        provider = str(sys.argv[1])
        tar_hash = sys.argv[2]
        log(f"==> provided_hash={tar_hash}")
    else:
        provider = Ebb.w3.toChecksumAddress(job.provider_addr)

    job.folders_to_share = job.paths
    check_link_folders(job.data_paths,
                       job.registered_data_files,
                       is_pass=is_pass)
    return submit(provider, requester, job, required_confs=required_confs)
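
Judging from the sys.argv handling above, the script can be invoked as "<script> <provider_address> <tar_hash>" to override the provider and log a precomputed tar hash; otherwise the provider address is taken from job.provider_addr.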
Example #6
def add_all(repo=None):
    """Add all into git."""
    try:
        if not repo:
            repo = git.Repo(".", search_parent_directories=True)

        log("all files in the entire working tree are updated in the Git repository ",
            end="")
        repo.git.add(A=True)
        log(ok())
        try:
            #: git diff HEAD --name-only | wc -l
            changed_file_len = len(
                repo.index.diff("HEAD",
                                ignore_blank_lines=True,
                                ignore_space_at_eol=True))
        except Exception:
            # HEAD might not exist yet if this is the first commit
            changed_file_len = len(
                repo.git.diff("--cached", "--ignore-blank-lines",
                              "--ignore-space-at-eol",
                              "--name-only").split("\n"))

        if changed_file_len > 0:
            log("Record changes to the repository ", end="")
            repo.git.commit("-m", "update")
            log(ok())
    except Exception as e:
        print_tb(e)
        raise e
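
A short usage sketch for add_all, assuming it is called from inside an existing working tree; passing the repository object is optional since add_all discovers it when omitted.

import git

repo = git.Repo(".", search_parent_directories=True)
add_all(repo)  # stage everything and commit with the message "update" if anything changed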
Example #7
    def upload_driver(self):
        self.clean_before_upload()
        try:
            storage_class = self.get_cloud_storage_class(0)
            self.git_diff_patch_and_upload(self.results_folder,
                                           self.job_key,
                                           storage_class,
                                           is_job_key=True)
        except Exception as e:
            raise e

        for idx, name in enumerate(self.code_hashes_to_process[1:], 1):
            # start from index 1; index 0 is the job's source code, the rest are data files
            source = self.results_data_folder / name
            try:
                if self.storage_ids[idx] != StorageID.NONE:
                    storage_class = self.get_cloud_storage_class(idx)
                    self.git_diff_patch_and_upload(source,
                                                   name,
                                                   storage_class,
                                                   is_job_key=False)
            except Exception as e:
                print_tb(e)
                raise e

        if not is_dir_empty(self.patch_folder_ipfs):
            # upload the files after all patching is completed, in case any
            # file was created via ipfs
            self._ipfs_add_folder(self.patch_folder_ipfs)
Example #8
    def submit_slurm_job(self, job_core_num, sbatch_file_path):
        """Slurm submits job.

        * Real mode -n is used.
        * For Emulator-mode -N use 'sbatch -c'
        * cmd: sudo su - $requester_id -c "cd $results_folder && firejail --noprofile \
                sbatch -c$job_core_num $results_folder/${job_key}*${index}.sh --mail-type=ALL"
        """
        for _attempt in range(10):
            try:
                cmd = f'sbatch -n {job_core_num} "{sbatch_file_path}" --mail-type=ALL'
                with cd(self.results_folder):
                    try:
                        job_id = _run_as_sudo(env.SLURMUSER, cmd, shell=True)
                        return job_id
                    except Exception as e:
                        if "Invalid account" in str(e):
                            remove_user(env.SLURMUSER)
                            add_user_to_slurm(env.SLURMUSER)
                            job_id = _run_as_sudo(env.SLURMUSER,
                                                  cmd,
                                                  shell=True)
                            return job_id

                        raise

            except Exception as e:
                print_tb(e)
                slurm.remove_user(self.requester_id)
                slurm.add_user_to_slurm(self.requester_id)
                time.sleep(1)  # wait a second for the slurm idle core count to refresh

        raise Exception("E: sbatch could not submit the job")
Example #9
def _dump_dict_to_file(filename, job_keys):
    try:
        log("==> meta_data.json file is updated in the parent folder")
        dump_dict_to_file(filename, job_keys)
    except Exception as e:
        print_tb(e)
        raise e
Example #10
def main():
    pidfile = env.DRIVER_DAEMON_LOCK
    d = DriverDaemon(pidfile=pidfile)
    # pidfile = daemon.pidfile.PIDLockFile(env.DRIVER_DAEMON_LOCK)
    # logfile = os.path.join(os.getcwd(), "sleepy.log")
    # pidfile = os.path.join(os.getcwd(), "sleepy.pid")
    # logging.basicConfig(filename=logfile, level=logging.DEBUG)
    # d = Daemon_base(pidfile, env.EBLOCPATH, cmd)
    if len(sys.argv) == 2:
        if sys.argv[1] in ["start", "s"]:
            try:
                is_driver_on()
                d.start()
            except QuietExit:
                pass
            except Exception as e:
                print_tb(e)
                sys.exit(1)
        elif sys.argv[1] in ["terminate", "t"]:
            d.terminate()
        elif sys.argv[1] in ["reload", "r"]:
            d.restart()
        else:
            print("Unknown command")
            sys.exit(2)
    else:
        print("usage: %s [s]tart|[t]erminate|[r]eload" % sys.argv[0])
        sys.exit(2)
Example #11
    def process_payment_tx(self):
        try:
            tx_hash = eblocbroker_function_call(
                lambda: Ebb.process_payment(
                    self.job_key,
                    self.index,
                    self.job_id,
                    self.elapsed_time,
                    self.result_ipfs_hash,
                    self.storage_ids,
                    self.end_time_stamp,
                    self.data_transfer_in,
                    self.data_transfer_out,
                    self.job_info["core"],
                    self.job_info["run_time"],
                    self.received_block_number,
                ),
                max_retries=10,
            )
        except Exception as e:
            print_tb(e)
            sys.exit(1)

        log(f"==> process_payment {self.job_key} {self.index}")
        return tx_hash
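
eblocbroker_function_call above retries the wrapped transaction call. A minimal sketch of such a retry helper, assuming a fixed sleep between attempts; the helper name comes from the example, while the body and timing are assumptions.

import time

def eblocbroker_function_call(func, max_retries: int, sleep_time: int = 15):
    # re-run func until it succeeds or the retry budget is exhausted
    for attempt in range(max_retries):
        try:
            return func()
        except Exception as e:
            if attempt == max_retries - 1:
                raise e

            time.sleep(sleep_time)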
Example #12
    def sbatch_call(self):
        link = Link(self.results_data_folder, self.results_data_link)
        try:
            if len(self.registered_data_hashes) > 0:
                # if there are mounted folders, umount them first in order
                # to set the folder permissions
                link.umount(self.registered_data_hashes)

            # folder permissions should be applied before linking the
            # folders in case there is a read-only folder; file permissions
            # for the requester's folders should be reset
            give_rwe_access(self.requester_id, self.results_folder_prev)
            give_rwe_access(env.WHOAMI, self.results_folder_prev)
            # give_rwe_access(self.requester_id, self.requester_home)
            # give_rwe_access(env.WHOAMI, self.requester_home)
            if calculate_size(self.results_data_folder, _type="bytes") > 0:
                link.link_folders()

            if len(self.registered_data_hashes) > 0:
                link.registered_data(self.registered_data_hashes)

            self._sbatch_call()
        except Exception as e:
            print_tb(f"E: Failed to call _sbatch_call() function. {e}")
            raise e
Example #13
def get_data_key_ids(results_folder_prev) -> dict:
    filename = f"{results_folder_prev}/meta_data.json"
    log(f"==> meta_data_path={filename}")
    try:
        meta_data = read_json(filename)
    except Exception as e:
        print_tb(e)
        raise e

    return meta_data
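
A minimal sketch of the presumed read_json helper; the name comes from the example, the body is an assumption.

import json

def read_json(path: str) -> dict:
    # load and return the JSON document stored at the given path
    with open(path) as f:
        return json.load(f)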
Example #14
    def initialize(self):
        with suppress(Exception):
            eudat.login(env.OC_USER,
                        env.LOG_PATH.joinpath(".eudat_client.txt"),
                        env.OC_CLIENT)

        try:
            self.get_shared_tokens()
        except Exception as e:
            print_tb(e)
            raise e
Example #15
def submit_gdrive(job: Job, is_pass=False, required_confs=1):
    log("==> Submitting source code through [blue]GDRIVE[/blue]")
    pre_check()
    Ebb = cfg.Ebb
    job.folders_to_share = job.paths
    check_link_folders(job.data_paths,
                       job.registered_data_files,
                       is_pass=is_pass)
    _git.generate_git_repo(job.folders_to_share)
    job.clean_before_submit()
    requester = Ebb.w3.toChecksumAddress(job.requester_addr)
    provider = Ebb.w3.toChecksumAddress(job.provider_addr)
    job = gdrive.submit(provider, requester, job)
    for folder_to_share in job.folders_to_share:
        if isinstance(folder_to_share, bytes):
            code_hash = folder_to_share
        else:
            tar_hash = job.foldername_tar_hash[folder_to_share]
            #: required to send string as bytes == str_data.encode('utf-8')
            code_hash = Ebb.w3.toBytes(text=tar_hash)

        job.code_hashes.append(code_hash)
        job.code_hashes_str.append(code_hash.decode("utf-8"))

    tar_hash = job.foldername_tar_hash[job.folders_to_share[0]]
    key = job.keys[tar_hash]
    job.price, *_ = job.cost(provider, requester)
    try:
        tx_hash = Ebb.submit_job(provider,
                                 key,
                                 job,
                                 requester=requester,
                                 required_confs=required_confs)
        tx_receipt = get_tx_status(tx_hash)
        if tx_receipt["status"] == 1:
            processed_logs = Ebb._eBlocBroker.events.LogJob().processReceipt(
                tx_receipt, errors=DISCARD)
            log(vars(processed_logs[0].args))
            try:
                log(f"{ok()} [bold]job_index={processed_logs[0].args['index']}"
                    )
            except IndexError:
                log(f"E: Tx({tx_hash}) is reverted")
    except QuietExit:
        pass
    except Exception as e:
        print_tb(e)
        raise e

    log()
    for k, v in job.tar_hashes.items():
        log(f"{k} [blue]=>[/blue] {v}")

    return tx_hash
Example #16
    def _ipfs_add_folder(self, folder_path):
        try:
            self.result_ipfs_hash = cfg.ipfs.add(folder_path)
            log(f"==> result_ipfs_hash={self.result_ipfs_hash}")
            cfg.ipfs.pin(self.result_ipfs_hash)
            data_transfer_out = cfg.ipfs.get_cumulative_size(
                self.result_ipfs_hash)
        except Exception as e:
            print_tb(e)
            raise e

        data_transfer_out = byte_to_mb(data_transfer_out)
        self.data_transfer_out += data_transfer_out
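
byte_to_mb presumably converts the cumulative size reported by IPFS into megabytes; a one-line sketch, assuming binary megabytes (the helper name comes from the example, the body is an assumption).

def byte_to_mb(size_in_bytes: float) -> float:
    return size_in_bytes / (1024 * 1024)  # bytes -> MB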
Example #17
def main():
    provider = env.PROVIDER_ID
    pre_check_data(provider)
    Ebb = cfg.Ebb
    data_hash = b"f13d75bc60898f0823566347e380a34d"
    try:
        if is_data_registered(provider, data_hash):
            tx = Ebb.remove_registered_data(data_hash)
            get_tx_status(Ebb.tx_id(tx))
        else:
            log(f"## data({data_hash}) is alread deleted or not registered")
    except Exception as e:
        print_tb(e)
Example #18
def share_single_folder(folder_name, f_id) -> bool:
    try:
        # folder_names = os.listdir(env.OWNCLOUD_PATH)
        # fID = '*****@*****.**'
        if not config.oc.is_shared(folder_name):
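            # perms=31 is the ownCloud permission bitmask: read|update|create|delete|share (1+2+4+8+16)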
            config.oc.share_file_with_user(folder_name, f_id, remote_user=True, perms=31)
            log(f"sharing with [yellow]{f_id}[/yellow] {ok()}", "bold")
            return True

        log("## Requester folder is already shared")
        return True
    except Exception as e:
        print_tb(e)
        return False
Example #19
    def pre_data_check(self, key):
        if self.data_transfer_in_to_download > self.data_transfer_in_requested:
            # TODO: full refund
            raise Exception(
                "Requested size to download the source_code and data files is greater than the given amount"
            )

        try:
            cmd = ["gdrive", "info", "--bytes", key, "-c", env.GDRIVE_METADATA]
            return subprocess_call(cmd, 1)
        except Exception as e:
            # TODO: gdrive list --query "sharedWithMe"
            print_tb(e)
            raise e
Example #20
def main():
    Ebb = cfg.Ebb
    is_write_to_file = False
    if len(sys.argv) == 2:
        if sys.argv[1] in ("1", "True", "true"):
            is_write_to_file = True

    try:
        output = Ebb.get_block_number()
        if is_write_to_file:
            env.config["block_continue"] = output
        else:
            log(f"block_number={output}", "bold")
    except Exception as e:
        print_tb(e)
Example #21
    def run(self) -> bool:
        self.start_time = time.time()
        if cfg.IS_THREADING_ENABLED:
            self.thread_log_setup()
            log(f"## Keep track from: tail -f {self.drivers_log_path}")

        try:
            log(f" * log_path={self.drivers_log_path}")
            self._run()
            # self.thread_log_setup()
            return True
        except Exception as e:
            print_tb(f"{self.job_key}_{self.index} {e}")
            sys.exit(1)
        finally:
            time.sleep(0.25)
Example #22
    def complete_refund(self) -> str:
        """Complete refund back to the requester."""
        try:
            tx_hash = self.Ebb.refund(
                self.logged_job.args["provider"],
                env.PROVIDER_ID,
                self.job_key,
                self.index,
                self.job_id,
                self.cores,
                self.run_time,
            )
            log(f"==> refund tx_hash={tx_hash}")
            return tx_hash
        except Exception as e:
            print_tb(e)
            raise e
Example #23
    def search_token(self,
                     f_id,
                     share_list,
                     folder_name,
                     is_silent=False) -> bool:
        """Search for the share_token from the shared folder."""
        share_key = f"{folder_name}_{self.requester_id[:16]}"
        if not is_silent:
            log(f"## searching share tokens for the related source_code_folder={folder_name}"
                )

        for idx in range(len(share_list) - 1, -1, -1):
            # starts iterating from last item to the first one
            input_folder_name = share_list[idx]["name"]
            input_folder_name = input_folder_name[1:]  # remove the leading "/"
            share_id = share_list[idx]["id"]
            # input_owner = share_list[i]['owner']
            input_user = f"{share_list[idx]['user']}@b2drop.eudat.eu"
            if input_folder_name == share_key and input_user == f_id:
                self.share_token = str(share_list[idx]["share_token"])
                self.share_id[share_key] = {
                    "share_id": int(share_id),
                    "share_token": self.share_token,
                }
                if Ebb.mongo_broker.add_item_share_id(share_key, share_id,
                                                      self.share_token):
                    # adding into mongoDB for future uses
                    log(f"#> Added into mongoDB {ok()}")
                else:
                    logging.error("E: Something is wrong, not added into mongoDB")

                log(f"==> name={folder_name} | share_id={share_id} | share_token={self.share_token} {ok()}"
                    )
                try:
                    config.oc.accept_remote_share(int(share_id))
                    log(f"## share_id={share_id} is accepted")
                except Exception as e:
                    print_tb(e)

                self.accept_flag += 1
                return True

        return False
Example #24
def _withdraw():
    Ebb = cfg.Ebb
    if len(sys.argv) == 2:
        account = str(sys.argv[1])
    else:
        log("## provide an ethereum account as an argument")
        sys.exit(1)

    try:
        balance = Ebb.get_balance(account)
        if balance > 0:
            log(f"account_balance={balance}", "bold")
            get_tx_status(Ebb.withdraw(account))
        else:
            log("warning: account balance is empty nothing to do")
    except QuietExit:
        pass
    except Exception as e:
        print_tb(e)
Example #25
    def upload(self, key, is_job_key):
        """Upload generated result into gdrive.

        :param key: key of the shared gdrive file
        :raises Exception: if the gdrive upload fails
        """
        meta_data = None
        try:
            if not is_job_key:
                meta_data = gdrive.get_data_key_ids(self.results_folder_prev)
                key = meta_data[key]

            cmd = [
                env.GDRIVE, "info", "--bytes", key, "-c", env.GDRIVE_METADATA
            ]
            gdrive_info = subprocess_call(cmd, 5, sleep_time=30)
        except Exception as e:
            raise Exception(
                f"{WHERE(1)} E: {key} does not have a match. meta_data={meta_data}. {e}"
            ) from e

        mime_type = gdrive.get_file_info(gdrive_info, "Mime")
        log(f"mime_type={mime_type}")
        self.data_transfer_out += calculate_size(self.patch_file)
        log(f"data_transfer_out={self.data_transfer_out} MB =>"
            f" rounded={int(self.data_transfer_out)} MB")
        if "folder" in mime_type:
            cmd = [
                env.GDRIVE, "upload", "--parent", key, self.patch_file, "-c",
                env.GDRIVE_METADATA
            ]
        elif "gzip" in mime_type or "/zip" in mime_type:
            cmd = [
                env.GDRIVE, "update", key, self.patch_file, "-c",
                env.GDRIVE_METADATA
            ]
        else:
            raise Exception("Files could not be uploaded")

        try:
            log(subprocess_call(cmd, 5))
        except Exception as e:
            print_tb(e)
            raise Exception("E: gdrive could not upload the file") from e
Example #26
    def remove_source_code(self):
        """Client's initial downloaded files are removed."""
        timestamp_file = f"{self.results_folder_prev}/timestamp.txt"
        try:
            cmd = [
                "find", self.results_folder, "-type", "f", "!", "-newer",
                timestamp_file
            ]
            files_to_remove = run(cmd)
            if files_to_remove:
                log(f"## Files to be removed: \n{files_to_remove}\n")
        except Exception as e:
            print_tb(e)
            sys.exit()

        run([
            "find", self.results_folder, "-type", "f", "!", "-newer",
            timestamp_file, "-delete"
        ])
Example #27
    def scontrol_update(self, job_core_num, sbatch_file_path, time_limit):
        """Prevent scontrol update locked exception.

        scontrol generates: Job update not available right now, the DB index is being
        set, try again in a bit for job 5.
        """
        try:
            _slurm_job_id = self.submit_slurm_job(job_core_num,
                                                  sbatch_file_path)
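            # sbatch prints "Submitted batch job <id>", so the job id is the 4th token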
            slurm_job_id = _slurm_job_id.split()[3]
            cmd = [
                "scontrol", "update", f"jobid={slurm_job_id}",
                f"TimeLimit={time_limit}"
            ]
            subprocess_call(cmd, attempt=10, sleep_time=10)
            return slurm_job_id
        except Exception as e:
            print_tb(e)
            raise e
Example #28
def submit(provider, requester, job, required_confs=1):
    try:
        tx_hash = _submit(provider, requester, job, required_confs)
        if required_confs >= 1:
            tx_receipt = get_tx_status(tx_hash)
            if tx_receipt["status"] == 1:
                processed_logs = Ebb._eBlocBroker.events.LogJob().processReceipt(tx_receipt, errors=DISCARD)
                log(vars(processed_logs[0].args))
                try:
                    log(f"{ok()} [bold]job_index={processed_logs[0].args['index']}")
                except IndexError:
                    log(f"E: Tx({tx_hash}) is reverted")
        else:
            log(f"tx_hash={tx_hash}", "bold")
    except QuietExit:
        pass
    except Exception as e:
        print_tb(e)
        raise e

    return tx_hash
Example #29
    def run(self) -> bool:
        self.start_time = time.time()
        if cfg.IS_THREADING_ENABLED:
            self.thread_log_setup()

        log(f"{br(get_date())} job's source code has been sent through Google Drive", "bold cyan")

        # self.get_data_init(key=self.job_key, _id=0, is_job_key=True)

        try:
            # default target; get_data() below may return a more specific path
            target = self.results_folder
            if os.path.isdir(self.results_folder):
                # attempt to download the source code
                target = self.get_data(key=self.job_key, _id=0, is_job_key=True)

            if not os.path.isdir(f"{target}/.git"):
                log(f"warning: .git folder does not exist within {target}")
                _git.generate_git_repo(target)
        except Exception as e:
            print_tb(e)
            return False

        if not self.check_run_sh():
            self.complete_refund()

        for idx, source_code_hash in enumerate(self.code_hashes):
            if self.cloudStorageID[idx] == StorageID.NONE:
                if isinstance(source_code_hash, bytes):
                    self.registered_data_hashes.append(source_code_hash.decode("utf-8"))
                else:
                    self.registered_data_hashes.append(source_code_hash)

        for idx, (_, value) in enumerate(self.job_key_list.items()):
            try:
                target = self.get_data(value, idx + 1)
                if not os.path.isdir(f"{target}/.git"):
                    log(f"warning: .git folder does not exist within {target}")
                    _git.generate_git_repo(target)
            except Exception as e:
                print_tb(e)
                return False

        return self.sbatch_call()
Example #30
    def attemp_get_job_info(self):
        is_print = True
        sleep_time = 30
        for attempt in range(10):
            # log(self.job_info)
            if self.job_info["stateCode"] == state.code["RUNNING"]:
                # it will reach here eventually once setJob() is mined; wait
                # until the values are updated on the blockchain
                log("## job has been started")
                return

            if self.job_info["stateCode"] == state.code["COMPLETED"]:
                # detects an error on the slurm side
                log("warning: job is already completed and its money is received"
                    )
                self.get_job_info()
                raise QuietExit

            try:
                self.job_info = Ebb.get_job_info(env.PROVIDER_ID, self.job_key,
                                                 self.index, self.job_id,
                                                 self.received_block_number,
                                                 is_print)
                is_print = False
            except Exception as e:
                print_tb(e)
                # sys.exit(1)

            # sleep so that this loop does not keep the CPU busy; the
            # start_code tx may be mined into the blockchain late
            log(
                f"==> {br(attempt)} start_code tx of the job is not obtained yet, "
                f"waiting for {sleep_time} seconds to pass...",
                end="",
            )
            sleep(sleep_time)
            log(ok())

        log("E: failed all the attempts, abort")
        sys.exit(1)