Example #1
def daemon():
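    """Start the requested background daemon: the IPFS daemon or Slurm."""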
    if args.daemon_type[0] == "ipfs":
        from broker.utils import run_ipfs_daemon

        run_ipfs_daemon(_is_print=True)
    if args.daemon_type[0] == "slurm":
        from broker.config import env
        from broker.utils import run

        run(["sudo", env.BASH_SCRIPTS_PATH / "run_slurm.sh"])
Example #2
def get_ipfs_id() -> str:
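    """Start the local IPFS daemon and return the node's IPFS ID via the HTTP API."""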
    run_ipfs_daemon()
    try:
        # may raise ipfshttpclient.exceptions.ConnectionError
        client = ipfshttpclient.connect("/ip4/127.0.0.1/tcp/5001/http")
    except ipfshttpclient.exceptions.ConnectionError:
        log("E: Failed to establish a new connection to IPFS, please run it on the background.\n"
            "Please run [magenta]~/ebloc-broker/broker/_daemons/ipfs.py")
        sys.exit(1)
    except Exception as e:
        print_tb(e)
        log("E: Failed to establish a new connection to IPFS, please run it on the background.\n"
            "Please run [magenta]~/ebloc-broker/broker/_daemons/ipfs.py")
        sys.exit(1)

    try:
        return cfg.ipfs.get_ipfs_id(client)
    except Exception as e:
        print_tb(str(e))
        sys.exit(1)
Example #3
def pre_check(job, requester):
    """Pre check jobs to submit."""
    try:
        job.check_account_status(requester)
        is_bin_installed("ipfs")
        if not is_dpkg_installed("pigz"):
            log("E: Install [green]pigz[/green].\nsudo apt install -y pigz")
            sys.exit()

        if not os.path.isfile(env.GPG_PASS_FILE):
            log(f"E: Please store your gpg password in the [magenta]{env.GPG_PASS_FILE}[/magenta]\nfile for decrypting")
            raise QuietExit

        run_ipfs_daemon()
        if job.storage_ids[0] == StorageID.IPFS:
            for storage_id in job.storage_ids[1:]:
                if storage_id in (StorageID.GDRIVE, StorageID.EUDAT):
                    raise Exception(
                        "If source code is submitted via IPFS, then data files must be submitted using IPFS or IPFS_GPG"
                    )
    except Exception as e:
        print_tb(e)
        sys.exit()
Example #4
    def run(self) -> bool:
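        """Fetch the job's source code and data over IPFS, then submit the job with sbatch."""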
        self.start_time = time.time()
        if cfg.IS_THREADING_ENABLED:
            self.thread_log_setup()

        run_ipfs_daemon()
        log(f"{br(get_date())} Job's source code has been sent through ",
            "bold cyan",
            end="")
        if self.cloudStorageID[0] == StorageID.IPFS:
            log("[bold green]IPFS")
        else:
            log("[bold green]IPFS_GPG")

        if not is_ipfs_on():
            return False

        log(f"==> is_hash_locally_cached={cfg.ipfs.is_hash_locally_cached(self.job_key)}"
            )
        if not os.path.isdir(self.results_folder):
            os.makedirs(self.results_folder)

        _remove(f"{self.results_folder}/{self.job_key}")
        try:
            self.check_ipfs(self.job_key)
        except Exception:
            return False

        self.registered_data_hashes = []
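        # entries with StorageID.NONE refer to registered data hashes; the remaining
        # entries must correspond to IPFS hashes that are reachable online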
        for idx, source_code_hash in enumerate(self.code_hashes):
            if self.cloudStorageID[idx] == StorageID.NONE:
                self.registered_data_hashes.append(source_code_hash)  # GOTCHA
            else:
                ipfs_hash = bytes32_to_ipfs(source_code_hash)
                if ipfs_hash not in self.ipfs_hashes:
                    try:  # job_key as a data hash may already be added to the list
                        self.check_ipfs(ipfs_hash)
                    except Exception:
                        return False

        initial_folder_size = calculate_size(self.results_folder)
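        # download each IPFS hash into the results folder and account for the
        # fetched size as incoming data transfer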
        for idx, ipfs_hash in enumerate(self.ipfs_hashes):
            # at this point the script knows that the provided IPFS hashes exist online
            is_hashed = False
            log(f"## attempting to get IPFS file: {ipfs_hash} ... ", end="")
            if cfg.ipfs.is_hash_locally_cached(ipfs_hash):
                is_hashed = True
                log(ok("already cached"))
            else:
                log()

            if idx == 0:
                target = self.results_folder
            else:
                #  "_" added before the filename in case $ ipfs get <ipfs_hash>
                target = f"{self.results_data_folder}/_{ipfs_hash}"
                mkdir(target)

            is_storage_paid = False  # TODO: should be set beforehand via user input
            cfg.ipfs.get(ipfs_hash, target, is_storage_paid)
            if idx > 0:
                # https://stackoverflow.com/a/31814223/2402577
                dst_filename = os.path.join(self.results_data_folder,
                                            os.path.basename(ipfs_hash))
                if os.path.exists(dst_filename):
                    _remove(dst_filename)

                shutil.move(target, dst_filename)
                target = dst_filename

            if self.cloudStorageID[idx] == StorageID.IPFS_GPG:
                cfg.ipfs.decrypt_using_gpg(f"{target}/{ipfs_hash}", target)

            _git.initialize_check(target)

            if not is_hashed:
                folder_size = calculate_size(self.results_folder)
                self.data_transfer_in_to_download_mb += folder_size - initial_folder_size
                initial_folder_size = folder_size

            if idx == 0 and not self.check_run_sh():
                self.complete_refund()
                return False

        log(f"==> data_transfer_in={self.data_transfer_in_to_download_mb} MB | "
            f"rounded={int(self.data_transfer_in_to_download_mb)} MB")
        return self.sbatch_call()
Example #5
def _tools(block_continue):  # noqa
    """Check whether the required functions are running or not.

    :param block_continue: Continue from given block number
    """
    session_start_msg(env.SLURMUSER, block_continue, pid)
    try:
        if not is_internet_on():
            raise Terminate("Network connection is down. Please try again")

        if not check_ubuntu_packages():
            raise Terminate()

        if not env.IS_BLOXBERG:
            is_geth_on()
        else:
            log(":beer:  Connected into [green]BLOXBERG[/green]", "bold")

        if not Contract.Ebb.is_orcid_verified(env.PROVIDER_ID):
            log(f"warning: provider [green]{env.PROVIDER_ID}[/green]'s orcid id is not authenticated yet")
            raise QuietExit

        slurm.is_on()
        if not is_process_on("mongod"):
            raise Exception("mongodb is not running in the background")

        # run_driver_cancel()  # TODO: uncomment
        if env.IS_EUDAT_USE:
            if not env.OC_USER:
                raise Terminate(f"OC_USER is not set in {env.LOG_PATH.joinpath('.env')}")

            eudat.login(env.OC_USER, env.LOG_PATH.joinpath(".eudat_client.txt"), env.OC_CLIENT)

        if env.IS_GDRIVE_USE:
            is_program_valid(["gdrive", "version"])
            if env.GDRIVE == "":
                raise Terminate(f"E: Path for gdrive='{env.GDRIVE}' please set a valid path in the .env file")

            provider_info = Contract.Ebb.get_provider_info(env.PROVIDER_ID)
            email = provider_info["email"]
            try:
                output, gdrive_email = gdrive.check_user(email)
            except Exception as e:
                print_tb(e)
                raise QuietExit from e

            if not output:
                log(
                    f"E: provider's registered email=[magenta]{email}[/magenta] does not match\n"
                    f"   with the set gdrive's email=[magenta]{gdrive_email}[/magenta]"
                )
                raise QuietExit

            log(f"==> provider_email=[magenta]{email}")

        if env.IS_IPFS_USE:
            if not os.path.isfile(env.GPG_PASS_FILE):
                log(f"E: Please store your gpg password in the {env.GPG_PASS_FILE}\nfile for decrypting using ipfs")
                raise QuietExit

            if not os.path.isdir(env.IPFS_REPO):
                log(f"E: {env.IPFS_REPO} does not exist")
                raise QuietExit

            run_ipfs_daemon()
    except QuietExit:
        raise
    except Exception as e:
        print_tb(e)
        raise Terminate from e
Example #6
def register_requester(self, yaml_fn, is_question=True):
    """Register or update requester into smart contract."""
    yaml_fn = os.path.expanduser(yaml_fn)
    try:
        run_ipfs_daemon()
        client = ipfshttpclient.connect("/ip4/127.0.0.1/tcp/5001/http")
    except Exception as e:
        log("E: Run ipfs daemon to detect your ipfs_id")
        print_tb(e)
        sys.exit(1)

    if not os.path.exists(yaml_fn):
        log(f"E: yaml_fn({yaml_fn}) does not exist")
        raise QuietExit

    args = Yaml(yaml_fn)
    ipfs_id = cfg.ipfs.get_ipfs_id(client)
    email = env.GMAIL
    gpg_fingerprint = get_gpg_fingerprint(email)
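    # is_gpg_published() is expected to raise if the key for this fingerprint has not been published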
    is_gpg_published(gpg_fingerprint)

    account = args["config"]["account"].lower()
    email = args["config"]["email"]
    federation_cloud_id = args["config"]["federation_cloud_id"]
    # if env.IS_BLOXBERG:
    #     account = self.brownie_load_account().address

    log(f"==> registering {account} as requester")
    if is_byte_str_zero(account):
        log(f"E: account={account} is not valid, change it in [{c.pink}]~/.ebloc-broker/cfg.yaml"
            )
        raise QuietExit

    if len(federation_cloud_id) >= 128:
        raise Exception("E: federation_cloud_id is more than 128")

    if len(email) >= 128:
        raise Exception("E: email is more than 128")

    if len(gpg_fingerprint) != 40:
        raise Exception("E: gpg_fingerprint should be 40 characters")

    if self.does_requester_exist(account):
        log(f"warning: requester {account} is already registered")
        requester_info = Ebb.get_requester_info(account)
        if (requester_info["email"] == email
                and requester_info["gpg_fingerprint"] == gpg_fingerprint
                and requester_info["ipfs_id"] == ipfs_id
                and requester_info["f_id"] == federation_cloud_id):
            log(requester_info)
            log("## Same requester information is provided, nothing to do")
            raise QuietExit

        log("==> [bold yellow]registered_requester_info:")
        log(requester_info)
        _requester_info = {
            "email": email,
            "federation_cloud_id": federation_cloud_id,
            "gpg_fingerprint": gpg_fingerprint,
            "ipfs_id": ipfs_id,
        }
        log("==> [bold yellow]new_requester_info:")
        log(_requester_info)
        if is_question and not question_yes_no(
                "#> Would you like to update requester info?"):
            return

    try:
        tx = self._register_requester(account, gpg_fingerprint, email,
                                      federation_cloud_id, ipfs_id)
        return self.tx_id(tx)
    except Exception as e:
        print_tb(e)
        raise e