Example 1
    def is_hash_exists_online(self, ipfs_hash: str):
        log(f"## attempting to check IPFS file [green]{ipfs_hash}[/green] ... "
            )
        if not is_ipfs_on():
            raise IpfsNotConnected

        if not cfg.IS_THREADING_ENABLED:
            signal.signal(signal.SIGALRM, handler)
            signal.alarm(cfg.IPFS_TIMEOUT)  # fire SIGALRM after cfg.IPFS_TIMEOUT seconds

        try:
            output = self._ipfs_stat(ipfs_hash, _is_ipfs_on=False)
            for line in output.split("\n"):
                if "CumulativeSize" not in line:
                    log(line)

            # output_json = json.dumps(output.as_json(), indent=4, sort_keys=True)
            cumulative_size = int(output.split("\n")[4].split(":")[1].replace(" ", ""))
            log(f"cumulative_size={cumulative_size}", "bold")
            return output, cumulative_size
        except Exception as e:
            raise Exception(
                f"E: Timeout, failed to find IPFS file: {ipfs_hash}") from e
Example 2
    def swarm_connect(self, ipfs_id: str):
        """Swarm connect into the ipfs node."""
        if not is_ipfs_on():
            raise IpfsNotConnected

        # TODO: check is valid IPFS id
        try:
            log(f" * trying to connect into {ipfs_id}")
            cmd = ["/usr/local/bin/ipfs", "swarm", "connect", ipfs_id]
            p, output, e = popen_communicate(cmd)
            if p.returncode != 0:
                log()
                e = e.replace("[/", "/").replace("]", "").replace("e: ",
                                                                  "").rstrip()
                if "failure: dial to self attempted" in e:
                    log(f"E: {e}")
                    if not cfg.IS_FULL_TEST and not question_yes_no(
                            "#> Would you like to continue?"):
                        raise QuietExit
                else:
                    log("E: connection into provider's IPFS node via swarm is not accomplished"
                        )
                    raise Exception(e)
            else:
                log(f"{output} {ok()}")
        except Exception as e:
            print_tb(e)
            raise e
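popen_communicate is a project helper whose implementation is not shown in these examples; judging from how it is called, a minimal stand-in (an assumption, not the project's code) could be:

    from subprocess import PIPE, Popen

    def popen_communicate(cmd):
        # run cmd, wait for it to finish, and hand back the process
        # object together with its decoded stdout and stderr
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()
        return p, stdout.decode().strip(), stderr.decode().strip()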
Example 3
    def get_cumulative_size(self, ipfs_hash: str):
        if not is_ipfs_on():
            raise IpfsNotConnected

        output = self._ipfs_stat(ipfs_hash)
        if output:
            return int(output.split("\n")[4].split(":")[1].replace(" ", ""))
        else:
            raise Exception(f"CumulativeSize could not found for {ipfs_hash}")
Example 4
    def get(self, ipfs_hash, path, is_storage_paid):
        if not is_ipfs_on():
            raise IpfsNotConnected

        output = run_with_output(["ipfs", "get", ipfs_hash, f"--output={path}"])
        logging.info(output)
        if is_storage_paid:
            # pin downloaded ipfs hash if storage is paid
            output = check_output(["ipfs", "pin", "add",
                                   ipfs_hash]).decode("utf-8").rstrip()
            logging.info(output)
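Whether the pin actually took can be double-checked with ipfs pin ls, which exits non-zero for an unpinned hash; a small verification helper along those lines (illustrative, not part of the project) could be:

    from subprocess import CalledProcessError, check_output

    def is_pinned(ipfs_hash: str) -> bool:
        # "ipfs pin ls <hash>" fails when the hash is not pinned locally
        try:
            check_output(["ipfs", "pin", "ls", ipfs_hash])
            return True
        except CalledProcessError:
            return False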
Example 5
def main():
    try:
        config.env = config.ENV()
    except Exception as e:
        print_tb(e)
        log("E: env.IPFS_LOG is not set")
        sys.exit(1)

    if not is_ipfs_on():
        cfg.ipfs.remove_lock_files()
        run()
    else:
        log(f"## [green]IPFS[/green] daemon is already running {ok()}")
        sys.exit(100)
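is_ipfs_on, used throughout these examples, is not shown; a minimal stand-in that probes the daemon's HTTP API (an assumption about its behavior, not the actual helper) could look like:

    import ipfshttpclient

    def is_ipfs_on(is_print=True) -> bool:
        # treat the daemon as running if its HTTP API answers an id() call
        try:
            with ipfshttpclient.connect("/ip4/127.0.0.1/tcp/5001/http") as client:
                client.id()

            return True
        except Exception:
            if is_print:
                print("E: IPFS daemon is not running")

            return False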
Example 6
    def _ipfs_stat(self, ipfs_hash, _is_ipfs_on=True):
        """Return stats of the give IPFS hash.

        This function *may* run for an indetermined time. Returns a dict with the
        size of the block with the given hash.
        """
        if _is_ipfs_on and not is_ipfs_on():
            raise IpfsNotConnected

        with cfg.console.status(
                f"$ ipfs object stat {ipfs_hash} --timeout={cfg.IPFS_TIMEOUT}s"
        ):
            return subprocess_call([
                "ipfs", "object", "stat", ipfs_hash,
                f"--timeout={cfg.IPFS_TIMEOUT}s"
            ])
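For reference, ipfs object stat prints five fields in a fixed order, which is why Examples 1 and 3 read output.split("\n")[4] to reach the cumulative size (the values below are illustrative):

    NumLinks: 1
    BlockSize: 62
    LinksSize: 60
    DataSize: 2
    CumulativeSize: 313946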
Example 7
    def connect_to_bootstrap_node(self):
        """Connect into return addresses of the currently connected peers."""
        if not is_ipfs_on():
            raise IpfsNotConnected

        # cmd = ["ipfs", "bootstrap", "list"]
        # output = run(cmd)
        # s = StringIO(output)
        peers = self.client.bootstrap.list()["Peers"]
        peer_address = None
        for peer in peers:
            if re.search(r"/ip4/", peer) is not None:
                peer_address = peer
                break
        else:
            return False

        print(f"==> Trying to connect into {peer_address} using swarm connect")
        output = self.client.swarm.connect(peer_address)
        if ("connect" and "success") in str(output):
            log(str(output), "bold green")
            return True

        return False
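A successful client.swarm.connect call returns the JSON body of the daemon's swarm/connect endpoint, typically a single confirmation string, which is what the two substring checks above look for (the peer ID below is illustrative):

    output = client.swarm.connect(peer_address)
    print(output)  # e.g. {'Strings': ['connect QmPeerId success']}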
Example 8
    def connect(self):
        """Connect to the local IPFS daemon."""
        if is_ipfs_on(is_print=False):
            self.client = ipfshttpclient.connect("/ip4/127.0.0.1/tcp/5001/http")
Example 9
def check_before_submit(self, provider, _from, provider_info, key, job):
    """Check job's conditions before submitting."""
    self.is_provider_valid(provider)
    self.is_requester_valid(_from)
    main_storage_id = job.storage_ids[0]
    is_use_ipfs = False
    for storage_id in job.storage_ids:
        if storage_id > 4:
            raise Exception(
                "Invalid storage_id value given. Please provide a value from 0 to 4")

        if storage_id in [StorageID.IPFS, StorageID.IPFS_GPG]:
            is_use_ipfs = True
            break

    if not job.code_hashes:
        raise Exception("source_code_hash list is empty")

    if len(key) >= 64:
        raise Exception("Length of key must be less than 64 characters")

    key_len = 46
    if len(key) != key_len and main_storage_id in [
            StorageID.IPFS, StorageID.IPFS_GPG
    ]:
        raise Exception(
            f"E: key's length does not match with its original length, it should be {key_len}. "
            f"Please check your key length, given key={key}")

    key_len = 33
    if len(key) != key_len and main_storage_id == StorageID.GDRIVE:
        raise Exception(
            f"E: key's length does not match with its original length, it should be {key_len}. "
            "Please check your key length")

    for idx, core in enumerate(job.cores):
        if core > provider_info["available_core_num"]:
            raise Exception(
                f"Requested core count ({core}) is greater than the provider's available core number"
            )

        if job.run_time[idx] == 0:
            raise Exception(
                f"run_time{br(idx)} is provided as 0. Please provide non-zero value"
            )

    for core_min in job.run_time:
        if core_min > 1440:
            raise Exception(
                "E: run_time provided greater than 1440. Please provide smaller value"
            )

    for cache_type in job.cache_types:
        if cache_type > 1:
            # cache_type should be {0: private, 1: public}
            raise Exception(
                f"E: cache_type ({cache_type}) provided greater than 1. Please provide smaller value"
            )

    if is_use_ipfs:
        if not is_ipfs_on():
            sys.exit()

        try:
            cfg.ipfs.swarm_connect(provider_info["ipfs_id"])
        except Exception as e:
            log(f"E: {e}")
            if not cfg.IS_FULL_TEST and not question_yes_no(
                    "#> Would you like to continue?"):
                raise QuietExit from e

    for idx, source_code_hash in enumerate(job.code_hashes_str):
        if source_code_hash == "":
            raise Exception(
                f"source_code_hash{br(idx)} should not be empty string")

    requester_info = self.get_requester_info(_from)
    gpg_fingerprint = get_gpg_fingerprint(env.GMAIL).upper()
    if requester_info["gpg_fingerprint"].upper() != gpg_fingerprint:
        raise Exception(
            f"E: gpg_fingerprint does not match {requester_info['gpg_fingerprint'].upper()} "
            f"with registered gpg_fingerprint={gpg_fingerprint}")

    is_gpg_published(gpg_fingerprint)
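The key_len = 46 check above reflects the shape of a CIDv0 hash: a 46-character base58 string starting with "Qm". A standalone validator along those lines (illustrative, not the project's code) could be:

    def looks_like_cid_v0(key: str) -> bool:
        # CIDv0 hashes are 46-character base58 strings beginning with "Qm"
        return len(key) == 46 and key.startswith("Qm")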
Example 10
    def run(self) -> bool:
        self.start_time = time.time()
        if cfg.IS_THREADING_ENABLED:
            self.thread_log_setup()

        run_ipfs_daemon()
        log(f"{br(get_date())} Job's source code has been sent through ",
            "bold cyan",
            end="")
        if self.cloudStorageID[0] == StorageID.IPFS:
            log("[bold green]IPFS")
        else:
            log("[bold green]IPFS_GPG")

        if not is_ipfs_on():
            return False

        log(f"==> is_hash_locally_cached={cfg.ipfs.is_hash_locally_cached(self.job_key)}"
            )
        if not os.path.isdir(self.results_folder):
            os.makedirs(self.results_folder)

        _remove(f"{self.results_folder}/{self.job_key}")
        try:
            self.check_ipfs(self.job_key)
        except Exception:
            return False

        self.registered_data_hashes = []
        for idx, source_code_hash in enumerate(self.code_hashes):
            if self.cloudStorageID[idx] == StorageID.NONE:
                self.registered_data_hashes.append(source_code_hash)  # GOTCHA
            else:
                ipfs_hash = bytes32_to_ipfs(source_code_hash)
                if ipfs_hash not in self.ipfs_hashes:
                    try:  # the job_key, as a data hash, may already have been added to the list
                        self.check_ipfs(ipfs_hash)
                    except Exception:
                        return False

        initial_folder_size = calculate_size(self.results_folder)
        for idx, ipfs_hash in enumerate(self.ipfs_hashes):
            # at this point the script knows the provided IPFS hashes exist online
            is_hashed = False
            log(f"## attempting to get IPFS file: {ipfs_hash} ... ", end="")
            if cfg.ipfs.is_hash_locally_cached(ipfs_hash):
                is_hashed = True
                log(ok("already cached"))
            else:
                log()

            if idx == 0:
                target = self.results_folder
            else:
                #  "_" added before the filename in case $ ipfs get <ipfs_hash>
                target = f"{self.results_data_folder}/_{ipfs_hash}"
                mkdir(target)

            is_storage_paid = False  # TODO: should be set before by user input
            cfg.ipfs.get(ipfs_hash, target, is_storage_paid)
            if idx > 0:
                # https://stackoverflow.com/a/31814223/2402577
                dst_filename = os.path.join(self.results_data_folder,
                                            os.path.basename(ipfs_hash))
                if os.path.exists(dst_filename):
                    _remove(dst_filename)

                shutil.move(target, dst_filename)
                target = dst_filename

            if self.cloudStorageID[idx] == StorageID.IPFS_GPG:
                cfg.ipfs.decrypt_using_gpg(f"{target}/{ipfs_hash}", target)

            _git.initialize_check(target)

            if not is_hashed:
                folder_size = calculate_size(self.results_folder)
                self.data_transfer_in_to_download_mb += folder_size - initial_folder_size
                initial_folder_size = folder_size

            if idx == 0 and not self.check_run_sh():
                self.complete_refund()
                return False

        log(f"==> data_transfer_in={self.data_transfer_in_to_download_mb} MB | "
            f"rounded={int(self.data_transfer_in_to_download_mb)} MB")
        return self.sbatch_call()
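bytes32_to_ipfs converts a 32-byte digest stored on-chain back into a CIDv0; the standard construction prepends the sha2-256 multihash header before base58-encoding. A sketch under that assumption (the project's own implementation is not shown here):

    import base58

    def bytes32_to_ipfs(digest: bytes) -> str:
        # a CIDv0 is the base58 encoding of 0x12 (sha2-256) and 0x20
        # (digest length, 32 bytes) followed by the digest itself
        return base58.b58encode(b"\x12\x20" + digest).decode("utf-8")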