Example #1
    async def send_rpc_fetchchunk(self, chunkid):
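        # yield to the event loop so other pending tasks can run before we build the request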

        await asyncio.sleep(0)

        self.__logger.debug("FETCHCHUNK REQUEST to {}".format(self.server_ip))
        self.__logger.debug("Chunkid: {}".format(chunkid_to_hex(int(chunkid))))

        # chunkid is bignum so we need to serialize it
        chunkid_str = chunkid_to_hex(int(chunkid))
        request_packet = self.generate_packet(
            ["FETCHCHUNK_REQ", {
                "chunkid": chunkid_str
            }])

        response_data = await self.__send_rpc_to_mn("FETCHCHUNK_RESP",
                                                    request_packet)

        if set(response_data.keys()) != {"chunk"}:
            raise ValueError(
                "RPC parameters are wrong for FETCHCHUNK_RESP: %s" %
                response_data.keys())

        if type(response_data["chunk"]) not in [bytes, type(None)]:
            raise TypeError("chunk is not bytes or None: %s" %
                            type(response_data["chunk"]))

        chunk_data = response_data["chunk"]

        return chunk_data
Example #2
    def failed_to_fetch_chunk(self, chunkid):
        chunk = self.__chunk_db.get(chunkid)
        if chunk is None:
            # seems like this chunk got deleted while we were fetching it
            return
        self.__logger.warning("Failed to fetch chunk %s" %
                              chunkid_to_hex(chunkid))
        chunk.last_fetch_time = dt.now()
Example #3
    def __verify_all_files_on_disk(self):
        self.__logger.debug("Verifying local files in %s" % self.__storagedir)

        # reads the filesystem and fills our DB of chunks we have
        for chunkid in self.__storage.index():
            if not self.__storage.verify(chunkid):
                self.__logger.warning(
                    "Verify failed for local file at boot, deleting: %s" %
                    chunkid_to_hex(chunkid))
                self.__storage.delete(chunkid)
Example #4
    def get_chunk_if_we_have_it(self, chunkid):
        if not self.__alias_manager.we_own_chunk(chunkid):
            raise ValueError("We don't own this chunk!")

        chunk = self.__chunk_db.get(chunkid)
        if chunk is None:
            self.__logger.info(
                "This chunk is missing from our database, is it in any valid tickets? %s"
                % chunkid_to_hex(chunkid))
            return None

        return self.get_chunk(chunk)
Example #5
    async def send_rpc_spotcheck(self, chunkid, start, end):
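        # yield to the event loop before issuing the request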
        await asyncio.sleep(0)

        self.__logger.debug("SPOTCHECK REQUEST to %s, chunkid: %s" % (self, chunkid_to_hex(chunkid)))

        # chunkid is bignum so we need to serialize it
        chunkid_str = chunkid_to_hex(chunkid)
        request_packet = self.__return_rpc_packet(self.__server_pubkey, ["SPOTCHECK_REQ", {"chunkid": chunkid_str,
                                                                                           "start": start,
                                                                                           "end": end}])

        response_data = await self.__send_rpc_to_mn("SPOTCHECK_RESP", request_packet)

        if set(response_data.keys()) != {"digest"}:
            raise ValueError("RPC parameters are wrong for SPOTCHECK_RESP: %s" % response_data.keys())

        if type(response_data["digest"]) != str:
            raise TypeError("digest is not str: %s" % type(response_data["digest"]))

        response_digest = response_data["digest"]

        self.__logger.debug("SPOTCHECK RESPONSE from %s, msg: %s" % (self, response_digest))

        return response_digest
Example #6
    async def issue_random_tests_forever(self, waittime, number_of_chunks=1):
        while True:
            await asyncio.sleep(waittime)

            chunks = self.__chunkmanager.select_random_chunks_we_have(
                number_of_chunks)
            for chunk in chunks:
                self.__logger.debug("Selected chunk %s for random check" %
                                    chunkid_to_hex(chunk.chunkid))

                # get chunk
                data = self.__chunkmanager.get_chunk(chunk)

                # pick a random range
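                # the chunk must be larger than the 1024-byte window we are about to sample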
                require_true(len(data) > 1024)
                start = random.randint(0, len(data) - 1024)
                end = start + 1024

                # calculate digest
                digest = get_pynode_digest_hex(data[start:end])
                self.__logger.debug("Digest for range %s - %s is: %s" %
                                    (start, end, digest))

                # find owners for all the alt keys who are not us
                owners = self.__aliasmanager.find_other_owners_for_chunk(
                    chunk.chunkid)

                # call RPC on all other MNs
                for owner in owners:
                    mn = self.__mn_manager.get(owner)

                    try:
                        response_digest = await mn.send_rpc_spotcheck(
                            chunk.chunkid, start, end)
                    except RPCException as exc:
                        self.__logger.info(
                            "SPOTCHECK RPC FAILED for node %s with exception %s"
                            % (owner, exc))
                    else:
                        if response_digest != digest:
                            self.__logger.warning(
                                "SPOTCHECK FAILED for node %s (%s != %s)" %
                                (owner, digest, response_digest))
                        else:
                            self.__logger.debug(
                                "SPOTCHECK SUCCESS for node %s for chunk: %s" %
                                (owner, digest))
Example #7
    def receive_rpc_spotcheck(self, data):
        # NOTE: data is untrusted!
        if not isinstance(data, dict):
            raise TypeError("Data must be a dict!")

        if set(data.keys()) != {"chunkid", "start", "end"}:
            raise ValueError("Invalid arguments for spotcheck: %s" %
                             (data.keys()))

        for k, v in data.items():
            if k in ["start", "end"]:
                if not isinstance(v, int):
                    raise TypeError("Invalid type for key %s in spotcheck" % k)
            else:
                if not isinstance(v, str):
                    raise TypeError("Invalid type for key %s in spotcheck" % k)

        chunkid = hex_to_chunkid(data["chunkid"])
        start = data["start"]
        end = data["end"]

        # check if start and end are within parameters
        if start < 0:
            raise ValueError("start is < 0")
        if start >= end:
            raise ValueError("start >= end")
        if start > NetWorkSettings.CHUNKSIZE or end > NetWorkSettings.CHUNKSIZE:
            raise ValueError("start > CHUNKSIZE or end > CHUNKSIZE")

        # we don't actually need the full chunk here, but we get it anyway as we are running verify() on it
        chunk = self.__chunkmanager.get_chunk_if_we_have_it(chunkid)
        if chunk is not None:
            # generate digest
            data = chunk[start:end]
            digest = get_pynode_digest_hex(data)
        else:
            self.__logger.info("We failed spotcheck for chunk %s" %
                               chunkid_to_hex(chunkid))
            # return a bogus hash
            digest = get_pynode_digest_hex(b'DATA WAS NOT FOUND')

        return {"digest": digest}
Example #8
async def fetch_single_chunk_via_rpc(chunkid):
    for masternode in get_chunk_owners(chunkid):
        if masternode.pastel_id == get_blockchain_connection().pastelid:
            # don't attempt to connect ourselves
            continue

        mn = masternode.get_rpc_client()

        try:
            data = await mn.send_rpc_fetchchunk(chunkid)
        except RPCException as exc:
            tasks_logger.exception(
                "FETCHCHUNK RPC FAILED for node %s with exception %s" %
                (mn.server_ip, exc))
            continue
        except (ClientConnectorError, ServerTimeoutError) as client_ex:
            tasks_logger.error('{} for {}'.format(str(client_ex), mn.server_ip))
            continue
        except Exception as ex:
            tasks_logger.exception(
                "FETCHCHUNK RPC FAILED for node %s with exception %s" %
                (mn.server_ip, ex))
            continue

        if data is None:
            tasks_logger.debug("MN %s returned None for fetchchunk %s" %
                               (mn.server_ip, chunkid))
            # chunk was not found
            continue

        # chunk received: verify that its digest matches the requested chunkid
        digest = get_pynode_digest_int(data)
        if chunkid != str(digest):
            tasks_logger.info(
                "MN %s returned bad chunk for fetchchunk %s, mismatched digest: %s"
                % (mn.server_ip, chunkid, digest))
            continue
        return data
    # nobody has this chunk
    tasks_logger.error("Unable to fetch chunk %s" %
                       chunkid_to_hex(int(chunkid)))
Example #9
        async def fetch_single_chunk_via_rpc(chunkid):
            # we need to fetch it
            found = False
            for owner in self.__aliasmanager.find_other_owners_for_chunk(
                    chunkid):
                mn = self.__mn_manager.get(owner)

                try:
                    data = await mn.send_rpc_fetchchunk(chunkid)
                except RPCException as exc:
                    self.__logger.info(
                        "FETCHCHUNK RPC FAILED for node %s with exception %s" %
                        (owner, exc))
                    continue

                if data is None:
                    self.__logger.info(
                        "MN %s returned None for fetchchunk %s" %
                        (owner, chunkid))
                    # chunk was not found
                    continue

                # verify that digest matches
                digest = get_pynode_digest_int(data)
                if chunkid != digest:
                    self.__logger.info(
                        "MN %s returned bad chunk for fetchchunk %s, mismatched digest: %s"
                        % (owner, chunkid, digest))
                    continue

                # we have the chunk, store it!
                self.__chunkmanager.store_missing_chunk(chunkid, data)
                found = True
                break

            # nobody has this chunk
            if not found:
                # TODO: fall back to reconstruct it from luby blocks
                self.__logger.error(
                    "Unable to fetch chunk %s, luby reconstruction is not yet implemented!"
                    % chunkid_to_hex(chunkid))
                self.__chunkmanager.failed_to_fetch_chunk(chunkid)
Example #10
    def get_random_missing_chunks(self, sample_size):
        now = dt.now()

        missing_chunks = []
        for chunkid, chunk in self.__missing_chunks.items():
            # if chunk has been fetched previously
            if chunk.last_fetch_time is not None:
                # only refetch it once CHUNK_REFETCH_INTERVAL seconds have elapsed since then
                elapsed = (now - chunk.last_fetch_time).total_seconds()
                if elapsed < NetWorkSettings.CHUNK_REFETCH_INTERVAL:
                    self.__logger.debug(
                        "Not refetching chunk: %s, elapsed: %s" %
                        (chunkid_to_hex(chunkid), elapsed))
                    continue

            missing_chunks.append(chunkid)

        # If the missing chunk list is no larger than the sample size, we return it all. random.sample() errors out
        # when sample_size > len(items). This also conveniently handles the case when the list is empty
        if len(missing_chunks) <= sample_size:
            return missing_chunks
        else:
            return random.sample(missing_chunks, sample_size)
Example #11
    def dump_file_db(self):
        for k, v in self.__chunk_db.items():
            self.__logger.debug("FILE %s: %s" % (chunkid_to_hex(k), v))
            self.__storage.get(k)
Example #12
    def __str__(self):
        return "chunkid: %s, verified: %s, is_ours: %s" % (
            chunkid_to_hex(self.chunkid), self.verified, self.is_ours)