Example 1
    async def get_channels_peers(self, _):
        # Get debug stats for peers serving channels
        current_time = time.time()
        result = []
        mapping = self.gigachannel_community.channels_peers
        with db_session:
            for id_tuple, peers in mapping._channels_dict.items():  # pylint:disable=W0212
                channel_pk, channel_id = id_tuple
                chan = self.mds.ChannelMetadata.get(public_key=channel_pk,
                                                    id_=channel_id)

                peers_list = []
                for p in peers:
                    peers_list.append(
                        (hexlify(p.mid), int(current_time - p.last_response)))

                chan_dict = {
                    "channel_name": chan.title if chan else None,
                    "channel_pk": hexlify(channel_pk),
                    "channel_id": channel_id,
                    "peers": peers_list,
                }
                result.append(chan_dict)

        return RESTResponse({"channels_list": result})
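For orientation, the JSON body produced by the endpoint above has roughly the shape sketched below; the concrete values are invented, and each entry in "peers" is the (hex-encoded mid, seconds since last response) pair built in the loop.

# Hypothetical response body of the channels_peers debug endpoint above (values are made up).
example_channels_peers_response = {
    "channels_list": [
        {
            "channel_name": "example channel",   # None if the channel is not in the local store
            "channel_pk": "9d3f...",             # hex-encoded channel public key (truncated here)
            "channel_id": 123,
            "peers": [["a1b2...", 42]],          # [hex mid, seconds since last response]
        }
    ]
}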
Example 2
    async def on_remote_select_response(self, peer, response):

        request = self.request_cache.get(hexlify(peer.mid), response.id)
        if request is None:
            return

        # Check for limit on the number of packets per request
        if request.packets_limit > 1:
            request.packets_limit -= 1
        else:
            self.request_cache.pop(hexlify(peer.mid), response.id)

        # We use responses for requests about subscribed channels to bump our local channels ratings
        peer_vote = peer if request.request_kwargs.get("subscribed", None) is True else None

        processing_results = await self.mds.process_compressed_mdblob_threaded(response.raw_blob, peer_vote_for_channels=peer_vote)
        # Maybe move this callback to MetadataStore side?
        if self.notifier:
            new_channels = [
                md.to_simple_dict()
                for md, result in processing_results
                if md and md.metadata_type == CHANNEL_TORRENT and result == UNKNOWN_CHANNEL and md.origin_id == 0
            ]
            if new_channels:
                self.notifier.notify(
                    NTFY.CHANNEL_DISCOVERED, {"results": new_channels, "uuid": str(CHANNELS_VIEW_UUID)}
                )
Example 3
    async def start_handle(self, download, atp):
        ltsession = self.get_session(download.config.get_hops())
        infohash = download.get_def().get_infohash()

        if infohash in self.metainfo_requests and self.metainfo_requests[infohash][0] != download:
            self._logger.info("Cancelling metainfo request(s) for infohash:%s", hexlify(infohash))
            metainfo_dl, _ = self.metainfo_requests.pop(infohash)
            # Leave the checkpoint. Any checkpoint that exists will belong to the download we are currently starting.
            await self.remove_download(metainfo_dl, remove_content=True, remove_checkpoint=False)
            self.downloads[infohash] = download

        known = {unhexlify(str(h.info_hash())): h for h in ltsession.get_torrents()}
        existing_handle = known.get(infohash)
        if existing_handle:
            # Reuse existing handle
            self._logger.debug("Reusing handle %s", hexlify(infohash))
            download.post_alert('add_torrent_alert', dict(handle=existing_handle))
        else:
            # Otherwise, add it anew
            self._logger.debug("Adding handle %s", hexlify(infohash))
            # To prevent flooding the DHT with a short burst of queries and triggering
            # flood protection, we postpone adding torrents until we get enough DHT peers.
            # The asynchronous wait should be done as close as possible to the actual
            # Libtorrent calls, so the higher-level download-adding logic does not block.
            # Otherwise, e.g. if added to the Session init sequence, this results in startup
            # time increasing by 10-20 seconds.
            # See https://github.com/Tribler/tribler/issues/5319
            if self.dht_readiness_timeout > 0 and self._dht_ready_task is not None:
                try:
                    await wait_for(shield(self._dht_ready_task), timeout=self.dht_readiness_timeout)
                except asyncio.TimeoutError:
                    self._logger.warning("Timeout waiting for libtorrent DHT getting enough peers")
            ltsession.async_add_torrent(encode_atp(atp))
        return await download.future_added
Example 4
async def test_update_multiple_metadata_entries(enable_chant, enable_api,
                                                add_fake_torrents_channels,
                                                session):
    """
    Test updating attributes of several metadata entities at once with a PATCH request to REST API
    """
    # Test handling the wrong/empty JSON gracefully
    await do_request(session,
                     'metadata',
                     expected_code=400,
                     request_type='PATCH',
                     post_data='abc')

    # Test trying to update a non-existing entry
    await do_request(
        session,
        'metadata',
        post_data=[{
            'public_key': hexlify(b'1' * 64),
            'id': 111
        }],
        expected_code=404,
        request_type='PATCH',
    )
    with db_session:
        md1 = session.mds.TorrentMetadata(title='old1',
                                          infohash=random_infohash())
        md2 = session.mds.ChannelMetadata(title='old2',
                                          infohash=random_infohash(),
                                          subscribed=False)

    NEW_NAME1 = "updated1"
    NEW_NAME2 = "updated2"
    patch_data = [
        {
            'public_key': hexlify(md1.public_key),
            'id': md1.id_,
            'title': NEW_NAME1
        },
        {
            'public_key': hexlify(md2.public_key),
            'id': md2.id_,
            'title': NEW_NAME2,
            'subscribed': 1
        },
    ]
    await do_request(session,
                     'metadata',
                     post_data=patch_data,
                     expected_code=200,
                     request_type='PATCH')
    with db_session:
        entry1 = session.mds.ChannelNode.get(rowid=md1.rowid)
        assert NEW_NAME1 == entry1.title
        assert UPDATED == entry1.status

        entry2 = session.mds.ChannelNode.get(rowid=md2.rowid)
        assert NEW_NAME2 == entry2.title
        assert UPDATED == entry2.status
        assert entry2.subscribed
Example 5
def test_ffa_serialization(metadata_store):
    """
    Test converting free-for-all (unsigned) torrent metadata to payload and back
    """
    metadata1 = metadata_store.ChannelNode.from_dict({
        "public_key": b"",
        "id_": "123"
    })
    serialized1 = metadata1.serialized()
    # Make sure sig is really zeroes
    assert hexlify(serialized1).endswith(hexlify(NULL_SIG))
    metadata1.delete()
    orm.flush()

    metadata2 = metadata_store.ChannelNode.from_payload(
        ChannelNodePayload.from_signed_blob(serialized1))
    serialized2 = metadata2.serialized()
    assert serialized1 == serialized2

    # Check that it is impossible to create an FFA node without specifying id_
    with pytest.raises(InvalidChannelNodeException):
        metadata_store.ChannelNode.from_dict({"public_key": b""})
    # Check that it is impossible to create an FFA payload with a non-null signature
    with pytest.raises(InvalidSignatureException):
        ChannelNodePayload(CHANNEL_NODE,
                           0,
                           NULL_KEY,
                           0,
                           0,
                           0,
                           signature=b"123")
    # Check that creating a pair of metadata entries does not trigger a uniqueness constraint error
    metadata_store.ChannelNode.from_dict({"public_key": b"", "id_": "124"})
    metadata_store.ChannelNode.from_dict({"public_key": b"", "id_": "125"})
Example 6
async def test_delete_multiple_metadata_entries(enable_chant, enable_api,
                                                session):
    """
    Test deleting multiple entries with JSON REST API
    """
    with db_session:
        md1 = session.mds.TorrentMetadata(title='old1',
                                          infohash=random_infohash())
        md2 = session.mds.TorrentMetadata(title='old2',
                                          infohash=random_infohash())
        assert session.mds.ChannelNode.select().count() == 2

    patch_data = [
        {
            'public_key': hexlify(md1.public_key),
            'id': md1.id_
        },
        {
            'public_key': hexlify(md2.public_key),
            'id': md2.id_
        },
    ]
    await do_request(session,
                     'metadata',
                     post_data=patch_data,
                     expected_code=200,
                     request_type='DELETE')
    with db_session:
        assert session.mds.ChannelNode.select().count() == 0
Example 7
async def test_get_channels_peers(rest_api, endpoint, metadata_store,
                                  mock_gigachannel_community):  # pylint: disable=W0621, C0321
    """
    Test getting debug info about the state of channels to peers mapping
    """

    mapping = mock_gigachannel_community.channels_peers = ChannelsPeersMapping()

    peer_key = default_eccrypto.generate_key("curve25519")
    chan_key = default_eccrypto.generate_key("curve25519")
    with db_session:
        chan = metadata_store.ChannelMetadata(sign_with=chan_key,
                                              name="bla",
                                              infohash=random_infohash())

    peer = Peer(peer_key, ("1.2.3.4", 5))
    mapping.add(peer, chan.public_key, chan.id_)

    result = await do_request(
        rest_api,
        'remote_query/channels_peers',
        request_type="GET",
        expected_code=200,
    )
    first_result = result["channels_list"][0]
    assert first_result["channel_name"] == chan.title
    assert first_result["channel_pk"] == hexlify(chan.public_key)
    assert first_result["channel_id"] == chan.id_
    assert first_result["peers"][0][0] == hexlify(peer.mid)
Example 8
    async def remove_download(self,
                              download,
                              remove_content=False,
                              remove_checkpoint=True):
        infohash = download.get_def().get_infohash()
        handle = download.handle

        # Note that the following block of code needs to be able to deal with multiple simultaneous
        # calls using the same download object. We need to make sure that we don't return without
        # the removal having finished.
        if handle:
            if handle.is_valid():
                download.stream.disable()
                self._logger.debug("Removing handle %s", hexlify(infohash))
                ltsession = self.get_session(download.config.get_hops())
                ltsession.remove_torrent(handle, int(remove_content))
            # We need to wait even if the handle is invalid. It's important to synchronize
            # here because the upcoming call to shutdown will also cancel future_removed.
            await download.future_removed
        else:
            self._logger.debug(
                "Cannot remove handle %s because it does not exist",
                hexlify(infohash))
        await download.shutdown()

        if infohash in self.downloads and self.downloads[infohash] == download:
            self.downloads.pop(infohash)
            if remove_checkpoint:
                self.remove_config(infohash)
        else:
            self._logger.debug("Cannot remove unknown download")
Example 9
    async def regenerate_channel_torrent(self, channel_pk, channel_id):
        self._logger.info("Regenerating personal channel %s %i",
                          hexlify(channel_pk), channel_id)
        with db_session:
            channel = self.mds.ChannelMetadata.get(public_key=channel_pk,
                                                   id_=channel_id)
            if channel is None:
                self._logger.warning(
                    "Tried to regenerate non-existing channel %s %i",
                    hexlify(channel_pk), channel_id)
                return None
            channel_dirname = channel.dirname
        for d in self.download_manager.get_downloads_by_name(channel_dirname):
            await self.download_manager.remove_download(d, remove_content=True)
        with db_session:
            channel = self.mds.ChannelMetadata.get_for_update(
                public_key=channel_pk, id_=channel_id)
            regenerated = channel.consolidate_channel_torrent()
            # If the user created their channel, but added no torrents to it,
            # the channel torrent will not be created.
            if regenerated is None:
                return None
        tdef = TorrentDef.load_from_dict(regenerated)
        self.updated_my_channel(tdef)
        return tdef
Example 10
    def _process_scrape_response(self, body):
        """
        This function handles the response body of an HTTP tracker,
        parsing the results.
        """
        # parse the retrieved results
        if body is None:
            self.failed(msg="no response body")

        response_dict = bdecode_compat(body)
        if not response_dict:
            self.failed(msg="no valid response")

        response_list = []

        unprocessed_infohash_list = self.infohash_list[:]
        if b'files' in response_dict and isinstance(response_dict[b'files'],
                                                    dict):
            for infohash in response_dict[b'files']:
                complete = 0
                incomplete = 0
                if isinstance(response_dict[b'files'][infohash], dict):
                    complete = response_dict[b'files'][infohash].get(
                        b'complete', 0)
                    incomplete = response_dict[b'files'][infohash].get(
                        b'incomplete', 0)

                # Show complete as seeders. "complete: number of peers with the entire file, i.e. seeders (integer)"
                #  - https://wiki.theory.org/BitTorrentSpecification#Tracker_.27scrape.27_Convention
                seeders = complete
                leechers = incomplete

                # Store the information in the dictionary
                response_list.append({
                    'infohash': hexlify(infohash),
                    'seeders': seeders,
                    'leechers': leechers
                })

                # remove this infohash in the infohash list of this session
                if infohash in unprocessed_infohash_list:
                    unprocessed_infohash_list.remove(infohash)

        elif b'failure reason' in response_dict:
            self._logger.info("%s Failure as reported by tracker [%s]", self,
                              repr(response_dict[b'failure reason']))
            self.failed(msg=repr(response_dict[b'failure reason']))

        # handle the infohashes with no result (seeders/leechers = 0/0)
        for infohash in unprocessed_infohash_list:
            response_list.append({
                'infohash': hexlify(infohash),
                'seeders': 0,
                'leechers': 0
            })

        self.is_finished = True
        return {self.tracker_url: response_list}
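For reference, a decoded scrape response of the kind this parser expects might look like the sketch below; the infohash and counts are invented, and only the b'files' mapping with its b'complete'/b'incomplete' counters (or a top-level b'failure reason') matters to the code above.

# Hypothetical bdecoded scrape response, shaped like what _process_scrape_response consumes.
example_scrape_response = {
    b'files': {
        b'\x12' * 20: {          # a made-up 20-byte infohash
            b'complete': 5,      # reported as seeders
            b'incomplete': 12,   # reported as leechers
        },
    },
}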
Example 11
    async def on_remote_select_response(self, peer, response_payload):
        """
        Match the response that we received from the network to a query cache
        and process it by adding the corresponding entries to the MetadataStore database.
        This processes both direct responses and pushback (updates) responses
        """
        self.logger.info(f"Response from {hexlify(peer.mid)}")

        # ACHTUNG! the returned request cache can be any one of SelectRequest, PushbackWindow
        request = self.request_cache.get(hexlify(peer.mid), response_payload.id)
        if request is None:
            return

        # Remember that at least a single packet was received from the queried peer.
        if isinstance(request, SelectRequest):
            request.peer_responded = True

        # Check for limit on the number of packets per request
        if request.packets_limit > 1:
            request.packets_limit -= 1
        else:
            self.request_cache.pop(hexlify(peer.mid), response_payload.id)

        processing_results = await self.mds.process_compressed_mdblob_threaded(response_payload.raw_blob)
        self.logger.info(f"Response result: {processing_results}")

        # If we know about updated versions of the received stuff, push the updates back
        if isinstance(request, SelectRequest) and self.settings.push_updates_back_enabled:
            newer_entities = [r.md_obj for r in processing_results if r.obj_state == ObjState.GOT_NEWER_VERSION]
            self.send_db_results(peer, response_payload.id, newer_entities)

        if self.settings.channel_query_back_enabled:
            for result in processing_results:
                # Query back the sender for preview contents for the new channels
                # The fact that the object is previously unknown is indicated by process_payload in the
                # .obj_state property of returned ProcessingResults objects.
                if result.obj_state == ObjState.UNKNOWN_OBJECT and result.md_obj.metadata_type in (
                    CHANNEL_TORRENT,
                    COLLECTION_NODE,
                ):
                    request_dict = {
                        "metadata_type": [COLLECTION_NODE, REGULAR_TORRENT],
                        "channel_pk": hexlify(result.md_obj.public_key),
                        "origin_id": result.md_obj.id_,
                        "first": 0,
                        "last": self.settings.max_channel_query_back,
                    }
                    self.send_remote_select(peer=peer, **request_dict)

                # Query back for missing dependencies, e.g. thumbnail/description.
                # The fact that some dependency is missing is checked by the lower layer during
                # the query to process_payload and indicated through .missing_deps property of the
                # ProcessingResults objects returned by process_payload.
                for dep_query_dict in result.missing_deps:
                    self.send_remote_select(peer=peer, **dep_query_dict)

        if isinstance(request, SelectRequest) and request.processing_callback:
            request.processing_callback(request, processing_results)
Example 12
    async def check_torrent_health(self,
                                   infohash,
                                   timeout=20,
                                   scrape_now=False):
        """
        Check the health of a torrent with a given infohash.
        :param infohash: Torrent infohash.
        :param timeout: The timeout to use in the performed requests
        :param scrape_now: Flag whether we want to force scraping immediately
        """
        tracker_set = []

        # We first check whether the torrent is already in the database and checked before
        with db_session:
            result = self.tribler_session.mds.TorrentState.get(
                infohash=database_blob(infohash))
            if result:
                torrent_id = result.infohash
                last_check = result.last_check
                time_diff = time.time() - last_check
                if time_diff < MIN_TORRENT_CHECK_INTERVAL and not scrape_now:
                    self._logger.debug(
                        "time interval too short, not doing torrent health check for %s",
                        hexlify(infohash))
                    return {
                        "db": {
                            "seeders": result.seeders,
                            "leechers": result.leechers,
                            "infohash": hexlify(infohash)
                        }
                    }

                # get torrent's tracker list from DB
                tracker_set = self.get_valid_trackers_of_torrent(torrent_id)

        tasks = []
        for tracker_url in tracker_set:
            session = self._create_session_for_request(tracker_url,
                                                       timeout=timeout)
            session.add_infohash(infohash)
            tasks.append(self.connect_to_tracker(session))

        if has_bep33_support():
            # Create a (fake) DHT session for the lookup if we have support for BEP33.
            session = FakeBep33DHTSession(self.tribler_session, infohash,
                                          timeout)

        else:
            # Otherwise, fallback on the normal DHT metainfo lookups.
            session = FakeDHTSession(self.tribler_session, infohash, timeout)

        self._session_list['DHT'].append(session)
        tasks.append(self.connect_to_tracker(session))

        res = await gather(*tasks, return_exceptions=True)
        return self.on_torrent_health_check_completed(infohash, res)
Example 13
    def generate_dht_stats(self):
        self.dht_stats = {
            "num_tokens": randint(10, 50),
            "routing_table_buckets": randint(1, 10),
            "num_keys_in_store": randint(100, 500),
            "num_store_for_me": {hexlify(os.urandom(20)): randint(1, 8)},
            "num_peers_in_store": {},
            "node_id": hexlify(os.urandom(20)),
            "peer_id": hexlify(os.urandom(20)),
            "routing_table_size": randint(10, 50),
        }
Example 14
    async def get_view(self, request):
        if not self.trust_graph:
            self.initialize_graph()

        def get_bandwidth_blocks(public_key, limit=5):
            return self.trustchain_db.get_latest_blocks(
                public_key, limit=limit, block_types=[b'tribler_bandwidth'])

        def get_friends(public_key, limit=5):
            return self.trustchain_db.get_connected_users(public_key,
                                                          limit=limit)

        depth = 0
        if 'depth' in request.query:
            depth = int(request.query['depth'])

        # If depth is zero or not provided then fetch all depth levels
        fetch_all = depth == 0

        try:
            if fetch_all:
                self.trust_graph.reset(hexlify(self.public_key))
            if fetch_all or depth == 1:
                self.trust_graph.add_blocks(
                    get_bandwidth_blocks(self.public_key, limit=100))
            if fetch_all or depth == 2:
                for friend in get_friends(self.public_key):
                    self.trust_graph.add_blocks(
                        get_bandwidth_blocks(unhexlify(friend['public_key']),
                                             limit=10))
            if fetch_all or depth == 3:
                for friend in get_friends(self.public_key):
                    self.trust_graph.add_blocks(
                        get_bandwidth_blocks(unhexlify(friend['public_key'])))
                    for fof in get_friends(unhexlify(friend['public_key'])):
                        self.trust_graph.add_blocks(
                            get_bandwidth_blocks(unhexlify(fof['public_key'])))
            if fetch_all or depth == 4:
                for user_block in self.trustchain_db.get_users():
                    self.trust_graph.add_blocks(
                        get_bandwidth_blocks(
                            unhexlify(user_block['public_key'])))
        except TrustGraphException as tgex:
            self.logger.warning(tgex)

        graph_data = self.trust_graph.compute_node_graph()

        return RESTResponse({
            'root_public_key': hexlify(self.public_key),
            'graph': graph_data,
            'bootstrap': self.get_bootstrap_info(),
            'num_tx': len(graph_data['edge']),
            'depth': depth,
        })
Example 15
    def update_peer(self, mid, infohash, balance):
        """
        Update a peer with a specific mid for a specific infohash.
        """
        self.logger.debug("Updating peer with mid %s and ih %s (balance: %d)",
                          hexlify(mid), hexlify(infohash), balance)

        if mid not in self.tribler_peers:
            self.tribler_peers[mid] = {}

        self.tribler_peers[mid][infohash] = balance
Example 16
    def setup(self, config=None, hidden=False, checkpoint_disabled=False):
        """
        Create a Download object. Used internally by Session.
        @param config DownloadConfig or None (in which case a new DownloadConfig() is created)
        :returns a Deferred to which a callback can be added which returns the result of network_create_engine_wrapper.
        """
        self.hidden = hidden
        self.checkpoint_disabled = checkpoint_disabled or self.dummy
        self.config = config or DownloadConfig(
            state_dir=self.session.config.get_state_dir())

        self._logger.debug("Setup: %s", hexlify(self.tdef.get_infohash()))

        self.checkpoint()

        atp = {
            "save_path": path_util.normpath(get_default_dest_dir() / self.config.get_dest_dir()),
            "storage_mode": lt.storage_mode_t.storage_mode_sparse,
            "flags": lt.add_torrent_params_flags_t.flag_paused
                     | lt.add_torrent_params_flags_t.flag_duplicate_is_error
                     | lt.add_torrent_params_flags_t.flag_update_subscribe
        }

        if self.config.get_share_mode():
            atp["flags"] = atp["flags"] | lt.add_torrent_params_flags_t.flag_share_mode
        if self.config.get_upload_mode():
            atp["flags"] = atp["flags"] | lt.add_torrent_params_flags_t.flag_upload_mode

        resume_data = self.config.get_engineresumedata()
        if not isinstance(self.tdef, TorrentDefNoMetainfo):
            metainfo = self.tdef.get_metainfo()
            torrentinfo = lt.torrent_info(metainfo)

            atp["ti"] = torrentinfo
            if resume_data and isinstance(resume_data, dict):
                # Rewrite save_path as a global path, if it is given as a relative path
                if b"save_path" in resume_data and not path_util.isabs(
                        ensure_unicode(resume_data[b"save_path"], 'utf8')):
                    resume_data[b"save_path"] = self.state_dir / ensure_unicode(
                        resume_data[b"save_path"], 'utf8')
                atp["resume_data"] = lt.bencode(resume_data)
        else:
            atp["url"] = self.tdef.get_url() or "magnet:?xt=urn:btih:" + hexlify(self.tdef.get_infohash())
            atp["name"] = self.tdef.get_name_as_unicode()

        return atp
Example 17
    async def _on_remote_select_basic(self, peer, request_payload, force_eva_response=False):
        try:
            db_results = await self.process_rpc_query(request_payload.json)

            # When we send our response to a host, we open a window of opportunity
            # for it to push back updates
            if db_results and not self.request_cache.has(hexlify(peer.mid), request_payload.id):
                self.request_cache.add(PushbackWindow(self.request_cache, hexlify(peer.mid), request_payload.id))

            self.send_db_results(peer, request_payload.id, db_results, force_eva_response)
        except (OperationalError, TypeError, ValueError) as error:
            self.logger.error(f"Remote select. The error occurred: {error}")
Example 18
    async def test_remote_select_torrents(self):
        """
        Test requesting a torrent entry by infohash with a remote select and processing the response,
        including an updated version of the same torrent.
        """
        peer = self.nodes[0].my_peer
        mds0 = self.nodes[0].overlay.mds
        mds1 = self.nodes[1].overlay.mds

        with db_session:
            chan = mds0.ChannelMetadata.create_channel(random_string(100), "")
            torrent_infohash = random_infohash()
            torrent = mds0.TorrentMetadata(origin_id=chan.id_,
                                           infohash=torrent_infohash,
                                           title='title1')
            torrent.sign()

        processing_results = []

        def callback(request, results):  # pylint: disable=unused-argument
            processing_results.extend(results)

        self.nodes[1].overlay.send_remote_select(
            peer,
            metadata_type=REGULAR_TORRENT,
            infohash=hexlify(torrent_infohash),
            processing_callback=callback)
        await self.deliver_messages()

        assert len(processing_results) == 1
        obj = processing_results[0].md_obj
        assert isinstance(obj, mds1.TorrentMetadata)
        assert obj.title == 'title1'
        assert obj.health.seeders == 0

        with db_session:
            torrent = mds0.TorrentMetadata.get(infohash=torrent_infohash)
            torrent.timestamp += 1
            torrent.title = 'title2'
            torrent.sign()

        processing_results = []
        self.nodes[1].overlay.send_remote_select(
            peer,
            metadata_type=REGULAR_TORRENT,
            infohash=hexlify(torrent_infohash),
            processing_callback=callback)
        await self.deliver_messages()

        assert len(processing_results) == 1
        obj = processing_results[0].md_obj
        assert isinstance(obj, mds1.TorrentMetadata)
        assert obj.health.seeders == 0
Example 19
    def on_torrent_health_check_completed(self, infohash, result):
        final_response = {}
        if not result or not isinstance(result, list):
            self._logger.info("Received invalid torrent checker result")
            self.tribler_session.notifier.notify(
                NTFY.CHANNEL_ENTITY_UPDATED, {
                    "infohash": hexlify(infohash),
                    "num_seeders": 0,
                    "num_leechers": 0,
                    "last_tracker_check": int(time.time()),
                    "health": "updated"
                })
            return final_response

        torrent_update_dict = {
            'infohash': infohash,
            'seeders': 0,
            'leechers': 0,
            'last_check': int(time.time())
        }
        for response in reversed(result):
            if isinstance(response, Exception):
                final_response[response.tracker_url] = {'error': str(response)}
                continue
            elif response is None:
                self._logger.warning("Torrent health response is none!")
                continue
            response_keys = list(response.keys())
            final_response[response_keys[0]] = response[response_keys[0]][0]

            s = response[response_keys[0]][0]['seeders']
            l = response[response_keys[0]][0]['leechers']

            # More leeches is better, because undefined peers are marked as leeches in DHT
            if s > torrent_update_dict['seeders'] or \
                    (s == torrent_update_dict['seeders'] and l > torrent_update_dict['leechers']):
                torrent_update_dict['seeders'] = s
                torrent_update_dict['leechers'] = l

        self._update_torrent_result(torrent_update_dict)
        self.update_torrents_checked(torrent_update_dict)

        # TODO: DRY! Stop doing lots of formats, just make REST endpoint automatically encode binary data to hex!
        self.tribler_session.notifier.notify(
            NTFY.CHANNEL_ENTITY_UPDATED, {
                "infohash": hexlify(infohash),
                "num_seeders": torrent_update_dict["seeders"],
                "num_leechers": torrent_update_dict["leechers"],
                "last_tracker_check": torrent_update_dict["last_check"],
                "health": "updated"
            })
        return final_response
Example 20
    async def on_remote_select_response(self, peer, response_payload):
        """
        Match the response that we received from the network to a query cache
        and process it by adding the corresponding entries to the MetadataStore database.
        This processes both direct responses and pushback (updates) responses
        """
        self.logger.info(f"Response from {hexlify(peer.mid)}")

        # ACHTUNG! the returned request cache can be either a SelectRequest or PushbackWindow
        request = self.request_cache.get(hexlify(peer.mid),
                                         response_payload.id)
        if request is None:
            return

        # Check for limit on the number of packets per request
        if request.packets_limit > 1:
            request.packets_limit -= 1
        else:
            self.request_cache.pop(hexlify(peer.mid), response_payload.id)

        processing_results = await self.mds.process_compressed_mdblob_threaded(
            response_payload.raw_blob)
        self.logger.info(f"Response result: {processing_results}")

        # If we know about updated versions of the received stuff, push the updates back
        if isinstance(
                request,
                SelectRequest) and self.settings.push_updates_back_enabled:
            newer_entities = [
                md for md, result in processing_results
                if result == GOT_NEWER_VERSION
            ]
            self.send_db_results(peer, response_payload.id, newer_entities)

        # Query back the sender for preview contents for the new channels
        # TODO: maybe transform this into a processing_callback?
        if self.settings.channel_query_back_enabled:
            new_channels = [
                md for md, result in processing_results
                if result in (UNKNOWN_CHANNEL, UNKNOWN_COLLECTION)
            ]
            for channel in new_channels:
                request_dict = {
                    "channel_pk": hexlify(channel.public_key),
                    "origin_id": channel.id_,
                    "first": 0,
                    "last": self.settings.max_channel_query_back,
                }
                self.send_remote_select(peer=peer, **request_dict)

        if isinstance(request, SelectRequest) and request.processing_callback:
            request.processing_callback(request, processing_results)
Example 21
        def commit_channel_torrent(self,
                                   new_start_timestamp=None,
                                   commit_list=None):
            """
            Collect new/uncommitted and marked for deletion metadata entries, commit them to a channel torrent and
            remove the obsolete entries if the commit succeeds.
            :param new_start_timestamp: change the start_timestamp of the committed channel entry to this value
            :param commit_list: the list of ORM objects to commit into this channel torrent
            :return: The new infohash, which should be used to update the downloads
            """
            md_list = commit_list or self.get_contents_to_commit()

            if not md_list:
                return None

            try:
                update_dict, torrent = self.update_channel_torrent(md_list)
            except OSError:
                self._logger.error(
                    "Error during channel torrent commit, not going to garbage collect the channel. Channel %s",
                    hexlify(self.public_key),
                )
                return None

            if new_start_timestamp:
                update_dict['start_timestamp'] = new_start_timestamp
            # Update channel infohash, etc
            for attr, val in update_dict.items():
                setattr(self, attr, val)
            self.local_version = self.timestamp
            self.sign()

            # Change the statuses of committed entries and clean up obsolete TODELETE entries
            for g in md_list:
                if g.status in [NEW, UPDATED]:
                    g.status = COMMITTED
                elif g.status == TODELETE:
                    g.delete()

            # Write the channel mdblob to disk
            self.status = COMMITTED  # pylint: disable=W0201
            self.to_file(self._channels_dir / (self.dirname + BLOB_EXTENSION))

            self._logger.info(
                "Channel %s committed with %i new entries. New version is %i",
                hexlify(self.public_key),
                len(md_list),
                update_dict['timestamp'],
            )
            return torrent
Example 22
    async def test_check_torrent_health(self):
        """
        Test the endpoint to fetch the health of a chant-managed, infohash-only torrent
        """
        infohash = b'a' * 20
        tracker_url = 'udp://localhost:%s/announce' % self.udp_port
        self.udp_tracker.tracker_info.add_info_about_infohash(
            infohash, 12, 11, 1)

        with db_session:
            tracker_state = self.session.mds.TrackerState(url=tracker_url)
            torrent_state = self.session.mds.TorrentState(
                trackers=tracker_state, infohash=infohash)
            self.session.mds.TorrentMetadata(infohash=infohash,
                                             title='ubuntu-torrent.iso',
                                             size=42,
                                             tracker_info=tracker_url,
                                             health=torrent_state)
        url = 'metadata/torrents/%s/health?timeout=%s&refresh=1' % (
            hexlify(infohash), TORRENT_CHECK_TIMEOUT)

        # Initialize the torrent checker
        self.session.torrent_checker = TorrentChecker(self.session)
        await self.session.torrent_checker.initialize()

        # Add a mock DHT response - we need to account both for the case when BEP33 is used and for the old lookup method
        self.session.dlmgr = MockObject()
        self.session.dlmgr.get_metainfo = lambda _, **__: succeed(None)
        self.session.dlmgr.dht_health_manager = MockObject()
        dht_health_dict = {
            "infohash": hexlify(infohash),
            "seeders": 1,
            "leechers": 2
        }
        self.session.dlmgr.dht_health_manager.get_health = lambda *_, **__: succeed(
            {"DHT": [dht_health_dict]})
        self.session.dlmgr.get_channel_downloads = lambda: []

        # Left for compatibility with other tests in this object
        await self.udp_tracker.start()
        json_response = await self.do_request(url)
        self.assertIn("health", json_response)
        self.assertIn("udp://localhost:%s" % self.udp_port,
                      json_response['health'])
        if has_bep33_support():
            self.assertIn("DHT", json_response['health'])

        json_response = await self.do_request(url + '&nowait=1')
        self.assertDictEqual(json_response, {'checking': '1'})
Example 23
    def process_alert(self, alert, hops=0):
        alert_type = alert.__class__.__name__

        # Periodically, libtorrent will send us a state_update_alert, which contains the torrent status of
        # all torrents changed since the last time we received this alert.
        if alert_type == 'state_update_alert':
            for status in alert.status:
                infohash = unhexlify(str(status.info_hash))
                if infohash not in self.downloads:
                    self._logger.debug("Got state_update for unknown torrent %s", hexlify(infohash))
                    continue
                self.downloads[infohash].update_lt_status(status)

        infohash = unhexlify(str(alert.handle.info_hash() if hasattr(alert, 'handle') and alert.handle.is_valid()
                                 else getattr(alert, 'info_hash', '')))
        download = self.downloads.get(infohash)
        if download:
            if (download.handle and download.handle.is_valid())\
                    or (not download.handle and alert_type == 'add_torrent_alert') \
                    or (download.handle and alert_type == 'torrent_removed_alert'):
                download.process_alert(alert, alert_type)
            else:
                self._logger.debug("Got alert for download without handle %s: %s", hexlify(infohash), alert)
        elif infohash:
            self._logger.debug("Got alert for unknown download %s: %s", hexlify(infohash), alert)

        if alert_type == 'peer_disconnected_alert' and \
                self.tribler_session and self.tribler_session.payout_manager:
            self.tribler_session.payout_manager.do_payout(alert.pid.to_bytes())

        elif alert_type == 'session_stats_alert':
            queued_disk_jobs = alert.values['disk.queued_disk_jobs']
            queued_write_bytes = alert.values['disk.queued_write_bytes']
            num_write_jobs = alert.values['disk.num_write_jobs']

            if queued_disk_jobs == queued_write_bytes == num_write_jobs == 0:
                self.lt_session_shutdown_ready[hops] = True

            if self.session_stats_callback:
                self.session_stats_callback(alert)

        elif alert_type == "dht_pkt_alert":
            # We received a raw DHT message - decode it and check whether it is a BEP33 message.
            decoded = bdecode_compat(alert.pkt_buf)
            if decoded and b'r' in decoded:
                if b'BFsd' in decoded[b'r'] and b'BFpe' in decoded[b'r']:
                    self.dht_health_manager.received_bloomfilters(decoded[b'r'][b'id'],
                                                                  bytearray(decoded[b'r'][b'BFsd']),
                                                                  bytearray(decoded[b'r'][b'BFpe']))
Example 24
    def remove_config(self, infohash):
        if infohash not in self.downloads:
            try:
                basename = hexlify(infohash) + '.conf'
                filename = self.get_checkpoint_dir() / basename
                self._logger.debug("Removing download checkpoint %s", filename)
                if os.access(filename, os.F_OK):
                    os.remove(filename)
            except:
                # Show must go on
                self._logger.exception("Could not remove state")
        else:
            self._logger.warning(
                "Download is back, restarted? Cancelling removal! %s",
                hexlify(infohash))
Example 25
async def test_get_entry_not_found(enable_chant, enable_api, session):
    """
    Test trying to get a non-existing entry with the REST API GET request
    """
    await do_request(session,
                     'metadata/%s/%i' % (hexlify(b"0" * 64), 123),
                     expected_code=404)
Example 26
async def test_update_entry(enable_chant, enable_api, session):
    """
    Test updating a metadata entry with REST API
    """
    new_title = 'bla2'
    new_tags = "Compressed"

    with db_session:
        chan = session.mds.ChannelMetadata.create_channel(title="bla")
        chan.status = COMMITTED

    patch_params = {'title': new_title, 'tags': new_tags}

    result = await do_request(
        session,
        'metadata/%s/%i' % (hexlify(chan.public_key), chan.id_),
        request_type='PATCH',
        post_data=patch_params,
        expected_code=200,
    )

    assert new_title == result['name']
    assert new_tags == result['category']
    with db_session:
        chan = session.mds.ChannelMetadata.get_my_channels().first()
        assert chan.status == UPDATED
        assert chan.tags == new_tags
        assert chan.title == new_title
Example 27
    async def check_and_regen_personal_channels(self):
        # Check whether our channels are there but not shared because Tribler was closed unexpectedly
        try:
            with db_session:
                for channel in self.session.mds.ChannelMetadata.get_my_channels().where(
                        lambda g: g.status == COMMITTED):
                    channel_download = self.session.dlmgr.get_download(
                        bytes(channel.infohash))
                    if channel_download is None:
                        self._logger.warning(
                            "Torrent for personal channel %s %i does not exist.",
                            hexlify(channel.public_key),
                            channel.id_,
                        )
                        self.regenerate_channel_torrent(
                            channel.public_key, channel.id_)
                    else:
                        self.register_task(
                            f"Check personal channel {hexlify(channel.public_key), channel.id_}",
                            self.check_and_regen_personal_channel_torrent,
                            channel.public_key,
                            channel.id_,
                            channel_download,
                        )
        except Exception:
            self._logger.exception(
                "Error when trying to resume personal channel seeding on GigaChannel Manager startup"
            )
Example 28
    def on_save_resume_data_alert(self, alert):
        """
        Callback for the alert that contains the resume data of a specific download.
        This resume data will be written to a file on disk.
        """
        self._logger.debug(f'On save resume data alert: {alert}')
        if self.checkpoint_disabled:
            return

        resume_data = alert.resume_data
        # Make save_path relative if the torrent is saved in the Tribler state directory
        if self.state_dir and b'save_path' in resume_data:
            save_path = Path(resume_data[b'save_path'].decode('utf8'))
            if save_path.exists():
                resume_data[b'save_path'] = str(
                    save_path.normalize_to(self.state_dir))

        metainfo = {
            'infohash': self.tdef.get_infohash(),
            'name': self.tdef.get_name_as_unicode(),
            'url': self.tdef.get_url()
        } if isinstance(self.tdef,
                        TorrentDefNoMetainfo) else self.tdef.get_metainfo()

        self.config.set_metainfo(metainfo)
        self.config.set_engineresumedata(resume_data)

        # Save it to file
        basename = hexlify(resume_data[b'info-hash']) + '.conf'
        filename = self.dlmgr.get_checkpoint_dir() / basename
        # store name (for debugging)
        self.config.config['download_defaults']['name'] = self.tdef.get_name_as_unicode()
        self.config.write(str(filename))
        self._logger.debug('Saving download config to file %s', filename)
Example 29
    async def process_torrents_health(self, peer, torrent_healths):
        infohashes_to_resolve = []
        with db_session:
            for infohash, seeders, leechers, last_check in torrent_healths:
                torrent_state = self.mds.TorrentState.get(infohash=infohash)
                if torrent_state and last_check > torrent_state.last_check:
                    # Replace current information
                    torrent_state.seeders = seeders
                    torrent_state.leechers = leechers
                    torrent_state.last_check = last_check
                    self.logger.info(
                        f"{hexlify(infohash)} updated ({seeders},{leechers})")
                elif not torrent_state:
                    self.mds.TorrentState(infohash=infohash,
                                          seeders=seeders,
                                          leechers=leechers,
                                          last_check=last_check)
                    self.logger.info(
                        f"{hexlify(infohash)} added ({seeders},{leechers})")
                    infohashes_to_resolve.append(infohash)

        for infohash in infohashes_to_resolve:
            # Get a single result per infohash to avoid duplicates
            self.send_remote_select(peer=peer,
                                    infohash=hexlify(infohash),
                                    last=1)
Example 30
    async def get_metainfo(self, infohash, timeout=30, hops=None, url=None):
        """
        Lookup metainfo for a given infohash. The mechanism works by joining the swarm for the infohash connecting
        to a few peers, and downloading the metadata for the torrent.
        :param infohash: The (binary) infohash to lookup metainfo for.
        :param timeout: A timeout in seconds.
        :param hops: the number of tunnel hops to use for this lookup. If None, use config default.
        :param url: Optional URL. Can contain trackers info, etc.
        :return: The metainfo
        """
        infohash_hex = hexlify(infohash)
        if infohash in self.metainfo_cache:
            self._logger.info('Returning metainfo from cache for %s',
                              infohash_hex)
            return self.metainfo_cache[infohash]['meta_info']

        self._logger.info('Trying to fetch metainfo for %s', infohash_hex)
        if infohash in self.metainfo_requests:
            download = self.metainfo_requests[infohash][0]
            self.metainfo_requests[infohash][1] += 1
        elif infohash in self.downloads:
            download = self.downloads[infohash]
        else:
            tdef = TorrentDefNoMetainfo(infohash, 'metainfo request', url=url)
            dcfg = DownloadConfig()
            dcfg.set_hops(hops or self.download_defaults.number_hops)
            # Upload mode should prevent libtorrent from creating files
            dcfg.set_upload_mode(True)
            dcfg.set_dest_dir(self.metadata_tmpdir)
            try:
                download = self.start_download(tdef=tdef,
                                               config=dcfg,
                                               hidden=True,
                                               checkpoint_disabled=True)
            except TypeError:
                return
            self.metainfo_requests[infohash] = [download, 1]

        try:
            metainfo = download.tdef.get_metainfo() or await wait_for(
                shield(download.future_metainfo), timeout)
            self._logger.info('Successfully retrieved metainfo for %s',
                              infohash_hex)
            self.metainfo_cache[infohash] = {
                'time': timemod.time(),
                'meta_info': metainfo
            }
        except (CancelledError, asyncio.TimeoutError):
            metainfo = None
            self._logger.info('Failed to retrieve metainfo for %s',
                              infohash_hex)

        if infohash in self.metainfo_requests:
            self.metainfo_requests[infohash][1] -= 1
            if self.metainfo_requests[infohash][1] <= 0:
                await self.remove_download(download, remove_content=True)
                self.metainfo_requests.pop(infohash, None)

        return metainfo
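As a rough usage sketch (with dlmgr standing in for an instance of this download manager and infohash for a 20-byte value, both assumed here), a caller might await the lookup as below; the b'info'/b'name' access assumes the usual bencoded metainfo layout.

# Hypothetical caller of get_metainfo(); dlmgr and infohash are assumed to exist.
async def lookup_name(dlmgr, infohash):
    metainfo = await dlmgr.get_metainfo(infohash, timeout=30)
    if metainfo is None:
        return None  # lookup failed or timed out
    # Assumes the usual bencoded metainfo layout: {b'info': {b'name': ...}, ...}
    return metainfo[b'info'][b'name'].decode('utf-8')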