def test_vsids(metadata_store, freezer):
    """Exercise the VSIDS-based channel popularity bookkeeping."""
    voter_key = default_eccrypto.generate_key("curve25519")
    voter_pk = voter_key.pub().key_to_bin()[10:]
    assert metadata_store.Vsids[0].bump_amount == 1.0

    channel = metadata_store.ChannelMetadata.create_channel('test', 'test')
    metadata_store.vote_bump(channel.public_key, channel.id_, voter_pk)
    freezer.move_to('2021-09-25')
    metadata_store.vote_bump(channel.public_key, channel.id_, voter_pk)

    assert channel.votes > 0.0
    assert metadata_store.Vsids[0].bump_amount > 1.0

    # Normalization for display purposes should map the top channel to 1.0
    assert channel.to_simple_dict()["votes"] == 1.0

    # Rescaling should reset both the bump amount and the channel votes
    metadata_store.Vsids[0].normalize()
    assert metadata_store.Vsids[0].bump_amount == 1.0
    assert channel.votes == 1.0

    # A vote by a different peer counts as an additional vote
    other_key = default_eccrypto.generate_key("curve25519")
    other_pk = other_key.pub().key_to_bin()[10:]
    metadata_store.vote_bump(channel.public_key, channel.id_, other_pk)
    assert channel.votes == 2.0

    freezer.move_to('2021-09-26')
    # A repeated vote supersedes the earlier one instead of adding a new vote
    metadata_store.vote_bump(channel.public_key, channel.id_, other_pk)
    assert 2.0 < channel.votes < 2.5
def setup_test(window=1, packet_size=1000, packets=10000):
    """
    Create two IPv8 nodes that will send packets to each other.

    :param window: the window size for packets (1 is synchronous)
    :param packet_size: the size of each packet
    :param packets: the number of packets to send in the experiment
    :return: a DeferredList that fires once both sides are done
    """
    configuration = get_default_configuration()
    configuration['overlays'] = []
    configuration['keys'] = []
    master_peer = Peer(default_eccrypto.generate_key(u"low"))

    def make_node(send_window):
        # Build an IPv8 instance with a single LoadOverlay attached.
        node = IPv8(configuration)
        node.keys = {'my_peer': Peer(default_eccrypto.generate_key(u"low"))}
        node.overlays = [LoadOverlay(master_peer, node.keys['my_peer'], node.endpoint,
                                     node.network, send_window, packet_size, packets)]
        return node

    sender = make_node(window)
    counterparty = make_node(0)  # the counterparty only echoes, hence window 0

    sender.overlays[0].send(counterparty.endpoint.get_address())
    return DeferredList([sender.overlays[0].done, counterparty.overlays[0].done])
def test_create_link_to_com_chain(self, monkeypatch):
    """
    Test creating a linked half-block that points back to a previous block
    in the community chain.
    """
    key = default_eccrypto.generate_key(u"curve25519")
    com_key = default_eccrypto.generate_key(u"curve25519").pub().key_to_bin()
    db = MockDBManager()
    com_link = Links(((1, ShortKey("30303030")),))
    link = FakeBlock(com_id=com_key, links=com_link)

    # Only the community chain is known to the database mock
    monkeypatch.setattr(
        MockDBManager,
        "get_chain",
        lambda _, chain_id: MockChain() if chain_id == com_key else None,
    )
    monkeypatch.setattr(MockChain, "consistent_terminal", Links((link.com_dot,)))

    block = BamiBlock.create(
        b"test", encode_raw({"id": 42}), db, key.pub().key_to_bin(), com_id=com_key
    )

    # The new block attaches to the community terminal...
    assert block.links == Links((link.com_dot,))
    assert block.com_seq_num == link.com_seq_num + 1
    assert block.com_id == com_key
    # ...while starting a fresh personal chain
    assert block.previous == Links((GENESIS_DOT,))
    assert block.sequence_number == GENESIS_SEQ
    assert block.public_key == key.pub().key_to_bin()
    assert block.signature == EMPTY_SIG
    assert block.type == b"test"
    assert block.transaction == encode_raw({"id": 42})
async def test_get_channels_peers(rest_api, endpoint, metadata_store, mock_gigachannel_community):  # pylint: disable=W0621, C0321
    """
    Test getting debug info about the state of the channels-to-peers mapping
    through the remote_query REST endpoint.
    """
    mapping = mock_gigachannel_community.channels_peers = ChannelsPeersMapping()
    peer_key = default_eccrypto.generate_key("curve25519")
    chan_key = default_eccrypto.generate_key("curve25519")
    with db_session:
        chan = metadata_store.ChannelMetadata(sign_with=chan_key, name="bla", infohash=random_infohash())
        known_peer = Peer(peer_key, ("1.2.3.4", 5))
        mapping.add(known_peer, chan.public_key, chan.id_)
        # Capture the values while the db session is still open
        chan_title, chan_pk, chan_id = chan.title, chan.public_key, chan.id_

    response = await do_request(
        rest_api,
        'remote_query/channels_peers',
        request_type="GET",
        expected_code=200,
    )
    entry = response["channels_list"][0]
    assert entry["channel_name"] == chan_title
    assert entry["channel_pk"] == hexlify(chan_pk)
    assert entry["channel_id"] == chan_id
    assert entry["peers"][0][0] == hexlify(known_peer.mid)
def test_query_on_introduction(self):
    """
    Test that we query a peer for its subscribed channels right after it is
    introduced to us, and that each peer is only queried once.
    """
    sent = []
    self.nodes[1].overlay.send_remote_select_subscribed_channels = lambda _: sent.append(1)

    introduced_peer = self.nodes[0].my_peer
    self.nodes[1].overlay.introduction_response_callback(introduced_peer, None, None)
    self.assertIn(introduced_peer.mid, self.nodes[1].overlay.queried_subscribed_channels_peers)
    self.assertTrue(sent)

    # The same peer must not be queried twice when the walker returns to it
    self.nodes[1].overlay.introduction_response_callback(introduced_peer, None, None)
    self.assertEqual(len(sent), 1)

    # When the set of queried peers outgrows its capacity, it is cleared
    self.nodes[1].overlay.queried_peers_limit = 2
    self.nodes[1].overlay.introduction_response_callback(Peer(default_eccrypto.generate_key("low")), None, None)
    self.assertEqual(len(self.nodes[1].overlay.queried_subscribed_channels_peers), 2)
    self.nodes[1].overlay.introduction_response_callback(Peer(default_eccrypto.generate_key("low")), None, None)
    self.assertEqual(len(self.nodes[1].overlay.queried_subscribed_channels_peers), 1)
def test_query_on_introduction(self):
    """
    Test that a newly introduced peer is queried for its subscribed channels,
    that repeated introductions do not trigger repeated queries, and that the
    bookkeeping set is cleared when it outgrows its capacity.
    """
    sent = []
    self.nodes[1].overlay.send_remote_select_subscribed_channels = lambda _: sent.append(1)

    introduced_peer = self.nodes[0].my_peer
    payload = Mock()
    self.nodes[1].overlay.introduction_response_callback(introduced_peer, None, payload)
    self.assertIn(introduced_peer.mid, self.nodes[1].overlay.queried_peers)
    self.assertTrue(sent)

    # The same peer must not be queried twice when the walker returns to it
    self.nodes[1].overlay.introduction_response_callback(introduced_peer, None, payload)
    self.assertEqual(len(sent), 1)

    # When the set of queried peers outgrows its capacity, it is cleared
    self.nodes[1].overlay.settings.queried_peers_limit = 2
    self.nodes[1].overlay.introduction_response_callback(Peer(default_eccrypto.generate_key("low")), None, payload)
    self.assertEqual(len(self.nodes[1].overlay.queried_peers), 2)
    self.nodes[1].overlay.introduction_response_callback(Peer(default_eccrypto.generate_key("low")), None, payload)
    # The set has been cleared, so the number of queried peers drops back to 1
    self.assertEqual(len(self.nodes[1].overlay.queried_peers), 1)

    # We should never query ourselves
    self.nodes[1].overlay.introduction_response_callback(self.nodes[1].overlay.my_peer, None, payload)
    self.assertEqual(len(sent), 3)
def test_strategy_multi_peer(self):
    """
    With multiple peers available, exactly one of them should be selected to
    receive our channel views, while the download queue is still inspected.
    """
    candidates = [Peer(default_eccrypto.generate_key(u"very-low")) for _ in range(3)]
    self.community.get_peers_return = candidates

    self.strategy.take_step()

    self.assertEqual(1, len(self.community.send_random_to_called))
    self.assertIn(self.community.send_random_to_called[0], candidates)
def test_sign_transaction():
    """A bandwidth transaction becomes fully signed after both parties sign it."""
    key_a = default_eccrypto.generate_key('curve25519')
    key_b = default_eccrypto.generate_key('curve25519')
    tx = BandwidthTransactionData(1, key_a.pub().key_to_bin(), key_b.pub().key_to_bin(),
                                  EMPTY_SIGNATURE, EMPTY_SIGNATURE, 3000)

    # Party A signs first; B's signature is still missing
    tx.sign(key_a, as_a=True)
    assert tx.is_valid()
    assert tx.signature_a != EMPTY_SIGNATURE
    assert tx.signature_b == EMPTY_SIGNATURE

    # After B signs, both signatures are present
    tx.sign(key_b, as_a=False)
    assert tx.is_valid()
    assert tx.signature_a != EMPTY_SIGNATURE
    assert tx.signature_b != EMPTY_SIGNATURE
def test_channels_peers_mapping_drop_excess_peers(self):
    """
    Test that the oldest excess peers are dropped from the channel-to-peers mapping.
    """
    mapping = ChannelsPeersMapping()
    chan_pk = Mock()
    chan_id = 123
    num_excess_peers = 20

    timestamp = time.time() - 1000
    first_peer_timestamp = timestamp
    for index in range(mapping.max_peers_per_channel + num_excess_peers):
        new_peer = Peer(default_eccrypto.generate_key("very-low"), ("1.2.3.4", 5))
        new_peer.last_response = timestamp
        timestamp += 1.0
        mapping.add(new_peer, chan_pk, chan_id)
        if index == 0:
            first_peer_timestamp = new_peer.last_response

    top_three = mapping.get_last_seen_peers_for_channel(chan_pk, chan_id, 3)
    assert len(top_three) == 3

    all_peers = mapping.get_last_seen_peers_for_channel(chan_pk, chan_id)
    assert len(all_peers) == mapping.max_peers_per_channel
    assert top_three == all_peers[0:3]
    assert all_peers == sorted(all_peers, key=lambda peer: peer.last_response, reverse=True)

    # Only the oldest peers must have been dropped as excess
    for peer in all_peers:
        assert peer.last_response > first_peer_timestamp

    # Removing a peer directly (e.g. after a query timeout) must work too
    extra_peer = Peer(default_eccrypto.generate_key("very-low"), ("1.2.3.4", 5))
    mapping.add(extra_peer, chan_pk, chan_id)
    mapping.remove_peer(extra_peer)
    for peer in all_peers:
        mapping.remove_peer(peer)
    assert mapping.get_last_seen_peers_for_channel(chan_pk, chan_id) == []

    # All internal bookkeeping must be cleaned up
    assert len(mapping._peers_channels) == 0
    assert len(mapping._channels_dict) == 0
def test_get_channels(self):
    """
    Test whether we can get channels, including filtering, sorting and
    fetching only subscribed channels.
    """
    # First we create a few channels.
    # BUG FIX: the original used `xrange`, which does not exist in Python 3
    # (the rest of this file is Python 3 code) and raises NameError.
    for ind in range(10):
        self.mds.ChannelNode._my_key = default_eccrypto.generate_key('low')
        self.mds.ChannelMetadata(title='channel%d' % ind, subscribed=(ind % 2 == 0), infohash=random_infohash())
    channels = self.mds.ChannelMetadata.get_entries(first=1, last=5)
    self.assertEqual(len(channels), 5)

    # Test filtering
    channels = self.mds.ChannelMetadata.get_entries(first=1, last=5, txt_filter='channel5')
    self.assertEqual(len(channels), 1)

    # Test sorting
    channels = self.mds.ChannelMetadata.get_entries(first=1, last=10, sort_by='title', sort_desc=True)
    self.assertEqual(len(channels), 10)
    self.assertEqual(channels[0].title, 'channel9')

    # Test fetching subscribed channels
    channels = self.mds.ChannelMetadata.get_entries(first=1, last=10, sort_by='title', subscribed=True)
    self.assertEqual(len(channels), 5)
class MockedCommunity(Community, CommunityRoutines):
    """A minimal community wired up with mock persistence and settings for tests."""

    master_peer = Peer(default_eccrypto.generate_key(u"very-low"))

    def __init__(self, *args, **kwargs):
        if kwargs.get("work_dir"):
            self.work_dir = kwargs.pop("work_dir")
        super().__init__(*args, **kwargs)
        self._req = RequestCache()

        # Let every message-state-machine base class register its handlers
        for parent in self.__class__.__bases__:
            if issubclass(parent, MessageStateMachine):
                parent.setup_messages(self)

    @property
    def persistence(self) -> BaseDB:
        return MockDBManager()

    @property
    def settings(self) -> Any:
        return MockSettings()

    @property
    def request_cache(self) -> RequestCache:
        return self._req

    def send_packet(self, *args, **kwargs) -> None:
        self.ez_send(*args, **kwargs)

    async def unload(self):
        await self._req.shutdown()
        return await super().unload()
def __init__(self, configuration, endpoint_override=None, enable_statistics=False, extra_communities=None):
    """
    Bootstrap an IPv8 stack from a configuration dict.

    Opens (or reuses) the UDP endpoint, loads or generates the configured keys,
    instantiates the configured overlays with their walk strategies, and starts
    the main tick loop.

    :param configuration: dict with 'port', 'address', 'keys', 'overlays',
        'logger' and 'walker_interval' entries
    :param endpoint_override: use this endpoint instead of opening a new UDPEndpoint
    :param enable_statistics: wrap the endpoint in a StatisticsEndpoint
    :param extra_communities: extra community classes keyed by class name,
        consulted when a class is not found in _COMMUNITIES
    """
    if endpoint_override:
        self.endpoint = endpoint_override
    else:
        self.endpoint = UDPEndpoint(port=configuration['port'], ip=configuration['address'])
        self.endpoint.open()
    if enable_statistics:
        self.endpoint = StatisticsEndpoint(self, self.endpoint)

    self.network = Network()

    # Load/generate keys: try the standardized format first, then fall back to
    # two legacy on-disk formats.
    self.keys = {}
    for key_block in configuration['keys']:
        if key_block['file'] and isfile(key_block['file']):
            with open(key_block['file'], 'r') as f:
                content = f.read()
            try:
                # IPv8 Standardized bin format
                self.keys[key_block['alias']] = Peer(default_eccrypto.key_from_private_bin(content))
            except ValueError:
                try:
                    # Try old Tribler M2Crypto PEM format
                    content = b64decode(content[31:-30].replace('\n', ''))
                    peer = Peer(M2CryptoSK(keystring=content))
                    peer.mid  # This will error out if the keystring is not M2Crypto
                    self.keys[key_block['alias']] = peer
                except:  # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit — consider narrowing
                    # Try old LibNacl format
                    content = "LibNaCLSK:" + content
                    self.keys[key_block['alias']] = Peer(default_eccrypto.key_from_private_bin(content))
        else:
            # No key on disk: generate a fresh one and persist it if a file path was configured
            self.keys[key_block['alias']] = Peer(default_eccrypto.generate_key(key_block['generation']))
            if key_block['file']:
                with open(key_block['file'], 'w') as f:
                    f.write(self.keys[key_block['alias']].key.key_to_bin())

    # Setup logging
    logging.basicConfig(**configuration['logger'])

    self.overlay_lock = RLock()
    self.strategies = []
    self.overlays = []

    for overlay in configuration['overlays']:
        # Overlay classes may come from the built-in registry or from extra_communities
        overlay_class = _COMMUNITIES.get(overlay['class'], (extra_communities or {}).get(overlay['class']))
        my_peer = self.keys[overlay['key']]
        overlay_instance = overlay_class(my_peer, self.endpoint, self.network, **overlay['initialize'])
        self.overlays.append(overlay_instance)
        for walker in overlay['walkers']:
            strategy_class = _WALKERS.get(walker['strategy'],
                                          overlay_instance.get_available_strategies().get(walker['strategy']))
            args = walker['init']
            target_peers = walker['peers']
            self.strategies.append((strategy_class(overlay_instance, **args), target_peers))
        for config in overlay['on_start']:
            # Defer overlay start-up calls until the Twisted reactor is running
            reactor.callWhenRunning(getattr(overlay_instance, config[0]), *config[1:])

    self.state_machine_lc = LoopingCall(self.on_tick)
    self.state_machine_lc.start(configuration['walker_interval'], False)
def test_process_payload_reject_older_entry(metadata_store):
    """
    Test that receiving an older version of an already known metadata entry
    is rejected with LOCAL_VERSION_NEWER.
    """
    signing_key = default_eccrypto.generate_key("curve25519")
    stale_entry = metadata_store.TorrentMetadata(
        title='blabla', timestamp=11, id_=3, infohash=random_infohash(), sign_with=signing_key)
    stale_payload = stale_entry._payload_class(**stale_entry.to_dict())
    stale_entry.delete()

    fresh_entry = metadata_store.TorrentMetadata(
        title='blabla', timestamp=12, id_=3, infohash=random_infohash(), sign_with=signing_key)

    # The older payload must be rejected in favour of the local, newer entry
    result = metadata_store.process_payload(stale_payload, skip_personal_metadata_payload=False)[0]
    assert result == ProcessingResult(md_obj=fresh_entry, obj_state=ObjState.LOCAL_VERSION_NEWER)
def test_process_payload(metadata_store):
    """
    Test processing of ChannelNode-based payloads: deletion payloads, unknown
    entries of each concrete type, and repeated processing of known entries.
    """
    metadata_store.ChannelNode._my_key = default_eccrypto.generate_key("curve25519")
    _, node_payload, node_deleted_payload = get_payloads(metadata_store.ChannelNode)

    assert not metadata_store.process_payload(node_payload)
    assert [(None, DELETED_METADATA)] == metadata_store.process_payload(node_deleted_payload)
    # Do nothing in case it is an unknown/abstract payload type, like ChannelNode
    assert not metadata_store.process_payload(node_payload)

    for md_class, expected_reaction in (
        (metadata_store.ChannelMetadata, UNKNOWN_CHANNEL),
        (metadata_store.TorrentMetadata, UNKNOWN_TORRENT),
        (metadata_store.CollectionNode, UNKNOWN_COLLECTION),
    ):
        node, node_payload, node_deleted_payload = get_payloads(md_class)
        node_dict = node.to_dict()
        node.delete()

        # Check that there is no action if trying to delete an unknown object
        assert not metadata_store.process_payload(node_deleted_payload)

        # Check if the node metadata object is properly created on payload processing
        processed_node, reaction = metadata_store.process_payload(node_payload)[0]
        assert reaction == expected_reaction
        assert node_dict['metadata_type'] == processed_node.to_dict()['metadata_type']

        # Payload processing must return the local node with NO_ACTION if we already know about it.
        # BUG FIX: the original `assert processed_node, NO_ACTION == ...` parsed everything after
        # the comma as the assert *message*, so the comparison was never evaluated.
        assert (processed_node, NO_ACTION) == metadata_store.process_payload(node_payload)[0]
def add_fake_torrents_channels(session):
    """
    Populate the metadata store with 10 channels, each holding 5 torrents
    carrying synthetic health information.
    """
    infohashes = []
    torrents_per_channel = 5
    # Add a few channels
    with db_session:
        for channel_index in range(10):
            ext_key = default_eccrypto.generate_key('curve25519')
            channel = session.mds.ChannelMetadata(
                title='channel%d' % channel_index,
                subscribed=(channel_index % 2 == 0),
                num_entries=torrents_per_channel,
                infohash=random_infohash(),
                id_=123,
                sign_with=ext_key,
                version=10,
                local_version=(channel_index % 11),
            )
            for torrent_index in range(torrents_per_channel):
                rand_infohash = random_infohash()
                infohashes.append(rand_infohash)
                torrent = session.mds.TorrentMetadata(
                    origin_id=channel.id_,
                    title='torrent%d' % torrent_index,
                    infohash=rand_infohash,
                    sign_with=ext_key,
                )
                # Derive deterministic-looking health data from the infohash
                torrent.health.seeders = int.from_bytes(torrent.infohash[:2], byteorder="big")
                torrent.health.self_checked = bool(torrent_index % 2 == 1)
                torrent.health.last_check = int(time()) - (60 * 60 * 24 * 7 if torrent_index % 2 else 0)
async def setUp(self):
    """Populate the metadata store with 10 channels of 5 torrents each."""
    await super(BaseTestMetadataEndpoint, self).setUp()
    self.infohashes = []
    torrents_per_channel = 5
    # Add a few channels
    with db_session:
        for channel_index in range(10):
            self.ext_key = default_eccrypto.generate_key('curve25519')
            channel = self.session.mds.ChannelMetadata(
                title='channel%d' % channel_index,
                subscribed=(channel_index % 2 == 0),
                num_entries=torrents_per_channel,
                infohash=random_infohash(),
                id_=123,
                sign_with=self.ext_key,
            )
            for torrent_index in range(torrents_per_channel):
                rand_infohash = random_infohash()
                self.infohashes.append(rand_infohash)
                self.session.mds.TorrentMetadata(
                    origin_id=channel.id_,
                    title='torrent%d' % torrent_index,
                    infohash=rand_infohash,
                    sign_with=self.ext_key,
                )
def test_has_valid_signature(self):
    """
    Test whether a signature can be validated correctly.
    """
    metadata = self.mds.ChannelNode.from_dict({})
    self.assertTrue(metadata.has_valid_signature())

    md_dict = metadata.to_dict()

    # Mess with the signature
    metadata.signature = b'a'
    self.assertFalse(metadata.has_valid_signature())

    # Create metadata with wrong key
    metadata.delete()
    md_dict.update(public_key=database_blob(b"aaa"))
    md_dict.pop("rowid")

    metadata = self.mds.ChannelNode(skip_key_check=True, **md_dict)
    self.assertFalse(metadata.has_valid_signature())

    key = default_eccrypto.generate_key(u"curve25519")
    metadata2 = self.mds.ChannelNode(sign_with=key, **md_dict)
    # BUG FIX: assertTrue(a, b) only checks that `a` is truthy and uses `b` as
    # the failure message; the intent here is to compare the two values.
    self.assertEqual(database_blob(key.pub().key_to_bin()[10:]), metadata2.public_key)
    md_dict2 = metadata2.to_dict()
    md_dict2["signature"] = md_dict["signature"]
    self.assertRaises(InvalidSignatureException, self.mds.ChannelNode, **md_dict2)
async def test_remote_search_mapped_peers(self):
    """
    Test that channel queries fall back to peers from the channels-peers
    mapping when no random peers are available.
    """
    key = default_eccrypto.generate_key("curve25519")
    channel_pk = key.pub().key_to_bin()[10:]
    channel_id = 123
    query_kwargs = {"channel_pk": channel_pk, "origin_id": channel_id}

    await self.introduce_nodes()

    searcher = self.nodes[2].overlay
    source_peer = searcher.get_peers()[0]
    searcher.channels_peers.add(source_peer, channel_pk, channel_id)
    searcher.notifier = None

    # Disable random peers, so the channels-peers map is the only peer source
    searcher.get_random_peers = lambda _: []
    searcher.send_remote_select = Mock()
    searcher.send_search_request(**query_kwargs)

    # At least one mapped peer must have been queried
    searcher.send_remote_select.assert_called()
async def test_get_known_subscribed_peers_for_node(self):
    """
    A peer subscribed to a channel must be returned for the channel itself and
    for any (nested) collection inside it, but not for orphaned collections.
    """
    key = default_eccrypto.generate_key("curve25519")
    with db_session:
        channel = self.overlay(0).mds.ChannelMetadata(origin_id=0, infohash=random_infohash(), sign_with=key)
        folder1 = self.overlay(0).mds.CollectionNode(origin_id=channel.id_, sign_with=key)
        folder2 = self.overlay(0).mds.CollectionNode(origin_id=folder1.id_, sign_with=key)
        orphan = self.overlay(0).mds.CollectionNode(origin_id=123123, sign_with=key)
        # Capture (public_key, id_) pairs while the db session is still open
        reachable_nodes = [(n.public_key, n.id_) for n in (channel, folder1, folder2)]
        orphan_node = (orphan.public_key, orphan.id_)

    source_peer = self.nodes[1].my_peer
    self.overlay(0).channels_peers.add(source_peer, reachable_nodes[0][0], reachable_nodes[0][1])

    for node_pk, node_id in reachable_nodes:
        assert [source_peer] == self.overlay(0).get_known_subscribed_peers_for_node(node_pk, node_id)
    assert [] == self.overlay(0).get_known_subscribed_peers_for_node(orphan_node[0], orphan_node[1])
def test_get_channels(metadata_store):
    """
    Test whether we can get channels, including filtering, sorting and
    fetching only subscribed channels.
    """
    # First we create a few channels
    for ind in range(10):
        metadata_store.ChannelNode._my_key = default_eccrypto.generate_key('low')
        metadata_store.ChannelMetadata(title='channel%d' % ind, subscribed=(ind % 2 == 0),
                                       infohash=random_infohash())

    assert len(metadata_store.ChannelMetadata.get_entries(first=1, last=5)) == 5

    # Test filtering
    assert len(metadata_store.ChannelMetadata.get_entries(first=1, last=5, txt_filter='channel5')) == 1

    # Test sorting
    sorted_channels = metadata_store.ChannelMetadata.get_entries(first=1, last=10, sort_by='title', sort_desc=True)
    assert len(sorted_channels) == 10
    assert sorted_channels[0].title == 'channel9'

    # Test fetching subscribed channels
    assert len(metadata_store.ChannelMetadata.get_entries(first=1, last=10, sort_by='title', subscribed=True)) == 5
def test_delete_recursive(metadata_store):
    """
    Test deleting a channel with and without recursively deleting its contents.
    """
    # A recursive (default) delete removes the channel together with all contents
    src_chan = create_ext_chan(metadata_store, default_eccrypto.generate_key("curve25519"))
    src_chan.delete()
    assert not metadata_store.ChannelNode.select().count()

    # A non-recursive delete removes only the channel node itself
    src_chan = create_ext_chan(metadata_store, default_eccrypto.generate_key("curve25519"))
    deleted_rowid = src_chan.rowid
    src_chan.delete(recursive=False)
    assert metadata_store.ChannelNode.select().count() == 7
    with pytest.raises(ObjectNotFound):
        metadata_store.ChannelNode.__getitem__(deleted_rowid)
def test_vsids(self):
    """
    Test VSIDS-based channel popularity system.
    """
    peer_key = default_eccrypto.generate_key(u"curve25519")
    voter_pk = peer_key.pub().key_to_bin()[10:]
    self.assertEqual(1.0, self.mds.Vsids[0].bump_amount)

    channel = self.mds.ChannelMetadata.create_channel('test', 'test')
    self.mds.vote_bump(channel.public_key, channel.id_, voter_pk)
    # Necessary mostly on Windows, because of the lower timer resolution
    sleep(0.1)
    self.mds.vote_bump(channel.public_key, channel.id_, voter_pk)
    self.assertLess(0.0, channel.votes)
    self.assertLess(1.0, self.mds.Vsids[0].bump_amount)

    # Make sure normalization for display purposes works
    self.assertAlmostEqual(channel.to_simple_dict()["votes"], 1.0)

    # Make sure the rescale works for the channels
    self.mds.Vsids[0].normalize()
    self.assertEqual(1.0, self.mds.Vsids[0].bump_amount)
    self.assertEqual(1.0, channel.votes)
async def test_send_personal_and_random_channels(self):
    """
    Test that both the personal channel and a non-personal channel are
    gossiped to another peer.
    """
    sender = self.nodes[0].overlay
    with db_session:
        # Add a non-personal channel
        channel = sender.metadata_store.ChannelMetadata.create_channel("non-personal", "bla")
        self.add_random_torrent(sender.metadata_store.TorrentMetadata, channel=channel)
        channel.commit_channel_torrent()

        # Switching the key makes the previously created channel non-personal
        sender.metadata_store.ChannelNode._my_key = default_eccrypto.generate_key(u"curve25519")
        channel = sender.metadata_store.ChannelMetadata.create_channel("personal", "bla")
        self.add_random_torrent(sender.metadata_store.TorrentMetadata, channel=channel)
        channel.commit_channel_torrent()

    await sender.prepare_gossip_blob_cache()
    sender.send_random_to(Peer(self.nodes[1].my_peer.public_key, self.nodes[1].endpoint.wan_address))
    await self.deliver_messages(timeout=0.5)

    with db_session:
        received = self.nodes[1].overlay.metadata_store.ChannelMetadata.select()[:]
        self.assertEqual(len(received), 2)
        self.assertEqual(received[0].contents_len, 1)
        self.assertEqual(received[1].contents_len, 1)
def test_has_valid_signature(metadata_store):
    """
    Test whether a signature can be validated correctly.
    """
    metadata = metadata_store.ChannelNode.from_dict({})
    assert metadata.has_valid_signature()

    md_dict = metadata.to_dict()

    # Mess with the signature
    metadata.signature = b'a'
    assert not metadata.has_valid_signature()

    # Create metadata with wrong key
    metadata.delete()
    md_dict.update(public_key=database_blob(b"aaa"))
    md_dict.pop("rowid")

    metadata = metadata_store.ChannelNode(skip_key_check=True, **md_dict)
    assert not metadata.has_valid_signature()

    key = default_eccrypto.generate_key("curve25519")
    metadata2 = metadata_store.ChannelNode(sign_with=key, **md_dict)
    # BUG FIX: `assert a, b` treats `b` as the assert *message*, so the original
    # line never compared anything; the intent is to compare the signing key's
    # public part against the stored public key.
    assert database_blob(key.pub().key_to_bin()[10:]) == metadata2.public_key
    md_dict2 = metadata2.to_dict()
    md_dict2["signature"] = md_dict["signature"]
    with pytest.raises(InvalidSignatureException):
        metadata_store.ChannelNode(**md_dict2)
def _create_batches(num_batches=2, num_blocks=100, txs=None):
    """
    Create several batches of blocks for one freshly generated community.

    :param num_batches: number of batches to produce
    :param num_blocks: number of blocks in each batch
    :param txs: optional per-batch transaction lists, indexed by batch number
    :return: list of block batches, all sharing the same community id
    """
    com_id = default_eccrypto.generate_key(u"curve25519").pub().key_to_bin()
    batches = []
    for batch_index in range(num_batches):
        batch_txs = txs[batch_index] if txs else None
        batches.append(create_block_batch(com_id, num_blocks, batch_txs))
    return batches
def test_delete_recursive(self):
    """
    Test deleting a channel with and without recursively deleting its contents.
    """
    # A recursive (default) delete removes the channel and everything inside it
    src_chan = self.create_ext_chan(default_eccrypto.generate_key(u"curve25519"))
    src_chan.delete()
    self.assertEqual(0, self.mds.ChannelNode.select().count())

    # A non-recursive delete removes only the channel node itself
    src_chan = self.create_ext_chan(default_eccrypto.generate_key(u"curve25519"))
    deleted_rowid = src_chan.rowid
    src_chan.delete(recursive=False)
    self.assertEqual(7, self.mds.ChannelNode.select().count())
    self.assertRaises(ObjectNotFound, self.mds.ChannelNode.__getitem__, deleted_rowid)
def test_process_payload(metadata_store):
    """
    Test payload processing for every concrete metadata type: a new object is
    created for unknown payloads, deletes of unknown objects are ignored, and
    reprocessing a known payload reports LOCAL_VERSION_SAME.
    """
    sender_key = default_eccrypto.generate_key("curve25519")
    md_classes = (
        metadata_store.ChannelMetadata,
        metadata_store.TorrentMetadata,
        metadata_store.CollectionNode,
        metadata_store.ChannelDescription,
        metadata_store.ChannelThumbnail,
    )
    for md_class in md_classes:
        node, node_payload, node_deleted_payload = get_payloads(md_class, sender_key)
        node_dict = node.to_dict()
        node.delete()

        # Deleting an unknown object must be a no-op
        assert not metadata_store.process_payload(node_deleted_payload)

        # An unknown payload must result in a newly created metadata object
        result = metadata_store.process_payload(node_payload)[0]
        assert result.obj_state == ObjState.NEW_OBJECT
        assert node_dict['metadata_type'] == result.md_obj.to_dict()['metadata_type']

        # Reprocessing the same payload must not change anything
        assert metadata_store.process_payload(node_payload)[0].obj_state == ObjState.LOCAL_VERSION_SAME
def test_invalid_sign(self):
    """A block signed with a foreign key must fail the block invariants."""
    foreign_key = default_eccrypto.generate_key(u"curve25519")
    block = FakeBlock()
    block.sign(foreign_key)
    assert not block.block_invariants_valid()
def test_cleanup_pony_experimental_db(self):
    """
    Test that only old experimental Pony databases are removed by the cleanup,
    while garbled files and recent versions are left in place.
    """
    # A garbled database raises and is left untouched
    garbled_db = self.session_base_dir / 'garbled.db'
    with open(garbled_db, 'w') as f:
        f.write("123")
    self.assertRaises(sqlite3.DatabaseError, cleanup_pony_experimental_db, garbled_db)
    self.assertTrue(garbled_db.exists())

    # Create a Pony database of an older experimental version
    pony_db = self.session_base_dir / 'pony.db'
    pony_db_bak = self.session_base_dir / 'pony2.db'
    my_key = default_eccrypto.generate_key(u"curve25519")
    mds = MetadataStore(pony_db, self.session_base_dir, my_key)
    mds.shutdown()
    shutil.copyfile(pony_db, pony_db_bak)

    # Emulate the older experimental schema by dropping the MiscData table
    with contextlib.closing(sqlite3.connect(pony_db)) as connection, connection:
        cursor = connection.cursor()
        cursor.execute("DROP TABLE MiscData")

    # The older experimental version is deleted...
    cleanup_pony_experimental_db(pony_db)
    self.assertFalse(pony_db.exists())
    # ...while the recent database version is left untouched
    cleanup_pony_experimental_db(pony_db_bak)
    self.assertTrue(pony_db_bak.exists())
def make_wrong_payload(filename):
    """
    Serialize a SignedPayload carrying a deliberately invalid (all-zero)
    signature and write it to `filename`.
    """
    key = default_eccrypto.generate_key("curve25519")
    bogus_payload = SignedPayload(
        666, 0, database_blob(key.pub().key_to_bin()[10:]),
        signature=b'\x00' * 64, skip_key_check=True
    )
    with open(filename, 'wb') as output_file:
        output_file.write(bogus_payload.serialized())