Example no. 1

    def testAddContact(self):
        """ Tests if the bucket handles contact additions/updates correctly """
        # Test if contacts can be added to empty list
        # Add k contacts to bucket
        for i in range(constants.k):
            tmpContact = self.contact_manager.make_contact(
                generate_id(), next(self.address_generator), 4444, 0, None)
            self.kbucket.addContact(tmpContact)
            self.assertEqual(
                self.kbucket._contacts[i], tmpContact,
                "Contact in position %d not the same as the newly-added contact"
                % i)

        # Test if contact is not added to full list
        tmpContact = self.contact_manager.make_contact(
            generate_id(), next(self.address_generator), 4444, 0, None)
        self.assertRaises(kbucket.BucketFull, self.kbucket.addContact,
                          tmpContact)

        # Test if an existing contact is updated correctly if added again
        existingContact = self.kbucket._contacts[0]
        self.kbucket.addContact(existingContact)
        self.assertEqual(
            self.kbucket._contacts.index(existingContact),
            len(self.kbucket._contacts) - 1,
            'Contact not correctly updated; it should be at the end of the list of contacts'
        )
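The assertions above pin down the usual Kademlia LRU rule: re-adding a known contact moves it to the tail, and a full bucket refuses new entries. A minimal sketch of a bucket consistent with this test (an illustration under assumed names, not the actual kbucket implementation):

class BucketFull(Exception):
    pass

class KBucket(object):
    def __init__(self, k):
        self._k = k
        self._contacts = []

    def addContact(self, contact):
        if contact in self._contacts:
            # already known: move to the tail (most recently seen)
            self._contacts.remove(contact)
            self._contacts.append(contact)
        elif len(self._contacts) < self._k:
            self._contacts.append(contact)
        else:
            raise BucketFull("No space in bucket to insert contact")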
Example no. 2

    @defer.inlineCallbacks  # needed for the yield-based test body
    def test_split_bucket(self):
        """ Tests if the routing table correctly splits k-buckets dynamically """
        self.assertEqual(self.routingTable._buckets[0].rangeMax, 2**384,
                             'Initial k-bucket range should be 0 <= range < 2**384')
        # Add k contacts
        for i in range(constants.k):
            node_id = generate_id(b'remote node %d' % i)
            contact = self.contact_manager.make_contact(node_id, '127.0.0.1', 9182, self.protocol)
            yield self.routingTable.addContact(contact)

        self.assertEqual(len(self.routingTable._buckets), 1,
                             'Only k nodes have been added; the first k-bucket should now '
                             'be full, but should not yet be split')
        # Now add 1 more contact
        node_id = generate_id(b'yet another remote node')
        contact = self.contact_manager.make_contact(node_id, '127.0.0.1', 9182, self.protocol)
        yield self.routingTable.addContact(contact)
        self.assertEqual(len(self.routingTable._buckets), 2,
                             'k+1 nodes have been added; the first k-bucket should have been '
                             'split into two new buckets')
        self.assertNotEqual(self.routingTable._buckets[0].rangeMax, 2**384,
                         'K-bucket was split, but its range was not properly adjusted')
        self.assertEqual(self.routingTable._buckets[1].rangeMax, 2**384,
                             'K-bucket was split, but the second (new) bucket\'s '
                             'max range was not set properly')
        self.assertEqual(self.routingTable._buckets[0].rangeMax,
                             self.routingTable._buckets[1].rangeMin,
                             'K-bucket was split, but the min/max ranges were '
                             'not divided properly')
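For context on the 2**384 bound: node ids here are SHA-384 digests, so the id space is [0, 2**384). Assuming the customary midpoint split (the min/max assertions above are consistent with it), the arithmetic looks like this:

ID_BITS = 384  # node ids are sha384 digests
range_min, range_max = 0, 2 ** ID_BITS
split_point = (range_min + range_max) // 2  # == 2 ** 383
# after one split: bucket 0 covers [0, split_point),
# bucket 1 covers [split_point, 2 ** 384)
assert split_point == 2 ** (ID_BITS - 1)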
Example no. 3

 def setUp(self):
     self.contact_manager = ContactManager()
     self.node_ids = [generate_id(), generate_id(), generate_id()]
     make_contact = self.contact_manager.make_contact
     self.first_contact = make_contact(self.node_ids[1], '127.0.0.1', 1000, None, 1)
     self.second_contact = make_contact(self.node_ids[0], '192.168.0.1', 1000, None, 32)
     self.second_contact_second_reference = make_contact(self.node_ids[0], '192.168.0.1', 1000, None, 32)
     self.first_contact_different_values = make_contact(self.node_ids[1], '192.168.1.20', 1000, None, 50)
Example no. 4

    def testGetContacts(self):
        # Try to get 2 contacts from an empty list
        result = self.kbucket.getContacts(2)
        self.assertEqual(
            len(result), 0,
            "Returned list should be empty; returned list length: %d" %
            (len(result)))

        # Add k-2 contacts
        node_ids = []
        if constants.k >= 2:
            for i in range(constants.k - 2):
                node_ids.append(generate_id())
                tmpContact = self.contact_manager.make_contact(
                    node_ids[-1], next(self.address_generator), 4444, 0, None)
                self.kbucket.addContact(tmpContact)
        else:
            # add k contacts
            for i in range(constants.k):
                node_ids.append(generate_id())
                tmpContact = self.contact_manager.make_contact(
                    node_ids[-1], next(self.address_generator), 4444, 0, None)
                self.kbucket.addContact(tmpContact)

        # try to get too many contacts
        # requested count greater than bucket size; should return at most k contacts
        contacts = self.kbucket.getContacts(constants.k + 3)
        self.assertTrue(
            len(contacts) <= constants.k,
            'Returned list should not have more than k entries!')

        # verify the returned contacts are in the list
        for node_id, i in zip(node_ids, range(constants.k - 2)):
            self.assertEqual(
                self.kbucket._contacts[i].id, node_id,
                "Contact in position %d is not the same as the added contact" % i)

        # try to get too many contacts
        # requested count one greater than number of contacts
        if constants.k >= 2:
            result = self.kbucket.getContacts(constants.k - 1)
            self.assertEqual(
                len(result), constants.k - 2,
                "Too many contacts in returned list %s - should be %s" %
                (len(result), constants.k - 2))
        else:
            result = self.kbucket.getContacts(constants.k - 1)
            # if the count is <= 0, it should return all of its contacts
            self.assertEqual(
                len(result), constants.k,
                "Wrong number of contacts in returned list %s - should be %s" %
                (len(result), constants.k))
            result = self.kbucket.getContacts(constants.k - 3)
            self.assertEqual(
                len(result), constants.k,
                "Wrong number of contacts in returned list %s - should be %s" %
                (len(result), constants.k))
Example no. 5
    def setup(self):
        """Create the blob directory and database if necessary, start all desired services"""

        log.debug("Setting up the lbry session")

        if self.lbryid is None:
            self.lbryid = generate_id()

        if self.wallet is None:
            self.wallet = PTCWallet(self.db_dir)

        if self.peer_manager is None:
            self.peer_manager = PeerManager()

        if self.use_upnp is True:
            d = self._try_upnp()
        else:
            d = defer.succeed(True)

        if self.peer_finder is None:
            d.addCallback(lambda _: self._setup_dht())
        else:
            if self.hash_announcer is None and self.peer_port is not None:
                log.warning("The server has no way to advertise its available blobs.")
                self.hash_announcer = DummyHashAnnouncer()

        d.addCallback(lambda _: self._setup_other_components())
        return d
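setup() returns a Twisted Deferred that chains UPnP, the DHT, and the remaining components. A hypothetical caller (the session variable is assumed) would wait on it like this:

d = session.setup()
d.addCallback(lambda _: log.info("lbry session ready"))
d.addErrback(lambda failure: log.error("session setup failed: %s", failure))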
Example no. 6
def main(args=None):
    conf.initialize_settings()
    parser = argparse.ArgumentParser()
    parser.add_argument('--limit', type=int)
    parser.add_argument('--download', action='store_true',
                        help='Set flag to also download each sd_blob and report on success')
    args = parser.parse_args(args)

    log_support.configure_console()
    log_support.configure_twisted()

    # make a fresh dir or else we will include blobs that we've
    # already downloaded but might not otherwise be available.
    db_dir = tempfile.mkdtemp()
    try:
        blob_dir = os.path.join(db_dir, 'blobfiles')
        os.makedirs(blob_dir)
        storage = Wallet.InMemoryStorage()
        wallet = Wallet.LBRYumWallet(storage)
        session = Session.Session(
            0,
            db_dir=db_dir,
            lbryid=utils.generate_id(),
            blob_dir=blob_dir,
            dht_node_port=4444,
            known_dht_nodes=conf.settings['known_dht_nodes'],
            peer_port=3333,
            use_upnp=False,
            wallet=wallet
        )
        api = analytics.Api.new_instance()
        run(args, session, api)
        reactor.run()
    finally:
        shutil.rmtree(db_dir)
Example no. 7
    def update_network(self):
        import random
        dl = []
        announced_blobs = []

        for node in self.nodes:  # random events
            if random.randint(0, 10000) < 75 and announced_blobs:  # get peers for a blob
                log.info('find blob')
                # announced_blobs holds (blob_hash, node_id) pairs
                blob_hash, _ = random.choice(announced_blobs)
                dl.append(node.getPeersForBlob(blob_hash))
            if random.randint(0, 10000) < 25:  # announce a blob
                log.info('announce blob')
                blob_hash = generate_id()
                announced_blobs.append((blob_hash, node.node_id))
                dl.append(node.announceHaveBlob(blob_hash))

        random.shuffle(self.nodes)

        # kill nodes
        while random.randint(0, 100) > 95:
            dl.append(self.pop_node())
            log.info('pop node')

        # add nodes
        while random.randint(0, 100) > 95:
            dl.append(self.add_node())
            log.info('add node')
        return tuple(dl), announced_blobs
Example no. 8
    def _generateID(self):
        """ Generates an n-bit pseudo-random identifier

        @return: A globally unique n-bit pseudo-random identifier
        @rtype: str
        """
        return generate_id()
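None of the examples show generate_id() itself. Going by the comment in the Settings example further down ('returns the sha384 hash digest of a 512-bit random number') and by calls such as generate_id(1) and generate_id(b'node2') elsewhere on this page, a plausible sketch is the following; treat the exact hashing of the optional num argument as an assumption:

import hashlib
import random

def generate_id(num=None):
    h = hashlib.sha384()
    if num is not None:
        h.update(str(num))  # deterministic ids for tests
    else:
        h.update(str(random.getrandbits(512)))  # 512 random bits
    return h.digest()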
Example no. 10

 def _claim(self):
     return ClaimDict.load_dict({
         "version": "_0_0_1",
         "claimType": "streamType",
         "stream": {
             "source": {
                 "source": generate_id(self.random.getrandbits(512)).encode('hex'),
                 "version": "_0_0_1",
                 "contentType": "video/mp4",
                 "sourceType": "lbry_sd_hash"
             },
             "version": "_0_0_1",
             "metadata": {
                 "license": "LBRY Inc",
                 "description": "What is LBRY? An introduction with Alex Tabarrok",
                 "language": "en",
                 "title": "What is LBRY?",
                 "author": "Samuel Bryan",
                 "version": "_0_1_0",
                 "nsfw": False,
                 "licenseUrl": "",
                 "preview": "",
                 "thumbnail": "https://s3.amazonaws.com/files.lbry.io/logo.png"
             }
         }
     })
Example no. 12
    @defer.inlineCallbacks  # needed for the yield-based test body
    def test_nullify_token(self):
        blob_hash = generate_id(1)
        announcing_node = self.nodes[20]
        # announce the blob
        announce_d = announcing_node.announceHaveBlob(blob_hash)
        self.pump_clock(5 + 1)
        storing_node_ids = yield announce_d
        self.assertEqual(len(storing_node_ids), 8)

        for node in set(self.nodes).union(set(self._seeds)):
            # now, everyone has the wrong token
            node.change_token()
            node.change_token()

        announce_d = announcing_node.announceHaveBlob(blob_hash)
        self.pump_clock(5 + 1)
        storing_node_ids = yield announce_d
        self.assertEqual(
            len(storing_node_ids),
            0)  # can't store, wrong tokens, but they get nullified

        announce_d = announcing_node.announceHaveBlob(blob_hash)
        self.pump_clock(5 + 1)
        storing_node_ids = yield announce_d
        self.assertEqual(len(storing_node_ids),
                         8)  # next attempt succeeds as it refreshes tokens
Example no. 13
 def setUp(self):
     self.clock = task.Clock()
     self.contact_manager = ContactManager(self.clock.seconds)
     self.contact = self.contact_manager.make_contact(
         generate_id(), "127.0.0.1", 4444, None)
     self.clock.advance(3600)
     self.assertIsNone(self.contact.contact_is_good)
Example no. 15

    def testRemoveContact(self):
        # Try to remove a contact from an empty bucket
        rmContact = self.contact_manager.make_contact(
            generate_id(), next(self.address_generator), 4444, 0, None)
        self.assertRaises(ValueError, self.kbucket.removeContact, rmContact)

        # Add a couple of contacts
        for i in range(constants.k - 2):
            tmpContact = self.contact_manager.make_contact(
                generate_id(), next(self.address_generator), 4444, 0, None)
            self.kbucket.addContact(tmpContact)

        # Add the contact, then remove it
        self.kbucket.addContact(rmContact)
        self.kbucket.removeContact(rmContact)
        self.assertFalse(rmContact in self.kbucket._contacts,
                         "Could not remove contact from bucket")
Example no. 16

 @defer.inlineCallbacks  # needed for the yield-based test body
 def test_get_contact(self):
     """ Tests if a specific existing contact can be retrieved correctly """
     contact_id = generate_id(b'node2')
     contact = self.contact_manager.make_contact(contact_id, '127.0.0.1', 9182, self.protocol)
     # Now add it...
     yield self.routingTable.addContact(contact)
     # ...and get it again
     same_contact = self.routingTable.getContact(contact_id)
     self.assertEqual(contact, same_contact, 'getContact() should return the same contact')
Example no. 17
@defer.inlineCallbacks  # needed for the yield/defer.returnValue style below
def join_network(udp_port, known_nodes):
    lbryid = generate_id()

    log.info('Creating node')
    node = Node(udpPort=udp_port, node_id=lbryid)

    log.info('Joining network')
    yield node.joinNetwork(known_nodes)

    defer.returnValue(node)
Example no. 19

 @defer.inlineCallbacks  # needed for the yield-based test body
 def test_add_contact(self):
     """ Tests if a contact can be added and retrieved correctly """
     # Create the contact
     contact_id = generate_id(b'node2')
     contact = self.contact_manager.make_contact(contact_id, '127.0.0.1', 9182, self.protocol)
     # Now add it...
     yield self.routingTable.addContact(contact)
     # ...and request the closest nodes to it (will retrieve it)
     closest_nodes = self.routingTable.findCloseNodes(contact_id)
     self.assertEqual(len(closest_nodes), 1)
     self.assertIn(contact, closest_nodes)
Example no. 20
 def get_node_id(self):
     node_id_filename = os.path.join(self.ensure_data_dir(), "node_id")
     if not self._node_id:
         if os.path.isfile(node_id_filename):
             with open(node_id_filename, "r") as node_id_file:
                 self._node_id = base58.b58decode(str(node_id_file.read()).strip())
     if not self._node_id:
         self._node_id = utils.generate_id()
         with open(node_id_filename, "w") as node_id_file:
             node_id_file.write(base58.b58encode(self._node_id))
     return self._node_id
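The node id is persisted through a base58 round-trip so that raw digest bytes never land in the text file. A quick self-contained check (assuming the base58 package):

import base58

raw_id = utils.generate_id()                # raw sha384 bytes
encoded = base58.b58encode(raw_id)          # ASCII-safe form written to "node_id"
assert base58.b58decode(encoded) == raw_id  # decoding restores the original bytes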
Example no. 21
 def get_installation_id(self):
     install_id_filename = os.path.join(self.ensure_data_dir(), "install_id")
     if not self._installation_id:
         if os.path.isfile(install_id_filename):
             with open(install_id_filename, "r") as install_id_file:
                 self._installation_id = str(install_id_file.read()).strip()
     if not self._installation_id:
         self._installation_id = base58.b58encode(utils.generate_id())
         with open(install_id_filename, "w") as install_id_file:
             install_id_file.write(self._installation_id)
     return self._installation_id
Example no. 22
File: conf.py Project: SIGAUni/lbry
 def get_node_id(self):
     node_id_filename = os.path.join(self.ensure_data_dir(), "node_id")
     if not self._node_id:
         if os.path.isfile(node_id_filename):
             with open(node_id_filename, "r") as node_id_file:
                 self._node_id = base58.b58decode(node_id_file.read())
     if not self._node_id:
         self._node_id = utils.generate_id()
         with open(node_id_filename, "w") as node_id_file:
             node_id_file.write(base58.b58encode(self._node_id))
     return self._node_id
Example no. 23
 def get_lbry_id(self):
     lbry_id_filename = os.path.join(self.ensure_data_dir(), 'lbryid')
     if not self._lbry_id:
         if os.path.isfile(lbry_id_filename):
             with open(lbry_id_filename, 'r') as lbryid_file:
                 self._lbry_id = base58.b58decode(lbryid_file.read())
     if not self._lbry_id:
         self._lbry_id = utils.generate_id()
         with open(lbry_id_filename, 'w') as lbryid_file:
             lbryid_file.write(base58.b58encode(self._lbry_id))
     return self._lbry_id
Example no. 24
 def get_installation_id(self):
     install_id_filename = os.path.join(self.ensure_data_dir(), "install_id")
     if not self._installation_id:
         if os.path.isfile(install_id_filename):
             with open(install_id_filename, "r") as install_id_file:
                 self._installation_id = install_id_file.read()
     if not self._installation_id:
         self._installation_id = base58.b58encode(utils.generate_id())
         with open(install_id_filename, "w") as install_id_file:
             install_id_file.write(self._installation_id)
     return self._installation_id
Example no. 25
 def setUp(self):
     self.contact_manager = ContactManager()
     self.node_ids = [generate_id(), generate_id(), generate_id()]
     self.firstContact = self.contact_manager.make_contact(
         self.node_ids[1], '127.0.0.1', 1000, None, 1)
     self.secondContact = self.contact_manager.make_contact(
         self.node_ids[0], '192.168.0.1', 1000, None, 32)
     self.secondContactCopy = self.contact_manager.make_contact(
         self.node_ids[0], '192.168.0.1', 1000, None, 32)
     self.firstContactDifferentValues = self.contact_manager.make_contact(
         self.node_ids[1], '192.168.1.20', 1000, None, 50)
     self.assertRaises(ValueError, self.contact_manager.make_contact,
                       self.node_ids[1], '192.168.1.20', 100000, None)
     self.assertRaises(ValueError, self.contact_manager.make_contact,
                       self.node_ids[1], '192.168.1.20.1', 1000, None)
     self.assertRaises(ValueError, self.contact_manager.make_contact,
                       self.node_ids[1], 'this is not an ip', 1000, None)
     self.assertRaises(ValueError, self.contact_manager.make_contact,
                       "this is not a node id", '192.168.1.20.1', 1000,
                       None)
Example no. 26
def mock_node_generator(count=None, mock_node_ids=MOCK_DHT_NODES):
    if mock_node_ids is None:
        mock_node_ids = MOCK_DHT_NODES

    for num, node_ip in enumerate(address_generator()):
        if count and num >= count:
            break
        if num >= len(mock_node_ids):
            node_id = generate_id().encode('hex')
        else:
            node_id = mock_node_ids[num]
        yield (node_id, node_ip)
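A hypothetical use of mock_node_generator(), which yields (node_id, ip) pairs until the requested count is reached:

for node_id, node_ip in mock_node_generator(count=3):
    log.info("mock node %s at %s", node_id, node_ip)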
Example no. 27

 @defer.inlineCallbacks  # needed for the yield-based test body
 def test_remove_contact(self):
     """ Tests contact removal """
     # Create the contact
     contact_id = generate_id(b'node2')
     contact = self.contact_manager.make_contact(contact_id, '127.0.0.1', 9182, self.protocol)
     # Now add it...
     yield self.routingTable.addContact(contact)
     # Verify addition
     self.assertEqual(len(self.routingTable._buckets[0]), 1, 'Contact not added properly')
     # Now remove it
     self.routingTable.removeContact(contact)
     self.assertEqual(len(self.routingTable._buckets[0]), 0, 'Contact not removed properly')
Example no. 28
    def __init__(self,
                 fixed_defaults,
                 adjustable_defaults,
                 persisted_settings=None,
                 environment=None,
                 cli_settings=None):

        self._installation_id = None
        # utils.generate_id(): returns the sha384 hash digest of a 512-bit random number
        self._session_id = base58.b58encode(utils.generate_id())
        self._node_id = None

        self._fixed_defaults = fixed_defaults
        self._adjustable_defaults = adjustable_defaults

        self._data = {
            TYPE_DEFAULT: {},  # defaults
            TYPE_PERSISTED: {},  # stored settings from daemon_settings.yml (or from a db, etc)
            TYPE_ENV: {},  # settings from environment variables
            TYPE_CLI: {},  # command-line arguments
            TYPE_RUNTIME: {},  # set during runtime (using self.set(), etc)
        }

        # the order in which a piece of data is searched for. earlier types override later types
        self._search_order = (TYPE_RUNTIME, TYPE_CLI, TYPE_ENV, TYPE_PERSISTED,
                              TYPE_DEFAULT)

        # assign both the fixed and the adjustable defaults to self._data[TYPE_DEFAULT]
        self._data[TYPE_DEFAULT].update(self._fixed_defaults)
        self._data[TYPE_DEFAULT].update(
            {k: v[1] for (k, v) in self._adjustable_defaults.iteritems()})

        if persisted_settings is None:
            persisted_settings = {}
        # validate that the keys of the passed-in settings are a subset of
        # the keys of self._data[TYPE_DEFAULT]
        self._validate_settings(persisted_settings)
        self._data[TYPE_PERSISTED].update(persisted_settings)

        # pull the ADJUSTABLE_SETTINGS values out of the environment variables
        env_settings = self._parse_environment(environment)
        self._validate_settings(env_settings)
        self._data[TYPE_ENV].update(env_settings)

        if cli_settings is None:
            cli_settings = {}
        self._validate_settings(cli_settings)
        self._data[TYPE_CLI].update(cli_settings)
Example no. 29
 def __init__(self, lbryid, seeds, node_port, rpc_port):
     AuthJSONRPCServer.__init__(self, False)
     self.root = None
     self.port = None
     self.seeds = seeds
     self.node_port = node_port
     self.rpc_port = rpc_port
     if lbryid:
         lbryid = lbryid.decode('hex')
     else:
         lbryid = generate_id()
     self.node_id = lbryid
     self.external_ip = get_external_ip_and_setup_upnp()
Example no. 30
File: conf.py Project: nasht12/lbry
    def __init__(self,
                 fixed_defaults,
                 adjustable_defaults,
                 persisted_settings=None,
                 environment=None,
                 cli_settings=None):

        self._installation_id = None
        self._session_id = base58.b58encode(utils.generate_id())
        self._node_id = None

        self._fixed_defaults = fixed_defaults
        self._adjustable_defaults = adjustable_defaults

        self._data = {
            TYPE_DEFAULT: {},  # defaults
            TYPE_PERSISTED: {},  # stored settings from daemon_settings.yml (or from a db, etc)
            TYPE_ENV: {},  # settings from environment variables
            TYPE_CLI: {},  # command-line arguments
            TYPE_RUNTIME: {},  # set during runtime (using self.set(), etc)
        }

        # the order in which a piece of data is searched for. earlier types override later types
        self._search_order = (TYPE_RUNTIME, TYPE_CLI, TYPE_ENV, TYPE_PERSISTED,
                              TYPE_DEFAULT)

        # types of data where user specified config values can be stored
        self._user_specified = (TYPE_RUNTIME, TYPE_CLI, TYPE_ENV,
                                TYPE_PERSISTED)

        self._data[TYPE_DEFAULT].update(self._fixed_defaults)
        self._data[TYPE_DEFAULT].update(
            {k: v[1] for (k, v) in self._adjustable_defaults.iteritems()})

        if persisted_settings is None:
            persisted_settings = {}
        self._validate_settings(persisted_settings)
        self._data[TYPE_PERSISTED].update(persisted_settings)

        env_settings = self._parse_environment(environment)
        self._validate_settings(env_settings)
        self._data[TYPE_ENV].update(env_settings)

        if cli_settings is None:
            cli_settings = {}
        self._validate_settings(cli_settings)
        self._data[TYPE_CLI].update(cli_settings)
Example no. 31
    @defer.inlineCallbacks  # needed for the yield-based test body
    def test_store_and_expire(self):
        blob_hash = generate_id(1)
        announcing_node = self.nodes[20]
        # announce the blob
        announce_d = announcing_node.announceHaveBlob(blob_hash)
        self.pump_clock(5)
        storing_node_ids = yield announce_d
        all_nodes = set(self.nodes).union(set(self._seeds))

        # verify the nodes we think stored it did actually store it
        storing_nodes = [node for node in all_nodes if node.node_id.encode('hex') in storing_node_ids]
        self.assertEquals(len(storing_nodes), len(storing_node_ids))
        self.assertEquals(len(storing_nodes), constants.k)
        for node in storing_nodes:
            self.assertTrue(node._dataStore.hasPeersForBlob(blob_hash))
            datastore_result = node._dataStore.getPeersForBlob(blob_hash)
            self.assertEquals(map(lambda contact: (contact.id, contact.address, contact.port),
                                  node._dataStore.getStoringContacts()), [(announcing_node.node_id,
                                                                           announcing_node.externalIP,
                                                                           announcing_node.port)])
            self.assertEquals(len(datastore_result), 1)
            expanded_peers = []
            for peer in datastore_result:
                host = ".".join([str(ord(d)) for d in peer[:4]])
                port, = struct.unpack('>H', peer[4:6])
                peer_node_id = peer[6:]
                if (peer_node_id, host, port) not in expanded_peers:
                    expanded_peers.append((peer_node_id, host, port))
            self.assertEquals(expanded_peers[0],
                              (announcing_node.node_id, announcing_node.externalIP, announcing_node.peerPort))

        # verify the announced blob expires in the storing nodes datastores

        self.clock.advance(constants.dataExpireTimeout)         # skip the clock directly ahead
        for node in storing_nodes:
            self.assertFalse(node._dataStore.hasPeersForBlob(blob_hash))
            datastore_result = node._dataStore.getPeersForBlob(blob_hash)
            self.assertEquals(len(datastore_result), 0)
            self.assertTrue(blob_hash in node._dataStore._dict)  # the looping call shouldn't have removed it yet
            self.assertEquals(len(node._dataStore.getStoringContacts()), 1)

        self.pump_clock(constants.checkRefreshInterval + 1)  # tick the clock forward (so the nodes refresh)
        for node in storing_nodes:
            self.assertFalse(node._dataStore.hasPeersForBlob(blob_hash))
            datastore_result = node._dataStore.getPeersForBlob(blob_hash)
            self.assertEquals(len(datastore_result), 0)
            self.assertEquals(len(node._dataStore.getStoringContacts()), 0)
            self.assertTrue(blob_hash not in node._dataStore._dict)  # the looping call should have fired
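The unpacking loop in the test implies a compact peer encoding: 4 raw IPv4 bytes, a 2-byte big-endian port, then the node id. A sketch of the inverse (packing) side under that assumption; pack_peer is a hypothetical helper, not part of the datastore API:

import socket
import struct

def pack_peer(host, port, node_id):
    # 4-byte IPv4 address + 2-byte big-endian port + raw node id bytes
    return socket.inet_aton(host) + struct.pack('>H', port) + node_id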
Example no. 32
    def setup(self):
        """Create the blob directory and database if necessary, start all desired services"""

        log.debug("Starting session.")

        if self.node_id is None:
            self.node_id = generate_id()

        if self.use_upnp is True:
            d = self._try_upnp()
        else:
            d = defer.succeed(True)
        d.addCallback(lambda _: self.storage.setup())
        d.addCallback(lambda _: self._setup_dht())
        d.addCallback(lambda _: self._setup_other_components())
        return d
Example no. 33
    def __init__(self, fixed_defaults, adjustable_defaults, persisted_settings=None,
                 environment=None, cli_settings=None):

        self._installation_id = None
        self._session_id = base58.b58encode(utils.generate_id())
        self._node_id = None

        self._fixed_defaults = fixed_defaults
        self._adjustable_defaults = adjustable_defaults

        self._data = {
            TYPE_DEFAULT: {},  # defaults
            TYPE_PERSISTED: {},  # stored settings from daemon_settings.yml (or from a db, etc)
            TYPE_ENV: {},  # settings from environment variables
            TYPE_CLI: {},  # command-line arguments
            TYPE_RUNTIME: {},  # set during runtime (using self.set(), etc)
        }

        # the order in which a piece of data is searched for. earlier types override later types
        self._search_order = (
            TYPE_RUNTIME, TYPE_CLI, TYPE_ENV, TYPE_PERSISTED, TYPE_DEFAULT
        )

        # types of data where user specified config values can be stored
        self._user_specified = (
            TYPE_RUNTIME, TYPE_CLI, TYPE_ENV, TYPE_PERSISTED
        )

        self._data[TYPE_DEFAULT].update(self._fixed_defaults)
        self._data[TYPE_DEFAULT].update(
            {k: v[1] for (k, v) in self._adjustable_defaults.iteritems()})

        if persisted_settings is None:
            persisted_settings = {}
        self._validate_settings(persisted_settings)
        self._data[TYPE_PERSISTED].update(persisted_settings)

        env_settings = self._parse_environment(environment)
        self._validate_settings(env_settings)
        self._data[TYPE_ENV].update(env_settings)

        if cli_settings is None:
            cli_settings = {}
        self._validate_settings(cli_settings)
        self._data[TYPE_CLI].update(cli_settings)
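To illustrate the precedence that _search_order encodes, here is a hypothetical lookup (not shown in the source) that walks runtime, CLI, environment, persisted, and default data in order and returns the first hit:

    def get(self, name):
        for data_type in self._search_order:
            if name in self._data[data_type]:
                return self._data[data_type][name]
        raise KeyError("unknown setting: %s" % name)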
Example no. 34

 def test_boolean(self):
     """ Test "equals" and "not equals" comparisons """
     self.assertNotEqual(
         self.first_contact, self.contact_manager.make_contact(
             self.first_contact.id, self.first_contact.address, self.first_contact.port + 1, None, 32
         )
     )
     self.assertNotEqual(
         self.first_contact, self.contact_manager.make_contact(
             self.first_contact.id, '193.168.1.1', self.first_contact.port, None, 32
         )
     )
     self.assertNotEqual(
         self.first_contact, self.contact_manager.make_contact(
             generate_id(), self.first_contact.address, self.first_contact.port, None, 32
         )
     )
     self.assertEqual(self.second_contact, self.second_contact_second_reference)
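These comparisons imply that contact equality is keyed on (id, address, port) and ignores the trailing arguments to make_contact. A hypothetical __eq__ consistent with the test:

 def __eq__(self, other):
     return (self.id, self.address, self.port) == \
            (other.id, other.address, other.port)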
Example no. 35
def join_network(udp_port, known_nodes):
    lbryid = generate_id()

    log.info('Creating Node...')
    node = Node(udpPort=udp_port, lbryid=lbryid)

    log.info('Joining network...')
    d = node.joinNetwork(known_nodes)

    def log_network_size():
        log.info("Approximate number of nodes in DHT: %s", str(node.getApproximateTotalDHTNodes()))
        log.info("Approximate number of blobs in DHT: %s", str(node.getApproximateTotalHashes()))

    d.addCallback(lambda _: log_network_size())

    d.addCallback(lambda _: node)

    return d
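Because the final addCallback swaps the result for the node itself, callers receive the joined node from the Deferred. A hypothetical caller:

d = join_network(4444, conf.settings['known_dht_nodes'])
d.addCallback(lambda node: log.info('joined the DHT as %s',
                                    node.node_id.encode('hex')))
reactor.run()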
Example no. 36
def join_network(udp_port, known_nodes):
    lbryid = generate_id()

    log.info('Creating Node')
    node = Node(udpPort=udp_port, lbryid=lbryid)

    log.info('Joining network')
    d = node.joinNetwork(known_nodes)

    def log_network_size():
        log.info("Approximate number of nodes in DHT: %s",
                 str(node.getApproximateTotalDHTNodes()))
        log.info("Approximate number of blobs in DHT: %s",
                 str(node.getApproximateTotalHashes()))

    d.addCallback(lambda _: log_network_size())

    d.addCallback(lambda _: node)

    return d
Example no. 37
def main(args=None):
    conf.initialize_settings()

    parser = argparse.ArgumentParser()
    parser.add_argument('destination', type=conf.server_port, nargs='+')
    parser.add_argument('--names', nargs='*')
    parser.add_argument('--limit', type=int)
    args = parser.parse_args(args)

    log_support.configure_console(level='INFO')

    db_dir = appdirs.user_data_dir('lighthouse-uploader')
    safe_makedirs(db_dir)
    # no need to persist metadata info
    storage = Wallet.InMemoryStorage()
    wallet = Wallet.LBRYumWallet(storage)
    blob_dir = os.path.join(db_dir, 'blobfiles')
    safe_makedirs(blob_dir)
    # Don't set a hash_announcer, we have no need to tell anyone we
    # have these blobs
    blob_manager = BlobManager.DiskBlobManager(None, blob_dir, db_dir)
    # TODO: make it so that I can disable the BlobAvailabilityTracker
    #       or, in general, make the session more reusable for users
    #       that only want part of the functionality
    session = Session.Session(
        blob_data_payment_rate=0,
        db_dir=db_dir,
        lbryid=utils.generate_id(),
        blob_dir=blob_dir,
        dht_node_port=4444,
        known_dht_nodes=conf.settings['known_dht_nodes'],
        peer_port=3333,
        use_upnp=False,
        wallet=wallet,
        blob_manager=blob_manager,
    )
    assert session.wallet
    run(session, args.destination, args.names, args.limit)
    reactor.run()
Example no. 38

def main(args=None):
    conf.initialize_settings()

    parser = argparse.ArgumentParser()
    parser.add_argument('destination', type=conf.server_port, nargs='+')
    parser.add_argument('--names', nargs='*')
    parser.add_argument('--limit', type=int)
    args = parser.parse_args(args)

    log_support.configure_console(level='INFO')

    db_dir = appdirs.user_data_dir('lighthouse-uploader')
    safe_makedirs(db_dir)
    # no need to persist metadata info
    storage = Wallet.InMemoryStorage()
    wallet = Wallet.LBRYumWallet(storage)
    blob_dir = os.path.join(db_dir, 'blobfiles')
    safe_makedirs(blob_dir)
    # Don't set a hash_announcer, we have no need to tell anyone we
    # have these blobs
    blob_manager = BlobManager.DiskBlobManager(None, blob_dir, db_dir)
    # TODO: make it so that I can disable the BlobAvailabilityTracker
    #       or, in general, make the session more reusable for users
    #       that only want part of the functionality
    session = Session.Session(
        blob_data_payment_rate=0,
        db_dir=db_dir,
        node_id=utils.generate_id(),
        blob_dir=blob_dir,
        dht_node_port=4444,
        known_dht_nodes=conf.settings['known_dht_nodes'],
        peer_port=3333,
        use_upnp=False,
        wallet=wallet,
        blob_manager=blob_manager,
    )
    assert session.wallet
    run(session, args.destination, args.names, args.limit)
    reactor.run()
Example no. 39
    @defer.inlineCallbacks  # needed for the yield below
    def start(self):
        self.upnp_component = self.component_manager.get_component(
            UPNP_COMPONENT)
        self.external_peer_port = self.upnp_component.upnp_redirects.get(
            "TCP", GCS("peer_port"))
        self.external_udp_port = self.upnp_component.upnp_redirects.get(
            "UDP", GCS("dht_node_port"))
        node_id = CS.get_node_id()
        if node_id is None:
            node_id = generate_id()

        self.dht_node = node.Node(node_id=node_id,
                                  udpPort=GCS('dht_node_port'),
                                  externalUDPPort=self.external_udp_port,
                                  externalIP=self.upnp_component.external_ip,
                                  peerPort=self.external_peer_port)

        self.dht_node.start_listening()
        yield self.dht_node._protocol._listening
        d = self.dht_node.joinNetwork(GCS('known_dht_nodes'))
        d.addCallback(lambda _: self.dht_node.start_looping_calls())
        d.addCallback(lambda _: log.info("Joined the dht"))
        log.info("Started the dht")
Example no. 40
 def _make_lbryid(self):
     self.lbryid = generate_id()
     d = self.settings.save_lbryid(self.lbryid)
     return d