Example No. 1
@defer.inlineCallbacks
def download_it(peer, timeout, blob_hash):
    tmp_dir = yield threads.deferToThread(tempfile.mkdtemp)
    storage = SQLiteStorage(tmp_dir, reactor)
    yield storage.setup()
    tmp_blob_manager = DiskBlobManager(tmp_dir, storage)

    config = {'auto_connect': True}
    if conf.settings['lbryum_wallet_dir']:
        config['lbryum_wallet_dir'] = conf.settings['lbryum_wallet_dir']
        config['use_keyring'] = False
        config['blockchain_name'] = conf.settings['blockchain_name']
        config['lbryum_servers'] = []
    wallet = LbryWalletManager.from_lbrynet_config(config, storage)

    downloader = SinglePeerDownloader()
    downloader.setup(wallet)

    try:
        blob_downloaded = yield downloader.download_blob_from_peer(
            peer, timeout, blob_hash, tmp_blob_manager)
        if blob_downloaded:
            log.info("SUCCESS!")
            blob = yield tmp_blob_manager.get_blob(blob_hash)
            pprint(blob)
            if not blob.verified:
                log.error("except that its not verified....")
            else:
                reader = BlobStreamDescriptorReader(blob)
                info = None
                for x in range(0, 3):
                    try:
                        info = yield reader.get_info()
                    except ValueError:
                        pass
                    if info:
                        break

                    # there's some kind of race condition where it sometimes doesn't write the blob to disk in time
                    time.sleep(0.1)

                if info is not None:
                    pprint(info)
                    for content_blob in info['blobs']:
                        if 'blob_hash' in content_blob:
                            yield download_it(peer, timeout,
                                              content_blob['blob_hash'])
        else:
            log.error("Download failed")
    finally:
        yield tmp_blob_manager.stop()
        yield threads.deferToThread(shutil.rmtree, tmp_dir)

    defer.returnValue(True)
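
Below is a minimal, self-contained sketch of the pattern the example above relies on: an inlineCallbacks coroutine that offloads blocking filesystem work with deferToThread, retries a flaky read a few times, and always cleans up its temporary directory in a finally block. It uses only Twisted and the standard library; fetch_with_retries and read_once are illustrative names, not lbrynet APIs, and it swaps the blocking time.sleep for a non-blocking deferLater.

# Sketch only: illustrates the deferToThread / retry / cleanup pattern above.
# fetch_with_retries and read_once are hypothetical names, not lbrynet APIs.
import shutil
import tempfile

from twisted.internet import defer, reactor, task, threads


@defer.inlineCallbacks
def fetch_with_retries(read_once, attempts=3, delay=0.1):
    # create the temp dir off the reactor thread, as the example does
    tmp_dir = yield threads.deferToThread(tempfile.mkdtemp)
    try:
        result = None
        for _ in range(attempts):
            try:
                result = yield read_once(tmp_dir)
            except ValueError:
                pass
            if result is not None:
                break
            # non-blocking pause instead of time.sleep, so the reactor keeps running
            yield task.deferLater(reactor, delay, lambda: None)
        defer.returnValue(result)
    finally:
        # always remove the temp dir, even if the read keeps failing
        yield threads.deferToThread(shutil.rmtree, tmp_dir)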
Example No. 2
@defer.inlineCallbacks
def download_it(peer, timeout, blob_hash):
    tmp_dir = yield threads.deferToThread(tempfile.mkdtemp)
    announcer = DummyHashAnnouncer()
    tmp_blob_manager = DiskBlobManager(announcer, tmp_dir, tmp_dir)

    config = {'auto_connect': True}
    if conf.settings['lbryum_wallet_dir']:
        config['lbryum_path'] = conf.settings['lbryum_wallet_dir']
    storage = Wallet.InMemoryStorage()
    wallet = Wallet.LBRYumWallet(storage, config)

    downloader = SinglePeerDownloader()
    downloader.setup(wallet)

    try:
        blob_downloaded = yield downloader.download_blob_from_peer(
            peer, timeout, blob_hash, tmp_blob_manager)
        if blob_downloaded:
            log.info("SUCCESS!")
            blob = yield tmp_blob_manager.get_blob(blob_hash)
            pprint(blob)
            if not blob.verified:
                log.error("except that its not verified....")
            else:
                reader = BlobStreamDescriptorReader(blob)
                info = None
                for x in range(0, 3):
                    try:
                        info = yield reader.get_info()
                    except ValueError:
                        pass
                    if info:
                        break
                    # there's some kind of race condition where it sometimes doesn't write the blob to disk in time
                    time.sleep(0.1)

                if info is not None:
                    pprint(info)
                    for content_blob in info['blobs']:
                        if 'blob_hash' in content_blob:
                            yield download_it(peer, timeout,
                                              content_blob['blob_hash'])
        else:
            log.error("Download failed")
    finally:
        yield tmp_blob_manager.stop()
        yield threads.deferToThread(shutil.rmtree, tmp_dir)

    defer.returnValue(True)
Example No. 3
class BlobManagerTest(unittest.TestCase):
    @defer.inlineCallbacks
    def setUp(self):
        conf.initialize_settings(False)
        self.blob_dir = tempfile.mkdtemp()
        self.db_dir = tempfile.mkdtemp()
        self.bm = DiskBlobManager(self.blob_dir, SQLiteStorage(self.db_dir))
        self.peer = Peer('somehost', 22)
        yield self.bm.storage.setup()

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.bm.stop()
        yield self.bm.storage.stop()
        shutil.rmtree(self.blob_dir)
        shutil.rmtree(self.db_dir)

    @defer.inlineCallbacks
    def _create_and_add_blob(self, should_announce=False):
        # create and add blob to blob manager
        data_len = random.randint(1, 1000)
        data = b''.join(
            random.choice(string.ascii_lowercase).encode()
            for _ in range(data_len))

        hashobj = get_lbry_hash_obj()
        hashobj.update(data)
        out = hashobj.hexdigest()
        blob_hash = out

        # create new blob
        yield self.bm.setup()
        blob = yield self.bm.get_blob(blob_hash, len(data))

        writer, finished_d = yield blob.open_for_writing(self.peer)
        yield writer.write(data)
        yield self.bm.blob_completed(blob, should_announce)

        # check to see if blob is there
        self.assertTrue(os.path.isfile(os.path.join(self.blob_dir, blob_hash)))
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertTrue(blob_hash in blobs)
        defer.returnValue(blob_hash)

    @defer.inlineCallbacks
    def test_create_blob(self):
        blob_hashes = []

        # create a bunch of blobs
        for i in range(0, 10):
            blob_hash = yield self._create_and_add_blob()
            blob_hashes.append(blob_hash)
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(10, len(blobs))

    @defer.inlineCallbacks
    def test_delete_blob(self):
        # create blob
        blob_hash = yield self._create_and_add_blob()
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 1)

        # delete blob
        yield self.bm.delete_blobs([blob_hash])
        self.assertFalse(os.path.isfile(os.path.join(self.blob_dir,
                                                     blob_hash)))
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 0)
        blobs = yield self.bm.storage.get_all_blob_hashes()
        self.assertEqual(len(blobs), 0)
        self.assertFalse(blob_hash in self.bm.blobs)

        # delete blob that was already deleted once
        yield self.bm.delete_blobs([blob_hash])

        # delete blob that does not exist, nothing will
        # happen
        blob_hash = random_lbry_hash()
        yield self.bm.delete_blobs([blob_hash])

    @defer.inlineCallbacks
    def test_delete_open_blob(self):
        # Test that a blob that is opened for writing will not be deleted

        # create blobs
        blob_hashes = []
        for i in range(0, 10):
            blob_hash = yield self._create_and_add_blob()
            blob_hashes.append(blob_hash)
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 10)

        # open the last blob
        blob = yield self.bm.get_blob(blob_hashes[-1])
        w, finished_d = yield blob.open_for_writing(self.peer)

        # schedule a close, just to leave the reactor clean
        finished_d.addBoth(lambda x: None)
        self.addCleanup(w.close)

        # delete the last blob and check if it still exists
        yield self.bm.delete_blobs([blob_hashes[-1]])
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 10)
        self.assertTrue(blob_hashes[-1] in blobs)
        self.assertTrue(
            os.path.isfile(os.path.join(self.blob_dir, blob_hashes[-1])))

    @defer.inlineCallbacks
    def test_should_announce(self):
        # create blob with should announce
        blob_hash = yield self._create_and_add_blob(should_announce=True)
        out = yield self.bm.get_should_announce(blob_hash)
        self.assertTrue(out)
        count = yield self.bm.count_should_announce_blobs()
        self.assertEqual(1, count)

        # set should announce to False
        yield self.bm.set_should_announce(blob_hash, should_announce=False)
        out = yield self.bm.get_should_announce(blob_hash)
        self.assertFalse(out)
        count = yield self.bm.count_should_announce_blobs()
        self.assertEqual(0, count)
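
The _create_and_add_blob helper above reduces to hashing a short run of random bytes. Here is a stdlib-only sketch of that step, assuming get_lbry_hash_obj wraps hashlib.sha384 (LBRY blob hashes are sha384 hex digests); make_random_blob is an illustrative name, not part of the library.

# Stdlib-only sketch of the hashing step in _create_and_add_blob.
# Assumes get_lbry_hash_obj() is hashlib.sha384(); make_random_blob is hypothetical.
import hashlib
import random
import string


def make_random_blob():
    data_len = random.randint(1, 1000)
    data = ''.join(random.choice(string.ascii_lowercase)
                   for _ in range(data_len)).encode()
    blob_hash = hashlib.sha384(data).hexdigest()
    return data, blob_hash


data, blob_hash = make_random_blob()
assert len(blob_hash) == 96  # a sha384 hex digest is 96 characters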
Example No. 4
class BlobManagerTest(unittest.TestCase):
    def setUp(self):
        conf.initialize_settings()
        self.blob_dir = tempfile.mkdtemp()
        self.db_dir = tempfile.mkdtemp()
        hash_announcer = DummyHashAnnouncer()
        self.bm = DiskBlobManager(hash_announcer, self.blob_dir, self.db_dir)
        self.peer = Peer('somehost', 22)

    def tearDown(self):
        self.bm.stop()
        # BlobFile will try to delete itself in _close_writer
        # thus when calling rmtree we may get a FileNotFoundError
        # for the blob file
        shutil.rmtree(self.blob_dir, ignore_errors=True)
        shutil.rmtree(self.db_dir)

    @defer.inlineCallbacks
    def _create_and_add_blob(self):
        # create and add blob to blob manager
        data_len = random.randint(1, 1000)
        data = ''.join(
            random.choice(string.ascii_lowercase) for _ in range(data_len))

        hashobj = get_lbry_hash_obj()
        hashobj.update(data)
        out = hashobj.hexdigest()
        blob_hash = out

        # create new blob
        yield self.bm.setup()
        blob = yield self.bm.get_blob(blob_hash, len(data))

        writer, finished_d = yield blob.open_for_writing(self.peer)
        yield writer.write(data)
        yield self.bm.blob_completed(blob)
        yield self.bm.add_blob_to_upload_history(blob_hash, 'test', len(data))

        # check to see if blob is there
        self.assertTrue(os.path.isfile(os.path.join(self.blob_dir, blob_hash)))
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertTrue(blob_hash in blobs)
        defer.returnValue(blob_hash)

    @defer.inlineCallbacks
    def test_create_blob(self):
        blob_hashes = []

        # create a bunch of blobs
        for i in range(0, 10):
            blob_hash = yield self._create_and_add_blob()
            blob_hashes.append(blob_hash)
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(10, len(blobs))

    @defer.inlineCallbacks
    def test_delete_blob(self):
        # create blob
        blob_hash = yield self._create_and_add_blob()
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 1)

        # delete blob
        yield self.bm.delete_blobs([blob_hash])
        self.assertFalse(os.path.isfile(os.path.join(self.blob_dir,
                                                     blob_hash)))
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 0)
        blobs = yield self.bm._get_all_blob_hashes()
        self.assertEqual(len(blobs), 0)

        # delete blob that does not exist, nothing will
        # happen
        blob_hash = random_lbry_hash()
        out = yield self.bm.delete_blobs([blob_hash])

    @defer.inlineCallbacks
    def test_delete_open_blob(self):
        # Test that a blob that is opened for writing will not be deleted

        # create blobs
        blob_hashes = []
        for i in range(0, 10):
            blob_hash = yield self._create_and_add_blob()
            blob_hashes.append(blob_hash)
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 10)

        # open the last blob
        blob = yield self.bm.get_blob(blob_hashes[-1])
        writer, finished_d = yield blob.open_for_writing(self.peer)

        # delete the last blob and check if it still exists
        yield self.bm.delete_blobs([blob_hashes[-1]])
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 10)
        self.assertTrue(blob_hashes[-1] in blobs)
        self.assertTrue(
            os.path.isfile(os.path.join(self.blob_dir, blob_hashes[-1])))
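
The tearDown comment in the example above is the motivation for ignore_errors=True: if a file inside the tree disappears while rmtree is walking it (as the BlobFile writer can cause), a plain rmtree raises, while ignore_errors suppresses the failure. A stdlib-only illustration:

# Stdlib-only illustration of the tearDown cleanup above: rmtree with
# ignore_errors=True tolerates files that vanish, or a tree that is already gone.
import os
import shutil
import tempfile

tmp = tempfile.mkdtemp()
open(os.path.join(tmp, 'blob'), 'wb').close()

shutil.rmtree(tmp, ignore_errors=True)   # cleans up without raising
shutil.rmtree(tmp, ignore_errors=True)   # second call is a no-op, still no error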
Example No. 5
class BlobManagerTest(unittest.TestCase):
    def setUp(self):
        conf.initialize_settings()
        self.blob_dir = tempfile.mkdtemp()
        self.db_dir = tempfile.mkdtemp()
        hash_announcer = DummyHashAnnouncer()
        self.bm = DiskBlobManager(hash_announcer, self.blob_dir, self.db_dir)
        self.peer = Peer('somehost', 22)

    def tearDown(self):
        self.bm.stop()
        # BlobFile will try to delete itself in _close_writer
        # thus when calling rmtree we may get a FileNotFoundError
        # for the blob file
        shutil.rmtree(self.blob_dir, ignore_errors=True)
        shutil.rmtree(self.db_dir)

    @defer.inlineCallbacks
    def _create_and_add_blob(self, should_announce=False):
        # create and add blob to blob manager
        data_len = random.randint(1, 1000)
        data = ''.join(random.choice(string.ascii_lowercase) for _ in range(data_len))

        hashobj = get_lbry_hash_obj()
        hashobj.update(data)
        out = hashobj.hexdigest()
        blob_hash = out

        # create new blob
        yield self.bm.setup()
        blob = yield self.bm.get_blob(blob_hash, len(data))

        writer, finished_d = yield blob.open_for_writing(self.peer)
        yield writer.write(data)
        yield self.bm.blob_completed(blob, should_announce)
        yield self.bm.add_blob_to_upload_history(blob_hash, 'test', len(data))

        # check to see if blob is there
        self.assertTrue(os.path.isfile(os.path.join(self.blob_dir, blob_hash)))
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertTrue(blob_hash in blobs)
        defer.returnValue(blob_hash)

    @defer.inlineCallbacks
    def test_create_blob(self):
        blob_hashes = []

        # create a bunch of blobs
        for i in range(0, 10):
            blob_hash = yield self._create_and_add_blob()
            blob_hashes.append(blob_hash)
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(10, len(blobs))


    @defer.inlineCallbacks
    def test_delete_blob(self):
        # create blob
        blob_hash = yield self._create_and_add_blob()
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 1)

        # delete blob
        yield self.bm.delete_blobs([blob_hash])
        self.assertFalse(os.path.isfile(os.path.join(self.blob_dir, blob_hash)))
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 0)
        blobs = yield self.bm._get_all_blob_hashes()
        self.assertEqual(len(blobs), 0)
        self.assertFalse(blob_hash in self.bm.blobs)

        # delete blob that was already deleted once
        out = yield self.bm.delete_blobs([blob_hash])

        # delete blob that does not exist, nothing will
        # happen
        blob_hash = random_lbry_hash()
        out = yield self.bm.delete_blobs([blob_hash])


    @defer.inlineCallbacks
    def test_delete_open_blob(self):
        # Test that a blob that is opened for writing will not be deleted

        # create blobs
        blob_hashes = []
        for i in range(0, 10):
            blob_hash = yield self._create_and_add_blob()
            blob_hashes.append(blob_hash)
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 10)

        # open the last blob
        blob = yield self.bm.get_blob(blob_hashes[-1])
        writer, finished_d = yield blob.open_for_writing(self.peer)

        # delete the last blob and check if it still exists
        yield self.bm.delete_blobs([blob_hashes[-1]])
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 10)
        self.assertTrue(blob_hashes[-1] in blobs)
        self.assertTrue(os.path.isfile(os.path.join(self.blob_dir, blob_hashes[-1])))

    @defer.inlineCallbacks
    def test_should_announce(self):
        # create blob with should announce
        blob_hash = yield self._create_and_add_blob(should_announce=True)
        out = yield self.bm.get_should_announce(blob_hash)
        self.assertTrue(out)
        count = yield self.bm.count_should_announce_blobs()
        self.assertEqual(1, count)

        # set should announce to False
        out = yield self.bm.set_should_announce(blob_hash, should_announce=False)
        out = yield self.bm.get_should_announce(blob_hash)
        self.assertFalse(out)
        count = yield self.bm.count_should_announce_blobs()
        self.assertEqual(0, count)