Ejemplo n.º 1
0
class TestJsonRpc(trial.unittest.TestCase):
    """JSON-RPC endpoint tests (status, help) against a mocked daemon."""

    def setUp(self):
        """Build a daemon whose wallet/network are fully faked so no
        real I/O happens during the tests."""
        def noop():
            return None

        mock_conf_settings(self)
        util.resetTime(self)
        self.test_daemon = get_test_daemon()
        self.test_daemon.session.wallet = Wallet.LBRYumWallet(
            storage=Wallet.InMemoryStorage())
        self.test_daemon.session.wallet.network = FakeNetwork()
        self.test_daemon.session.wallet.get_best_blockhash = noop

    def test_status(self):
        d = defer.maybeDeferred(self.test_daemon.jsonrpc_status)
        d.addCallback(lambda status: self.assertDictContainsSubset(
            {'is_running': False}, status))
        # Return the Deferred so trial waits for it; without the return a
        # failing assertion in the callback would be silently dropped.
        return d

    @unittest.skipIf(is_android(
    ), 'Test cannot pass on Android because PYTHONOPTIMIZE removes the docstrings.'
                     )
    def test_help(self):
        d = defer.maybeDeferred(self.test_daemon.jsonrpc_help,
                                command='status')
        d.addCallback(lambda result: self.assertSubstring(
            'daemon status', result['help']))
        # Return the Deferred so trial waits for the assertion to run.
        return d
Ejemplo n.º 2
0
class TestJsonRpc(unittest.TestCase):
    """JSON-RPC endpoint tests (status, help) against a mocked daemon."""

    def setUp(self):
        """Build a daemon whose wallet is faked so no real I/O happens."""
        def noop():
            return None

        mock_conf_settings(self)
        util.resetTime(self)
        self.test_daemon = get_test_daemon()
        self.test_daemon.wallet.is_first_run = False
        self.test_daemon.wallet.get_best_blockhash = noop

    def test_status(self):
        d = defer.maybeDeferred(self.test_daemon.jsonrpc_status)
        d.addCallback(lambda status: self.assertDictContainsSubset(
            {'is_running': False}, status))
        # Return the Deferred so the (trial-style) runner waits for it;
        # otherwise a failing assertion in the callback is silently dropped.
        return d

    def test_help(self):
        d = defer.maybeDeferred(self.test_daemon.jsonrpc_help,
                                command='status')
        d.addCallback(lambda result: self.assertSubstring(
            'daemon status', result['help']))
        # Return the Deferred so the runner waits for the assertion.
        return d

    if is_android():
        test_help.skip = "Test cannot pass on Android because PYTHONOPTIMIZE removes the docstrings."
Ejemplo n.º 3
0
class TestLogger(trial.unittest.TestCase):
    """Tests for the project's custom Logger failure-logging helper."""

    def raiseError(self):
        raise Exception('terrible things happened')

    def triggerErrback(self, callback=None):
        """Fire a Deferred whose callback raises, routing the resulting
        Failure through ``self.log.fail(callback)`` with a fixed message."""
        d = defer.Deferred()
        d.addCallback(lambda _: self.raiseError())
        d.addErrback(self.log.fail(callback), 'My message')
        d.callback(None)
        return d

    def setUp(self):
        # Capture log output in an in-memory stream so the tests can
        # inspect the formatted lines.
        self.log = custom_logger.Logger('test')
        self.stream = StringIO.StringIO()
        handler = logging.StreamHandler(self.stream)
        handler.setFormatter(
            logging.Formatter("%(filename)s:%(lineno)d - %(message)s"))
        self.log.addHandler(handler)

    @unittest.skipIf(
        is_android(),
        'Test cannot pass on Android because the tests package is compiled '
        'which results in a different method call stack')
    def test_can_log_failure(self):
        def output_lines():
            return self.stream.getvalue().split('\n')

        # the line number could change if this file gets refactored
        expected_first_line = 'test_customLogger.py:20 - My message: terrible things happened'

        # testing the entirety of the message is futile as the
        # traceback will depend on the system the test is being run on
        # but hopefully these two tests are good enough
        d = self.triggerErrback()
        # assertEquals is a deprecated alias; use assertEqual to match the
        # assertion on the following line.
        d.addCallback(lambda _: self.assertEqual(expected_first_line,
                                                 output_lines()[0]))
        d.addCallback(lambda _: self.assertEqual(10, len(output_lines())))
        return d

    def test_can_log_failure_with_callback(self):
        callback = mock.Mock()
        d = self.triggerErrback(callback)
        d.addCallback(lambda _: callback.assert_called_once_with(mock.ANY))
        return d
Ejemplo n.º 4
0
class TestTransfer(TestCase):
    def setUp(self):
        """Mock configuration and reset per-test state; register the
        environment teardown to run after each test."""
        mocks.mock_conf_settings(self)
        # Nothing is running yet; take_down_env checks these for None/empty.
        self.session = None
        self.lbry_file_manager = None
        self.server_processes = []
        self.is_generous = True
        self.addCleanup(self.take_down_env)

    def take_down_env(self):
        """Stop the file manager and session (if started), then remove
        test directories/files and terminate uploader processes."""
        d = defer.succeed(True)
        if self.lbry_file_manager is not None:
            d.addCallback(lambda _: self.lbry_file_manager.stop())
        if self.session is not None:
            d.addCallback(lambda _: self.session.shut_down())

        def delete_test_env():
            # Blocking filesystem/process cleanup; runs in a thread below.
            for directory in ('server', 'server1', 'server2', 'client'):
                if os.path.exists(directory):
                    shutil.rmtree(directory)
            if os.path.exists('test_file'):
                os.remove('test_file')
            for proc in self.server_processes:
                proc.terminate()
            return True

        d.addCallback(lambda _: threads.deferToThread(delete_test_env))
        return d

    @staticmethod
    def wait_for_event(event, timeout):
        """Return a Deferred that fires with True once `event` is set
        (polled every 0.1s) or after `timeout` seconds elapse.

        Bug fix: previously the Deferred was only fired inside the
        ``stop_call.active()`` branch, so when the timeout itself expired
        (``done_waiting`` path, where the delayed call is no longer
        active) the Deferred never fired and the caller hung forever.
        Now the Deferred always completes; the pending timeout call is
        cancelled only when it has not fired yet.
        """
        from twisted.internet import reactor
        d = defer.Deferred()

        def stop():
            set_check.stop()
            # Cancel the timeout only if it is still pending; either way,
            # complete the wait so callers always make progress.
            if stop_call.active():
                stop_call.cancel()
            d.callback(True)

        def check_if_event_set():
            if event.is_set():
                logging.debug("Dead event has been found set")
                stop()

        def done_waiting():
            logging.warning(
                "Event has not been found set and timeout has expired")
            stop()

        set_check = task.LoopingCall(check_if_event_set)
        set_check.start(.1)
        stop_call = reactor.callLater(timeout, done_waiting)
        return d

    @staticmethod
    def wait_for_hash_from_queue(hash_queue):
        """Poll `hash_queue` once per second and fire the returned
        Deferred with the first item that appears. Has no timeout of
        its own; callers are responsible for bounding the wait."""
        logging.debug("Waiting for the sd_hash to come through the queue")

        d = defer.Deferred()

        def check_for_start():
            # Guard clause: nothing queued yet, keep polling.
            if hash_queue.empty():
                logging.debug("Client start event has NOT been found set")
                return
            logging.debug("Client start event has been found set")
            start_check.stop()
            d.callback(hash_queue.get(False))

        start_check = task.LoopingCall(check_for_start)
        start_check.start(1.0)

        return d

    @unittest.skipIf(is_android(),
                     'Test cannot pass on Android because multiprocessing '
                     'is not supported at the OS level.')
    def test_lbry_transfer(self):
        """End-to-end transfer: a child process uploads a stream; this
        process downloads it through a fresh Session and verifies the
        file content by MD5 digest."""
        # The uploader child reports the stream-descriptor hash back via
        # sd_hash_queue, exits when kill_event is set, and confirms its
        # shutdown via dead_event.
        sd_hash_queue = Queue()
        kill_event = Event()
        dead_event = Event()
        lbry_uploader = LbryUploader(sd_hash_queue, kill_event, dead_event,
                                     5209343)
        uploader = Process(target=lbry_uploader.start)
        uploader.start()
        self.server_processes.append(uploader)

        logging.debug("Testing transfer")

        # Fake networking/peer components so only the transfer path under
        # test touches real resources.
        wallet = FakeWallet()
        peer_manager = PeerManager()
        peer_finder = FakePeerFinder(5553, peer_manager, 1)
        hash_announcer = FakeAnnouncer()
        rate_limiter = DummyRateLimiter()
        sd_identifier = StreamDescriptorIdentifier()

        db_dir, blob_dir = mk_db_and_blob_dir()
        self.session = Session(conf.ADJUSTABLE_SETTINGS['data_rate'][1],
                               db_dir=db_dir,
                               node_id="abcd",
                               peer_finder=peer_finder,
                               hash_announcer=hash_announcer,
                               blob_dir=blob_dir,
                               peer_port=5553,
                               use_upnp=False,
                               rate_limiter=rate_limiter,
                               wallet=wallet,
                               blob_tracker_class=DummyBlobAvailabilityTracker,
                               dht_node_class=Node,
                               is_generous=self.is_generous,
                               external_ip="127.0.0.1")

        self.lbry_file_manager = EncryptedFileManager(self.session,
                                                      sd_identifier)

        def make_downloader(metadata, prm):
            # Use the first factory registered for this stream type.
            factories = metadata.factories
            return factories[0].make_downloader(metadata,
                                                prm.min_blob_data_payment_rate,
                                                prm, db_dir)

        def download_file(sd_hash):
            # Fetch the stream-descriptor blob, resolve its metadata, then
            # build and start a downloader for the described stream.
            prm = self.session.payment_rate_manager
            d = download_sd_blob(self.session, sd_hash, prm)
            d.addCallback(sd_identifier.get_metadata_for_sd_blob)
            d.addCallback(make_downloader, prm)
            d.addCallback(lambda downloader: downloader.start())
            return d

        def check_md5_sum():
            # NOTE(review): file handle is never closed; fine for a test,
            # but a `with` block would be tidier.
            f = open(os.path.join(db_dir, 'test_file'))
            hashsum = MD5.new()
            hashsum.update(f.read())
            self.assertEqual(hashsum.hexdigest(),
                             "4ca2aafb4101c1e42235aad24fbb83be")

        @defer.inlineCallbacks
        def start_transfer(sd_hash):
            logging.debug("Starting the transfer")
            yield self.session.setup()
            yield add_lbry_file_to_sd_identifier(sd_identifier)
            yield self.lbry_file_manager.setup()
            yield download_file(sd_hash)
            yield check_md5_sum()

        def stop(arg):
            # Teardown on success or failure (attached via addBoth): tell
            # the uploader to exit, wait for its dead_event, remove the
            # test dirs, and pass the original result/Failure through.
            if isinstance(arg, Failure):
                logging.debug("Client is stopping due to an error. Error: %s",
                              arg.getTraceback())
            else:
                logging.debug("Client is stopping normally.")
            kill_event.set()
            logging.debug("Set the kill event")
            d = self.wait_for_event(dead_event, 15)

            def print_shutting_down():
                logging.info("Client is shutting down")

            d.addCallback(lambda _: print_shutting_down())
            d.addCallback(lambda _: rm_db_and_blob_dir(db_dir, blob_dir))
            d.addCallback(lambda _: arg)
            return d

        d = self.wait_for_hash_from_queue(sd_hash_queue)
        d.addCallback(start_transfer)
        d.addBoth(stop)

        return d

    @unittest.skipIf(is_android(),
                     'Test cannot pass on Android because multiprocessing '
                     'is not supported at the OS level.')
    def test_last_blob_retrieval(self):
        """Download one blob that is served by two uploader processes
        (one deliberately slow, one fast) after verifying that both
        advertise the same blob hash."""
        kill_event = Event()
        dead_event_1 = Event()
        blob_hash_queue_1 = Queue()
        blob_hash_queue_2 = Queue()
        # Last positional arg toggles the uploader's slowness: False = fast.
        fast_uploader = Process(target=start_blob_uploader,
                                args=(blob_hash_queue_1, kill_event,
                                      dead_event_1, False))
        fast_uploader.start()
        self.server_processes.append(fast_uploader)
        dead_event_2 = Event()
        slow_uploader = Process(target=start_blob_uploader,
                                args=(blob_hash_queue_2, kill_event,
                                      dead_event_2, True))
        slow_uploader.start()
        self.server_processes.append(slow_uploader)

        logging.debug("Testing transfer")

        wallet = FakeWallet()
        peer_manager = PeerManager()
        peer_finder = FakePeerFinder(5553, peer_manager, 2)
        hash_announcer = FakeAnnouncer()
        rate_limiter = DummyRateLimiter()

        db_dir, blob_dir = mk_db_and_blob_dir()
        self.session = Session(
            conf.ADJUSTABLE_SETTINGS['data_rate'][1],
            db_dir=db_dir,
            node_id="abcd",
            peer_finder=peer_finder,
            hash_announcer=hash_announcer,
            blob_dir=blob_dir,
            peer_port=5553,
            use_upnp=False,
            rate_limiter=rate_limiter,
            wallet=wallet,
            blob_tracker_class=DummyBlobAvailabilityTracker,
            is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1],
            external_ip="127.0.0.1")

        # Wait until both uploaders have published their blob hash.
        d1 = self.wait_for_hash_from_queue(blob_hash_queue_1)
        d2 = self.wait_for_hash_from_queue(blob_hash_queue_2)
        d = defer.DeferredList([d1, d2], fireOnOneErrback=True)

        def get_blob_hash(results):
            # Both uploaders must be serving the same blob.
            self.assertEqual(results[0][1], results[1][1])
            return results[0][1]

        d.addCallback(get_blob_hash)

        def download_blob(blob_hash):
            prm = self.session.payment_rate_manager
            downloader = StandaloneBlobDownloader(blob_hash,
                                                  self.session.blob_manager,
                                                  peer_finder, rate_limiter,
                                                  prm, wallet)
            d = downloader.download()
            return d

        def start_transfer(blob_hash):

            logging.debug("Starting the transfer")

            d = self.session.setup()
            d.addCallback(lambda _: download_blob(blob_hash))

            return d

        d.addCallback(start_transfer)

        def stop(arg):
            # Teardown on success or failure (addBoth): signal both
            # uploaders to exit, wait for both dead events, remove the
            # test dirs, and pass the original result/Failure through.
            if isinstance(arg, Failure):
                logging.debug("Client is stopping due to an error. Error: %s",
                              arg.getTraceback())
            else:
                logging.debug("Client is stopping normally.")
            kill_event.set()
            logging.debug("Set the kill event")
            d1 = self.wait_for_event(dead_event_1, 15)
            d2 = self.wait_for_event(dead_event_2, 15)
            dl = defer.DeferredList([d1, d2])

            def print_shutting_down():
                logging.info("Client is shutting down")

            dl.addCallback(lambda _: print_shutting_down())
            dl.addCallback(lambda _: rm_db_and_blob_dir(db_dir, blob_dir))
            dl.addCallback(lambda _: arg)
            return dl

        d.addBoth(stop)
        return d

    @unittest.skipIf(is_android(),
                     'Test cannot pass on Android because multiprocessing '
                     'is not supported at the OS level.')
    def test_double_download(self):
        """Download a file, verify it, delete it, then download and
        verify it again — ensuring the second download works after the
        first copy has been removed."""
        sd_hash_queue = Queue()
        kill_event = Event()
        dead_event = Event()
        lbry_uploader = LbryUploader(sd_hash_queue, kill_event, dead_event,
                                     5209343)
        uploader = Process(target=lbry_uploader.start)
        uploader.start()
        self.server_processes.append(uploader)

        logging.debug("Testing double download")

        wallet = FakeWallet()
        peer_manager = PeerManager()
        peer_finder = FakePeerFinder(5553, peer_manager, 1)
        hash_announcer = FakeAnnouncer()
        rate_limiter = DummyRateLimiter()
        sd_identifier = StreamDescriptorIdentifier()

        # Keeps every downloader created, so delete_lbry_file can operate
        # on the one most recently produced by download_file.
        downloaders = []

        db_dir, blob_dir = mk_db_and_blob_dir()
        self.session = Session(
            conf.ADJUSTABLE_SETTINGS['data_rate'][1],
            db_dir=db_dir,
            node_id="abcd",
            peer_finder=peer_finder,
            hash_announcer=hash_announcer,
            blob_dir=blob_dir,
            peer_port=5553,
            use_upnp=False,
            rate_limiter=rate_limiter,
            wallet=wallet,
            blob_tracker_class=DummyBlobAvailabilityTracker,
            is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1],
            external_ip="127.0.0.1")

        self.lbry_file_manager = EncryptedFileManager(self.session,
                                                      sd_identifier)

        @defer.inlineCallbacks
        def make_downloader(metadata, prm):
            # Use the first factory registered for this stream type.
            factories = metadata.factories
            downloader = yield factories[0].make_downloader(
                metadata, prm.min_blob_data_payment_rate, prm, db_dir)
            defer.returnValue(downloader)

        @defer.inlineCallbacks
        def download_file(sd_hash):
            # Fetch the stream-descriptor blob, resolve metadata, start a
            # downloader, and record it for later deletion.
            prm = self.session.payment_rate_manager
            sd_blob = yield download_sd_blob(self.session, sd_hash, prm)
            metadata = yield sd_identifier.get_metadata_for_sd_blob(sd_blob)
            downloader = yield make_downloader(metadata, prm)
            downloaders.append(downloader)
            yield downloader.start()
            defer.returnValue(downloader)

        def check_md5_sum():
            # NOTE(review): file handle is never closed; fine for a test,
            # but a `with` block would be tidier.
            f = open(os.path.join(db_dir, 'test_file'))
            hashsum = MD5.new()
            hashsum.update(f.read())
            self.assertEqual(hashsum.hexdigest(),
                             "4ca2aafb4101c1e42235aad24fbb83be")

        def delete_lbry_file(downloader):
            logging.debug("deleting the file")
            return self.lbry_file_manager.delete_lbry_file(downloader)

        def check_lbry_file(downloader):
            # Every known blob should be completed, and there should be
            # exactly three of them for this fixture.
            d = downloader.status()

            def check_status_report(status_report):
                self.assertEqual(status_report.num_known,
                                 status_report.num_completed)
                self.assertEqual(status_report.num_known, 3)

            d.addCallback(check_status_report)
            return d

        @defer.inlineCallbacks
        def start_transfer(sd_hash):
            # download a file, delete it, and download it again

            logging.debug("Starting the transfer")
            yield self.session.setup()
            yield add_lbry_file_to_sd_identifier(sd_identifier)
            yield self.lbry_file_manager.setup()
            downloader = yield download_file(sd_hash)
            yield check_md5_sum()
            yield check_lbry_file(downloader)
            yield delete_lbry_file(downloader)
            downloader = yield download_file(sd_hash)
            yield check_lbry_file(downloader)
            yield check_md5_sum()
            yield delete_lbry_file(downloader)

        def stop(arg):
            # Teardown on success or failure (addBoth): tell the uploader
            # to exit, wait for its dead_event, remove the test dirs, and
            # pass the original result/Failure through.
            if isinstance(arg, Failure):
                logging.debug("Client is stopping due to an error. Error: %s",
                              arg.getTraceback())
            else:
                logging.debug("Client is stopping normally.")
            kill_event.set()
            logging.debug("Set the kill event")
            d = self.wait_for_event(dead_event, 15)

            def print_shutting_down():
                logging.info("Client is shutting down")

            d.addCallback(lambda _: print_shutting_down())
            d.addCallback(lambda _: rm_db_and_blob_dir(db_dir, blob_dir))
            d.addCallback(lambda _: arg)
            return d

        d = self.wait_for_hash_from_queue(sd_hash_queue)
        d.addCallback(start_transfer)
        d.addBoth(stop)
        return d

    @unittest.skip("Sadly skipping failing test instead of fixing it")
    def test_multiple_uploaders(self):
        """Download a file served simultaneously by several uploader
        processes. Currently skipped (known failing)."""
        sd_hash_queue = Queue()
        num_uploaders = 3
        kill_event = Event()
        # One dead_event per uploader; ready_events only for the
        # additional (re)uploaders started after the first, hence
        # num_uploaders - 1 of them.
        dead_events = [Event() for _ in range(num_uploaders)]
        ready_events = [Event() for _ in range(1, num_uploaders)]
        lbry_uploader = LbryUploader(sd_hash_queue, kill_event, dead_events[0],
                                     5209343, 9373419, 2**22)
        uploader = Process(target=lbry_uploader.start)
        uploader.start()
        self.server_processes.append(uploader)

        logging.debug("Testing multiple uploaders")

        wallet = FakeWallet()
        peer_manager = PeerManager()
        peer_finder = FakePeerFinder(5553, peer_manager, num_uploaders)
        hash_announcer = FakeAnnouncer()
        rate_limiter = DummyRateLimiter()
        sd_identifier = StreamDescriptorIdentifier()

        db_dir, blob_dir = mk_db_and_blob_dir()
        self.session = Session(
            conf.ADJUSTABLE_SETTINGS['data_rate'][1],
            db_dir=db_dir,
            node_id="abcd",
            peer_finder=peer_finder,
            hash_announcer=hash_announcer,
            blob_dir=blob_dir,
            peer_port=5553,
            use_upnp=False,
            rate_limiter=rate_limiter,
            wallet=wallet,
            blob_tracker_class=DummyBlobAvailabilityTracker,
            is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1],
            external_ip="127.0.0.1")

        self.lbry_file_manager = EncryptedFileManager(self.session,
                                                      sd_identifier)

        def start_additional_uploaders(sd_hash):
            # Spawn the remaining reuploaders once the first uploader has
            # produced the sd_hash they should serve.
            for i in range(1, num_uploaders):
                uploader = Process(target=start_lbry_reuploader,
                                   args=(sd_hash, kill_event, dead_events[i],
                                         ready_events[i - 1], i, 2**10))
                uploader.start()
                self.server_processes.append(uploader)
            return defer.succeed(True)

        def wait_for_ready_events():
            return defer.DeferredList([
                self.wait_for_event(ready_event, 60)
                for ready_event in ready_events
            ])

        def make_downloader(metadata, prm):
            # Build the downloader using each option's default value.
            info_validator = metadata.validator
            options = metadata.options
            factories = metadata.factories
            chosen_options = [
                o.default_value
                for o in options.get_downloader_options(info_validator, prm)
            ]
            return factories[0].make_downloader(metadata, chosen_options, prm)

        def download_file(sd_hash):
            prm = self.session.payment_rate_manager
            d = download_sd_blob(self.session, sd_hash, prm)
            d.addCallback(sd_identifier.get_metadata_for_sd_blob)
            d.addCallback(make_downloader, prm)
            d.addCallback(lambda downloader: downloader.start())
            return d

        def check_md5_sum():
            # NOTE(review): reads 'test_file' from the CWD (unlike the
            # other tests, which read from db_dir) and never closes the
            # handle — possibly related to why this test is skipped.
            f = open('test_file')
            hashsum = MD5.new()
            hashsum.update(f.read())
            self.assertEqual(hashsum.hexdigest(),
                             "e5941d615f53312fd66638239c1f90d5")

        def start_transfer(sd_hash):

            logging.debug("Starting the transfer")

            d = start_additional_uploaders(sd_hash)
            d.addCallback(lambda _: wait_for_ready_events())
            d.addCallback(lambda _: self.session.setup())
            d.addCallback(
                lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
            d.addCallback(lambda _: self.lbry_file_manager.setup())
            d.addCallback(lambda _: download_file(sd_hash))
            d.addCallback(lambda _: check_md5_sum())

            return d

        def stop(arg):
            # Teardown on success or failure (addBoth): signal all
            # uploaders to exit, wait for every dead event, remove the
            # test dirs, and pass the original result/Failure through.
            if isinstance(arg, Failure):
                logging.debug("Client is stopping due to an error. Error: %s",
                              arg.getTraceback())
            else:
                logging.debug("Client is stopping normally.")
            kill_event.set()
            logging.debug("Set the kill event")
            d = defer.DeferredList([
                self.wait_for_event(dead_event, 15)
                for dead_event in dead_events
            ])

            def print_shutting_down():
                logging.info("Client is shutting down")

            d.addCallback(lambda _: print_shutting_down())
            d.addCallback(lambda _: rm_db_and_blob_dir(db_dir, blob_dir))
            d.addCallback(lambda _: arg)
            return d

        d = self.wait_for_hash_from_queue(sd_hash_queue)
        d.addCallback(start_transfer)
        d.addBoth(stop)

        return d