Example 1
class LoggingTestCase(TestCase):
    def setUp(self):
        super(LoggingTestCase, self).setUp()
        from logbook import TestHandler
        self.log_handler = TestHandler()
        self.log_handler.push_application()

    def tearDown(self):
        super(LoggingTestCase, self).tearDown()
        self.log_handler.pop_application()

    def assert_log_records_len(self, n):
        self.assertEqual(len(self.log_handler.records), n)

    def assert_any_log_record(self, pred):
        self.assertTrue(any(pred(o) for o in self.log_handler.records),
                        "any_log_record({!r}) is False, log records: {!r}".format(pred, self.dump_log_records()))

    def assert_no_log_record(self, pred):
        self.assertFalse(any(pred(o) for o in self.log_handler.records),
                         "no_log_record({!r}) is False, log records: {!r}".format(pred, self.dump_log_records()))

    def assert_all_log_record(self, pred):
        self.assertTrue(all(pred(o) for o in self.log_handler.records),
                        "all_log_record({!r}) is False, log records: {!r}".format(pred, self.dump_log_records()))

    def dump_log_records(self):
        return [r.to_dict() for r in self.log_handler.records]
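
A minimal usage sketch for this base class; the subclass, logger channel, and messages below are illustrative assumptions, not part of the original project:

from logbook import Logger

log = Logger('myapp')  # hypothetical application logger

class WidgetLoggingTest(LoggingTestCase):
    def test_warning_is_captured(self):
        log.warning('widget cache is cold')
        self.assert_log_records_len(1)
        self.assert_any_log_record(lambda r: r.level_name == 'WARNING')
        self.assert_no_log_record(lambda r: 'error' in r.message)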
Example 2
class KettleTestCase(TestCase):
    def _pre_setup(self):
        # set up logging # TODO: Make this actually work
        self.log_handler = TestHandler()
        self.log_handler.push_thread()

        # Truncate all tables
        truncate_all(engine)

        # Reset session
        session.Session.remove()

    def __call__(self, result=None):
        """
        Wrapper around default __call__ method to perform common test set up.
        This means that user-defined Test Cases aren't required to include a
        call to super().setUp().
        """
        try:
            self._pre_setup()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            import sys

            result.addError(self, sys.exc_info())
            return
        super(KettleTestCase, self).__call__(result)
        try:
            self._post_teardown()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            import sys

            result.addError(self, sys.exc_info())
            return

    def _post_teardown(self):
        # tear down logging
        self.log_handler.pop_thread()

        del calls[:]

    def assertRun(self, task):
        if (task.id, "run") not in calls:
            raise AssertionError("%s has not been run" % (task,))

    def assertNotRun(self, task):
        if (task.id, "run") in calls:
            raise AssertionError("%s has been run" % (task,))

    def assertReverted(self, task):
        if (task.id, "revert") not in calls:
            raise AssertionError("%s has not been reverted" % (task,))

    def assertNotReverted(self, task):
        if (task.id, "revert") in calls:
            raise AssertionError("%s has been reverted" % (task,))
Example 3
class BaseTestCase(unittest.TestCase):
    MOCKS = []

    def setUp(self):
        self.log_handler = TestHandler()
        self.log_handler.format_string = '{record.message}'
        self.log_handler.push_thread()

    def tearDown(self):
        self.log_handler.pop_thread()
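
Because the format string is reduced to '{record.message}', formatted_records contains only the bare message text; a test built on this base class might assert on it directly (the logger channel and message below are assumptions):

from logbook import Logger

class GreetingTest(BaseTestCase):
    def test_message_only_formatting(self):
        Logger('demo').info('hello')
        # With '{record.message}' as the format string, nothing but the text remains.
        self.assertEqual(self.log_handler.formatted_records, ['hello'])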
Example 4
class BaseTestCase(unittest.TestCase):
    def setUp(self):
        # Capture the log
        self.log_handler = TestHandler()
        self.log_handler.push_application()
        # Connect to the database
        self.rd = redis.StrictRedis(REDIS['host'],
                                    port=REDIS['port'],
                                    password=REDIS['password'],
                                    socket_timeout=REDIS['timeout'])
        self.rd.flushdb()

    def tearDown(self):
        # Removes the log capture
        self.log_handler.pop_application()
        # Removes all data from the local redis instance
        self.rd.flushdb()
Example 5
def test_standard_logging_works():
    with TestHandler() as handler:
        logger = logging.getLogger('Dummy')
        with redirected_logging():
            logger.info('hello world')
        assert handler.formatted_records == [
            '[INFO] Dummy: hello world'
        ]
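
For comparison, a sketch of the same check without the stdlib bridge, logging through logbook's own Logger (names are illustrative); TestHandler's default format string produces the same '[LEVEL] channel: message' shape asserted above:

from logbook import Logger, TestHandler

def test_logbook_logging_works():
    with TestHandler() as handler:
        Logger('Dummy').info('hello world')
        assert handler.formatted_records == [
            '[INFO] Dummy: hello world'
        ]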
Example 6
    def compare_current_with_last_candle(self, exchange, assets, end_dt,
                                         freq, data_frequency, data_portal):
        """
        Creates DataFrames from the bundle and exchange for the specified
        data set.

        Parameters
        ----------
        exchange: Exchange
        assets
        end_dt
        freq
        data_frequency
        data_portal

        Returns
        -------

        """
        data = dict()

        assets = sorted(assets, key=lambda a: a.symbol)
        log_catcher = TestHandler()
        with log_catcher:
            symbols = [asset.symbol for asset in assets]
            print(
                'comparing data for {}/{} with {} timeframe on {}'.format(
                    exchange.name, symbols, freq, end_dt
                )
            )
            data['candle'] = data_portal.get_history_window(
                assets=assets,
                end_dt=end_dt,
                bar_count=1,
                frequency=freq,
                field='close',
                data_frequency=data_frequency,
            )
            set_print_settings()
            print(
                'the bundle first / last row:\n{}'.format(
                    data['candle'].iloc[[-1]]
                )
            )
            current = data_portal.get_spot_value(
                assets=assets,
                field='close',
                dt=end_dt,
                data_frequency=data_frequency,
            )
            data['current'] = pd.Series(data=current, index=assets)
            print(
                'the current price:\n{}'.format(
                    data['current']
                )
            )
            pass
Example 7
    def test_orders(self):
        population = 3
        quote_currency = 'eth'
        order_amount = 0.1

        exchanges = select_random_exchanges(
            population=population,
            features=['fetchOrder'],
            is_authenticated=True,
            base_currency=quote_currency,
        )  # Type: list[Exchange]

        log_catcher = TestHandler()
        with log_catcher:
            for exchange in exchanges:
                exchange.init()

                assets = exchange.get_assets(quote_currency=quote_currency)
                asset = select_random_assets(assets, 1)[0]
                self.assertIsInstance(asset, TradingPair)

                tickers = exchange.tickers([asset])
                price = tickers[asset]['last_price']

                amount = order_amount / price

                limit_price = price * 0.8
                style = ExchangeLimitOrder(limit_price=limit_price)

                order = exchange.order(
                    asset=asset,
                    amount=amount,
                    style=style,
                )
                sleep(1)

                open_order, _ = exchange.get_order(order.id, asset)
                self.assertEqual(0, open_order.status)

                exchange.cancel_order(open_order, asset)
                sleep(1)

                canceled_order, _ = exchange.get_order(open_order.id, asset)
                warnings = [
                    record for record in log_catcher.records
                    if record.level == WARNING
                ]

                self.assertEqual(0, len(warnings))
                self.assertEqual(2, canceled_order.status)
                print('tested {exchange} / {symbol}, order: {order}'.format(
                    exchange=exchange.name,
                    symbol=asset.symbol,
                    order=order.id,
                ))
        pass
Example 8
    def compare_bundle_with_exchange(self, exchange, assets, end_dt, bar_count,
                                     freq, data_frequency, data_portal):
        """
        Creates DataFrames from the bundle and exchange for the specified
        data set.

        Parameters
        ----------
        exchange: Exchange
        assets
        end_dt
        bar_count
        freq
        data_frequency
        data_portal

        Returns
        -------

        """
        data = dict()

        log_catcher = TestHandler()
        with log_catcher:
            data['bundle'] = data_portal.get_history_window(
                assets=assets,
                end_dt=end_dt,
                bar_count=bar_count,
                frequency=freq,
                field='close',
                data_frequency=data_frequency,
            )
            print('bundle data:\n{}'.format(data['bundle'].tail(10)))

            candles = exchange.get_candles(
                end_dt=end_dt,
                freq=freq,
                assets=assets,
                bar_count=bar_count,
            )
            data['exchange'] = get_candles_df(
                candles=candles,
                field='close',
                freq=freq,
                bar_count=bar_count,
                end_dt=end_dt,
            )
            print('exchange data:\n{}'.format(data['exchange'].tail(10)))
            for source in data:
                df = data[source]
                path = output_df(df, assets, '{}_{}'.format(freq, source))
                print('saved {}:\n{}'.format(source, path))

            assert_frame_equal(
                right=data['bundle'],
                left=data['exchange'],
                check_less_precise=True,
            )
Example 11
def test_asyncio():
    logger = logging.getLogger('Dummy')

    async def util():
        for i in range(3):
            logger.info('I am the util function', extra={'iteration': i+1})
            await asyncio.sleep(0)

    async def task(task_name: str):
        def inject_extra(record):
            record.extra['task_name'] = task_name
            record.extra['task_id'] = id(asyncio.current_task())

        with Handler(bubble=True).contextbound():
            with Processor(inject_extra).contextbound():
                logger.info('I am the task')
                await asyncio.sleep(0)
                await util()
                logger.info('I am still the task')

    root_handler = TestHandler()
    root_handler.formatter = text_formatter
    with root_handler.applicationbound():
        with redirected_logging():
            asyncio.get_event_loop().run_until_complete(asyncio.gather(task('one'), task('two'), task('three')))

    records = root_handler.formatted_records
    assert 'INFO: Dummy: I am the task <task_name=one' in records[0]
    assert 'INFO: Dummy: I am the task <task_name=two' in records[1]
    assert 'INFO: Dummy: I am the task <task_name=three' in records[2]
    assert 'INFO: Dummy: I am the util function <iteration=1, task_name=one' in records[3]
    assert 'INFO: Dummy: I am the util function <iteration=1, task_name=two' in records[4]
    assert 'INFO: Dummy: I am the util function <iteration=1, task_name=three' in records[5]
    assert 'INFO: Dummy: I am the util function <iteration=2, task_name=one' in records[6]
    assert 'INFO: Dummy: I am the util function <iteration=2, task_name=two' in records[7]
    assert 'INFO: Dummy: I am the util function <iteration=2, task_name=three' in records[8]
    assert 'INFO: Dummy: I am the util function <iteration=3, task_name=one' in records[9]
    assert 'INFO: Dummy: I am the util function <iteration=3, task_name=two' in records[10]
    assert 'INFO: Dummy: I am the util function <iteration=3, task_name=three' in records[11]
    assert 'INFO: Dummy: I am still the task <task_name=one' in records[12]
    assert 'INFO: Dummy: I am still the task <task_name=two' in records[13]
    assert 'INFO: Dummy: I am still the task <task_name=three' in records[14]
Example 12
def genome(aphidicola):
    handler = TestHandler()
    genome = ("GCA_000521565.1_Buchnera_aphidicola_G002_"
              "Myzus_persicae_Complete_Genome.fasta")
    genome = os.path.join(aphidicola.path, genome)
    with handler:
        genome = Genome(genome, assembly_summary)
        genome.sketch()
        genome.get_contigs()
        genome.get_assembly_size()
        genome.get_unknowns()
        yield genome, handler
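
A sketch of a test that might consume this fixture, assuming it is registered as a pytest fixture; the assertions only use TestHandler attributes from logbook's public API:

def test_genome_workflow_logs_cleanly(genome):
    # The fixture yields the prepared Genome alongside the TestHandler that was
    # active while sketching, contig parsing, and assembly-size checks ran.
    genome_obj, handler = genome
    assert not handler.has_errors
    assert not handler.has_criticals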
Example 13
def test_mdc_works():
    def inject_extra(record):
        record.extra['ip'] = '127.0.0.1'
        record.extra['username'] = 'Andrey'

    with TestHandler() as handler:
        handler.formatter = text_formatter
        logger = logging.getLogger('Dummy')
        with redirected_logging():
            with Processor(inject_extra):
                logger.info('hello world')
        assert len(handler.formatted_records) == 1
        assert 'INFO: Dummy: hello world <ip=127.0.0.1, username=Andrey>' in handler.formatted_records[0]
Example 14
def test_exception_text_formatter():
    with TestHandler() as handler:
        handler.formatter = text_formatter
        logger = logging.getLogger('Dummy')
        with redirected_logging():
            try:
                raise Exception('Something bad!')
            except Exception:
                logger.error('hello world', exc_info=True)
        assert len(handler.formatted_records) == 1
        record = handler.formatted_records[0]
        assert 'ERROR: Dummy: hello world' in record
        assert '/logbook_test.py' in record
        assert 'Exception: Something bad!' in record
Example 15
def test_exception_json_formatter():
    with TestHandler() as handler:
        handler.formatter = json_formatter
        logger = logging.getLogger('Dummy')
        with redirected_logging():
            try:
                raise Exception('Something bad!')
            except Exception:
                logger.error('hello world', exc_info=True)
        assert len(handler.formatted_records) == 1
        record = json.loads(handler.formatted_records[0])
        assert record['level'] == 'ERROR'
        assert record['name'] == 'Dummy'
        assert record['message'] == 'hello world'
        assert '/logbook_test.py' in record['exception']
        assert 'Exception: Something bad!' in record['exception']
Example 16
def test_json_formatting_works():
    def inject_extra(record):
        record.extra['ip'] = '127.0.0.1'
        record.extra['username'] = 'Andrey'

    with TestHandler() as handler:
        handler.formatter = json_formatter
        logger = logging.getLogger('Dummy')
        with redirected_logging():
            with Processor(inject_extra):
                logger.info('hello world')
        assert len(handler.formatted_records) == 1
        record = json.loads(handler.formatted_records[0])
        assert record['level'] == 'INFO'
        assert record['name'] == 'Dummy'
        assert record['message'] == 'hello world'
        assert record['ip'] == '127.0.0.1'
        assert record['username'] == 'Andrey'
Example 17
def make_test_handler(testcase, *args, **kwargs):
    """
    Returns a TestHandler which will be used by the given testcase. This
    handler can be used to test log messages.

    Parameters
    ----------
    testcase: unittest.TestCase
        The test class in which the log handler will be used.
    *args, **kwargs
        Forwarded to the new TestHandler object.

    Returns
    -------
    handler: logbook.TestHandler
        The handler to use for the test case.
    """
    handler = TestHandler(*args, **kwargs)
    testcase.addCleanup(handler.close)
    return handler
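
A minimal sketch of how a test case might use this helper; the test class, channel name, and message are assumptions:

import unittest
from logbook import Logger

class ExampleTest(unittest.TestCase):
    def test_cold_cache_warning(self):
        handler = make_test_handler(self)   # closed automatically via addCleanup
        with handler.applicationbound():
            Logger('cache').warn('cache is cold')
        self.assertTrue(handler.has_warning('cache is cold'))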
Example 18
    def test_run_examples(self):
        folder = join('..', '..', '..', 'catalyst', 'examples')
        files = [f for f in listdir(folder) if isfile(join(folder, f))]

        algo_list = []
        for filename in files:
            name = os.path.basename(filename)
            if filter_algos and name not in filter_algos:
                continue

            module_name = 'catalyst.examples.{}'.format(name.replace(
                '.py', ''))
            algo_list.append(module_name)

        for module_name in algo_list:
            algo = importlib.import_module(module_name)
            namespace = module_name.replace('.', '_')

            log_catcher = TestHandler()
            with log_catcher:
                run_algorithm(
                    capital_base=0.1,
                    data_frequency='minute',
                    initialize=algo.initialize,
                    handle_data=algo.handle_data,
                    analyze=TestSuiteAlgo.analyze,
                    exchange_name='poloniex',
                    algo_namespace='test_{}'.format(namespace),
                    base_currency='eth',
                    start=pd.to_datetime('2017-10-01', utc=True),
                    end=pd.to_datetime('2017-10-02', utc=True),
                    # output=out
                )
                warnings = [
                    record for record in log_catcher.records
                    if record.level == WARNING
                ]

                if len(warnings) > 0:
                    print('WARNINGS:\n{}'.format(warnings))
            pass
Example 19
def run():
    with TestHandler() as handler:
        for x in xrange(500):
            log.warning('this is not handled')
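
Since TestHandler keeps every record in memory and does not bubble by default, a variant of this loop could assert on what was captured afterwards (a sketch assuming the same module-level log object):

def run_and_check():
    with TestHandler() as handler:
        for _ in range(500):
            log.warning('this is not handled')
        # The 500 warnings were captured by the TestHandler rather than emitted.
        assert len(handler.records) == 500
        assert handler.has_warnings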
Example 20
    def test_run_examples(self):
        # folder = join('..', '..', '..', 'catalyst', 'examples')
        HERE = os.path.dirname(os.path.abspath(__file__))
        folder = os.path.join(HERE, '..', '..', '..', 'catalyst', 'examples')

        files = [
            f for f in os.listdir(folder)
            if os.path.isfile(os.path.join(folder, f))
        ]

        algo_list = []
        for filename in files:
            name = os.path.basename(filename)
            if filter_algos and name not in filter_algos:
                continue

            module_name = 'catalyst.examples.{}'.format(name.replace(
                '.py', ''))
            algo_list.append(module_name)

        exchanges = ['poloniex', 'bittrex', 'binance']
        asset_name = 'btc_usdt'
        quote_currency = 'usdt'
        capital_base = 10000
        data_freq = 'daily'
        start_date = pd.to_datetime('2017-10-01', utc=True)
        end_date = pd.to_datetime('2017-12-01', utc=True)

        for exchange_name in exchanges:
            ingest_exchange_bundles(exchange_name, data_freq, asset_name)

            for module_name in algo_list:
                algo = importlib.import_module(module_name)
                # namespace = module_name.replace('.', '_')

                log_catcher = TestHandler()
                with log_catcher:
                    run_algorithm(
                        capital_base=capital_base,
                        data_frequency=data_freq,
                        initialize=algo.initialize,
                        handle_data=algo.handle_data,
                        analyze=TestSuiteAlgo.analyze,
                        exchange_name=exchange_name,
                        algo_namespace='test_{}'.format(exchange_name),
                        quote_currency=quote_currency,
                        start=start_date,
                        end=end_date,
                        # output=out
                    )
                    warnings = [
                        record for record in log_catcher.records
                        if record.level == WARNING
                    ]

                    assert (len(warnings) == 1)
                    assert (warnings[0].message == ALPHA_WARNING_MESSAGE)
                    assert (not log_catcher.has_errors)
                    assert (not log_catcher.has_criticals)

            clean_exchange_bundles(exchange_name, data_freq)
Example 23
    def compare_bundle_with_exchange(self, exchange, assets, end_dt, bar_count,
                                     freq, data_frequency, data_portal, field):
        """
        Creates DataFrames from the bundle and exchange for the specified
        data set.

        Parameters
        ----------
        exchange: Exchange
        assets
        end_dt
        bar_count
        freq
        data_frequency
        data_portal
        field

        Returns
        -------

        """
        data = dict()

        log_catcher = TestHandler()
        with log_catcher:
            symbols = [asset.symbol for asset in assets]
            print(
                'comparing {} for {}/{} with {} timeframe until {}'.format(
                    field, exchange.name, symbols, freq, end_dt
                )
            )
            data['bundle'] = data_portal.get_history_window(
                assets=assets,
                end_dt=end_dt,
                bar_count=bar_count,
                frequency=freq,
                field=field,
                data_frequency=data_frequency,
            )
            set_print_settings()
            print(
                'the bundle data:\n{}'.format(
                    data['bundle']
                )
            )
            candles = exchange.get_candles(
                end_dt=end_dt,
                freq=freq,
                assets=assets,
                bar_count=bar_count,
            )
            data['exchange'] = get_candles_df(
                candles=candles,
                field=field,
                freq=freq,
                bar_count=bar_count,
                end_dt=end_dt,
            )
            print(
                'the exchange data:\n{}'.format(
                    data['exchange']
                )
            )
            for source in data:
                df = data[source]
                path, folder = output_df(
                    df, assets, '{}_{}'.format(freq, source)
                )

            print('saved {} test results: {}'.format(end_dt, folder))

            assert_frame_equal(
                right=data['bundle'][:-1],
                left=data['exchange'][:-1],
                check_less_precise=1,
            )
            try:
                assert_frame_equal(
                    right=data['bundle'][:-1],
                    left=data['exchange'][:-1],
                    check_less_precise=min([a.decimals for a in assets]),
                )
            except Exception as e:
                print(
                    'Some differences were found within a 1 decimal point '
                    'interval of confidence: {}'.format(e)
                )
                with open(os.path.join(folder, 'compare.txt'), 'w+') as handle:
                    handle.write(e.args[0])

            pass