Example 1
from pytest import mark


def pytest_collection_modifyitems(config, items):
    skip_slow = False
    skip_pdf = False
    if config.getoption('--skip-slow'):
        skip_slow = mark.skip('skipped; --skip-slow option is used')
    if config.getoption('--skip-pdf'):
        skip_pdf = mark.skip('skipped; --skip-pdf option is used')
    if config.getoption('--no-pdf'):
        skip_pdf = mark.skip('skipped; --no-pdf option is used')
    for item in items:
        if skip_slow and 'slow' in item.keywords:
            item.add_marker(skip_slow)
        if skip_pdf and 'pdf' in item.keywords:
            item.add_marker(skip_pdf)
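
This hook reads its flags with `config.getoption`, which only works if the flags have been registered first. A minimal sketch of the companion `pytest_addoption` hook the example assumes (option names taken from the code above; defaults and help texts are illustrative):

def pytest_addoption(parser):
    # register the command-line flags queried by pytest_collection_modifyitems
    parser.addoption('--skip-slow', action='store_true', default=False,
                     help='skip tests marked as slow')
    parser.addoption('--skip-pdf', action='store_true', default=False,
                     help='skip tests marked as pdf')
    parser.addoption('--no-pdf', action='store_true', default=False,
                     help='alias for --skip-pdf')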
Example 2
from pytest import mark


def pytest_collection_modifyitems(config, items):
    if not config.getoption('--runslow'):
        skip_slow = mark.skip(reason='need --runslow option to run')

        for item in items:
            if 'slow' in item.keywords:
                item.add_marker(skip_slow)
Example 3
from pytest import mark


def pytest_collection_modifyitems(config, items):
    if config.getoption("--runslow"):
        # --runslow given in cli: do not skip slow tests
        return
    skip_slow = mark.skip(reason="needs --runslow to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)
def pytest_runtest_setup(item):
    """Apply custom markers.

    Marks:

     - not implemented tests as expected to fail,
     - not written tests as skipped.
    """
    not_implemented = item.get_closest_marker("notimplemented")
    not_written = item.get_closest_marker("notwritten")

    if not_implemented:
        item.add_marker(mark.xfail(reason="Not implemented"))

    if not_written:
        item.add_marker(mark.skip(reason="Not written"))
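
The `slow`, `pdf`, `notimplemented`, and `notwritten` marks used above are custom, and recent pytest versions warn about unregistered marker names. A hedged sketch of registering them from conftest.py (the descriptions are illustrative):

def pytest_configure(config):
    # declare the custom markers so pytest does not emit PytestUnknownMarkWarning
    config.addinivalue_line('markers', 'slow: test is slow to run')
    config.addinivalue_line('markers', 'pdf: test exercises PDF output')
    config.addinivalue_line('markers', 'notimplemented: feature not implemented yet')
    config.addinivalue_line('markers', 'notwritten: test not written yet')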
Example 4
    from coalaip.data_formats import _extract_ld_data
    mock_get_format = mocker.patch('coalaip.data_formats._get_format_from_data')
    mock_extract_from_json = mocker.patch('coalaip.data_formats._extract_ld_data_from_json')

    mock_get_format.return_value = 'json'
    data = {'data': 'data'}

    _extract_ld_data(data)
    mock_get_format.assert_called_once_with(data)
    mock_extract_from_json.assert_called_once_with(data)


@mark.parametrize('data_format,default_keys', [
    ('json', {'type_key': 'type'}),
    ('jsonld', {'type_key': '@type', 'context_key': '@context', 'id_key': '@id'}),
    # pytest ≥4 rejects mark-wrapped values; pytest.param carries marks instead
    param('ipld', {'type_key': 'type'}, marks=mark.skip),
])
def test_extract_from_format_calls_extract_from_keys(mocker, data_format,
                                                     default_keys):
    import importlib
    utils = importlib.import_module('coalaip.data_formats')
    extract_fn = getattr(utils, '_extract_ld_data_from_{}'.format(data_format))

    mock_extract_from_keys = mocker.patch('coalaip.data_formats._extract_ld_data_from_keys')
    data = {'data': 'data'}

    extract_fn(data)
    mock_extract_from_keys.assert_called_once_with(data, **default_keys)


@mark.parametrize('data_format,custom_keys', [
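
A note on the `param(...)` calls appearing in this and later examples: the snippets originally wrapped parametrize values directly in `mark.skip(...)`, a pattern pytest removed in version 4; `pytest.param` is the supported way to attach marks to a single case. A self-contained illustration (the test name and skip reason are hypothetical):

from pytest import mark, param

@mark.parametrize('data_format', [
    'json',
    'jsonld',
    param('ipld', marks=mark.skip(reason='ipld support pending')),
])
def test_accepts_format(data_format):
    assert data_format in ('json', 'jsonld', 'ipld')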
Example 5
class TestFFTConvolve(object):
    pad_tuple = lambda self, shape, ndim: (1, ) * (ndim - len(shape)) + shape
    pad_shape = lambda self, arr, ndim: arr.reshape(
        self.pad_tuple(arr.shape, ndim))

    @fixture
    def in1(self, shape1):
        return random(np.complex64, shape1)

    @fixture
    def in2(self, shape2):
        return random(np.complex64, shape2)

    #@fixture(params=[(4,), (1,4), (2,4), (1,1,4), (1,2,4), (4,1,4), (4,2,4)])
    #def shape1(self, request):
    #    return request.param

    #@fixture(params=[(2,), (4,), (8,), (2,4), (2,8)])
    #def shape2(self, request):
    #    return request.param

    #@fixture(params=[-2, -1, 0, 1, 2])
    #def axis(self, request, shape1, shape2):
    #    # skip tests with invalid shape1/shape2/axis combination
    #    axis = request.param
    #    ndim = max(len(shape1), len(shape2))
    #    shape1 = list(self.pad_tuple(shape1, ndim))
    #    shape2 = list(self.pad_tuple(shape2, ndim))
    #    try:
    #        shape1.pop(axis)
    #        shape2.pop(axis)
    #    except:
    #        skip()
    #    for s1, s2 in zip(shape1, shape2):
    #        if (s1 != s2) and (s1 != 1) and (s2 != 1):
    #            skip()
    #    return axis

    def reference(self, in1, in2, dtype, axis):
        # equalise number of dimensions, make 'axis' the last
        ndim = max(in1.ndim, in2.ndim)
        in1 = np.rollaxis(self.pad_shape(in1, ndim), axis, ndim)
        in2 = np.rollaxis(self.pad_shape(in2, ndim), axis, ndim)
        # determine result shape
        shape = (tuple(max(s1, s2)
                       for s1, s2 in zip(in1.shape, in2.shape))[:-1] +
                 (in1.shape[-1] + in2.shape[-1] - 1, ))
        # broadcast arrays except for last dimension
        in1 = np.broadcast_to(in1, shape[:-1] + (in1.shape[-1], ))
        in2 = np.broadcast_to(in2, shape[:-1] + (in2.shape[-1], ))
        # evaluate 1d convolutions
        result = np.empty(shape, dtype)
        for idx in np.ndindex(*shape[:-1]):
            result[idx] = np.convolve(in1[idx], in2[idx], mode='full')
        # put 'axis' back in place
        return np.rollaxis(result, -1, axis)

    def do_convolve(self, thread, in1, in2, axis, reps=1):
        convolve_ = dsp.fft.FFTConvolve(in1, in2, axis)
        convolve = convolve_.compile(thread)
        out = thread.empty_like(convolve.parameter.output)
        for _ in range(reps):
            convolve(out, thread.to_device(in1), thread.to_device(in2))
        thread.synchronize()
        return out.get()

    @mark.parametrize('shape1,shape2,axis', [
        [(4,), (8,), -1],
        [(8,), (4,), -1],
        [(8,), (4,), 0],
        [(4, 8), (4,), -1],
        [(4, 8), (4,), 1],
        [(4, 8), (1, 4), -1],
        [(8, 4), (1, 4), 1],
        [(8, 4), (4, 4), -2],
        [(4, 8), (4, 4), 1],
        [(2, 4, 8), (4, 8), -2],
        [(2, 4, 8), (4, 8), -1],
        [(2, 4, 8), (4, 8), 1],
        [(2, 4, 8), (4, 8), 2],
        [(1 << 10, 4096), (1, 129), -1],
        [(1 << 10, 4096 - 128), (1, 129), -1],
    ])
    def test_convolve(self, thread, in1, in2, axis):
        out = self.do_convolve(thread, in1, in2, axis)
        reference = self.reference(in1, in2, out.dtype, axis)
        try:
            assert np.allclose(out, reference, rtol=1e-4, atol=1e-4)
        except AssertionError:
            # dump the failing configuration before re-raising
            print(in1.shape, in2.shape, out.shape, axis)
            print(reference - out)
            raise

    @mark.parametrize('shape1,shape2,axis', [
        # pytest ≥4: skipped cases go through pytest.param rather than
        # wrapping the value list in mark.skip
        param((1 << 10, 4096), (1, 129), -1, marks=mark.skip),
        param((1 << 10, 4096 - 128), (1, 129), -1, marks=mark.skip),
        [(1 << 13, 4096 - 128, 1), (1, 129, 1), -2],
        [(1 << 12, 4096 - 128, 2), (1, 129, 1), -2],
        [(1 << 11, 4096 - 128, 4), (1, 129, 1), -2],
    ], ids=['1k*4k_in', '1k*4k_out', '8k*4k_out',
            '4k*4k_out*2', '2k*4k_out*4'])
    def test_speed(self, thread, in1, in2, axis):
        '''
        Results on RX460: (1<<26 samples)
            * 4k output is faster than 4k ± 128, for fft and tvmode
            * fft 33.3ms, fft 0.2ms, mul 0.7ms, ifft 33.5ms with 4k input, 1k blocks
            * fft 1.8ms, fft 0.1ms, mul 0.7ms, fft 1.3ms with 4k output, 1k blocks
            * fft 28.5ms, fft 0.1ms, mul 11.8ms, ifft 17.7ms, total 58ms with 4k output, 16k blocks
            * fft 56.8ms, fft 0.1ms, mul 11.8ms, ifft 36.5ms, total 105ms with 4k output, 8k blocks, 2ch
            * fft 58ms, fft 0.1ms, mul 11.8ms, ifft 40ms, total 110ms with 4k output, 4k blocks, 4ch
        '''
        self.do_convolve(thread, in1, in2, axis, reps=1)
Example 6
FIRST_COMPLETED_CONTRIBUTE = 369810
LAST_COMPLETED_CONTRIBUTE = 4398805
FORKING_CONTRIBUTE = 1212533

# Redemption
REPORTING_WINDOW_CREATE = 848027
INITIAL_REPORT_REDEMPTION = 517315
CROWDSOURCER_REDEMPTION = 432903
PARTICIPATION_TOKEN_REDEMPTION = 93737

# Trading
CREATE_ORDER = 551929
FILL_ORDER = 828536
CLAIM_PROCEEDS = 2171595

pytestmark = mark.skip(reason="Just for testing gas cost")

tester.STARTGAS = long(6.7 * 10**6)


def test_feeWindowCreation(localFixture, universe, cash):
    endTime = long(localFixture.chain.head_state.timestamp +
                   timedelta(days=365).total_seconds())

    with PrintGasUsed(localFixture, "REPORTING_WINDOW_CREATE",
                      REPORTING_WINDOW_CREATE):
        universe.getOrCreateFeeWindowByTimestamp(endTime)


def test_marketCreation(localFixture, universe, cash):
    marketCreationFee = universe.getOrCacheMarketCreationCost()
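
`pytestmark` assigned at module level, as above, marks every test collected from the file; it also accepts a list when several marks should apply. A hedged sketch (the mark names are illustrative):

from pytest import mark

# every test in this module is both marked slow and skipped
pytestmark = [
    mark.slow,
    mark.skip(reason='gas-cost measurement only'),
]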
Example 7
    # Test the correct data format was persisted
    manifestation_persisted_data = manifestation.to_jsonld()

    # Check we called plugin.save() with the correct data
    mock_save_call_list = mock_plugin.save.call_args_list
    assert len(mock_save_call_list) == 1
    assert mock_save_call_list[0] == (
        (manifestation_persisted_data, ),
        {
            'user': alice_user
        },
    )


@mark.parametrize('use_data_format_enum', [True, False])
@mark.parametrize('data_format',
                  [None, 'json', 'jsonld', param('ipld', marks=mark.skip)])
def test_register_manifestation(mock_plugin, mock_coalaip, manifestation_data,
                                alice_user, data_format, use_data_format_enum,
                                mock_work_create_id,
                                mock_manifestation_create_id,
                                mock_copyright_create_id):
    from tests.utils import create_entity_id_setter

    # Remove the 'manifestationOfWork' key to create a new Work
    del manifestation_data['manifestationOfWork']

    # Set the persisted ids of the entities
    mock_plugin.save.side_effect = create_entity_id_setter(
        mock_work_create_id,
        mock_manifestation_create_id,
        mock_copyright_create_id,
Example 8
    _extract_ld_data(data)
    mock_get_format.assert_called_once_with(data)
    mock_extract_from_json.assert_called_once_with(data)


@mark.parametrize('data_format,default_keys', [
    ('json', {
        'type_key': 'type'
    }),
    ('jsonld', {
        'type_key': '@type',
        'context_key': '@context',
        'id_key': '@id'
    }),
    param('ipld', {'type_key': 'type'}, marks=mark.skip),
])
def test_extract_from_format_calls_extract_from_keys(mocker, data_format,
                                                     default_keys):
    import importlib
    utils = importlib.import_module('coalaip.data_formats')
    extract_fn = getattr(utils, '_extract_ld_data_from_{}'.format(data_format))

    mock_extract_from_keys = mocker.patch(
        'coalaip.data_formats._extract_ld_data_from_keys')
    data = {'data': 'data'}

    extract_fn(data)
    mock_extract_from_keys.assert_called_once_with(data, **default_keys)
Example 9
@mark.parametrize('entity_cls_name', ALL_ENTITY_CLS)
def test_entity_init_raises_on_non_subclassed_plugin(base_model,
                                                     entity_cls_name):
    entity_cls = get_entity_cls(entity_cls_name)

    class NonSubclassPlugin():
        pass

    # Instantiation should raise if plugin not subclassed from AbstractPlugin
    with raises(TypeError):
        entity_cls(model=base_model, plugin=NonSubclassPlugin())


@mark.parametrize('entity_cls_name', ALL_ENTITY_CLS)
@mark.parametrize('use_data_format_enum', [True, False])
@mark.parametrize('data_format',
                  [None, 'json', 'jsonld', param('ipld', marks=mark.skip)])
def test_entity_init_from_data(mock_plugin, data_format, use_data_format_enum,
                               entity_cls_name, request):
    entity_cls = get_entity_cls(entity_cls_name)
    data = request.getfixturevalue(DATA_NAME_FOR_ENTITY_CLS[entity_cls_name])
    json = request.getfixturevalue(JSON_NAME_FOR_ENTITY_CLS[entity_cls_name])
    jsonld = request.getfixturevalue(JSONLD_NAME_FOR_ENTITY_CLS[entity_cls_name])

    kwargs = {}
    if data_format is None:
        kwargs['data'] = data
    else:
        if data_format == 'json':
            kwargs['data'] = json
        elif data_format == 'jsonld':
            kwargs['data'] = jsonld
Example 10
from eth_tester.exceptions import TransactionFailed
from utils import captureFilteredLogs, AssertLog, nullAddress, TokenDelta, PrintGasUsed
from pytest import raises, mark

pytestmark = mark.skip(reason="We might not even need governance and currently dont account for transfering ownership")

def test_gov(contractsFixture, universe, reputationToken, cash):
    if not contractsFixture.paraAugur:
        return

    nexus = contractsFixture.contracts["OINexus"]

    deployer = contractsFixture.accounts[0]
    bob = contractsFixture.accounts[1]
    alice = contractsFixture.accounts[2]

    reputationToken.faucet(100, sender=bob)
    reputationToken.faucet(100, sender=alice)

    feePot = contractsFixture.getFeePot(universe)

    cash.faucet(10000)
    cash.approve(feePot.address, 10000000000000000)

    reputationToken.approve(feePot.address, 10000000000000000, sender=bob)
    reputationToken.approve(feePot.address, 10000000000000000, sender=alice)

    rewardsToken = contractsFixture.upload('../src/contracts/Cash.sol', "rewardsToken")
    lpToken = contractsFixture.upload('../src/contracts/Cash.sol', "lpToken")

    # Deploy GOV token
Example 11
import re
import itertools

import numpy as np
import pandas as pd

from pytest import fixture, mark

from .lib import MeasurementTests, CaptureMeasurement

try:
    import pulsegen
    from uqtools import (config, Parameter, Constant, Sweep, ProgramAWG,
                         ZeroAWG, ProgramAWGParametric, ProgramAWGSweep,
                         MeasureAWGSweep, MultiAWGSweep, NormalizeAWG)
    from uqtools.helpers import resolve_value
except ImportError:
    pytestmark = mark.skip()
    NormalizeAWG = lambda: False
# import Parameter, otherwise test collection will fail when pulsegen is n/a
from uqtools import Parameter


# switch to CSVStore for tests that need the file system
@fixture
def filesystem(monkeypatch, tmpdir):
    monkeypatch.setattr(config, 'store', 'CSVStore')
    monkeypatch.setattr(config, 'datadir', str(tmpdir))


# minimal pulsegen configuration
def pulse_config(nchpairs):
    '''Set up pulsegen. Return number of chpairs.'''
Example 12
import logging
from pytest import approx, mark, skip, fixture

import pandas as pd
import numpy as np
import binpickle

import lenskit.util.test as lktu
from lenskit.algorithms import Recommender

from lenskit.algorithms import tf as lktf
try:
    import tensorflow as tf
except ImportError:
    pytestmark = mark.skip('cannot import tensorflow')

if not lktf.TF_AVAILABLE:
    pytestmark = mark.skip('tensorflow not available')

_log = logging.getLogger(__name__)


@fixture(scope='function')
def tf_session():
    tf.keras.backend.clear_session()


@mark.slow
def test_tf_bmf_save_load(tmp_path, tf_session):
    "Training, saving, and loading a bias model."
    fn = tmp_path / 'bias.bpk'
Example 13
FIRST_COMPLETED_CONTRIBUTE =    328081
LAST_COMPLETED_CONTRIBUTE =     3407216
FORKING_CONTRIBUTE =            980020

# Redemption
REPORTING_WINDOW_CREATE =           383330
INITIAL_REPORT_REDEMPTION =         581468
CROWDSOURCER_REDEMPTION =           418563
PARTICIPATION_TOKEN_REDEMPTION =    115984

# Trading
CREATE_ORDER =      591818
FILL_ORDER =        835790
CLAIM_PROCEEDS =    1230099

pytestmark = mark.skip(reason="Just for testing gas cost")

tester.STARTGAS = long(6.7 * 10**6)

def test_feeWindowCreation(localFixture, universe, cash):
    endTime = long(localFixture.chain.head_state.timestamp + timedelta(days=365).total_seconds())

    with PrintGasUsed(localFixture, "REPORTING_WINDOW_CREATE", REPORTING_WINDOW_CREATE):
        universe.getOrCreateFeeWindowByTimestamp(endTime)

def test_marketCreation(localFixture, universe, cash):
    marketCreationFee = universe.getOrCacheMarketCreationCost()

    endTime = long(localFixture.chain.head_state.timestamp + timedelta(days=1).total_seconds())
    feePerEthInWei = 10**16
    denominationToken = cash
Example 14
import logging
from contextlib import contextmanager

import numpy as np

from csr import CSR
from csr.test_utils import csrs, has_memory
from csr.kernels import use_kernel

from pytest import skip, mark
from hypothesis import given

import test_multiply as tmm
import test_mult_vec as tmv

try:
    from csr.kernels import mkl
except ImportError:
    pytestmark = mark.skip("MKL is not available")

_log = logging.getLogger(__name__)


@contextmanager
def mkl_lim(lim=1000):
    "Limit MKL to a capacity of X"
    save = mkl.max_nnz
    try:
        mkl.max_nnz = lim
        yield lim
    finally:
        mkl.max_nnz = save
        pass
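
The try/except guard above (and in the other optional-dependency examples) assigns `pytestmark` by hand; `pytest.importorskip` is the built-in shorthand when the whole module should be skipped if an import fails. A minimal sketch:

import pytest

# skips collection of this module at import time if the kernel is unavailable
mkl = pytest.importorskip('csr.kernels.mkl')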
Example 15
from eth_tester.exceptions import TransactionFailed
from utils import captureFilteredLogs, AssertLog, nullAddress, TokenDelta
from pytest import raises, mark

pytestmark = mark.skip(reason="Hack no longer viable without further hacks. To test manually remove the 'onlyOwner' modifier from the OINexus:addParaAugur function and run with --paraAugur")

def test_oi_nexus(contractsFixture, universe):
    if not contractsFixture.paraAugur:
        return

    nexus = contractsFixture.contracts["OINexus"]

    account = contractsFixture.accounts[0]
    universeAddress = universe.address
    paraUniverse1 = contractsFixture.accounts[1]
    paraUniverse2 = contractsFixture.accounts[2]
    paraUniverse3 = contractsFixture.accounts[3]

    # We'll do a hack to unit test the contract and make ourselves a ParaAugur instance
    nexus.addParaAugur(account)

    # Now we'll add a universe and a "paraUniverse" which will just be an account key so that we can send from it to do updates
    nexus.registerParaUniverse(universeAddress, paraUniverse1)

    # Let's provide some test data and get/store a new reporting fee. In this paraUniverse, let's say we're at exactly the target OI. This will give us the default reporting fee back
    targetRepMarketCapInAttoCash = 100
    repMarketCapInAttoCash = 100
    reportingFee = nexus.recordParaUniverseValuesAndUpdateReportingFee(universeAddress, targetRepMarketCapInAttoCash, repMarketCapInAttoCash, sender=paraUniverse1)
    assert reportingFee == 10000
Example 16
from hypothesis.strategies import composite
from pytest import mark
from xarray.testing import assert_allclose

from indica.converters import CoordinateTransform
from .test_flux_surfaces import flux_coordinates
from .test_flux_surfaces import flux_coordinates_and_axes
from .test_magnetic import magnetic_coordinates
from .test_magnetic import magnetic_coordinates_and_axes
from .test_transect import transect_coordinates
from .test_transect import transect_coordinates_and_axes
from .test_trivial import trivial_transforms
from .test_trivial import trivial_transforms_and_axes
from ..strategies import arbitrary_coordinates
from ..strategies import domains

pytestmark = mark.skip(
    reason="These tests rely on mathematical identities that do not hold numerically."
)


@composite
def coordinate_transforms_and_axes(
        draw,
        domain=((0.0, 1.0), (0.0, 1.0), (0.0, 1.0)),
        min_side=1,
        max_side=12,
):
    """Strategy for generating abritrary
    :py:class:`indica.converters.CoordinateTransform` objects and a
    set of x1, x2, and t axes to go with them. They should already
    have had an equilibrium object set.
Example 17
    assert manifestation.persist_id is not None

    # Test the correct data format was persisted
    manifestation_persisted_data = manifestation.to_jsonld()

    # Check we called plugin.save() with the correct data
    mock_save_call_list = mock_plugin.save.call_args_list
    assert len(mock_save_call_list) == 1
    assert mock_save_call_list[0] == (
        (manifestation_persisted_data,),
        {'user': alice_user},
    )


@mark.parametrize('use_data_format_enum', [True, False])
@mark.parametrize('data_format',
                  [None, 'json', 'jsonld', param('ipld', marks=mark.skip)])
def test_register_manifestation(mock_plugin, mock_coalaip, manifestation_data,
                                alice_user, data_format, use_data_format_enum,
                                mock_work_create_id,
                                mock_manifestation_create_id,
                                mock_copyright_create_id):
    from tests.utils import create_entity_id_setter

    # Remove the 'manifestationOfWork' key to create a new Work
    del manifestation_data['manifestationOfWork']

    # Set the persisted ids of the entities
    mock_plugin.save.side_effect = create_entity_id_setter(
        mock_work_create_id,
        mock_manifestation_create_id,
        mock_copyright_create_id,
Example 18
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Example usage of the tqdm and progressbar2 modules."""

import sys
from time import sleep

import progressbar
from pytest import mark
from tqdm import tqdm

examples = []

# Testing the progbars is a bit slow, so we'll skip them.
# Comment the following line if testing of progbar.py is required.
pytestmark = mark.skip(reason="Testing progbars takes a long time")


def example(fn):
    """Display progress bars."""
    def wrapped():
        try:
            sys.stdout.write("Running: %s\n" % fn.__name__)
            fn()
            sys.stdout.write("\n")
        except KeyboardInterrupt:
            sys.stdout.write("\nSkipping example.\n\n")

    examples.append(wrapped)
    return wrapped
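
For context, the `example` decorator collects each wrapped demo into `examples` so they can be run in sequence. A hedged usage sketch continuing the module above (the demo body is hypothetical):

@example
def tqdm_demo():
    # hypothetical demo: advance a tqdm bar
    for _ in tqdm(range(50)):
        sleep(0.01)

if __name__ == '__main__':
    for run in examples:
        run()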
Example 19
from utils import longToHexString, stringToBytes, twentyZeros, thirtyTwoZeros, longTo32Bytes
from pytest import fixture, raises, mark
from eth_tester.exceptions import TransactionFailed

pytestmark = mark.skip(reason="Mock Tests off")

numTicks = 10 ** 10
def test_market_creation(localFixture, mockUniverse, mockDisputeWindow, mockCash, chain, constants, mockMarket, mockReputationToken, mockShareToken, mockShareTokenFactory):
    fee = 16
    oneEther = 10 ** 18
    endTime = localFixture.contracts["Time"].getTimestamp() + constants.DESIGNATED_REPORTING_DURATION_SECONDS()
    market = localFixture.upload('../source/contracts/reporting/Market.sol', 'newMarket')

    # note: the original snippet used `fixture.accounts[...]`, which is pytest's
    # decorator; the contracts fixture object here is `localFixture`
    with raises(TransactionFailed):
        market.initialize(mockUniverse.address, endTime, fee, localFixture.accounts[1], localFixture.accounts[1], 1, numTicks)

    with raises(TransactionFailed):
        market.initialize(mockUniverse.address, endTime, fee, localFixture.accounts[1], localFixture.accounts[1], 9, numTicks)

    with raises(TransactionFailed):
        market.initialize(mockUniverse.address, endTime, fee, localFixture.accounts[1], localFixture.accounts[1], 7, numTicks)

    with raises(TransactionFailed):
        market.initialize(mockUniverse.address, endTime, oneEther / 2 + 1, localFixture.accounts[1], localFixture.accounts[1], 5, numTicks)

    with raises(TransactionFailed):
        market.initialize(mockUniverse.address, endTime, fee, longToHexString(0), localFixture.accounts[1], 5, numTicks)

    with raises(TransactionFailed):
        market.initialize(mockUniverse.address, endTime, fee, localFixture.accounts[1], longToHexString(0), 5, numTicks)
Example 20
import logging
from pytest import approx, mark, skip, fixture

import pandas as pd
import numpy as np
import binpickle

import lenskit.util.test as lktu
from lenskit.algorithms import Recommender

try:
    from lenskit.algorithms import tf as lktf
    import tensorflow as tf
except ImportError:
    pytestmark = mark.skip('tensorflow not available')

_log = logging.getLogger(__name__)


@fixture(scope='function')
def tf_session():
    tf.keras.backend.clear_session()


@mark.slow
def test_tf_bmf_save_load(tmp_path, tf_session):
    "Training, saving, and loading a bias model."
    fn = tmp_path / 'bias.bpk'
    ratings = lktu.ml_test.ratings

    original = lktf.BiasedMF(20, batch_size=1024, epochs=20)
Example 21
import sys
from time import sleep

from progressbar import (ETA, AbsoluteETA, AdaptiveETA, AdaptiveTransferSpeed,
                         AnimatedMarker, Bar, BouncingBar, Counter,
                         FileTransferSpeed, FormatLabel, Percentage,
                         ProgressBar, ReverseBar, RotatingMarker,
                         SimpleProgress, Timer)
from pytest import mark
from tqdm import tqdm

examples = []

# Testing the progbars is a bit slow, so we'll skip them.
# Comment the following line if testing of progbar.py is required.
pytestmark = mark.skip(reason="Testing progbars takes a long time")


def example(fn):
    """Display progress bars."""
    def wrapped():
        try:
            sys.stdout.write('Running: %s\n' % fn.__name__)
            fn()
            sys.stdout.write('\n')
        except KeyboardInterrupt:
            sys.stdout.write('\nSkipping example.\n\n')

    examples.append(wrapped)
    return wrapped
Example 22
from pytest import mark as _mark

from . import config as cfg

not_written = _mark.skip(reason='Test not written')
no_SQLLite = _mark.skipif('sqlite' in cfg.SQLALCHEMY_DATABASE_URI.lower(),
                          reason='SQLite does not support this test')

skip = _mark.skip
skipif = _mark.skipif
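
Such a module lets test files apply the canned marks directly. A hedged sketch of a consumer (the module path and test names are hypothetical):

from tests.markers import no_SQLLite, not_written

@not_written
def test_roundtrip():
    ...

@no_SQLLite
def test_row_locking():
    ...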