def test_mutual_recursion():
    t = st.deferred(lambda: a | b)
    a = st.deferred(lambda: st.none() | st.tuples(st.just("a"), b))
    b = st.deferred(lambda: st.none() | st.tuples(st.just("b"), a))

    for c in ("a", "b"):
        assert minimal(t, lambda x: x is not None and x[0] == c) == (c, None)
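A minimal sketch of the mechanism the test above relies on: st.deferred only evaluates its lambda at first draw, so a just-based strategy may refer to names bound later, or to itself.

import hypothesis.strategies as st

# Evaluation is deferred until the first draw, so `tree` can reference itself;
# st.just supplies the constant node label.
tree = st.deferred(lambda: st.none() | st.tuples(st.just("node"), tree))
tree.example()  # e.g. ('node', ('node', None))

Example #2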
def brief_data(essential_count=5, nice_to_have_count=5):
    return fixed_dictionaries({
        'specialistRole': just('developer'),
        'location': lists(elements=just(['ACT'])),
        'essentialRequirements': requirements_list(essential_count),
        'niceToHaveRequirements': requirements_list(nice_to_have_count),
    })
Example #3
    def steps(self):
        strategies = []
        for rule in self.rules():
            converted_arguments = {}
            valid = True
            if rule.precondition is not None and not rule.precondition(self):
                continue
            for k, v in sorted(rule.arguments.items()):
                if isinstance(v, Bundle):
                    bundle = self.bundle(v.name)
                    if not bundle:
                        valid = False
                        break
                    else:
                        v = sampled_from(bundle)
                converted_arguments[k] = v
            if valid:
                strategies.append(TupleStrategy((
                    just(rule),
                    FixedKeysDictStrategy(converted_arguments)
                ), tuple))
        if not strategies:
            raise InvalidDefinition(
                u'No progress can be made from state %r' % (self,)
            )

        for name, bundle in self.bundles.items():
            if len(bundle) > 1:
                strategies.append(
                    builds(
                        ShuffleBundle, just(name),
                        lists(integers(0, len(bundle) - 1))))

        return one_of(strategies)
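TupleStrategy and FixedKeysDictStrategy above are Hypothesis internals; the same rule-plus-arguments pairing can be sketched with the public API (a sketch, not the library's own code):

import hypothesis.strategies as st

def step_strategy(rule, argument_strategies):
    # Pair the constant rule with one draw of each of its argument strategies.
    return st.tuples(st.just(rule), st.fixed_dictionaries(argument_strategies))

Example #4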
def brief_data(essential_count=5, nice_to_have_count=5):
    return fixed_dictionaries({
        'title': just('My Test Brief Title'),
        'specialistRole': just('developer'),
        'location': text(min_size=1, alphabet='abcdefghijkl'),
        'essentialRequirements': requirements_list(essential_count),
        'niceToHaveRequirements': requirements_list(nice_to_have_count),
    })
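Example #5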
    def test_add_default_field_mapping_is_deprecated(self):
        class UnregisteredCustomishField(CustomishField):
            """Just to get deprecation warning when registered."""

        add_default_field_mapping(UnregisteredCustomishField, just(u"a"))
        with self.assertRaises(InvalidArgument):
            # Double-registering is an error, and registry is shared.
            register_field_strategy(UnregisteredCustomishField, just(u"a"))
Example #6
def resolve_Type(thing):
    if thing.__args__ is None:
        return st.just(type)
    inner = thing.__args__[0]
    if getattr(inner, '__origin__', None) is typing.Union:
        return st.sampled_from(inner.__args__)
    elif hasattr(inner, '__union_params__'):  # pragma: no cover
        return st.sampled_from(inner.__union_params__)
    return st.just(inner)
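st.from_type routes typing.Type[...] through logic like resolve_Type above. A quick usage sketch: Type[int] admits only the class int itself, hence the just(inner) branch.

import typing
import hypothesis.strategies as st

# Type[int] can only ever produce the class `int` itself.
assert st.from_type(typing.Type[int]).example() is int

Example #7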
def test_errors_are_deferred_until_repr_is_calculated():
    s = st.builds(
        lambda x, y: 1,
        st.just(IHaveABadRepr()),
        y=st.one_of(
            st.sampled_from((IHaveABadRepr(),)), st.just(IHaveABadRepr()))
    ).map(lambda t: t).filter(lambda t: True).flatmap(
        lambda t: st.just(IHaveABadRepr()))

    with pytest.raises(ValueError):
        repr(s)
Example #8
def interleaved_strategy_factory():
    '''
    Generate interleaved FASTQ records whose ids are guaranteed to match
    within each forward/reverse pair. *_kwargs are passed through to
    seq_rec_strategy_factory to customize the forward and reverse reads.
    '''
    strategy = st.uuids().map(str).flatmap(
        lambda id:
            st.tuples(
                seq_rec_strategy_factory(5, 20, idstrat=st.shared(st.just(id), key=id)),
                seq_rec_strategy_factory(5, 20, idstrat=st.shared(st.just(id), key=id))))
    return strategy
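The key ingredient above is st.shared: every strategy sharing a key yields one common value within a single generated example, which is what keeps the forward and reverse read ids identical. A minimal sketch:

import hypothesis.strategies as st

# Both elements reuse the single value drawn for the key "read-id".
pair = st.tuples(st.shared(st.uuids(), key="read-id"),
                 st.shared(st.uuids(), key="read-id"))
a, b = pair.example()
assert a == b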
Example #9
    def the_func(x):
        """
        From a nat and a base, generate a valid carry-in value.

        :param x: a nat and a corresponding base
        :type x: tuple of (list of int) * int
        :returns: a strategy that yields triples of (nat, base, carry-in)
        """
        (nat, base) = x
        return strategies.tuples(
            strategies.just(nat), strategies.just(base), strategies.integers(min_value=0, max_value=base - 1)
        )
Example #10
def _one_hots():
    index_size = st.integers(min_value=1, max_value=5)
    lengths = st.lists(
        elements=st.integers(min_value=0, max_value=5))
    return st.tuples(index_size, lengths).flatmap(
        lambda x: st.tuples(
            st.just(x[0]),
            st.just(x[1]),
            st.lists(
                elements=st.integers(min_value=0, max_value=x[0] - 1),
                min_size=sum(x[1]),
                max_size=sum(x[1]))))
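This is the usual two-stage flatmap pattern: draw the size parameters first, then draw data whose shape depends on them, re-emitting the parameters with st.just so they appear in the output. A smaller sketch of the same idea:

import hypothesis.strategies as st

sized = st.integers(0, 5).flatmap(
    lambda n: st.tuples(
        st.just(n),  # keep the drawn size in the output tuple
        st.lists(st.integers(), min_size=n, max_size=n)))
n, xs = sized.example()
assert len(xs) == n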
Example #11
def gen_with_size(args):
    lengths, inner_shape = args
    data_dim = [sum(lengths)] + inner_shape
    lengths = np.array(lengths, dtype=np.int32)
    if with_pad_data:
        return st.tuples(
            st.just(lengths),
            hu.arrays(data_dim),
            hu.arrays(inner_shape),
            hu.arrays(inner_shape))
    else:
        return st.tuples(st.just(lengths), hu.arrays(data_dim))
Example #12
def regex_strategy(regex, fullmatch):
    if not hasattr(regex, "pattern"):
        regex = re.compile(regex)

    is_unicode = isinstance(regex.pattern, text_type)

    parsed = sre_parse.parse(regex.pattern, flags=regex.flags)

    if not parsed:
        if is_unicode:
            return st.text()
        else:
            return st.binary()

    if is_unicode:
        base_padding_strategy = st.text()
        empty = st.just(u"")
        newline = st.just(u"\n")
    else:
        base_padding_strategy = st.binary()
        empty = st.just(b"")
        newline = st.just(b"\n")

    right_pad = base_padding_strategy
    left_pad = base_padding_strategy

    if fullmatch:
        right_pad = empty
    elif parsed[-1][0] == sre.AT:
        if parsed[-1][1] == sre.AT_END_STRING:
            right_pad = empty
        elif parsed[-1][1] == sre.AT_END:
            if regex.flags & re.MULTILINE:
                right_pad = st.one_of(
                    empty, st.builds(operator.add, newline, right_pad)
                )
            else:
                right_pad = st.one_of(empty, newline)
    if fullmatch:
        left_pad = empty
    elif parsed[0][0] == sre.AT:
        if parsed[0][1] == sre.AT_BEGINNING_STRING:
            left_pad = empty
        elif parsed[0][1] == sre.AT_BEGINNING:
            if regex.flags & re.MULTILINE:
                left_pad = st.one_of(empty, st.builds(operator.add, left_pad, newline))
            else:
                left_pad = empty

    base = base_regex_strategy(regex, parsed).filter(regex.search)

    return maybe_pad(regex, base, left_pad, right_pad)
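This function backs st.from_regex, where the fullmatch flag and the anchor handling above are directly observable (a usage sketch):

import hypothesis.strategies as st

# fullmatch=True forces empty padding on both sides, leaving exactly "cat".
assert st.from_regex(r"cat", fullmatch=True).example() == "cat"
# An unanchored pattern may be padded on either side, but always matches.
assert "cat" in st.from_regex(r"cat").example()

Example #13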
def deployment_configurations():
    return strategies.builds(
        model.DeploymentConfiguration,
        domain=domains(),
        kubernetes_namespace=kubernetes_namespaces(),
        subscription_manager_endpoint=urls(),

        s3_access_key_id=aws_access_key_id(),
        s3_secret_key=aws_secret_key(),

        introducer_image=strategies.just(u"tahoe-introducer:latest"),
        storageserver_image=strategies.just(u"tahoe-storageserver:latest"),
    )
Example #14
    def __init__(self):
        self.reinit()
        self.acquired = set()
        self.size = 0

        self.init_step = tuples(
            just("init"),
            integers(
                min_value=1,
                max_value=32
            )
        )
        self.acquire_step = tuples(just("acquire"), just(0))
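Example #15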
def test_foreign_key_primary(self, buf):
    # Regression test for #1307
    company_strategy = from_model(Company, name=just("test"))
    strategy = from_model(
        CompanyExtension, company=company_strategy, self_modifying=just(2)
    )
    try:
        ConjectureData.for_buffer(buf).draw(strategy)
    except HypothesisException:
        reject()
    # Draw again with the same buffer. This will cause a duplicate
    # primary key.
    ConjectureData.for_buffer(buf).draw(strategy)
    assert CompanyExtension.objects.all().count() == 1
Example #16
def steps(self):
    if not self.initialized:
        self.initialized = True
        return self.init_step
    else:
        if self.acquired:
            release_step = tuples(just(
                "release"
            ), sampled_from(sorted(self.acquired)))
        else:
            release_step = tuples(just(
                "release"
            ), integers(min_value=0, max_value=self.size - 1))
        return release_step | self.acquire_step
Example #17
    def make_radix(base):
        """
        Build a radix from base.

        :param int base: the base of the radix
        """
        list1 = build_nat(base, max_len)
        list2 = build_nat(base, max_len)
        list3 = build_nat(base, max_len)
        if list1 == [] and list2 == [] and list3 == []:
            return strategies.builds(Radix, strategies.just(0), list1, list2, list3, strategies.just(base))
        else:
            return strategies.builds(
                Radix, strategies.sampled_from((-1, 1)), list1, list2, list3, strategies.just(base)
            )
Example #18
def models(model, **extra):
    result = {}
    mappings = field_mappings()
    mandatory = set()
    for f in model._meta.concrete_fields:
        if isinstance(f, dm.AutoField):
            continue
        try:
            mapped = mappings[type(f)]
        except KeyError:
            if not f.null:
                mandatory.add(f.name)
            continue
        if f.null:
            mapped = st.one_of(st.none(), mapped)
        result[f.name] = mapped
    missed = {x for x in mandatory if x not in extra}
    if missed:
        raise InvalidArgument((
            'Missing arguments for mandatory field%s %s for model %s' % (
                's' if len(missed) > 1 else '',
                ', '.join(missed),
                model.__name__,
            )))
    for k, v in extra.items():
        # Wrap raw values in just() so everything passed on is a strategy;
        # a trailing result.update(extra) would undo this wrapping.
        if isinstance(v, SearchStrategy):
            result[k] = v
        else:
            result[k] = st.just(v)
    return ModelStrategy(model, result)
Example #19
def question_type_and_weight() -> SearchStrategy:
    return one_of(
        tuples(sampled_from(Question.CHOICE_TYPES),
               fixed_decimals()),
        tuples(sampled_from(sorted(set(Question.available_types()) - set(Question.CHOICE_TYPES))),
               just(0))
    )
Example #20
def jenkins_build_results(inQueue=None, builds=None):
    """Create a strategy for generating Jenkins API information for a job.

    :param strategy inQueue: strategy for the inQueue key, or None to use
        the default.
    :param strategy builds: strategy for populating the builds key, or None
        for the default. The special value `NO_BUILDS` will mean that the
        builds key is not in the resulting dict at all.
    :return strategy: a strategy.
    """
    strats = []
    if inQueue is None:
        inQueue = booleans()
        strats.append(just(pmap()))
    without_builds = fixed_dictionaries(dict(
        inQueue=inQueue))
    if builds is None or builds is NO_BUILDS:
        strats.append(without_builds)
    if builds is None:
        builds = lists(jenkins_builds, average_size=1)
    if builds is not NO_BUILDS:
        with_builds = fixed_dictionaries(dict(
            inQueue=inQueue,
            builds=builds,
            property=dictionaries(
                text(max_size=2), text(max_size=2),
                average_size=1, max_size=2)))
        strats.append(with_builds)
    return one_of(*strats)
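Note that the average_size argument dates this snippet to Hypothesis 3.x; it was deprecated and later removed, so under current releases list length is controlled by min_size/max_size only (a hedged sketch reusing the snippet's jenkins_builds):

# Hedged sketch for current Hypothesis, where average_size no longer exists.
builds_strategy = lists(jenkins_builds, max_size=2)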
Example #21
def header(header_class, **kwargs):
    """Create a strategy for producing headers of a specific class.

    Args:
        header_class: The type of header to be produced. This class will be
            introspected to determine suitable strategies for each named
            field.

        **kwargs: Any supplied keyword arguments can be used to fix the value
            of particular header fields.
    """

    field_strategies = {}
    for field_name in header_class.ordered_field_names():
        if field_name in kwargs:
            field_strategy = just(kwargs.pop(field_name))
        else:
            value_type = getattr(header_class, field_name).value_type
            field_strategy = integers(value_type.MINIMUM, value_type.MAXIMUM)
        field_strategies[field_name] = field_strategy

    if len(kwargs) > 0:
        raise TypeError("Unrecognised binary header field names {} for {}".format(
            ', '.join(kwargs.keys()),
            header_class.__name__))

    return fixed_dictionaries(field_strategies) \
        .map(lambda kw: header_class(**kw))
Example #22
def test_flatmap_retrieve_from_db():
    constant_float_lists = strategy(floats(0, 1)).flatmap(
        lambda x: lists(just(x))
    )

    track = []

    db = ExampleDatabase()

    @given(constant_float_lists, settings=Settings(database=db))
    def record_and_test_size(xs):
        track.append(xs)
        assert sum(xs) < 1

    with pytest.raises(AssertionError):
        record_and_test_size()

    assert track
    example = track[-1]

    while track:
        track.pop()

    with pytest.raises(AssertionError):
        record_and_test_size()

    assert track[0] == example
Example #23
def test_may_fill_with_nan_when_unique_is_set():
    find_any(
        nps.arrays(
            dtype=float, elements=st.floats(allow_nan=False), shape=10,
            unique=True, fill=st.just(float('nan'))),
        lambda x: np.isnan(x).any()
    )
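Example #24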
def brief_response_data(essential_count=5, nice_to_have_count=5):
    return fixed_dictionaries({
        "essentialRequirements": requirements_list(essential_count, answers=True),
        "niceToHaveRequirements": requirements_list(nice_to_have_count, answers=True),
        "availability": _brief_response_availability,
        "respondToEmailAddress": just("*****@*****.**"),
    })
Example #25
def test_prints_debug_on_no_simplification():
    with Settings(verbosity=Verbosity.debug):
        with capture_out() as o:
            find(just(u"hi"), bool)
    v = o.getvalue()
    print(v)
    assert u"No simplifiers" in v
Example #26
def lengths(size, min_segments=None, max_segments=None, **kwargs):
    # First generate the number of borders between segments,
    # then create border values and add 0 and size.
    # Sorting and taking np.diff converts the borders into segment
    # lengths, any of which may be 0: e.g. size=5 with borders [2, 4]
    # gives sorted([2, 4, 0, 5]) -> diffs [2, 2, 1].
    if min_segments is None:
        min_segments = 0
    if max_segments is None:
        max_segments = size
    assert min_segments >= 0
    assert min_segments <= max_segments
    if size == 0 and max_segments == 0:
        return st.just(np.empty(shape=[0], dtype=np.int32))
    assert max_segments > 0, "size is not 0, need at least one segment"
    return st.integers(
        min_value=max(min_segments - 1, 0), max_value=max_segments - 1
    ).flatmap(
        lambda num_borders:
        hypothesis.extra.numpy.arrays(
            np.int32, num_borders, elements=st.integers(
                min_value=0, max_value=size
            )
        )
    ).map(
        lambda x: np.append(x, np.array([0, size], dtype=np.int32))
    ).map(sorted).map(np.diff)
Example #27
def steps(self):
    strategies = []
    for rule in self.rules():
        converted_arguments = {}
        valid = True
        if rule.precondition is not None and not rule.precondition(self):
            continue
        for k, v in sorted(rule.arguments.items()):
            if isinstance(v, Bundle):
                bundle = self.bundle(v.name)
                if not bundle:
                    valid = False
                    break
                else:
                    v = sampled_from(bundle)
            converted_arguments[k] = v
        if valid:
            strategies.append(TupleStrategy((
                just(rule),
                FixedKeysDictStrategy(converted_arguments)
            ), tuple))
    if not strategies:
        raise InvalidDefinition(
            u'No progress can be made from state %r' % (self,)
        )
    return one_of(*strategies)
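Example #28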
def brief_response_data(essential_count=5, nice_to_have_count=5):
    return fixed_dictionaries({
        "essentialRequirements": requirements_list(essential_count, answers=True),
        "niceToHaveRequirements": requirements_list(nice_to_have_count, answers=True),
        "availability": text(min_size=1, average_size=10, alphabet='abcdefghijkl'),
        "respondToEmailAddress": just("*****@*****.**"),
    })
Example #29
def api_results(min_size=0, max_size=20, hook_types=None):
    count = integers(min_value=min_size, max_value=max_size).example()
    hook_types = hook_types or get_hook_names()

    return fixed_dictionaries(
        {
            "count": just(count),
            "next": none(),
            "prev": none(),
            "results": lists(
                fixed_dictionaries(
                    {
                        "name": text(min_size=1),
                        "latest_version": integers(min_value=0),
                        "content": fixed_dictionaries(
                            {
                                "hook_type": sampled_from(hook_types),
                                "version": integers(min_value=0),
                                "description": text(min_size=1),
                                "download_url": text(min_size=1),
                                "checksum": text(min_size=1),
                            }
                        ),
                    }
                ),
                min_size=count,
                max_size=count,
            ),
        }
    )
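One caveat in the snippet above: integers(...).example() is drawn once when the strategy is built, so every dict generated in a run shares that single count. Drawing the count inside the strategy keeps it varying per example; a hedged rewrite of the same idea (get_hook_names and the field strategies are assumed from the snippet):

def api_results_per_example(min_size=0, max_size=20, hook_types=None):
    hook_types = hook_types or get_hook_names()
    return integers(min_value=min_size, max_value=max_size).flatmap(
        lambda count: fixed_dictionaries({
            "count": just(count),
            "next": none(),
            "prev": none(),
            "results": lists(
                fixed_dictionaries({"name": text(min_size=1)}),
                min_size=count, max_size=count),
        }))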
Example #30
def test_flatmap_retrieve_from_db():
    constant_float_lists = floats(0, 1).flatmap(
        lambda x: lists(just(x))
    )

    track = []

    db = ExampleDatabase()

    @given(constant_float_lists)
    @settings(database=db)
    def record_and_test_size(xs):
        if sum(xs) >= 1:
            track.append(xs)
            assert False

    with pytest.raises(AssertionError):
        record_and_test_size()

    assert track
    example = track[-1]
    track = []

    with pytest.raises(AssertionError):
        record_and_test_size()

    assert track[0] == example
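Example #31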
@hps.composite
def mixtures_same_family(draw,
                         batch_shape=None,
                         event_dim=None,
                         enable_vars=False,
                         depth=None):
    """Strategy for drawing `MixtureSameFamily` distributions.

  The component distribution is drawn from the `distributions` strategy.

  The Categorical mixture distributions are either shared across all batch
  members, or drawn independently for the full batch (as required by
  `MixtureSameFamily`).

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      `MixtureSameFamily` distribution.  The component distribution will have a
      batch shape of 1 rank higher (for the components being mixed).  Hypothesis
      will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the component
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound Distributions.

  Returns:
    dists: A strategy for drawing `MixtureSameFamily` distributions with the
      specified `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())

    if batch_shape is None:
        # Ensure the components dist has at least one batch dim (a component dim).
        batch_shape = draw(tfp_hps.shapes(min_ndims=1, min_lastdimsize=2))
    else:  # This mixture adds a batch dim to its underlying components dist.
        batch_shape = tensorshape_util.concatenate(
            batch_shape,
            draw(tfp_hps.shapes(min_ndims=1, max_ndims=1, min_lastdimsize=2)))

    component = draw(
        distributions(batch_shape=batch_shape,
                      event_dim=event_dim,
                      enable_vars=enable_vars,
                      depth=depth - 1))
    hp.note(
        'Drawing MixtureSameFamily with component {}; parameters {}'.format(
            component, params_used(component)))
    # scalar or same-shaped categorical?
    mixture_batch_shape = draw(
        hps.one_of(hps.just(batch_shape[:-1]), hps.just(tf.TensorShape([]))))
    mixture_dist = draw(
        base_distributions(dist_name='Categorical',
                           batch_shape=mixture_batch_shape,
                           event_dim=tensorshape_util.as_list(batch_shape)[-1],
                           enable_vars=enable_vars))
    hp.note(('Forming MixtureSameFamily with '
             'mixture distribution {}; parameters {}').format(
                 mixture_dist, params_used(mixture_dist)))
    result_dist = tfd.MixtureSameFamily(components_distribution=component,
                                        mixture_distribution=mixture_dist,
                                        validate_args=True)
    if batch_shape[:-1] != result_dist.batch_shape:
        msg = ('MixtureSameFamily strategy generated a bad batch shape '
               'for {}, should have been {}.').format(result_dist,
                                                      batch_shape[:-1])
        raise AssertionError(msg)
    return result_dist
Example #32
def test_overlapping_posarg_kwarg_fails():
    with pytest.raises(TypeError):
        st.times(time.min, time.max, st.none(),
                 timezones=st.just(None)).validate()
Example #33
@composite  # decorator implied by the `draw` argument; assumed lost in excerpting
def everythings(
    draw,
    min_int=None,
    max_int=None,
    allow_inf=True,
    allow_null_bytes_in_keys=True,
    allow_quotes_in_keys=True,
    allow_control_characters_in_values=True,
    min_key_length=0,
    allow_datetime_microseconds=True,
):
    key_text = text(
        characters(
            blacklist_categories=("Cs", ) if allow_null_bytes_in_keys else
            ("Cs", "Cc"),
            blacklist_characters='"' if not allow_quotes_in_keys else None,
        ),
        min_size=min_key_length,
    )
    strings = text(
        characters(blacklist_categories=(
            "Cs", ) if allow_control_characters_in_values else ("Cs", "Cc")))
    dts = datetimes(
        min_value=datetime(1900, 1, 1),
        max_value=datetime(2200, 1, 1),
        timezones=just(timezone.utc),
    )
    if not allow_datetime_microseconds:
        dts = dts.map(lambda d: datetime(
            d.year,
            d.month,
            d.day,
            d.hour,
            d.minute,
            d.second,
            tzinfo=d.tzinfo,
        ))
    return Everything(
        draw(strings),
        draw(binary()),
        draw(integers(min_value=min_int, max_value=max_int)),
        draw(floats(allow_nan=False, allow_infinity=allow_inf)),
        draw(
            dictionaries(key_text,
                         integers(min_value=min_int, max_value=max_int))),
        draw(lists(integers(min_value=min_int, max_value=max_int))),
        tuple(draw(lists(integers(min_value=min_int, max_value=max_int)))),
        (
            draw(strings),
            draw(integers(min_value=min_int, max_value=max_int)),
            draw(floats(allow_nan=False, allow_infinity=allow_inf)),
        ),
        Counter(
            draw(
                dictionaries(key_text,
                             integers(min_value=min_int, max_value=max_int)))),
        draw(
            dictionaries(
                integers(min_value=min_int, max_value=max_int),
                floats(allow_nan=False, allow_infinity=allow_inf),
            )),
        draw(
            dictionaries(floats(allow_nan=False, allow_infinity=allow_inf),
                         strings)),
        draw(lists(floats(allow_nan=False, allow_infinity=allow_inf))),
        draw(lists(strings)),
        draw(sets(floats(allow_nan=False, allow_infinity=allow_inf))),
        draw(sets(integers(min_value=min_int, max_value=max_int))),
        draw(frozensets(strings)),
        Everything.AnIntEnum.A,
        Everything.AStringEnum.A,
        draw(dts),
    )
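Example #34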
def test_can_simplify_flatmap_with_bounded_left_hand_size():
    assert minimal(
        booleans().flatmap(lambda x: lists(just(x))),
        lambda x: len(x) >= 10) == [False] * 10
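minimal here is a helper from Hypothesis's own test suite; a rough public-API equivalent is hypothesis.find, which shrinks to a smallest example satisfying the predicate (a sketch, not the suite's actual helper):

from hypothesis import find
from hypothesis.strategies import booleans, just, lists

smallest = find(booleans().flatmap(lambda x: lists(just(x))),
                lambda x: len(x) >= 10)  # shrinks towards [False] * 10

Example #35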
def test_cacheable_things_are_cached():
    x = st.just(())
    assert st.tuples(x) == st.tuples(x)
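Example #36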
@attr.s()
class Branch(object):
    bits = attr.ib()
    children = attr.ib(default=attr.Factory(dict))


@attr.s()
class Terminal(object):
    status = attr.ib()
    payload = attr.ib(default=None)


nodes = st.deferred(lambda: terminals | writes | branches)

# Does not include Status.OVERFLOW by design: That happens because of the size
# of the string, not the input language.
terminals = st.one_of(
    st.just(Terminal(Status.VALID)),
    st.just(Terminal(Status.INVALID)),
    st.builds(Terminal,
              status=st.just(Status.INTERESTING),
              payload=st.integers(0, 10)),
)

branches = st.builds(Branch, bits=st.integers(1, 64))

writes = st.builds(Write, value=st.binary(min_size=1), child=nodes)


def run_language_test_for(root, data, seed):
    random.seed(seed)

    def test(local_data):
Example #37
def test_typevars_can_be_redefined():
    """We test that one can register a custom strategy for all type vars."""
    A = typing.TypeVar("A")

    with temp_registered(typing.TypeVar, st.just(1)):
        assert_all_examples(st.from_type(A), lambda obj: obj == 1)
Example #38
def test_required_args(target, args, kwargs):
    # Mostly checking that `self` (and only self) is correctly excluded
    st.builds(target, *map(st.just, args),
              **{k: st.just(v)
                 for k, v in kwargs.items()}).example()
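The pattern under test: st.just turns already-known argument values into constant strategies, so st.builds can mix them freely with generated ones. A sketch:

import hypothesis.strategies as st

def make(a, b=0):
    return (a, b)

# Constant positional and keyword arguments supplied via just().
assert st.builds(make, st.just(1), b=st.just(2)).example() == (1, 2)

Example #39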
from hypothesis import given, example, settings
from hypothesis.strategies import just
import pexpect

from mock_aerohive.MockAerohive import INCOMPLETE_COMMAND

import mock_aerohive.MockAerohiveFixture as MockAerohive


@given(command=just("hostname"))
@settings(max_examples=5, deadline=None)
@example(command="hostname ")
def test_incomplete_command(command, MockAerohive):
    username = "******"
    password = "******"
    aerohive = MockAerohive()
    aerohive.addUser(username, password)
    port = aerohive.run("127.0.0.1")

    connection = pexpect.spawn("ssh -o UserKnownHostsFile=/dev/null \"" +
                               username + "@" + "127.0.0.1\" -p " + str(port),
                               timeout=5)
    connection.expect_exact("continue connecting (yes/no)? ")
    connection.sendline("yes")
    connection.expect_exact("assword: ")
    connection.sendline(password)
    connection.expect_exact(aerohive.prompt())
    connection.sendline(command)
    connection.expect_exact(INCOMPLETE_COMMAND)
    connection.expect_exact(aerohive.prompt())
    connection.sendline("exit")
Example #40
File: base.py  Project: lycantropos/topo
from hypothesis import strategies

from .factories import (to_byte_strings, to_characters,
                        to_homogeneous_frozensets, to_homogeneous_tuples,
                        to_strings)

booleans = strategies.booleans()
integers = (booleans | strategies.integers())
real_numbers = (integers
                | strategies.floats(allow_nan=False, allow_infinity=True)
                | strategies.decimals(allow_nan=False, allow_infinity=True))
numbers = (real_numbers
           | strategies.complex_numbers(allow_nan=False, allow_infinity=True))
scalars = (strategies.none()
           | numbers
           | strategies.just(NotImplemented)
           | strategies.just(Ellipsis))
byte_strings = to_byte_strings()
strings = to_strings(to_characters())
deferred_hashables = strategies.deferred(lambda: hashables)
hashables = (scalars
             | byte_strings
             | strings
             | to_homogeneous_frozensets(deferred_hashables)
             | to_homogeneous_tuples(deferred_hashables))
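The deferred indirection above ties the knot: hashables is defined in terms of itself through deferred_hashables, and by construction every drawn value is hashable. A usage sketch:

# Every value drawn from `hashables` is usable as a dict key, including
# nested frozensets/tuples of further hashables.
hash(hashables.example())  # must not raise

Example #41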
class MagicFolderFromConfigTests(SyncTestCase):
    """
    Tests for ``MagicFolder.from_config``.
    """
    @given(
        folder_names(),
        relative_paths(),
        path_segments(),
        relative_paths(),
        just(LOCAL_AUTHOR),
        sampled_from([b"URI:DIR2:", b"URI:DIR2-RO:"]),
        integers(min_value=1, max_value=10000),
        binary(),
    )
    def test_uploader_service(
            self,
            name,
            file_path,
            relative_magic_path,
            relative_state_path,
            author,
            collective_cap_kind,
            poll_interval,
            content,
    ):
        """
        ``MagicFolder.from_config`` creates an ``UploaderService`` which will
        upload snapshots using the given Tahoe client object.
        """
        reactor = task.Clock()

        root = create_fake_tahoe_root()
        http_client = create_tahoe_treq_client(root)
        tahoe_client = create_tahoe_client(
            DecodedURL.from_text(U"http://example.invalid./"),
            http_client,
        )

        ignored, upload_dircap = root.add_mutable_data(
            b"URI:DIR2:",
            json.dumps([
                u"dirnode",
                {u"children": {}},
            ]),
        )

        ignored, collective_dircap = root.add_mutable_data(
            collective_cap_kind,
            json.dumps([
                u"dirnode",
                {u"children": {}},
            ]),
        )

        basedir = FilePath(self.mktemp()).asTextMode("utf-8")
        global_config = create_global_configuration(
            basedir,
            u"tcp:-1",
            FilePath(u"/non-tahoe-directory"),
            u"tcp:127.0.0.1:-1",
        )

        magic_path = basedir.preauthChild(relative_magic_path)
        magic_path.asBytesMode("utf-8").makedirs()

        statedir = basedir.child(u"state")
        state_path = statedir.preauthChild(relative_state_path)

        target_path = magic_path.preauthChild(file_path)
        target_path.asBytesMode("utf-8").parent().makedirs(ignoreExistingDirectory=True)
        target_path.asBytesMode("utf-8").setContent(content)

        global_config.create_magic_folder(
            name,
            magic_path,
            state_path,
            author,
            collective_dircap,
            upload_dircap,
            poll_interval,
        )

        magic_folder = MagicFolder.from_config(
            reactor,
            tahoe_client,
            name,
            global_config,
            WebSocketStatusService(),
        )

        magic_folder.startService()
        self.addCleanup(magic_folder.stopService)

        self.assertThat(
            magic_folder.uploader_service.running,
            Equals(True),
        )

        self.assertThat(
            magic_folder.uploader_service._remote_snapshot_creator._local_author,
            Equals(author),
        )

        self.assertThat(
            magic_folder.folder_name,
            Equals(name),
        )

        # add a file. This won't actually add a file until we advance
        # the clock.
        d = magic_folder.local_snapshot_service.add_file(
            target_path,
        )

        self.assertThat(
            d,
            succeeded(Always()),
        )

        def children():
            return json.loads(root._uri.data[upload_dircap])[1][u"children"]

        reactor.advance(poll_interval - 1)

        self.assertThat(
            children(),
            Equals({}),
        )

        reactor.advance(1)

        self.assertThat(
            children(),
            ContainsDict({path2magic(file_path): Always()}),
            "Children dictionary {!r} did not contain expected path".format(
                children,
            ),
        )
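Example #42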
@hps.composite
def valid_slices(draw, batch_shape):
    """Samples a legal (possibly empty) slice for shape batch_shape."""
    # We build up a list of slices in several stages:
    # 1. Choose 0 to batch_rank slices to come before an Ellipsis (...).
    # 2. Decide whether or not to add an Ellipsis; if so, update the indexing
    #    used (e.g. batch_shape[i]) to identify safe bounds.
    # 3. Choose 0 to [remaining_dims] slices to come last.
    # 4. Decide where to insert between 0 and 3 newaxis slices.
    batch_shape = tf.TensorShape(batch_shape).as_list()
    slices = []
    batch_rank = len(batch_shape)
    arbitrary_slices = hps.tuples(
        hps.one_of(hps.just(None), hps.integers(min_value=-100,
                                                max_value=100)),
        hps.one_of(hps.just(None), hps.integers(min_value=-100,
                                                max_value=100)),
        hps.one_of(
            hps.just(None),
            hps.integers(min_value=-100, max_value=100).filter(
                lambda x: x != 0))).map(lambda tup: slice(*tup))

    # 1. Choose 0 to batch_rank slices to come before an Ellipsis (...).
    nslc_before_ellipsis = draw(hps.integers(min_value=0,
                                             max_value=batch_rank))
    for i in range(nslc_before_ellipsis):
        slc = draw(
            hps.one_of(hps.integers(min_value=0, max_value=batch_shape[i] - 1),
                       arbitrary_slices))
        slices.append(slc)
    # 2. Decide whether or not to add an Ellipsis; if so, update the indexing
    #    used (e.g. batch_shape[i]) to identify safe bounds.
    has_ellipsis = draw(hps.booleans().map(lambda x: (Ellipsis, x)))[1]
    nslc_after_ellipsis = draw(
        hps.integers(min_value=0, max_value=batch_rank - nslc_before_ellipsis))
    if has_ellipsis:
        slices.append(Ellipsis)
        remain_start, remain_end = (batch_rank - nslc_after_ellipsis,
                                    batch_rank)
    else:
        remain_start = nslc_before_ellipsis
        remain_end = nslc_before_ellipsis + nslc_after_ellipsis
    # 3. Choose 0 to [remaining_dims] slices to come last.
    for i in range(remain_start, remain_end):
        slc = draw(
            hps.one_of(hps.integers(min_value=0, max_value=batch_shape[i] - 1),
                       arbitrary_slices))
        slices.append(slc)
    # 4. Decide where to insert between 0 and 3 newaxis slices.
    newaxis_positions = draw(
        hps.lists(hps.integers(min_value=0, max_value=len(slices)),
                  max_size=3))
    for i in sorted(newaxis_positions, reverse=True):
        slices.insert(i, tf.newaxis)
    slices = tuple(slices)
    # Since `d[0]` ==> `d.__getitem__(0)` instead of `d.__getitem__((0,))`;
    # and similarly `d[:3]` ==> `d.__getitem__(slice(None, 3))` instead of
    # `d.__getitem__((slice(None, 3),))`; it is useful to test such scenarios.
    if len(slices) == 1 and draw(hps.booleans()):
        # Sometimes only a single item non-tuple.
        return slices[0]
    return slices
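Example #43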
    "s",
    [
        st.floats(),
        st.tuples(st.integers()),
        st.tuples(),
        st.one_of(st.integers(), st.text()),
    ],
)
def test_is_cacheable(s):
    assert s.is_cacheable


@pytest.mark.parametrize(
    "s",
    [
        st.just([]),
        st.tuples(st.integers(), st.just([])),
        st.one_of(st.integers(), st.text(), st.just([])),
    ],
)
def test_is_not_cacheable(s):
    assert not s.is_cacheable


def test_non_cacheable_things_are_not_cached():
    x = st.just([])
    assert st.tuples(x) != st.tuples(x)


def test_cacheable_things_are_cached():
    x = st.just(())
    assert st.tuples(x) == st.tuples(x)
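Example #44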
def test_can_simplify_on_both_sides_of_flatmap():
    assert minimal(
        integers().flatmap(lambda x: lists(just(x))),
        lambda x: len(x) >= 10
    ) == [0] * 10
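Example #45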
    TestRandom = strategy_test_suite(randoms())
    TestInts = strategy_test_suite(integers())
    TestBoolLists = strategy_test_suite(lists(booleans()))
    TestDictionaries = strategy_test_suite(
        dictionaries(keys=tuples(integers(), integers()), values=booleans()))
    TestOrderedDictionaries = strategy_test_suite(
        dictionaries(
            keys=integers(), values=integers(), dict_class=OrderedDict))
    TestString = strategy_test_suite(text())
    BinaryString = strategy_test_suite(binary())
    TestIntBool = strategy_test_suite(tuples(integers(), booleans()))
    TestFloats = strategy_test_suite(floats())
    TestComplex = strategy_test_suite(complex_numbers())
    TestJust = strategy_test_suite(just(u'hi'))
    TestTemplates = strategy_test_suite(templates_for(sets(integers())))

    TestEmptyString = strategy_test_suite(text(alphabet=u''))
    TestSingleString = strategy_test_suite(
        text(alphabet=u'a', average_size=10.0))
    TestManyString = strategy_test_suite(text(alphabet=u'abcdef☃'))

    Stuff = namedtuple(u'Stuff', (u'a', u'b'))
    TestNamedTuple = strategy_test_suite(
        builds(Stuff, integers(), integers()))

    TestTrees = strategy_test_suite(
        n_ary_tree(integers(), integers(), integers()))

    TestMixedSets = strategy_test_suite(sets(
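Example #46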
def test_non_cacheable_things_are_not_cached():
    x = st.just([])
    assert st.tuples(x) != st.tuples(x)
Example #47
import subprocess
import unittest

import raft_cffi

from hypothesis import given
from hypothesis.strategies import lists, just, integers, one_of

commands = one_of(
    just('append'),
    just('poll'),
    integers(min_value=1, max_value=10),
)


class Log(object):
    def __init__(self):
        self.entries = []
        self.base = 0

    def append(self, ety):
        self.entries.append(ety)

    def poll(self):
        self.base += 1
        return self.entries.pop(0)

    def delete(self, idx):
        idx -= 1
        if idx < self.base:
            idx = self.base
Example #48
from hypothesis import given
import hypothesis.strategies as st

from pyproprop import processed_property


class ClassWithDefaultProperties:
    """Dummy class for testing processed properties with default/optional
    values."""

    prop_a = processed_property("prop_a", type=int, optional=True)
    prop_b = processed_property("prop_b", type=int, optional=True, default=1)


@given(st.integers())
def test_optional_instantiation_with_int(test_value):
    """Property instantiates with default value correctly."""
    test_fixture = ClassWithDefaultProperties()
    test_fixture.prop_a = test_value
    test_fixture.prop_b = test_value
    assert test_fixture.prop_a == test_value
    assert test_fixture.prop_b == test_value


@given(st.just(None))
def test_optional_instantiation_with_none(test_value):
    """Property instantiates with default value correctly."""
    test_fixture = ClassWithDefaultProperties()
    test_fixture.prop_a = test_value
    test_fixture.prop_b = test_value
    assert test_fixture.prop_a is None
    assert test_fixture.prop_b == 1
Example #49
def to_multicontours(vertices: List[Point]) -> Strategy[Multicontour]:
    return strategies.builds(partial(to_multicontour, context=context),
                             strategies.just(vertices),
                             to_sizes(len(vertices)), choosers())
Example #50
def STRING(self, token: Token) -> st.SearchStrategy:
    # (all strategies should return lists)
    return st.just([literal_eval(token.value)])
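Example #51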
def test_can_simplify_on_right_hand_strategy_of_flatmap():
    assert minimal(integers().flatmap(lambda x: lists(just(x)))) == []
Example #52
import datetime

import pytz
import hypothesis as h
import hypothesis.strategies as st
import hypothesis.extra.numpy as npst
import hypothesis.extra.pytz as tzst
import numpy as np

import pyarrow as pa

# TODO(kszucs): alphanum_text, surrogate_text
custom_text = st.text(
    alphabet=st.characters(min_codepoint=0x41, max_codepoint=0x7E))

null_type = st.just(pa.null())
bool_type = st.just(pa.bool_())

binary_type = st.just(pa.binary())
string_type = st.just(pa.string())
large_binary_type = st.just(pa.large_binary())
large_string_type = st.just(pa.large_string())
fixed_size_binary_type = st.builds(pa.binary,
                                   st.integers(min_value=0, max_value=16))
binary_like_types = st.one_of(binary_type, string_type, large_binary_type,
                              large_string_type, fixed_size_binary_type)

signed_integer_types = st.sampled_from(
    [pa.int8(), pa.int16(), pa.int32(),
     pa.int64()])
unsigned_integer_types = st.sampled_from(
Example #53
@st.composite  # decorator implied by the `draw` argument
def arrays(draw, type, size=None, nullable=True):
    if isinstance(type, st.SearchStrategy):
        ty = draw(type)
    elif isinstance(type, pa.DataType):
        ty = type
    else:
        raise TypeError('Type must be a pyarrow DataType')

    if isinstance(size, st.SearchStrategy):
        size = draw(size)
    elif size is None:
        size = draw(_default_array_sizes)
    elif not isinstance(size, int):
        raise TypeError('Size must be an integer')

    if pa.types.is_null(ty):
        h.assume(nullable)
        value = st.none()
    elif pa.types.is_boolean(ty):
        value = st.booleans()
    elif pa.types.is_integer(ty):
        values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size, )))
        return pa.array(values, type=ty)
    elif pa.types.is_floating(ty):
        values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size, )))
        # Workaround ARROW-4952: no easy way to assert array equality
        # in a NaN-tolerant way.
        values[np.isnan(values)] = -42.0
        return pa.array(values, type=ty)
    elif pa.types.is_decimal(ty):
        # TODO(kszucs): properly limit the precision
        # value = st.decimals(places=type.scale, allow_infinity=False)
        h.reject()
    elif pa.types.is_time(ty):
        value = st.times()
    elif pa.types.is_date(ty):
        value = st.dates()
    elif pa.types.is_timestamp(ty):
        min_int64 = -(2**63)
        max_int64 = 2**63 - 1
        min_datetime = datetime.datetime.fromtimestamp(min_int64 // 10**9)
        max_datetime = datetime.datetime.fromtimestamp(max_int64 // 10**9)
        try:
            offset_hours = int(ty.tz)
            tz = pytz.FixedOffset(offset_hours * 60)
        except ValueError:
            tz = pytz.timezone(ty.tz)
        value = st.datetimes(timezones=st.just(tz),
                             min_value=min_datetime,
                             max_value=max_datetime)
    elif pa.types.is_duration(ty):
        value = st.timedeltas()
    elif pa.types.is_interval(ty):
        value = st.timedeltas()
    elif pa.types.is_binary(ty) or pa.types.is_large_binary(ty):
        value = st.binary()
    elif pa.types.is_string(ty) or pa.types.is_large_string(ty):
        value = st.text()
    elif pa.types.is_fixed_size_binary(ty):
        value = st.binary(min_size=ty.byte_width, max_size=ty.byte_width)
    elif pa.types.is_list(ty):
        value = _pylist(ty.value_type, size=size, nullable=nullable)
    elif pa.types.is_large_list(ty):
        value = _pylist(ty.value_type, size=size, nullable=nullable)
    elif pa.types.is_fixed_size_list(ty):
        value = _pylist(ty.value_type, size=ty.list_size, nullable=nullable)
    elif pa.types.is_dictionary(ty):
        values = _pylist(ty.value_type, size=size, nullable=nullable)
        return pa.array(draw(values), type=ty)
    elif pa.types.is_map(ty):
        value = _pymap(ty.key_type,
                       ty.item_type,
                       size=_default_array_sizes,
                       nullable=nullable)
    elif pa.types.is_struct(ty):
        h.assume(len(ty) > 0)
        fields, child_arrays = [], []
        for field in ty:
            fields.append(field)
            child_arrays.append(draw(arrays(field.type, size=size)))
        return pa.StructArray.from_arrays(child_arrays, fields=fields)
    else:
        raise NotImplementedError(ty)

    if nullable:
        value = st.one_of(st.none(), value)
    values = st.lists(value, min_size=size, max_size=size)

    return pa.array(draw(values), type=ty)
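With the @st.composite decorator restored above, usage looks like the following (a hedged sketch):

import pyarrow as pa

arr = arrays(pa.int64(), size=5).example()
assert len(arr) == 5 and arr.type == pa.int64()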
Example #54
@hps.composite
def kernels(
    draw,
    kernel_name=None,
    batch_shape=None,
    event_dim=None,
    feature_dim=None,
    feature_ndims=None,
    enable_vars=False,
    depth=None):
  """Strategy for drawing arbitrary Kernels.

  Args:
    draw: Hypothesis function supplied by `@hps.composite`.
    kernel_name: Optional Python `str`.  If given, the produced kernels
      will all have this type.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Kernel.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      kernel's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound kernel.
  Returns:
    kernels: A strategy for drawing Kernels with the specified `batch_shape`
      (or an arbitrary one if omitted).
    kernel_variable_names: List of kernel parameters that are variables.
  """

  if depth is None:
    depth = draw(depths())
  if kernel_name is None and depth > 0:
    bases = hps.just(None)
    compounds = hps.sampled_from(SPECIAL_KERNELS)
    kernel_name = draw(hps.one_of([bases, compounds]))
  if kernel_name is None or kernel_name in INSTANTIABLE_BASE_KERNELS:
    return draw(
        base_kernels(
            kernel_name,
            batch_shape=batch_shape,
            event_dim=event_dim,
            feature_dim=feature_dim,
            feature_ndims=feature_ndims,
            enable_vars=enable_vars))

  if kernel_name == 'SchurComplement':
    return draw(schur_complements(
        batch_shape=batch_shape,
        event_dim=event_dim,
        feature_dim=feature_dim,
        feature_ndims=feature_ndims,
        enable_vars=enable_vars,
        depth=depth))
  elif kernel_name == 'FeatureScaled':
    return draw(feature_scaleds(
        batch_shape=batch_shape,
        event_dim=event_dim,
        feature_dim=feature_dim,
        feature_ndims=feature_ndims,
        enable_vars=enable_vars,
        depth=depth))

  raise ValueError('Kernel name not found.')
Example #55
from hypothesis.strategies import floats, just, one_of

from segpy.ibm_float import (ieee2ibm, ibm2ieee, MAX_IBM_FLOAT,
                             SMALLEST_POSITIVE_NORMAL_IBM_FLOAT,
                             LARGEST_NEGATIVE_NORMAL_IBM_FLOAT, MIN_IBM_FLOAT,
                             IBMFloat, EPSILON_IBM_FLOAT,
                             MAX_EXACT_INTEGER_IBM_FLOAT,
                             MIN_EXACT_INTEGER_IBM_FLOAT, EXPONENT_BIAS)

from segpy.util import almost_equal

ibm_compatible_negative_floats = floats(MIN_IBM_FLOAT,
                                        LARGEST_NEGATIVE_NORMAL_IBM_FLOAT)
ibm_compatible_positive_floats = floats(SMALLEST_POSITIVE_NORMAL_IBM_FLOAT,
                                        MAX_IBM_FLOAT)

ibm_compatible_non_negative_floats = one_of(
    just(0.0), floats(SMALLEST_POSITIVE_NORMAL_IBM_FLOAT, MAX_IBM_FLOAT))

ibm_compatible_non_positive_floats = one_of(
    just(0.0), floats(MIN_IBM_FLOAT, LARGEST_NEGATIVE_NORMAL_IBM_FLOAT))


def ibm_compatible_floats(min_f, max_f):
    truncated_min_f = max(min_f, MIN_IBM_FLOAT)
    truncated_max_f = min(max_f, MAX_IBM_FLOAT)

    strategies = []
    if truncated_min_f <= LARGEST_NEGATIVE_NORMAL_IBM_FLOAT <= truncated_max_f:
        strategies.append(
            floats(truncated_min_f, LARGEST_NEGATIVE_NORMAL_IBM_FLOAT))

    if truncated_min_f <= SMALLEST_POSITIVE_NORMAL_IBM_FLOAT <= truncated_max_f:
Example #56
from hypothesis.strategies import (
    tuples,
    integers,
    floats,
    text,
    one_of,
    sampled_from,
    just,
    binary,
    data,
)

from cattr import Converter

from . import (
    primitive_strategies,
    seqs_of_primitives,
    lists_of_primitives,
    dicts_of_primitives,
    enums_of_primitives,
)
from ._compat import change_type_param

ints_and_type = tuples(integers(), just(int))
floats_and_type = tuples(floats(allow_nan=False), just(float))
strs_and_type = tuples(text(), just(unicode))
bytes_and_type = tuples(binary(), just(bytes))

primitives_and_type = one_of(ints_and_type, floats_and_type, strs_and_type,
                             bytes_and_type)

mut_set_types = sampled_from([Set, MutableSet])
set_types = one_of(mut_set_types, just(FrozenSet))


def create_generic_type(generic_types, param_type):
    """Create a strategy for generating parameterized generic types."""
    return one_of(
        generic_types,
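Example #57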
@hps.composite
def distributions(draw,
                  dist_name=None,
                  batch_shape=None,
                  event_dim=None,
                  enable_vars=False,
                  depth=None,
                  eligibility_filter=lambda name: True):
    """Strategy for drawing arbitrary Distributions.

  This may draw compound distributions (i.e., `Independent`,
  `MixtureSameFamily`, and/or `TransformedDistribution`), in which case the
  underlying distributions are drawn recursively from this strategy as well.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    dist_name: Optional Python `str`.  If given, the produced distributions
      will all have this type.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Distribution.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound Distributions.
      If `None`, Hypothesis will choose one, with a bias towards shallow
      nests.
    eligibility_filter: Optional Python callable.  Blacklists some Distribution
      class names so they will not be drawn at the top level.

  Returns:
    dists: A strategy for drawing Distributions with the specified `batch_shape`
      (or an arbitrary one if omitted).

  Raises:
    ValueError: If it doesn't know how to instantiate a Distribution of class
      `dist_name`.
  """
    if depth is None:
        depth = draw(depths())

    if dist_name is None and depth > 0:
        bases = hps.just(None)
        compounds = hps.one_of(
            map(hps.just, [
                'Independent', 'MixtureSameFamily', 'TransformedDistribution'
            ]))
        dist_name = draw(hps.one_of([bases, compounds]))

    if (dist_name is None or dist_name in INSTANTIABLE_BASE_DISTS
            or dist_name == 'Empirical'):
        return draw(
            base_distributions(dist_name, batch_shape, event_dim, enable_vars,
                               eligibility_filter))
    if dist_name == 'Independent':
        return draw(independents(batch_shape, event_dim, enable_vars, depth))
    if dist_name == 'MixtureSameFamily':
        return draw(
            mixtures_same_family(batch_shape, event_dim, enable_vars, depth))
    if dist_name == 'TransformedDistribution':
        return draw(
            transformed_distributions(batch_shape, event_dim, enable_vars,
                                      depth))
    raise ValueError('Unknown Distribution name {}'.format(dist_name))
Example #58
class TestCrossEntropyOps(hu.HypothesisTestCase):
    @given(
        inputs=st.lists(
            elements=st.integers(min_value=1, max_value=5),
            min_size=1,
            max_size=2,
            average_size=2,
        ).flatmap(
            lambda shape: st.tuples(
                hu.arrays(
                    dims=shape,
                    elements=st.one_of(
                        st.floats(min_value=-1.0, max_value=-0.1),
                        st.floats(min_value=0.1, max_value=1.0),
                    )),
                hu.arrays(
                    dims=shape,
                    elements=st.sampled_from([0.0, 1.0]),
                ),
            )
        ),
        options=st.one_of(
            st.tuples(st.just(True), st.just(False)),
            st.tuples(st.just(False), st.just(True)),
            st.tuples(st.just(False), st.just(False))
        ),
        **hu.gcs
    )
    def test_sigmoid_cross_entropy_with_logits(
        self, inputs, options, gc, dc
    ):
        logits, targets = inputs
        log_D_trick, unjoined_lr_loss = options

        def sigmoid_xentr_logit_ref(logits, targets):
            if unjoined_lr_loss:
                s = unjoined_sigmoid_cross_entropy(logits, targets)
            else:
                s = (
                    sigmoid_cross_entropy_with_logits(logits, targets)
                    if not log_D_trick else
                    sigmoid_cross_entropy_with_logits_with_log_D_trick(
                        logits, targets
                    )
                )
            m = np.mean(s, axis=len(logits.shape) - 1)
            return (m, )

        def sigmoid_xentr_logit_grad_ref(g_out, outputs, fwd_inputs):
            fwd_logits, fwd_targets = fwd_inputs
            inner_size = fwd_logits.shape[-1]
            if unjoined_lr_loss:
                m = unjoined_sigmoid_cross_entropy_grad(logits, targets)
            else:
                m = (
                    sigmoid_cross_entropy_with_logits_grad(fwd_logits, fwd_targets)
                    if not log_D_trick else
                    sigmoid_cross_entropy_with_logits_with_log_D_trick_grad(
                        fwd_logits, fwd_targets
                    )
                )
            # m = fwd_targets - sigmoid(fwd_logits)
            g_in = -np.expand_dims(g_out, axis=-1) * m / inner_size
            return (g_in, None)

        op = core.CreateOperator(
            'SigmoidCrossEntropyWithLogits', ['logits', 'targets'],
            ['xentropy'],
            log_D_trick=log_D_trick,
            unjoined_lr_loss=unjoined_lr_loss
        )
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[logits, targets],
            reference=sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=sigmoid_xentr_logit_grad_ref)

    @given(
        log_D_trick=st.just(False),
        **hu.gcs_cpu_only
    )
    def test_cross_entropy_and_unjoied_cross_entropy_relation(
        self, log_D_trick, gc, dc
    ):
        logits = np.array([1.4720, 0.3500, -0.6529, -1.1908, 0.8357,
                    -1.0774, -0.3395, -0.2469, 0.6708, -1.8332], dtype='f')
        targets = np.array([1., 1., 1., 1., 1., 1., 0., 0., 0., 0.], dtype='f')
        lr_size = targets.size
        unjoined_lr_loss = False

        def sigmoid_xentr_logit_ref(logits, targets):
            if unjoined_lr_loss:
                s = unjoined_sigmoid_cross_entropy(logits, targets)
            else:
                s = sigmoid_cross_entropy_with_logits(logits, targets)
            m = np.mean(s, axis=len(logits.shape) - 1)
            return (m, )

        def sigmoid_xentr_logit_grad_ref(g_out, outputs, fwd_inputs):
            fwd_logits, fwd_targets = fwd_inputs
            inner_size = fwd_logits.shape[-1]
            if unjoined_lr_loss:
                m = unjoined_sigmoid_cross_entropy_grad(logits, targets)
            else:
                m = sigmoid_cross_entropy_with_logits_grad(
                    fwd_logits, fwd_targets)

            # m = fwd_targets - sigmoid(fwd_logits)
            g_in = -np.expand_dims(g_out, axis=-1) * m / inner_size
            return (g_in, None)

        op = core.CreateOperator(
            'SigmoidCrossEntropyWithLogits', ['logits', 'targets'],
            ['xentropy'],
            log_D_trick=log_D_trick,
            unjoined_lr_loss=unjoined_lr_loss
        )
        output_lr = self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[logits, targets],
            reference=sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=sigmoid_xentr_logit_grad_ref)

        # Unjoined dataset where labels change later
        logits = np.array([1.4720, 0.3500, -0.6529, -1.1908, 0.8357,
                    -1.0774, -0.3395, -0.2469, 0.6708, -1.8332, 1.4720, 0.3500,
                    -0.6529, -1.1908, 0.8357, -1.0774], dtype='f')
        targets = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.,
                            0., 1., 1., 1., 1., 1., 1.], dtype='f')
        unjoined_lr_loss = True
        unjoined_lr_size = targets.size

        op = core.CreateOperator(
            'SigmoidCrossEntropyWithLogits', ['logits', 'targets'],
            ['xentropy'],
            log_D_trick=log_D_trick,
            unjoined_lr_loss=unjoined_lr_loss
        )
        outputs_unjoined_lr = self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[logits, targets],
            reference=sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=sigmoid_xentr_logit_grad_ref)

        self.assertAlmostEqual(
            output_lr[0].item(0) * lr_size / unjoined_lr_size,
            outputs_unjoined_lr[0].item(0),
            delta=0.0001)

    @given(
        inputs=st.lists(
            elements=st.integers(min_value=1, max_value=5),
            min_size=1,
            max_size=2,
            average_size=2,
        ).flatmap(
            lambda shape: st.tuples(
                hu.arrays(
                    dims=shape,
                    elements=st.one_of(
                        st.floats(min_value=-1.0, max_value=-0.1),
                        st.floats(min_value=0.1, max_value=1.0),
                    )),
                hu.arrays(
                    dims=shape,
                    elements=st.sampled_from([0.0, 1.0]),
                ),
                hu.arrays(
                    dims=shape,
                    elements=st.floats(min_value=0.1, max_value=1.0),
                ),
            )
        ),
        **hu.gcs
    )
    def test_weighted_sigmoid_cross_entropy_with_logits(self, inputs, gc, dc):
        logits, targets, weights = inputs

        def weighted_sigmoid_xentr_logit_ref(logits, targets, weights):
            s = sigmoid_cross_entropy_with_logits(logits, targets)
            s = np.multiply(s, weights)
            m = np.mean(s, axis=len(logits.shape) - 1)
            return (m, )

        def weighted_sigmoid_xentr_logit_grad_ref(g_out, outputs, fwd_inputs):
            fwd_logits, fwd_targets, fwd_weights = fwd_inputs
            inner_size = fwd_logits.shape[-1]
            m = fwd_targets - sigmoid(fwd_logits)
            m = np.multiply(m, weights)
            g_in = -np.expand_dims(g_out, axis=-1) * m / inner_size
            return (g_in, None, None)

        op = core.CreateOperator(
            'WeightedSigmoidCrossEntropyWithLogits',
            ['logits', 'targets', 'weights'],
            ['xentropy'])
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[logits, targets, weights],
            reference=weighted_sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=weighted_sigmoid_xentr_logit_grad_ref)

    @given(n=st.integers(2, 10),
           b=st.integers(1, 5),
           **hu.gcs_cpu_only)
    def test_soft_label_cross_entropy(self, n, b, gc, dc):
        # Initialize X and add 1e-2 for numerical stability
        X = np.random.rand(b, n).astype(np.float32)
        X = X + 1e-2
        for i in range(b):
            X[i] = X[i] / np.sum(X[i])

        # Initialize label
        label = np.random.rand(b, n).astype(np.float32)
        for i in range(b):
            label[i] = label[i] / np.sum(label[i])

        # Reference implementation of cross entropy with soft labels
        def soft_label_xentr_ref(X, label):
            xent = [np.sum((-label[j][i] * np.log(max(X[j][i], 1e-20))
                            for i in range(len(X[0])))) for j in range(b)]
            return (xent,)

        op = core.CreateOperator("CrossEntropy", ["X", "label"], ["Y"])

        # TODO(surya) Once CrossEntropyOp is ported to GPU, add the respective
        # tests to this unit test.
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X, label],
            reference=soft_label_xentr_ref,
        )

        self.assertGradientChecks(
            gc, op, [X, label], 0, [0], stepsize=1e-4, threshold=1e-2)
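Example #59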

@fails
@given(x=integers(min_value=0, max_value=3), name=text())
def test_can_be_given_keyword_args(x, name):
    assume(x > 0)
    assert len(name) < x


@fails
@given(one_of(floats(), booleans()), one_of(floats(), booleans()))
def test_one_of_produces_different_values(x, y):
    assert type(x) == type(y)


@given(just(42))
def test_is_the_answer(x):
    assert x == 42


@fails
@given(text(), text())
def test_text_addition_is_not_commutative(x, y):
    assert x + y == y + x


@fails
@given(binary(), binary())
def test_binary_addition_is_not_commutative(x, y):
    assert x + y == y + x
Example #60
def test_can_register_NewType():
    Name = typing.NewType("Name", str)
    st.register_type_strategy(Name, st.just("Eric Idle"))
    assert st.from_type(Name).example() == "Eric Idle"
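register_type_strategy mutates global state for the whole process; the temp_registered helper seen in the TypeVar example (#37) scopes a registration to a block instead (a sketch reusing that test-suite helper):

with temp_registered(Name, st.just("Eric Idle")):
    assert st.from_type(Name).example() == "Eric Idle"
# Outside the block, Name resolves as a plain str again.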