Example #1
 def test_containment_negative_j_out_of_range(self, i_range, j_range, data):
     num_indices = len(i_range) * len(j_range)
     v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
     i = data.draw(integers())
     j = data.draw(integers())
     assume(j not in j_range)
     catalog = FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
     assert (i, j) not in catalog
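These snippets are test bodies whose parameters (`data`, `i_range`, `instrument`, ...) are supplied by Hypothesis `@given(...)` decorators or pytest fixtures that are not shown here. A minimal sketch of the assumed wiring, using Hypothesis's interactive `st.data()` strategy:

from hypothesis import given, strategies as st

@given(i=st.integers(), data=st.data())
def test_draw_respects_bounds(i, data):
    # data.draw() pulls a value from a strategy mid-test, as the examples below do.
    j = data.draw(st.integers(min_value=i, max_value=i + 10), label="j")
    assert i <= j <= i + 10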
Example #2
def test_power_measurement(instrument, data):
    channel_id = data.draw(sampled_from(instrument.channel_ids))
    channel = instrument.channel(channel_id)
    voltage = data.draw(
        floats(channel.voltage.protection.min, channel.voltage.protection.max).map(
            lambda v: round(v, 3)))
    current = data.draw(
        floats(channel.current.protection.min, channel.current.protection.max).map(
            lambda v: round(v, 3)))
    instrument._inst._channel_voltage_measurements[channel_id] = voltage
    instrument._inst._channel_current_measurements[channel_id] = current
    assert channel.power.measurement == round(voltage*current, 3)
Example #3
 def test_key_missing_raises_value_error(self, i_range, j_range, data):
     num_indices = len(i_range) * len(j_range)
     v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
     catalog = FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
     value = data.draw(integers())
     assume(value not in v_range)
     with raises(ValueError):
         catalog.key(value)
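`ranges` is a project-specific strategy used throughout these catalog tests and is not shown here. A minimal sketch of one possible implementation, assuming it yields non-empty `range` objects with a length between `min_size` and `max_size`:

from hypothesis import strategies as st

@st.composite
def ranges(draw, min_size=1, max_size=100):
    # Draw a length, start, and stride, then build a range of exactly that length.
    size = draw(st.integers(min_value=min_size, max_value=max_size))
    start = draw(st.integers(min_value=-1000, max_value=1000))
    step = draw(st.integers(min_value=1, max_value=10))
    return range(start, start + size * step, step)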
Example #4
def test_set_voltage_setpoint_level(instrument, data):
    channel_id = data.draw(sampled_from(instrument.channel_ids))
    channel = instrument.channel(channel_id)
    voltage = data.draw(
        floats(channel.voltage.protection.min, channel.voltage.protection.max).map(
            lambda v: round(v, 3)))
    channel.voltage.setpoint.level = voltage
    assert channel.voltage.setpoint.level == voltage
Example #5
def test_set_voltage_setpoint_step_increment(instrument, data):
    channel_id = data.draw(sampled_from(instrument.channel_ids))
    channel = instrument.channel(channel_id)
    increment = data.draw(
        floats(channel.voltage.protection.min, channel.voltage.protection.max).map( # TODO: Experimentally determine maximum
            lambda v: round(v, 3)))
    channel.voltage.setpoint.step.increment = increment
    assert channel.voltage.setpoint.step.increment == increment
Example #6
 def test_out_of_range_key_raises_value_error(self, key_range, data):
     value_range = data.draw(ranges(min_size=len(key_range), max_size=len(key_range)))
     key = data.draw(integers())
     assume(not (key_range.start <= key <= key_range[-1]))
     catalog = LinearRegularCatalog(key_range.start, key_range[-1], key_range.step,
                                    value_range.start, value_range[-1], value_range.step)
     with raises(KeyError):
         catalog[key]
Example #7
def test_fuzz_fractions_bounds(data):
    denom = data.draw(none() | integers(1, 100), label="denominator")
    fracs = none() | fractions(max_denominator=denom)
    low, high = data.draw(tuples(fracs, fracs), label="low, high")
    if low is not None and high is not None and low > high:
        low, high = high, low
    try:
        val = data.draw(fractions(low, high, denom), label="value")
    except InvalidArgument:
        reject()  # fractions too close for given max_denominator
    if low is not None:
        assert low <= val
    if high is not None:
        assert val <= high
    if denom is not None:
        assert 1 <= val.denominator <= denom
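`reject()` tells Hypothesis to discard the current example and try another, much like `assume(False)`; that is why the `except InvalidArgument` branch above does not fail the test. A minimal standalone sketch:

from hypothesis import given, reject, strategies as st

@given(st.integers())
def test_only_even_inputs(n):
    if n % 2:
        reject()  # discard odd draws instead of failing the test
    assert n % 2 == 0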
Example #8
 def allocate_loadbalancer_ingress(self, data):
     """
     Complete the S4 Customer Grid service setup by updating its status to
     reflect the existence of a platform-supplied LoadBalancer.  This would
     happen due to actions taken by Kubernetes for any ``LoadBalancer``
     service.
     """
     services = [
         service
         for service
         in self.kubernetes._state.services.items
         if service.spec.type == u"LoadBalancer"
         and service.status is not None
         and not service.status.loadBalancer.ingress
     ]
     assume([] != services)
     for service in services:
         self.kubernetes._state_changed(self.kubernetes._state.replace(
             u"services",
             service,
             service.transform(
                 [u"status", u"loadBalancer", u"ingress"],
                 [self.kube_model.v1.LoadBalancerIngress(
                     hostname=data.draw(domains()),
                 )],
             ),
         ))
Example #9
def test_struct(data):
    _elements = one_of(characters(), integers(), text())
    _sets = sets(elements=_elements, min_size=1)  # average_size was removed from Hypothesis
    allowed_keys = data.draw(_sets)

    keys = list(allowed_keys)
    values = data.draw(lists(elements=_elements, min_size=len(keys), max_size=len(keys)))
    s = py_tools.Struct(allowed_keys)

    for key, value in zip(keys, values):
        s[key] = value
        assert s[key] == value

    assert s == {k: v for k, v in zip(keys, values)}

    prohibited_keys = {k for k in data.draw(_sets) if k not in allowed_keys}
    for key in prohibited_keys:
        with pytest.raises(KeyError):
            s[key] = data.draw(_elements)
Example #10
 def test_repr(self, i_range, j_range, data):
     num_indices = len(i_range) * len(j_range)
     v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
     catalog = FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
     r = repr(catalog)
     assert r.startswith('FirstIndexVariesQuickestCatalog2D')
     assert 'i_range={!r}'.format(i_range) in r
     assert 'j_range={!r}'.format(j_range) in r
     assert 'v_range={!r}'.format(v_range) in r
     assert check_balanced(r)
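`check_balanced` is a helper that is not shown in these snippets; presumably it verifies that the brackets in a repr string are balanced. A plausible sketch under that assumption:

def check_balanced(text):
    # Track the closing bracket expected for each opener on a stack.
    pairs = {"(": ")", "[": "]", "{": "}"}
    stack = []
    for ch in text:
        if ch in pairs:
            stack.append(pairs[ch])
        elif ch in pairs.values():
            if not stack or stack.pop() != ch:
                return False
    return not stack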
Example #11
def test_fuzz_decimals_bounds(data):
    places = data.draw(none() | integers(0, 20), label='places')
    finite_decs = decimals(allow_nan=False, allow_infinity=False,
                           places=places) | none()
    low, high = data.draw(tuples(finite_decs, finite_decs), label='low, high')
    if low is not None and high is not None and low > high:
        low, high = high, low
    ctx = decimal.Context(prec=data.draw(integers(1, 100), label='precision'))
    try:
        with decimal.localcontext(ctx):
            strat = decimals(low, high, allow_nan=False,
                             allow_infinity=False, places=places)
            val = data.draw(strat, label='value')
    except InvalidArgument:
        reject()  # decimals too close for given places
    if low is not None:
        assert low <= val
    if high is not None:
        assert val <= high
    if places is not None:
        assert val.as_tuple().exponent == -places
Example #12
 def test_repr(self, key_range, data):
     value_range = data.draw(ranges(min_size=len(key_range), max_size=len(key_range)))
     catalog = LinearRegularCatalog(key_range.start, key_range[-1], key_range.step,
                                    value_range.start, value_range[-1], value_range.step)
     r = repr(catalog)
     assert r.startswith('LinearRegularCatalog')
     assert 'key_min={}'.format(catalog._key_min) in r
     assert 'key_max={}'.format(catalog._key_max) in r
     assert 'key_stride={}'.format(catalog._key_stride) in r
     assert 'value_first={}'.format(catalog._value_start) in r
     assert 'value_last={}'.format(catalog._value_stop) in r
     assert 'value_stride={}'.format(catalog._value_stride) in r
     assert check_balanced(r)
Example #13
 def remove_pod(self, data):
     """
     An existing customer grid pod goes away, as would happen if a user
     cancelled their subscription.
     """
     assume(0 < len(self.pods))
     pod, values = data.draw(sampled_from(sorted(self.pods.items())))
     _, storage_pem, _, intro_pem, _ = values
     del self.pods[pod]
     self.used_tubs.difference_update({
         Tub(storage_pem).getTubID(),
         Tub(intro_pem).getTubID(),
     })
     self.case.successResultOf(self.client.delete(pod))
Example #14
def test_fuzz_decimals_bounds(data):
    places = data.draw(none() | integers(0, 20), label="places")
    finite_decs = (decimals(
        allow_nan=False, allow_infinity=False, places=places) | none())
    low, high = data.draw(tuples(finite_decs, finite_decs), label="low, high")
    if low is not None and high is not None and low > high:
        low, high = high, low
    ctx = decimal.Context(prec=data.draw(integers(1, 100), label="precision"))
    try:
        with decimal.localcontext(ctx):
            strat = decimals(low,
                             high,
                             allow_nan=False,
                             allow_infinity=False,
                             places=places)
            val = data.draw(strat, label="value")
    except InvalidArgument:
        reject()  # decimals too close for given places
    if low is not None:
        assert low <= val
    if high is not None:
        assert val <= high
    if places is not None:
        assert val.as_tuple().exponent == -places
Example #15
def test_broadcast_elements_gufunc_args(parsed_sig, min_side, max_side, max_dims_extra, dtype, data):
    signature = unparse(parsed_sig)
    parsed_sig, _ = parsed_sig

    excluded = data.draw(sets(integers(0, len(parsed_sig) - 1)).map(tuple))

    min_side, max_side = sorted([min_side, max_side])

    choices = data.draw(real_from_dtype(dtype))
    elements = sampled_from(choices)

    S = gu.gufunc_args(
        signature,
        excluded=excluded,
        min_side=min_side,
        max_side=max_side,
        max_dims_extra=max_dims_extra,
        dtype=dtype,
        elements=elements,
    )

    X = data.draw(S)

    validate_elements(X, choices=choices, dtype=dtype)
Example #16
def test_simple(data, seed):

    model = data.draw(sampled_from([LinearChainCRF]))
    struct = model.struct
    torch.manual_seed(seed)
    vals, (batch, N) = struct._rand()
    lengths = torch.tensor(
        [data.draw(integers(min_value=2, max_value=N)) for b in range(batch - 1)] + [N]
    )

    dist = model(vals, lengths)
    edges, enum_lengths = dist.enumerate_support()
    print(edges.shape)
    log_probs = dist.log_prob(edges)
    for b in range(lengths.shape[0]):
        log_probs[enum_lengths[b] :, b] = -1e9

    assert torch.isclose(log_probs.exp().sum(0), torch.tensor(1.0)).all()

    entropy = dist.entropy
    assert torch.isclose(entropy, -log_probs.exp().mul(log_probs).sum(0)).all()

    argmax = dist.argmax
    _, max_indices = log_probs.max(0)

    amax = edges[max_indices, torch.arange(batch)]
    print(argmax.nonzero())
    print((amax - argmax).nonzero(), lengths)
    assert (amax == argmax).all()

    samples = dist.sample((100,))
    marginals = dist.marginals
    assert ((samples.mean(0) - marginals).abs() < 0.2).all()

    dist.kmax(5)
    dist.count
Example #17
    def deactivate(self, data):
        identifiers = self.database.list_active_subscription_identifiers()
        assume(0 < len(identifiers))
        subscription_id = data.draw(sampled_from(sorted(identifiers)))
        Message.log(deactivating=subscription_id)
        self.database.deactivate_subscription(subscription_id)

        # We no longer require that the pods and replicasets belonging to this
        # subscription exist since the system is supposed to destroy them if
        # there is no corresponding active subscription.  We use ``discard``
        # because we may be deactivating the subscription before we ever got
        # around to creating a Deployment for it (or before the ReplicaSet or
        # Pod for the Deployment got created by the system).
        self.has_replicaset.discard(subscription_id)
        self.has_pod.discard(subscription_id)
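Snippets such as `deactivate`, `remove_pod`, `create_pods`, and `allocate_loadbalancer_ingress` read like rules on a `hypothesis.stateful.RuleBasedStateMachine`; the `@rule(...)` decorators are not shown. A minimal sketch of the assumed wiring (the machine and its state are hypothetical):

from hypothesis import strategies as st
from hypothesis.stateful import RuleBasedStateMachine, rule

class CounterMachine(RuleBasedStateMachine):
    def __init__(self):
        super().__init__()
        self.total = 0

    @rule(data=st.data())
    def add(self, data):
        # Rules can draw interactively, just like the stateful snippets here.
        self.total += data.draw(st.integers(min_value=0, max_value=3))
        assert self.total >= 0

TestCounter = CounterMachine.TestCase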
Example #18
def test_numeric_target_to_feature_dicts(
    data, n_targets, n_features, min_features_per_target, max_features_per_target
):
    assume(min_features_per_target <= n_features)
    assume(max_features_per_target <= n_features)
    target_to_feature_dict = data.draw(
        numeric_target_to_feature_dicts(
            n_targets, n_features, min_features_per_target, max_features_per_target
        )
    )
    assert len(target_to_feature_dict) == n_targets
    for target, features in target_to_feature_dict.items():
        assert max(features) < n_features
        assert min(features) >= 0
        assert len(set(features)) == len(features)
        assert min_features_per_target <= len(features) <= max_features_per_target
Example #19
 def test_fit_target_to_feature_dict_consistent(self, data, X_y, estimator):
     X, y = X_y
     target_to_feature_dict = data.draw(
         numeric_target_to_feature_dicts(n_targets=y.shape[1], n_features=X.shape[1])
     )
     multi_feature_multi_output_regressor = MultiFeatureMultiOutputRegressor(
         estimator
     )
     multi_feature_multi_output_regressor.fit(
         X, y, target_to_features_dict=target_to_feature_dict
     )
     for i, estimator_ in enumerate(
         multi_feature_multi_output_regressor.estimators_
     ):
         expected_n_features = len(target_to_feature_dict[i])
         assert len(estimator_.coef_) == expected_n_features
Example #20
def test_str_target_to_feature_dicts(
    data, targets, features, min_features_per_target, max_features_per_target
):
    assume(min_features_per_target <= len(targets))
    assume(max_features_per_target <= len(features))
    target_to_feature_dict = data.draw(
        str_target_to_feature_dicts(
            targets, features, min_features_per_target, max_features_per_target
        )
    )
    assert len(target_to_feature_dict) == len(targets)
    for target, target_features in target_to_feature_dict.items():
        assert all([feature in features for feature in target_features])
        assert len(set(target_features)) == len(target_features)
        assert (
            min_features_per_target <= len(target_features) <= max_features_per_target
        )
Example #21
def test_shapes_gufunc_args(parsed_sig_and_size, dtype, unique, data):
    parsed_sig, min_side, max_side = parsed_sig_and_size

    signature = unparse(parsed_sig)

    # We could also test using an elements strategy that then requires casting,
    # but coming up with compatible combos would be complicated.
    elements = from_dtype(np.dtype(dtype))

    # Assumes zero broadcast dims by default
    S = gu.gufunc_args(
        signature,
        min_side=min_side,
        max_side=max_side,
        dtype=dtype,
        elements=elements,
        unique=unique,
    )

    X = data.draw(S)
    shapes = [np.shape(xx) for xx in X]

    validate_shapes(shapes, parsed_sig[0], min_side, max_side)
    validate_elements(X, dtype=dtype, unique=unique)
Example #22
def test_simple_dataarrays(dims, dtype, sizes, data):
    elements = None
    min_side, max_side = sizes

    S = hxr.simple_dataarrays(dims, dtype, elements, min_side, max_side)

    da = data.draw(S)

    assert da.dims == tuple(dims)
    assert all(ss >= min_side for ss in da.sizes.values())
    assert (max_side is None) or all(ss <= max_side
                                     for ss in da.sizes.values())
    assert da.dtype == np.dtype(dtype)
    for dd in dims:
        L = da.coords[dd].values.tolist()
        assert all(isinstance(ss, int) for ss in L)
        assert L == list(range(len(L)))
Example #23
def test_parts_from_marginals(data, seed):
    # todo: add CKY, DepTree too?
    model = data.draw(sampled_from([LinearChain, SemiMarkov]))
    struct = model()
    torch.manual_seed(seed)
    vals, (batch, N) = struct._rand()

    edge = model(MaxSemiring).marginals(vals).long()

    sequence, extra = model.from_parts(edge)
    edge_ = model.to_parts(sequence, extra)

    assert (torch.isclose(edge, edge_)).all(), edge - edge_

    sequence_, extra_ = model.from_parts(edge_)
    assert extra == extra_, (extra, extra_)

    assert (torch.isclose(sequence, sequence_)).all(), sequence - sequence_
Example #24
def test_batching_lengths(model_test, semiring, data):
    "Test batching"
    gen = Gen(model_test, data, LogSemiring)
    model, vals, N, batch = gen.model, gen.vals, gen.N, gen.batch
    lengths = torch.tensor(
        [data.draw(integers(min_value=2, max_value=N)) for b in range(batch - 1)] + [N]
    )
    # first way: batched implementation
    partition = model(semiring).logpartition(vals, lengths=lengths)[0][0]
    # second way: unbatched implementation
    for b in range(batch):
        vals_b = vals[b:(b + 1), :(lengths[b] - 1)]
        lengths_b = lengths[b:(b + 1)]
        partition_b = model(semiring).logpartition(vals_b, lengths=lengths_b)[0][0]
        assert torch.isclose(partition[b], partition_b).all()
    # test _dp_standard
    partition_dp_standard = model(semiring)._dp_standard(vals, lengths=lengths)[0][0]
    assert torch.isclose(partition, partition_dp_standard).all()
Example #25
    def _test_token_group_operation(
        self,
        operation,
        matches_tokens,
        voucher,
        num_passes,
        now,
        random,
        data,
    ):
        configless = self.useFixture(
            ConfiglessMemoryVoucherStore(
                DummyRedeemer(),
                lambda: now,
            ), )
        # Make sure there are enough tokens for us to use!
        self.assertThat(
            configless.redeem(voucher, num_passes),
            succeeded(Always()),
        )

        # Figure out some subset, maybe empty, of passes from the group that
        # we will try to operate on.
        group_size = data.draw(integers(min_value=0, max_value=num_passes))
        indices = list(range(num_passes))  # list() so random.shuffle works on Python 3
        random.shuffle(indices)
        spent_indices = indices[:group_size]

        # Get some passes and perform the operation.
        pass_factory = SpendingController.for_store(
            tokens_to_passes=configless.redeemer.tokens_to_passes,
            store=configless.store,
        )
        group = pass_factory.get(u"message", num_passes)
        spent, rest = group.split(spent_indices)
        operation(spent)

        # Verify the expected outcome of the operation using the supplied
        # matcher factory.
        self.assertThat(
            configless.store,
            matches_tokens(num_passes, spent),
        )
Example #26
def random_slice(taken_from, random, data):
    """
    Divide ``taken_from`` into two pieces with elements randomly assigned to
    one piece or the other.

    :param list taken_from: A list of elements to divide.  This will be
        mutated.

    :param random: A ``random`` module-alike.

    :param data: A Hypothesis data object for drawing values.

    :return: A two-tuple of the two resulting lists.
    """
    count = data.draw(integers(min_value=1, max_value=len(taken_from)))
    random.shuffle(taken_from)
    remaining = taken_from[:-count]
    sliced = taken_from[-count:]
    return remaining, sliced
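A hypothetical usage of `random_slice`, assuming a test decorated with `@given(data=st.data())` and a seeded `random.Random`:

import random
from hypothesis import given, strategies as st

@given(data=st.data())
def test_random_slice_covers_all_elements(data):
    items = list(range(10))
    remaining, sliced = random_slice(list(items), random.Random(0), data)
    # Every element ends up in exactly one of the two pieces.
    assert sorted(remaining + sliced) == items
    assert 1 <= len(sliced) <= len(items)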
Example #27
def test_partition(resolution, start_date, entry_count, partition_start, data):
    log_entries = []

    if resolution == "weekly":
        partition_start %= 7

    if start_date is not None:
        for d in range(entry_count):
            dl, ul = data.draw(
                lists(integers(min_value=0, max_value=2**64 - 1),
                      min_size=2,
                      max_size=2))
            log_entries.append(
                parser.RawEntry(start_date + timedelta(days=d), dl, ul))

    if start_date is None:
        with pytest.raises(TypeError):
            parser._partition(log_entries, resolution)
    else:
        if resolution == "weekly":
            res = parser._partition(log_entries,
                                    resolution,
                                    week_start=partition_start)
            assert sum(len(part) for part in res) == entry_count
            assert all([len(part) <= 7 for part in res])
            for part in res:
                if any([
                        entry.date.weekday() == partition_start
                        for entry in part
                ]):
                    assert (sorted(part, key=lambda e: e.date)
                            [0].date.weekday() == partition_start)
        else:
            res = parser._partition(log_entries,
                                    resolution,
                                    month_start=partition_start)
            assert sum(len(part) for part in res) == entry_count
            for part in res:
                if any([entry.date.day == partition_start for entry in part]):
                    assert (sorted(
                        part,
                        key=lambda e: e.date)[0].date.day == partition_start)
Example #28
    def test_complex_always_column_major_general(self, i_range, j_range, data):
        num_indices = len(i_range) * len(j_range)
        v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
        d = {k: v for k, v in zip(((i, j) for j, i in product(j_range, i_range)), v_range)}

        # The catalog builder needs to be smart enough to recover the i and j ranges, the base
        # value, and the stride from this data.
        catalog_builder = CatalogBuilder(d)
        catalog = catalog_builder.create()
        assert isinstance(catalog, FirstIndexVariesQuickestCatalog2D)
        assert catalog.key_min() == (i_range.start, j_range.start)
        assert catalog.key_max() == (last(i_range), last(j_range))
        assert catalog.value_first() == v_range.start
        assert catalog.value_last() == last(v_range)
        assert catalog.i_min == i_range.start
        assert catalog.i_max == last(i_range)
        assert catalog.j_min == j_range.start
        assert catalog.j_max == last(j_range)
        assert len(catalog) == num_indices
        assert all(d[key] == catalog[key] for key in d)
Example #29
 def test_parses(self, data):
     """
     Configurations built by the strategy can be parsed.
     """
     tempdir = self.useFixture(TempDir())
     config_text = data.draw(
         tahoe_config_texts(
             storage_client_plugins={},
             shares=one_of(
                 just((None, None, None)),
                 share_parameters(),
             ),
         ),
     )
     note(config_text)
     config_from_string(
         tempdir.join(b"tahoe.ini"),
         b"tub.port",
         config_text.encode("utf-8"),
     )
Example #30
def test_unix_times(
    data: DataObject,
    start_datetime: dt.datetime | None,
    end_datetime: dt.datetime | None,
) -> None:
    try:
        unix = data.draw(
            unix_times(start_datetime=start_datetime,
                       end_datetime=end_datetime))
    except InvalidArgument:
        assume(False)
    else:
        assert isinstance(unix, int)
        datetime = dt.datetime.fromtimestamp(unix, tz=UTC).replace(tzinfo=None)
        buffer = dt.timedelta(seconds=1)  # 1 second buffer needed
        assert (
            ((dt.datetime(1970, 1, 1) if start_datetime is None else
              start_datetime) - buffer) <= datetime <=
            ((dt.datetime.now() if end_datetime is None else end_datetime) +
             buffer))
Example #31
def test_structure_simple_from_dict_default(converter, cl_and_vals, data):
    """Test structuring non-nested attrs classes with default value."""
    cl, vals = cl_and_vals
    obj = cl(*vals)
    attrs_with_defaults = [a for a in fields(cl) if a.default is not NOTHING]
    to_remove = data.draw(
        lists(elements=sampled_from(attrs_with_defaults), unique=True))

    for a in to_remove:
        if isinstance(a.default, Factory):
            setattr(obj, a.name, a.default.factory())
        else:
            setattr(obj, a.name, a.default)

    dumped = asdict(obj)

    for a in to_remove:
        del dumped[a.name]

    assert obj == converter.structure(dumped, cl)
Example #32
def test_parts_from_marginals(data, seed):
    # todo: add CKY, DepTree too?
    model = data.draw(sampled_from([CKY_CRF, DepTree]))
    struct = model()
    torch.manual_seed(seed)
    vals, (batch, N) = test_lookup[model]._rand()
    vals_jax = struct.resize(np.array(vals.numpy()))
    Ns = np.array([N] * vals_jax.shape[0])

    edge = model(MaxSemiring).marginals(vals_jax, Ns)
    sequence, extra = struct.from_parts(edge)
    edge_ = struct.to_parts(sequence, extra, Ns)
    print(edge)
    print(sequence)
    print(edge_)
    assert (np.isclose(edge, edge_)).all(), edge - edge_

    sequence_, extra_ = struct.from_parts(edge_)
    assert (extra == extra_).all(), (extra, extra_)
    assert (np.isclose(sequence, sequence_)).all(), sequence - sequence_
Example #33
def test_bmm(backend, data):
    small_ints = integers(min_value=2, max_value=4)
    A, B, C, D = (
        data.draw(small_ints),
        data.draw(small_ints),
        data.draw(small_ints),
        data.draw(small_ints),
    )
    a = data.draw(tensors(backend=shared[backend], shape=(D, A, B)))
    b = data.draw(tensors(backend=shared[backend], shape=(1, B, C)))

    c = a @ b
    c2 = ((a.contiguous().view(D, A, B, 1) *
           b.contiguous().view(1, 1, B, C)).sum(2).view(D, A, C))
    assert_close_tensor(c, c2)
Example #34
 def create_pods(self, data):
     """
     Fabricate Pods which warrant existence and which Kubernetes would have
     made for us had we actually been using it.  This happens automatically
     as a consequence of creating appropriate Deployments (by way of
     ReplicaSets).
     """
     deployments = list(
         deployment
         for deployment in self.kubernetes._state.deployments.items
         if 0 == len(get_pods(self.kubernetes._state, deployment)))
     assume([] != deployments)
     addresses = ipv4_addresses()
     for deployment in deployments:
         self.kubernetes._state_changed(
             self.kubernetes._state.create(
                 u"pods",
                 derive_pod(self.kube_model, deployment,
                            data.draw(addresses)),
             ), )
         self.has_pod.add(deployment.metadata.annotations[u"subscription"])
Example #35
    def test_upgrades_run(self, values, data):
        """
        ``Schema.run_upgrades`` executes all of the statements from the given
        ``SchemaUpgrade`` instances.
        """
        # Pick a version at which to start the database.
        current_version = data.draw(
            integers(min_value=0, max_value=len(values)),
        )

        upgrades = list(
            # Interpolating into SQL here ... bad form but I don't want to
            # hand-code a bunch of unique SQL statements for this test.  A
            # schema upgrade would normally not have a variable in it like
            # this.
            SchemaUpgrade(["INSERT INTO [a] ([b]) VALUES ({})".format(value)])
            for value
            in values
        )

        schema = Schema(upgrades=upgrades)
        db = connect(":memory:")
        cursor = db.cursor()

        # Create the table we're going to mess with.
        cursor.execute("CREATE TABLE [a] ([b] INTEGER)")

        # Fast-forward to the state we're going to pretend the database is at.
        change_user_version(cursor, lambda old_version: current_version)

        # Run whatever upgrades remain appropriate.
        schema.run_upgrades(cursor)

        cursor.execute("SELECT [b] FROM [a]")
        selected_values = list(b for (b,) in cursor.fetchall())

        self.assertThat(
            selected_values,
            Equals(values[current_version:]),
        )
Example #36
def test_mm(backend, data):
    small_ints = integers(min_value=2, max_value=4)
    A, B, C, D = (
        data.draw(small_ints),
        data.draw(small_ints),
        data.draw(small_ints),
        data.draw(small_ints),
    )
    a = data.draw(tensors(backend=backend, shape=(D, A, B)))
    b = data.draw(tensors(backend=backend, shape=(1, B, C)))

    c = a @ b
    c2 = ((a.contiguous().view(D, A, B, 1) *
           b.contiguous().view(1, 1, B, C)).sum(2).view(D, A, C))
    for ind in c._tensor.indices():
        assert_close(c[ind], c2[ind])
Example #37
def test_params(data, seed):
    model = data.draw(
        sampled_from([Alignment, DepTree, SemiMarkov, DepTree, CKY, CKY_CRF]))
    struct = model()
    torch.manual_seed(seed)
    vals, (batch, N) = struct._rand()
    if isinstance(vals, tuple):
        vals = tuple((v.requires_grad_(True) for v in vals))
    else:
        vals.requires_grad_(True)
    # torch.autograd.set_detect_anomaly(True)
    semiring = LogSemiring
    alpha = model(semiring).sum(vals)
    alpha.sum().backward()

    if not isinstance(vals, tuple):
        b = vals.grad.detach()
        vals.grad.zero_()
        alpha = model(semiring).sum(vals, _autograd=False)
        alpha.sum().backward()
        c = vals.grad.detach()
        assert torch.isclose(b, c).all()
Example #38
def test_dataarrays(dtype, size_dims, size_sides, data):
    elements = None
    coords_elements = None

    min_dims, max_dims = size_dims
    min_side, max_side = size_sides

    S = hxr.dataarrays(dtype, elements, coords_elements, min_side, max_side,
                       min_dims, max_dims)

    da = data.draw(S)

    assert len(da.dims) >= min_dims
    assert len(da.dims) <= max_dims
    assert all(ss >= min_side for ss in da.sizes.values())
    assert (max_side is None) or all(ss <= max_side
                                     for ss in da.sizes.values())
    assert da.dtype == np.dtype(dtype)
    for dd in da.dims:
        L = da.coords[dd].values.tolist()
        assert all(isinstance(ss, int) for ss in L)
        assert len(set(L)) == len(L)
Example #39
def test_alignment(data):
    model = data.draw(sampled_from([Alignment]))
    semiring = data.draw(sampled_from([StdSemiring]))
    struct = model(semiring)
    vals, (batch, N) = model._rand()
    struct = model(semiring,
                   max_gap=max(3,
                               abs(vals.shape[1] - vals.shape[2]) + 1))
    vals.fill_(1)
    alpha = struct.sum(vals)

    model = data.draw(sampled_from([Alignment]))
    semiring = data.draw(sampled_from([StdSemiring]))
    struct = model(semiring)
    vals, (batch, N) = model._rand()
    vals.fill_(1)

    alpha = struct.sum(vals)
    count = struct.enumerate(vals)[0]
    assert torch.isclose(count, alpha).all()
    model = data.draw(sampled_from([Alignment]))
    semiring = data.draw(sampled_from([LogSemiring]))
    struct = model(semiring)
    vals, (batch, N) = model._rand()
    alpha = struct.sum(vals)
    count = struct.enumerate(vals)[0]
    assert torch.isclose(count, alpha).all()

    semiring = data.draw(sampled_from([MaxSemiring]))
    struct = model(semiring, local=True)
    vals, (batch, N) = model._rand()
    vals[..., 0] = -2 * vals[..., 0].abs()
    vals[..., 1] = vals[..., 1].abs()
    vals[..., 2] = -2 * vals[..., 2].abs()
    alpha = struct.sum(vals)
    count = struct.enumerate(vals)[0]
    mx = struct.marginals(vals)
    print(alpha, count)
    print(mx[0].nonzero())

    assert torch.isclose(count, alpha).all()
Example #40
    def test_archive_redundant(self, data):
        """
        Make sure that when the archive command is called multiple times for a
        given interval, it does not create duplicate archives.
        """
        test_time = datetime(2005, 6, 7)

        # generate measurements for a day
        day_data = data.draw(self.generate_measurements(test_time))

        # create archives for day
        out = StringIO()
        period_end = test_time + relativedelta(days=1)
        period_end = period_end.replace(tzinfo=pytz.UTC)
        call_command('archive_measurements',
                     'day',
                     period_end=period_end,
                     stdout=out)

        # get number of archives
        n_archives = ArchiveDay.objects.filter(
            starttime__gte=period_end -
            relativedelta(days=1)).filter(starttime__lt=period_end).count()

        # call archive again
        call_command('archive_measurements',
                     'day',
                     period_end=period_end,
                     stdout=out)

        # check that the number of archives didn't change
        n_archives2 = ArchiveDay.objects.filter(
            starttime__gte=period_end -
            relativedelta(days=1)).filter(starttime__lt=period_end).count()

        if day_data:
            self.assertTrue(n_archives > 0)
        self.assertEqual(n_archives, n_archives2)
Example #41
def test_vars_to_dims_dicts(var_sizes, dim_sizes, data):
    min_vars, max_vars = var_sizes
    min_dims, max_dims = dim_sizes

    S = hxr.vars_to_dims_dicts(min_vars, max_vars, min_dims, max_dims)

    D = data.draw(S)

    n = len(D)
    assert n >= min_vars
    assert (max_vars is None) or (n <= max_vars)
    assert all(len(dd) >= min_dims for _, dd in D.items())
    assert (max_dims is None) or all(
        len(dd) <= max_dims for _, dd in D.items())
    assert all(all(isinstance(ss, str) for ss in dd) for _, dd in D.items())

    if all(len(dd) <= MAX_DIM_LEN for _, dd in D.items()):
        ds = xr.Dataset({
            vv: xr.DataArray(np.zeros((1, ) * len(dd)), dims=dd)
            for vv, dd in D.items()
        })
        assert set(ds) == set(D.keys())
        assert all(ds[vv].dims == tuple(dd) for vv, dd in D.items())
Example #42
 def create_pods(self, data):
     """
     Fabricate Pods which warrant existence and which Kubernetes would have
     made for us had we actually been using it.  This happens automatically
     as a consequence of creating appropriate Deployments (by way of
     ReplicaSets).
     """
     deployments = list(
         deployment
         for deployment
         in self.kubernetes._state.deployments.items
         if 0 == len(get_pods(self.kubernetes._state, deployment))
     )
     assume([] != deployments)
     addresses = ipv4_addresses()
     for deployment in deployments:
         self.kubernetes._state_changed(
             self.kubernetes._state.create(
                 u"pods",
                 derive_pod(self.kube_model, deployment, data.draw(addresses)),
             ),
         )
         self.has_pod.add(deployment.metadata.annotations[u"subscription"])
Example #43
def test_kmax(data):
    model = data.draw(sampled_from([LinearChain, SemiMarkov, DepTree]))
    K = 2
    semiring = KMaxSemiring(K)
    struct = model(semiring)
    vals, (batch, N) = model._rand()
    max1 = model(MaxSemiring).sum(vals)
    alpha = struct.sum(vals, _raw=True)
    assert (alpha[0] == max1).all()
    assert (alpha[1] <= max1).all()

    topk = struct.marginals(vals, _raw=True)
    argmax = model(MaxSemiring).marginals(vals)
    assert (topk[0] == argmax).all()
    print(topk[0].nonzero(), topk[1].nonzero())
    assert (topk[1] != topk[0]).any()

    if model != DepTree:
        log_probs = model(MaxSemiring).enumerate(vals)[1]
        tops = torch.topk(torch.cat(log_probs, dim=0), 5, 0)[0]
        assert torch.isclose(struct.score(topk[1], vals), alpha[1]).all()
        for k in range(K):
            assert (torch.isclose(alpha[k], tops[k])).all()
Example #44
def test_bcast_tuple_of_arrays(args, data):
    """Now testing broadcasting of tuple_of_arrays, kind of crazy since it uses
    gufuncs to test itself. Some awkwardness here since there are a lot of
    corner cases when dealing with object types in the numpy extension.

    For completeness, we should probably write a function like this for the
    other functions, but they always just pass dtype, elements, unique to
    `_tuple_of_arrays` anyway, so this should be pretty good.
    """
    shapes, dtype, elements, unique = args

    shapes = shapes.ravel()
    # Need to squeeze out due to weird behaviour of object dtype arrays
    dtype = np.squeeze(dtype, -1)
    elements = np.squeeze(elements, -1)

    elements_shape = max(dtype.shape, elements.shape)
    dtype_ = np.broadcast_to(dtype, elements_shape)
    if elements_shape == ():
        elements = from_dtype(dtype_.item())
    else:
        elements = [from_dtype(dd) for dd in dtype_]

    shapes_shape = max(shapes.shape, dtype.shape, elements_shape, unique.shape)
    shapes = np.broadcast_to(shapes, shapes_shape)

    S = gu._tuple_of_arrays(shapes, dtype, elements=elements, unique=unique)
    X = data.draw(S)

    assert len(shapes) == len(X)
    for spec, drawn in zip(shapes, X):
        assert tuple(spec) == np.shape(drawn)

    for ii, xx in enumerate(X):
        dd = dtype[ii] if dtype.size > 1 else dtype.item()
        uu = unique[ii] if unique.size > 1 else unique.item()
        validate_elements([xx], dtype=dd, unique=uu)
Example #45
        def test_hostname_labels_long_idn_punycode(self, data):
            # type: (SearchStrategy) -> None
            """
            hostname_labels() handles the case where idna_text() generates
            text that, when encoded to punycode, ends up longer than allowed.
            """
            @composite
            def mock_idna_text(draw, min_size, max_size):
                # type: (DrawCallable, int, int) -> Text
                # We want a string that does not exceed max_size, but when
                # encoded to punycode, does exceed max_size.
                # So use a unicode character that is larger when encoded,
                # "á" being a great example, and use it max_size times, which
                # will be max_size * 3 in size when encoded.
                return u"\N{LATIN SMALL LETTER A WITH ACUTE}" * max_size

            with patch("hyperlink.hypothesis.idna_text", mock_idna_text):
                label = data.draw(hostname_labels())
                try:
                    check_label(label)
                    idna_encode(label)
                except UnicodeError:  # pragma: no cover
                    raise AssertionError(
                        "Invalid IDN label: {!r}".format(label))
Example #46
def test_indexing(data, x):
    d = data.draw(dim(x))
    i = data.draw(integers(min_value=0, max_value=x.shape[d] - 1))
    x2 = x[{d: i}]
    assert set(x2.dims) == set(x.dims) - set([d])

    ds = data.draw(dims(x))
    index = {}
    for d in ds:
        i = data.draw(integers(min_value=0, max_value=x.shape[d] - 1))
        index[d] = i
    x2 = x[index]
    assert set(x2.dims) == set(x.dims) - set(ds)

    ds = data.draw(dims(x))
    index = {}
    for d in ds:
        i = data.draw(integers(min_value=0, max_value=x.shape[d] - 1))
        j = data.draw(integers(min_value=i + 1, max_value=x.shape[d]))
        index[d] = slice(i, j)
    x2 = x[index]
    assert set(x2.dims) == set(x.dims)
    x[index] = 6
Example #47
 def test_j_min(self, i_range, j_range, data):
     num_indices = len(i_range) * len(j_range)
     v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
     catalog = FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
     assert catalog.j_min == j_range.start
Example #48
 def test_v_range_preserved(self, i_range, j_range, data):
     num_indices = len(i_range) * len(j_range)
     v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
     catalog = FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
     assert catalog.v_range == v_range
Example #49
 def test_iteration(self, key_range, data):
     value_range = data.draw(ranges(min_size=len(key_range), max_size=len(key_range)))
     catalog = LinearRegularCatalog(key_range.start, key_range[-1], key_range.step,
                                    value_range.start, value_range[-1], value_range.step)
     assert all(k == m for k, m in zip(key_range, iter(catalog)))
Example #50
 def test_containment_positive(self, i_range, j_range, data):
     num_indices = len(i_range) * len(j_range)
     v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
     catalog = FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
     assert all((i, j) in catalog for (i, j) in product(i_range, j_range))
Example #51
 def test_mismatched_ranges_raises_value_error(self, i_range, j_range, data):
     num_indices = len(i_range) * len(j_range)
     v_range = data.draw(ranges())
     assume(len(v_range) != num_indices)
     with raises(ValueError):
         FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
Example #52
 def test_iteration(self, i_range, j_range, data):
     num_indices = len(i_range) * len(j_range)
     v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
     catalog = FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
     assert all(a == b for a, b in zip(((i, j) for (j, i) in product(j_range, i_range)), iter(catalog)))
Example #53
 def test_length(self, i_range, j_range, data):
     num_indices = len(i_range) * len(j_range)
     v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
     catalog = FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
     assert len(catalog) == num_indices
Example #54
 def test_value_stop(self, i_range, j_range, data):
     num_indices = len(i_range) * len(j_range)
     v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
     catalog = FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
     assert catalog.value_last() == last(v_range)
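`last` is a small helper used by several of these catalog tests and is not shown here; presumably it returns the final element of a sequence:

def last(sequence):
    # Final element of a non-empty indexable sequence such as a range.
    return sequence[-1]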
Example #55
 def test_key_max(self, i_range, j_range, data):
     num_indices = len(i_range) * len(j_range)
     v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
     catalog = FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
     assert catalog.key_max() == (i_range.stop - i_range.step,
                                  j_range.stop - j_range.step)
Example #56
 def test_key(self, i_range, j_range, data):
     num_indices = len(i_range) * len(j_range)
     v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
     catalog = FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
     assert all(catalog.key(value) == key for key, value in catalog.items())
Example #57
def test_get_voltage_setpoint_step_default(instrument, data):
    channel_id = data.draw(sampled_from(instrument.channel_ids))
    channel = instrument.channel(channel_id)
    assert channel.voltage.setpoint.step.default == 0.001
Example #58
def test_set_current_setpoint_level(instrument, data):
    channel_id = data.draw(sampled_from(instrument.channel_ids))
    channel = instrument.channel(channel_id)
    current = data.draw(
        floats(channel.current.protection.min, channel.current.protection.max).map(
            lambda v: round(v, 3)))
    channel.current.setpoint.level = current
    assert channel.current.setpoint.level == current
Example #59
 def get_example(self, strat, data):
     data.draw(strat)
Example #60
def test_floats_are_in_range(x, y, data):
    x, y = sorted((x, y))
    assume(x < y)

    t = data.draw(floats(x, y))
    assert x <= t <= y