Example #1
@st.composite
def json_feature(draw):
    """Generate a JSON address feature and its center."""
    point_type = draw(st.sampled_from(['Point', 'LineString']))

    if point_type == 'LineString':
        # geometry coordinates are stored as (lon, lat), so reverse the usual (lat, lon) order
        points = draw(st.lists(random_coords().map(lambda point: (point[1], point[0]))))
        # calculate the center of the feature
        center = tuple(reversed(coords_center(points))) if points else (None, None)
    else:
        center = draw(random_coords())
        points = tuple(reversed(center))

    feature_def = {
        'geometry': {
            'type': point_type,
            'coordinates': points,
        },
        'properties': {
            'addr:city': draw(st.sampled_from(['Caer Morhen', 'Minas Tirith', 'Sietch Tabr', 'Gondolin'])),
            'addr:housenumber': str(draw(st.integers(max_value=500))),
            'addr:postcode': '%02d-%03d' % (draw(st.integers(max_value=999)), draw(st.integers(max_value=999))),
            'addr:street': draw(st.sampled_from([None, 'Prosta', 'Penny Lane', 'Main', 'Millers'])),
            'amenity': draw(st.sampled_from([None, 'library', 'school', 'college', 'bench'])),
            'email': draw(st.sampled_from([None, '*****@*****.**', '*****@*****.**', '*****@*****.**'])),
            'fax': draw(st.integers(min_value=100000, max_value=999999)),
            'name': draw(st.sampled_from([None, 'Sceas', 'Geeas', 'WWfaol'])),
        }
    }
    return feature_def, center
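A strategy like this is consumed through @given. A minimal usage sketch, assuming json_feature is registered as a composite strategy (as above) and that random_coords and coords_center come from the same test module:

from hypothesis import given

@given(json_feature())
def test_feature_has_expected_shape(feature_and_center):
    feature, center = feature_and_center
    # every generated feature carries a geometry and address properties
    assert set(feature) == {'geometry', 'properties'}
    assert 'addr:city' in feature['properties']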
Example #2
def test_containment(n):
    iv = minimal(
        tuples(lists(integers()), integers()),
        lambda x: x[1] in x[0] and x[1] >= n,
        timeout_after=60
    )
    assert iv == ([n], n)
Example #3
@st.composite
def np_images(draw,
              number,
              width=st.integers(min_img_width, max_img_width).example(),
              height=st.integers(min_img_height, max_img_height).example()):
    return draw(st.lists(hnp.arrays(np.uint32, (width,height),
                                    elements=st.integers(0,max_uint32)),
                         min_size=number, max_size=number))
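Note that the width and height defaults above are fixed once, at import time, because .example() runs when the def statement is evaluated; Hypothesis also discourages .example() outside interactive exploration. A sketch of an equivalent composite that draws the dimensions per example instead, assuming the same min_img_*/max_img_* bounds are in scope:

@st.composite
def np_images_drawn(draw, number):
    # draw fresh dimensions for every generated example
    width = draw(st.integers(min_img_width, max_img_width))
    height = draw(st.integers(min_img_height, max_img_height))
    return draw(st.lists(hnp.arrays(np.uint32, (width, height),
                                    elements=st.integers(0, max_uint32)),
                         min_size=number, max_size=number))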
Example #4
def test_suppressing_filtering_health_check():
    forbidden = set()

    def unhealthy_filter(x):
        if len(forbidden) < 200:
            forbidden.add(x)
        return x not in forbidden

    @given(st.integers().filter(unhealthy_filter))
    def test1(x):
        raise ValueError()

    with raises(FailedHealthCheck):
        test1()

    forbidden = set()

    @settings(suppress_health_check=[
        HealthCheck.filter_too_much, HealthCheck.too_slow])
    @given(st.integers().filter(unhealthy_filter))
    def test2(x):
        raise ValueError()

    with raises(ValueError):
        test2()
Example #5
def test_seeds_off_random():
    s = settings(max_shrinks=0, database=None)
    r = random.getstate()
    x = find(st.integers(), lambda x: True, settings=s)
    random.setstate(r)
    y = find(st.integers(), lambda x: True, settings=s)
    assert x == y
Example #6
@st.composite
def _inputs(draw):
    N = draw(st.integers(min_value=0, max_value=5))
    D = draw(st.integers(min_value=1, max_value=5))
    # N, D, data, lambda1, lambda2
    return (
        N,
        D,
        draw(st.lists(
            min_size=N * D,
            max_size=N * D,
            elements=st.one_of(
                st.floats(min_value=-10, max_value=1 - TOLERANCE),
                st.floats(min_value=1 + TOLERANCE, max_value=10))
        )),
        draw(st.lists(
            elements=st.one_of(
                st.floats(min_value=-2, max_value=-TOLERANCE),
                st.floats(min_value=TOLERANCE, max_value=2)),
            min_size=D,
            max_size=D,
        )),
        draw(st.lists(
            elements=st.floats(min_value=-2, max_value=2),
            min_size=D,
            max_size=D,
        )),
    )
Example #7
def test_different_keys_are_not_shared():
    find_any(
        st.tuples(
            st.shared(st.integers(), key=1),
            st.shared(st.integers(), key=2)),
        lambda x: x[0] != x[1]
    )
Example #8
def lengths(size, min_segments=None, max_segments=None, **kwargs):
    # First generate the number of borders between segments,
    # then draw the border values and add 0 and size to them.
    # Sorting and taking successive differences converts them into
    # segment lengths, any of which may be 0.
    if min_segments is None:
        min_segments = 0
    if max_segments is None:
        max_segments = size
    assert min_segments >= 0
    assert min_segments <= max_segments
    if size == 0 and max_segments == 0:
        return st.just(np.empty(shape=[0], dtype=np.int32))
    assert max_segments > 0, "size is not 0, need at least one segment"
    return st.integers(
        min_value=max(min_segments - 1, 0), max_value=max_segments - 1
    ).flatmap(
        lambda num_borders:
        hypothesis.extra.numpy.arrays(
            np.int32, num_borders, elements=st.integers(
                min_value=0, max_value=size
            )
        )
    ).map(
        lambda x: np.append(x, np.array([0, size], dtype=np.int32))
    ).map(sorted).map(np.diff)
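Because 0 and size are always appended before sorting and differencing, the generated lengths necessarily sum to size. A hedged property sketch (assuming the hypothesis, numpy, and st imports used above):

from hypothesis import given, strategies as st

@given(st.integers(0, 20).flatmap(
    lambda size: st.tuples(st.just(size), lengths(size))))
def test_lengths_sum_to_size(size_and_lengths):
    size, segment_lengths = size_and_lengths
    assert segment_lengths.sum() == size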
Example #9
def _gen_test_add_padding(with_pad_data=True,
                          is_remove=False):
    def gen_with_size(args):
        lengths, inner_shape = args
        data_dim = [sum(lengths)] + inner_shape
        lengths = np.array(lengths, dtype=np.int32)
        if with_pad_data:
            return st.tuples(
                st.just(lengths),
                hu.arrays(data_dim),
                hu.arrays(inner_shape),
                hu.arrays(inner_shape))
        else:
            return st.tuples(st.just(lengths), hu.arrays(data_dim))

    min_len = 4 if is_remove else 0
    lengths = st.lists(
        st.integers(min_value=min_len, max_value=10),
        min_size=0,
        max_size=5)
    inner_shape = st.lists(
        st.integers(min_value=1, max_value=3),
        min_size=0,
        max_size=2)
    return st.tuples(lengths, inner_shape).flatmap(gen_with_size)
Example #10
def api_results(min_size=0, max_size=20, hook_types=None):
    count = integers(min_value=min_size, max_value=max_size).example()
    hook_types = hook_types or get_hook_names()

    return fixed_dictionaries(
        {
            "count": just(count),
            "next": none(),
            "prev": none(),
            "results": lists(
                fixed_dictionaries(
                    {
                        "name": text(min_size=1),
                        "latest_version": integers(min_value=0),
                        "content": fixed_dictionaries(
                            {
                                "hook_type": sampled_from(hook_types),
                                "version": integers(min_value=0),
                                "description": text(min_size=1),
                                "download_url": text(min_size=1),
                                "checksum": text(min_size=1),
                            }
                        ),
                    }
                ),
                min_size=count,
                max_size=count,
            ),
        }
    )
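As in Example #3, count here is fixed by a single .example() call when the strategy is constructed, so every generated payload shares one list length. A sketch that keeps the count inside the strategy instead (same helpers assumed, including get_hook_names):

def api_results_drawn(min_size=0, max_size=20, hook_types=None):
    hook_types = hook_types or get_hook_names()
    result = fixed_dictionaries({
        "name": text(min_size=1),
        "latest_version": integers(min_value=0),
        "content": fixed_dictionaries({
            "hook_type": sampled_from(hook_types),
            "version": integers(min_value=0),
            "description": text(min_size=1),
            "download_url": text(min_size=1),
            "checksum": text(min_size=1),
        }),
    })
    # draw the count per example, then force the results list to match it
    return integers(min_value=min_size, max_value=max_size).flatmap(
        lambda count: fixed_dictionaries({
            "count": just(count),
            "next": none(),
            "prev": none(),
            "results": lists(result, min_size=count, max_size=count),
        })
    )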
Example #11
def test_keys_and_default_are_not_shared():
    find_any(
        st.tuples(
            st.shared(st.integers(), key=1),
            st.shared(st.integers())),
        lambda x: x[0] != x[1]
    )
Example #12
@hs.composite
def slice_node(draw):
    lower = draw(hs.one_of(const_node(hs.integers()), hs.none()))
    upper = draw(hs.one_of(const_node(hs.integers()), hs.none()))
    step = draw(hs.one_of(const_node(hs.integers()), hs.none()))
    node = astroid.Slice()
    node.postinit(lower, upper, step)
    return node
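A usage sketch, assuming hs aliases hypothesis.strategies and const_node is defined in the same module:

import astroid
from hypothesis import given

@given(slice_node())
def test_slice_node_builds_slice(node):
    # the composite always postinits a Slice node
    assert isinstance(node, astroid.Slice)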
Example #13
@st.composite
def _tensor_splits(draw):
    lengths = draw(st.lists(st.integers(1, 5), min_size=1, max_size=10))
    batch_size = draw(st.integers(1, 5))
    element_pairs = [
        (batch, r) for batch in range(batch_size) for r in range(len(lengths))
    ]
    perm = draw(st.permutations(element_pairs))
    perm = perm[:-1]  # skip one range
    ranges = [[(0, 0)] * len(lengths) for _ in range(batch_size)]
    offset = 0
    for pair in perm:
        ranges[pair[0]][pair[1]] = (offset, lengths[pair[1]])
        offset += lengths[pair[1]]

    data = draw(st.lists(
        st.floats(min_value=-1.0, max_value=1.0),
        min_size=offset,
        max_size=offset
    ))

    key = draw(st.permutations(range(offset)))

    return (
        np.array(data).astype(np.float32), np.array(ranges),
        np.array(lengths), np.array(key).astype(np.int64)
    )
Example #14
def test_missing_key_raises_key_error(self, r, d):
    values = d.draw(lists(integers(), min_size=len(r), max_size=len(r)))
    k = d.draw(integers())
    assume(k not in r)
    catalog = RegularCatalog(r.start, r[-1], r.step, values)
    with raises(KeyError):
        catalog[k]
Example #15
def test_can_clone_morphers_into_inactive_morphers():
    m = find(
        s.lists(morphers),
        lambda x: len(x) >= 2 and x[0].become(s.integers()) >= 0)
    with BuildContext():
        m_as_ints = [x.clean_slate().become(s.integers()) for x in m]
    assert m_as_ints == [0, 0]
Example #16
@strategies.composite
def perturbed_by_subsection(draw, string_strategy):
    """
    A strategy that constructs a string using the supplied ``string_strategy``,
    and then perturbs it by replacing sections of it with other (possibly empty)
    strings.
    """
    output_string = draw(string_strategy)
    iterations = draw(strategies.integers(min_value=1, max_value=10))

    for _ in range(iterations):
        range_start = draw(strategies.integers(
            min_value=0,
            max_value=len(output_string)
        ))
        range_end = draw(strategies.integers(
            min_value=range_start,
            max_value=len(output_string)
        ))
        if range_start == range_end:
            min_size = 1
        else:
            min_size = 0
        substitution = draw(strategies.text(min_size=min_size))

        output_string = output_string[:range_start] + substitution + output_string[range_end:]

    return output_string
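The docstring points at the intended use: start from valid input, corrupt it, and check the consumer degrades gracefully. A hedged sketch where parse_or_reject is a hypothetical stand-in for the code under test:

from hypothesis import given, strategies

@given(perturbed_by_subsection(strategies.text(alphabet='0123456789', min_size=1)))
def test_parser_survives_perturbed_input(corrupted):
    try:
        parse_or_reject(corrupted)  # hypothetical: parses or raises ValueError
    except ValueError:
        pass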
Example #17
@st.composite
def matrix(draw):
    rows = draw(st.integers(1, 10))
    columns = draw(st.integers(1, 10))
    return [
        [draw(st.integers(0, 10000)) for _ in range(columns)]
        for _ in range(rows)
    ]
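One property this strategy guarantees is that every row has the same width; a quick check:

from hypothesis import given

@given(matrix())
def test_matrix_is_rectangular(m):
    assert len({len(row) for row in m}) == 1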
Example #18
@st.composite
def _tensor_splits(draw, add_axis=False):
    """Generates (axis, split_info, tensor_splits) tuples."""
    tensor = draw(hu.tensor(min_value=4))  # Each dim has at least 4 elements.
    axis = draw(st.integers(-len(tensor.shape), len(tensor.shape) - 1))
    if add_axis:
        # Simple case: get individual slices along one axis, where each of them
        # is (N-1)-dimensional. The axis will be added back upon concatenation.
        return (
            axis,
            np.ones(tensor.shape[axis], dtype=np.int32),
            [
                np.array(tensor.take(i, axis=axis))
                for i in range(tensor.shape[axis])
            ]
        )
    else:
        # General case: pick some (possibly consecutive, even non-unique)
        # indices at which we will split the tensor, along the given axis.
        splits = sorted(draw(
            st.lists(elements=st.integers(0, tensor.shape[axis]), max_size=4)
        ) + [0, tensor.shape[axis]])
        return (
            axis,
            np.array(np.diff(splits), dtype=np.int32),
            [
                tensor.take(range(splits[i], splits[i + 1]), axis=axis)
                for i in range(len(splits) - 1)
            ],
        )
Example #19
def test_streaming_flatmap_past_point_of_read():
    s = find(
        streaming(integers().flatmap(lambda n: integers(min_value=n))),
        lambda x: x[0])
    assert s[0] == 1
    for i in hrange(100):
        s[i]
Example #20
@st.composite
def _dense_features_map(draw, num_records, **kwargs):
    float_lengths = draw(
        st.lists(
            st.integers(min_value=1, max_value=10),
            min_size=num_records,
            max_size=num_records
        )
    )

    total_length = sum(float_lengths)

    float_keys = draw(
        st.lists(
            st.integers(min_value=1, max_value=100),
            min_size=total_length,
            max_size=total_length,
            unique=True
        )
    )

    float_values = draw(
        st.lists(st.floats(),
                 min_size=total_length,
                 max_size=total_length)
    )

    return [float_lengths, float_keys, float_values]
Example #21
@st.composite
def _lists_pairs_with_duplication(draw):
    # pylint: disable=too-many-locals
    n = draw(st.integers(min_value=3, max_value=LISTS_MAX_SIZE))
    indexes = st.integers(min_value=0, max_value=n - 1)
    keys = draw(st.lists(IMMUTABLES, min_size=n, max_size=n))
    vals = draw(st.lists(IMMUTABLES, min_size=n, max_size=n))
    fwd = OrderedDict(izip(keys, vals))
    inv = OrderedDict(izip(vals, keys))
    which_to_dup = draw(RAND).choice((1, 2, 3))
    should_dup_key = which_to_dup in (1, 3)
    should_dup_val = which_to_dup in (2, 3)
    should_add_dup_key = should_dup_key and len(fwd) < n
    should_add_dup_val = should_dup_val and len(inv) < n
    if not should_add_dup_key and not should_add_dup_val:
        return list(izip(keys, vals))
    if should_add_dup_key:
        dup_key_idx = draw(indexes)
        added_key = keys[dup_key_idx]
    else:
        added_key = draw(IMMUTABLES)
        assume(added_key not in fwd)
    if should_add_dup_val:
        dup_val_idx = draw(indexes)
        if should_add_dup_key:
            assume(dup_val_idx != dup_key_idx)
        added_val = vals[dup_val_idx]
    else:
        added_val = draw(IMMUTABLES)
        assume(added_val not in inv)
    insert_idx = draw(indexes)
    keys.insert(insert_idx, added_key)
    vals.insert(insert_idx, added_val)
    return list(izip(keys, vals))
Example #22
def seq_qual_strategy_factory(min_length=1, max_length=250, min_qual=0, max_qual=40, alphabet='ATGCN'):
    return st.integers(min_value=min_length, max_value=max_length).flatmap(
        lambda n:
            st.tuples(
                st.text(alphabet=alphabet, min_size=n, max_size=n),
                st.lists(st.integers(min_value=min_qual, max_value=max_qual), min_size=n, max_size=n)
            )
    )
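The flatmap ties the sequence length and the quality-score list length to the same drawn n, which is the invariant a consumer would rely on. A sketch:

from hypothesis import given

@given(seq_qual_strategy_factory())
def test_seq_and_qual_lengths_match(seq_and_qual):
    seq, quals = seq_and_qual
    assert len(seq) == len(quals)
    assert all(0 <= q <= 40 for q in quals)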
Example #23
@st.composite
def write_pattern(draw, min_size=0):
    keys = draw(st.lists(st.integers(0, 1000), unique=True, min_size=1))
    values = draw(st.lists(st.integers(), unique=True, min_size=1))
    return draw(
        st.lists(
            st.tuples(st.sampled_from(keys), st.sampled_from(values)), min_size=min_size
        )
    )
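Drawing keys and values from small shared pools makes duplicate writes likely, which is useful for exercising map-like containers. A minimal last-write-wins check against a plain dict:

from hypothesis import given

@given(write_pattern())
def test_last_write_wins(writes):
    d = {}
    for k, v in writes:
        d[k] = v
    for k, v in d.items():
        # the stored value is the last one written for that key
        assert v == [value for key, value in writes if key == k][-1]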
Example #24
def test_uniqueness_does_not_affect_other_rows_2():
    data_frames = pdst.data_frames([
        pdst.column('A', dtype=int, unique=False),
        pdst.column('B', dtype=int, unique=True)],
        rows=st.tuples(st.integers(0, 10), st.integers(0, 10)),
        index=pdst.range_indexes(2, 2)
    )
    find_any(data_frames, lambda x: x['A'][0] == x['A'][1])
Example #25
def test_minimizes_ints_from_down_to_boundary(boundary):
    def is_good(x):
        assert x >= boundary - 10
        return x >= boundary

    assert minimal(integers(min_value=boundary - 10), is_good) == boundary

    assert minimal(integers(min_value=boundary), lambda x: True) == boundary
Example #26
def test_containment_negative_j_out_of_range(self, i_range, j_range, data):
    num_indices = len(i_range) * len(j_range)
    v_range = data.draw(ranges(min_size=num_indices, max_size=num_indices))
    i = data.draw(integers())
    j = data.draw(integers())
    assume(j not in j_range)
    catalog = FirstIndexVariesQuickestCatalog2D(i_range, j_range, v_range)
    assert (i, j) not in catalog
Example #27
def test_tuple_strategy_too_large_to_fit():
    x = frozensets(integers(0, 5))
    assert not math.isinf(x.template_upper_bound)
    x = tuples(x, x)
    assert not math.isinf(x.template_upper_bound)
    assert math.isinf(tuples(x, x).template_upper_bound)
    assert math.isinf(
        tuples(integers(), x).template_upper_bound)
Example #28
def test_diff_values_int(data):
    x = data.draw(st.integers(), label='x')
    y = data.draw(st.integers(min_value=x + 1), label='x+1')
    z = data.draw(st.integers(max_value=x - 1), label='x-1')

    assert not are_values_different(x, x)
    assert are_values_different(x, y)
    assert are_values_different(x, z)
    assert are_values_different(y, z)
Example #29
def test_always_reduces_integers_to_smallest_suitable_sizes(problem):
    n, blob = problem
    blob = hbytes(blob)
    try:
        d = ConjectureData.for_buffer(blob)
        k = d.draw(st.integers())
        stop = blob[len(d.buffer)]
    except (StopTest, IndexError):
        reject()

    assume(k > n)
    assume(stop > 0)

    def f(data):
        k = data.draw(st.integers())
        data.output = repr(k)
        if data.draw_bits(8) == stop and k >= n:
            data.mark_interesting()

    runner = ConjectureRunner(f, random=Random(0), settings=settings(
        suppress_health_check=HealthCheck.all(), timeout=unlimited,
        phases=(Phase.shrink,), database=None, verbosity=Verbosity.debug
    ), database_key=None)

    runner.test_function(ConjectureData.for_buffer(blob))

    assert runner.interesting_examples

    v, = runner.interesting_examples.values()

    shrinker = runner.new_shrinker(v, lambda x: x.status == Status.INTERESTING)

    shrinker.clear_passes()
    shrinker.add_new_pass('minimize_individual_blocks')

    shrinker.shrink()

    v = shrinker.shrink_target

    m = ConjectureData.for_buffer(v.buffer).draw(st.integers())
    assert m == n

    # Upper bound on the length needed is calculated as follows:
    # * We have an initial byte at the beginning to decide the length of the
    #   integer.
    # * We have a terminal byte as the stop value.
    # * The rest is the integer payload. This should be n. Including the sign
    #   bit, n needs (1 + n.bit_length()) / 8 bytes (rounded up). But we only
    #   have power of two sizes, so it may be up to a factor of two more than
    #   that.
    bits_needed = 1 + n.bit_length()
    actual_bits_needed = min(
        [s for s in WideRangeIntStrategy.sizes if s >= bits_needed])
    bytes_needed = actual_bits_needed // 8
    # 3 extra bytes: two for the sampler, one for the capping value.
    assert len(v.buffer) == 3 + bytes_needed
Example #30
def test_can_simplify_lists_of_morphers_of_single_type():
    ms = find(
        s.lists(morphers),
        lambda x: sum(t.become(s.integers()) for t in x) >= 100,
        settings=Settings(database=None)
    )

    with BuildContext():
        ls = [t.clean_slate().become(s.integers()) for t in ms]
    assert sum(ls) == 100
Example #31
def test_branches_pass_through_deferred():
    x = st.one_of(st.booleans(), st.integers())
    y = st.deferred(lambda: x)
    assert x.branches == y.branches
Example #32
""" Unit tests for the `Hospital` class of players. """

from hypothesis import given
from hypothesis.strategies import integers, lists, text

from matching import Player as Resident
from matching.players import Hospital

capacity = integers(min_value=1)
pref_names = lists(text(), min_size=1)


@given(name=text(), capacity=capacity)
def test_init(name, capacity):
    """ Make an instance of Hospital and check their attributes are correct. """

    hospital = Hospital(name, capacity)

    assert hospital.name == name
    assert hospital.capacity == capacity
    assert hospital._original_capacity == capacity
    assert hospital.prefs is None
    assert hospital.pref_names is None
    assert hospital._original_prefs is None
    assert hospital.matching == []


@given(name=text(), capacity=capacity, pref_names=pref_names)
def test_get_favourite(name, capacity, pref_names):
    """ Check the correct player is returned as the hospital's favourite. """
Example #33
class MemongerTest(hu.HypothesisTestCase):
    @given(input_dim=st.integers(min_value=1, max_value=10),
           output_dim=st.integers(min_value=1, max_value=10),
           batch_size=st.integers(min_value=1, max_value=10),
           do=st.sampled_from(hu.device_options),
           algo=st.sampled_from(memonger.AssignmentAlgorithm))
    @settings(max_examples=5, timeout=120)
    def test_simple_memonger(self, input_dim, output_dim, batch_size, do,
                             algo):
        m = model_helper.ModelHelper()
        fc1 = brew.fc(m, "data", "fc1", dim_in=input_dim, dim_out=output_dim)
        fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
        fc3 = brew.fc(m, fc2, "fc3", dim_in=output_dim, dim_out=output_dim)

        fc3.Relu([], fc3)\
           .Softmax([], "pred") \
           .LabelCrossEntropy(["label"], ["xent"]) \
           .AveragedLoss([], "loss")
        input_to_grad = m.AddGradientOperators(["loss"])
        m.net.Proto().device_option.CopyFrom(do)
        m.param_init_net.Proto().device_option.CopyFrom(do)
        static_blobs = \
            [o for op in m.param_init_net.Proto().op for o in op.output] + \
            ["data", "label", "loss", input_to_grad["fc1_w"]]

        optimization = memonger.optimize_interference(m.Proto(),
                                                      static_blobs,
                                                      algo=algo)
        data = np.random.randn(batch_size, input_dim).astype(np.float32)
        label = np.random.randint(low=0, high=output_dim,
                                  size=(batch_size, )).astype(np.int32)
        workspace.RunNetOnce(m.param_init_net)
        workspace.FeedBlob("data", data, device_option=do)
        workspace.FeedBlob("label", label, device_option=do)
        workspace.RunNetOnce(m.net)
        loss = workspace.FetchBlob("loss")
        grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
        workspace.RunNetOnce(optimization.net)
        optimized_loss = workspace.FetchBlob("loss")
        optimized_grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
        np.testing.assert_almost_equal(loss, optimized_loss)
        np.testing.assert_almost_equal(grad, optimized_grad)
        stats = memonger.compute_statistics(optimization.assignments)
        self.assertLess(stats.optimized_nbytes, stats.baseline_nbytes)

        # run with blob sizes
        blob_sizes = memonger.collect_blob_sizes(m.Proto())
        optimization1 = memonger.optimize_interference(m.Proto(),
                                                       static_blobs,
                                                       blob_sizes=blob_sizes,
                                                       algo=algo)
        workspace.RunNetOnce(optimization1.net)
        optimized_loss = workspace.FetchBlob("loss")
        optimized_grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
        np.testing.assert_almost_equal(loss, optimized_loss)
        np.testing.assert_almost_equal(grad, optimized_grad)
        stats = memonger.compute_statistics(optimization1.assignments)
        self.assertLessEqual(stats.optimized_nbytes, stats.baseline_nbytes)

    @given(input_dim=st.integers(min_value=1, max_value=10),
           output_dim=st.integers(min_value=1, max_value=10),
           batch_size=st.integers(min_value=1, max_value=10),
           do=st.sampled_from(hu.device_options))
    @settings(max_examples=5, timeout=120)
    def test_fast_memonger(self, input_dim, output_dim, batch_size, do):
        m = model_helper.ModelHelper()
        fc1 = brew.fc(m, "data", "fc1", dim_in=input_dim, dim_out=output_dim)
        fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
        fc3 = brew.fc(m, fc2, "fc3", dim_in=output_dim, dim_out=output_dim)

        fc3.Relu([], fc3)\
           .Softmax([], "pred") \
           .LabelCrossEntropy(["label"], ["xent"]) \
           .AveragedLoss([], "loss")
        input_to_grad = m.AddGradientOperators(["loss"])
        m.net.Proto().device_option.CopyFrom(do)
        m.param_init_net.Proto().device_option.CopyFrom(do)
        static_blobs = \
            [o for op in m.param_init_net.Proto().op for o in op.output] + \
            ["data", "label", "loss", input_to_grad["fc1_w"]]

        optimized_net = memonger.optimize_inference_fast(
            m.Proto(), static_blobs)
        data = np.random.randn(batch_size, input_dim).astype(np.float32)
        label = np.random.randint(low=0, high=output_dim,
                                  size=(batch_size, )).astype(np.int32)
        workspace.RunNetOnce(m.param_init_net)
        workspace.FeedBlob("data", data, device_option=do)
        workspace.FeedBlob("label", label, device_option=do)
        workspace.RunNetOnce(m.net)
        loss = workspace.FetchBlob("loss")
        grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
        workspace.RunNetOnce(optimized_net)
        optimized_loss = workspace.FetchBlob("loss")
        optimized_grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
        np.testing.assert_almost_equal(loss, optimized_loss)
        np.testing.assert_almost_equal(grad, optimized_grad)

        self.assertLess(count_blobs(optimized_net), count_blobs(m.Proto()))

    def test_fast_memonger_unique_outputs(self):
        m = model_helper.ModelHelper()
        fc = []
        for i in range(2):
            z = brew.fc(m,
                        "data{}".format(i),
                        "fc".format(i),
                        dim_in=2,
                        dim_out=2)
            fc.append(z)
        r = []
        # The trick here is to have the same input appear twice in the same Sum
        for x in fc:
            for y in fc:
                r.append(brew.sum(m, [x, y], 1))
        concated = brew.concat(m, r, "concated")
        brew.relu(m, concated, "merged")

        static_blobs = \
            [o for op in m.param_init_net.Proto().op for o in op.output] + \
            ["merged"] + ["data{}".format(i) for i in range(len(fc))]

        optimized_net = memonger.optimize_inference_fast(
            m.Proto(), static_blobs)
        for op in optimized_net.op:
            self.assertEqual(len(op.output), len(set(op.output)), str(op))

    @given(input_dim=st.integers(min_value=1, max_value=4),
           output_dim=st.integers(min_value=1, max_value=4),
           batch_size=st.integers(min_value=1, max_value=4))
    def test_gradient_optim(self, input_dim, output_dim, batch_size):
        m = model_helper.ModelHelper()
        with core.NameScope("name_x"):
            fc1 = brew.fc(m,
                          "data",
                          "fc1",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
            fc3 = brew.fc(m, fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
            fc4 = brew.fc(m, fc3, "fc4", dim_in=output_dim, dim_out=output_dim)
            fc5 = brew.fc(m, fc4, "fc5", dim_in=output_dim, dim_out=output_dim)
            fc5.Relu([], fc5)\
               .Softmax([], "pred") \
               .LabelCrossEntropy(["label"], ["xent"]) \
               .AveragedLoss([], "loss")
        input_to_grad = m.AddGradientOperators(["name_x/loss"])

        blobs_before = count_blobs(m.net.Proto())
        optim_proto = memonger.share_grad_blobs(
            m.net,
            ["name_x/loss"],
            set(viewvalues(m.param_to_grad)),
            "name_x/",
            share_activations=False,
        )
        blobs_after = count_blobs(optim_proto)
        self.assertLess(blobs_after, blobs_before)

        optim_proto_wacts = memonger.share_grad_blobs(
            m.net,
            ["name_x/loss"],
            set(viewvalues(m.param_to_grad)),
            "name_x/",
            share_activations=True,
            dont_share_blobs=set([str(input_to_grad["name_x/fc1_w"])]),
        )
        blobs_wact_optim = count_blobs(optim_proto_wacts)
        self.assertLessEqual(blobs_wact_optim, blobs_after)

        # Check that the last activations are not shared
        self.assertTrue(has_blob(optim_proto, "name_x/fc5"))
        self.assertTrue(
            has_blob(optim_proto_wacts, "name_x/fc5"),
            "Dont remap final activation",
        )

        # Test networks produce exactly same gradients
        data = np.random.randn(batch_size, input_dim).astype(np.float32)
        label = np.random.randint(low=0, high=output_dim,
                                  size=(batch_size, )).astype(np.int32)
        workspace.RunNetOnce(m.param_init_net)
        workspace.FeedBlob("name_x/data", data)
        workspace.FeedBlob("name_x/label", label)
        workspace.RunNetOnce(m.net)
        loss = workspace.FetchBlob("name_x/loss")
        grad = workspace.FetchBlob(str(input_to_grad["name_x/fc1_w"]))
        workspace.RunNetOnce(optim_proto)
        optimized_loss = workspace.FetchBlob("name_x/loss")
        optimized_grad = workspace.FetchBlob(str(
            input_to_grad["name_x/fc1_w"]))
        np.testing.assert_almost_equal(loss, optimized_loss)
        np.testing.assert_almost_equal(grad, optimized_grad)

        workspace.FeedBlob(str(input_to_grad["name_x/fc1_w"]), np.array([0.0]))

        # Run with the forward optimization
        workspace.RunNetOnce(optim_proto_wacts)
        optimized_loss = workspace.FetchBlob("name_x/loss")
        optimized_grad = workspace.FetchBlob(str(
            input_to_grad["name_x/fc1_w"]))
        np.testing.assert_almost_equal(loss, optimized_loss)
        np.testing.assert_almost_equal(grad, optimized_grad)

    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
    def test_memonger_mix_cpu_gpu(self):
        '''
        Check that memonger does not make blobs cross CPU/GPU boundary
        '''
        m = model_helper.ModelHelper()
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            fc1 = brew.fc(m, "data", "fc1", dim_in=2, dim_out=2)
            fc2 = brew.fc(m, fc1, "fc2", dim_in=2, dim_out=2)
            fc3 = brew.fc(m, fc2, "fc3", dim_in=2, dim_out=2)
            fc4 = brew.fc(m, fc3, "fc4", dim_in=2, dim_out=2)
            fc4_cpu = m.net.CopyGPUToCPU(fc4, "fc4_cpu")
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 0)):
            fc5_cpu = brew.fc(m, fc4_cpu, "fc5_cpu", dim_in=2, dim_out=2)
            fc6_cpu = brew.fc(m, fc5_cpu, "fc6_cpu", dim_in=2, dim_out=2)
            fc7_cpu = brew.fc(m, fc6_cpu, "fc7_cpu", dim_in=2, dim_out=2)
            fc7_cpu.Relu([], fc7_cpu) \
               .Softmax([], "pred") \
               .LabelCrossEntropy(["label"], ["xent"]) \
               .AveragedLoss([], "loss")
        m.AddGradientOperators(["loss"])

        blobs_before = count_blobs(m.net.Proto())
        optim_proto = memonger.share_grad_blobs(
            m.net,
            ["loss"],
            set(viewvalues(m.param_to_grad)),
            "",
            share_activations=True,
            dont_share_blobs=set(),
        )
        blobs_after = count_blobs(optim_proto)
        self.assertLess(blobs_after, blobs_before)

        # Create set of blobs on CPU side and GPU side and check they don't
        # overlap
        device_blobs = {caffe2_pb2.CPU: set(), caffe2_pb2.CUDA: set()}
        for op in optim_proto.op:
            if op.type not in ['CopyCPUToGPU', "CopyGPUToCPU"]:
                dev = op.device_option.device_type
                for b in list(op.input) + list(op.output):
                    device_blobs[dev].add(b)

        device_crossers = device_blobs[caffe2_pb2.CPU].intersection(
            device_blobs[caffe2_pb2.CUDA])
        self.assertEqual(device_crossers, set())

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    def test_gradient_optim_tree(self, input_dim, output_dim, batch_size):
        m = model_helper.ModelHelper()
        with core.NameScope("name_x"):
            fc1 = brew.fc(m,
                          "data",
                          "fc1",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
            fc3 = brew.fc(m, fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
            fc4 = brew.fc(m, fc3, "fc4", dim_in=output_dim, dim_out=output_dim)
            fc5 = brew.fc(m, fc4, "fc5", dim_in=output_dim, dim_out=output_dim)
            fc5.Relu([], fc5) \
               .Softmax([], "pred1") \
               .LabelCrossEntropy(["label"], ["xent1"]) \
               .AveragedLoss([], "loss1")
            fc6 = brew.fc(m, fc5, "fc6", dim_in=output_dim, dim_out=output_dim)
            fc6.Relu([], fc6) \
               .Softmax([], "pred2") \
               .LabelCrossEntropy(["label"], ["xent2"]) \
               .AveragedLoss([], "loss2")
        input_to_grad = m.AddGradientOperators(
            ["name_x/loss1", "name_x/loss2"])

        blobs_before = count_blobs(m.net.Proto())
        optim_proto = memonger.share_grad_blobs(
            m.net,
            ["name_x/loss1", "name_x/loss2"],
            set(viewvalues(m.param_to_grad)),
            "name_x",  # "name_x//shared_gradinp_0_shared" if using "name_x/"
            share_activations=True,
            dont_share_blobs=set([
                'name_x/fc6', 'name_x/fc5',
                str(input_to_grad["name_x/fc1_w"])
            ]),
        )
        blobs_after = count_blobs(optim_proto)
        self.assertLess(blobs_after, blobs_before)
        self.assertTrue(has_blob(optim_proto, "name_x/fc6"))

        # Test networks produce exactly same gradients
        data = np.random.randn(batch_size, input_dim).astype(np.float32)
        label = np.random.randint(low=0, high=output_dim,
                                  size=(batch_size, )).astype(np.int32)
        workspace.RunNetOnce(m.param_init_net)
        workspace.FeedBlob("name_x/data", data)
        workspace.FeedBlob("name_x/label", label)
        workspace.RunNetOnce(m.net)
        loss1 = workspace.FetchBlob("name_x/loss1")
        loss2 = workspace.FetchBlob("name_x/loss2")
        grad = workspace.FetchBlob(str(input_to_grad["name_x/fc1_w"]))
        workspace.FeedBlob(str(input_to_grad["name_x/fc1_w"]), np.array([0.0]))

        workspace.RunNetOnce(optim_proto)
        optimized_loss1 = workspace.FetchBlob("name_x/loss1")
        optimized_loss2 = workspace.FetchBlob("name_x/loss2")
        optimized_grad = workspace.FetchBlob(str(
            input_to_grad["name_x/fc1_w"]))
        np.testing.assert_almost_equal(loss1, optimized_loss1)
        np.testing.assert_almost_equal(loss2, optimized_loss2)
        np.testing.assert_almost_equal(grad, optimized_grad)

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    def test_forward_optim_tree_daggy(self, input_dim, output_dim, batch_size):
        m = model_helper.ModelHelper()
        m.Proto().type = "dag"
        m.Proto().num_workers = 4

        with core.NameScope("name_x"):
            fc1 = brew.fc(m,
                          "data",
                          "fc1",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)

            fc3 = brew.fc(m, fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
            fc4 = brew.fc(m, fc3, "fc4", dim_in=output_dim, dim_out=output_dim)
            fc5 = brew.fc(m, fc4, "fc5", dim_in=output_dim, dim_out=output_dim)

            # Branch
            fc3b = brew.fc(m,
                           fc2,
                           "fc3b",
                           dim_in=output_dim,
                           dim_out=output_dim)
            fc4b = brew.fc(m,
                           fc3b,
                           "fc4b",
                           dim_in=output_dim,
                           dim_out=output_dim)
            fc5b = brew.fc(m,
                           fc4b,
                           "fc5b",
                           dim_in=output_dim,
                           dim_out=output_dim)

            fc5sum = brew.sum(m, [fc5, fc5b], "fc5sum")

            fc5.Relu([], fc5sum) \
               .Softmax([], "pred1") \
               .LabelCrossEntropy(["label"], ["xent1"]) \
               .AveragedLoss([], "loss1")
            fc6 = brew.fc(m, fc5, "fc6", dim_in=output_dim, dim_out=output_dim)
            fc6.Relu([], fc6) \
               .Softmax([], "pred2") \
               .LabelCrossEntropy(["label"], ["xent2"]) \
               .AveragedLoss([], "loss2")

        blobs_before = count_blobs(m.net.Proto())
        optim_proto = memonger.optimize_inference_for_dag(
            m.net, ["name_x/data"], "name_x")
        blobs_after = count_blobs(optim_proto)
        self.assertLess(blobs_after, blobs_before)

        # Test networks produce exactly same results
        data = np.random.randn(batch_size, input_dim).astype(np.float32)
        label = np.random.randint(low=0, high=output_dim,
                                  size=(batch_size, )).astype(np.int32)
        workspace.RunNetOnce(m.param_init_net)
        workspace.FeedBlob("name_x/data", data)
        workspace.FeedBlob("name_x/label", label)
        workspace.RunNetOnce(m.net)
        loss1 = workspace.FetchBlob("name_x/loss1")
        loss2 = workspace.FetchBlob("name_x/loss2")
        workspace.RunNetOnce(optim_proto)
        optimized_loss1 = workspace.FetchBlob("name_x/loss1")
        optimized_loss2 = workspace.FetchBlob("name_x/loss2")
        np.testing.assert_almost_equal(loss1, optimized_loss1)
        np.testing.assert_almost_equal(loss2, optimized_loss2)

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    def test_forward_optim_tree_harder(self, input_dim, output_dim,
                                       batch_size):
        m = model_helper.ModelHelper()
        m.net.Proto().type = "dag"
        m.net.Proto().num_workers = 4
        m.net.AddExternalInput("label")
        m.net.AddExternalInput("data")

        with core.NameScope("name_x"):
            fc1 = brew.fc(m,
                          "data",
                          "fc1",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)

            fc3 = brew.fc(m, fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
            fc4 = brew.fc(m, fc3, "fc4", dim_in=output_dim, dim_out=output_dim)
            fc5 = brew.fc(m, fc4, "fc5", dim_in=output_dim, dim_out=output_dim)

            # Branch
            fc3b = brew.fc(m,
                           fc2,
                           "fc3b",
                           dim_in=output_dim,
                           dim_out=output_dim)
            fc4b = brew.fc(m,
                           fc3b,
                           "fc4b",
                           dim_in=output_dim,
                           dim_out=output_dim)
            fc5b = brew.fc(m,
                           fc4b,
                           "fc5b",
                           dim_in=output_dim,
                           dim_out=output_dim)

            fc5sum = brew.sum(m, [fc5, fc5b], "fc5sum")
            fc5sum.Relu([], "relu1") \
               .Softmax([], "pred1") \
               .LabelCrossEntropy(["label"], ["xent1"]) \
               .AveragedLoss([], "loss1")
            fc6 = brew.fc(m, fc5, "fc6", dim_in=output_dim, dim_out=output_dim)
            fc6.Relu([], fc6) \
               .Softmax([], "pred2") \
               .LabelCrossEntropy(["label"], ["xent2"]) \
               .AveragedLoss([], "loss2")

        blobs_before = count_blobs(m.net.Proto())
        optim_proto = memonger.optimize_inference_for_dag(
            m.net, ["name_x/data"], "name_x/")

        blobs_after = count_blobs(optim_proto)

        # Extra test for when one of the parameters is also an input;
        # this caused a bug before.
        optim_proto_extra_input = memonger.optimize_inference_for_dag(
            m.net, ["name_x/data", "name_x/fc1_w"], "name_x/")
        blobs_after_extra_input = count_blobs(optim_proto_extra_input)
        self.assertEqual(blobs_after, blobs_after_extra_input)
        ###

        print(str(optim_proto))
        self.assertLess(blobs_after, blobs_before)

        # Test networks produce exactly same results
        data = np.random.randn(batch_size, input_dim).astype(np.float32)
        label = np.random.randint(low=0, high=output_dim,
                                  size=(batch_size, )).astype(np.int32)
        workspace.RunNetOnce(m.param_init_net)
        workspace.FeedBlob("name_x/data", data)
        workspace.FeedBlob("name_x/label", label)
        workspace.RunNetOnce(m.net)
        loss1 = workspace.FetchBlob("name_x/loss1")
        loss2 = workspace.FetchBlob("name_x/loss2")
        workspace.RunNetOnce(optim_proto)
        optimized_loss1 = workspace.FetchBlob("name_x/loss1")
        optimized_loss2 = workspace.FetchBlob("name_x/loss2")
        np.testing.assert_almost_equal(loss1, optimized_loss1)
        np.testing.assert_almost_equal(loss2, optimized_loss2)

    def test_rnn(self):
        from caffe2.python import rnn_cell
        T = 5
        model = model_helper.ModelHelper()
        seq_lengths, labels = \
            model.net.AddExternalInputs(
                'seq_lengths', 'labels',
            )
        init_blobs = []
        for i in range(2):
            hidden_init, cell_init = model.net.AddExternalInputs(
                "hidden_init_{}".format(i), "cell_init_{}".format(i))
            init_blobs.extend([hidden_init, cell_init])
        model.param_init_net.ConstantFill([], ["input"], shape=[T, 4, 10])
        output, last_hidden, _, last_state = rnn_cell.LSTM(
            model=model,
            input_blob="input",
            seq_lengths=seq_lengths,
            initial_states=init_blobs,
            dim_in=10,
            dim_out=[10, 10],
            scope="lstm1",
            forward_only=False,
            drop_states=True,
            return_last_layer_only=True,
        )
        softmax, loss = model.net.SoftmaxWithLoss(
            [model.Flatten(output), "labels"],
            ['softmax', 'loss'],
        )

        model.AddGradientOperators([loss])
        blobs_before = count_blobs(model.net.Proto())
        optim_proto = memonger.share_grad_blobs(
            model.net,
            ["loss"],
            set(viewvalues(model.param_to_grad)),
            "",
            share_activations=True,
            dont_share_blobs=set(),
        )
        blobs_after = count_blobs(optim_proto)
        self.assertLess(blobs_after, blobs_before)

        # Run once to see all blobs are set up correctly
        for init_blob in init_blobs:
            workspace.FeedBlob(init_blob, np.zeros([1, 4, 10],
                                                   dtype=np.float32))
        workspace.FeedBlob("seq_lengths", np.array([T] * 4, dtype=np.int32))
        workspace.FeedBlob("labels", np.random.rand(T).astype(np.int32))

        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)

    def test_compute_interference_graph_inplace_ops(self):
        m = model_helper.ModelHelper()
        m.Copy("b1", "b1")
        m.Copy("b1", "b1")
        m.Copy("b1", "b1")
        g = memonger.compute_interference_graph(m.net.Proto().op)
        self.assertEqual(list(g.edges()), [(0, 1), (0, 2), (1, 2)])

    def test_topological_sort_longest_path(self):
        m = model_helper.ModelHelper()
        # 0
        m.Copy("conv0_w_comp", "conv0_w")
        # 1
        conv0 = brew.conv(m, "data", "conv0", 32, 32, 4)
        # 2
        m.Copy("conv2_w", "conv2_w")
        # 3
        brew.conv(m, conv0, "conv2", 16, 32, 4)

        g = memonger.compute_interference_graph(m.net.Proto().op)

        orders_org = memonger.topological_sort_traversal(g)
        orders_gt_org = [2, 0, 1, 3]
        self.assertEqual(orders_gt_org, list(orders_org))

        orders = memonger.topological_sort_traversal_longest_path(g)
        # the longer path comes before the shorter one
        orders_gt = [0, 1, 2, 3]
        self.assertEqual(orders_gt, list(orders))

    def test_topological_sort_longest_path_multi_target(self):
        # two outputs: conv2 and data4
        m = model_helper.ModelHelper()
        # 0
        m.Copy("conv0_w_comp", "conv0_w")
        # 1
        conv0 = brew.conv(m, "data", "conv0", 32, 32, 4)
        # 2
        m.Copy("conv2_w", "conv2_w")
        # 3
        brew.conv(m, conv0, "conv2", 16, 32, 4)
        # 4
        m.Copy("data1", "data2")
        # 5
        m.Copy("data2", "data3")

        g = memonger.compute_interference_graph(m.net.Proto().op)

        orders_org = memonger.topological_sort_traversal(g)
        orders_gt_org = [4, 5, 2, 0, 1, 3]
        self.assertEqual(orders_gt_org, list(orders_org))

        orders = memonger.topological_sort_traversal_longest_path(g)
        # the longer path comes before the shorter one
        orders_gt = [0, 1, 2, 3, 4, 5]
        self.assertEqual(orders_gt, list(orders))

    def test_topological_sort_longest_path_single_node(self):
        # single node
        m = model_helper.ModelHelper()
        # 0
        m.Copy("conv0_w_comp", "conv0_w")

        g = memonger.compute_interference_graph(m.net.Proto().op)

        orders_org = memonger.topological_sort_traversal(g)
        orders_gt_org = [0]
        self.assertEqual(orders_gt_org, list(orders_org))

        orders = memonger.topological_sort_traversal_longest_path(g)
        # the longer path comes before the shorter one
        orders_gt = [0]
        self.assertEqual(orders_gt, list(orders))

    def test_compute_assignments_greedy(self):
        LiveRange = memonger.LiveRange
        ranges_sorted = [
            ('b1', LiveRange(1, 3, 10)),
            ('b2', LiveRange(3, 4, 1)),
            ('b3', LiveRange(5, 6, 1)),
            ('b4', LiveRange(5, 7, 10)),
        ]
        assignment_gt = [
            [ranges_sorted[0], ranges_sorted[3]],
            [ranges_sorted[1], ranges_sorted[2]],
        ]

        best = memonger.compute_assignments_greedy(ranges_sorted, None)
        self.assertEqual(memonger.get_memory_usage(best), 11)
        self.assertEqual(best, assignment_gt)

    def test_compute_assignments_dp(self):
        LiveRange = memonger.LiveRange
        ranges_sorted = [
            ('b1', LiveRange(1, 3, 10)),
            ('b2', LiveRange(3, 4, 1)),
            ('b3', LiveRange(5, 6, 1)),
            ('b4', LiveRange(5, 7, 10)),
        ]

        best = memonger.compute_assignments_dp(ranges_sorted, None)
        self.assertEqual(memonger.get_memory_usage(best), 11)

    def test_compute_assignments_dp1(self):
        LiveRange = memonger.LiveRange
        ranges_sorted = [
            ('b1', LiveRange(1, 2, 10)),
            ('b2', LiveRange(4, 6, 1)),
            ('b3', LiveRange(5, 6, 10)),
        ]

        best = memonger.compute_assignments_dp(ranges_sorted, [])
        self.assertEqual(memonger.get_memory_usage(best), 11)

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    def test_verify_graph_equality(self, input_dim, output_dim, batch_size):
        m = model_helper.ModelHelper()
        m.Proto().type = "dag"
        m.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m, "data", "x", dim_in=input_dim, dim_out=output_dim)
            fc2 = brew.fc(m, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc3 = brew.fc(m, fc1, "z", dim_in=output_dim, dim_out=output_dim)
            brew.sum(m, [fc2, fc3], "out")

        m2 = model_helper.ModelHelper()
        m2.Proto().type = "dag"
        m2.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m2,
                          "data",
                          "other_x",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2 = brew.fc(m2,
                          fc1,
                          "other_y",
                          dim_in=output_dim,
                          dim_out=output_dim)
            fc3 = brew.fc(m2,
                          fc1,
                          "other_z",
                          dim_in=output_dim,
                          dim_out=output_dim)
            brew.sum(m2, [fc2, fc3], "out")

        self.assertTrue(
            memonger.verify_graph_equality(m.net.Proto(), m2.net.Proto()))

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    def test_verify_graph_equality_harder(self, input_dim, output_dim,
                                          batch_size):
        m = model_helper.ModelHelper()
        m.Proto().type = "dag"
        m.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m, "data", "x", dim_in=input_dim, dim_out=output_dim)
            fc2a = brew.fc(m, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc2b = brew.fc(m, fc1, "z", dim_in=output_dim, dim_out=output_dim)
            fc3a = brew.fc(m, fc2a, "u", dim_in=output_dim, dim_out=output_dim)
            fc3b = brew.fc(m, fc2b, "v", dim_in=output_dim, dim_out=output_dim)
            brew.sum(m, [fc3a, fc3b], "out")

        m2 = model_helper.ModelHelper()
        m2.Proto().type = "dag"
        m2.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m2,
                          "data",
                          "x",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2a = brew.fc(m2, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc2b = brew.fc(m2, fc1, "z", dim_in=output_dim, dim_out=output_dim)
            fc3a = brew.fc(m2,
                           fc2a,
                           "y",
                           dim_in=output_dim,
                           dim_out=output_dim)
            fc3b = brew.fc(m2,
                           fc2b,
                           "z",
                           dim_in=output_dim,
                           dim_out=output_dim)
            brew.sum(m2, [fc3a, fc3b], "out")

        self.assertTrue(
            memonger.verify_graph_equality(m.net.Proto(), m2.net.Proto()))

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    def test_verify_graph_inequality(self, input_dim, output_dim, batch_size):
        m = model_helper.ModelHelper()
        m.Proto().type = "dag"
        m.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m, "data", "x", dim_in=input_dim, dim_out=output_dim)
            fc2 = brew.fc(m, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc3 = brew.fc(m, fc1, "z", dim_in=output_dim, dim_out=output_dim)
            brew.sum(m, [fc2, fc3], "out")

        m2 = model_helper.ModelHelper()
        m2.Proto().type = "dag"
        m2.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m2,
                          "data",
                          "x",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2 = brew.fc(m2, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc3 = brew.fc(m2, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            brew.sum(m2, [fc2, fc3], "out")

        self.assertFalse(
            memonger.verify_graph_equality(m.net.Proto(), m2.net.Proto()))

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    def test_verify_graph_inequality_harder(self, input_dim, output_dim,
                                            batch_size):
        m = model_helper.ModelHelper()
        m.Proto().type = "dag"
        m.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m, "data", "x", dim_in=input_dim, dim_out=output_dim)
            fc2a = brew.fc(m, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc2b = brew.fc(m, fc1, "z", dim_in=output_dim, dim_out=output_dim)
            fc3a = brew.fc(m, fc2a, "u", dim_in=output_dim, dim_out=output_dim)
            fc3b = brew.fc(m, fc2b, "v", dim_in=output_dim, dim_out=output_dim)
            brew.sum(m, [fc3a, fc3b], "out")

        m2 = model_helper.ModelHelper()
        m2.Proto().type = "dag"
        m2.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m2,
                          "data",
                          "x",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2a = brew.fc(m2, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc2b = brew.fc(m2, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc3a = brew.fc(m2,
                           fc2a,
                           "u",
                           dim_in=output_dim,
                           dim_out=output_dim)
            fc3b = brew.fc(m2,
                           fc2b,
                           "v",
                           dim_in=output_dim,
                           dim_out=output_dim)
            brew.sum(m2, [fc3a, fc3b], "out")

        self.assertFalse(
            memonger.verify_graph_equality(m.net.Proto(), m2.net.Proto()))

    def test_release_blobs_when_used(self):
        m = model_helper.ModelHelper()
        fc1 = brew.fc(m, "data", "x", dim_in=2, dim_out=2)
        fc2 = brew.fc(m, fc1, "y", dim_in=2, dim_out=2)
        fc3 = brew.fc(m, fc1, "z", dim_in=2, dim_out=2)
        fc4 = brew.fc(m, fc2, "u", dim_in=2, dim_out=2)
        m.net.Alias(["u"], ["u_alias"])

        brew.sum(m, [fc3, fc4], "out")

        with_frees = memonger.release_blobs_when_used(m.net.Proto(),
                                                      {"data"})

        expect_frees = {"x", "y", "z"}  # out is external output
        # and u is aliased so cannot be freed
        found_frees = set()
        for op in with_frees.op:
            if op.type == "Free":
                self.assertFalse(op.input[0] in found_frees)  # no double frees
                found_frees.add(op.input[0])
            else:
                # Check a freed blob is not used anymore
                for inp in op.input:
                    self.assertFalse(inp in found_frees)
                for outp in op.output:
                    self.assertFalse(outp in found_frees)

        self.assertEqual(expect_frees, found_frees)
Example #34
def test_settings_applied_twice_is_error():
    @given(st.integers())
    @settings()
    @settings()
    def test_nothing(x):
        pass
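Recent Hypothesis versions reject the second @settings at decoration time, so a test like this is normally wrapped in a raises block; a sketch, assuming InvalidArgument is still the exception used:

import pytest
from hypothesis import given, settings, strategies as st
from hypothesis.errors import InvalidArgument

def test_settings_applied_twice_raises():
    with pytest.raises(InvalidArgument):
        @given(st.integers())
        @settings()
        @settings()
        def test_nothing(x):
            pass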
Example #35
@pytest.mark.parametrize(
    "approach_level_1, branch_distance_1, approach_level_2, "
    "branch_distance_2, result",
    [
        pytest.param(1, 2, 1, 2, False),
        pytest.param(1, 2, 2, 1, True),
        pytest.param(1, 2, 1, 3, True),
        pytest.param(2, 1, 1, 2, False),
    ],
)
def test_lt(approach_level_1, branch_distance_1, approach_level_2,
            branch_distance_2, result):
    cfd_1 = ControlFlowDistance(approach_level=approach_level_1,
                                branch_distance=branch_distance_1)
    cfd_2 = ControlFlowDistance(approach_level=approach_level_2,
                                branch_distance=branch_distance_2)
    assert (cfd_1 < cfd_2) == result


@given(level=st.integers())
def test_approach_level(level, control_flow_distance):
    assume(level >= 0)
    control_flow_distance.approach_level = level
    assert control_flow_distance.approach_level == level


@given(branch_distance=st.floats())
def test_branch_distance(branch_distance, control_flow_distance):
    assume(branch_distance >= 0)
    control_flow_distance.branch_distance = branch_distance
    assert control_flow_distance.branch_distance == branch_distance


def test_init_negative_approach_level():
    with pytest.raises(AssertionError):
        ControlFlowDistance(approach_level=-1, branch_distance=0)
Example #36
class Stuff(RuleBasedStateMachine):
    @rule(x=integers())
    def a_rule(self, x):
        pass
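To actually run the machine, Hypothesis's documented pattern is to expose its generated TestCase to the test runner:

# Hypothesis will generate and shrink sequences of a_rule calls
TestStuff = Stuff.TestCase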
Example #37
def test_detection_of_functions():
    @given(integers())
    def test(i):
        pass

    assert is_hypothesis_test(test)
Example #38
        w_slice = space.newslice(w(1), w_None, w(2))
        assert self.space.eq_w(space.getitem(w_str, w_slice), wb('el'))

    def test_listview_bytes(self):
        w_bytes = self.space.newbytes('abcd')
        assert self.space.listview_bytes(w_bytes) == list("abcd")


try:
    from hypothesis import given, strategies
except ImportError:
    pass
else:
    @given(u=strategies.binary(),
           start=strategies.integers(min_value=0, max_value=10),
           len1=strategies.integers(min_value=-1, max_value=10))
    def test_hypo_index_find(u, start, len1, space):
        if start + len1 < 0:
            return   # skip this case
        v = u[start : start + len1]
        w_u = space.wrap(u)
        w_v = space.wrap(v)
        expected = u.find(v, start, start + len1)
        try:
            w_index = space.call_method(w_u, 'index', w_v,
                                        space.newint(start),
                                        space.newint(start + len1))
        except OperationError as e:
            if not e.match(space, space.w_ValueError):
                raise
Example #39
0
from hypothesis import given
from hypothesis.strategies import integers, binary
from monocypher.bindings.crypto_hash import (
    crypto_blake2b,
    crypto_blake2b_init, crypto_blake2b_update, crypto_blake2b_final,
    BLAKE2B_KEY_MIN, BLAKE2B_KEY_MAX,
    BLAKE2B_HASH_MIN, BLAKE2B_HASH_MAX,
)

from tests.utils import get_vectors, chunked


MSG         = binary()
BLAKE2B_KEY = binary(min_size=BLAKE2B_KEY_MIN, max_size=BLAKE2B_KEY_MAX)
HASH_SIZE   = integers(min_value=BLAKE2B_HASH_MIN, max_value=BLAKE2B_HASH_MAX)
CHUNK_SIZE  = integers(min_value=1, max_value=200)


@given(MSG, BLAKE2B_KEY, HASH_SIZE, CHUNK_SIZE)
def test_crypto_blake2b(msg, key, hash_size, chunk_size):
    digest = crypto_blake2b(msg, key, hash_size)
    ctx = crypto_blake2b_init(key, hash_size)

    for chunk in chunked(msg, chunk_size):
        crypto_blake2b_update(ctx, chunk)
    assert crypto_blake2b_final(ctx) == digest
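
# chunked() is imported from tests.utils above; a sketch of its assumed
# behaviour (yield successive slices of at most `size` bytes), for reference:
def chunked_sketch(data, size):
    for i in range(0, len(data), size):
        yield data[i:i + size]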


# test vectors
def test_crypto_blake2b_vectors():
    for vec in get_vectors('blake2-kat.json'):
Example #40
0
class TestReduceFrontSum(hu.HypothesisTestCase):
    @given(batch_size=st.integers(1, 3),
           stride=st.integers(1, 3),
           pad=st.integers(0, 3),
           kernel=st.integers(1, 5),
           dilation=st.integers(1, 3),
           size=st.integers(7, 10),
           channels=st.integers(1, 8),
           **hu.gcs)
    def test_im2col_layout(self, batch_size, stride, pad, kernel, dilation,
                           size, channels, gc, dc):

        dkernel = (dilation * (kernel - 1) + 1)
        assume(size >= dkernel)

        NCHW_TO_NHWC = (0, 2, 3, 1)
        NHWC_TO_NCHW = (0, 3, 1, 2)
        COL_NHWC_TO_NCHW = (4, 2, 3, 0, 1)

        N = batch_size
        C = channels
        H = size
        W = size

        out_h = int((H + (2 * pad) - dkernel) / stride + 1)
        out_w = int((W + (2 * pad) - dkernel) / stride + 1)
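        # i.e. the standard dilated-convolution output size:
        #   out = (in + 2 * pad - dkernel) // stride + 1,
        # with dkernel = dilation * (kernel - 1) + 1 from above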

        im_nchw = np.random.rand(N, C, H, W).astype(np.float32) - 0.5
        im_nhwc = im_nchw.transpose(NCHW_TO_NHWC)

        op_im2col_nchw = core.CreateOperator("Im2Col", ["im_nchw"],
                                             ["col_nchw"],
                                             stride=stride,
                                             kernel=kernel,
                                             dilation=dilation,
                                             pad=pad,
                                             order="NCHW",
                                             device_option=gc)

        op_im2col_nhwc = core.CreateOperator("Im2Col", ["im_nhwc"],
                                             ["col_nhwc"],
                                             stride=stride,
                                             kernel=kernel,
                                             dilation=dilation,
                                             pad=pad,
                                             order="NHWC",
                                             device_option=gc)

        self.ws.create_blob("im_nchw").feed(im_nchw, device_option=gc)
        self.ws.create_blob("im_nhwc").feed(im_nhwc, device_option=gc)
        self.ws.run(op_im2col_nchw)
        self.ws.run(op_im2col_nhwc)

        # there is probably a clever way to spell this in np
        col_nchw = self.ws.blobs["col_nchw"].fetch()
        col_nhwc = self.ws.blobs["col_nhwc"].fetch()
        col_nchw_ = col_nchw.reshape(N, C, kernel, kernel, out_h, out_w)
        col_nhwc_ = col_nhwc.reshape(N, out_h, out_w, kernel, kernel, C)
        for i in range(0, N):
            np.testing.assert_allclose(
                col_nchw_[i],
                col_nhwc_[i].transpose(COL_NHWC_TO_NCHW),
                atol=1e-4,
                rtol=1e-4)

        op_col2im_nchw = core.CreateOperator("Col2Im", ["col_nchw", "im_nchw"],
                                             ["out_nchw"],
                                             stride=stride,
                                             kernel=kernel,
                                             dilation=dilation,
                                             pad=pad,
                                             order="NCHW",
                                             device_option=gc)

        op_col2im_nhwc = core.CreateOperator("Col2Im", ["col_nhwc", "im_nhwc"],
                                             ["out_nhwc"],
                                             stride=stride,
                                             kernel=kernel,
                                             dilation=dilation,
                                             pad=pad,
                                             order="NHWC",
                                             device_option=gc)

        self.ws.run(op_col2im_nchw)
        self.ws.run(op_col2im_nhwc)

        out_nchw = self.ws.blobs["out_nchw"].fetch()
        out_nhwc = self.ws.blobs["out_nhwc"].fetch()
        np.testing.assert_allclose(out_nchw,
                                   out_nhwc.transpose(NHWC_TO_NCHW),
                                   atol=1e-4,
                                   rtol=1e-4)

    @given(batch_size=st.integers(1, 3),
           stride=st.integers(1, 3),
           pad=st.integers(0, 3),
           kernel=st.integers(1, 5),
           dilation=st.integers(1, 3),
           size=st.integers(7, 10),
           channels=st.integers(1, 8),
           order=st.sampled_from(["NCHW"]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_col2im_gradients(self, batch_size, stride, pad, kernel, dilation,
                              size, channels, order, gc, dc):
        assume(size >= dilation * (kernel - 1) + 1)
        op = core.CreateOperator("Im2Col", ["X"], ["Y"],
                                 stride=stride,
                                 kernel=kernel,
                                 dilation=dilation,
                                 pad=pad,
                                 order=order,
                                 device_option=gc)
        X = np.random.rand(batch_size, channels, size, size).astype(np.float32)
        self.assertGradientChecks(gc, op, [X], 0, [0])
        return
Example #41
0

def test_pop():
    x = LazySequenceCopy([2, 3])
    assert x.pop() == 3
    assert x.pop() == 2

    with pytest.raises(IndexError):
        x.pop()
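
# A simplified stand-in for LazySequenceCopy, sufficient for test_pop; the
# real class in Hypothesis internals adds copy-on-write semantics:
class SimpleLazySequenceCopy:
    def __init__(self, values):
        self._values = list(values)

    def pop(self):
        # list.pop raises IndexError when empty, matching the test above
        return self._values.pop()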


@example(1, 5, 10)
@example(1, 10, 5)
@example(5, 10, 5)
@example(5, 1, 10)
@given(st.integers(), st.integers(), st.integers())
def test_clamp(lower, value, upper):
    lower, upper = sorted((lower, upper))

    clamped = clamp(lower, value, upper)

    assert lower <= clamped <= upper

    if lower <= value <= upper:
        assert value == clamped
    if lower > value:
        assert clamped == lower
    if value > upper:
        assert clamped == upper
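
# A minimal clamp() consistent with the properties above (a sketch; the
# helper under test lives in Hypothesis internals and may differ):
def clamp_sketch(lower, value, upper):
    return max(lower, min(value, upper))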

Example #42
0
@contextmanager  # reconstructed: the yield below implies contextlib.contextmanager
def protected_config():
    """
    Context manager to be used in all tests that modify the config to ensure
    that the config is left untouched even if the tests fail
    """
    ocfg: DotDict = qc.config.current_config
    original_config = deepcopy(ocfg)

    try:
        yield
    finally:
        qc.config.current_config = original_config


@settings(max_examples=50, deadline=1000)
@given(loc=hst.integers(0, 255),
       stat=hst.integers(0, 65535),
       smpl=hst.integers(0, 4294967295))
def test_generate_guid(loc, stat, smpl):
    # update config to generate a particular guid. Read it back to verify
    with protected_config():
        cfg = qc.config
        cfg['GUID_components']['location'] = loc
        cfg['GUID_components']['work_station'] = stat
        cfg['GUID_components']['sample'] = smpl

        guid = generate_guid()
        gen_time = int(np.round(time.time() * 1000))

        comps = parse_guid(guid)
Example #43
0
from .lcm import lcm
from typing import Sequence, List
from hypothesis import given
from hypothesis.strategies import integers, lists


def test_example():
    assert lcm(range(1, 11)) == 2520


@given(integers(2))
def test_single(x: int):
    assert lcm([x]) == x


@given(integers(2, 10000), integers(1, 5))
def test_duplication_irrelevant(x: int, n: int):
    assert lcm([x] * n) == x


@given(lists(integers(2), min_size=2, max_size=10))
def test_divisibility(xs: List[int]):
    product = lcm(xs)
    assert all(product % x == 0 for x in xs)
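
# One implementation satisfying all three properties above, sketched under a
# separate name so it cannot shadow the imported lcm:
from functools import reduce
from math import gcd

def lcm_sketch(xs):
    # fold pairwise: lcm(a, b) == a * b // gcd(a, b)
    return reduce(lambda a, b: a * b // gcd(a, b), xs, 1)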
Example #44
0

class Foo(object):
    pass


foos = st.tuples().map(lambda _: Foo())


def test_can_create_arrays_of_composite_types():
    arr = minimal(nps.arrays(object, 100, foos))
    for x in arr:
        assert isinstance(x, Foo)


@given(st.lists(st.integers()), st.data())
def test_can_create_zero_dim_arrays_of_lists(x, data):
    arr = data.draw(nps.arrays(object, (), elements=st.just(x)))
    assert arr.shape == ()
    assert arr.dtype == np.dtype(object)
    assert arr.item() == x


def test_can_create_arrays_of_tuples():
    arr = minimal(
        nps.arrays(object, 10, st.tuples(st.integers(), st.integers())),
        lambda x: all(t0 != t1 for t0, t1 in x),
    )
    assert all(a in ((1, 0), (0, 1)) for a in arr)

Example #45
0
def test_large_branching_tree():
    tree = st.deferred(
        lambda: st.integers() | st.tuples(tree, tree, tree, tree, tree))
    assert minimal(tree) == 0
    assert minimal(tree, lambda x: isinstance(x, tuple)) == (0, ) * 5
Example #46
0
from hypothesis.strategies import (
    booleans,
    dictionaries,
    floats,
    integers,
    lists,
    one_of,
    recursive,
    text,
)
from hypothesis.extra.numpy import (  # needed for guaranteed_dtypes below
    boolean_dtypes,
    floating_dtypes,
    integer_dtypes,
    unicode_string_dtypes,
)

import superintendent.multioutput.prioritisation
from superintendent import MultiLabeller
from superintendent.controls import MulticlassSubmitter

primitive_strategy = text() | integers() | floats(allow_nan=False) | booleans()

guaranteed_dtypes = (boolean_dtypes()
                     | integer_dtypes()
                     | floating_dtypes()
                     | unicode_string_dtypes())

container_strategy = dictionaries(
    text(), primitive_strategy) | lists(primitive_strategy)

nested_strategy = recursive(
    container_strategy,
    lambda children: lists(children) | dictionaries(text(), children),
)

Example #47
0
def test_binary_tree():
    tree = st.deferred(lambda: st.integers() | st.tuples(tree, tree))

    assert minimal(tree) == 0
    assert minimal(tree, lambda x: isinstance(x, tuple)) == (0, 0)
Example #48
0
@st.composite  # reconstructed: the draw argument implies a composite strategy
def listOfUniqueIntegrs(
    draw,
    elements=st.integers(min_value=1, max_value=10),
):
    xs = draw(st.lists(elements, max_size=5, min_size=5, unique=True))
    return xs
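
# Hypothetical usage of the composite strategy above:
@given(xs=listOfUniqueIntegrs())
def test_five_unique_small_ints(xs):
    assert len(xs) == 5 and len(set(xs)) == 5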
Example #49
0
class Foo:
    @given(integers())
    def test(self, i):
        pass
Example #50
0
class TestDecryptionMediator(TestCase):
    NUMBER_OF_GUARDIANS = 3
    QUORUM = 2
    CEREMONY_DETAILS = CeremonyDetails(NUMBER_OF_GUARDIANS, QUORUM)

    def setUp(self):

        self.key_ceremony = KeyCeremonyMediator(self.CEREMONY_DETAILS)

        self.guardians: List[Guardian] = []

        # Setup Guardians
        for i in range(self.NUMBER_OF_GUARDIANS):
            sequence = i + 2
            self.guardians.append(
                Guardian(
                    "guardian_" + str(sequence),
                    sequence,
                    self.NUMBER_OF_GUARDIANS,
                    self.QUORUM,
                ))

        # Attendance (Public Key Share)
        for guardian in self.guardians:
            self.key_ceremony.announce(guardian)

        self.key_ceremony.orchestrate(identity_auxiliary_encrypt)
        self.key_ceremony.verify(identity_auxiliary_decrypt)

        self.joint_public_key = self.key_ceremony.publish_joint_key()
        self.assertIsNotNone(self.joint_public_key)

        # setup the election
        self.election = election_factory.get_fake_election()
        builder = ElectionBuilder(self.NUMBER_OF_GUARDIANS, self.QUORUM,
                                  self.election)

        self.assertIsNone(
            builder.build())  # Can't build without the public key

        builder.set_public_key(self.joint_public_key)
        self.metadata, self.context = get_optional(builder.build())

        self.encryption_device = EncryptionDevice("location")
        self.ballot_marking_device = EncryptionMediator(
            self.metadata, self.context, self.encryption_device)

        # get some fake ballots
        self.fake_cast_ballot = ballot_factory.get_fake_ballot(
            self.metadata, "some-unique-ballot-id-cast")
        self.more_fake_ballots = []
        for i in range(10):
            self.more_fake_ballots.append(
                ballot_factory.get_fake_ballot(
                    self.metadata, f"some-unique-ballot-id-cast{i}"))
        self.fake_spoiled_ballot = ballot_factory.get_fake_ballot(
            self.metadata, "some-unique-ballot-id-spoiled")
        self.more_fake_spoiled_ballots = []
        for i in range(2):
            self.more_fake_spoiled_ballots.append(
                ballot_factory.get_fake_ballot(
                    self.metadata, f"some-unique-ballot-id-spoiled{i}"))
        self.assertTrue(
            self.fake_cast_ballot.is_valid(
                self.metadata.ballot_styles[0].object_id))
        self.assertTrue(
            self.fake_spoiled_ballot.is_valid(
                self.metadata.ballot_styles[0].object_id))
        self.expected_plaintext_tally = accumulate_plaintext_ballots(
            [self.fake_cast_ballot] + self.more_fake_ballots)

        # Fill in the expected values with any missing selections
        # that were not made on any ballots
        selection_ids = set([
            selection.object_id for contest in self.metadata.contests
            for selection in contest.ballot_selections
        ])

        missing_selection_ids = selection_ids.difference(
            set(self.expected_plaintext_tally))

        for selection_id in missing_selection_ids:
            self.expected_plaintext_tally[selection_id] = 0

        # Encrypt
        self.encrypted_fake_cast_ballot = self.ballot_marking_device.encrypt(
            self.fake_cast_ballot)
        self.encrypted_fake_spoiled_ballot = self.ballot_marking_device.encrypt(
            self.fake_spoiled_ballot)
        self.assertIsNotNone(self.encrypted_fake_cast_ballot)
        self.assertIsNotNone(self.encrypted_fake_spoiled_ballot)
        self.assertTrue(
            self.encrypted_fake_cast_ballot.is_valid_encryption(
                self.metadata.description_hash,
                self.joint_public_key,
                self.context.crypto_extended_base_hash,
            ))

        # encrypt some more fake ballots
        self.more_fake_encrypted_ballots = []
        for fake_ballot in self.more_fake_ballots:
            self.more_fake_encrypted_ballots.append(
                self.ballot_marking_device.encrypt(fake_ballot))
        # encrypt some more fake ballots
        self.more_fake_encrypted_spoiled_ballots = []
        for fake_ballot in self.more_fake_spoiled_ballots:
            self.more_fake_encrypted_spoiled_ballots.append(
                self.ballot_marking_device.encrypt(fake_ballot))

        # configure the ballot box
        ballot_store = BallotStore()
        ballot_box = BallotBox(self.metadata, self.context, ballot_store)
        ballot_box.cast(self.encrypted_fake_cast_ballot)
        ballot_box.spoil(self.encrypted_fake_spoiled_ballot)

        # Cast some more fake ballots
        for fake_ballot in self.more_fake_encrypted_ballots:
            ballot_box.cast(fake_ballot)
        # Spoil some more fake ballots
        for fake_ballot in self.more_fake_encrypted_spoiled_ballots:
            ballot_box.spoil(fake_ballot)

        # generate encrypted tally
        self.ciphertext_tally = tally_ballots(ballot_store, self.metadata,
                                              self.context)

    def tearDown(self):
        self.key_ceremony.reset(
            CeremonyDetails(self.NUMBER_OF_GUARDIANS, self.QUORUM))

    def test_announce(self):
        # Arrange
        subject = DecryptionMediator(self.metadata, self.context,
                                     self.ciphertext_tally)

        # act
        result = subject.announce(self.guardians[0])

        # assert
        self.assertIsNotNone(result)

        # Can only announce once
        self.assertIsNotNone(subject.announce(self.guardians[0]))

        # Cannot submit another share internally
        self.assertFalse(
            subject._submit_decryption_share(
                TallyDecryptionShare(self.guardians[0].object_id, ZERO_MOD_P,
                                     {}, {})))

        # Cannot get plaintext tally without a quorum
        self.assertIsNone(subject.get_plaintext_tally())

    def test_compute_selection(self):
        # Arrange
        first_selection = [
            selection for contest in self.ciphertext_tally.cast.values()
            for selection in contest.tally_selections.values()
        ][0]

        # act
        result = compute_decryption_share_for_selection(
            self.guardians[0], first_selection, self.context)

        # assert
        self.assertIsNotNone(result)

    def test_compute_compensated_selection_failure(self):
        # Arrange
        first_selection = [
            selection for contest in self.ciphertext_tally.cast.values()
            for selection in contest.tally_selections.values()
        ][0]

        # Act
        self.guardians[0]._guardian_election_partial_key_backups.pop(
            self.guardians[2].object_id)

        self.assertIsNone(self.guardians[0].recovery_public_key_for(
            self.guardians[2].object_id))

        result = compute_compensated_decryption_share_for_selection(
            self.guardians[0],
            self.guardians[2].object_id,
            first_selection,
            self.context,
            identity_auxiliary_decrypt,
        )

        # Assert
        self.assertIsNone(result)

    def test_compute_compensated_selection(self):
        """
        demonstrates the complete workflow for computing a comepnsated decryption share
        For one selection. It is useful for verifying that the workflow is correct
        """
        # Arrange
        first_selection = [
            selection for contest in self.ciphertext_tally.cast.values()
            for selection in contest.tally_selections.values()
        ][0]

        # Compute lagrange coefficients for the guardians that are present
        lagrange_0 = compute_lagrange_coefficient(
            self.guardians[0].sequence_order,
            *[self.guardians[1].sequence_order],
        )
        lagrange_1 = compute_lagrange_coefficient(
            self.guardians[1].sequence_order,
            *[self.guardians[0].sequence_order],
        )

        print(
            f"lagrange: sequence_orders: ({self.guardians[0].sequence_order}, {self.guardians[1].sequence_order}, {self.guardians[2].sequence_order})\n"
        )

        print(lagrange_0)
        print(lagrange_1)

        # compute their shares
        share_0 = compute_decryption_share_for_selection(
            self.guardians[0], first_selection, self.context)

        share_1 = compute_decryption_share_for_selection(
            self.guardians[1], first_selection, self.context)

        self.assertIsNotNone(share_0)
        self.assertIsNotNone(share_1)

        # compute compensations shares for the missing guardian
        compensation_0 = compute_compensated_decryption_share_for_selection(
            self.guardians[0],
            self.guardians[2].object_id,
            first_selection,
            self.context,
            identity_auxiliary_decrypt,
        )

        compensation_1 = compute_compensated_decryption_share_for_selection(
            self.guardians[1],
            self.guardians[2].object_id,
            first_selection,
            self.context,
            identity_auxiliary_decrypt,
        )

        self.assertIsNotNone(compensation_0)
        self.assertIsNotNone(compensation_1)

        print("\nSHARES:")
        print(compensation_0)
        print(compensation_1)

        # Check the share proofs
        self.assertTrue(
            compensation_0.proof.is_valid(
                first_selection.ciphertext,
                get_optional(self.guardians[0].recovery_public_key_for(
                    self.guardians[2].object_id)),
                compensation_0.share,
                self.context.crypto_extended_base_hash,
            ))

        self.assertTrue(
            compensation_1.proof.is_valid(
                first_selection.ciphertext,
                get_optional(self.guardians[1].recovery_public_key_for(
                    self.guardians[2].object_id)),
                compensation_1.share,
                self.context.crypto_extended_base_hash,
            ))

        share_pow_p = [
            pow_p(compensation_0.share, lagrange_0),
            pow_p(compensation_1.share, lagrange_1),
        ]

        print("\nSHARE_POW_P")
        print(share_pow_p)

        # reconstruct the missing share from the compensation shares
        # (reusing share_pow_p computed above instead of rebuilding the list)
        reconstructed_share = mult_p(*share_pow_p)
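        # This product is Lagrange interpolation in the exponent: the missing
        # guardian's share M_k = prod_i M_{k,i} ** l_i (mod p), with l_i the
        # Lagrange coefficients computed above for the available guardians.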

        print("\nRECONSTRUCTED SHARE\n")
        print(reconstructed_share)

        share_2 = create_ciphertext_decryption_selection(
            first_selection.object_id,
            self.guardians[2].object_id,
            first_selection.description_hash,
            reconstructed_share,
            {
                self.guardians[0].object_id: compensation_0,
                self.guardians[1].object_id: compensation_1,
            },
        )

        # Decrypt the result
        result = decrypt_selection_with_decryption_shares(
            first_selection,
            {
                self.guardians[0].object_id: (
                    self.guardians[0].share_election_public_key().key,
                    share_0,
                ),
                self.guardians[1].object_id: (
                    self.guardians[1].share_election_public_key().key,
                    share_1,
                ),
                self.guardians[2].object_id: (
                    self.guardians[2].share_election_public_key().key,
                    share_2,
                ),
            },
            self.context.crypto_extended_base_hash,
        )

        print(result)

        self.assertIsNotNone(result)
        self.assertEqual(
            result.tally,
            self.expected_plaintext_tally[first_selection.object_id])

    def test_decrypt_selection_all_present(self):
        # Arrange

        # find the first selection
        first_contest = list(self.ciphertext_tally.cast.values())[0]
        first_selection = list(first_contest.tally_selections.values())[0]

        # precompute decryption shares for the guardians
        first_share = compute_decryption_share(self.guardians[0],
                                               self.ciphertext_tally,
                                               self.context)
        second_share = compute_decryption_share(self.guardians[1],
                                                self.ciphertext_tally,
                                                self.context)
        third_share = compute_decryption_share(self.guardians[2],
                                               self.ciphertext_tally,
                                               self.context)

        # build type: Dict[GUARDIAN_ID, Tuple[ELECTION_PUBLIC_KEY, TallyDecryptionShare]]
        shares = {
            self.guardians[0].object_id: (
                self.guardians[0].share_election_public_key().key,
                first_share.contests[first_contest.object_id].selections[
                    first_selection.object_id],
            ),
            self.guardians[1].object_id: (
                self.guardians[1].share_election_public_key().key,
                second_share.contests[first_contest.object_id].selections[
                    first_selection.object_id],
            ),
            self.guardians[2].object_id: (
                self.guardians[2].share_election_public_key().key,
                third_share.contests[first_contest.object_id].selections[
                    first_selection.object_id],
            ),
        }

        # act
        result = decrypt_selection_with_decryption_shares(
            first_selection, shares, self.context.crypto_extended_base_hash)

        # assert
        self.assertIsNotNone(result)
        self.assertEqual(
            self.expected_plaintext_tally[first_selection.object_id],
            result.tally)

    def test_decrypt_ballot_compensate_all_guardians_present(self):
        # Arrange
        # precompute decryption shares for the guardians
        plaintext_ballot = self.fake_cast_ballot
        encrypted_ballot = self.encrypted_fake_cast_ballot
        shares = {
            guardian.object_id:
            compute_decryption_share_for_ballot(guardian, encrypted_ballot,
                                                self.context)
            for guardian in self.guardians[0:3]
        }

        # act
        result = decrypt_ballot(
            encrypted_ballot,
            shares,
            self.context.crypto_extended_base_hash,
        )

        # assert
        self.assertIsNotNone(result)

        for contest in plaintext_ballot.contests:
            for selection in contest.ballot_selections:
                expected_tally = 0 if selection.vote == "False" else 1
                actual_tally = (result[contest.object_id].selections[
                    selection.object_id].tally)
                self.assertEqual(expected_tally, actual_tally)

    def test_decrypt_ballot_compensate_missing_guardians(self):
        # Arrange
        # precompute decryption shares for the guardians
        plaintext_ballot = self.fake_cast_ballot
        encrypted_ballot = self.encrypted_fake_cast_ballot
        available_guardians = self.guardians[0:2]
        missing_guardian = self.guardians[2]
        missing_guardian_id = missing_guardian.object_id

        shares = {
            guardian.object_id:
            compute_decryption_share_for_ballot(guardian, encrypted_ballot,
                                                self.context)
            for guardian in available_guardians
        }
        compensated_shares = {
            guardian.object_id:
            compute_compensated_decryption_share_for_ballot(
                guardian,
                missing_guardian_id,
                encrypted_ballot,
                self.context,
                identity_auxiliary_decrypt,
            )
            for guardian in available_guardians
        }

        lagrange_coefficients = compute_lagrange_coefficients_for_guardians(
            [guardian.share_public_keys() for guardian in available_guardians])
        public_key = (available_guardians[0].guardian_election_public_keys().
                      get(missing_guardian_id))

        reconstructed_share = reconstruct_decryption_ballot(
            missing_guardian_id,
            public_key,
            encrypted_ballot,
            compensated_shares,
            lagrange_coefficients,
        )

        all_shares = {**shares, missing_guardian_id: reconstructed_share}

        # act
        result = decrypt_ballot(
            encrypted_ballot,
            all_shares,
            self.context.crypto_extended_base_hash,
        )

        # assert
        self.assertIsNotNone(result)

        for contest in plaintext_ballot.contests:
            for selection in contest.ballot_selections:
                expected_tally = 0 if selection.vote == "False" else 1
                actual_tally = (result[contest.object_id].selections[
                    selection.object_id].tally)
                self.assertEqual(expected_tally, actual_tally)

    def test_decrypt_spoiled_ballots_all_guardians_present(self):
        # Arrange
        # precompute decryption shares for the guardians
        first_share = compute_decryption_share(self.guardians[0],
                                               self.ciphertext_tally,
                                               self.context)
        second_share = compute_decryption_share(self.guardians[1],
                                                self.ciphertext_tally,
                                                self.context)
        third_share = compute_decryption_share(self.guardians[2],
                                               self.ciphertext_tally,
                                               self.context)
        shares = {
            self.guardians[0].object_id: first_share,
            self.guardians[1].object_id: second_share,
            self.guardians[2].object_id: third_share,
        }

        # act
        result = decrypt_spoiled_ballots(
            self.ciphertext_tally.spoiled_ballots,
            shares,
            self.context.crypto_extended_base_hash,
        )

        # assert
        self.assertIsNotNone(result)
        self.assertTrue(self.fake_spoiled_ballot.object_id in result)

        spoiled_ballot = result[self.fake_spoiled_ballot.object_id]
        for contest in self.fake_spoiled_ballot.contests:
            for selection in contest.ballot_selections:
                # expected value assumed to mirror the ballot-decryption tests
                # above (the original snippet compared the tally to itself)
                expected_tally = 0 if selection.vote == "False" else 1
                self.assertEqual(
                    expected_tally,
                    spoiled_ballot[contest.object_id].selections[
                        selection.object_id].tally,
                )

    def test_get_plaintext_tally_all_guardians_present_simple(self):
        # Arrange
        subject = DecryptionMediator(self.metadata, self.context,
                                     self.ciphertext_tally)

        # act
        for guardian in self.guardians:
            self.assertIsNotNone(subject.announce(guardian))

        decrypted_tallies = subject.get_plaintext_tally()
        result = self._convert_to_selections(decrypted_tallies)

        # assert
        self.assertIsNotNone(result)
        self.assertEqual(self.expected_plaintext_tally, result)

        # Verify we get the same tally back if we call again
        another_decrypted_tally = subject.get_plaintext_tally()

        self.assertEqual(decrypted_tallies, another_decrypted_tally)

    def test_get_plaintext_tally_compensate_missing_guardian_simple(self):

        # Arrange
        subject = DecryptionMediator(self.metadata, self.context,
                                     self.ciphertext_tally)

        # Act

        self.assertIsNotNone(subject.announce(self.guardians[0]))
        self.assertIsNotNone(subject.announce(self.guardians[1]))

        # explicitly compensate to demonstrate that this is possible, but not required
        self.assertIsNotNone(
            subject.compensate(self.guardians[2].object_id,
                               identity_auxiliary_decrypt))

        decrypted_tallies = subject.get_plaintext_tally()
        self.assertIsNotNone(decrypted_tallies)
        result = self._convert_to_selections(decrypted_tallies)

        # assert
        self.assertIsNotNone(result)
        print(result)
        self.assertEqual(self.expected_plaintext_tally, result)

    @settings(
        deadline=timedelta(milliseconds=15000),
        suppress_health_check=[HealthCheck.too_slow],
        max_examples=8,
        # disabling the "shrink" phase, because it runs very slowly
        phases=[Phase.explicit, Phase.reuse, Phase.generate, Phase.target],
    )
    @given(data(), integers(1, 3), integers(2, 5))
    def test_get_plaintext_tally_all_guardians_present(self, data,
                                                       parties: int,
                                                       contests: int):
        # Arrange
        description = data.draw(election_descriptions(parties, contests))
        builder = ElectionBuilder(self.NUMBER_OF_GUARDIANS, self.QUORUM,
                                  description)
        metadata, context = builder.set_public_key(
            self.joint_public_key).build()

        plaintext_ballots: List[PlaintextBallot] = data.draw(
            plaintext_voted_ballots(metadata, randrange(3, 6)))
        plaintext_tallies = accumulate_plaintext_ballots(plaintext_ballots)

        encrypted_tally = self._generate_encrypted_tally(
            metadata, context, plaintext_ballots)

        subject = DecryptionMediator(metadata, context, encrypted_tally)

        # act
        for guardian in self.guardians:
            self.assertIsNotNone(subject.announce(guardian))

        decrypted_tallies = subject.get_plaintext_tally()
        result = self._convert_to_selections(decrypted_tallies)

        # assert
        self.assertIsNotNone(result)
        self.assertEqual(plaintext_tallies, result)

    def _generate_encrypted_tally(
        self,
        metadata: InternalElectionDescription,
        context: CiphertextElectionContext,
        ballots: List[PlaintextBallot],
    ) -> CiphertextTally:

        # encrypt each ballot
        store = BallotStore()
        for ballot in ballots:
            encrypted_ballot = encrypt_ballot(ballot, metadata, context,
                                              int_to_q_unchecked(1))
            self.assertIsNotNone(encrypted_ballot)
            # add to the ballot store
            store.set(
                encrypted_ballot.object_id,
                from_ciphertext_ballot(encrypted_ballot, BallotBoxState.CAST),
            )

        tally = tally_ballots(store, metadata, context)
        self.assertIsNotNone(tally)
        return get_optional(tally)

    def _convert_to_selections(self, tally: PlaintextTally) -> Dict[str, int]:
        plaintext_selections: Dict[str, int] = {}
        for _, contest in tally.contests.items():
            for selection_id, selection in contest.selections.items():
                plaintext_selections[selection_id] = selection.tally

        return plaintext_selections
Example #51
0
#     print('Winners: ', winners)
#     print('Losers: ', [_.name for _ in world.factory_managers if _.name not in winners])
#     data = pd.DataFrame(data=world.saved_contracts).loc[:, ["buyer", "seller", "product", "quantity", "delivery_time"
#     , "unit_price", "penalty", "signing_delay", "concluded_at", "signed_at", "issues", "cfp"]]
#     data.to_csv(f'{logdir()}/contracts.csv')


def logdir():
    return pkg_resources.resource_filename("negmas", resource_name="tests")


@settings(deadline=None,
          suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(
    single_checkpoint=st.booleans(),
    checkpoint_every=st.integers(0, 6),
    exist_ok=st.booleans(),
)
def test_world_auto_checkpoint(tmp_path, single_checkpoint, checkpoint_every,
                               exist_ok):
    import shutil

    new_folder: Path = tmp_path / unique_name("empty", sep="")
    new_folder.mkdir(parents=True, exist_ok=True)
    shutil.rmtree(new_folder)
    new_folder.mkdir(parents=True, exist_ok=True)
    filename = "scml"
    n_steps = 5

    world = SCML2019World.chain_world(
        log_file_name="",
Example #52
0

@use_several_sizes
def test_step_will_not_be_zero(size):
    assert_all_examples(st.slices(size), lambda x: x.step != 0)


@use_several_sizes
def test_slices_will_shrink(size):
    sliced = minimal(st.slices(size))
    assert sliced.start == 0 or sliced.start is None
    assert sliced.stop == 0 or sliced.stop is None
    assert sliced.step is None


@given(st.integers(1, 1000))
@settings(deadline=None)
def test_step_will_be_negative(size):
    find_any(st.slices(size), lambda x: (x.step or 1) < 0,
             settings(max_examples=10**6))


@given(st.integers(1, 1000))
@settings(deadline=None)
def test_step_will_be_positive(size):
    find_any(st.slices(size), lambda x: (x.step or 1) > 0)


@pytest.mark.parametrize("size", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
def test_stop_will_equal_size(size):
    find_any(st.slices(size), lambda x: x.stop == size)
Example #53
0
from ddht.v5_1.alexandria.messages import (  # import head reconstructed; original list truncated
    PingMessage,
    PongMessage,
    decode_message,
)
from ddht.v5_1.alexandria.payloads import (
    FindContentPayload,
    FindNodesPayload,
    FoundContentPayload,
    FoundNodesPayload,
    PingPayload,
    PongPayload,
)


@given(
    enr_seq=st.integers(min_value=0, max_value=2**32 - 1),
    advertisement_radius=st.integers(min_value=0, max_value=2**256 - 1),
)
def test_ping_message_encoding_round_trip(enr_seq, advertisement_radius):
    payload = PingPayload(enr_seq=enr_seq,
                          advertisement_radius=advertisement_radius)
    message = PingMessage(payload)
    encoded = message.to_wire_bytes()
    result = decode_message(encoded)
    assert result == message


@given(
    enr_seq=st.integers(min_value=0, max_value=2**32 - 1),
    advertisement_radius=st.integers(min_value=0, max_value=2**256 - 1),
)
Example #54
0
def test_can_create_arrays_of_tuples():
    arr = find(arrays(object, 10, st.tuples(st.integers(), st.integers())),
               lambda x: all(t[0] < t[1] for t in x))
    for a in arr:
        assert a in ((0, 1), (-1, 0))
Example #55
0
def test_supports_positional_and_keyword_args_in_builds():
    assert repr(st.builds(hi, st.integers(), there=st.booleans())) == \
        'builds(hi, integers(), there=booleans())'
Example #56
0
class TestFcOperator(hu.HypothesisTestCase):
    def _run_test(self, n, m, k, transposed, multi_dim, dtype, engine, gc, dc):
        if dtype == np.float16:
            # fp16 only supported with CUDA
            assume(gc.device_type == caffe2_pb2.CUDA)
            dc = [d for d in dc if d.device_type == caffe2_pb2.CUDA]

        if engine == 'TENSORCORE':
            # TensorCore only makes sense with CUDA
            assume(gc.device_type == caffe2_pb2.CUDA)
            # ensures TensorCore kernels can be called
            m *= 8
            k *= 8
            n *= 8

        X = np.random.rand(m, k).astype(dtype) - 0.5
        if multi_dim:
            if transposed:
                W = np.random.rand(k, n, 1, 1).astype(dtype) - 0.5
            else:
                W = np.random.rand(n, k, 1, 1).astype(dtype) - 0.5
        else:
            if transposed:
                W = np.random.rand(k, n).astype(dtype) - 0.5
            else:
                W = np.random.rand(n, k).astype(dtype) - 0.5
        b = np.random.rand(n).astype(dtype) - 0.5

        def fc_op(X, W, b):
            return [np.dot(X, W.reshape(n, k).transpose()) + b.reshape(n)]

        def fc_transposed_op(X, W, b):
            return [np.dot(X, W.reshape(k, n)) + b.reshape(n)]

        op = core.CreateOperator(
            'FCTransposed' if transposed else 'FC',
            ['X', 'W', 'b'],
            'out',
            engine=engine,
        )

        if dtype == np.float16 and gc.device_type == caffe2_pb2.CUDA:
            a = caffe2_pb2.Argument()
            a.i = 1
            a.name = "float16_compute"
            op.arg.extend([a])

        # Check against numpy reference
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X, W, b],
            reference=fc_transposed_op if transposed else fc_op,
        )
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X, W, b], [0])

        # Gradient checks
        threshold = 0.5 if dtype == np.float16 else 0.005
        stepsize = 0.5 if dtype == np.float16 else 0.05
        for i in range(3):
            self.assertGradientChecks(gc,
                                      op, [X, W, b],
                                      i, [0],
                                      threshold=threshold,
                                      stepsize=stepsize)

    @settings(max_examples=50,
              suppress_health_check=[HealthCheck.filter_too_much])
    @given(n=st.integers(1, 5),
           m=st.integers(0, 5),
           k=st.integers(1, 5),
           multi_dim=st.sampled_from([True, False]),
           dtype=st.sampled_from([np.float32, np.float16]),
           engine=st.sampled_from(['', 'TENSORCORE']),
           **hu.gcs)
    def test_fc(self, **kwargs):
        self._run_test(transposed=False, **kwargs)

    @settings(max_examples=50,
              suppress_health_check=[HealthCheck.filter_too_much])
    @given(n=st.integers(1, 5),
           m=st.integers(0, 5),
           k=st.integers(1, 5),
           multi_dim=st.sampled_from([True, False]),
           dtype=st.sampled_from([np.float32, np.float16]),
           engine=st.sampled_from(['', 'TENSORCORE']),
           **hu.gcs)
    def test_fc_transposed(self, **kwargs):
        self._run_test(transposed=True, **kwargs)
    """
    Function to tell us whether a SequencingError should be raised
    """
    if wait not in [0, 1]:
        return True
    if nrep not in range(0, 16384):
        return True
    if jump_to not in range(-1, num_elms + 1):
        return True
    if goto not in range(0, num_elms + 1):
        return True
    return False


@settings(max_examples=25)
@given(wait=hst.integers(),
       nrep=hst.integers(),
       jump_to=hst.integers(),
       goto=hst.integers())
def test_awg_output_validations(protosequence1, wait, nrep, jump_to, goto):

    protosequence1.setSequencingTriggerWait(1, wait)
    protosequence1.setSequencingNumberOfRepetitions(1, nrep)
    protosequence1.setSequencingEventJumpTarget(1, jump_to)
    protosequence1.setSequencingGoto(1, goto)

    N = protosequence1.length_sequenceelements

    if should_raise_sequencingerror(wait, nrep, jump_to, goto, N):
        with pytest.raises(SequencingError):
            protosequence1.outputForAWGFile()
Example #58
0
import os, sys
import numpy as np

sys.path.append('../tokenizer')
sys.path.append('../evc_tool/domain/model')
from hypothesis import given, settings, Verbosity
from hypothesis import strategies as st

from tokenizer import tokenizer
from evc_tool.domain.model import embedding


@settings(verbosity=Verbosity.verbose, deadline=35000)
@given(s_ngram=st.integers(min_value=336, max_value=1000),
       no_components=st.integers(min_value=5, max_value=50))
def test_build_embed_struct(s_ngram, no_components):
    """function to test build_embed_struct in evc_tool/domain/model/embedding.py

        :param s_ngram: n_gram size
        :param no_components: number of dimensions per token
    """

    token_set = tokenizer.tokenize(
        'pipeline/data_loader/stage_three_plus_contractions_preprocessed_session_notes.csv',
        s_ngram, 1, True)
    glove_object = embedding.train(token_set, no_components=no_components)
    embed_struct = embedding.build_embed_struct(token_set, glove_object)

    no_sentences = len(token_set.collapse_speakers().all_tokens.tolist())

    # check if the final dimensions of the embed_struct are correct
    # assertion reconstructed from the comment above; exact shape is assumed
    assert embed_struct.shape == (no_sentences, no_components)
Example #59
0
        def is_older(employee_age):
            return employee_age > (average_employee_age + years)

        n_employees_older = count(filter(is_older, employee_ages))

        if n_employees_older:
            n_employees = float(len(company_employees))
            percentage_older = n_employees_older / n_employees * 100

        yield Expected(company.company_id, company.company_name,
                       average_employee_age, percentage_older)


@settings(max_examples=50)
@given(employee_databases(), st.integers(min_value=0, max_value=10))
def test_get_employees_percentage_older_than_average(employee_database, years):
    """
    Verify that the query in get_employees_percentage_older_than_average
    behaves as expected. The way this is verified is by performing the same
    calculation in python rather than sql. The idea being that if two different
    calculation methods produce the same output we can be more confident that
    both are correct.

    :param employee_database: tuple of (employee data, SQLAlchemy session)
                              which should be used for testing.
    :param years: Find percentage of employees this number of years older
                    than the company-wide average.
    """
    with mock.patch('employee_insights.models.NOW', '2017-04-01'):
Example #60
0
def test_includes_non_default_args_in_repr():
    assert repr(st.integers()) == 'integers()'
    assert repr(st.integers(min_value=1)) == 'integers(min_value=1)'