def add_optional_field(name, strategy):
    # Nested helper: `draw` and `result` are closed over from the
    # enclosing composite strategy.
    val = draw(one_of(none(), strategy))
    if val is not None:
        event('unit.{}: optional field given value'.format(name))
        result[name] = val
    else:
        event('unit.{}: optional field missing'.format(name))
def search(self, key):
    # A key inserted before may have already been deleted if it was a
    # duplicate, so searching it may not succeed. Check the key exists
    # in the model dictionary.
    assume(key in self.state)
    event("Searching existing key")
    assert self.sorted_dict[key] == self.state[key]
def test_dimacs_cnf_serialize_accepts_only_cnf(sentence: nnf.NNF):
    if sentence.is_CNF():
        event("CNF sentence")
        dimacs.dumps(sentence, mode='cnf')
    else:
        event("Not CNF sentence")
        with pytest.raises(dimacs.EncodeError):
            dimacs.dumps(sentence, mode='cnf')
def insert(self, d):
    event("insert")
    candidates = sorted(
        goal_id
        for goal_id, attrs in self.goaltree.q("select").items()
        if attrs["select"] != "select"
    )
    random_goal = d.draw(sampled_from(candidates))
    self._accept_all(HoldSelect(), Select(random_goal), Insert("i"))
def test_implies(a: nnf.NNF, b: nnf.NNF):
    if a.implies(b):
        event("Implication")
        for model in a.models():
            assert b.condition(model).valid()
    else:
        event("No implication")
        assert any(not b.condition(model).valid()
                   for model in a.models())
def test_json_dumps(value):
    """Checks that value is serialisable as JSON."""
    # We expect this test to always pass - the point of this exercise is
    # to define a recursive strategy, and then investigate the values it
    # generates for a *passing* test.
    hypothesis.note("type: {}".format(type(value)))
    hypothesis.event("type: {}".format(type(value)))
    json.dumps(value)
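# A hedged sketch of the kind of recursive strategy the exercise above refers
# to (this particular definition is illustrative, not the original's):
# `st.recursive` takes a base strategy plus a function that builds larger
# values out of smaller ones, which matches the shape of JSON.
from hypothesis import strategies as st

json_values = st.recursive(
    st.none() | st.booleans() | st.integers() | st.floats(allow_nan=False) | st.text(),
    lambda children: st.lists(children) | st.dictionaries(st.text(), children),
)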
def test_paths_notarget(path):
    """Generate paths without any target."""
    x, y = path_endpoint(path)
    print("No target: x={}, y={}, path=[{}]".format(
        x, y, ", ".join(str(p) for p in path)))
    in_range = to_range(x - y, 10)
    event(str(in_range))
def event(self, description):
    """
    Wrapper for hypothesis' event function.

    hypothesis.event raises an exception when invoked outside of a
    hypothesis context, so skip it when we are replaying a failed path.
    """
    if not self.replay_path:
        event(description)
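# Hedged sketch (illustrative names, not from the project above): inside a
# @given test, calling hypothesis.event is always safe; the guard in the
# wrapper above only matters when a recorded failure is replayed outside of
# Hypothesis' control, where hypothesis.event raises an exception.
import hypothesis
from hypothesis import given, strategies as st

@given(st.booleans())
def test_event_inside_given_is_safe(flag):
    hypothesis.event("flag: {}".format(flag))  # fine: we are inside @given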
def test_reversing_twice_gives_same_list(xs):
    # This will generate lists of arbitrary length (usually between 0 and
    # 100 elements) whose elements are integers.
    event("Length: %s" % len(xs))
    ys = list(xs)
    ys.reverse()
    ys.reverse()
    assert xs == ys
def test_nth_line_ref(self, t_lineno: Tuple[str, int]) -> None:
    t, lineno = t_lineno
    hypothesis.event("lineno = {}".format(lineno))

    def nth_line_ref(src: str, lineno: int) -> int:
        xs = src.split("\n")[:lineno]
        xs[-1] = ''
        return len("\n".join(xs))

    self.assertEqual(expecttest.nth_line(t, lineno), nth_line_ref(t, lineno))
def test_scores_improve(content, style):
    """Scores must be one if inputs only vary on one dimension."""
    pm = patchmatch.PatchMatcher(content, style)
    before = pm.scores.sum()
    pm.search_patches_random(times=1)
    after = pm.scores.sum()
    event("equal? %i" % int(after == before))
    assert after >= before
def test_nth_line_ref(self, t_lineno):
    t, lineno = t_lineno
    hypothesis.event("lineno = {}".format(lineno))

    def nth_line_ref(src, lineno):
        xs = src.split("\n")[:lineno]
        xs[-1] = ''
        return len("\n".join(xs))

    self.assertEqual(expecttest.nth_line(t, lineno), nth_line_ref(t, lineno))
def test_smallest_hyperedge_tracker(data):
    for congruence in [True, False]:
        PE.GloballyIndexed.reset_global_index()
        h1 = PE.Hypergraph(congruence=congruence)
        tracker1 = PE.SmallestHyperedgeTracker(measure=PE.SmallestHyperedgeTracker.size)
        tracker2 = PE.SmallestHyperedgeTracker(measure=PE.SmallestHyperedgeTracker.depth)
        h1.listeners.add(tracker1)
        h1.listeners.add(tracker2)

        max_number_of_smallest = 0
        there_was_by_size_ineq_by_depth = False

        for i in range(data.draw(strategies.integers(2, 5))):
            rw = data.draw(PE.gen_rewrite(h1))
            h1.rewrite(**rw)
            if data.draw(strategies.booleans()):
                h1.remove_nodes(data.draw(strategies.sampled_from(list(h1.nodes()))))

        for n in h1.nodes():
            # TODO: Sometimes there are just too many terms. In this case we assume
            # false, but this isn't very elegant. A better approach would be to find
            # the smallest terms instead of enumerating all terms.
            terms = []
            for t in PE.finite_terms(n):
                i = i + 1  # reuse the rewrite counter as a global term budget
                if i > 1000:
                    print("Oops, too many terms")
                    hypothesis.assume(False)
                terms.append((PE.measure_term(t, tracker1.measure),
                              PE.measure_term(t, tracker2.measure),
                              t))
            if terms:
                (min_val1, _, min_term1) = min(terms, key=lambda x: x[0])
                (_, min_val2, min_term2) = min(terms, key=lambda x: x[1])
                assert min_val1 == tracker1.smallest[n][0]
                assert min_val2 == tracker2.smallest[n][0]
                smallest1 = set(t for v, _, t in terms if v == min_val1)
                smallest2 = set(t for _, v, t in terms if v == min_val2)
                assert set(tracker1.smallest_terms(n)) == smallest1
                # For depth the tracker will not return the full set of shallowest terms
                assert set(tracker2.smallest_terms(n)).issubset(smallest2)
                max_number_of_smallest = max(max_number_of_smallest, len(smallest1))
                max_number_of_smallest = max(max_number_of_smallest, len(smallest2))
                if smallest1 != smallest2:
                    there_was_by_size_ineq_by_depth = True
            else:
                assert tracker1.smallest[n][0] == tracker1.worst_value
                assert tracker2.smallest[n][0] == tracker2.worst_value

        hypothesis.event("Max number of smallest: " + str(max_number_of_smallest))
        hypothesis.event("There was a node where the num of smallest by size != by depth: " +
                         str(there_was_by_size_ineq_by_depth))
def test_scale_and_offset_raw_value_iterable_for_set_cache(values, offsets, scales):
    p = Parameter(name='test_scale_and_offset_raw_value', set_cmd=None)

    # test that scale and offset does not change the default behaviour
    p.cache.set(values)
    assert p.raw_value == values

    # test setting scale and offset does not change anything
    p.scale = scales
    p.offset = offsets
    assert p.raw_value == values

    np_values = np.array(values)
    np_offsets = np.array(offsets)
    np_scales = np.array(scales)
    np_get_latest_values = np.array(p.get_latest())
    # Without a call to ``get``, ``get_latest`` will just return old
    # cached values without applying the set scale and offset
    np.testing.assert_allclose(np_get_latest_values, np_values)
    np_get_values = np.array(p.get())
    # Now that ``get`` is called, the returned values are the result of
    # application of the scale and offset. Obviously, calling
    # ``get_latest`` now will also return the values with the applied
    # scale and offset
    np.testing.assert_allclose(np_get_values, (np_values - np_offsets) / np_scales)
    np_get_latest_values_after_get = np.array(p.get_latest())
    np.testing.assert_allclose(np_get_latest_values_after_get,
                               (np_values - np_offsets) / np_scales)

    # test ``cache.set`` for scalar values
    if not isinstance(values, Iterable):
        p.cache.set(values)
        np.testing.assert_allclose(np.array(p.raw_value),
                                   np_values * np_scales + np_offsets)
        # No set/get cmd performed

        # testing conversion back and forth
        p.cache.set(values)
        np_get_latest_values = np.array(p.get_latest())  # No set/get cmd performed
        np.testing.assert_allclose(np_get_latest_values, np_values)

    # adding statistics
    if isinstance(offsets, Iterable):
        event('Offset is array')
    if isinstance(scales, Iterable):
        event('Scale is array')
    if isinstance(values, Iterable):
        event('Value is array')
    if isinstance(scales, Iterable) and isinstance(offsets, Iterable):
        event('Scale is array and also offset')
    if isinstance(scales, Iterable) and not isinstance(offsets, Iterable):
        event('Scale is array but not offset')
def test_validity(sentence: nnf.NNF):
    if sentence.valid():
        event("Valid sentence")
        assert all(sentence.satisfied_by(model)
                   for model in nnf.all_models(sentence.vars()))
    else:
        event("Invalid sentence")
        assert any(not sentence.satisfied_by(model)
                   for model in nnf.all_models(sentence.vars()))
def test_simplify_eliminates_bools(sentence: nnf.NNF, merge_nodes):
    assume(sentence != nnf.true and sentence != nnf.false)
    if any(node == nnf.true or node == nnf.false
           for node in sentence.walk()):
        event("Sentence contained booleans originally")
    sentence = sentence.simplify(merge_nodes)
    if sentence == nnf.true or sentence == nnf.false:
        event("Sentence simplified to boolean")
    else:
        for node in sentence.walk():
            assert node != nnf.true and node != nnf.false
def test_simplify_tree_literal(expression):
    '''
    And/Or/Not trees populated entirely with True/False literals.
    Regardless of structure, simplification should always give True or False.
    '''
    result = simplify_tree(expression)
    assert result is True or result is False
    if result is True:
        event("True")
    elif result is False:
        event("False")
def test_simplify_merges_internal_nodes(sentence: nnf.NNF):
    if any(any(type(node) == type(child) for child in node.children)
           for node in sentence.walk()
           if isinstance(node, nnf.Internal)):
        event("Sentence contained immediately mergeable nodes")
    # Nodes may also be merged after intermediate nodes are removed
    for node in sentence.simplify().walk():
        if isinstance(node, nnf.Internal):
            for child in node.children:
                assert type(node) != type(child)
def test_spell_check_false_positives(word):
    """
    Slightly abusing pytest & hypothesis to test the false positive rate.

    The test will always pass but records a 'False positive' event, which is
    reported when pytest is run with the --hypothesis-show-statistics
    argument. Tests use random 5-character words as specified in the Kata.
    """
    if check(word) is True and word not in VALID_WORDS:
        event("False positive")
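# Hedged usage note (the flag below ships with Hypothesis' pytest plugin):
# the aggregated counts from event() calls like the one above are printed
# per test when pytest is invoked as
#
#     pytest --hypothesis-show-statistics
#
# Each distinct string passed to event() becomes a labelled bucket, reported
# with the percentage of generated examples that hit it.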
def test_DNNF_sat_strategies(sentence: nnf.NNF, merge_nodes):
    sat = sentence.satisfiable()
    if sat:
        assert sentence.simplify(merge_nodes) != nnf.false
        assert amc.SAT(sentence)
        event("Sentence satisfiable")
    else:
        assert sentence.simplify(merge_nodes) == nnf.false
        assert not amc.SAT(sentence)
        event("Sentence not satisfiable")
def cant_serialize(media_type: str) -> NoReturn:  # type: ignore
    """Reject the current example if we don't know how to send this data to the application."""
    event_text = f"Can't serialize data to `{media_type}`."
    note(
        f"{event_text} "
        f"You can register your own serializer with `schemathesis.serializers.register` "
        f"and Schemathesis will be able to make API calls with this media type."
    )
    event(event_text)
    reject()  # type: ignore
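# Hedged sketch (separate from Schemathesis itself): hypothesis.reject()
# discards the current example much like assume(False), so the pattern above
# turns "unsupported media type" into a skipped example that still shows up
# in the event statistics. A minimal illustration:
from hypothesis import given, event, reject, strategies as st

@given(st.text())
def test_reject_sketch(s):
    if not s.isascii():
        event("non-ascii input rejected")
        reject()  # drop the example instead of failing the test
    assert s.encode("ascii").decode("ascii") == s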
def test_scores_improve(content, style):
    """Scores must be one if inputs only vary on one dimension."""
    matcher = FeatureMatcher(content, style)
    matcher.compare_features_identity()
    before = matcher.repro_target.scores.sum()
    matcher.compare_features_random(times=1)
    after = matcher.repro_target.scores.sum()
    event("equal? %i" % int(after == before))
    assert after >= before
def test_is_base_tuple_has_expected_behavior_for_parsable_types(type_str):
    # Should match tuple types
    if type_str.endswith(')'):
        assert is_base_tuple(type_str)
        event('Match for tuple type')
    # Should not match any other types
    else:
        assert not is_base_tuple(type_str)
        event('No match for non-tuple type')
def test_directed_hausdorff(rec_set1, rec_set2):
    d12, req12 = mdth.directed_hausdorff(rec_set1, rec_set2)
    assert len(req12[0]) > 0
    assert len(req12[1]) > 0
    _d12, _req12 = mdth.directed_hausdorff(*req12)
    assert req12 == _req12
    assert len(req12[0]) <= len(rec_set1)
    assert len(req12[1]) <= len(rec_set2)
    assert d12 == _d12
    event(f"d={d12}")
def compare_attribute_objects(self, first, second):
    assume(first is not NotImplemented)
    # Test __eq__
    if first != second:
        event("compare_attribute_objects: Not Equal.")
        assert (first.value != second.value
                or first.cls != second.cls
                or second.value is NotImplemented)
    else:
        event("compare_attribute_objects: Equal.")
        assert first.value == second.value and first.cls == second.cls
def add_extra_searchwords(draw, result):
    for lang in LANGUAGES:
        words = draw(sets(text(SAFE_LETTERS + 'åäöÅÄÖ ', min_size=1, max_size=25)))
        if len(words) == 0:
            event('extra searchwords length {}'.format(len(words)))
            words = None
        else:
            event('extra searchwords length >0')
            words = ', '.join(word.strip() for word in words)
        result['extra_searchwords_{}'.format(lang)] = words
def test_simplify_flat_and_fuzz(clauses):
    '''
    Currently a simple error check, but this should really validate
    algorithm guarantees by checking against a data set.
    '''
    result = simplify_flat_and(And(clauses))
    n_output = len(result.clauses) if type(result) is And else 1
    assert n_output <= len(clauses)
    if result is False:
        event('Simplified False')
    elif n_output < len(clauses):
        event('Shortened')
def test_app_parser(dic):
    event("dict depth: {d}".format(d=dict_depth(dic)))
    parser = mk_app_parser(dic)
    status = parser.at([], dic)
    assert type(status) is StatusOk
    assert status.value == dic
def test_smoothing(sentence: nnf.NNF):
    if not sentence.smooth():
        event("Sentence not smooth yet")
        smoothed = sentence.make_smooth()
        assert type(sentence) is type(smoothed)
        assert smoothed.smooth()
        assert sentence.equivalent(smoothed)
        assert smoothed.make_smooth() == smoothed
    else:
        event("Sentence already smooth")
        assert sentence.make_smooth() == sentence
def test_paths_with_target(path):
    """Generate paths targeting lower right."""
    x, y = path_endpoint(path)
    print("With target: x={}, y={}, len={}".format(x, y, len(path)))
    in_range = to_range(x - y, 100)
    event(str(in_range))
    target_function = float(x - y)
    target(target_function)
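# Hedged sketch (illustrative, separate from the test above): target() takes
# a float observation and steers Hypothesis toward inputs that maximise it,
# so pairing it with event() shows whether the guidance actually reaches the
# interesting buckets.
from hypothesis import given, event, target, strategies as st

@given(st.integers(), st.integers())
def test_target_sketch(x, y):
    gap = float(x - y)
    target(gap)  # Hypothesis tries to drive this observation upward
    event("gap positive" if gap > 0 else "gap non-positive")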
def test_substitution_result(expression):
    '''
    If passed a complete dictionary of True/False assignments to relations,
    substitution always gives a True/False result.
    '''
    variables = get_variables(expression)
    assignments = {
        variable: assignment
        for variable, assignment in zip(variables, itertools.cycle([True, False]))
    }
    result = substitution_result(expression, assignments)
    event('Result: {}'.format(result))
    assert result in (True, False)
def test_has_arrlist_has_expected_behavior_for_parsable_types(type_str):
    # Should not match tuple types
    if type_str.startswith('('):
        assert not has_arrlist(type_str)
        event('No match for tuple type')
    # Should match array types
    elif ARRAY_RE.search(type_str):
        assert has_arrlist(type_str)
        event('Match for array type')
    # Should not match any other types
    else:
        assert not has_arrlist(type_str)
        event('No match for non-array type')
def test(i):
    event('hi')
def test(i):
    event(Foo())
def test(i): event("hi")
def test(i):
    if isinstance(i, str):
        event("boo")
def test_base_equals_has_expected_behavior_for_parsable_types(type_str):
    is_int = BaseEquals('int')
    is_int_with_sub = BaseEquals('int', with_sub=True)
    is_int_with_no_sub = BaseEquals('int', with_sub=False)

    # Should not match tuple types
    if type_str.startswith('('):
        assert not is_int(type_str)
        assert not is_int_with_sub(type_str)
        assert not is_int_with_no_sub(type_str)
        event('No match for tuple type')
    # Should not match array types
    elif ARRAY_RE.search(type_str):
        assert not is_int(type_str)
        assert not is_int_with_sub(type_str)
        assert not is_int_with_no_sub(type_str)
        event('No match for array type')
    # Should match types with int base
    elif type_str.startswith('int'):
        assert is_int(type_str)
        event('Match for base')
        if type_str == 'int':
            assert is_int_with_no_sub(type_str)
            assert not is_int_with_sub(type_str)
            event('Match for base with no sub')
        else:
            assert not is_int_with_no_sub(type_str)
            assert is_int_with_sub(type_str)
            event('Match for base with sub')
    # Should not match any other types
    else:
        assert not is_int(type_str)
        assert not is_int_with_sub(type_str)
        assert not is_int_with_no_sub(type_str)
        event('No match for other base')
def test(i):
    if isinstance(i, str):
        event('boo')