def test_mutual_recursion():
    t = st.deferred(lambda: a | b)
    a = st.deferred(lambda: st.none() | st.tuples(st.just("a"), b))
    b = st.deferred(lambda: st.none() | st.tuples(st.just("b"), a))
    for c in ("a", "b"):
        assert minimal(t, lambda x: x is not None and x[0] == c) == (c, None)
def slice_node(draw):
    lower = draw(hs.one_of(const_node(hs.integers()), hs.none()))
    upper = draw(hs.one_of(const_node(hs.integers()), hs.none()))
    step = draw(hs.one_of(const_node(hs.integers()), hs.none()))
    node = astroid.Slice()
    node.postinit(lower, upper, step)
    return node
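# Usage sketch for the strategy above (an assumption, not from the original
# source: it presumes slice_node is decorated with @hs.composite elsewhere,
# as its `draw` parameter suggests).
import astroid
from hypothesis import given


@given(slice_node())
def test_slice_node_builds_a_slice(node):
    assert isinstance(node, astroid.Slice)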
def api_results(min_size=0, max_size=20, hook_types=None):
    count = integers(min_value=min_size, max_value=max_size).example()
    hook_types = hook_types or get_hook_names()
    return fixed_dictionaries(
        {
            "count": just(count),
            "next": none(),
            "prev": none(),
            "results": lists(
                fixed_dictionaries(
                    {
                        "name": text(min_size=1),
                        "latest_version": integers(min_value=0),
                        "content": fixed_dictionaries(
                            {
                                "hook_type": sampled_from(hook_types),
                                "version": integers(min_value=0),
                                "description": text(min_size=1),
                                "download_url": text(min_size=1),
                                "checksum": text(min_size=1),
                            }
                        ),
                    }
                ),
                min_size=count,
                max_size=count,
            ),
        }
    )
def test_recursion_in_middle():
    # This test is significant because the integers().map(abs) is not checked
    # in the initial pass - when we recurse into x initially we decide that
    # x is empty, so the tuple is empty, and don't need to check the third
    # argument. Then when we do the more refined test we've discovered that x
    # is non-empty, so we need to check the non-emptiness of the last component
    # to determine the non-emptiness of the tuples.
    x = st.deferred(lambda: st.tuples(st.none(), x, st.integers().map(abs)) | st.none())
    assert not x.is_empty
def steps(self):
    values = self.values()
    if not self.forked:
        return (
            s.just(FORK_NOW)
            | s.builds(Insert, values, values, s.none())
            | s.builds(Delete, values, values, s.none())
        )
    else:
        targets = s.sampled_from((self.left, self.right))
        return s.builds(Insert, values, values, targets) | s.builds(
            Delete, values, values, targets)
def from_field(field):
    # type: (Type[dm.Field]) -> st.SearchStrategy[dm.Field]
    """Return a strategy for values that fit the given field.

    This is pretty similar to the core `from_type` function, with a subtle
    but important difference: `from_field` takes a Field *instance*, rather
    than a Field *subtype*, so that it has access to instance attributes
    such as string length and validators.
    """
    check_type((dm.Field, df.Field), field, "field")
    if getattr(field, "choices", False):
        choices = []  # type: list
        for value, name_or_optgroup in field.choices:
            if isinstance(name_or_optgroup, (list, tuple)):
                choices.extend(key for key, _ in name_or_optgroup)
            else:
                choices.append(value)
        # form fields automatically include an empty choice, strip it out
        if u"" in choices:
            choices.remove(u"")
        min_size = 1
        if isinstance(field, (dm.CharField, dm.TextField)) and field.blank:
            choices.insert(0, u"")
        elif isinstance(field, (df.Field)) and not field.required:
            choices.insert(0, u"")
            min_size = 0
        strategy = st.sampled_from(choices)
        if isinstance(field, (df.MultipleChoiceField, df.TypedMultipleChoiceField)):
            strategy = st.lists(st.sampled_from(choices), min_size=min_size)
    else:
        if type(field) not in _global_field_lookup:
            if getattr(field, "null", False):
                return st.none()
            raise InvalidArgument("Could not infer a strategy for %r", (field,))
        strategy = _global_field_lookup[type(field)]
        if not isinstance(strategy, st.SearchStrategy):
            strategy = strategy(field)
    assert isinstance(strategy, st.SearchStrategy)
    if field.validators:

        def validate(value):
            try:
                field.run_validators(value)
                return True
            except django.core.exceptions.ValidationError:
                return False

        strategy = strategy.filter(validate)

    if getattr(field, "null", False):
        return st.none() | strategy
    return strategy
def models(model, **extra):
    result = {}
    mappings = field_mappings()
    mandatory = set()
    for f in model._meta.concrete_fields:
        if isinstance(f, dm.AutoField):
            continue
        try:
            mapped = mappings[type(f)]
        except KeyError:
            if not f.null:
                mandatory.add(f.name)
            continue
        if f.null:
            mapped = st.one_of(st.none(), mapped)
        result[f.name] = mapped
    missed = {x for x in mandatory if x not in extra}
    if missed:
        raise InvalidArgument((
            'Missing arguments for mandatory field%s %s for model %s' % (
                's' if len(missed) > 1 else '',
                ', '.join(missed),
                model.__name__,
            )))
    for k, v in extra.items():
        if isinstance(v, SearchStrategy):
            result[k] = v
        else:
            result[k] = st.just(v)
    result.update(extra)
    return ModelStrategy(model, result)
def _get_strategy_for_field(f):
    if f.choices:
        choices = []
        for value, name_or_optgroup in f.choices:
            if isinstance(name_or_optgroup, (list, tuple)):
                choices.extend(key for key, _ in name_or_optgroup)
            else:
                choices.append(value)
        if isinstance(f, (dm.CharField, dm.TextField)) and f.blank:
            choices.insert(0, u'')
        strategy = st.sampled_from(choices)
    elif type(f) == dm.SlugField:
        strategy = st.text(alphabet=string.ascii_letters + string.digits,
                           min_size=(None if f.blank else 1),
                           max_size=f.max_length)
    elif type(f) == dm.GenericIPAddressField:
        lookup = {'both': ip4_addr_strings() | ip6_addr_strings(),
                  'ipv4': ip4_addr_strings(), 'ipv6': ip6_addr_strings()}
        strategy = lookup[f.protocol.lower()]
    elif type(f) in (dm.TextField, dm.CharField):
        strategy = st.text(min_size=(None if f.blank else 1),
                           max_size=f.max_length)
    elif type(f) == dm.DecimalField:
        bound = Decimal(10 ** f.max_digits - 1) / (10 ** f.decimal_places)
        strategy = st.decimals(min_value=-bound, max_value=bound,
                               places=f.decimal_places)
    else:
        strategy = field_mappings().get(type(f), st.nothing())
    if f.validators:
        strategy = strategy.filter(validator_to_filter(f))
    if f.null:
        strategy = st.one_of(st.none(), strategy)
    return strategy
def test_fuzz_fractions_bounds(data):
    denom = data.draw(none() | integers(1, 100), label="denominator")
    fracs = none() | fractions(max_denominator=denom)
    low, high = data.draw(tuples(fracs, fracs), label="low, high")
    if low is not None and high is not None and low > high:
        low, high = high, low
    try:
        val = data.draw(fractions(low, high, denom), label="value")
    except InvalidArgument:
        reject()  # fractions too close for given max_denominator
    if low is not None:
        assert low <= val
    if high is not None:
        assert val <= high
    if denom is not None:
        assert 1 <= val.denominator <= denom
def reusable():
    return st.one_of(
        st.sampled_from(base_reusable_strategies),
        st.builds(
            st.floats,
            min_value=st.none() | st.floats(),
            max_value=st.none() | st.floats(),
            allow_infinity=st.booleans(),
            allow_nan=st.booleans(),
        ),
        st.builds(st.just, st.lists(max_size=0)),
        st.builds(st.sampled_from, st.lists(st.lists(max_size=0))),
        st.lists(reusable).map(st.one_of),
        st.lists(reusable).map(lambda ls: st.tuples(*ls)),
    )
def add_optional_field(name, strategy):
    val = draw(one_of(none(), strategy))
    if val is not None:
        event('unit.{}: optional field given value'.format(name))
        result[name] = val
    else:
        event('unit.{}: optional field missing'.format(name))
def models(model, **extra):
    result = {}
    mappings = field_mappings()
    mandatory = set()
    for f in model._meta.concrete_fields:
        if isinstance(f, dm.AutoField):
            continue
        try:
            mapped = mappings[type(f)]
        except KeyError:
            if not f.null:
                mandatory.add(f.name)
            continue
        if f.null:
            mapped = st.one_of(st.none(), mapped)
        result[f.name] = mapped
    missed = {x for x in mandatory if x not in extra}
    if missed:
        raise InvalidArgument((
            u'Missing arguments for mandatory field%s %s for model %s' % (
                u's' if len(missed) > 1 else u'',
                u', '.join(missed),
                model.__name__,
            )))
    result.update(extra)
    # Remove default_values so we don't try to generate anything for those.
    result = {k: v for k, v in result.items() if v is not default_value}
    return ModelStrategy(model, result)
def _get_strategy_for_field(f):
    if isinstance(f, dm.AutoField):
        return default_value
    elif f.choices:
        choices = [value for (value, name) in f.choices]
        if isinstance(f, (dm.CharField, dm.TextField)) and f.blank:
            choices.append(u'')
        strategy = st.sampled_from(choices)
    elif isinstance(f, dm.EmailField):
        return ff.fake_factory(u'email')
    elif type(f) in (dm.TextField, dm.CharField):
        strategy = st.text(min_size=(None if f.blank else 1),
                           max_size=f.max_length)
    elif type(f) == dm.DecimalField:
        m = 10 ** f.max_digits - 1
        div = 10 ** f.decimal_places
        q = Decimal('1.' + ('0' * f.decimal_places))
        strategy = (
            st.integers(min_value=-m, max_value=m)
            .map(lambda n: (Decimal(n) / div).quantize(q)))
    else:
        try:
            strategy = field_mappings()[type(f)]
        except KeyError:
            if f.null:
                return None
            else:
                raise UnmappedFieldError(f)
    if f.validators:
        strategy = strategy.filter(validator_to_filter(f))
    if f.null:
        strategy = st.one_of(st.none(), strategy)
    return strategy
def format_specifiers(draw):
    """
    Generate a valid format specifier using the rules:

    format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]
    fill        ::= <any character>
    align       ::= "<" | ">" | "=" | "^"
    sign        ::= "+" | "-" | " "
    width       ::= integer
    precision   ::= integer
    type        ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"

    See https://docs.python.org/2/library/string.html

    :param draw: Let hypothesis draw from other strategies.
    :return: An example format_specifier.
    """
    alphabet_strategy = st.characters(min_codepoint=ord('a'), max_codepoint=ord('z'))
    fill = draw(st.one_of(alphabet_strategy, st.none()))
    align = draw(st.sampled_from(list('<>=^')))
    fill_align = (fill + align or '') if fill else ''

    type_ = draw(st.sampled_from('bcdeEfFgGnosxX%'))
    can_have_sign = type_ in 'deEfFgGnoxX%'
    can_have_comma = type_ in 'deEfFgG%'
    can_have_precision = type_ in 'fFgG'
    can_have_pound = type_ in 'boxX%'
    can_have_zero = type_ in 'oxX'

    sign = draw(st.sampled_from(list('+- ') + [''])) if can_have_sign else ''
    pound = draw(st.sampled_from(('#', '',))) if can_have_pound else ''
    zero = draw(st.sampled_from(('0', '',))) if can_have_zero else ''

    int_strategy = st.integers(min_value=1, max_value=1000)

    width = draw(st.one_of(int_strategy, st.none()))
    width = str(width) if width is not None else ''

    comma = draw(st.sampled_from((',', '',))) if can_have_comma else ''
    if can_have_precision:
        precision = draw(st.one_of(int_strategy, st.none()))
        precision = '.' + str(precision) if precision else ''
    else:
        precision = ''

    return ''.join((fill_align, sign, pound, zero, width, comma, precision, type_,))
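# Usage sketch for the strategy above (an assumption, not from the original
# source: it presumes format_specifiers is wrapped with @st.composite, as its
# `draw` parameter suggests).  It only checks the shape of the generated
# specifier, since not every presentation type accepts every value.
import hypothesis


@hypothesis.given(format_specifiers())
def test_format_specifier_shape(spec):
    assert isinstance(spec, str)
    assert spec[-1] in 'bcdeEfFgGnosxX%'  # always ends with a presentation type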
def test_example_that_shrinks_to_overrun_fails_health_check():
    @given(st.binary(min_size=9000, max_size=9000) | st.none())
    def test(b):
        pass

    with pytest.raises(FailedHealthCheck) as exc:
        test()
    assert exc.value.health_check == HealthCheck.large_base_example
def test_large_data_will_fail_a_health_check():
    @given(st.none() | st.binary(min_size=1024))
    @settings(database=None, buffer_size=1000)
    def test(x):
        pass

    with raises(FailedHealthCheck) as e:
        test()
    assert "allowable size" in e.value.args[0]
def _get_strategy_for_field(f):
    # type: (Type[dm.Field]) -> st.SearchStrategy[Any]
    if f.choices:
        choices = []  # type: list
        for value, name_or_optgroup in f.choices:
            if isinstance(name_or_optgroup, (list, tuple)):
                choices.extend(key for key, _ in name_or_optgroup)
            else:
                choices.append(value)
        if isinstance(f, (dm.CharField, dm.TextField)) and f.blank:
            choices.insert(0, u'')
        strategy = st.sampled_from(choices)
    elif type(f) == dm.SlugField:
        strategy = st.text(alphabet=string.ascii_letters + string.digits,
                           min_size=(0 if f.blank else 1),
                           max_size=f.max_length)
    elif type(f) == dm.GenericIPAddressField:
        lookup = {'both': ip4_addr_strings() | ip6_addr_strings(),
                  'ipv4': ip4_addr_strings(), 'ipv6': ip6_addr_strings()}
        strategy = lookup[f.protocol.lower()]
    elif type(f) in (dm.TextField, dm.CharField):
        strategy = st.text(
            alphabet=st.characters(blacklist_characters=u'\x00',
                                   blacklist_categories=('Cs',)),
            min_size=(0 if f.blank else 1),
            max_size=f.max_length,
        )
        # We can infer a vastly more precise strategy by considering the
        # validators as well as the field type. This is a minimal proof of
        # concept, but we intend to leverage the idea much more heavily soon.
        # See https://github.com/HypothesisWorks/hypothesis-python/issues/1116
        re_validators = [
            v for v in f.validators
            if isinstance(v, validators.RegexValidator) and not v.inverse_match
        ]
        if re_validators:
            regexes = [re.compile(v.regex, v.flags) if isinstance(v.regex, str)
                       else v.regex for v in re_validators]
            # This strategy generates according to one of the regexes, and
            # filters using the others. It can therefore learn to generate
            # from the most restrictive and filter with permissive patterns.
            # Not maximally efficient, but it makes pathological cases rarer.
            # If you want a challenge: extend https://qntm.org/greenery to
            # compute intersections of the full Python regex language.
            strategy = st.one_of(*[st.from_regex(r) for r in regexes])
    elif type(f) == dm.DecimalField:
        bound = Decimal(10 ** f.max_digits - 1) / (10 ** f.decimal_places)
        strategy = st.decimals(min_value=-bound, max_value=bound,
                               places=f.decimal_places)
    else:
        strategy = field_mappings().get(type(f), st.nothing())
    if f.validators:
        strategy = strategy.filter(validator_to_filter(f))
    if f.null:
        strategy = st.one_of(st.none(), strategy)
    return strategy
def test_diff_values_mixed(data):
    type_float = data.draw(st.floats(), label='float')
    type_int = data.draw(st.integers(), label='int')
    type_none = data.draw(st.none(), label='none')

    assert are_values_different(type_float, type_int)
    assert are_values_different(type_float, type_none)
    assert are_values_different(type_int, type_none)
    assert are_values_different(np.ndarray([0]), 'hey')
    assert not are_values_different(type_none, type_none)
def translated_field(draw, name, allow_missing=True, languages=LANGUAGES):
    result = {}
    for lang in languages:
        if allow_missing:
            val = draw(one_of(text(max_size=50), none()))
        else:
            val = draw(text(min_size=1, max_size=50))
        if val is not None:
            result['{}_{}'.format(name, lang)] = val
    return result
def test_will_mark_too_deep_examples_as_invalid():
    d = ConjectureData.for_buffer(hbytes(0))

    s = st.none()
    for _ in hrange(MAX_DEPTH + 1):
        s = s.map(lambda x: x)

    with pytest.raises(StopTest):
        d.draw(s)
    assert d.status == Status.INVALID
def requirements_list(draw, length, answers=False):
    if answers:
        elements = booleans() if length is not None else one_of(booleans(), none())
    else:
        elements = text(min_size=1, alphabet='abcdefgh', average_size=10)
    return draw(lists(
        elements=elements,
        min_size=length,
        max_size=length if length is not None else 10
    ))
def from_attrs_attribute(attrib, target):
    """Infer a strategy from the metadata on an attr.Attribute object."""
    # Try inferring from the default argument.  Note that this will only help
    # if the user passed `infer` to builds() for this attribute, but in that
    # case we use it as the minimal example.
    default = st.nothing()
    if isinstance(attrib.default, attr.Factory):
        if not getattr(attrib.default, "takes_self", False):  # new in 17.1
            default = st.builds(attrib.default.factory)
    elif attrib.default is not attr.NOTHING:
        default = st.just(attrib.default)

    # Try inferring None, exact values, or type from attrs provided validators.
    null = st.nothing()  # updated to none() on seeing an OptionalValidator
    in_collections = []  # list of in_ validator collections to sample from
    validator_types = set()  # type constraints to pass to types_to_strategy()
    if attrib.validator is not None:
        validator = attrib.validator
        if isinstance(validator, attr.validators._OptionalValidator):
            null = st.none()
            validator = validator.validator
        if isinstance(validator, attr.validators._AndValidator):
            vs = validator._validators
        else:
            vs = [validator]
        for v in vs:
            if isinstance(v, attr.validators._InValidator):
                if isinstance(v.options, string_types):
                    in_collections.append(list(all_substrings(v.options)))
                else:
                    in_collections.append(v.options)
            elif isinstance(v, attr.validators._InstanceOfValidator):
                validator_types.add(v.type)

    # This is the important line.  We compose the final strategy from various
    # parts.  The default value, if any, is the minimal shrink, followed by
    # None (again, if allowed).  We then prefer to sample from values passed
    # to an in_ validator if available, but infer from a type otherwise.
    # Pick one because (sampled_from((1, 2)) | from_type(int)) would usually
    # fail validation by generating e.g. zero!
    if in_collections:
        sample = st.sampled_from(list(ordered_intersection(in_collections)))
        strat = default | null | sample
    else:
        strat = default | null | types_to_strategy(attrib, validator_types)

    # Better to give a meaningful error here than an opaque "could not draw"
    # when we try to get a value but have lost track of where this was created.
    if strat.is_empty:
        raise ResolutionFailed(
            "Cannot infer a strategy from the default, validator, type, or "
            "converter for attribute=%r of class=%r" % (attrib, target)
        )
    return strat
def resid_strategy(draw, i_kwargs={}):
    """
    strategy for generating a resid.

    Look at the code, to see what we currently support as resid.
    """
    chain = draw(one_of(none(), text(min_size=1, alphabet=ascii_letters)))
    het_flag = " "  # Currently, a non-empty het-flag is not supported!
    resid = draw(integers(**i_kwargs).filter(lambda x: x != 0))
    insertion_code = draw(one_of(just(" "), get_insertion_code()))
    return fgr.RESID(chain, (het_flag, resid, insertion_code))
def test_fuzz_decimals_bounds(data):
    places = data.draw(none() | integers(0, 20), label='places')
    finite_decs = decimals(allow_nan=False, allow_infinity=False,
                           places=places) | none()
    low, high = data.draw(tuples(finite_decs, finite_decs), label='low, high')
    if low is not None and high is not None and low > high:
        low, high = high, low
    ctx = decimal.Context(prec=data.draw(integers(1, 100), label='precision'))
    try:
        with decimal.localcontext(ctx):
            strat = decimals(low, high, allow_nan=False,
                             allow_infinity=False, places=places)
            val = data.draw(strat, label='value')
    except InvalidArgument:
        reject()  # decimals too close for given places
    if low is not None:
        assert low <= val
    if high is not None:
        assert val <= high
    if places is not None:
        assert val.as_tuple().exponent == -places
def requirements_list(draw, length, answers=False):
    if answers:
        elements = text() if length is not None else one_of(text(), none())
    else:
        elements = text(
            min_size=1, average_size=10, max_size=300,
            alphabet=_descriptive_alphabet
        ).filter(
            partial(_word_count_filter, max_words=30)
        )
    return draw(lists(
        elements=elements,
        min_size=length,
        max_size=length if length is not None else 10
    ))
def test_DatetimeStrategy_draw_may_fail():
    def is_failure_inducing(b):
        try:
            return strat._attempt_one_draw(ConjectureData.for_buffer(b)) is None
        except StopTest:
            return False

    strat = DatetimeStrategy(dt.datetime.min, dt.datetime.max, none())
    failure_inducing = minimal(binary(), is_failure_inducing, timeout_after=30)
    data = ConjectureData.for_buffer(failure_inducing * 100)
    with pytest.raises(StopTest):
        data.draw(strat)
    assert data.status == Status.INVALID
def test_non_trivial_json():
    json = st.deferred(lambda: st.none() | st.floats() | st.text() | lists | objects)
    lists = st.lists(json)
    objects = st.dictionaries(st.text(), json)

    assert minimal(json) is None

    small_list = minimal(json, lambda x: isinstance(x, list) and x)
    assert small_list == [None]

    x = minimal(json, lambda x: isinstance(x, dict) and isinstance(x.get(""), list))
    assert x == {"": []}
def tz_args_strat(allow_naive, tz_list, name):
    if tz_list is None:
        tz_strat = timezones_strategy()
    else:
        tz_strat = st.sampled_from([
            tz if isinstance(tz, dt.tzinfo) else pytz.timezone(tz)
            for tz in tz_list
        ])
    if allow_naive or (allow_naive is None and tz_strat.is_empty):
        tz_strat = st.none() | tz_strat
    if tz_strat.is_empty:
        raise InvalidArgument(
            'Cannot create non-naive %s with no timezones allowed.' % name)
    return tz_strat
def type_to_strat(x, opts):
    # type: (type) -> SearchStrategy
    '''
    Given a type, return a strategy which yields a value of that type.
    Types may be complex: Union, NamedTuple, etc.
    For more information, see https://docs.python.org/3/library/typing.html
    Usage:
    >>> type_to_strat(Union[int, str]).example()
    . . . 3
    '''
    recur = lambda y: type_to_strat(y, opts)
    if x in primitives:
        prim = primitives[x].filter(opts.get(x, lambda x: x))
        return prim
    elif hasattr(x, '_fields'):  # NamedTuple isn't a type, it's a function
        # elif isinstance(x, Callable):  # this catches List[T] for some reason
        name = x.__name__
        fts = OrderedDict(x._field_types)
        vals = map(recur, fts.values())
        # `NamedTuple` is actually a ... `namedtuple` itself
        toArgDict = lambda xs: dict(zip(fts.keys(), xs))
        return st.tuples(*vals).map(lambda ys: x(**toArgDict(ys)))
    elif issubclass(x, Dict):
        return st.dictionaries(*map(recur, x.__parameters__))
    elif issubclass(x, Tuple):
        if x.__tuple_use_ellipsis__:  # variable length tuple
            element_type = x.__tuple_params__[0]
            return recur(List[element_type]).map(tuple)
        return st.tuples(*map(recur, x.__tuple_params__))
    elif issubclass(x, Union):
        return reduce(operator.ior, map(recur, x.__union_params__))
    elif issubclass(x, Optional):
        # Optional[X] is equivalent to Union[X, type(None)]; second param is always NoneType.
        value = x.__union_params__[0]
        return (recur(value) | st.none())  # type: SearchStrategy
    else:
        element_type = recur(x.__parameters__[0])
        if issubclass(x, list):
            return st.lists(element_type)
        elif issubclass(x, set):
            return st.sets(element_type)
        elif issubclass(x, Sequence):
            anySizeTuple = recur(Tuple[element_type, ...])
            return st.sets(element_type) | st.lists(element_type) | anySizeTuple
        elif issubclass(x, Generator):
            toGen = lambda xs: (x for x in xs)  # type: Callable[[Iterable[T]], Generator[T]]
            return recur(List[element_type]).map(toGen)
        # not sure how to create an Iterable (it doesn't have an `__next__` method)
        elif issubclass(x, Iterator) or issubclass(x, Iteratable):
            return recur(List[element_type]).map(iter)
        else:
            raise ValueError("Could not find strategy for type %s" % x)
def generic_protocol_options(draw):
    cam = draw(maybe(option_cam()))
    args = map(draw, (
        option_version(),
        option_minimum_version(),
        option_swarm_id(),
        option_cipm(),
        option_mhtf(),
        option_lsa(),
        just(cam),
        none() if cam is None else option_ldw(cam),
        option_supported_messages(),
        option_chunk_size(),
    ))
    return protocol_options.ProtocolOptions(*args)
@given(st.integers())
@pytest.mark.trio
async def test_mark_inner(n):
    assert isinstance(n, int)


@our_settings
@pytest.mark.trio
@given(st.integers())
async def test_mark_outer(n):
    assert isinstance(n, int)


@our_settings
@pytest.mark.parametrize('y', [1, 2])
@given(x=st.none())
@pytest.mark.trio
async def test_mark_and_parametrize(x, y):
    assert x is None
    assert y in (1, 2)


def test_the_trio_scheduler_is_deterministic_under_hypothesis():
    traces = []

    @our_settings
    @given(st.integers())
    @pytest.mark.trio
    async def inner(_):
        traces.append(await scheduler_trace())
class QCircuitMachine(RuleBasedStateMachine):
    """Build a Hypothesis rule based state machine for constructing,
    transpiling and simulating a series of random QuantumCircuits.

    Build circuits with up to QISKIT_RANDOM_QUBITS qubits, apply a random
    selection of gates from qiskit.extensions.standard with randomly selected
    qargs, cargs, and parameters. At random intervals, transpile the circuit
    for a random backend with a random optimization level and simulate both
    the initial and the transpiled circuits to verify that their counts are
    the same.
    """

    qubits = Bundle('qubits')
    clbits = Bundle('clbits')

    backend = Aer.get_backend('qasm_simulator')
    max_qubits = int(backend.configuration().n_qubits / 2)

    def __init__(self):
        super().__init__()
        self.qc = QuantumCircuit()

    @precondition(lambda self: len(self.qc.qubits) < self.max_qubits)
    @rule(target=qubits, n=st.integers(min_value=1, max_value=max_qubits))
    def add_qreg(self, n):
        """Adds a new variable sized qreg to the circuit, up to max_qubits."""
        n = min(n, self.max_qubits - len(self.qc.qubits))
        qreg = QuantumRegister(n)
        self.qc.add_register(qreg)
        return multiple(*list(qreg))

    @rule(target=clbits, n=st.integers(1, 5))
    def add_creg(self, n):
        """Add a new variable sized creg to the circuit."""
        creg = ClassicalRegister(n)
        self.qc.add_register(creg)
        return multiple(*list(creg))

    # Gates of various shapes

    @rule(gate=st.sampled_from(oneQ_gates), qarg=qubits)
    def add_1q_gate(self, gate, qarg):
        """Append a random 1q gate on a random qubit."""
        self.qc.append(gate(), [qarg], [])

    @rule(gate=st.sampled_from(twoQ_gates),
          qargs=st.lists(qubits, max_size=2, min_size=2, unique=True))
    def add_2q_gate(self, gate, qargs):
        """Append a random 2q gate across two random qubits."""
        self.qc.append(gate(), qargs)

    @rule(gate=st.sampled_from(threeQ_gates),
          qargs=st.lists(qubits, max_size=3, min_size=3, unique=True))
    def add_3q_gate(self, gate, qargs):
        """Append a random 3q gate across three random qubits."""
        self.qc.append(gate(), qargs)

    @rule(gate=st.sampled_from(oneQ_oneP_gates),
          qarg=qubits,
          param=st.floats(allow_nan=False, allow_infinity=False,
                          min_value=-10*pi, max_value=10*pi))
    def add_1q1p_gate(self, gate, qarg, param):
        """Append a random 1q gate with 1 random float parameter."""
        self.qc.append(gate(param), [qarg])

    @rule(gate=st.sampled_from(oneQ_twoP_gates),
          qarg=qubits,
          params=st.lists(
              st.floats(allow_nan=False, allow_infinity=False,
                        min_value=-10*pi, max_value=10*pi),
              min_size=2, max_size=2))
    def add_1q2p_gate(self, gate, qarg, params):
        """Append a random 1q gate with 2 random float parameters."""
        self.qc.append(gate(*params), [qarg])

    @rule(gate=st.sampled_from(oneQ_threeP_gates),
          qarg=qubits,
          params=st.lists(
              st.floats(allow_nan=False, allow_infinity=False,
                        min_value=-10*pi, max_value=10*pi),
              min_size=3, max_size=3))
    def add_1q3p_gate(self, gate, qarg, params):
        """Append a random 1q gate with 3 random float parameters."""
        self.qc.append(gate(*params), [qarg])

    @rule(gate=st.sampled_from(twoQ_oneP_gates),
          qargs=st.lists(qubits, max_size=2, min_size=2, unique=True),
          param=st.floats(allow_nan=False, allow_infinity=False,
                          min_value=-10*pi, max_value=10*pi))
    def add_2q1p_gate(self, gate, qargs, param):
        """Append a random 2q gate with 1 random float parameter."""
        self.qc.append(gate(param), qargs)

    @rule(gate=st.sampled_from(twoQ_threeP_gates),
          qargs=st.lists(qubits, max_size=2, min_size=2, unique=True),
          params=st.lists(
              st.floats(allow_nan=False, allow_infinity=False,
                        min_value=-10*pi, max_value=10*pi),
              min_size=3, max_size=3))
    def add_2q3p_gate(self, gate, qargs, params):
        """Append a random 2q gate with 3 random float parameters."""
        self.qc.append(gate(*params), qargs)

    @rule(gate=st.sampled_from(oneQ_oneC_gates), qarg=qubits, carg=clbits)
    def add_1q1c_gate(self, gate, qarg, carg):
        """Append a random 1q, 1c gate."""
        self.qc.append(gate(), [qarg], [carg])

    @rule(gate=st.sampled_from(variadic_gates),
          qargs=st.lists(qubits, min_size=1, unique=True))
    def add_variQ_gate(self, gate, qargs):
        """Append a gate with a variable number of qargs."""
        self.qc.append(gate(len(qargs)), qargs)

    @precondition(lambda self: len(self.qc.data) > 0)
    @rule(carg=clbits, data=st.data())
    def add_c_if_last_gate(self, carg, data):
        """Modify the last gate to be conditional on a classical register."""
        creg = carg.register
        val = data.draw(st.integers(min_value=0, max_value=2**len(creg)-1))

        last_gate = self.qc.data[-1]

        # Conditional instructions are not supported
        assume(isinstance(last_gate[0], Gate))

        last_gate[0].c_if(creg, val)

    # Properties to check

    @invariant()
    def qasm(self):
        """After each circuit operation, it should be possible to build QASM."""
        self.qc.qasm()

    @precondition(lambda self: any(isinstance(d[0], Measure) for d in self.qc.data))
    @rule(
        backend=st.one_of(
            st.none(),
            st.sampled_from(mock_backends)),
        opt_level=st.integers(min_value=0, max_value=3))
    def equivalent_transpile(self, backend, opt_level):
        """Simulate, transpile and simulate the present circuit. Verify that
        the counts are not significantly different before and after
        transpilation.
        """
        print('Evaluating circuit at level {} on {}:\n{}'.format(
            opt_level, backend, self.qc.qasm()))

        assume(backend is None or backend.configuration().n_qubits >= len(self.qc.qubits))

        shots = 4096

        aer_counts = execute(self.qc, backend=self.backend,
                             shots=shots).result().get_counts()

        try:
            xpiled_qc = transpile(self.qc, backend=backend,
                                  optimization_level=opt_level)
        except Exception as e:
            failed_qasm = 'Exception caught during transpilation of circuit: \n{}'.format(
                self.qc.qasm())
            raise RuntimeError(failed_qasm) from e

        xpiled_aer_counts = execute(xpiled_qc, backend=self.backend,
                                    shots=shots).result().get_counts()

        count_differences = dicts_almost_equal(aer_counts, xpiled_aer_counts,
                                               0.05 * shots)

        assert count_differences == '', 'Counts not equivalent: {}\nFailing QASM: \n{}'.format(
            count_differences, self.qc.qasm())
@st.composite
def with_units(draw, elements, unit):
    angle = draw(elements)
    return angle * unit


angles_q = partial(with_units, elements=angles(), unit=u.rad)
eccentricities_q = partial(with_units, elements=eccentricities(), unit=u.one)


@settings(deadline=None)
@given(
    min_nu=angles_q(),
    ecc=eccentricities_q(),
    max_nu=st.one_of(angles_q(), st.none()),
)
def test_sample_closed_is_always_between_minus_pi_and_pi(min_nu, ecc, max_nu):
    result = sample_closed(min_nu, ecc, max_nu)

    assert ((-np.pi * u.rad <= result) & (result <= np.pi * u.rad)).all()


@settings(deadline=None)
@given(
    min_nu=with_units(elements=st.floats(min_value=-np.pi, max_value=np.pi), unit=u.rad),
    ecc=eccentricities_q(),
    max_nu=st.one_of(angles_q(), st.none()),
)
def test_sample_closed_starts_at_min_anomaly_if_in_range(min_nu, ecc, max_nu):
# pylint: disable=missing-docstring, redefined-outer-name, no-value-for-parameter
from pyresult import is_ok, is_error

from six import string_types
from six.moves import zip

from hypothesis import given
from hypothesis.strategies import (lists, one_of, none, integers, recursive,
                                   text, booleans, floats, complex_numbers,
                                   dictionaries)

from pydecoder.primitives import to_int
from pydecoder.structs import array

NOT_LIST = recursive(
    none() | text() | booleans() | integers() | floats() | complex_numbers(),
    lambda children: dictionaries(text(), children))


@given(lists(integers()))
def test_array_returns_decoded_list_in_result(ins):
    rv = array(to_int, ins)

    assert is_ok(rv)
    for orig, res in zip(ins, rv.value):
        assert res == int(orig)


@given(
from contextlib import suppress
from functools import partial

import pytest
from hypothesis import given
from hypothesis import strategies as st

from jsonschema_rs import JSONSchema, ValidationError, is_valid, validate

json = st.recursive(
    st.none() | st.booleans() | st.floats() | st.integers() | st.text(),
    lambda children: st.lists(children, min_size=1)
    | st.dictionaries(st.text(), children, min_size=1),
)


@pytest.mark.parametrize("func", (is_valid, validate))
@given(instance=json)
def test_instance_processing(func, instance):
    with suppress(Exception):
        func(True, instance)


@pytest.mark.parametrize("func", (is_valid, validate))
@given(instance=json)
def test_schema_processing(func, instance):
    with suppress(Exception):
        func(instance, True)


@pytest.mark.parametrize("func", (is_valid, validate))
def test_raises_for_arg_with_unresolvable_annotation():
    with pytest.raises(ResolutionFailed):
        st.builds(unknown_annotated_func).example()
    with pytest.raises(ResolutionFailed):
        st.builds(unknown_annotated_func, a=st.none(), c=infer).example()
AnnotatedNamedTuple = typing.NamedTuple("AnnotatedNamedTuple", [("a", str)])


@given(st.builds(AnnotatedNamedTuple))
def test_infers_args_for_namedtuple_builds(thing):
    assert isinstance(thing.a, str)


@given(st.from_type(AnnotatedNamedTuple))
def test_infers_args_for_namedtuple_from_type(thing):
    assert isinstance(thing.a, str)


@given(st.builds(AnnotatedNamedTuple, a=st.none()))
def test_override_args_for_namedtuple(thing):
    assert thing.a is None


@pytest.mark.parametrize(
    "thing", [typing.Optional, typing.List, getattr(typing, "Type", typing.Set)]
)  # check Type if it's available, otherwise Set is redundant but harmless
def test_cannot_resolve_bare_forward_reference(thing):
    with pytest.raises(InvalidArgument):
        t = thing["int"]
        if type(getattr(t, "__args__", [None])[0]) != ForwardRef:
            assert sys.version_info[:2] == (3, 5)
            pytest.xfail("python 3.5 typing module is really weird")
        st.from_type(t).example()
    return hst.text(
        alphabet="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_",
        **kwargs).filter(lambda x: x[0].isalpha() and x.isidentifier() and not (iskeyword(x)))


# This strategy generates a dict of kwargs needed to instantiate a valid
# ParamSpec object
valid_paramspec_kwargs = hst.fixed_dictionaries({
    'name': valid_identifier(min_size=1, max_size=6),
    'paramtype': hst.sampled_from(['numeric', 'array', 'text']),
    'label': hst.one_of(hst.none(), hst.text(min_size=0, max_size=6)),
    'unit': hst.one_of(hst.none(), hst.text(min_size=0, max_size=2)),
    'depends_on': hst.lists(hst.text(min_size=1, max_size=3),
                            min_size=0, max_size=3),
    'inferred_from': hst.lists(hst.text(min_size=1, max_size=3),
                               min_size=0, max_size=3)
})


@pytest.fixture
def version_0_serializations():
    sers = []
    sers.append({
        'name': 'dmm_v1',
        'paramtype': 'numeric',
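# Usage sketch for valid_paramspec_kwargs above (an assumption, not from the
# original source: it presumes QCoDeS' ParamSpec accepts exactly these keyword
# arguments, as the dictionary keys suggest, and that the import path below is
# the one used by this test module).
from hypothesis import given
from qcodes.dataset.param_spec import ParamSpec


@given(valid_paramspec_kwargs)
def test_paramspec_accepts_generated_kwargs(kwargs):
    ps = ParamSpec(**kwargs)
    assert ps.name == kwargs['name']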
from datetime import timedelta

import hypothesis.strategies as st
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pyarrow as pa
from hypothesis import example, given, settings

from fletcher.algorithms.bool import all_op, any_op


@settings(deadline=timedelta(milliseconds=1000))
@given(data=st.lists(st.one_of(st.booleans(), st.none())), skipna=st.booleans())
@example([], False)
@example([], True)
# Test with numpy.array as input.
# This has the caveat that the missing buffer is None.
@example(np.ones(10).astype(bool), False)
@example(np.ones(10).astype(bool), True)
def test_any_op(data, skipna):
    arrow = pa.array(data, type=pa.bool_())
    # TODO(pandas-0.26): Use pandas.BooleanArray
    # https://github.com/pandas-dev/pandas/issues/27709 / https://github.com/pandas-dev/pandas/issues/12863
    pandas = pd.Series(data, dtype=float)

    assert any_op(arrow, skipna) == pandas.any(skipna=skipna)

    # Split in the middle and check whether this still works
    if len(data) > 2:
        arrow = pa.chunked_array(
def test_can_find_none_list():
    assert minimal(ds.lists(ds.none()), lambda x: len(x) >= 3) == [None] * 3
def steps(self): return none()
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    min_dt = Timestamp(1900, 1, 1).to_pydatetime()
    max_dt = Timestamp(1900, 1, 1).to_pydatetime()

gen_date_range = st.builds(
    pd.date_range,
    start=st.datetimes(
        # TODO: Choose the min/max values more systematically
        min_value=Timestamp(1900, 1, 1).to_pydatetime(),
        max_value=Timestamp(2100, 1, 1).to_pydatetime(),
    ),
    periods=st.integers(min_value=2, max_value=100),
    freq=st.sampled_from("Y Q M D H T s ms us ns".split()),
    tz=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)

gen_random_datetime = st.datetimes(
    min_value=min_dt,
    max_value=max_dt,
    timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)

# The strategy for each type is registered in conftest.py, as they don't carry
# enough runtime information (e.g. type hints) to infer how to build them.
gen_yqm_offset = st.one_of(*map(
    st.from_type,
    [
        MonthBegin,
        MonthEnd,
def test_no_such_example():
    with pytest.raises(NoSuchExample):
        find(s.none(), bool, database_key=b"no such example")
    assert deserialize(json.dumps(x), Bool) == x


@given(st.floats(allow_infinity=False, allow_nan=False))
def test_serialize_float(x: float):
    assert deserialize(json.dumps(x), Float) == x


# TODO failing for the edge case: x = '"'
# @given(st.text())
def test_serialize_str():
    assert deserialize(json.dumps("hello"), Str) == "hello"


# TODO fix the way we check for equality
@given(st.none())
def test_serialize_none(x: None):
    assert deserialize(json.dumps(x), Null) == None


def test_parsing_error_int():
    with pytest.raises(ParsingError):  # type: ignore
        deserialize("Not an int", Int)


def test_parsing_error_bool():
    with pytest.raises(ParsingError):  # type: ignore
        deserialize("Not a bool", Bool)


def test_parsing_error_float():
def test_recursive_call_validates_base_is_strategy():
    x = st.recursive(1, lambda x: st.none())
    with pytest.raises(InvalidArgument):
        x.example()
def arrays(draw, type, size=None, nullable=True):
    if isinstance(type, st.SearchStrategy):
        ty = draw(type)
    elif isinstance(type, pa.DataType):
        ty = type
    else:
        raise TypeError('Type must be a pyarrow DataType')

    if isinstance(size, st.SearchStrategy):
        size = draw(size)
    elif size is None:
        size = draw(_default_array_sizes)
    elif not isinstance(size, int):
        raise TypeError('Size must be an integer')

    if pa.types.is_null(ty):
        h.assume(nullable)
        value = st.none()
    elif pa.types.is_boolean(ty):
        value = st.booleans()
    elif pa.types.is_integer(ty):
        values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size,)))
        return pa.array(values, type=ty)
    elif pa.types.is_floating(ty):
        values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size,)))
        # Workaround ARROW-4952: no easy way to assert array equality
        # in a NaN-tolerant way.
        values[np.isnan(values)] = -42.0
        return pa.array(values, type=ty)
    elif pa.types.is_decimal(ty):
        # TODO(kszucs): properly limit the precision
        # value = st.decimals(places=type.scale, allow_infinity=False)
        h.reject()
    elif pa.types.is_time(ty):
        value = st.times()
    elif pa.types.is_date(ty):
        value = st.dates()
    elif pa.types.is_timestamp(ty):
        min_int64 = -(2**63)
        max_int64 = 2**63 - 1
        min_datetime = datetime.datetime.fromtimestamp(min_int64 // 10**9)
        max_datetime = datetime.datetime.fromtimestamp(max_int64 // 10**9)
        try:
            offset_hours = int(ty.tz)
            tz = pytz.FixedOffset(offset_hours * 60)
        except ValueError:
            tz = pytz.timezone(ty.tz)
        value = st.datetimes(timezones=st.just(tz), min_value=min_datetime,
                             max_value=max_datetime)
    elif pa.types.is_duration(ty):
        value = st.timedeltas()
    elif pa.types.is_binary(ty) or pa.types.is_large_binary(ty):
        value = st.binary()
    elif pa.types.is_string(ty) or pa.types.is_large_string(ty):
        value = st.text()
    elif pa.types.is_fixed_size_binary(ty):
        value = st.binary(min_size=ty.byte_width, max_size=ty.byte_width)
    elif pa.types.is_list(ty):
        value = _pylist(ty.value_type, size=size, nullable=nullable)
    elif pa.types.is_large_list(ty):
        value = _pylist(ty.value_type, size=size, nullable=nullable)
    elif pa.types.is_fixed_size_list(ty):
        value = _pylist(ty.value_type, size=ty.list_size, nullable=nullable)
    elif pa.types.is_dictionary(ty):
        values = _pylist(ty.value_type, size=size, nullable=nullable)
        return pa.array(draw(values), type=ty)
    elif pa.types.is_map(ty):
        value = _pymap(ty.key_type, ty.item_type, size=_default_array_sizes,
                       nullable=nullable)
    elif pa.types.is_struct(ty):
        h.assume(len(ty) > 0)
        fields, child_arrays = [], []
        for field in ty:
            fields.append(field)
            child_arrays.append(draw(arrays(field.type, size=size)))
        return pa.StructArray.from_arrays(child_arrays, fields=fields)
    else:
        raise NotImplementedError(ty)

    if nullable:
        value = st.one_of(st.none(), value)
    values = st.lists(value, min_size=size, max_size=size)

    return pa.array(draw(values), type=ty)
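# Usage sketch for the composite above (an assumption, not from the original
# source: it presumes `arrays` is decorated with @st.composite, as its `draw`
# parameter suggests, and that `h` is the hypothesis module as used in the
# function body).
@h.given(arrays(pa.int64(), size=5))
def test_generated_array_has_requested_length(arr):
    assert isinstance(arr, pa.Array)
    assert len(arr) == 5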
"""Minimal test that our parameter derivation logic does what we expect. """ import struct from hypothesis import given import hypothesis.strategies as st from Crypto.Cipher import Salsa20 from umash import C, FFI @given( bits=st.integers(min_value=0, max_value=2**64 - 1), key=st.none() | st.binary(min_size=32, max_size=32), ) def test_public_params_derive(bits, key): length = FFI.sizeof("struct umash_params") umash_key = b"Do not use UMASH VS adversaries." if key is not None: umash_key = key nonce = struct.pack("<Q", bits) expected = FFI.new("struct umash_params[1]") salsa_bytes = Salsa20.new(umash_key, nonce).encrypt(b"\x00" * length) FFI.memmove(expected, salsa_bytes, length) assert C.umash_params_prepare(expected) actual = FFI.new("struct umash_params[1]") if key is None: C.umash_params_derive(actual, bits, FFI.NULL) else:
def boolean(
        nullable: bool = True) -> st.SearchStrategy[graphql.BooleanValueNode]:
    value = st.booleans()
    if nullable:
        value |= st.none()
    return st.builds(graphql.BooleanValueNode, value=value)
def test_cannot_force_inference_for_unannotated_arg():
    with pytest.raises(InvalidArgument):
        st.builds(non_annotated_func, a=infer, c=st.none()).example()
    with pytest.raises(InvalidArgument):
        st.builds(non_annotated_func, a=st.none(), c=infer).example()
def string(
        nullable: bool = True) -> st.SearchStrategy[graphql.StringValueNode]:
    value = st.text()
    if nullable:
        value |= st.none()
    return st.builds(graphql.StringValueNode, value=value)
@given(
    params=st.fixed_dictionaries(
        {},
        optional={
            "auth": delimited(),
            "auth-type": st.sampled_from(["basic", "digest", "BASIC", "DIGEST"]),
            "workers": st.integers(min_value=1, max_value=64),
            "request-timeout": st.integers(),
            "validate-schema": st.booleans(),
            "hypothesis-deadline": st.integers() | st.none(),
            "hypothesis-max-examples": st.integers(),
            "hypothesis-report-multiple-bugs": st.booleans(),
            "hypothesis-seed": st.integers(),
            "hypothesis-verbosity": st.sampled_from([item.name for item in Verbosity]),
        },
    ).map(
        lambda params: [f"--{key}={value}" for key, value in params.items()]),
    flags=st.fixed_dictionaries(
        {},
        optional={
            key: st.booleans()
def float_(nullable: bool = True) -> st.SearchStrategy[graphql.FloatValueNode]:
    value = st.floats(allow_infinity=False, allow_nan=False).map(str)
    if nullable:
        value |= st.none()
    return st.builds(graphql.FloatValueNode, value=value)
    return handler_


@given(name=st.text())
def test_get_logger(name):
    """ Make sure get_logger gives the correct logger. """
    default_logger = logging.getLogger(name)
    vm_logger = get_logger(name)
    assert vm_logger.logger is default_logger


@example(args=[], type_='general', default_type='general', extra=None)
@given(args=st.lists(st.text(), min_size=0, max_size=5),
       type_=st.one_of(st.none(), st.text()),
       default_type=st.text(min_size=1),
       extra=st.one_of(
           st.none(),
           st.dictionaries(KWARG_ST, st.integers(), min_size=0, max_size=5)))
def test_type_adapter(logger, handler, args, type_, default_type, extra):
    """Make sure the TypeAdapter sets the correct type attr"""
    def test(record):
        """Make sure the type attribute is as expected"""
        rectype = getattr(record, 'type', None)
        if rectype is None:
            assert False, "No type for record!"
        if type_ is None:
            assert rectype == default_type
        else:
            assert rectype == type_
def int_(nullable: bool = True) -> st.SearchStrategy[graphql.IntValueNode]:
    value = st.integers().map(str)
    if nullable:
        value |= st.none()
    return st.builds(graphql.IntValueNode, value=value)
@pytest.mark.parametrize("key ,value, expected", [([1, 2, 3, 4, 5], ['h', 'e', 'l', 'l', 'o'], { 1: 'h', 2: 'e', 3: 'l', 4: 'l', 6: 'o' }), (['M', 'a', 'y'], ['J', 'u', 'n', 'e'], { 'M': 'J', 'a': 'u', 'y': 'n' })]) def test_dic(value, expected): assert (dic(key, value) == expected) import hypothesis.strategies as st from hypothesis import given @given(st.lists(st.integers()), st.lists(st.integers())) def test_hyp(x, y): assert len(dic(x, y)) == len(dict.fromkeys(x, None)) @given(st.lists(st.text()), st.lists(st.none())) def test_hyp_hyp_none(x, y): assert dic(x, y) == dict.fromkeys(x, None)
def enum(type_: graphql.GraphQLEnumType,
         nullable: bool = True) -> st.SearchStrategy[graphql.EnumValueNode]:
    enum_value = st.sampled_from(sorted(type_.values))
    if nullable:
        enum_value |= st.none()
    return st.builds(graphql.EnumValueNode, value=enum_value)
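# Usage sketch combining the scalar-node strategies defined above (boolean,
# string, float_, int_); hedged in that it assumes graphql-core's value nodes
# simply store whatever `value` they are built with.
import hypothesis
from hypothesis import strategies as st


@hypothesis.given(st.one_of(boolean(), string(), float_(), int_()))
def test_scalar_node_values_are_optional(node):
    # With nullable=True (the default) every node's value may also be None.
    assert node.value is None or isinstance(node.value, (str, bool))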
def test_recursion_does_not_break_reusability():
    x = st.deferred(lambda: st.none() | st.tuples(x))
    assert x.has_reusable_values
def to_unexpected_args(
        signature: Base,
        *,
        values: Strategy[Domain] = strategies.none()) -> Strategy[Args]:
    count = signature_to_max_positionals_count(signature) + 1
    return to_homogeneous_tuples(values, min_size=count)
    # Build a strategy by removing the microsecond from a datetimes strategy
    # https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.builds
    return builds(
        lambda inDt: inDt.replace(microsecond=0),
        datetimes(min_value=datetime.datetime(1900, 1, 1, 0, 0),
                  max_value=datetime.datetime.max,
                  timezones=none()),
    )


# These initial strategies are the basic types supported by the original dEncode.
# Unfortunately we cannot make nested structures with floats because the floats
# are not stable, the result is approximative, and it becomes extremely
# difficult to compare.
# Datetimes also start only at 1900 because earlier dates can't be dumped with strftime.
initialStrategies = none() | booleans() | text() | integers() | myDatetimes() | myDates()

initialJsonStrategies = none() | booleans() | text() | myDatetimes() | myDates()

# From a strategy (x), make a new strategy
# We basically use that to make nested structures
# see http://hypothesis.readthedocs.io/en/latest/data.html#recursive-data
nestedStrategy = recursive(
    initialStrategies, lambda x: lists(x) | dictionaries(text(), x) | tuples(x))

# This strategy does not return tuples
nestedStrategyJson = recursive(initialJsonStrategies,
                               lambda x: lists(x) | dictionaries(text(), x))
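# Usage sketch for the nested strategies above: a round-trip property test.
# `dumps`/`loads` are placeholders for the module's own encode/decode pair
# (hypothetical names, not taken from the original source).
from hypothesis import given


@given(nestedStrategyJson)
def test_nested_value_roundtrips(value):
    assert loads(dumps(value)) == value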
    assert t == {False: 0, True: 0}


@given(ds.dictionaries(ds.integers(), ds.integers(), max_size=5))
@settings(max_examples=50)
def test_dictionaries_respect_size(d):
    assert len(d) <= 5


@given(ds.dictionaries(ds.integers(), ds.integers(), max_size=0))
@settings(max_examples=50)
def test_dictionaries_respect_zero_size(d):
    assert len(d) <= 5


@given(ds.lists(ds.none(), max_size=5))
def test_none_lists_respect_max_size(ls):
    assert len(ls) <= 5


@given(ds.lists(ds.none(), max_size=5, min_size=1))
def test_none_lists_respect_max_and_min_size(ls):
    assert 1 <= len(ls) <= 5


@given(ds.iterables(ds.integers(), max_size=5, min_size=1))
def test_iterables_are_exhaustible(it):
    for _ in it:
        pass
    with pytest.raises(StopIteration):
        next(it)