@composite
def make_department(draw, did):
    # `draw` is supplied by hypothesis.strategies.composite
    return {
        'id': str(did),
        'hierarchy_level': 0,
        'organization_type': draw(sampled_from(ORGANIZATION_TYPES)),
        'org_id': str(draw(sampled_from(resource_ids['organization'])))
    }
Example 2
def question_type_and_weight() -> SearchStrategy:
    return one_of(
        tuples(sampled_from(Question.CHOICE_TYPES),
               fixed_decimals()),
        tuples(sampled_from(sorted(set(Question.available_types()) - set(Question.CHOICE_TYPES))),
               just(0))
    )
Example 3
@st.composite
def write_pattern(draw, min_size=0):
    keys = draw(st.lists(st.integers(0, 1000), unique=True, min_size=1))
    values = draw(st.lists(st.integers(), unique=True, min_size=1))
    return draw(
        st.lists(
            st.tuples(st.sampled_from(keys), st.sampled_from(values)), min_size=min_size
        )
    )
Example 4
def subset(self, former, data):
    """
    A subset of the previous sequence.
    """
    l, pv = former
    assume(l)
    i = data.draw(st.sampled_from(range(len(l))))
    j = data.draw(st.sampled_from(range(len(l))))
    return l[i:j], pv[i:j]
Example 5
def get_request(data, spec, spec_host):
    endpoint_path = data.draw(st.sampled_from(list(spec['paths'])))  # list(): sampled_from needs a sequence, not a dict view
    endpoint = spec['paths'][endpoint_path]

    method_name = data.draw(st.sampled_from(list(endpoint)))
    endpoint = endpoint[method_name]

    path_params = _get_filtered_parameter(endpoint, 'path', spec)
    path_args = data.draw(st.fixed_dictionaries(path_params))

    query_params = _get_filtered_parameter(endpoint, 'query', spec)
    query_args = data.draw(st.fixed_dictionaries(query_params))

    body_params = _get_filtered_parameter(endpoint, 'body', spec)
    if body_params:
        body_args = data.draw(body_params['body'])
    else:
        body_args = None

    valid_request_body_format = get_item_path_acceptable_format(endpoint, spec)

    request_data = None
    request_headers = {}

    if body_args:
        # no_body_format_declaration(body_args, valid_request_body_format, endpoint)
        if valid_request_body_format is None:  # body_args is already truthy here
            # Force a request format, swagger ui seems to force json format
            valid_request_body_format = ["application/json"]

        request_body_format = data.draw(st.sampled_from(valid_request_body_format), 'request_body_format')

        request_headers['Content-Type'] = request_body_format
        if request_body_format == 'application/x-www-form-urlencoded':
            request_data = body_args
        elif request_body_format == 'application/json':
            request_data = json.dumps(body_args, cls=CustomJsonEncoder)
        elif request_body_format == 'application/xml':
            pass
            # TODO Implement XML
        else:
            raise Exception(request_body_format)

    endpoint_url = endpoint_path.format(**path_args)
    assume('\x00' not in endpoint_url)

    # Generate request
    URL = furl(spec_host)
    URL = URL.join(endpoint_url.lstrip('/'))

    if query_args:
        URL = URL.add(args=query_args)

    request = Request(method_name, URL.url, data=request_data,
                      headers=request_headers).prepare()
    request.build_context = locals()
    return request
Example 6
def resolve_Type(thing):
    if thing.__args__ is None:
        return st.just(type)
    inner = thing.__args__[0]
    if getattr(inner, '__origin__', None) is typing.Union:
        return st.sampled_from(inner.__args__)
    elif hasattr(inner, '__union_params__'):  # pragma: no cover
        return st.sampled_from(inner.__union_params__)
    return st.just(inner)
Example 7
@st.composite
def _dev_options(draw):
    op_dev = draw(st.sampled_from(hu.device_options))
    if op_dev == hu.cpu_do:
        # the CPU op can only handle CPU tensor
        input_blob_dev = hu.cpu_do
    else:
        input_blob_dev = draw(st.sampled_from(hu.device_options))

    return op_dev, input_blob_dev
Example 8
def steps(self):
    if not self.space:
        return builds(Action, just('setup'), tuples(st_keys, st_values))
    global_actions = [Action('copydict', ()), Action('cleardict', ())]
    if self.space.reference:
        return (
            self.st_setitem() | sampled_from(global_actions) |
            self.st_updateitem() | self.st_delitem())
    else:
        return (self.st_setitem() | sampled_from(global_actions))
Example 9
@composite
def prob_end_spatial_tournaments(draw, strategies=strategies,
                                min_size=1, max_size=10,
                                min_prob_end=0, max_prob_end=1,
                                min_noise=0, max_noise=1,
                                min_repetitions=1, max_repetitions=20):
    """
    A hypothesis decorator to return a probabilistic ending spatial tournament.

    Parameters
    ----------
    min_size : integer
        The minimum number of strategies to include
    max_size : integer
        The maximum number of strategies to include
    min_prob_end : float
        The minimum probability of a match ending
    max_prob_end : float
        The maximum probability of a match ending
    min_noise : float
        The minimum noise value
    max_noise : float
        The maximum noise value
    min_repetitions : integer
        The minimum number of repetitions
    max_repetitions : integer
        The maximum number of repetitions
    """
    strategies = draw(strategy_lists(strategies=strategies,
                                     min_size=min_size,
                                     max_size=max_size))
    players = [s() for s in strategies]
    player_indices = list(range(len(players)))

    all_potential_edges = list(itertools.combinations(player_indices, 2))
    all_potential_edges.extend([(i, i) for i in player_indices])  # Loops
    edges = draw(lists(sampled_from(all_potential_edges), unique=True,
                       average_size=2 * len(players)))

    # Ensure all players/nodes are connected:
    node_indices = sorted(set([node for edge in edges for node in edge]))
    missing_nodes = [index
                     for index in player_indices if index not in node_indices]
    for index in missing_nodes:
        opponent = draw(sampled_from(player_indices))
        edges.append((index, opponent))

    prob_end = draw(floats(min_value=min_prob_end, max_value=max_prob_end))
    repetitions = draw(integers(min_value=min_repetitions,
                                max_value=max_repetitions))
    noise = draw(floats(min_value=min_noise, max_value=max_noise))

    tournament = ProbEndSpatialTournament(players, prob_end=prob_end,
                                          repetitions=repetitions,
                                          noise=noise, edges=edges)
    return tournament
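A hedged usage sketch for the strategy above, assuming the restored @composite decorator and that ProbEndSpatialTournament keeps its players and edges arguments as attributes:

from hypothesis import given, settings

@settings(max_examples=5, deadline=None)
@given(tournament=prob_end_spatial_tournaments(max_size=3, max_repetitions=3))
def test_tournament_edges_cover_players(tournament):
    # the strategy appends an edge for every player missing from the drawn
    # edges, so each player index must appear somewhere
    nodes = {node for edge in tournament.edges for node in edge}
    assert nodes == set(range(len(tournament.players)))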
Example 10
@st.composite
def sample_row(draw):
    """Get a sample database row."""
    return OrderedDict((
        ('id', draw(st.integers(min_value=1))),
        ('lat', draw(st.floats(min_value=-90, max_value=90))),    # latitude spans -90..90
        ('lon', draw(st.floats(min_value=-180, max_value=180))),  # longitude spans -180..180
        ('country', draw(st.sampled_from(['Cintra', 'Arnor', 'Arrakis', 'Gondor']))),
        ('town', draw(st.sampled_from(['Caer Morhen', 'Minas Tirith', 'Sietch Tabr', 'Gondolin']))),
        ('postcode', draw(st.sampled_from(['123123', '23423', '123122', '43223', '231232']))),
        ('street', draw(st.sampled_from(['street1', 'street2', 'street3', 'street4', 'street5']))),
        ('house', draw(st.integers(max_value=500))),
    ))
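A minimal consumer sketch (the column list and ranges follow the strategy as corrected above):

from hypothesis import given

@given(row=sample_row())
def test_sample_row_columns(row):
    assert list(row) == ['id', 'lat', 'lon', 'country', 'town',
                         'postcode', 'street', 'house']
    assert -90 <= row['lat'] <= 90 and -180 <= row['lon'] <= 180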
Example 11
def from_field(field):
    # type: (Type[dm.Field]) -> st.SearchStrategy[dm.Field]
    """Return a strategy for values that fit the given field.

    This is pretty similar to the core `from_type` function, with a subtle
    but important difference: `from_field` takes a Field *instance*, rather
    than a Field *subtype*, so that it has access to instance attributes
    such as string length and validators.
    """
    check_type((dm.Field, df.Field), field, "field")
    if getattr(field, "choices", False):
        choices = []  # type: list
        for value, name_or_optgroup in field.choices:
            if isinstance(name_or_optgroup, (list, tuple)):
                choices.extend(key for key, _ in name_or_optgroup)
            else:
                choices.append(value)
        # form fields automatically include an empty choice, strip it out
        if u"" in choices:
            choices.remove(u"")
        min_size = 1
        if isinstance(field, (dm.CharField, dm.TextField)) and field.blank:
            choices.insert(0, u"")
        elif isinstance(field, (df.Field)) and not field.required:
            choices.insert(0, u"")
            min_size = 0
        strategy = st.sampled_from(choices)
        if isinstance(field, (df.MultipleChoiceField, df.TypedMultipleChoiceField)):
            strategy = st.lists(st.sampled_from(choices), min_size=min_size)
    else:
        if type(field) not in _global_field_lookup:
            if getattr(field, "null", False):
                return st.none()
            raise InvalidArgument("Could not infer a strategy for %r" % (field,))
        strategy = _global_field_lookup[type(field)]
        if not isinstance(strategy, st.SearchStrategy):
            strategy = strategy(field)
    assert isinstance(strategy, st.SearchStrategy)
    if field.validators:

        def validate(value):
            try:
                field.run_validators(value)
                return True
            except django.core.exceptions.ValidationError:
                return False

        strategy = strategy.filter(validate)

    if getattr(field, "null", False):
        return st.none() | strategy
    return strategy
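For context, a sketch of how a field strategy like this is consumed; Room and its 32-character name field are hypothetical stand-ins for any Django model:

from hypothesis import given

name_field = Room._meta.get_field("name")  # hypothetical CharField(max_length=32)

@given(name=from_field(name_field))
def test_name_respects_max_length(name):
    # the validator filter above guarantees generated values pass field validation
    assert len(name) <= 32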
Example 12
def steps(self):
    strat = self.step_strategy('add_span')
    if self.span_ids:
        strat |= self.step_strategy('delete_span',
                                    hs.sampled_from(self.span_ids))
        strat |= self.step_strategy('set_span',
                                    hs.sampled_from(self.span_ids),
                                    hs.integers(-10, 100))
        strat |= self.step_strategy('set_tag',
                                    hs.sampled_from(self.span_ids),
                                    hs.sampled_from(self.TAG_NAMES),
                                    hs.booleans())
    return strat
Example 13
@st.composite
def s3_backend_config_st(draw):
    """Hypothesis strategy for s3 backend configuration."""
    s3_be_dict = {
        'bucket': draw(s3_bucket_name_st()),
        'encrypt': draw(st.sampled_from(['true', 'false'])),
        'key': draw(st.text(
            alphabet=list(ascii_letters + digits + '!-_.*\'()/'),
            min_size=1,
            max_size=1024).filter(lambda x: x[0] not in '/')),
        'region': draw(st.sampled_from(aws_region_list)) }

    if draw(st.booleans()):  # drawn, not random.getrandbits(1), so Hypothesis can replay and shrink the choice
        s3_be_dict['profile'] = 'testawsprofile'
    return s3_be_dict
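A quick interactive draw of the strategy above (.example() is for REPL exploration only, never inside tests):

config = s3_backend_config_st().example()
assert config['encrypt'] in ('true', 'false')
assert not config['key'].startswith('/')  # enforced by the filter on the key text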
Example 14
def mutated_commands(commands):
    args = st.sampled_from([b'withscores', b'xx', b'nx', b'ex', b'px', b'weights', b'aggregate',
                            b'', b'0', b'-1', b'nan', b'inf', b'-inf']) | command_args(commands)
    affixes = st.sampled_from([b'\0', b'-', b'+', b'\t', b'\n', b'0000']) | st.binary()
    return st.recursive(
        commands,
        lambda x:
            delete_arg(x)
            | replace_arg(x, args)
            | uppercase_arg(x)
            | prefix_arg(x, affixes)
            | suffix_arg(x, affixes)
            | add_arg(x, args)
            | swap_args(x))
Example 15
def test_sorting_fields(tmpdir, runner, default_database):
    tasks = []
    for i in range(1, 10):
        days = datetime.timedelta(days=i)

        todo = Todo()
        todo.due = datetime.datetime.now() + days
        todo.created_at = datetime.datetime.now() - days
        todo.summary = 'harhar{}'.format(i)
        tasks.append(todo)

        default_database.save(todo)

    fields = tuple(field for field in dir(Todo) if not
                   field.startswith('_'))

    @given(sort_key=st.lists(
        st.sampled_from(fields + tuple('-' + x for x in fields)),
        unique=True
    ))
    def run_test(sort_key):
        sort_key = ','.join(sort_key)
        result = runner.invoke(cli, ['list', '--sort', sort_key])
        assert not result.exception
        assert result.exit_code == 0
        assert len(result.output.strip().splitlines()) == len(tasks)

    run_test()
Example 16
def test_create_with_profile(self):
    """
    Calling the ``/VolumeDriver.Create`` API with an ``Opts`` value of
    ``{"profile": "gold"|"silver"|"bronze"}`` in the request body JSON
    creates a volume with the given name and the requested profile.
    """
    profile = sampled_from(["gold", "silver", "bronze"]).example()
    name = random_name(self)
    d = self.assertResult(
        b"POST", b"/VolumeDriver.Create", {u"Name": name, "Opts": {u"profile": profile}}, OK, {u"Err": u""}
    )
    d.addCallback(lambda _: self.flocker_client.list_datasets_configuration())
    d.addCallback(list)
    d.addCallback(
        lambda result: self.assertItemsEqual(
            result,
            [
                Dataset(
                    dataset_id=result[0].dataset_id,
                    primary=self.NODE_A,
                    maximum_size=int(DEFAULT_SIZE.to_Byte()),
                    metadata={NAME_FIELD: name, u"clusterhq:flocker:profile": unicode(profile)},
                )
            ],
        )
    )
    return d
Example 17
@strategies.composite
def valid_key_string(draw):
    """
    A strategy that generates valid serialized OpaqueKeys.
    """
    key_type = draw(strategies.shared(strategies.sampled_from(KEY_TYPES), key="key_type"))
    key = draw(keys_of_type(key_type))
    return text_type(key)
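Because the key type is drawn through strategies.shared with the "key_type" key, any other strategy sharing that key inside the same example will agree on the sampled type. A minimal consumer sketch:

from hypothesis import given

@given(serialized=valid_key_string())
def test_serialized_keys_are_text(serialized):
    assert isinstance(serialized, str)
    assert serialized  # assumption: no OpaqueKey serializes to an empty string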
Example 18
def header(header_class, **kwargs):
    """Create a strategy for producing headers of a specific class.

    Args:
        header_class: The type of header to be produced. This class will be
            introspected to determine suitable strategies for each named
            field.

        **kwargs: Any supplied keyword arguments can be used to fix the value
            of particular header fields.
    """

    field_strategies = {}
    for field_name in header_class.ordered_field_names():
        if field_name in kwargs:
            field_strategy = just(kwargs.pop(field_name))
        else:
            value_type = getattr(header_class, field_name).value_type
            if hasattr(value_type, 'ENUM'):
                field_strategy = sampled_from(sorted(value_type.ENUM))
            else:
                field_strategy = integers(value_type.MINIMUM, value_type.MAXIMUM)
        field_strategies[field_name] = field_strategy

    if len(kwargs) > 0:
        raise TypeError("Unrecognised binary header field names {} for {}".format(
            ', '.join(kwargs.keys()),
            header_class.__name__))

    return fixed_dictionaries(field_strategies) \
           .map(lambda kw: header_class(**kw))
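A usage sketch; BinaryReelHeader and its sample_interval field are hypothetical stand-ins for any header class exposing ordered_field_names() as described above:

from hypothesis import given

@given(h=header(BinaryReelHeader, sample_interval=4))
def test_pinned_field_stays_fixed(h):
    # the keyword pins sample_interval; every other field is drawn from its value_type
    assert h.sample_interval == 4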
Example 19
def http_headers():
    """
    Strategy for generating ``Headers`` populated with random HTTP
    headers.

    This could probably use some more work.
    """
    return dictionaries(
        keys=sampled_from((
            b"accept",
            b"accept-charset",
            b"accept-encoding",
            b"accept-language",
            b"accept-ranges",
            b"age",
            b"allow",
            b"authorization",
            b"cache-control",
            b"connection",
            b"content-encoding",
            b"content-language",
            # XXX The rest, I guess, plus randomly generate some?
        )),
        values=text().map(lambda x: x.encode("utf-8")),
    ).map(
        lambda h: Headers({k: [v] for (k, v) in h.items()})
    )
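And a consumer sketch, assuming Twisted's Headers API as used in the strategy:

from hypothesis import given

@given(headers=http_headers())
def test_each_header_carries_one_value(headers):
    # the final .map wraps every value in a single-element list
    for name, values in headers.getAllRawHeaders():
        assert len(values) == 1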
Example 20
    def test_sparse_momentum_sgd(
        self, inputs, momentum, nesterov, lr, data_strategy, gc, dc
    ):
        w, grad, m = inputs

        # Create an indexing array containing values which index into grad
        indices = data_strategy.draw(
            hu.tensor(
                max_dim=1,
                min_value=1,
                max_value=grad.shape[0],
                dtype=np.int64,
                elements=st.sampled_from(np.arange(grad.shape[0])),
            ),
        )

        # Verify that the generated indices are unique
        hypothesis.assume(
            np.array_equal(
                np.unique(indices.flatten()),
                np.sort(indices.flatten())))

        # Sparsify grad
        grad = grad[indices]

        # Make momentum >= 0
        m = np.abs(m)

        # Convert lr to a numpy array
        lr = np.asarray([lr], dtype=np.float32)

        op = core.CreateOperator(
            "SparseMomentumSGDUpdate", ["grad", "m", "lr", "param", "indices"],
            ["adjusted_grad", "m", "param"],
            momentum=momentum,
            nesterov=int(nesterov),
            device_option=gc
        )

        # Reference
        def momentum_sgd(grad, m, lr):
            lr = lr[0]
            if not nesterov:
                adjusted_gradient = lr * grad + momentum * m
                return (adjusted_gradient, adjusted_gradient)
            else:
                m_new = momentum * m + lr * grad
                return ((1 + momentum) * m_new - momentum * m, m_new)

        def sparse(grad, m, lr, param, i):
            grad_new, m_new = momentum_sgd(grad, m[i], lr)
            m[i] = m_new
            param[i] -= grad_new
            return (grad_new, m, param)

        self.assertReferenceChecks(
            gc,
            op,
            [grad, m, lr, w, indices],
            sparse)
Example 21
def tagged_union_strategy(type, attr_strategies):
    """
    Create a strategy for building a type with a ``TaggedUnionInvariant``.

    :param type: Type to generate a strategy for.
    :param attr_strategies: Mapping of attributes to strategies to
        generate corresponding attributes.
    :type attr_strategies: ``dict`` mapping ``str`` to ``SearchStrategy``s.
    """
    invariant = type.__invariant__

    def build(tag):
        args = {
            invariant.tag_attribute: just(tag),
        }
        args.update({
            attribute: strategy
            for attribute, strategy in attr_strategies.items()
            if (
              attribute in invariant.attributes_for_tag[tag] or
              attribute not in invariant._all_attributes
            )
        })
        return fixed_dictionaries(args).map(lambda kwargs: type(**kwargs))

    return sampled_from(invariant._allowed_tags).flatmap(build)
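A hedged wiring sketch; Dataset stands in for any type whose __invariant__ is a TaggedUnionInvariant, and the attribute strategies are illustrative:

from hypothesis import given
from hypothesis.strategies import integers, uuids

dataset_strategy = tagged_union_strategy(Dataset, {
    'dataset_id': uuids(),
    'maximum_size': integers(min_value=0),
})

@given(dataset_strategy)
def test_tagged_union_invariant_holds(dataset):
    # type(**kwargs) inside build() would already have raised on an invariant violation
    assert dataset.dataset_id is not None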
Example 22
def _get_strategy_for_field(f):
    if isinstance(f, dm.AutoField):
        return default_value
    elif f.choices:
        choices = [value for (value, name) in f.choices]
        if isinstance(f, (dm.CharField, dm.TextField)) and f.blank:
            choices.append(u'')
        strategy = st.sampled_from(choices)
    elif isinstance(f, dm.EmailField):
        return ff.fake_factory(u'email')
    elif type(f) in (dm.TextField, dm.CharField):
        strategy = st.text(min_size=(None if f.blank else 1),
                           max_size=f.max_length)
    elif type(f) == dm.DecimalField:
        m = 10 ** f.max_digits - 1
        div = 10 ** f.decimal_places
        q = Decimal('1.' + ('0' * f.decimal_places))
        strategy = (
            st.integers(min_value=-m, max_value=m)
            .map(lambda n: (Decimal(n) / div).quantize(q)))
    else:
        try:
            strategy = field_mappings()[type(f)]
        except KeyError:
            if f.null:
                return None
            else:
                raise UnmappedFieldError(f)
    if f.validators:
        strategy = strategy.filter(validator_to_filter(f))
    if f.null:
        strategy = st.one_of(st.none(), strategy)
    return strategy
Example 23
@composite
def jobs(
        draw,
        ids=uuids(),
        statuses=sampled_from(JobInterface.JobStatus),
        parameters=dictionaries(text(), text()),
        results=dictionaries(text(), text()),
        dates_submitted=datetimes(),
        registration_schemas=dictionaries(text(), text()),
        result_schemas=dictionaries(text(), text())
) -> JobInterface:
    """

    :param draw: A function that can take a strategy and draw a datum from it
    :param ids: A hypothesis strategy (statisticians should read "random
        variable"), that represents the set of all valid job IDs
    :param statuses: A hypothesis strategy that samples from the set of all
        allowed job statuses
    :param parameters: A hypothesis strategy that samples from all job
        parameters
    :param results: A hypothesis strategy that represents the possible results
    :param dates_submitted: A hypothesis strategy that represents the
        possible dates that can be submitted
    :param registration_schemas: The possible job registration schemas
    :param result_schemas: The possible job result schemas
    :return: A randomly-generated implementation of :class:`JobInterface`
    """
    return Job(
        draw(ids), draw(statuses), draw(parameters), draw(results),
        draw(dates_submitted),
        draw(registration_schemas),
        draw(result_schemas)
    )
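A consumer sketch under the same names, assuming the restored @composite decorator and that Job exposes its constructor arguments as attributes:

from hypothesis import given

@given(job=jobs())
def test_job_reports_a_valid_status(job):
    assert job.status in JobInterface.JobStatus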
Example 24
def steps(self):
    strategies = []
    for rule in self.rules():
        converted_arguments = {}
        valid = True
        if rule.precondition is not None and not rule.precondition(self):
            continue
        for k, v in sorted(rule.arguments.items()):
            if isinstance(v, Bundle):
                bundle = self.bundle(v.name)
                if not bundle:
                    valid = False
                    break
                else:
                    v = sampled_from(bundle)
            converted_arguments[k] = v
        if valid:
            strategies.append(TupleStrategy((
                just(rule),
                FixedKeysDictStrategy(converted_arguments)
            ), tuple))
    if not strategies:
        raise InvalidDefinition(
            u'No progress can be made from state %r' % (self,)
        )
    return one_of(*strategies)
Example 25
    def test_sparse_adagrad(self, inputs, lr, epsilon,
                            data_strategy, gc, dc):
        param, momentum, grad = inputs
        momentum = np.abs(momentum)
        lr = np.array([lr], dtype=np.float32)

        # Create an indexing array containing values that are lists of indices,
        # which index into grad
        indices = data_strategy.draw(
            hu.tensor(dtype=np.int64,
                      elements=st.sampled_from(np.arange(grad.shape[0]))),
        )
        hypothesis.note('indices.shape: %s' % str(indices.shape))

        # For now, the indices must be unique
        hypothesis.assume(np.array_equal(np.unique(indices.flatten()),
                                         np.sort(indices.flatten())))

        # Sparsify grad
        grad = grad[indices]

        op = core.CreateOperator(
            "SparseAdagrad",
            ["param", "momentum", "indices", "grad", "lr"],
            ["param", "momentum"],
            epsilon=epsilon,
            device_option=gc)

        def ref_sparse(param, momentum, indices, grad, lr, ref_using_fp16=False):
            param_out = np.copy(param)
            momentum_out = np.copy(momentum)
            for i, index in enumerate(indices):
                param_out[index], momentum_out[index] = self.ref_adagrad(
                    param[index],
                    momentum[index],
                    grad[i],
                    lr,
                    epsilon,
                    using_fp16=ref_using_fp16
                )
            return (param_out, momentum_out)

        ref_using_fp16_values = [False]
        if dc == hu.gpu_do:
            ref_using_fp16_values.append(True)

        for ref_using_fp16 in ref_using_fp16_values:
            if(ref_using_fp16):
                print('test_sparse_adagrad with half precision embedding')
                momentum_i = momentum.astype(np.float16)
                param_i = param.astype(np.float16)
            else:
                print('test_sparse_adagrad with full precision embedding')
                momentum_i = momentum.astype(np.float32)
                param_i = param.astype(np.float32)

            self.assertReferenceChecks(
                gc, op, [param_i, momentum_i, indices, grad, lr, ref_using_fp16],
                ref_sparse
            )
Example 26
@st.composite
def fstrings(draw):
    """
    Generate a valid f-string.
    See https://www.python.org/dev/peps/pep-0498/#specification

    :param draw: Let hypothesis draw from other strategies.

    :return: A valid f-string.
    """
    character_strategy = st.characters(
        blacklist_characters='\r\n\'\\s{}',
        min_codepoint=1,
        max_codepoint=1000,
    )
    is_raw = draw(st.booleans())
    integer_strategy = st.integers(min_value=0, max_value=3)
    expression_count = draw(integer_strategy)
    content = []
    for _ in range(expression_count):
        expression = draw(expressions())
        conversion = draw(st.sampled_from(('', '!s', '!r', '!a',)))
        has_specifier = draw(st.booleans())
        specifier = ':' + draw(format_specifiers()) if has_specifier else ''
        content.append('{{{}{}{}}}'.format(expression, conversion, specifier))
        content.append(draw(st.text(character_strategy)))
    content = ''.join(content)
    return "f{}'{}'".format('r' if is_raw else '', content)
Example 27
def api_results(min_size=0, max_size=20, hook_types=None):
    count = integers(min_value=min_size, max_value=max_size).example()
    hook_types = hook_types or get_hook_names()

    return fixed_dictionaries(
        {
            "count": just(count),
            "next": none(),
            "prev": none(),
            "results": lists(
                fixed_dictionaries(
                    {
                        "name": text(min_size=1),
                        "latest_version": integers(min_value=0),
                        "content": fixed_dictionaries(
                            {
                                "hook_type": sampled_from(hook_types),
                                "version": integers(min_value=0),
                                "description": text(min_size=1),
                                "download_url": text(min_size=1),
                                "checksum": text(min_size=1),
                            }
                        ),
                    }
                ),
                min_size=count,
                max_size=count,
            ),
        }
    )
Example 28
def prf_given():
    """
    A wrapper for :py:func:`hypothesis.given` that establishes
    parameters common to all Pseudo-Random Function tests.

    :return: The same opaque type returned by
             :py:func:`hypothesis.given`
    """
    _prf_given = given(secret=st.binary(max_size=4096),
                       label=ascii_bytes(min_size=1, max_size=1024),
                       # OpenSSL does not use seeds longer than 1024 bytes
                       seed=st.binary(max_size=1024),
                       hash_cls=st.sampled_from([
                           hashes.SHA1,
                           hashes.SHA224,
                           hashes.SHA256,
                           hashes.SHA384,
                           hashes.SHA512,
                           hashes.RIPEMD160,
                           hashes.Whirlpool,
                           hashes.MD5,
                       ]),
                       output_length=st.integers(min_value=0, max_value=1024))

    def _ignore_slow_and_large_prf_given(function):
        """
        Suppress data generation and size speed checks.
        """
        ignore_slow = settings(suppress_health_check=[
            HealthCheck.data_too_large,
            HealthCheck.too_slow,
        ])
        return ignore_slow(_prf_given(function))

    return _ignore_slow_and_large_prf_given
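And how a test consumes the wrapper; prf below is a stand-in for whichever pseudo-random function the suite exercises:

@prf_given()
def test_prf_respects_output_length(secret, label, seed, hash_cls,
                                    output_length):
    # `prf` is a hypothetical PRF under test; the parameters match the
    # keyword strategies passed to `given` above
    assert len(prf(secret, label, seed, hash_cls, output_length)) == output_length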
Example 29
def mcf_prefix():
    return sampled_from([
        SCRYPT_MCF_PREFIX_7,
        SCRYPT_MCF_PREFIX_s1,
        SCRYPT_MCF_PREFIX_DEFAULT,
        SCRYPT_MCF_PREFIX_ANY,
    ])
Example 30
@st.composite
def remote_init_st(draw):
    """Hypothesis strategy to generate terraform remote init state."""
    be_type = draw(st.sampled_from(['s3']))
    ri_dict = {
        "version": 3,
        "serial": 0,
        "lineage": draw(lineage_st()),
        "backend": {
            "type": be_type,
            "config": draw(get_be_config_st(be_type)()),
            "hash": draw(st.text(alphabet=list(digits), min_size=18, max_size=18))
        },
        "modules": [
            {
                "path": [
                    "root"
                ],
                "outputs": {},
                "resources": {},
                "depends_on": []
            }
        ]
    }

    return ri_dict
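A serialization sketch: drawn states should survive a JSON round trip with the backend type intact:

import json

from hypothesis import given

@given(state=remote_init_st())
def test_state_round_trips_through_json(state):
    assert json.loads(json.dumps(state))["backend"]["type"] == "s3"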
Example 31
class TestAttributes(object):
    """
    Tests for the `attrs`/`attr.s` class decorator.
    """
    @pytest.mark.skipif(not PY2, reason="No old-style classes in Py3")
    def test_catches_old_style(self):
        """
        Raises TypeError on old-style classes.
        """
        with pytest.raises(TypeError) as e:

            @attributes
            class C:
                pass

        assert ("attrs only works with new-style classes.", ) == e.value.args

    def test_sets_attrs(self):
        """
        Sets the `__attrs_attrs__` class attribute with a list of `Attribute`s.
        """
        @attributes
        class C(object):
            x = attr()

        assert "x" == C.__attrs_attrs__[0].name
        assert all(isinstance(a, Attribute) for a in C.__attrs_attrs__)

    def test_empty(self):
        """
        No attributes, no problems.
        """
        @attributes
        class C3(object):
            pass

        assert "C3()" == repr(C3())
        assert C3() == C3()

    @given(attr=attrs, attr_name=sampled_from(Attribute.__slots__))
    def test_immutable(self, attr, attr_name):
        """
        Attribute instances are immutable.
        """
        with pytest.raises(AttributeError):
            setattr(attr, attr_name, 1)

    @pytest.mark.parametrize("method_name", [
        "__repr__",
        "__eq__",
        "__hash__",
        "__init__",
    ])
    def test_adds_all_by_default(self, method_name):
        """
        If no further arguments are supplied, all add_XXX functions except
        add_hash are applied.  __hash__ is set to None.
        """
        # Set the method name to a sentinel and check whether it has been
        # overwritten afterwards.
        sentinel = object()

        class C(object):
            x = attr()

        setattr(C, method_name, sentinel)

        C = attributes(C)
        meth = getattr(C, method_name)

        assert sentinel != meth
        if method_name == "__hash__":
            assert meth is None

    @pytest.mark.parametrize("arg_name, method_name", [
        ("repr", "__repr__"),
        ("cmp", "__eq__"),
        ("hash", "__hash__"),
        ("init", "__init__"),
    ])
    def test_respects_add_arguments(self, arg_name, method_name):
        """
        If a certain `add_XXX` is `False`, `__XXX__` is not added to the class.
        """
        # Set the method name to a sentinel and check whether it has been
        # overwritten afterwards.
        sentinel = object()

        am_args = {"repr": True, "cmp": True, "hash": True, "init": True}
        am_args[arg_name] = False

        class C(object):
            x = attr()

        setattr(C, method_name, sentinel)

        C = attributes(**am_args)(C)

        assert sentinel == getattr(C, method_name)

    @pytest.mark.skipif(PY2, reason="__qualname__ is PY3-only.")
    @given(slots_outer=booleans(), slots_inner=booleans())
    def test_repr_qualname(self, slots_outer, slots_inner):
        """
        On Python 3, the name in repr is the __qualname__.
        """
        @attributes(slots=slots_outer)
        class C(object):
            @attributes(slots=slots_inner)
            class D(object):
                pass

        assert "C.D()" == repr(C.D())
        assert "GC.D()" == repr(GC.D())

    @given(slots_outer=booleans(), slots_inner=booleans())
    def test_repr_fake_qualname(self, slots_outer, slots_inner):
        """
        Setting repr_ns overrides a potentially guessed namespace.
        """
        @attributes(slots=slots_outer)
        class C(object):
            @attributes(repr_ns="C", slots=slots_inner)
            class D(object):
                pass

        assert "C.D()" == repr(C.D())

    @pytest.mark.skipif(PY2, reason="__qualname__ is PY3-only.")
    @given(slots_outer=booleans(), slots_inner=booleans())
    def test_name_not_overridden(self, slots_outer, slots_inner):
        """
        On Python 3, __name__ is different from __qualname__.
        """
        @attributes(slots=slots_outer)
        class C(object):
            @attributes(slots=slots_inner)
            class D(object):
                pass

        assert C.D.__name__ == "D"
        assert C.D.__qualname__ == C.__qualname__ + ".D"

    @given(with_validation=booleans())
    def test_post_init(self, with_validation, monkeypatch):
        """
        Verify that __attrs_post_init__ gets called if defined.
        """
        monkeypatch.setattr(_config, "_run_validators", with_validation)

        @attributes
        class C(object):
            x = attr()
            y = attr()

            def __attrs_post_init__(self2):
                self2.z = self2.x + self2.y

        c = C(x=10, y=20)
        assert 30 == getattr(c, 'z', None)
Example 32
class MemongerTest(hu.HypothesisTestCase):
    @given(input_dim=st.integers(min_value=1, max_value=10),
           output_dim=st.integers(min_value=1, max_value=10),
           batch_size=st.integers(min_value=1, max_value=10),
           do=st.sampled_from(hu.device_options),
           algo=st.sampled_from(memonger.AssignmentAlgorithm))
    @settings(max_examples=5, deadline=None)
    def test_simple_memonger(self, input_dim, output_dim, batch_size, do,
                             algo):
        m = model_helper.ModelHelper()
        fc1 = brew.fc(m, "data", "fc1", dim_in=input_dim, dim_out=output_dim)
        fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
        fc3 = brew.fc(m, fc2, "fc3", dim_in=output_dim, dim_out=output_dim)

        fc3.Relu([], fc3)\
           .Softmax([], "pred") \
           .LabelCrossEntropy(["label"], ["xent"]) \
           .AveragedLoss([], "loss")
        input_to_grad = m.AddGradientOperators(["loss"])
        m.net.Proto().device_option.CopyFrom(do)
        m.param_init_net.Proto().device_option.CopyFrom(do)
        static_blobs = \
            [o for op in m.param_init_net.Proto().op for o in op.output] + \
            ["data", "label", "loss", input_to_grad["fc1_w"]]

        optimization = memonger.optimize_interference(m.Proto(),
                                                      static_blobs,
                                                      algo=algo)
        data = np.random.randn(batch_size, input_dim).astype(np.float32)
        label = np.random.randint(low=0, high=output_dim,
                                  size=(batch_size, )).astype(np.int32)
        workspace.RunNetOnce(m.param_init_net)
        workspace.FeedBlob("data", data, device_option=do)
        workspace.FeedBlob("label", label, device_option=do)
        workspace.RunNetOnce(m.net)
        loss = workspace.FetchBlob("loss")
        grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
        workspace.RunNetOnce(optimization.net)
        optimized_loss = workspace.FetchBlob("loss")
        optimized_grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
        np.testing.assert_almost_equal(loss, optimized_loss)
        np.testing.assert_almost_equal(grad, optimized_grad)
        stats = memonger.compute_statistics(optimization.assignments)
        self.assertLess(stats.optimized_nbytes, stats.baseline_nbytes)

        # run with blob sizes
        blob_sizes = memonger.collect_blob_sizes(m.Proto())
        optimization1 = memonger.optimize_interference(m.Proto(),
                                                       static_blobs,
                                                       blob_sizes=blob_sizes,
                                                       algo=algo)
        workspace.RunNetOnce(optimization1.net)
        optimized_loss = workspace.FetchBlob("loss")
        optimized_grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
        np.testing.assert_almost_equal(loss, optimized_loss)
        np.testing.assert_almost_equal(grad, optimized_grad)
        stats = memonger.compute_statistics(optimization1.assignments)
        self.assertLessEqual(stats.optimized_nbytes, stats.baseline_nbytes)

    @given(input_dim=st.integers(min_value=1, max_value=10),
           output_dim=st.integers(min_value=1, max_value=10),
           batch_size=st.integers(min_value=1, max_value=10),
           do=st.sampled_from(hu.device_options))
    @settings(max_examples=5, deadline=None)
    def test_fast_memonger(self, input_dim, output_dim, batch_size, do):
        m = model_helper.ModelHelper()
        fc1 = brew.fc(m, "data", "fc1", dim_in=input_dim, dim_out=output_dim)
        fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
        fc3 = brew.fc(m, fc2, "fc3", dim_in=output_dim, dim_out=output_dim)

        fc3.Relu([], fc3)\
           .Softmax([], "pred") \
           .LabelCrossEntropy(["label"], ["xent"]) \
           .AveragedLoss([], "loss")
        input_to_grad = m.AddGradientOperators(["loss"])
        m.net.Proto().device_option.CopyFrom(do)
        m.param_init_net.Proto().device_option.CopyFrom(do)
        static_blobs = \
            [o for op in m.param_init_net.Proto().op for o in op.output] + \
            ["data", "label", "loss", input_to_grad["fc1_w"]]

        optimized_net = memonger.optimize_inference_fast(
            m.Proto(), static_blobs)
        data = np.random.randn(batch_size, input_dim).astype(np.float32)
        label = np.random.randint(low=0, high=output_dim,
                                  size=(batch_size, )).astype(np.int32)
        workspace.RunNetOnce(m.param_init_net)
        workspace.FeedBlob("data", data, device_option=do)
        workspace.FeedBlob("label", label, device_option=do)
        workspace.RunNetOnce(m.net)
        loss = workspace.FetchBlob("loss")
        grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
        workspace.RunNetOnce(optimized_net)
        optimized_loss = workspace.FetchBlob("loss")
        optimized_grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
        np.testing.assert_almost_equal(loss, optimized_loss)
        np.testing.assert_almost_equal(grad, optimized_grad)

        self.assertLess(count_blobs(optimized_net), count_blobs(m.Proto()))

    def test_fast_memonger_unique_outputs(self):
        m = model_helper.ModelHelper()
        fc = []
        for i in range(2):
            z = brew.fc(m,
                        "data{}".format(i),
                        "fc".format(i),
                        dim_in=2,
                        dim_out=2)
            fc.append(z)
        r = []
        # Trick is here to have same input appear twice in a same Sum
        for x in fc:
            for y in fc:
                r.append(brew.sum(m, [x, y], 1))
        concated = brew.concat(m, r, "concated")
        brew.relu(m, concated, "merged")

        static_blobs = \
            [o for op in m.param_init_net.Proto().op for o in op.output] + \
            ["merged"] + ["data{}".format(i) for i in range(len(fc))]

        optimized_net = memonger.optimize_inference_fast(
            m.Proto(), static_blobs)
        for op in optimized_net.op:
            self.assertEqual(len(op.output), len(set(op.output)), str(op))

    @given(input_dim=st.integers(min_value=1, max_value=4),
           output_dim=st.integers(min_value=1, max_value=4),
           batch_size=st.integers(min_value=1, max_value=4))
    def test_gradient_optim(self, input_dim, output_dim, batch_size):
        m = model_helper.ModelHelper()
        with core.NameScope("name_x"):
            fc1 = brew.fc(m,
                          "data",
                          "fc1",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
            fc3 = brew.fc(m, fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
            fc4 = brew.fc(m, fc3, "fc4", dim_in=output_dim, dim_out=output_dim)
            fc5 = brew.fc(m, fc4, "fc5", dim_in=output_dim, dim_out=output_dim)
            fc5.Relu([], fc5)\
               .Softmax([], "pred") \
               .LabelCrossEntropy(["label"], ["xent"]) \
               .AveragedLoss([], "loss")
        input_to_grad = m.AddGradientOperators(["name_x/loss"])

        blobs_before = count_blobs(m.net.Proto())
        optim_proto = memonger.share_grad_blobs(
            m.net,
            ["name_x/loss"],
            set(viewvalues(m.param_to_grad)),
            "name_x/",
            share_activations=False,
        )
        blobs_after = count_blobs(optim_proto)
        self.assertLess(blobs_after, blobs_before)

        optim_proto_wacts = memonger.share_grad_blobs(
            m.net,
            ["name_x/loss"],
            set(viewvalues(m.param_to_grad)),
            "name_x/",
            share_activations=True,
            dont_share_blobs=set([str(input_to_grad["name_x/fc1_w"])]),
        )
        blobs_wact_optim = count_blobs(optim_proto_wacts)
        self.assertLessEqual(blobs_wact_optim, blobs_after)

        # Check that the last activations are not shared
        self.assertTrue(has_blob(optim_proto, "name_x/fc5"))
        self.assertTrue(
            has_blob(optim_proto_wacts, "name_x/fc5"),
            "Dont remap final activation",
        )

        # Test networks produce exactly same gradients
        data = np.random.randn(batch_size, input_dim).astype(np.float32)
        label = np.random.randint(low=0, high=output_dim,
                                  size=(batch_size, )).astype(np.int32)
        workspace.RunNetOnce(m.param_init_net)
        workspace.FeedBlob("name_x/data", data)
        workspace.FeedBlob("name_x/label", label)
        workspace.RunNetOnce(m.net)
        loss = workspace.FetchBlob("name_x/loss")
        grad = workspace.FetchBlob(str(input_to_grad["name_x/fc1_w"]))
        workspace.RunNetOnce(optim_proto)
        optimized_loss = workspace.FetchBlob("name_x/loss")
        optimized_grad = workspace.FetchBlob(str(
            input_to_grad["name_x/fc1_w"]))
        np.testing.assert_almost_equal(loss, optimized_loss)
        np.testing.assert_almost_equal(grad, optimized_grad)

        workspace.FeedBlob(str(input_to_grad["name_x/fc1_w"]), np.array([0.0]))

        # Run with the forward optimization
        workspace.RunNetOnce(optim_proto_wacts)
        optimized_loss = workspace.FetchBlob("name_x/loss")
        optimized_grad = workspace.FetchBlob(str(
            input_to_grad["name_x/fc1_w"]))
        np.testing.assert_almost_equal(loss, optimized_loss)
        np.testing.assert_almost_equal(grad, optimized_grad)

    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
    def test_memonger_mix_cpu_gpu(self):
        '''
        Check that memonger does not make blobs cross CPU/GPU boundary
        '''
        m = model_helper.ModelHelper()
        with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, 0)):
            fc1 = brew.fc(m, "data", "fc1", dim_in=2, dim_out=2)
            fc2 = brew.fc(m, fc1, "fc2", dim_in=2, dim_out=2)
            fc3 = brew.fc(m, fc2, "fc3", dim_in=2, dim_out=2)
            fc4 = brew.fc(m, fc3, "fc4", dim_in=2, dim_out=2)
            fc4_cpu = m.net.CopyGPUToCPU(fc4, "fc4_cpu")
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 0)):
            fc5_cpu = brew.fc(m, fc4_cpu, "fc5_cpu", dim_in=2, dim_out=2)
            fc6_cpu = brew.fc(m, fc5_cpu, "fc6_cpu", dim_in=2, dim_out=2)
            fc7_cpu = brew.fc(m, fc6_cpu, "fc7_cpu", dim_in=2, dim_out=2)
            fc7_cpu.Relu([], fc7_cpu) \
               .Softmax([], "pred") \
               .LabelCrossEntropy(["label"], ["xent"]) \
               .AveragedLoss([], "loss")
        m.AddGradientOperators(["loss"])

        blobs_before = count_blobs(m.net.Proto())
        optim_proto = memonger.share_grad_blobs(
            m.net,
            ["loss"],
            set(viewvalues(m.param_to_grad)),
            "",
            share_activations=True,
            dont_share_blobs=set(),
        )
        blobs_after = count_blobs(optim_proto)
        self.assertLess(blobs_after, blobs_before)

        # Create set of blobs on CPU side and GPU side and check they don't
        # overlap
        device_blobs = {caffe2_pb2.CPU: set(), workspace.GpuDeviceType: set()}
        for op in optim_proto.op:
            if op.type not in ['CopyCPUToGPU', "CopyGPUToCPU"]:
                dev = op.device_option.device_type
                for b in list(op.input) + list(op.output):
                    device_blobs[dev].add(b)

        device_crossers = device_blobs[caffe2_pb2.CPU].intersection(
            device_blobs[workspace.GpuDeviceType])
        self.assertEquals(device_crossers, set())

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    @settings(deadline=1000)
    def test_gradient_optim_tree(self, input_dim, output_dim, batch_size):
        m = model_helper.ModelHelper()
        with core.NameScope("name_x"):
            fc1 = brew.fc(m,
                          "data",
                          "fc1",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
            fc3 = brew.fc(m, fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
            fc4 = brew.fc(m, fc3, "fc4", dim_in=output_dim, dim_out=output_dim)
            fc5 = brew.fc(m, fc4, "fc5", dim_in=output_dim, dim_out=output_dim)
            fc5.Relu([], fc5) \
               .Softmax([], "pred1") \
               .LabelCrossEntropy(["label"], ["xent1"]) \
               .AveragedLoss([], "loss1")
            fc6 = brew.fc(m, fc5, "fc6", dim_in=output_dim, dim_out=output_dim)
            fc6.Relu([], fc6) \
               .Softmax([], "pred2") \
               .LabelCrossEntropy(["label"], ["xent2"]) \
               .AveragedLoss([], "loss2")
        input_to_grad = m.AddGradientOperators(
            ["name_x/loss1", "name_x/loss2"])

        blobs_before = count_blobs(m.net.Proto())
        optim_proto = memonger.share_grad_blobs(
            m.net,
            ["name_x/loss1", "name_x/loss2"],
            set(viewvalues(m.param_to_grad)),
            "name_x",  # "name_x//shared_gradinp_0_shared" if using "name_x/"
            share_activations=True,
            dont_share_blobs=set([
                'name_x/fc6', 'name_x/fc5',
                str(input_to_grad["name_x/fc1_w"])
            ]),
        )
        blobs_after = count_blobs(optim_proto)
        self.assertLess(blobs_after, blobs_before)
        self.assertTrue(has_blob(optim_proto, "name_x/fc6"))

        # Test networks produce exactly same gradients
        data = np.random.randn(batch_size, input_dim).astype(np.float32)
        label = np.random.randint(low=0, high=output_dim,
                                  size=(batch_size, )).astype(np.int32)
        workspace.RunNetOnce(m.param_init_net)
        workspace.FeedBlob("name_x/data", data)
        workspace.FeedBlob("name_x/label", label)
        workspace.RunNetOnce(m.net)
        loss1 = workspace.FetchBlob("name_x/loss1")
        loss2 = workspace.FetchBlob("name_x/loss2")
        grad = workspace.FetchBlob(str(input_to_grad["name_x/fc1_w"]))
        workspace.FeedBlob(str(input_to_grad["name_x/fc1_w"]), np.array([0.0]))

        workspace.RunNetOnce(optim_proto)
        optimized_loss1 = workspace.FetchBlob("name_x/loss1")
        optimized_loss2 = workspace.FetchBlob("name_x/loss2")
        optimized_grad = workspace.FetchBlob(str(
            input_to_grad["name_x/fc1_w"]))
        np.testing.assert_almost_equal(loss1, optimized_loss1)
        np.testing.assert_almost_equal(loss2, optimized_loss2)
        np.testing.assert_almost_equal(grad, optimized_grad)

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    @settings(deadline=1000)
    def test_forward_optim_tree_daggy(self, input_dim, output_dim, batch_size):
        m = model_helper.ModelHelper()
        m.Proto().type = "dag"
        m.Proto().num_workers = 4

        with core.NameScope("name_x"):
            fc1 = brew.fc(m,
                          "data",
                          "fc1",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)

            fc3 = brew.fc(m, fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
            fc4 = brew.fc(m, fc3, "fc4", dim_in=output_dim, dim_out=output_dim)
            fc5 = brew.fc(m, fc4, "fc5", dim_in=output_dim, dim_out=output_dim)

            # Branch
            fc3b = brew.fc(m,
                           fc2,
                           "fc3b",
                           dim_in=output_dim,
                           dim_out=output_dim)
            fc4b = brew.fc(m,
                           fc3b,
                           "fc4b",
                           dim_in=output_dim,
                           dim_out=output_dim)
            fc5b = brew.fc(m,
                           fc4b,
                           "fc5b",
                           dim_in=output_dim,
                           dim_out=output_dim)

            fc5sum = brew.sum(m, [fc5, fc5b], "fc5sum")

            fc5.Relu([], fc5sum) \
               .Softmax([], "pred1") \
               .LabelCrossEntropy(["label"], ["xent1"]) \
               .AveragedLoss([], "loss1")
            fc6 = brew.fc(m, fc5, "fc6", dim_in=output_dim, dim_out=output_dim)
            fc6.Relu([], fc6) \
               .Softmax([], "pred2") \
               .LabelCrossEntropy(["label"], ["xent2"]) \
               .AveragedLoss([], "loss2")

        blobs_before = count_blobs(m.net.Proto())
        optim_proto = memonger.optimize_inference_for_dag(
            m.net, ["name_x/data"], "name_x")
        blobs_after = count_blobs(optim_proto)
        self.assertLess(blobs_after, blobs_before)

        # Test networks produce exactly same results
        data = np.random.randn(batch_size, input_dim).astype(np.float32)
        label = np.random.randint(low=0, high=output_dim,
                                  size=(batch_size, )).astype(np.int32)
        workspace.RunNetOnce(m.param_init_net)
        workspace.FeedBlob("name_x/data", data)
        workspace.FeedBlob("name_x/label", label)
        workspace.RunNetOnce(m.net)
        loss1 = workspace.FetchBlob("name_x/loss1")
        loss2 = workspace.FetchBlob("name_x/loss2")
        workspace.RunNetOnce(optim_proto)
        optimized_loss1 = workspace.FetchBlob("name_x/loss1")
        optimized_loss2 = workspace.FetchBlob("name_x/loss2")
        np.testing.assert_almost_equal(loss1, optimized_loss1)
        np.testing.assert_almost_equal(loss2, optimized_loss2)

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    @settings(deadline=10000)
    def test_forward_optim_tree_harder(self, input_dim, output_dim,
                                       batch_size):
        m = model_helper.ModelHelper()
        m.net.Proto().type = "dag"
        m.net.Proto().num_workers = 4
        m.net.AddExternalInput("label")
        m.net.AddExternalInput("data")

        with core.NameScope("name_x"):
            fc1 = brew.fc(m,
                          "data",
                          "fc1",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)

            fc3 = brew.fc(m, fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
            fc4 = brew.fc(m, fc3, "fc4", dim_in=output_dim, dim_out=output_dim)
            fc5 = brew.fc(m, fc4, "fc5", dim_in=output_dim, dim_out=output_dim)

            # Branch
            fc3b = brew.fc(m,
                           fc2,
                           "fc3b",
                           dim_in=output_dim,
                           dim_out=output_dim)
            fc4b = brew.fc(m,
                           fc3b,
                           "fc4b",
                           dim_in=output_dim,
                           dim_out=output_dim)
            fc5b = brew.fc(m,
                           fc4b,
                           "fc5b",
                           dim_in=output_dim,
                           dim_out=output_dim)

            fc5sum = brew.sum(m, [fc5, fc5b], "fc5sum")
            fc5sum.Relu([], "relu1") \
               .Softmax([], "pred1") \
               .LabelCrossEntropy(["label"], ["xent1"]) \
               .AveragedLoss([], "loss1")
            fc6 = brew.fc(m, fc5, "fc6", dim_in=output_dim, dim_out=output_dim)
            fc6.Relu([], fc6) \
               .Softmax([], "pred2") \
               .LabelCrossEntropy(["label"], ["xent2"]) \
               .AveragedLoss([], "loss2")

        blobs_before = count_blobs(m.net.Proto())
        optim_proto = memonger.optimize_inference_for_dag(
            m.net, ["name_x/data"], "name_x/")

        blobs_after = count_blobs(optim_proto)

        # Extra test with when one of the parameters is also an input.
        # This caused a bug before.
        optim_proto_extra_input = memonger.optimize_inference_for_dag(
            m.net, ["name_x/data", "name_x/fc1_w"], "name_x/")
        blobs_after_extra_input = count_blobs(optim_proto_extra_input)
        self.assertEqual(blobs_after, blobs_after_extra_input)
        ###

        print(str(optim_proto))
        self.assertLess(blobs_after, blobs_before)

        # Test networks produce exactly same results
        data = np.random.randn(batch_size, input_dim).astype(np.float32)
        label = np.random.randint(low=0, high=output_dim,
                                  size=(batch_size, )).astype(np.int32)
        workspace.RunNetOnce(m.param_init_net)
        workspace.FeedBlob("name_x/data", data)
        workspace.FeedBlob("name_x/label", label)
        workspace.RunNetOnce(m.net)
        loss1 = workspace.FetchBlob("name_x/loss1")
        loss2 = workspace.FetchBlob("name_x/loss2")
        workspace.RunNetOnce(optim_proto)
        optimized_loss1 = workspace.FetchBlob("name_x/loss1")
        optimized_loss2 = workspace.FetchBlob("name_x/loss2")
        np.testing.assert_almost_equal(loss1, optimized_loss1)
        np.testing.assert_almost_equal(loss2, optimized_loss2)

    def test_rnn(self):
        from caffe2.python import rnn_cell
        T = 5
        model = model_helper.ModelHelper()
        seq_lengths, labels = \
            model.net.AddExternalInputs(
                'seq_lengths', 'labels',
            )
        init_blobs = []
        for i in range(2):
            hidden_init, cell_init = model.net.AddExternalInputs(
                "hidden_init_{}".format(i), "cell_init_{}".format(i))
            init_blobs.extend([hidden_init, cell_init])
        model.param_init_net.ConstantFill([], ["input"], shape=[T, 4, 10])
        output, last_hidden, _, last_state = rnn_cell.LSTM(
            model=model,
            input_blob="input",
            seq_lengths=seq_lengths,
            initial_states=init_blobs,
            dim_in=10,
            dim_out=[10, 10],
            scope="lstm1",
            forward_only=False,
            drop_states=True,
            return_last_layer_only=True,
        )
        softmax, loss = model.net.SoftmaxWithLoss(
            [model.Flatten(output), "labels"],
            ['softmax', 'loss'],
        )

        model.AddGradientOperators([loss])
        blobs_before = count_blobs(model.net.Proto())
        optim_proto = memonger.share_grad_blobs(
            model.net,
            ["loss"],
            set(viewvalues(model.param_to_grad)),
            "",
            share_activations=True,
            dont_share_blobs=set(),
        )
        blobs_after = count_blobs(optim_proto)
        self.assertLess(blobs_after, blobs_before)

        # Run once to see all blobs are set up correctly
        for init_blob in init_blobs:
            workspace.FeedBlob(init_blob, np.zeros([1, 4, 10],
                                                   dtype=np.float32))
        workspace.FeedBlob("seq_lengths", np.array([T] * 4, dtype=np.int32))
        workspace.FeedBlob("labels", np.random.rand(T).astype(np.int32))

        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)

    def test_compute_interference_graph_inplace_ops(self):
        m = model_helper.ModelHelper()
        m.Copy("b1", "b1")
        m.Copy("b1", "b1")
        m.Copy("b1", "b1")
        g = memonger.compute_interference_graph(m.net.Proto().op)
        self.assertEqual(list(g.edges()), [(0, 1), (0, 2), (1, 2)])

    def test_topological_sort_longest_path(self):
        m = model_helper.ModelHelper()
        # 0
        m.Copy("conv0_w_comp", "conv0_w")
        # 1
        conv0 = brew.conv(m, "data", "conv0", 32, 32, 4)
        # 2
        m.Copy("conv2_w", "conv2_w")
        # 3
        brew.conv(m, conv0, "conv2", 16, 32, 4)

        g = memonger.compute_interference_graph(m.net.Proto().op)

        orders_org = memonger.topological_sort_traversal(g)
        orders_gt_org = [2, 0, 1, 3]
        self.assertEqual(orders_gt_org, list(orders_org))

        orders = memonger.topological_sort_traversal_longest_path(g)
        # the longer path comes before the shorter one
        orders_gt = [0, 1, 2, 3]
        self.assertEqual(orders_gt, list(orders))

    def test_topological_sort_longest_path_multi_target(self):
        # two outputs: conv2 and data3
        m = model_helper.ModelHelper()
        # 0
        m.Copy("conv0_w_comp", "conv0_w")
        # 1
        conv0 = brew.conv(m, "data", "conv0", 32, 32, 4)
        # 2
        m.Copy("conv2_w", "conv2_w")
        # 3
        brew.conv(m, conv0, "conv2", 16, 32, 4)
        # 4
        m.Copy("data1", "data2")
        # 5
        m.Copy("data2", "data3")

        g = memonger.compute_interference_graph(m.net.Proto().op)

        orders_org = memonger.topological_sort_traversal(g)
        orders_gt_org = [4, 5, 2, 0, 1, 3]
        self.assertEqual(orders_gt_org, list(orders_org))

        orders = memonger.topological_sort_traversal_longest_path(g)
        # the longer path comes before the shorter one
        orders_gt = [0, 1, 2, 3, 4, 5]
        self.assertEqual(orders_gt, list(orders))

    def test_topological_sort_longest_path_single_node(self):
        # single node
        m = model_helper.ModelHelper()
        # 0
        m.Copy("conv0_w_comp", "conv0_w")

        g = memonger.compute_interference_graph(m.net.Proto().op)

        orders_org = memonger.topological_sort_traversal(g)
        orders_gt_org = [0]
        self.assertEqual(orders_gt_org, list(orders_org))

        orders = memonger.topological_sort_traversal_longest_path(g)
        # with a single node, both traversals trivially give the same order
        orders_gt = [0]
        self.assertEqual(orders_gt, list(orders))

    def test_compute_assignments_greedy(self):
        LiveRange = memonger.LiveRange
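        # LiveRange fields are (presumably) (defined, used, size): b1 and b4
        # have disjoint lifetimes and can share one 10-unit blob, as can the
        # 1-unit b2 and b3, so the expected peak memory usage is 10 + 1 = 11.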
        ranges_sorted = [
            ('b1', LiveRange(1, 3, 10)),
            ('b2', LiveRange(3, 4, 1)),
            ('b3', LiveRange(5, 6, 1)),
            ('b4', LiveRange(5, 7, 10)),
        ]
        assignment_gt = [
            [ranges_sorted[0], ranges_sorted[3]],
            [ranges_sorted[1], ranges_sorted[2]],
        ]

        best = memonger.compute_assignments_greedy(ranges_sorted, None)
        self.assertEqual(memonger.get_memory_usage(best), 11)
        self.assertEqual(best, assignment_gt)

    def test_compute_assignments_dp(self):
        LiveRange = memonger.LiveRange
        ranges_sorted = [
            ('b1', LiveRange(1, 3, 10)),
            ('b2', LiveRange(3, 4, 1)),
            ('b3', LiveRange(5, 6, 1)),
            ('b4', LiveRange(5, 7, 10)),
        ]

        best = memonger.compute_assignments_dp(ranges_sorted, None)
        self.assertEqual(memonger.get_memory_usage(best), 11)

    def test_compute_assignments_dp1(self):
        LiveRange = memonger.LiveRange
        ranges_sorted = [
            ('b1', LiveRange(1, 2, 10)),
            ('b2', LiveRange(4, 6, 1)),
            ('b3', LiveRange(5, 6, 10)),
        ]

        best = memonger.compute_assignments_dp(ranges_sorted, [])
        self.assertEqual(memonger.get_memory_usage(best), 11)

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    def test_verify_graph_equality(self, input_dim, output_dim, batch_size):
        m = model_helper.ModelHelper()
        m.Proto().type = "dag"
        m.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m, "data", "x", dim_in=input_dim, dim_out=output_dim)
            fc2 = brew.fc(m, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc3 = brew.fc(m, fc1, "z", dim_in=output_dim, dim_out=output_dim)
            brew.sum(m, [fc2, fc3], "out")

        m2 = model_helper.ModelHelper()
        m2.Proto().type = "dag"
        m2.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m2,
                          "data",
                          "other_x",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2 = brew.fc(m2,
                          fc1,
                          "other_y",
                          dim_in=output_dim,
                          dim_out=output_dim)
            fc3 = brew.fc(m2,
                          fc1,
                          "other_z",
                          dim_in=output_dim,
                          dim_out=output_dim)
            brew.sum(m2, [fc2, fc3], "out")

        self.assertTrue(
            memonger.verify_graph_equality(m.net.Proto(), m2.net.Proto()))

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    def test_verify_graph_equality_harder(self, input_dim, output_dim,
                                          batch_size):
        m = model_helper.ModelHelper()
        m.Proto().type = "dag"
        m.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m, "data", "x", dim_in=input_dim, dim_out=output_dim)
            fc2a = brew.fc(m, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc2b = brew.fc(m, fc1, "z", dim_in=output_dim, dim_out=output_dim)
            fc3a = brew.fc(m, fc2a, "u", dim_in=output_dim, dim_out=output_dim)
            fc3b = brew.fc(m, fc2b, "v", dim_in=output_dim, dim_out=output_dim)
            brew.sum(m, [fc3a, fc3b], "out")

        m2 = model_helper.ModelHelper()
        m2.Proto().type = "dag"
        m2.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m2,
                          "data",
                          "x",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2a = brew.fc(m2, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc2b = brew.fc(m2, fc1, "z", dim_in=output_dim, dim_out=output_dim)
            fc3a = brew.fc(m2,
                           fc2a,
                           "y",
                           dim_in=output_dim,
                           dim_out=output_dim)
            fc3b = brew.fc(m2,
                           fc2b,
                           "z",
                           dim_in=output_dim,
                           dim_out=output_dim)
            brew.sum(m2, [fc3a, fc3b], "out")

        self.assertTrue(
            memonger.verify_graph_equality(m.net.Proto(), m2.net.Proto()))

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    def test_verify_graph_inequality(self, input_dim, output_dim, batch_size):
        m = model_helper.ModelHelper()
        m.Proto().type = "dag"
        m.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m, "data", "x", dim_in=input_dim, dim_out=output_dim)
            fc2 = brew.fc(m, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc3 = brew.fc(m, fc1, "z", dim_in=output_dim, dim_out=output_dim)
            brew.sum(m, [fc2, fc3], "out")

        m2 = model_helper.ModelHelper()
        m2.Proto().type = "dag"
        m2.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m2,
                          "data",
                          "x",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2 = brew.fc(m2, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc3 = brew.fc(m2, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            brew.sum(m2, [fc2, fc3], "out")

        self.assertFalse(
            memonger.verify_graph_equality(m.net.Proto(), m2.net.Proto()))

    @given(input_dim=st.integers(min_value=4, max_value=4),
           output_dim=st.integers(min_value=4, max_value=4),
           batch_size=st.integers(min_value=4, max_value=4))
    def test_verify_graph_inequality_harder(self, input_dim, output_dim,
                                            batch_size):
        m = model_helper.ModelHelper()
        m.Proto().type = "dag"
        m.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m, "data", "x", dim_in=input_dim, dim_out=output_dim)
            fc2a = brew.fc(m, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc2b = brew.fc(m, fc1, "z", dim_in=output_dim, dim_out=output_dim)
            fc3a = brew.fc(m, fc2a, "u", dim_in=output_dim, dim_out=output_dim)
            fc3b = brew.fc(m, fc2b, "v", dim_in=output_dim, dim_out=output_dim)
            brew.sum(m, [fc3a, fc3b], "out")

        m2 = model_helper.ModelHelper()
        m2.Proto().type = "dag"
        m2.Proto().num_workers = 4
        with core.NameScope("name_x"):
            fc1 = brew.fc(m2,
                          "data",
                          "x",
                          dim_in=input_dim,
                          dim_out=output_dim)
            fc2a = brew.fc(m2, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc2b = brew.fc(m2, fc1, "y", dim_in=output_dim, dim_out=output_dim)
            fc3a = brew.fc(m2,
                           fc2a,
                           "u",
                           dim_in=output_dim,
                           dim_out=output_dim)
            fc3b = brew.fc(m2,
                           fc2b,
                           "v",
                           dim_in=output_dim,
                           dim_out=output_dim)
            brew.sum(m2, [fc3a, fc3b], "out")

        self.assertFalse(
            memonger.verify_graph_equality(m.net.Proto(), m2.net.Proto()))

    def test_release_blobs_when_used(self):
        m = model_helper.ModelHelper()
        fc1 = brew.fc(m, "data", "x", dim_in=2, dim_out=2)
        fc2 = brew.fc(m, fc1, "y", dim_in=2, dim_out=2)
        fc3 = brew.fc(m, fc1, "z", dim_in=2, dim_out=2)
        fc4 = brew.fc(m, fc2, "u", dim_in=2, dim_out=2)
        m.net.Alias(["u"], ["u_alias"])

        brew.sum(m, [fc3, fc4], "out")

        with_frees = memonger.release_blobs_when_used(m.net.Proto(),
                                                      {"data"})

        expect_frees = {"x", "y", "z"}  # out is external output
        # and u is aliased so cannot be freed
        found_frees = set()
        for op in with_frees.op:
            if op.type == "Free":
                self.assertFalse(op.input[0] in found_frees)  # no double frees
                found_frees.add(op.input[0])
            else:
                # Check a freed blob is not used anymore
                for inp in op.input:
                    self.assertFalse(inp in found_frees)
                for outp in op.output:
                    self.assertFalse(outp in found_frees)

        self.assertEqual(expect_frees, found_frees)

def test_binop_pow():
    # raises because Vyper does not support decimal exponentiation
    vyper_ast = vy_ast.parse_to_ast("3.1337 ** 4.2")
    old_node = vyper_ast.body[0].value

    with pytest.raises(TypeMismatch):
        old_node.evaluate()


@pytest.mark.fuzzing
@settings(max_examples=50, deadline=1000)
@given(
    values=st.lists(st_decimals, min_size=2, max_size=10),
    ops=st.lists(st.sampled_from("+-*/%"), min_size=11, max_size=11),
)
def test_nested(get_contract, assert_tx_failed, values, ops):
    variables = "abcdefghij"
    input_value = ",".join(f"{i}: decimal" for i in variables[: len(values)])
    return_value = " ".join(f"{a} {b}" for a, b in zip(variables[: len(values)], ops))
    return_value = return_value.rsplit(maxsplit=1)[0]
    source = f"""
@external
def foo({input_value}) -> decimal:
    return {return_value}
    """
    contract = get_contract(source)

    literal_op = " ".join(f"{a} {b}" for a, b in zip(values, ops))
    literal_op = literal_op.rsplit(maxsplit=1)[0]
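
    # The original example is truncated here. A plausible completion (an
    # assumption, mirroring the usual vyper folding tests -- the exact
    # original assertions are unknown): fold the literal expression off-chain
    # and compare it against the contract, expecting the call to fail where
    # folding fails (e.g. division or modulo by zero).
    vyper_ast = vy_ast.parse_to_ast(literal_op)
    try:
        vy_ast.folding.replace_literal_ops(vyper_ast)
        expected = vyper_ast.body[0].value.value
        is_valid = True
    except ZeroDivisionError:
        is_valid = False

    if is_valid:
        assert contract.foo(*values) == expected
    else:
        assert_tx_failed(lambda: contract.foo(*values))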
Example #34
class TestBindings(test_util.TestCase):
    def test_simple(self):
        nn = ng.NNModule()
        dfg = nn.dataFlow
        dfg.createNode(ng.NeuralNetData("X"))
        dfg.createNode(ng.NeuralNetOperator("FC"))
        assert len(nn.dataFlow.getMutableNodes()) == 2

    def test_core_net_simple(self):
        net = core.Net("name")
        net.FC(["X", "W"], ["Y"])
        nn = ng.NNModule(net)
        for node in nn.dataFlow.getMutableNodes():
            if node.isOperator():
                assert node.getName() == "FC"
            elif node.isTensor():
                assert node.getName() in ["X", "W", "Y"]

    def test_core_net_controlflow(self):
        net = core.Net("name")
        net.FC(["X", "W"], ["Y"])
        net.Relu(["Y"], ["Z"])
        nn = ng.NNModule(net)
        assert len(nn.controlFlow) == 2
        for instr in nn.controlFlow:
            assert instr.getType() == "Operator"
        assert nn.controlFlow[0].getName() == "FC"
        assert nn.controlFlow[1].getName() == "Relu"

    def test_core_net_nn_accessors(self):
        net = core.Net("name")
        net.FC(["X", "W"], ["Y"])
        net.Relu(["Y"], ["Z"])
        nn = ng.NNModule(net)
        tensors = set()
        for t in nn.tensors:
            tensors.add(t.name)
        assert tensors == set(["X", "W", "Y", "Z"])
        ops = set()
        for op in nn.operators:
            ops.add(op.name)
        assert ops == set(["FC", "Relu"])
        nodes = set()
        for node in nn.nodes:
            nodes.add(node.name)
        assert nodes == (ops | tensors)

    def test_netdef_simple(self):
        net = core.Net("name")
        net.FC(["X", "W"], ["Y"])
        nn = ng.NNModule(net.Proto())
        for node in nn.dataFlow.getMutableNodes():
            if node.isOperator():
                assert node.getOperator().getName() == "FC"
            elif node.isTensor():
                assert node.getTensor().getName() in ["X", "W", "Y"]

    def test_operatordef_simple(self):
        nn = ng.NNModule()
        dfg = nn.dataFlow
        op = core.CreateOperator("Ceil", ["X"], ["Y"], engine="CUDNN")
        dfg.createNode(op)
        for node in dfg.getMutableNodes():
            assert node.isOperator()
            assert node.getOperator().getName() == "Ceil"

    def test_invalid_node(self):
        nn = ng.NNModule()
        dfg = nn.dataFlow
        with self.assertRaises(Exception):
            dfg.createNode(7)

    def test_edges_simple(self):
        nn = ng.NNModule()
        dfg = nn.dataFlow
        x = dfg.createNode(ng.NeuralNetData("X"))
        w = dfg.createNode(ng.NeuralNetData("W"))
        op = dfg.createNode(ng.NeuralNetOperator("Op"))

        with self.assertRaises(Exception):
            dfg.createEdge(x, w)
        dfg.createEdge(op, w)
        dfg.createEdge(x, op)

    @given(size=st.sampled_from([10, 50]))
    def test_edges_complex(self, size):
        random.seed(1337)
        nn = ng.NNModule()
        dfg = nn.dataFlow

        data = []
        ops = []
        for _ in range(size):
            data.append(dfg.createNode(ng.NeuralNetData("X")))
        for i in range(size):
            ops.append(dfg.createNode(ng.NeuralNetOperator("Op" + str(i))))

        for i in range(size):
            for j in range(size):
                if bool(random.getrandbits(1)):
                    dfg.createEdge(data[i], ops[j])

    def test_traversal(self):
        net = core.Net("test")
        net.FC(["X", "W"], ["Y"])
        net.Relu(["Y"], ["Z"])
        nn = ng.NNModule(net)
        fc = nn.controlFlow[0]
        relu = nn.controlFlow[1]
        assert fc.inputs[0].name == "X"
        assert fc.inputs[1].name == "W"
        assert relu.outputs[0].name == "Z"
        assert relu.inputs[0].name == "Y"
        assert relu.inputs[0].producer.name == "FC"
        assert fc.outputs[0].consumers[0].name == "Relu"

    def test_debug(self):
        nn = ng.NNModule()
        dfg = nn.dataFlow
        dfg.createNode(ng.NeuralNetData("X"))
        dfg.createNode(ng.NeuralNetData("W"))
        dfg.createNode(ng.NeuralNetOperator("Op"))

        ng.render(nn.dataFlow)

    def test_match_graph_node(self):
        mg = ng.NNMatchGraph()
        mg.createNode(ng.NeuralNetOperator("test"))
        nn = ng.NNModule()
        test = nn.dataFlow.createNode(ng.NeuralNetOperator("test"))
        x = nn.dataFlow.createNode(ng.NeuralNetData("X"))
        nn.dataFlow.createEdge(x, test)

        count = 0
        for match in nn.match(mg):
            assert len(match) == 1
            count += 1
        assert count == 1

    def test_match_graph_node_strict(self):
        mg = ng.NNMatchGraph()
        mg.createNode(ng.NeuralNetOperator("test"), strict=True)
        nn = ng.NNModule()
        test = nn.dataFlow.createNode(ng.NeuralNetOperator("test"))
        x = nn.dataFlow.createNode(ng.NeuralNetData("X"))
        nn.dataFlow.createEdge(test, x)

        count = 0
        for match in nn.match(mg):
            assert len(match) == 1
            count += 1

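        # Presumably strict matching requires a candidate's edges to mirror
        # the pattern exactly; "test" has an edge the single-node pattern
        # lacks, so no match is found and the assertion below raises.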
        with self.assertRaises(Exception):
            assert count == 1

    def test_match_graph(self):
        mg = ng.NNMatchGraph()
        test2m = mg.createNode(ng.NeuralNetOperator("test2"), strict=True)
        xm = mg.createNode(ng.NeuralNetData("X"), strict=True)
        testm = mg.createNode(ng.NeuralNetOperator("test"))
        mg.createEdge(test2m, xm)
        mg.createEdge(xm, testm)

        nn = ng.NNModule()
        test2 = nn.dataFlow.createNode(ng.NeuralNetOperator("test2"))
        x = nn.dataFlow.createNode(ng.NeuralNetData("X"))
        test = nn.dataFlow.createNode(ng.NeuralNetOperator("test"))
        nn.dataFlow.createEdge(test2, x)
        nn.dataFlow.createEdge(x, test)

        count = 0
        for match in nn.match(mg):
            print(len(match))
            assert len(match) == 3
            count += 1
        assert count == 1

    def test_genericGraph(self):
        g = ng.Graph()
        n1 = g.createNode("hello1")
        n2 = g.createNode("hello2")
        e = g.createEdge(n1, n2)
        ng.render(g)

    def test_convertToProto(self):
        net = core.Net("name")
        net.FC(["X", "W"], ["Y"])
        nn = ng.NNModule(net)
        new_netdef = nn.convertToCaffe2Proto()
        print(new_netdef)
        print(net.Proto())
        assert len(new_netdef.op) == len(net.Proto().op)
        for i in range(len(new_netdef.op)):
            op = net.Proto().op[i]
            new_op = new_netdef.op[i]
            assert op.type == new_op.type
            assert len(op.input) == len(new_op.input)
            assert len(op.output) == len(new_op.output)
            for a, b in zip(op.input, new_op.input):
                assert a == b
            for a, b in zip(op.output, new_op.output):
                assert a == b
        for a, b in zip(new_netdef.external_input, net.Proto().external_input):
            assert a == b
        for a, b in zip(new_netdef.external_output, net.Proto().external_output):
            assert a == b

    def test_node_interactions(self):
        nn = ng.NNModule()
        dfg = nn.dataFlow
        test1 = dfg.createNode(ng.Operator("test1"))
        test2 = dfg.createNode(ng.Operator("test2"))
        x = dfg.createNode(ng.Data("x"))
        dfg.createEdge(test1, x)
        dfg.createEdge(x, test2)
        p = test2.getOperatorPredecessors()
        assert len(p) == 1
        assert p[0] == test1

        # Add another node
        test3 = dfg.createNode(ng.Operator("test3"))
        y = dfg.createNode(ng.Data("y"))
        dfg.createEdge(test3, y)
        dfg.createEdge(y, test2)
        p = test2.getOperatorPredecessors()
        assert len(p) == 2
        assert test1 in p
        assert test3 in p

        # Successors
        assert len(test2.getOperatorSuccessors()) == 0
        assert len(test1.getOperatorSuccessors()) == 1
        assert test1.getOperatorSuccessors()[0] == test2

        # Check all the nodes are valid (pybind ownership test)
        for node in [test1, test2, test3]:
            assert node.isOperator()
        for node in [x, y]:
            assert node.isTensor()

    def test_delete_node(self):
        nn = ng.NNModule()
        node = nn.dataFlow.createNode(ng.NeuralNetOperator("TestOp"))
        nn.dataFlow.deleteNode(node)
        assert len(nn.dataFlow.getMutableNodes()) == 0

    def test_annotation_basic(self):
        annot = ng.Annotation()
        annot.setDevice("woot")
        assert annot.getDevice() == "woot"
        annot.setDeviceType(7)
        assert annot.getDeviceType() == 7

    def test_annotation_from_graph(self):
        nn = ng.NNModule()
        node = nn.dataFlow.createNode(ng.NeuralNetOperator("TestOp"))
        annot = node.getAnnotation()
        annot.setDeviceType(7)
        node.setAnnotation(annot)
        new_annot = node.getAnnotation()
        assert new_annot.getDeviceType() == 7

    def test_annotation_device_option(self):
        nn = ng.NNModule()
        node = nn.dataFlow.createNode(ng.NeuralNetOperator("TestOp"))
        d = caffe2_pb2.DeviceOption()
        d.node_name = "test"
        node.annotation.device_option = d
        # access in a different way
        d_2 = nn.controlFlow[0].annotation.device_option
        assert d == d_2

    def test_distributed_annotations(self):
        nn = ng.NNModule()
        key = nn.dataFlow.createNode(ng.NeuralNetData("key"))
        length = nn.dataFlow.createNode(ng.NeuralNetData("length"))
        node = nn.dataFlow.createNode(ng.NeuralNetOperator("TestOp"))

        annot = ng.Annotation()
        annot.setKeyNode(key)
        annot.setLengthNode(length)
        annot.setComponentLevels(["", "test", "woot"])

        node.setAnnotation(annot)

        new_annot = node.getAnnotation()
        #assert new_annot.getLengthNode() == length
        assert new_annot.getKeyNode() == key
        assert len(new_annot.getComponentLevels()) == 3
        assert new_annot.getComponentLevels()[0] == ""
        assert new_annot.getComponentLevels()[2] == "woot"

    def test_distributed_device_map(self):
        net = core.Net("name")
        net.FC(["X", "W"], ["Y"])
        d = caffe2_pb2.DeviceOption()
        nn = ng.NNModule(net, {"X": d, "W": d})

        with self.assertRaises(Exception):
            nn = ng.NNModule(net, {"X": d, "Fake": d})
Example #35
class MKLRewriteTest(hu.HypothesisTestCase):
    @given(gen=st.sampled_from(
        [simple_relu, simple_fc, simple_mlp, simple_cnn]))
    def test_mkl_simple_rewrite(self, gen):
        cpu_model, (shape, ) = gen()
        cpu_model = deterministic_io(cpu_model)
        mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
        X = np.random.randn(*shape).astype(np.float32)

        def run(model):
            self.ws.run(model.InitProto())
            self.ws.create_blob(model.Proto().external_input[0]).feed(X)
            self.ws.run(model.Proto())
            return self.ws.blobs[model.Proto().external_output[0]].fetch()

        np.testing.assert_allclose(run(cpu_model),
                                   run(mkl_model),
                                   atol=1e-4,
                                   rtol=1e-4)

    def test_mkl_resnet_rewrite(self):
        cpu_model, (shape, ) = complex_resnet()
        cpu_model = deterministic_io(cpu_model)
        mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
        np.random.seed(1701)
        X = np.random.randn(*shape).astype(np.float32)

        def run(model):
            self.ws.run(model.InitProto())
            self.ws.create_blob(model.Proto().external_input[0]).feed(X)
            self.ws.run(model.Proto())
            return self.ws.blobs[model.Proto().external_output[0]].fetch()

        np.testing.assert_allclose(run(cpu_model),
                                   run(mkl_model),
                                   atol=1e-4,
                                   rtol=1e-4)

    def test_mkl_multi_output_rewrite(self):
        cpu_model, shapes = double_matmul()
        cpu_model = deterministic_io(cpu_model)
        mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
        np.random.seed(1701)
        Xs = [np.random.randn(*shape).astype(np.float32) for shape in shapes]

        def run(model):
            self.ws.run(model.InitProto())
            for (name, X) in zip(model.Proto().external_input, Xs):
                self.ws.create_blob(name).feed(X)
            print(model.Proto())
            self.ws.run(model.Proto())
            return [
                self.ws.blobs[name].fetch()
                for name in model.Proto().external_output
            ]

        run(mkl_model)

        np.testing.assert_allclose(run(cpu_model),
                                   run(mkl_model),
                                   atol=1e-4,
                                   rtol=1e-4)

    def test_mkl_alexnet_rewrite(self):
        cpu_model, (shape, ) = alexnet()
        cpu_model = deterministic_io(cpu_model)
        mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
        np.random.seed(1701)
        X = np.random.randn(*shape).astype(np.float32)

        def run(model):
            self.ws.run(model.InitProto())
            self.ws.create_blob(model.Proto().external_input[0]).feed(X)
            self.ws.run(model.Proto())
            return self.ws.blobs[model.Proto().external_output[0]].fetch()

        np.testing.assert_allclose(run(cpu_model),
                                   run(mkl_model),
                                   atol=1e-4,
                                   rtol=1e-4)
Example #36
class TestUtilityOps(hu.HypothesisTestCase):
    @given(X=hu.tensor(), args=st.booleans(), **hu.gcs)
    def test_slice(self, X, args, gc, dc):
        X = X.astype(dtype=np.float32)
        dim = random.randint(0, X.ndim - 1)
        slice_start = random.randint(0, X.shape[dim] - 1)
        slice_end = random.randint(slice_start, X.shape[dim] - 1)
        starts = np.array([0] * X.ndim).astype(np.int32)
        ends = np.array([-1] * X.ndim).astype(np.int32)
        starts[dim] = slice_start
        ends[dim] = slice_end

        if args:
            op = core.CreateOperator("Slice", ["X"], ["Y"],
                                     starts=starts,
                                     ends=ends,
                                     device_option=gc)

            def slice_ref(X):
                slc = [slice(None)] * X.ndim
                slc[dim] = slice(slice_start, slice_end)
                return [X[slc]]

            inputs = [X]
        else:
            op = core.CreateOperator("Slice", ["X", "starts", "ends"], ["Y"],
                                     device_option=gc)

            def slice_ref(x, starts, ends):
                slc = [slice(None)] * x.ndim
                slc[dim] = slice(slice_start, slice_end)
                return [x[slc]]

            inputs = [X, starts, ends]

        self.assertReferenceChecks(gc, op, inputs, slice_ref)
        self.assertDeviceChecks(dc, op, inputs, [0])
        self.assertGradientChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            outputs_to_check=0,
            outputs_with_grads=[0],
        )

    @given(dtype=st.sampled_from([np.float32, np.int32]),
           ndims=st.integers(min_value=1, max_value=5),
           seed=st.integers(min_value=0, max_value=65536),
           null_axes=st.booleans(),
           engine=st.sampled_from(['CUDNN', None]),
           **hu.gcs)
    def test_transpose(self, dtype, ndims, seed, null_axes, engine, gc, dc):
        dims = (np.random.rand(ndims) * 16 + 1).astype(np.int32)
        X = (np.random.rand(*dims) * 16).astype(dtype)

        if null_axes:
            axes = None
            op = core.CreateOperator("Transpose", ["input"], ["output"],
                                     engine=engine)
        else:
            np.random.seed(int(seed))
            axes = [int(v) for v in list(np.random.permutation(X.ndim))]
            op = core.CreateOperator("Transpose", ["input"], ["output"],
                                     axes=axes,
                                     engine=engine)

        def transpose_ref(x, axes):
            return (np.transpose(x, axes), )

        self.assertReferenceChecks(gc, op, [X, axes], transpose_ref)

    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    def test_gpu_transpose_minusones(self):
        '''
        Reproduce a problem with an earlier version of the CuDNN Transpose op
        that cast ints to floats.
        '''
        X = -np.ones((2, 10)).astype(np.int32)
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            workspace.FeedBlob("X", X)
            print("X:\n{}\n".format(workspace.FetchBlob("X")))
            op = core.CreateOperator("Transpose", ["X"], ["Y"], engine='CUDNN')
            workspace.RunOperatorOnce(op)
            Y = workspace.FetchBlob("Y")
            print("Y:\n{}\n".format(Y))

            for j in list(Y.flatten()):
                self.assertEqual(-1, j)

    @given(m=st.integers(5, 10),
           n=st.integers(5, 10),
           o=st.integers(5, 10),
           nans=st.booleans(),
           **hu.gcs)
    def test_nan_check(self, m, n, o, nans, gc, dc):
        other = np.array([1, 2, 3]).astype(np.float32)
        X = np.random.rand(m, n, o).astype(np.float32)
        if nans:
            x_nan = np.random.randint(0, m)
            y_nan = np.random.randint(0, n)
            z_nan = np.random.randint(0, o)
            X[x_nan, y_nan, z_nan] = float('NaN')

        def nan_reference(X, Y):
            if not np.isnan(X).any():
                return [X]
            else:
                return [np.array([])]

        op = core.CreateOperator("NanCheck", ["X", "other"], ["Y"])

        try:
            self.assertReferenceChecks(
                device_option=gc,
                op=op,
                inputs=[X, other],
                reference=nan_reference,
            )
            if nans:
                self.assertTrue(False, "Did not fail when presented with NaN!")
        except RuntimeError:
            self.assertTrue(nans, "No NaNs but failed")

        try:
            self.assertGradientChecks(
                device_option=gc,
                op=op,
                inputs=[X],
                outputs_to_check=0,
                outputs_with_grads=[0],
            )
            if nans:
                self.assertTrue(False, "Did not fail when gradient had NaN!")
        except RuntimeError:
            pass

    @given(n=st.integers(4, 5),
           m=st.integers(6, 7),
           d=st.integers(2, 3),
           **hu.gcs)
    def test_elementwise_max(self, n, m, d, gc, dc):
        X = np.random.rand(n, m, d).astype(np.float32)
        Y = np.random.rand(n, m, d).astype(np.float32)
        Z = np.random.rand(n, m, d).astype(np.float32)

        def max_op(X, Y, Z):
            return [np.maximum(np.maximum(X, Y), Z)]

        op = core.CreateOperator("Max", ["X", "Y", "Z"], ["mx"])

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X, Y, Z],
            reference=max_op,
        )

    @given(n=st.integers(4, 5),
           m=st.integers(6, 7),
           d=st.integers(2, 3),
           **hu.gcs)
    def test_elementwise_max_grad(self, n, m, d, gc, dc):
        go = np.random.rand(n, m, d).astype(np.float32)
        X = np.random.rand(n, m, d).astype(np.float32)
        Y = np.random.rand(n, m, d).astype(np.float32)
        Z = np.random.rand(n, m, d).astype(np.float32)
        mx = np.maximum(np.maximum(X, Y), Z)

        def max_grad_op(mx, go, X, Y, Z):
            def mx_grad(a):
                return go * (mx == a)

            return [mx_grad(a) for a in [X, Y, Z]]

        op = core.CreateOperator("MaxGradient", ["mx", "go", "X", "Y", "Z"],
                                 ["gX", "gY", "gZ"])

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[mx, go, X, Y, Z],
            reference=max_grad_op,
        )

    @given(inputs=hu.lengths_tensor().flatmap(lambda pair: st.tuples(
        st.just(pair[0]),
        st.just(pair[1]),
        hu.dims(max_value=len(pair[1])),
    )).flatmap(lambda tup: st.tuples(
        st.just(tup[0]),
        st.just(tup[1]),
        hu.arrays(tup[2],
                  dtype=np.int32,
                  elements=st.integers(min_value=0, max_value=len(tup[1]) - 1)
                  ),
    )),
           **hu.gcs_cpu_only)
    def test_lengths_gather(self, inputs, gc, dc):
        items = inputs[0]
        lengths = inputs[1]
        indices = inputs[2]

        def lengths_gather_op(items, lengths, indices):
            ends = np.cumsum(lengths)
            return [
                np.concatenate(
                    list(items[ends[i] - lengths[i]:ends[i]] for i in indices))
            ]

        op = core.CreateOperator("LengthsGather",
                                 ["items", "lengths", "indices"], ["output"])

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[items, lengths, indices],
            reference=lengths_gather_op,
        )

    @given(**hu.gcs)
    def test_size_op(self, gc, dc):
        X = np.array([[1, 2], [3, 4]]).astype(np.float32)

        def size_op(tensor):
            return [np.prod(tensor.shape)]

        op = core.CreateOperator("Size", ["X"], ["output"])

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            reference=size_op,
        )

    def test_alias_op(self):
        """ Don't use hypothesis because there are only 2 cases to check"""
        for size in [0, 5]:
            X = np.arange(size).astype(np.float32)
            workspace.FeedBlob('X', X)

            op = core.CreateOperator("Alias", ["X"], ["Y"])
            workspace.RunOperatorOnce(op)
            Y = workspace.FetchBlob('Y')
            np.testing.assert_array_equal(X, Y)

    @given(**hu.gcs)
    def test_range(self, gc, dc):
        names = [
            ('stop_', ),
            ('start_', 'stop_'),
            ('start_', 'stop_', 'step_'),
        ]
        # Most random values aren't great here, so use a fixed set instead of
        # hypothesis.
        for inputs in (
            (10, ),
            (np.float32(10.0), ),
            (0, ),
            (0, 0),
            (10., 5.0, -1.),
            (2, 10000),
            (2, 10000, 20000),
            (2, 10000, -1),
        ):
            inputs = [np.array(v) for v in inputs]
            op = core.CreateOperator("Range", names[len(inputs) - 1], ["Y"])

            self.assertReferenceChecks(
                device_option=gc,
                op=op,
                inputs=inputs,
                reference=lambda *x: [np.arange(*x)],
            )
            self.assertDeviceChecks(dc, op, inputs, [0])

        with self.assertRaisesRegexp(RuntimeError, 'Step size cannot be 0'):
            inputs = (np.array(0), np.array(10), np.array(0))
            op = core.CreateOperator("Range", names[len(inputs) - 1], ["Y"])
            self.assertReferenceChecks(
                device_option=gc,
                op=op,
                inputs=inputs,
                reference=lambda *x: [np.arange(*x)],
            )
Example #37
class TestStreamStateMachine(object):
    """
    Tests of the stream state machine.
    """
    @given(state=sampled_from(h2.stream.StreamState),
           input_=sampled_from(h2.stream.StreamInputs))
    def test_state_transitions(self, state, input_):
        s = h2.stream.H2StreamStateMachine(stream_id=1)
        s.state = state

        try:
            s.process_input(input_)
        except h2.exceptions.StreamClosedError:
            # This can only happen for streams that started in the closed
            # state OR where the input was RECV_DATA and the state was not
            # OPEN or HALF_CLOSED_LOCAL OR where the state was
            # HALF_CLOSED_REMOTE and a frame was received.
            if state == h2.stream.StreamState.CLOSED:
                assert s.state == h2.stream.StreamState.CLOSED
            elif input_ == h2.stream.StreamInputs.RECV_DATA:
                assert s.state == h2.stream.StreamState.CLOSED
                assert state not in (
                    h2.stream.StreamState.OPEN,
                    h2.stream.StreamState.HALF_CLOSED_LOCAL,
                )
            elif state == h2.stream.StreamState.HALF_CLOSED_REMOTE:
                assert input_ in (
                    h2.stream.StreamInputs.RECV_HEADERS,
                    h2.stream.StreamInputs.RECV_PUSH_PROMISE,
                    h2.stream.StreamInputs.RECV_DATA,
                    h2.stream.StreamInputs.RECV_CONTINUATION,
                )
        except h2.exceptions.ProtocolError:
            assert s.state == h2.stream.StreamState.CLOSED
        else:
            assert s.state in h2.stream.StreamState

    def test_state_machine_only_allows_stream_states(self):
        """
        The Stream state machine only allows StreamState inputs.
        """
        s = h2.stream.H2StreamStateMachine(stream_id=1)

        with pytest.raises(ValueError):
            s.process_input(1)

    def test_stream_state_machine_forbids_pushes_on_server_streams(self):
        """
        Streams where this peer is a server do not allow receiving pushed
        frames.
        """
        s = h2.stream.H2StreamStateMachine(stream_id=1)
        s.process_input(h2.stream.StreamInputs.RECV_HEADERS)

        with pytest.raises(h2.exceptions.ProtocolError):
            s.process_input(h2.stream.StreamInputs.RECV_PUSH_PROMISE)

    def test_stream_state_machine_forbids_sending_pushes_from_clients(self):
        """
        Streams where this peer is a client do not allow sending pushed frames.
        """
        s = h2.stream.H2StreamStateMachine(stream_id=1)
        s.process_input(h2.stream.StreamInputs.SEND_HEADERS)

        with pytest.raises(h2.exceptions.ProtocolError):
            s.process_input(h2.stream.StreamInputs.SEND_PUSH_PROMISE)

    @pytest.mark.parametrize("state", h2.stream.StreamState)
    @pytest.mark.parametrize(
        "input_",
        [
            h2.stream.StreamInputs.RECV_PRIORITY,
            h2.stream.StreamInputs.SEND_PRIORITY
        ]
    )
    def test_priority_frames_allowed_in_all_states(self, state, input_):
        """
        Priority frames can be sent/received in all stream states.
        """
        c = h2.stream.H2StreamStateMachine(stream_id=1)
        c.state = state

        c.process_input(input_)

    @pytest.mark.parametrize(
        "input_",
        [
            h2.stream.StreamInputs.SEND_HEADERS,
            h2.stream.StreamInputs.SEND_PUSH_PROMISE,
            h2.stream.StreamInputs.SEND_RST_STREAM,
            h2.stream.StreamInputs.SEND_DATA,
            h2.stream.StreamInputs.SEND_WINDOW_UPDATE,
            h2.stream.StreamInputs.SEND_END_STREAM,
        ]
    )
    def test_cannot_send_on_closed_streams(self, input_):
        """
        Sending anything but a PRIORITY frame is forbidden on closed streams.
        """
        c = h2.stream.H2StreamStateMachine(stream_id=1)
        c.state = h2.stream.StreamState.CLOSED

        with pytest.raises(h2.exceptions.StreamClosedError):
            c.process_input(input_)
Example #38
class TestCrossEntropyOps(hu.HypothesisTestCase):
    @given(inputs=st.lists(
        elements=st.integers(min_value=1, max_value=5),
        min_size=1,
        max_size=2,
    ).flatmap(lambda shape: st.tuples(
        hu.arrays(dims=shape,
                  elements=st.one_of(
                      hu.floats(min_value=-1.0, max_value=-0.1),
                      hu.floats(min_value=0.1, max_value=1.0),
                  )),
        hu.arrays(
            dims=shape,
            elements=st.sampled_from([0.0, 1.0]),
        ),
    )),
           options=st.one_of(st.tuples(st.just(True), st.just(False)),
                             st.tuples(st.just(False), st.just(True)),
                             st.tuples(st.just(False), st.just(False))),
           **hu.gcs)
    def test_sigmoid_cross_entropy_with_logits(self, inputs, options, gc, dc):
        logits, targets = inputs
        log_D_trick, unjoined_lr_loss = options

        def sigmoid_xentr_logit_ref(logits, targets):
            if unjoined_lr_loss:
                s = unjoined_sigmoid_cross_entropy(logits, targets)
            else:
                s = (sigmoid_cross_entropy_with_logits(logits, targets)
                     if not log_D_trick else
                     sigmoid_cross_entropy_with_logits_with_log_D_trick(
                         logits, targets))
            m = np.mean(s, axis=len(logits.shape) - 1)
            return (m, )

        def sigmoid_xentr_logit_grad_ref(g_out, outputs, fwd_inputs):
            fwd_logits, fwd_targets = fwd_inputs
            inner_size = fwd_logits.shape[-1]
            if unjoined_lr_loss:
                m = unjoined_sigmoid_cross_entropy_grad(logits, targets)
            else:
                m = (sigmoid_cross_entropy_with_logits_grad(
                    fwd_logits, fwd_targets) if not log_D_trick else
                     sigmoid_cross_entropy_with_logits_with_log_D_trick_grad(
                         fwd_logits, fwd_targets))
            # m = fwd_targets - sigmoid(fwd_logits)
            g_in = -np.expand_dims(g_out, axis=-1) * m / inner_size
            return (g_in, None)

        op = core.CreateOperator('SigmoidCrossEntropyWithLogits',
                                 ['logits', 'targets'], ['xentropy'],
                                 log_D_trick=log_D_trick,
                                 unjoined_lr_loss=unjoined_lr_loss)
        self.assertReferenceChecks(device_option=gc,
                                   op=op,
                                   inputs=[logits, targets],
                                   reference=sigmoid_xentr_logit_ref,
                                   output_to_grad='xentropy',
                                   grad_reference=sigmoid_xentr_logit_grad_ref)

    @given(log_D_trick=st.just(False), **hu.gcs_cpu_only)
    def test_cross_entropy_and_unjoied_cross_entropy_relation(
            self, log_D_trick, gc, dc):
        logits = np.array([
            1.4720, 0.3500, -0.6529, -1.1908, 0.8357, -1.0774, -0.3395,
            -0.2469, 0.6708, -1.8332
        ],
                          dtype='f')
        targets = np.array([1., 1., 1., 1., 1., 1., 0., 0., 0., 0.], dtype='f')
        lr_size = targets.size
        unjoined_lr_loss = False

        def sigmoid_xentr_logit_ref(logits, targets):
            if unjoined_lr_loss:
                s = unjoined_sigmoid_cross_entropy(logits, targets)
            else:
                s = sigmoid_cross_entropy_with_logits(logits, targets)
            m = np.mean(s, axis=len(logits.shape) - 1)
            return (m, )

        def sigmoid_xentr_logit_grad_ref(g_out, outputs, fwd_inputs):
            fwd_logits, fwd_targets = fwd_inputs
            inner_size = fwd_logits.shape[-1]
            if unjoined_lr_loss:
                m = unjoined_sigmoid_cross_entropy_grad(logits, targets)
            else:
                m = sigmoid_cross_entropy_with_logits_grad(
                    fwd_logits, fwd_targets)

            # m = fwd_targets - sigmoid(fwd_logits)
            g_in = -np.expand_dims(g_out, axis=-1) * m / inner_size
            return (g_in, None)

        op = core.CreateOperator('SigmoidCrossEntropyWithLogits',
                                 ['logits', 'targets'], ['xentropy'],
                                 log_D_trick=log_D_trick,
                                 unjoined_lr_loss=unjoined_lr_loss)
        output_lr = self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[logits, targets],
            reference=sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=sigmoid_xentr_logit_grad_ref)

        # Unjoined dataset where labels change later
        logits = np.array([
            1.4720, 0.3500, -0.6529, -1.1908, 0.8357, -1.0774, -0.3395,
            -0.2469, 0.6708, -1.8332, 1.4720, 0.3500, -0.6529, -1.1908, 0.8357,
            -1.0774
        ],
                          dtype='f')
        targets = np.array(
            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1.],
            dtype='f')
        unjoined_lr_loss = True
        unjoined_lr_size = targets.size

        op = core.CreateOperator('SigmoidCrossEntropyWithLogits',
                                 ['logits', 'targets'], ['xentropy'],
                                 log_D_trick=log_D_trick,
                                 unjoined_lr_loss=unjoined_lr_loss)
        outputs_unjoined_lr = self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[logits, targets],
            reference=sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=sigmoid_xentr_logit_grad_ref)

        self.assertAlmostEqual(output_lr[0].item(0) * lr_size /
                               unjoined_lr_size,
                               outputs_unjoined_lr[0].item(0),
                               delta=0.0001)

    @given(inputs=st.lists(
        elements=st.integers(min_value=1, max_value=5),
        min_size=1,
        max_size=2,
    ).flatmap(lambda shape: st.tuples(
        hu.arrays(dims=shape,
                  elements=st.one_of(
                      hu.floats(min_value=-1.0, max_value=-0.1),
                      hu.floats(min_value=0.1, max_value=1.0),
                  )),
        hu.arrays(
            dims=shape,
            elements=st.sampled_from([0.0, 1.0]),
        ),
        hu.arrays(
            dims=shape,
            elements=hu.floats(min_value=0.1, max_value=1.0),
        ),
    )),
           **hu.gcs)
    def test_weighted_sigmoid_cross_entropy_with_logits(self, inputs, gc, dc):
        logits, targets, weights = inputs

        def weighted_sigmoid_xentr_logit_ref(logits, targets, weights):
            s = sigmoid_cross_entropy_with_logits(logits, targets)
            s = np.multiply(s, weights)
            m = np.mean(s, axis=len(logits.shape) - 1)
            return (m, )

        def weighted_sigmoid_xentr_logit_grad_ref(g_out, outputs, fwd_inputs):
            fwd_logits, fwd_targets, fwd_weights = fwd_inputs
            inner_size = fwd_logits.shape[-1]
            m = fwd_targets - sigmoid(fwd_logits)
            m = np.multiply(m, weights)
            g_in = -np.expand_dims(g_out, axis=-1) * m / inner_size
            return (g_in, None, None)

        op = core.CreateOperator('WeightedSigmoidCrossEntropyWithLogits',
                                 ['logits', 'targets', 'weights'],
                                 ['xentropy'])
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[logits, targets, weights],
            reference=weighted_sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=weighted_sigmoid_xentr_logit_grad_ref)

    @given(n=st.integers(2, 10), b=st.integers(1, 5), **hu.gcs_cpu_only)
    def test_soft_label_cross_entropy(self, n, b, gc, dc):
        # Initialize X and add 1e-2 for numerical stability
        X = np.random.rand(b, n).astype(np.float32)
        X = X + 1e-2
        for i in range(b):
            X[i] = X[i] / np.sum(X[i])

        # Initialize label
        label = np.random.rand(b, n).astype(np.float32)
        for i in range(b):
            label[i] = label[i] / np.sum(label[i])

        # Reference implementation of cross entropy with soft labels
        def soft_label_xentr_ref(X, label):
            xent = [
                np.sum((-label[j][i] * np.log(max(X[j][i], 1e-20))
                        for i in range(len(X[0])))) for j in range(b)
            ]
            return (xent, )

        op = core.CreateOperator("CrossEntropy", ["X", "label"], ["Y"])

        # TODO(surya) Once CrossEntropyOp is ported to GPU, add the respective
        # tests to this unit test.
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X, label],
            reference=soft_label_xentr_ref,
        )

        self.assertGradientChecks(gc,
                                  op, [X, label],
                                  0, [0],
                                  stepsize=1e-4,
                                  threshold=1e-2)
Example #39
class TestQueue:

    list_strat = st.lists(elements=(st.floats(allow_nan=False, allow_infinity=False) | st.text() | st.integers() |
                                    st.booleans() | st.dictionaries(st.integers(), st.integers())
                                    | st.lists(st.integers())))
    single_strat = st.one_of(st.floats(allow_infinity=False, allow_nan=False) | st.text() | st.integers() |
                             st.booleans() | st.dictionaries(st.integers(), st.integers()) |
                             st.lists(st.integers()))
    fail_single_strat = st.sampled_from([None, float('nan'), float('inf'), float('-inf')])

    # P1: Calls to enqueue that are successful should increment len by 1.
    @given(single_strat)
    def test_enqueue_p1(self, v):
        q = Queue()
        start_len = q.len()
        q.enqueue(v)
        assert start_len + 1 == q.len()

    # P2: Unsuccessful calls to enqueue should not affect the queue, i.e. its length should not change.
    @given(fail_single_strat)
    def test_enqueue_p2(self, v):
        q = Queue()
        start_len = q.len()
        with pytest.raises(ValueError):
            q.enqueue(v)
        assert q.len() == start_len

    # P3: Calls to dequeue that are successful (non-None) should decrement len by 1 for non-empty queues.
    @given(list_strat)
    def test_dequeue_p3(self, v):
        q = Queue()
        for x in v:
            q.enqueue(x)
        start_len = q.len()
        item = q.dequeue()
        if item is not None:
            assert start_len - 1 == q.len()

    # P4: Len's minimum value should be 0, i.e. dequeueing an empty queue should not decrement len.
    def test_dequeue_p4(self):
        q = Queue()
        q.dequeue()
        assert q.len() == 0

    # P5: The initial value for len should be 0.
    def test_len_p5(self):
        q = Queue()
        assert q.len() == 0

    # P6: The queue is FIFO: enqueue stores values in arrival order and dequeue
    # removes them in the same order, so enqueueing a sequence and then
    # dequeueing the same number of items returns them unchanged.
    @given(list_strat)
    def test_enqueue_dequeue_p6(self, v):
        q = Queue()
        initial_list = v
        for x in v:
            q.enqueue(x)
        return_list = []
        for x in v:
            return_list.append(q.dequeue())
        assert initial_list == return_list

    # P7: If you dequeue an empty Queue, it should return None.
    def test_dequeue_p7(self):
        q = Queue()
        assert q.dequeue() is None

    # P8: If you try to enqueue None, Inf, -Inf, or NaN, it should raise a ValueError.
    @given(fail_single_strat)
    def test_enqueue_p8(self, v):
        q = Queue()
        # pytest.raises already fails the test if enqueue does not raise,
        # so no extra asserts are needed
        with pytest.raises(ValueError):
            q.enqueue(v)

    # P9: Calling len should not affect the order of the Queue.
    @given(list_strat)
    def test_len_p9(self, v):
        q = Queue()
        start_list = v
        for x in v:
            q.enqueue(x)
        q.len()
        end_list = []
        for x in v:
            end_list.append(q.dequeue())
        assert start_list == end_list

    # P10: When an enqueue attempt raises a ValueError, the order of the queue does not change.
    @given(v=list_strat, w=fail_single_strat)
    def test_len_p10(self, v, w):
        q = Queue()
        start_list = v
        for x in v:
            q.enqueue(x)
        with pytest.raises(ValueError):
            q.enqueue(w)
        end_list = []
        for x in v:
            end_list.append(q.dequeue())
        assert end_list == start_list
Example #40
def test_can_generate_non_naive_time():
    assert minimal(times(timezones=timezones()), lambda d: d.tzinfo).tzinfo == pytz.UTC


def test_can_generate_non_naive_datetime():
    assert (
        minimal(datetimes(timezones=timezones()), lambda d: d.tzinfo).tzinfo == pytz.UTC
    )


@given(datetimes(timezones=timezones()))
def test_timezone_aware_datetimes_are_timezone_aware(dt):
    assert dt.tzinfo is not None


@given(sampled_from(["min_value", "max_value"]), datetimes(timezones=timezones()))
def test_datetime_bounds_must_be_naive(name, val):
    with pytest.raises(InvalidArgument):
        datetimes(**{name: val}).validate()


def test_underflow_in_simplify():
    # we shouldn't trigger a pytz bug when we're simplifying
    minimal(
        datetimes(
            max_value=dt.datetime.min + dt.timedelta(days=3), timezones=timezones()
        ),
        lambda x: x.tzinfo != pytz.UTC,
    )

Example #41
            f.run_validators(value)
            return True
        except ValidationError:
            return False

    return validate


safe_letters = string.ascii_letters + string.digits + '_-'

domains = st.builds(lambda x, y: '.'.join(x + [y]),
                    st.lists(st.text(safe_letters, min_size=1), min_size=1),
                    st.sampled_from([
                        'com',
                        'net',
                        'org',
                        'biz',
                        'info',
                    ]))

email_domains = st.one_of(
    domains, st.sampled_from(['gmail.com', 'yahoo.com', 'hotmail.com']))

base_emails = st.text(safe_letters, min_size=1)

emails_with_plus = st.builds(lambda x, y: '%s+%s' % (x, y), base_emails,
                             base_emails)

emails = st.builds(lambda x, y: '%s@%s' % (x, y),
                   st.one_of(base_emails, emails_with_plus), email_domains)
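

# A minimal usage sketch (not part of the original source, assuming `given`
# is imported from hypothesis): the strategies above plug straight into
# @given, e.g. to check that every generated address is well formed.
@given(emails)
def test_generated_emails_are_well_formed(email):
    local, _, domain = email.partition('@')
    assert local and domain
    assert domain.rsplit('.', 1)[-1] in {'com', 'net', 'org', 'biz', 'info'}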
Example #42
from hypothesis import given, assume, settings, strategies as st
from hypothesis.strategies import integers, composite
from ppci.wasm import Module, instantiate

# Create wasm module strategy?
# Function strategy?

# Instruction strategies:
i64_const = st.integers(min_value=-2**63,
                        max_value=2**63 - 1).map(lambda x: ('i64.const', x))

i64_load_opcode = st.sampled_from([
    'i64.load',
    'i64.load32_s',
    'i64.load32_u',
    'i64.load16_s',
    'i64.load16_u',
    'i64.load8_s',
    'i64.load8_u',
])


@composite
def i64_load(draw):
    opcode = draw(i64_load_opcode)
    base = draw(st.integers(min_value=0, max_value=9))
    offset = draw(st.integers(min_value=0, max_value=11))
    return (opcode, 'offset={}'.format(offset), ('i32.const', base))
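

# A minimal usage sketch (not part of the original source): the composite
# strategy above is drawn like any other strategy.
@given(instr=i64_load())
def test_i64_load_instruction_shape(instr):
    opcode, offset_arg, base = instr
    assert opcode.startswith('i64.load')
    assert offset_arg.startswith('offset=')
    assert base[0] == 'i32.const'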


@composite
Example #43
class TestBooleanMaskOp(hu.HypothesisTestCase):

    @given(x=hu.tensor(min_dim=1,
                       max_dim=5,
                       elements=st.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs)
    def test_boolean_mask(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask",
                                 ["data", "mask"],
                                 "masked_data")
        mask = np.random.choice(a=[True, False], size=x.shape[0])

        def ref(x, mask):
            return (x[mask],)

        self.assertReferenceChecks(gc, op, [x, mask], ref)
        self.assertDeviceChecks(dc, op, [x, mask], [0])

    @given(x=hu.tensor(min_dim=1,
                       max_dim=5,
                       elements=st.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs)
    def test_boolean_mask_indices(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask",
                                 ["data", "mask"],
                                 ["masked_data", "masked_indices"])
        mask = np.random.choice(a=[True, False], size=x.shape[0])

        def ref(x, mask):
            return (x[mask], np.where(mask)[0])

        self.assertReferenceChecks(gc, op, [x, mask], ref)
        self.assertDeviceChecks(dc, op, [x, mask], [0])

    @staticmethod
    def _dtype_conversion(x, dtype, gc, dc):
        """SequenceMask only supports fp16 with CUDA."""
        if dtype == np.float16:
            assume(gc.device_type == caffe2_pb2.CUDA)
            dc = [d for d in dc if d.device_type == caffe2_pb2.CUDA]
            x = x.astype(dtype)
        return x, dc

    @given(x=hu.tensor(min_dim=2,
                       max_dim=5,
                       elements=st.floats(min_value=0.5, max_value=1.0)),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    def test_sequence_mask_with_lengths(self, x, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        op = core.CreateOperator("SequenceMask",
                                 ["data", "lengths"],
                                 ["masked_data"],
                                 mode="sequence",
                                 axis=len(x.shape) - 1,
                                 fill_val=fill_val)
        elem_dim = x.shape[-1]
        leading_dim = 1
        for dim in x.shape[:-1]:
            leading_dim *= dim
        lengths = np.random.randint(0, elem_dim, [leading_dim])\
            .astype(np.int32)

        def ref(x, lengths):
            ref = np.reshape(x, [leading_dim, elem_dim])
            for i in range(leading_dim):
                for j in range(elem_dim):
                    if j >= lengths[i]:
                        ref[i, j] = fill_val
            return [ref.reshape(x.shape)]

        self.assertReferenceChecks(gc, op, [x, lengths], ref)
        self.assertDeviceChecks(dc, op, [x, lengths], [0])

    @given(x=hu.tensor(min_dim=2,
                       max_dim=5,
                       elements=st.floats(min_value=0.5, max_value=1.0)),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    def test_sequence_mask_with_window(self, x, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        radius = 2
        op = core.CreateOperator("SequenceMask",
                                 ["data", "centers"],
                                 ["masked_data"],
                                 mode="window",
                                 radius=radius,
                                 axis=len(x.shape) - 1,
                                 fill_val=fill_val)
        elem_dim = x.shape[-1]
        leading_dim = 1
        for dim in x.shape[:-1]:
            leading_dim *= dim
        centers = np.random.randint(0, elem_dim, [leading_dim])\
            .astype(np.int32)

        def ref(x, centers):
            ref = np.reshape(x, [leading_dim, elem_dim])
            for i in range(leading_dim):
                for j in range(elem_dim):
                    if j > centers[i] + radius or j < centers[i] - radius:
                        ref[i, j] = fill_val
            return [ref.reshape(x.shape)]

        self.assertReferenceChecks(gc, op, [x, centers], ref)
        self.assertDeviceChecks(dc, op, [x, centers], [0])

        threshold = 0.4 if dtype == np.float16 else 0.005
        self.assertGradientChecks(gc, op, [x, centers], 0, [0],
                                  threshold=threshold)

    @given(x=hu.tensor(min_dim=2,
                       max_dim=5,
                       elements=st.floats(min_value=0.5, max_value=1.0)),
           mode=st.sampled_from(['upper', 'lower', 'upperdiag', 'lowerdiag']),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    def test_sequence_mask_triangle(self, x, mode, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        op = core.CreateOperator("SequenceMask",
                                 ["data"],
                                 ["masked_data"],
                                 mode=mode,
                                 axis=len(x.shape) - 1,
                                 fill_val=fill_val)
        elem_dim = x.shape[-1]
        leading_dim = 1
        for dim in x.shape[:-1]:
            leading_dim *= dim

        if mode == 'upper':
            def compare(i, j):
                return j > i
        elif mode == 'lower':
            def compare(i, j):
                return j < i
        elif mode == 'upperdiag':
            def compare(i, j):
                return j >= i
        elif mode == 'lowerdiag':
            def compare(i, j):
                return j <= i

        def ref(x):
            ref = np.reshape(x, [leading_dim, elem_dim])
            for i in range(leading_dim):
                for j in range(elem_dim):
                    if compare(i, j):
                        ref[i, j] = fill_val
            return [ref.reshape(x.shape)]

        self.assertReferenceChecks(gc, op, [x], ref)
        self.assertDeviceChecks(dc, op, [x], [0])

        threshold = 0.4 if dtype == np.float16 else 0.005
        stepsize = 0.1 if dtype == np.float16 else 0.05
        self.assertGradientChecks(gc, op, [x], 0, [0],
                                  threshold=threshold, stepsize=stepsize)
Example No. 44
0
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import given, settings
from numpy.testing import assert_allclose

from mygrad.nnet.layers.utils import sliding_window_view

dtype_strat_numpy = st.sampled_from((np.int8, np.int16, np.int32, np.int64,
                                     np.float16, np.float32, np.float64))


@pytest.mark.parametrize(
    "args",
    [
        dict(step=None),
        dict(step=st.integers(max_value=-1)),
        dict(window_shape=st.none() | st.just(1)),
        dict(window_shape=st.lists(st.just(1),
                                   min_size=3)),  # more window dims than arr
        dict(window_shape=(st.just((-1, 1))
                           | st.tuples(st.floats(), st.floats()))),
        dict(window_shape=st.lists(st.integers(5, 7), min_size=1, max_size=2).
             filter(lambda x: 7 in x)),  # window dim too large
        dict(dilation=st.sampled_from([-1, (1, 0), "aa", (1, 1, 1), 1.0])),
        dict(dilation=st.sampled_from([7, (1, 7), (10, 1)])),
    ],
)
@settings(deadline=None)
Example No. 45
0
def test_csr_from_coo_fixed():
    "Make a CSR from COO data"
    rows = np.array([0, 0, 1, 3], dtype=np.int32)
    cols = np.array([1, 2, 0, 1], dtype=np.int32)
    vals = np.arange(4, dtype=np.float_)

    csr = CSR.from_coo(rows, cols, vals)
    assert csr.nrows == 4
    assert csr.ncols == 3
    assert csr.nnz == 4
    assert csr.values == approx(vals)


@given(st.data(), st.integers(0, 100), st.integers(0, 100),
       st.sampled_from(['f4', 'f8']))
def test_csr_from_coo(data, nrows, ncols, dtype):
    dtype = np.dtype(dtype)
    n = nrows * ncols
    nnz = data.draw(st.integers(0, int(n * 0.75)))
    _log.debug('testing %d×%d (%d nnz) of type %s', nrows, ncols, nnz, dtype)

    coords = st.integers(0, max(n - 1, 0))
    coords = data.draw(nph.arrays(np.int32, nnz, elements=coords, unique=True))
    rows = np.mod(coords, nrows, dtype=np.int32)
    cols = np.floor_divide(coords, nrows, dtype=np.int32)

    finite = nph.from_dtype(dtype, allow_infinity=False, allow_nan=False)
    vals = data.draw(nph.arrays(dtype, nnz, elements=finite))

    csr = CSR.from_coo(rows, cols, vals, (nrows, ncols))
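
# Why the flat-coordinate trick above works: unique indices drawn from
# [0, nrows*ncols) map one-to-one onto (row, col) pairs, so the generated
# COO entries never collide. A small standalone check (arbitrary numbers):
import numpy as np

nrows, ncols = 4, 3
coords = np.array([0, 5, 7, 11], dtype=np.int32)
rows = np.mod(coords, nrows)           # -> [0, 1, 3, 3]
cols = np.floor_divide(coords, nrows)  # -> [0, 1, 1, 2]
assert len(set(zip(rows.tolist(), cols.tolist()))) == len(coords)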
Example No. 46
0
    def sample_program_configs(self, draw):
        min_sizes = [2.0, 4.0]
        max_sizes = [5.0, 10.0]
        aspect_ratios = [2.0, 3.0]
        variances = [0.1, 0.1, 0.2, 0.2]
        flip = True
        clip = True
        layer_w = draw(st.integers(min_value=30, max_value=40))
        layer_h = draw(st.integers(min_value=30, max_value=40))
        image_w = draw(st.integers(min_value=40, max_value=50))
        image_h = draw(st.integers(min_value=40, max_value=50))

        step_w = float(image_w) / float(layer_w)
        step_h = float(image_h) / float(layer_h)
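        # Each feature-map cell thus spans step_w x step_h pixels of the image.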

        input_channels = 2
        image_channels = 3
        batch_size = 10

        offset = 0.5
        min_max_aspect_ratios_order = draw(st.sampled_from([True, False]))

        def generate_input(*args, **kwargs):
            # "Input" is the feature map: layer-sized, with input_channels.
            return np.random.random((batch_size, input_channels, layer_w,
                                     layer_h)).astype('float32')

        def generate_image(*args, **kwargs):
            # "Image" is the source image: image-sized, with image_channels.
            return np.random.random((batch_size, image_channels, image_w,
                                     image_h)).astype('float32')

        ops_config = OpConfig(
            type="prior_box",
            inputs={
                "Input": ["input_data"],
                "Image": ["input_image"]
            },
            outputs={
                "Boxes": ["output_boxes"],
                "Variances": ["variances"]
            },
            attrs={
                "min_sizes": min_sizes,
                "max_sizes": max_sizes,
                "aspect_ratios": aspect_ratios,
                "variances": variances,
                "flip": flip,
                "clip": clip,
                "step_w": step_w,
                "step_h": step_h,
                "offset": offset,
                "min_max_aspect_ratios_order": min_max_aspect_ratios_order,
            },
        )

        program_config = ProgramConfig(
            ops=[ops_config],
            weights={},
            inputs={
                "input_data": TensorConfig(data_gen=partial(generate_input)),
                "input_image": TensorConfig(data_gen=partial(generate_image)),
            },
            outputs=["output_boxes", "variances"])

        return program_config
Example No. 47
0
from hypothesis.strategies import composite, floats, sampled_from

@composite
def nonzero_floats(draw, min_value=None, max_value=None):
    sign = draw(sampled_from((-1, +1)))
    magnitude = draw(floats(min_value=min_value, max_value=max_value))
    return sign * magnitude
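
# Hedged usage sketch: bounding the magnitude keeps floats() away from
# 0.0, NaN and infinity, so the drawn value is genuinely nonzero.
from hypothesis import given

@given(nonzero_floats(min_value=1e-3, max_value=1e3))
def test_nonzero_float_is_nonzero(x):
    assert x != 0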
Example No. 48
0
class TestCTCBeamSearchDecoderOp(serial.SerializedTestCase):
    @serial.given(batch=st.sampled_from([1, 2, 4]),
                  max_time=st.sampled_from([1, 8, 64]),
                  alphabet_size=st.sampled_from([1, 2, 32, 128, 512]),
                  beam_width=st.sampled_from([1, 2, 16, None]),
                  **hu.gcs_cpu_only)
    def test_ctc_beam_search_decoder(self, batch, max_time, alphabet_size,
                                     beam_width, gc, dc):
        if not beam_width:
            beam_width = DEFAULT_BEAM_WIDTH
            op_seq_len = core.CreateOperator('CTCBeamSearchDecoder',
                                             ['INPUTS', 'SEQ_LEN'],
                                             ['OUTPUT_LEN', 'VALUES'])

            op_no_seq_len = core.CreateOperator('CTCBeamSearchDecoder',
                                                ['INPUTS'],
                                                ['OUTPUT_LEN', 'VALUES'])
        else:
            op_seq_len = core.CreateOperator('CTCBeamSearchDecoder',
                                             ['INPUTS', 'SEQ_LEN'],
                                             ['OUTPUT_LEN', 'VALUES'],
                                             beam_width=beam_width)

            op_no_seq_len = core.CreateOperator('CTCBeamSearchDecoder',
                                                ['INPUTS'],
                                                ['OUTPUT_LEN', 'VALUES'],
                                                beam_width=beam_width)

        def input_generator():
            inputs = np.random.rand(max_time, batch, alphabet_size)\
                .astype(np.float32)
            seq_len = np.random.randint(1, max_time + 1, size=batch)\
                .astype(np.int32)
            return inputs, seq_len

        def ref_ctc_decoder(inputs, seq_len):
            output_len = np.array([]).astype(np.int32)
            val = np.array([]).astype(np.int32)

            for i in range(batch):
                Pb, Pnb = defaultdict(Counter), defaultdict(Counter)
                Pb[0][()] = 1
                Pnb[0][()] = 0
                A_prev = [()]
                ctc = inputs[:, i, :]
                ctc = np.vstack((np.zeros(alphabet_size), ctc))
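                # The zero row prepended above shifts frames so that time
                # indexes from t=1, matching the Pb[0]/Pnb[0] initialisation.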
                len_i = seq_len[i] if seq_len is not None else max_time

                for t in range(1, len_i + 1):
                    pruned_alphabet = np.where(
                        ctc[t] > DEFAULT_PRUNE_THRESHOLD)[0]
                    for l in A_prev:
                        for c in pruned_alphabet:
                            if c == 0:
                                Pb[t][l] += ctc[t][c] * (Pb[t - 1][l] +
                                                         Pnb[t - 1][l])
                            else:
                                l_plus = l + (c, )
                                if len(l) > 0 and c == l[-1]:
                                    Pnb[t][l_plus] += ctc[t][c] * Pb[t - 1][l]
                                    Pnb[t][l] += ctc[t][c] * Pnb[t - 1][l]
                                else:
                                    Pnb[t][l_plus] += \
                                        ctc[t][c] * (Pb[t - 1][l] + Pnb[t - 1][l])

                                if l_plus not in A_prev:
                                    Pb[t][l_plus] += \
                                        ctc[t][0] * \
                                        (Pb[t - 1][l_plus] + Pnb[t - 1][l_plus])
                                    Pnb[t][l_plus] += ctc[t][c] * Pnb[
                                        t - 1][l_plus]

                    A_next = Pb[t] + Pnb[t]
                    A_prev = sorted(A_next, key=A_next.get, reverse=True)
                    A_prev = A_prev[:beam_width]

                decoded = A_prev[0] if len(A_prev) > 0 else ()

                val = np.hstack((val, decoded))
                output_len = np.append(output_len, len(decoded))

            return [output_len, val]

        def ref_ctc_decoder_max_time(inputs):
            return ref_ctc_decoder(inputs, None)

        inputs, seq_len = input_generator()

        self.assertReferenceChecks(
            device_option=gc,
            op=op_seq_len,
            inputs=[inputs, seq_len],
            reference=ref_ctc_decoder,
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op_no_seq_len,
            inputs=[inputs],
            reference=ref_ctc_decoder_max_time,
        )
Example No. 49
0
def lists_of_length(n):
    return lists(sampled_from('ab'), min_size=n, max_size=n)
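
# Sketch: with min_size == max_size, the generated length is pinned exactly.
from hypothesis import given

@given(lists_of_length(3))
def test_length_is_exactly_three(xs):
    assert len(xs) == 3 and set(xs) <= {'a', 'b'}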
Example No. 50
0
        f"All encodings: {pformat(all_encodings)}")


if HAVE_HYPOTHESIS:

    class JustALengthIssue(Exception):
        pass

    @pytest.mark.xfail
    @given(
        st.text(min_size=1),
        st.sampled_from([
            "ascii",
            "utf-8",
            "utf-16",
            "utf-32",
            "iso-8859-7",
            "iso-8859-8",
            "windows-1255",
        ]),
        st.randoms(),
    )
    @settings(max_examples=200)
    def test_never_fails_to_detect_if_there_is_a_valid_encoding(txt, enc, rnd):
        try:
            data = txt.encode(enc)
        except UnicodeEncodeError:
            assume(False)
        detected = chardet.detect(data)["encoding"]
        if detected is None:
            with pytest.raises(JustALengthIssue):
Example No. 51
0
# coding=utf-8

# This file is part of Hypothesis (https://github.com/DRMacIver/hypothesis)

# Most of this work is copyright (C) 2013-2015 David R. MacIver
# ([email protected]), but it contains contributions by others. See
# https://github.com/DRMacIver/hypothesis/blob/master/CONTRIBUTING.rst for a
# full list of people who may hold copyright, and consult the git log if you
# need to determine who owns an individual contribution.

# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.

# END HEADER

from __future__ import division, print_function, absolute_import, \
    unicode_literals

from hypothesis import Settings, given
from hypothesis.strategies import sampled_from


@given(sampled_from((1, 2)), settings=Settings(min_satisfying_examples=10))
def test_can_handle_sampling_from_fewer_than_min_satisfying(v):
    pass
Example No. 52
0
class RNNCellTest(hu.HypothesisTestCase):
    @given(
        input_tensor=hu.tensor(min_dim=3, max_dim=3, max_value=3),
        num_layers=st.integers(1, 4),
        outputs_with_grad=st.sampled_from([[0], [1], [0, 1]]),
    )
    @ht_settings(max_examples=10)
    def test_unroll_mul(self, input_tensor, num_layers, outputs_with_grad):
        outputs = []
        nets = []
        input_blob = None
        for T in [input_tensor.shape[0], None]:
            model = ModelHelper(
                "rnn_mul_{}".format("unroll" if T else "dynamic"))
            input_blob = model.net.AddExternalInputs("input_blob")
            outputs.append(
                prepare_mul_rnn(model, input_blob, input_tensor.shape, T,
                                outputs_with_grad, num_layers))
            workspace.RunNetOnce(model.param_init_net)
            nets.append(model.net)

            workspace.blobs[input_blob] = input_tensor
            gradient_checker.NetGradientChecker.CompareNets(
                nets,
                outputs,
                outputs_with_grad_ids=outputs_with_grad,
                inputs_with_grads=[input_blob],
                print_net_images=True,
            )

    @given(input_tensor=hu.tensor(min_dim=3, max_dim=3, max_value=3),
           forget_bias=st.floats(-10.0, 10.0),
           drop_states=st.booleans(),
           dim_out=st.lists(
               elements=st.integers(min_value=1, max_value=3),
               min_size=1,
               max_size=3,
           ),
           outputs_with_grads=st.sampled_from([[0], [1], [0, 1], [0, 2],
                                               [0, 1, 2, 3]]))
    @ht_settings(max_examples=10)
    @utils.debug
    def test_unroll_lstm(self, input_tensor, dim_out, outputs_with_grads,
                         **kwargs):
        lstms = [
            _prepare_lstm(*input_tensor.shape,
                          create_lstm=rnn_cell.LSTM,
                          outputs_with_grads=outputs_with_grads,
                          T=T,
                          two_d_initial_states=False,
                          dim_out=dim_out,
                          **kwargs) for T in [input_tensor.shape[0], None]
        ]
        outputs, nets, inputs = zip(*lstms)
        workspace.FeedBlob(inputs[0][-1], input_tensor)

        assert inputs[0] == inputs[1]
        gradient_checker.NetGradientChecker.CompareNets(
            nets,
            outputs,
            outputs_with_grads,
            inputs_with_grads=inputs[0],
            print_net_images=True,
        )

    @given(
        input_tensor=hu.tensor(min_dim=3, max_dim=3),
        forget_bias=st.floats(-10.0, 10.0),
        forward_only=st.booleans(),
        drop_states=st.booleans(),
    )
    @ht_settings(max_examples=10)
    def test_layered_lstm(self, input_tensor, **kwargs):
        for outputs_with_grads in [[0], [1], [0, 1, 2, 3]]:
            for memory_optim in [False, True]:
                _, net, inputs = _prepare_lstm(
                    *input_tensor.shape,
                    create_lstm=rnn_cell.LSTM,
                    outputs_with_grads=outputs_with_grads,
                    memory_optim=memory_optim,
                    **kwargs)
                workspace.FeedBlob(inputs[-1], input_tensor)
                workspace.RunNetOnce(net)
                workspace.ResetWorkspace()

    @given(
        input_tensor=lstm_input(),
        forget_bias=st.floats(-10.0, 10.0),
        fwd_only=st.booleans(),
        drop_states=st.booleans(),
    )
    @ht_settings(max_examples=15)
    @utils.debug
    def test_lstm_main(self, **kwargs):
        for lstm_type in [(rnn_cell.LSTM, lstm_reference),
                          (rnn_cell.MILSTM, milstm_reference)]:
            for outputs_with_grads in [[0], [1], [0, 1, 2, 3]]:
                for memory_optim in [False, True]:
                    self.lstm_base(lstm_type,
                                   outputs_with_grads=outputs_with_grads,
                                   memory_optim=memory_optim,
                                   **kwargs)

    def lstm_base(self, lstm_type, outputs_with_grads, memory_optim,
                  input_tensor, forget_bias, fwd_only, drop_states):
        print("LSTM test parameters: ", locals())
        create_lstm, ref = lstm_type

        t, n, d = input_tensor.shape
        assert d % 4 == 0
        d = d // 4
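        # the input's last dim packs the four LSTM gate blocks (i, f, o, g),
        # so the actual hidden size is a quarter of it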
        ref = partial(ref, forget_bias=forget_bias, drop_states=drop_states)

        net = _prepare_lstm(t,
                            n,
                            d,
                            create_lstm,
                            outputs_with_grads=outputs_with_grads,
                            memory_optim=memory_optim,
                            forget_bias=forget_bias,
                            forward_only=fwd_only,
                            drop_states=drop_states)[1]
        # here we don't provide a real input for the net, just for one of its
        # ops (RecurrentNetworkOp), so we have to hardcode this blob name
        workspace.FeedBlob("test_name_scope/external/recurrent/i2h",
                           input_tensor)
        op = net._net.op[-1]
        inputs = [workspace.FetchBlob(name) for name in op.input]

        self.assertReferenceChecks(
            hu.cpu_do,
            op,
            inputs,
            ref,
            outputs_to_check=list(range(4)),
        )

        # Checking for input, gates_t_w and gates_t_b gradients
        if not fwd_only:
            for param in range(5):
                self.assertGradientChecks(
                    device_option=hu.cpu_do,
                    op=op,
                    inputs=inputs,
                    outputs_to_check=param,
                    outputs_with_grads=outputs_with_grads,
                    threshold=0.01,
                    stepsize=0.005,
                )

    def test_lstm_extract_predictor_net(self):
        model = ModelHelper(name="lstm_extract_test")

        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 0)):
            output, _, _, _ = rnn_cell.LSTM(
                model=model,
                input_blob="input",
                seq_lengths="seqlengths",
                initial_states=("hidden_init", "cell_init"),
                dim_in=20,
                dim_out=40,
                scope="test",
                drop_states=True,
                return_last_layer_only=True,
            )
        # Run param init net to get the shapes for all inputs
        shapes = {}
        workspace.RunNetOnce(model.param_init_net)
        for b in workspace.Blobs():
            shapes[b] = workspace.FetchBlob(b).shape

        # But export on CPU
        (predict_net, export_blobs) = ExtractPredictorNet(
            net_proto=model.net.Proto(),
            input_blobs=["input"],
            output_blobs=[output],
            device=core.DeviceOption(caffe2_pb2.CPU, 1),
        )

        # Create the net and run once to see it is valid
        # Populate external inputs with correctly shaped random input
        # and also ensure that the export_blobs was constructed correctly.
        workspace.ResetWorkspace()
        shapes['input'] = [10, 4, 20]
        shapes['cell_init'] = [1, 4, 40]
        shapes['hidden_init'] = [1, 4, 40]

        print(predict_net.Proto().external_input)
        self.assertTrue('seqlengths' in predict_net.Proto().external_input)
        for einp in predict_net.Proto().external_input:
            if einp == 'seqlengths':
                workspace.FeedBlob("seqlengths",
                                   np.array([10] * 4, dtype=np.int32))
            else:
                workspace.FeedBlob(
                    einp,
                    np.zeros(shapes[einp]).astype(np.float32),
                )
                if einp != 'input':
                    self.assertTrue(einp in export_blobs)

        print(str(predict_net.Proto()))
        self.assertTrue(workspace.CreateNet(predict_net.Proto()))
        self.assertTrue(workspace.RunNet(predict_net.Proto().name))

        # Validate device options set correctly for the RNNs
        import google.protobuf.text_format as protobuftx
        for op in predict_net.Proto().op:
            if op.type == 'RecurrentNetwork':
                for arg in op.arg:
                    if arg.name == "step_net":
                        step_proto = caffe2_pb2.NetDef()
                        protobuftx.Merge(arg.s, step_proto)
                        for step_op in step_proto.op:
                            self.assertEqual(0,
                                             step_op.device_option.device_type)
                            self.assertEqual(1,
                                             step_op.device_option.cuda_gpu_id)
                    elif arg.name == 'backward_step_net':
                        self.assertEqual("", arg.s)

    @given(encoder_output_length=st.integers(1, 3),
           encoder_output_dim=st.integers(1, 3),
           decoder_input_length=st.integers(1, 3),
           decoder_state_dim=st.integers(1, 3),
           batch_size=st.integers(1, 3),
           **hu.gcs)
    def test_lstm_with_attention(
        self,
        encoder_output_length,
        encoder_output_dim,
        decoder_input_length,
        decoder_state_dim,
        batch_size,
        gc,
        dc,
    ):
        self.lstm_with_attention(
            partial(
                rnn_cell.LSTMWithAttention,
                attention_type=AttentionType.Regular,
            ),
            encoder_output_length,
            encoder_output_dim,
            decoder_input_length,
            decoder_state_dim,
            batch_size,
            lstm_with_attention_reference,
            gc,
        )

    @given(encoder_output_length=st.integers(1, 3),
           encoder_output_dim=st.integers(1, 3),
           decoder_input_length=st.integers(1, 3),
           decoder_state_dim=st.integers(1, 3),
           batch_size=st.integers(1, 3),
           **hu.gcs)
    def test_lstm_with_recurrent_attention(
        self,
        encoder_output_length,
        encoder_output_dim,
        decoder_input_length,
        decoder_state_dim,
        batch_size,
        gc,
        dc,
    ):
        self.lstm_with_attention(
            partial(
                rnn_cell.LSTMWithAttention,
                attention_type=AttentionType.Recurrent,
            ),
            encoder_output_length,
            encoder_output_dim,
            decoder_input_length,
            decoder_state_dim,
            batch_size,
            lstm_with_recurrent_attention_reference,
            gc,
        )

    def lstm_with_attention(
        self,
        create_lstm_with_attention,
        encoder_output_length,
        encoder_output_dim,
        decoder_input_length,
        decoder_state_dim,
        batch_size,
        ref,
        gc,
    ):
        model = ModelHelper(name='external')
        with core.DeviceScope(gc):
            (
                encoder_outputs,
                decoder_inputs,
                decoder_input_lengths,
                initial_decoder_hidden_state,
                initial_decoder_cell_state,
                initial_attention_weighted_encoder_context,
            ) = model.net.AddExternalInputs(
                'encoder_outputs',
                'decoder_inputs',
                'decoder_input_lengths',
                'initial_decoder_hidden_state',
                'initial_decoder_cell_state',
                'initial_attention_weighted_encoder_context',
            )
            create_lstm_with_attention(
                model=model,
                decoder_inputs=decoder_inputs,
                decoder_input_lengths=decoder_input_lengths,
                initial_decoder_hidden_state=initial_decoder_hidden_state,
                initial_decoder_cell_state=initial_decoder_cell_state,
                initial_attention_weighted_encoder_context=(
                    initial_attention_weighted_encoder_context),
                encoder_output_dim=encoder_output_dim,
                encoder_outputs=encoder_outputs,
                decoder_input_dim=decoder_state_dim,
                decoder_state_dim=decoder_state_dim,
                scope='external/LSTMWithAttention',
            )
            op = model.net._net.op[-2]
        workspace.RunNetOnce(model.param_init_net)

        # This is the original decoder_inputs after the linear layer
        decoder_input_blob = op.input[0]

        workspace.FeedBlob(
            decoder_input_blob,
            np.random.randn(
                decoder_input_length,
                batch_size,
                decoder_state_dim * 4,
            ).astype(np.float32))
        workspace.FeedBlob(
            'external/LSTMWithAttention/encoder_outputs_transposed',
            np.random.randn(
                batch_size,
                encoder_output_dim,
                encoder_output_length,
            ).astype(np.float32),
        )
        workspace.FeedBlob(
            'external/LSTMWithAttention/weighted_encoder_outputs',
            np.random.randn(
                encoder_output_length,
                batch_size,
                encoder_output_dim,
            ).astype(np.float32),
        )
        workspace.FeedBlob(
            decoder_input_lengths,
            np.random.randint(0, decoder_input_length + 1,
                              size=(batch_size, )).astype(np.int32))
        workspace.FeedBlob(
            initial_decoder_hidden_state,
            np.random.randn(1, batch_size,
                            decoder_state_dim).astype(np.float32))
        workspace.FeedBlob(
            initial_decoder_cell_state,
            np.random.randn(1, batch_size,
                            decoder_state_dim).astype(np.float32))
        workspace.FeedBlob(
            initial_attention_weighted_encoder_context,
            np.random.randn(1, batch_size,
                            encoder_output_dim).astype(np.float32))
        inputs = [workspace.FetchBlob(name) for name in op.input]
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=ref,
            grad_reference=None,
            output_to_grad=None,
            outputs_to_check=list(range(6)),
        )
        gradients_to_check = [
            index for (index, input_name) in enumerate(op.input)
            if input_name != 'decoder_input_lengths'
        ]
        for param in gradients_to_check:
            self.assertGradientChecks(
                device_option=gc,
                op=op,
                inputs=inputs,
                outputs_to_check=param,
                outputs_with_grads=[0, 4],
                threshold=0.01,
                stepsize=0.001,
            )

    @given(n=st.integers(1, 10),
           d=st.integers(1, 10),
           t=st.integers(1, 10),
           **hu.gcs)
    def test_lstm_unit_recurrent_network(self, n, d, t, dc, gc):
        op = core.CreateOperator('LSTMUnit', [
            'hidden_t_prev',
            'cell_t_prev',
            'gates_t',
            'seq_lengths',
            'timestep',
        ], ['hidden_t', 'cell_t'])
        cell_t_prev = np.random.randn(1, n, d).astype(np.float32)
        hidden_t_prev = np.random.randn(1, n, d).astype(np.float32)
        gates = np.random.randn(1, n, 4 * d).astype(np.float32)
        seq_lengths = np.random.randint(1, t + 1, size=(n, )).astype(np.int32)
        timestep = np.random.randint(0, t, size=(1, )).astype(np.int32)
        inputs = [hidden_t_prev, cell_t_prev, gates, seq_lengths, timestep]
        input_device_options = {'timestep': hu.cpu_do}
        self.assertDeviceChecks(dc,
                                op,
                                inputs, [0],
                                input_device_options=input_device_options)
        self.assertReferenceChecks(gc,
                                   op,
                                   inputs,
                                   lstm_unit,
                                   input_device_options=input_device_options)
        for i in range(2):
            self.assertGradientChecks(
                gc,
                op,
                inputs,
                i, [0, 1],
                input_device_options=input_device_options)

    @given(input_length=st.integers(2, 5),
           dim_in=st.integers(1, 3),
           max_num_units=st.integers(1, 3),
           num_layers=st.integers(2, 3),
           batch_size=st.integers(1, 3))
    def test_multi_lstm(
        self,
        input_length,
        dim_in,
        max_num_units,
        num_layers,
        batch_size,
    ):
        model = ModelHelper(name='external')
        (
            input_sequence,
            seq_lengths,
        ) = model.net.AddExternalInputs(
            'input_sequence',
            'seq_lengths',
        )
        dim_out = [
            np.random.randint(1, max_num_units + 1) for _ in range(num_layers)
        ]
        h_all, h_last, c_all, c_last = rnn_cell.LSTM(
            model=model,
            input_blob=input_sequence,
            seq_lengths=seq_lengths,
            initial_states=None,
            dim_in=dim_in,
            dim_out=dim_out,
            scope='test',
            outputs_with_grads=(0, ),
            return_params=False,
            memory_optimization=False,
            forget_bias=0.0,
            forward_only=False,
            return_last_layer_only=True,
        )

        workspace.RunNetOnce(model.param_init_net)

        seq_lengths_val = np.random.randint(
            1,
            input_length + 1,
            size=(batch_size, ),
        ).astype(np.int32)
        input_sequence_val = np.random.randn(
            input_length,
            batch_size,
            dim_in,
        ).astype(np.float32)
        workspace.FeedBlob(seq_lengths, seq_lengths_val)
        workspace.FeedBlob(input_sequence, input_sequence_val)

        hidden_input_list = []
        cell_input_list = []
        i2h_w_list = []
        i2h_b_list = []
        gates_w_list = []
        gates_b_list = []

        for i in range(num_layers):
            hidden_input_list.append(
                workspace.FetchBlob(
                    'test/initial_hidden_state_{}'.format(i)), )
            cell_input_list.append(
                workspace.FetchBlob('test/initial_cell_state_{}'.format(i)), )
            i2h_w_list.append(
                workspace.FetchBlob('test/layer_{}/i2h_w'.format(i)), )
            i2h_b_list.append(
                workspace.FetchBlob('test/layer_{}/i2h_b'.format(i)), )
            gates_w_list.append(
                workspace.FetchBlob('test/layer_{}/gates_t_w'.format(i)), )
            gates_b_list.append(
                workspace.FetchBlob('test/layer_{}/gates_t_b'.format(i)), )

        workspace.RunNetOnce(model.net)
        h_all_calc = workspace.FetchBlob(h_all)
        h_last_calc = workspace.FetchBlob(h_last)
        c_all_calc = workspace.FetchBlob(c_all)
        c_last_calc = workspace.FetchBlob(c_last)

        h_all_ref, h_last_ref, c_all_ref, c_last_ref = multi_lstm_reference(
            input_sequence_val,
            hidden_input_list,
            cell_input_list,
            i2h_w_list,
            i2h_b_list,
            gates_w_list,
            gates_b_list,
            seq_lengths_val,
            forget_bias=0.0,
        )

        h_all_delta = np.abs(h_all_ref - h_all_calc).sum()
        h_last_delta = np.abs(h_last_ref - h_last_calc).sum()
        c_all_delta = np.abs(c_all_ref - c_all_calc).sum()
        c_last_delta = np.abs(c_last_ref - c_last_calc).sum()

        self.assertAlmostEqual(h_all_delta, 0.0, places=5)
        self.assertAlmostEqual(h_last_delta, 0.0, places=5)
        self.assertAlmostEqual(c_all_delta, 0.0, places=5)
        self.assertAlmostEqual(c_last_delta, 0.0, places=5)

        input_values = {
            'input_sequence': input_sequence_val,
            'seq_lengths': seq_lengths_val,
        }
        for param in model.GetParams():
            value = workspace.FetchBlob(param)
            input_values[str(param)] = value

        output_sum = model.net.SumElements(
            [h_all],
            'output_sum',
            average=True,
        )
        fake_loss = model.net.Tanh(output_sum, )
        for param in model.GetParams():
            gradient_checker.NetGradientChecker.Check(
                model.net,
                outputs_with_grad=[fake_loss],
                input_values=input_values,
                input_to_check=str(param),
                print_net=False,
                step_size=0.0001,
                threshold=0.05,
            )
Example No. 53
0
def strategy(self):
    """Returns resulting strategy that generates configured char set."""
    allowed = self._whitelist_chars
    if self._negate:
        allowed = BYTES_ALL - allowed
    return st.sampled_from(sorted(allowed))
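
# Hedged sketch of the negation step above, assuming BYTES_ALL is the
# universe of all 256 single-byte values; negating the whitelist is then
# a plain set difference before sampling:
BYTES_ALL = {bytes([i]) for i in range(256)}
allowed = BYTES_ALL - {b'a', b'b'}
# st.sampled_from(sorted(allowed)) draws uniformly from the complement.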
Example No. 54
0
class TestBooleanMaskOp(serial.SerializedTestCase):
    @given(x=hu.tensor1d(min_len=1,
                         max_len=100,
                         elements=hu.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_boolean_mask_gradient(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask", ["data", "mask"],
                                 "masked_data")
        mask = np.random.choice(a=[True, False], size=x.shape[0])
        expected_gradient = np.copy(mask).astype(int)
        self.assertDeviceChecks(dc, op, [x, mask], [0])
        self.assertGradientChecks(gc, op, [x, mask], 0, [0])

    @given(x=hu.tensor1d(min_len=1,
                         max_len=5,
                         elements=hu.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs)
    @settings(deadline=10000)
    def test_boolean_mask(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask", ["data", "mask"],
                                 "masked_data")
        mask = np.random.choice(a=[True, False], size=x.shape[0])

        def ref(x, mask):
            return (x[mask], )

        self.assertReferenceChecks(gc, op, [x, mask], ref)
        self.assertDeviceChecks(dc, op, [x, mask], [0])

    @given(x=hu.tensor1d(min_len=1,
                         max_len=5,
                         elements=hu.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs)
    def test_boolean_mask_indices(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask", ["data", "mask"],
                                 ["masked_data", "masked_indices"])
        mask = np.random.choice(a=[True, False], size=x.shape[0])

        def ref(x, mask):
            return (x[mask], np.where(mask)[0])

        self.assertReferenceChecks(gc, op, [x, mask], ref)
        self.assertDeviceChecks(dc, op, [x, mask], [0])

    @staticmethod
    def _dtype_conversion(x, dtype, gc, dc):
        """SequenceMask only supports fp16 with CUDA/ROCm."""
        if dtype == np.float16:
            assume(core.IsGPUDeviceType(gc.device_type))
            dc = [d for d in dc if core.IsGPUDeviceType(d.device_type)]
            x = x.astype(dtype)
        return x, dc

    @given(x=hu.tensor(min_dim=2,
                       max_dim=5,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    def test_sequence_mask_with_lengths(self, x, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        op = core.CreateOperator("SequenceMask", ["data", "lengths"],
                                 ["masked_data"],
                                 mode="sequence",
                                 axis=len(x.shape) - 1,
                                 fill_val=fill_val)
        elem_dim = x.shape[-1]
        leading_dim = 1
        for dim in x.shape[:-1]:
            leading_dim *= dim
        lengths = np.random.randint(0, elem_dim, [leading_dim])\
            .astype(np.int32)

        def ref(x, lengths):
            ref = np.reshape(x, [leading_dim, elem_dim])
            for i in range(leading_dim):
                for j in range(elem_dim):
                    if j >= lengths[i]:
                        ref[i, j] = fill_val
            return [ref.reshape(x.shape)]

        self.assertReferenceChecks(gc, op, [x, lengths], ref)
        self.assertDeviceChecks(dc, op, [x, lengths], [0])

    @given(x=hu.tensor(min_dim=2,
                       max_dim=5,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_sequence_mask_with_window(self, x, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        radius = 2
        op = core.CreateOperator("SequenceMask", ["data", "centers"],
                                 ["masked_data"],
                                 mode="window",
                                 radius=radius,
                                 axis=len(x.shape) - 1,
                                 fill_val=fill_val)
        elem_dim = x.shape[-1]
        leading_dim = 1
        for dim in x.shape[:-1]:
            leading_dim *= dim
        centers = np.random.randint(0, elem_dim, [leading_dim])\
            .astype(np.int32)

        def ref(x, centers):
            ref = np.reshape(x, [leading_dim, elem_dim])
            for i in range(leading_dim):
                for j in range(elem_dim):
                    if j > centers[i] + radius or j < centers[i] - radius:
                        ref[i, j] = fill_val
            return [ref.reshape(x.shape)]

        self.assertReferenceChecks(gc, op, [x, centers], ref)
        self.assertDeviceChecks(dc, op, [x, centers], [0])

        # Gradient check with np.float16 is found to be flakey, disable for now
        # with high threshold (to repro, set threshold to 0.4).
        threshold = 1.0 if dtype == np.float16 else 0.005
        self.assertGradientChecks(gc,
                                  op, [x, centers],
                                  0, [0],
                                  threshold=threshold)

    @given(x=hu.tensor(min_dim=2,
                       max_dim=5,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           mode=st.sampled_from(['upper', 'lower', 'upperdiag', 'lowerdiag']),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_sequence_mask_triangle(self, x, mode, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        op = core.CreateOperator("SequenceMask", ["data"], ["masked_data"],
                                 mode=mode,
                                 axis=len(x.shape) - 1,
                                 fill_val=fill_val)
        elem_dim = x.shape[-1]
        leading_dim = 1
        for dim in x.shape[:-1]:
            leading_dim *= dim

        if mode == 'upper':

            def compare(i, j):
                return j > i
        elif mode == 'lower':

            def compare(i, j):
                return j < i
        elif mode == 'upperdiag':

            def compare(i, j):
                return j >= i
        elif mode == 'lowerdiag':

            def compare(i, j):
                return j <= i

        def ref(x):
            ref = np.reshape(x, [leading_dim, elem_dim])
            for i in range(leading_dim):
                for j in range(elem_dim):
                    if compare(i, j):
                        ref[i, j] = fill_val
            return [ref.reshape(x.shape)]

        self.assertReferenceChecks(gc, op, [x], ref)
        self.assertDeviceChecks(dc, op, [x], [0])

        # Gradient check with np.float16 is found to be flakey, disable for now
        # with high threshold (to repro, set threshold to 0.4).
        threshold = 1.0 if dtype == np.float16 else 0.005
        stepsize = 0.1 if dtype == np.float16 else 0.05
        self.assertGradientChecks(gc,
                                  op, [x],
                                  0, [0],
                                  threshold=threshold,
                                  stepsize=stepsize)

    @given(x=hu.tensor(min_dim=2,
                       max_dim=5,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_sequence_mask_batching_lengths(self, x, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        # choose _different_ batch and axis dimensions, w/ axis != 0.
        axis = 0
        batch = 0
        while axis == 0 or axis < batch:
            inds = np.arange(len(x.shape))
            np.random.shuffle(inds)
            batch = inds[0]
            axis = inds[1]
        op = core.CreateOperator("SequenceMask", ["data", "lengths"],
                                 ["masked_data"],
                                 mode='sequence',
                                 axis=axis,
                                 fill_val=fill_val,
                                 batch=batch)

        before = int(np.prod(x.shape[:batch + 1]))
        between = int(np.prod(x.shape[batch + 1:axis]))
        after = int(np.prod(x.shape[axis:]))
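        # x collapses to [before, between, after]: dims up to and including
        # `batch`, dims strictly between `batch` and `axis`, and dims from
        # `axis` onward.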

        lengths = np.random.randint(0, after, [between])\
            .astype(np.int32)

        def ref(z, l):
            w = np.reshape(z, [before, between, after])

            for b in range(before):
                r = w[b, :, :]
                for i in range(between):
                    for j in range(after):
                        if j >= l[i]:
                            r[i, j] = fill_val
            return [w.reshape(z.shape)]

        self.assertReferenceChecks(gc, op, [x, lengths], ref)
        self.assertDeviceChecks(dc, op, [x, lengths], [0])

        # Gradient check with np.float16 is found to be flakey, disable for now
        # with high threshold (to repro, set threshold to 0.4).
        threshold = 1.0 if dtype == np.float16 else 0.005
        self.assertGradientChecks(gc,
                                  op, [x, lengths],
                                  0, [0],
                                  threshold=threshold)

    @given(x=hu.tensor(min_dim=4,
                       max_dim=4,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_sequence_mask_batching_window(self, x, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        radius = 1
        # choose _different_ batch and axis dimensions, w/ axis != 0.
        axis = 0
        batch = 0
        while axis == 0 or axis < batch:
            inds = np.arange(len(x.shape))
            np.random.shuffle(inds)
            batch = inds[0]
            axis = inds[1]
        op = core.CreateOperator("SequenceMask", ["data", "centers"],
                                 ["masked_data"],
                                 mode='window',
                                 radius=radius,
                                 axis=axis,
                                 fill_val=fill_val,
                                 batch=batch)

        before = int(np.prod(x.shape[:batch + 1]))
        between = int(np.prod(x.shape[batch + 1:axis]))
        after = int(np.prod(x.shape[axis:]))

        centers = np.random.randint(0, after, [between])\
            .astype(np.int32)

        def ref(z, c):
            w = np.reshape(z, [before, between, after])

            for b in range(before):
                r = w[b, :, :]
                for i in range(between):
                    for j in range(after):
                        if j > c[i] + radius or j < c[i] - radius:
                            r[i, j] = fill_val
            return [w.reshape(z.shape)]

        self.assertReferenceChecks(gc, op, [x, centers], ref)
        self.assertDeviceChecks(dc, op, [x, centers], [0])

        # Gradient check with np.float16 is found to be flakey, disable for now
        # with high threshold (to repro, set threshold to 0.4).
        threshold = 1.0 if dtype == np.float16 else 0.005
        self.assertGradientChecks(gc,
                                  op, [x, centers],
                                  0, [0],
                                  threshold=threshold)

    @given(x=hu.tensor(min_dim=3,
                       max_dim=5,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           mode=st.sampled_from(['upper', 'lower', 'upperdiag', 'lowerdiag']),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_sequence_mask_batching_triangle(self, x, mode, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        # choose _different_ batch and axis dimensions, w/ axis != 0.
        axis = 0
        batch = 0
        while axis == 0 or axis < batch:
            inds = np.arange(len(x.shape))
            np.random.shuffle(inds)
            batch = inds[0]
            axis = inds[1]
        op = core.CreateOperator("SequenceMask", ["data"], ["masked_data"],
                                 mode=mode,
                                 axis=axis,
                                 fill_val=fill_val,
                                 batch=batch)

        if mode == 'upper':

            def compare(i, j):
                return j > i
        elif mode == 'lower':

            def compare(i, j):
                return j < i
        elif mode == 'upperdiag':

            def compare(i, j):
                return j >= i
        elif mode == 'lowerdiag':

            def compare(i, j):
                return j <= i

        def ref(z):
            before = int(np.prod(z.shape[:batch + 1]))
            between = int(np.prod(z.shape[batch + 1:axis]))
            after = int(np.prod(z.shape[axis:]))

            w = np.reshape(z, [before, between, after])

            for b in range(before):
                r = w[b, :, :]
                for i in range(between):
                    for j in range(after):
                        if compare(i, j):
                            r[i, j] = fill_val
            return [w.reshape(z.shape)]

        self.assertReferenceChecks(gc, op, [x], ref)
        self.assertDeviceChecks(dc, op, [x], [0])

        # Gradient check with np.float16 is found to be flakey, disable for now
        # with high threshold (to repro, set threshold to 0.4).
        threshold = 1.0 if dtype == np.float16 else 0.005
        stepsize = 0.1 if dtype == np.float16 else 0.05
        self.assertGradientChecks(gc,
                                  op, [x],
                                  0, [0],
                                  threshold=threshold,
                                  stepsize=stepsize)

    @given(x=hu.tensor(min_dim=3,
                       max_dim=5,
                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           dtype=st.sampled_from([np.float32, np.float16]),
           **hu.gcs)
    def test_sequence_mask_repeated(self, x, dtype, gc, dc):
        x, dc = self._dtype_conversion(x, dtype, gc, dc)
        # finite fill value needed for gradient check
        fill_val = 1e-3 if dtype == np.float16 else 1e-9
        op = core.CreateOperator("SequenceMask", ["data", "lengths"],
                                 ["masked_data"],
                                 mode="sequence",
                                 axis=len(x.shape) - 2,
                                 repeat_from_axis=-1,
                                 fill_val=fill_val)

        elem_dim = x.shape[-2]
        leading_dim = 1
        for dim in x.shape[:-2]:
            leading_dim *= dim
        lengths = np.random.randint(0, elem_dim, [leading_dim])\
            .astype(np.int32)

        def ref(x, lengths):
            ref = np.reshape(x, [leading_dim, elem_dim, -1])
            for i in range(leading_dim):
                for j in range(elem_dim):
                    if j >= lengths[i]:
                        ref[i, j, :] = fill_val
            return [ref.reshape(x.shape)]

        self.assertReferenceChecks(gc, op, [x, lengths], ref)
        self.assertDeviceChecks(dc, op, [x, lengths], [0])
Example No. 55
0
def _strategy(codes, context, pattern):
    """Convert an SRE regex parse tree into a strategy that generates
    strings matching the regex represented by that parse tree.

    `codes` is either a list of SRE regex element representations or a
    single element representation. Each element is a tuple of an element
    code (as a string) and its parameters. E.g. the regex 'ab[0-9]+'
    compiles to the following elements:

        [
            (LITERAL, 97),
            (LITERAL, 98),
            (MAX_REPEAT, (1, 4294967295, [
                (IN, [
                    (RANGE, (48, 57))
                ])
            ]))
        ]

    The function recursively traverses the regex element tree and converts
    each element to a strategy that generates strings matching that element.

    The context stores:
    1. the list of groups (for backreferences), and
    2. the active regex flags (e.g. IGNORECASE, DOTALL, UNICODE), which
       affect the behavior of various inner strategies.

    """
    def recurse(codes):
        return _strategy(codes, context, pattern)

    if isinstance(pattern, text_type):
        empty = u''
        to_char = hunichr
    else:
        empty = b''
        to_char = int_to_byte
        binary_char = st.binary(min_size=1, max_size=1)

    if not isinstance(codes, tuple):
        # List of codes
        strategies = []

        i = 0
        while i < len(codes):
            if codes[i][0] == sre.LITERAL and \
                    not context.flags & re.IGNORECASE:
                # Merge subsequent "literals" into one `just()` strategy
                # that generates corresponding text if no IGNORECASE
                j = i + 1
                while j < len(codes) and codes[j][0] == sre.LITERAL:
                    j += 1

                if i + 1 < j:
                    strategies.append(st.just(
                        empty.join([to_char(charcode)
                                    for (_, charcode) in codes[i:j]])
                    ))

                    i = j
                    continue

            strategies.append(recurse(codes[i]))
            i += 1

        # We handle this separately at the top level, but some regexes can
        # contain empty lists internally, so we need to handle them here too.
        if not strategies:
            return st.just(empty)

        if len(strategies) == 1:
            return strategies[0]
        return st.tuples(*strategies).map(empty.join)
    else:
        # Single code
        code, value = codes
        if code == sre.LITERAL:
            # Regex 'a' (single char)
            c = to_char(value)
            if context.flags & re.IGNORECASE and \
                    re.match(c, c.swapcase(), re.IGNORECASE) is not None:
                # We do the explicit check for swapped-case matching because
                # eg 'ß'.upper() == 'SS' and ignorecase doesn't match it.
                return st.sampled_from([c, c.swapcase()])
            return st.just(c)

        elif code == sre.NOT_LITERAL:
            # Regex '[^a]' (negation of a single char)
            c = to_char(value)
            blacklist = set(c)
            if context.flags & re.IGNORECASE and \
                    re.match(c, c.swapcase(), re.IGNORECASE) is not None:
                blacklist |= set(c.swapcase())
            if isinstance(pattern, text_type):
                return st.characters(blacklist_characters=blacklist)
            else:
                return binary_char.filter(lambda c: c not in blacklist)

        elif code == sre.IN:
            # Regex '[abc0-9]' (set of characters)
            negate = value[0][0] == sre.NEGATE
            if isinstance(pattern, text_type):
                builder = CharactersBuilder(negate, context.flags)
            else:
                builder = BytesBuilder(negate, context.flags)

            for charset_code, charset_value in value:
                if charset_code == sre.NEGATE:
                    # Regex '[^...]' (negation)
                    # handled by builder = CharactersBuilder(...) above
                    pass
                elif charset_code == sre.LITERAL:
                    # Regex '[a]' (single char)
                    builder.add_char(charset_value)
                elif charset_code == sre.RANGE:
                    # Regex '[a-z]' (char range)
                    low, high = charset_value
                    for char_code in hrange(low, high + 1):
                        builder.add_char(char_code)
                elif charset_code == sre.CATEGORY:
                    # Regex '[\w]' (char category)
                    builder.add_category(charset_value)
                else:  # pragma: no cover
                    # Currently there are no known charset codes other than
                    # those handled here; this branch is just future-proofing.
                    raise AssertionError('Unknown charset code: %s'
                                         % charset_code)
            return builder.strategy

        elif code == sre.ANY:
            # Regex '.' (any char)
            if isinstance(pattern, text_type):
                if context.flags & re.DOTALL:
                    return st.characters()
                return st.characters(blacklist_characters=u'\n')
            else:
                if context.flags & re.DOTALL:
                    return binary_char
                return binary_char.filter(lambda c: c != b'\n')

        elif code == sre.AT:
            # Regexes like '^...', '...$', '\bfoo', '\Bfoo'
            # An empty string (or newline) will match the token itself, but
            # we don't and can't check the position (e.g. '$' at the end)
            return st.just(empty)

        elif code == sre.SUBPATTERN:
            # Various groups: '(...)', '(?:...)' or '(?P<name>...)'
            old_flags = context.flags
            if HAS_SUBPATTERN_FLAGS:  # pragma: no cover
                # This feature is available only in specific Python versions
                context.flags = (context.flags | value[1]) & ~value[2]

            strat = _strategy(value[-1], context, pattern)

            context.flags = old_flags

            if value[0]:
                strat = update_group(value[0], strat)

            return strat

        elif code == sre.GROUPREF:
            # Regex '\\1' or '(?P=name)' (group reference)
            return reuse_group(value)

        elif code == sre.ASSERT:
            # Regex '(?=...)' or '(?<=...)' (positive lookahead/lookbehind)
            return recurse(value[1])

        elif code == sre.ASSERT_NOT:
            # Regex '(?!...)' or '(?<!...)' (negative lookahead/lookbehind)
            return st.just(empty)

        elif code == sre.BRANCH:
            # Regex 'a|b|c' (branch)
            return st.one_of([recurse(branch) for branch in value[1]])

        elif code in [sre.MIN_REPEAT, sre.MAX_REPEAT]:
            # Regexes 'a?', 'a*', 'a+' and their non-greedy variants
            # (repeaters)
            at_least, at_most, subregex = value
            if at_most == sre.MAXREPEAT:
                at_most = None
            if at_least == 0 and at_most == 1:
                return st.just(empty) | recurse(subregex)
            return st.lists(recurse(subregex),
                            min_size=at_least,
                            max_size=at_most).map(empty.join)

        elif code == sre.GROUPREF_EXISTS:
            # Regex '(?(id/name)yes-pattern|no-pattern)'
            # (if group exists choice)
            return group_conditional(
                value[0],
                recurse(value[1]),
                recurse(value[2]) if value[2] else st.just(empty),
            )

        else:  # pragma: no cover
            # Currently there are no known codes other than those handled
            # here; this branch is just future-proofing.
            raise AssertionError('Unknown code point: %s' % repr(code))
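
The element tuples in the docstring above come from the standard library's
SRE parser; a quick way to inspect the tree that _strategy consumes (the
exact repr varies slightly across Python versions):

import sre_parse

for element in sre_parse.parse('ab[0-9]+'):
    print(element)
# (LITERAL, 97)
# (LITERAL, 98)
# (MAX_REPEAT, (1, MAXREPEAT, [(IN, [(RANGE, (48, 57))])]))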
Example No. 56
class FunctionalAPITest(QuantizationTestCase):
    def test_relu_api(self):
        X = torch.arange(-5, 5, dtype=torch.float)
        scale = 2.0
        zero_point = 1
        qX = torch.quantize_per_tensor(X,
                                       scale=scale,
                                       zero_point=zero_point,
                                       dtype=torch.quint8)
        qY = torch.relu(qX)
        qY_hat = qF.relu(qX)
        self.assertEqual(qY, qY_hat)

    def _test_conv_api_impl(
        self,
        qconv_fn,
        conv_fn,
        batch_size,
        in_channels_per_group,
        input_feature_map_size,
        out_channels_per_group,
        groups,
        kernel_size,
        stride,
        padding,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias,
        use_channelwise,
    ):
        for i in range(len(kernel_size)):
            assume(input_feature_map_size[i] + 2 * padding[i] >= dilation[i] *
                   (kernel_size[i] - 1) + 1)
        (X, X_q, W, W_q, b) = _make_conv_test_input(
            batch_size, in_channels_per_group, input_feature_map_size,
            out_channels_per_group, groups, kernel_size, X_scale, X_zero_point,
            W_scale, W_zero_point, use_bias, use_channelwise)

        Y_exp = conv_fn(X, W, b, stride, padding, dilation, groups)
        Y_exp = torch.quantize_per_tensor(Y_exp,
                                          scale=Y_scale,
                                          zero_point=Y_zero_point,
                                          dtype=torch.quint8)
        Y_act = qconv_fn(X_q,
                         W_q,
                         b,
                         stride,
                         padding,
                         dilation,
                         groups,
                         padding_mode="zeros",
                         scale=Y_scale,
                         zero_point=Y_zero_point)

        # Make sure the results match
        # assert_array_almost_equal compares using the following formula:
        #     abs(desired-actual) < 1.5 * 10**(-decimal)
        # (https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html)
        # We use decimal = 0 to ignore off-by-1 differences between reference
        # and test. Off-by-1 differences arise due to the order of round and
        # zero_point addition operation, i.e., if addition followed by round is
        # used by reference and round followed by addition is used by test, the
        # results may differ by 1.
        # For example, the result of round(2.5) + 1 is 3 while round(2.5 + 1) is
        # 4 assuming the rounding mode is round-to-nearest, ties-to-even.
        np.testing.assert_array_almost_equal(Y_exp.int_repr().numpy(),
                                             Y_act.int_repr().numpy(),
                                             decimal=0)

    @given(batch_size=st.integers(1, 3),
           in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
           H=st.integers(4, 16),
           W=st.integers(4, 16),
           out_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
           groups=st.integers(1, 4),
           kernel_h=st.integers(1, 7),
           kernel_w=st.integers(1, 7),
           stride_h=st.integers(1, 2),
           stride_w=st.integers(1, 2),
           pad_h=st.integers(0, 2),
           pad_w=st.integers(0, 2),
           dilation=st.integers(1, 2),
           X_scale=st.floats(1.2, 1.6),
           X_zero_point=st.integers(0, 4),
           W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
           W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
           Y_scale=st.floats(4.2, 5.6),
           Y_zero_point=st.integers(0, 4),
           use_bias=st.booleans(),
           use_channelwise=st.booleans(),
           qengine=st.sampled_from(("qnnpack", "fbgemm")))
    def test_conv2d_api(
        self,
        batch_size,
        in_channels_per_group,
        H,
        W,
        out_channels_per_group,
        groups,
        kernel_h,
        kernel_w,
        stride_h,
        stride_w,
        pad_h,
        pad_w,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias,
        use_channelwise,
        qengine,
    ):
        # Tests the correctness of the conv2d function.

        if qengine not in torch.backends.quantized.supported_engines:
            return
        if qengine == 'qnnpack':
            if IS_PPC or TEST_WITH_UBSAN:
                return
            use_channelwise = False

        input_feature_map_size = (H, W)
        kernel_size = (kernel_h, kernel_w)
        stride = (stride_h, stride_w)
        padding = (pad_h, pad_w)
        dilation = (dilation, dilation)

        with override_quantized_engine(qengine):
            qconv_fn = qF.conv2d
            conv_fn = F.conv2d
            self._test_conv_api_impl(
                qconv_fn, conv_fn, batch_size, in_channels_per_group,
                input_feature_map_size, out_channels_per_group, groups,
                kernel_size, stride, padding, dilation, X_scale, X_zero_point,
                W_scale, W_zero_point, Y_scale, Y_zero_point, use_bias,
                use_channelwise)

    @given(batch_size=st.integers(1, 3),
           in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
           D=st.integers(4, 8),
           H=st.integers(4, 8),
           W=st.integers(4, 8),
           out_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
           groups=st.integers(1, 4),
           kernel_d=st.integers(1, 4),
           kernel_h=st.integers(1, 4),
           kernel_w=st.integers(1, 4),
           stride_d=st.integers(1, 2),
           stride_h=st.integers(1, 2),
           stride_w=st.integers(1, 2),
           pad_d=st.integers(0, 2),
           pad_h=st.integers(0, 2),
           pad_w=st.integers(0, 2),
           dilation=st.integers(1, 2),
           X_scale=st.floats(1.2, 1.6),
           X_zero_point=st.integers(0, 4),
           W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
           W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
           Y_scale=st.floats(4.2, 5.6),
           Y_zero_point=st.integers(0, 4),
           use_bias=st.booleans(),
           use_channelwise=st.booleans(),
           qengine=st.sampled_from(("fbgemm", )))
    def test_conv3d_api(
        self,
        batch_size,
        in_channels_per_group,
        D,
        H,
        W,
        out_channels_per_group,
        groups,
        kernel_d,
        kernel_h,
        kernel_w,
        stride_d,
        stride_h,
        stride_w,
        pad_d,
        pad_h,
        pad_w,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias,
        use_channelwise,
        qengine,
    ):
        # Tests the correctness of the conv3d function.
        # Currently conv3d only supports the FBGEMM engine

        if qengine not in torch.backends.quantized.supported_engines:
            return

        input_feature_map_size = (D, H, W)
        kernel_size = (kernel_d, kernel_h, kernel_w)
        stride = (stride_d, stride_h, stride_w)
        padding = (pad_d, pad_h, pad_w)
        dilation = (dilation, dilation, dilation)

        with override_quantized_engine(qengine):
            qconv_fn = qF.conv3d
            conv_fn = F.conv3d
            self._test_conv_api_impl(
                qconv_fn, conv_fn, batch_size, in_channels_per_group,
                input_feature_map_size, out_channels_per_group, groups,
                kernel_size, stride, padding, dilation, X_scale, X_zero_point,
                W_scale, W_zero_point, Y_scale, Y_zero_point, use_bias,
                use_channelwise)
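
For reference, the quantized functional conv2d exercised above can also be
called directly; a minimal sketch, assuming the same PyTorch version as
these tests and a build with the fbgemm or qnnpack engine available (the
shapes, scales and zero points are arbitrary):

import torch
import torch.nn.functional as F
import torch.nn.quantized.functional as qF

X = torch.randn(1, 3, 8, 8)
W = torch.randn(4, 3, 3, 3)
b = torch.zeros(4)

X_q = torch.quantize_per_tensor(X, scale=0.1, zero_point=0, dtype=torch.quint8)
W_q = torch.quantize_per_tensor(W, scale=0.05, zero_point=0, dtype=torch.qint8)

Y_q = qF.conv2d(X_q, W_q, b, stride=1, padding=1, dilation=1, groups=1,
                padding_mode="zeros", scale=0.2, zero_point=0)
# The float path, quantized with the same output parameters, should agree
# up to the off-by-1 rounding differences discussed above.
Y_ref = torch.quantize_per_tensor(F.conv2d(X, W, b, 1, 1, 1, 1),
                                  scale=0.2, zero_point=0, dtype=torch.quint8)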
Example No. 57
class TestInstanceNorm(serial.SerializedTestCase):
    def _get_inputs(self, N, C, H, W, order):
        input_data = np.random.rand(N, C, H, W).astype(np.float32)
        if order == 'NHWC':
            # Allocate in the same order as NCHW and transpose to make sure
            # the inputs are identical on freshly-seeded calls.
            input_data = utils.NCHW2NHWC(input_data)
        elif order != "NCHW":
            raise Exception('unknown order type ({})'.format(order))

        scale_data = np.random.rand(C).astype(np.float32)
        bias_data = np.random.rand(C).astype(np.float32)
        return input_data, scale_data, bias_data

    def _get_op(self,
                device_option,
                store_mean,
                store_inv_stdev,
                epsilon,
                order,
                inplace=False):
        outputs = ['input' if inplace else 'output']
        if store_mean or store_inv_stdev:
            outputs += ['mean']
        if store_inv_stdev:
            outputs += ['inv_stdev']
        op = core.CreateOperator('InstanceNorm', ['input', 'scale', 'bias'],
                                 outputs,
                                 order=order,
                                 epsilon=epsilon,
                                 device_option=device_option)
        return op

    def _feed_inputs(self, input_blobs, device_option):
        names = ['input', 'scale', 'bias']
        for name, blob in zip(names, input_blobs):
            self.ws.create_blob(name).feed(blob, device_option=device_option)

    @given(gc=hu.gcs['gc'],
           dc=hu.gcs['dc'],
           N=st.integers(1, 4),
           C=st.integers(1, 4),
           H=st.integers(2, 4),
           W=st.integers(2, 4),
           order=st.sampled_from(['NCHW', 'NHWC']),
           epsilon=st.floats(1e-6, 1e-4),
           store_mean=st.booleans(),
           seed=st.integers(0, 1000),
           store_inv_stdev=st.booleans())
    @settings(deadline=1000)
    def test_instance_norm_gradients(self, gc, dc, N, C, H, W, order,
                                     store_mean, store_inv_stdev, epsilon,
                                     seed):
        np.random.seed(seed)

        # force store_inv_stdev on when store_mean is set, to match the
        # existing forward-pass implementation
        store_inv_stdev |= store_mean

        op = self._get_op(device_option=gc,
                          store_mean=store_mean,
                          store_inv_stdev=store_inv_stdev,
                          epsilon=epsilon,
                          order=order)

        input_data = np.arange(N * C * H * W).astype(np.float32)
        np.random.shuffle(input_data)
        if order == "NCHW":
            input_data = input_data.reshape(N, C, H, W)
        else:
            input_data = input_data.reshape(N, H, W, C)
        scale_data = np.random.randn(C).astype(np.float32)
        bias_data = np.random.randn(C).astype(np.float32)
        input_blobs = (input_data, scale_data, bias_data)

        output_indices = [0]
        # if store_inv_stdev is turned on, store_mean must also be forced on
        if store_mean or store_inv_stdev:
            output_indices += [1]
        if store_inv_stdev:
            output_indices += [2]
        self.assertDeviceChecks(dc, op, input_blobs, output_indices)
        # The gradient only flows from output #0 since the other two only
        # store the temporary mean and inv_stdev buffers.
        # Check dl/dinput
        self.assertGradientChecks(gc, op, input_blobs, 0, [0])
        # Check dl/dscale
        self.assertGradientChecks(gc, op, input_blobs, 1, [0])
        # Check dl/dbias
        self.assertGradientChecks(gc, op, input_blobs, 2, [0])

    @given(gc=hu.gcs['gc'],
           dc=hu.gcs['dc'],
           N=st.integers(2, 10),
           C=st.integers(3, 10),
           H=st.integers(5, 10),
           W=st.integers(7, 10),
           seed=st.integers(0, 1000),
           epsilon=st.floats(1e-6, 1e-4),
           store_mean=st.booleans(),
           store_inv_stdev=st.booleans())
    def test_instance_norm_layout(self, gc, dc, N, C, H, W, store_mean,
                                  store_inv_stdev, epsilon, seed):
        # force store_inv_stdev on when store_mean is set, to match the
        # existing forward-pass implementation
        store_inv_stdev |= store_mean

        outputs = {}
        for order in ('NCHW', 'NHWC'):
            np.random.seed(seed)
            input_blobs = self._get_inputs(N, C, H, W, order)
            self._feed_inputs(input_blobs, device_option=gc)
            op = self._get_op(device_option=gc,
                              store_mean=store_mean,
                              store_inv_stdev=store_inv_stdev,
                              epsilon=epsilon,
                              order=order)
            self.ws.run(op)
            outputs[order] = self.ws.blobs['output'].fetch()
        np.testing.assert_allclose(outputs['NCHW'],
                                   utils.NHWC2NCHW(outputs["NHWC"]),
                                   atol=1e-4,
                                   rtol=1e-4)

    @serial.given(gc=hu.gcs['gc'],
                  dc=hu.gcs['dc'],
                  N=st.integers(2, 10),
                  C=st.integers(3, 10),
                  H=st.integers(5, 10),
                  W=st.integers(7, 10),
                  order=st.sampled_from(['NCHW', 'NHWC']),
                  epsilon=st.floats(1e-6, 1e-4),
                  store_mean=st.booleans(),
                  seed=st.integers(0, 1000),
                  store_inv_stdev=st.booleans(),
                  inplace=st.booleans())
    def test_instance_norm_reference_check(self, gc, dc, N, C, H, W, order,
                                           store_mean, store_inv_stdev,
                                           epsilon, seed, inplace):
        np.random.seed(seed)

        # force store_inv_stdev on when store_mean is set, to match the
        # existing forward-pass implementation
        store_inv_stdev |= store_mean
        if order != "NCHW":
            assume(not inplace)

        inputs = self._get_inputs(N, C, H, W, order)
        op = self._get_op(device_option=gc,
                          store_mean=store_mean,
                          store_inv_stdev=store_inv_stdev,
                          epsilon=epsilon,
                          order=order,
                          inplace=inplace)

        def ref(input_blob, scale_blob, bias_blob):
            if order == 'NHWC':
                input_blob = utils.NHWC2NCHW(input_blob)

            mean_blob = input_blob.reshape((N, C, -1)).mean(axis=2)
            inv_stdev_blob = 1.0 / \
                np.sqrt(input_blob.reshape((N, C, -1)).var(axis=2) + epsilon)
            # _bc indicates blobs that are reshaped for broadcast
            scale_bc = scale_blob[np.newaxis, :, np.newaxis, np.newaxis]
            mean_bc = mean_blob[:, :, np.newaxis, np.newaxis]
            inv_stdev_bc = inv_stdev_blob[:, :, np.newaxis, np.newaxis]
            bias_bc = bias_blob[np.newaxis, :, np.newaxis, np.newaxis]
            normalized_blob = scale_bc * (input_blob - mean_bc) * inv_stdev_bc \
                + bias_bc

            if order == 'NHWC':
                normalized_blob = utils.NCHW2NHWC(normalized_blob)

            if not store_mean and not store_inv_stdev:
                return normalized_blob,
            elif not store_inv_stdev:
                return normalized_blob, mean_blob
            else:
                return normalized_blob, mean_blob, inv_stdev_blob

        self.assertReferenceChecks(gc, op, inputs, ref)

    @given(gc=hu.gcs['gc'],
           dc=hu.gcs['dc'],
           N=st.integers(2, 10),
           C=st.integers(3, 10),
           H=st.integers(5, 10),
           W=st.integers(7, 10),
           order=st.sampled_from(['NCHW', 'NHWC']),
           epsilon=st.floats(1e-6, 1e-4),
           store_mean=st.booleans(),
           seed=st.integers(0, 1000),
           store_inv_stdev=st.booleans())
    def test_instance_norm_device_check(self, gc, dc, N, C, H, W, order,
                                        store_mean, store_inv_stdev, epsilon,
                                        seed):
        np.random.seed(seed)

        # force store_inv_stdev on when store_mean is set, to match the
        # existing forward-pass implementation
        store_inv_stdev |= store_mean

        inputs = self._get_inputs(N, C, H, W, order)
        op = self._get_op(device_option=gc,
                          store_mean=store_mean,
                          store_inv_stdev=store_inv_stdev,
                          epsilon=epsilon,
                          order=order)

        self.assertDeviceChecks(dc, op, inputs, [0])

    @given(is_test=st.booleans(),
           N=st.integers(2, 10),
           C=st.integers(3, 10),
           H=st.integers(5, 10),
           W=st.integers(7, 10),
           order=st.sampled_from(['NCHW', 'NHWC']),
           epsilon=st.floats(1e-6, 1e-4),
           seed=st.integers(0, 1000))
    def test_instance_norm_model_helper(self, N, C, H, W, order, epsilon, seed,
                                        is_test):
        np.random.seed(seed)
        model = model_helper.ModelHelper(name="test_model")
        brew.instance_norm(model,
                           'input',
                           'output',
                           C,
                           epsilon=epsilon,
                           order=order,
                           is_test=is_test)

        input_blob = np.random.rand(N, C, H, W).astype(np.float32)
        if order == 'NHWC':
            input_blob = utils.NCHW2NHWC(input_blob)

        self.ws.create_blob('input').feed(input_blob)

        self.ws.create_net(model.param_init_net).run()
        self.ws.create_net(model.net).run()

        if is_test:
            scale = self.ws.blobs['output_s'].fetch()
            assert scale is not None
            assert scale.shape == (C, )
            bias = self.ws.blobs['output_b'].fetch()
            assert bias is not None
            assert bias.shape == (C, )

        output_blob = self.ws.blobs['output'].fetch()
        if order == 'NHWC':
            output_blob = utils.NHWC2NCHW(output_blob)

        assert output_blob.shape == (N, C, H, W)
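
These tests lean on the NCHW/NHWC layout helpers; under the usual
convention they are plain axis transposes, which the following sketch makes
explicit (assuming caffe2's utils.NCHW2NHWC/NHWC2NCHW behave this way):

import numpy as np

def nchw2nhwc(x):
    # (N, C, H, W) -> (N, H, W, C)
    return np.transpose(x, (0, 2, 3, 1))

def nhwc2nchw(x):
    # (N, H, W, C) -> (N, C, H, W)
    return np.transpose(x, (0, 3, 1, 2))

x = np.random.rand(2, 3, 4, 5).astype(np.float32)
assert np.array_equal(nhwc2nchw(nchw2nhwc(x)), x)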
Example No. 58
class ModuleAPITest(QuantizationTestCase):
    def test_relu(self):
        relu_module = nnq.ReLU()
        relu6_module = nnq.ReLU6()

        x = torch.arange(-10, 10, dtype=torch.float)
        y_ref = torch.relu(x)
        y6_ref = torch.nn.modules.ReLU6()(x)

        qx = torch.quantize_per_tensor(x, 1.0, 0, dtype=torch.qint32)
        qy = relu_module(qx)
        qy6 = relu6_module(qx)

        self.assertEqual(y_ref,
                         qy.dequantize(),
                         message="ReLU module API failed")
        self.assertEqual(y6_ref,
                         qy6.dequantize(),
                         message="ReLU6 module API failed")

    @given(batch_size=st.integers(1, 5),
           in_features=st.integers(16, 32),
           out_features=st.integers(4, 8),
           use_bias=st.booleans(),
           use_fused=st.booleans(),
           per_channel=st.booleans(),
           qengine=st.sampled_from(("qnnpack", "fbgemm")))
    def test_linear_api(self, batch_size, in_features, out_features, use_bias,
                        use_fused, per_channel, qengine):
        """test API functionality for nn.quantized.linear and nn.intrinsic.quantized.linear_relu"""
        if qengine not in torch.backends.quantized.supported_engines:
            return
        if qengine == 'qnnpack':
            if IS_PPC or TEST_WITH_UBSAN:
                return
            per_channel = False
        with override_quantized_engine(qengine):
            W = torch.rand(out_features, in_features).float()
            if per_channel:
                scale_tensor = torch.ones(out_features, dtype=torch.double)
                zero_point_tensor = torch.zeros(out_features, dtype=torch.long)
                for i in range(len(scale_tensor)):
                    scale_tensor[i] = (i + 1.0) / 255.0
                W_q = torch.quantize_per_channel(W,
                                                 scales=scale_tensor,
                                                 zero_points=zero_point_tensor,
                                                 axis=0,
                                                 dtype=torch.qint8)
            else:
                W_q = torch.quantize_per_tensor(W, 0.1, 4, torch.qint8)

            X = torch.rand(batch_size, in_features).float()
            X_q = torch.quantize_per_tensor(X, 0.2, 10, torch.quint8)
            B = torch.rand(out_features).float() if use_bias else None
            scale = 0.5
            zero_point = 3
            if use_fused:
                qlinear = nnq_fused.LinearReLU(in_features, out_features)
            else:
                qlinear = nnq.Linear(in_features, out_features)

            # Run module with default-initialized parameters.
            # This tests that the constructor is correct.
            qlinear(X_q)

            qlinear.set_weight_bias(W_q, B)
            # Simple round-trip test to ensure weight()/set_weight() API
            self.assertEqual(qlinear.weight(), W_q)
            W_pack = qlinear._packed_params._packed_params

            qlinear.scale = float(scale)
            qlinear.zero_point = int(zero_point)
            Z_q = qlinear(X_q)
            # Check if the module implementation matches calling the
            # ops directly
            if use_fused:
                Z_ref = torch.ops.quantized.linear_relu(
                    X_q, W_pack, scale, zero_point)

                self.assertTrue('QuantizedLinearReLU' in str(qlinear))
            else:
                Z_ref = torch.ops.quantized.linear(X_q, W_pack, scale,
                                                   zero_point)

                self.assertTrue('QuantizedLinear' in str(qlinear))
            self.assertEqual(Z_ref, Z_q)

            # Test serialization of quantized Linear Module using state_dict
            model_dict = qlinear.state_dict()
            self.assertEqual(model_dict['_packed_params.weight'], W_q)
            if use_bias:
                self.assertEqual(model_dict['_packed_params.bias'], B)
            b = io.BytesIO()
            torch.save(model_dict, b)
            b.seek(0)
            loaded_dict = torch.load(b)
            for key in model_dict:
                self.assertEqual(model_dict[key], loaded_dict[key])
            if use_fused:
                loaded_qlinear = nnq_fused.LinearReLU(in_features,
                                                      out_features)
            else:
                loaded_qlinear = nnq.Linear(in_features, out_features)
            loaded_qlinear.load_state_dict(loaded_dict)

            linear_unpack = torch.ops.quantized.linear_unpack
            self.assertEqual(
                linear_unpack(qlinear._packed_params._packed_params),
                linear_unpack(loaded_qlinear._packed_params._packed_params))
            if use_bias:
                self.assertEqual(qlinear.bias(), loaded_qlinear.bias())
            self.assertEqual(qlinear.scale, loaded_qlinear.scale)
            self.assertEqual(qlinear.zero_point, loaded_qlinear.zero_point)
            self.assertTrue(dir(qlinear) == dir(loaded_qlinear))
            self.assertTrue(hasattr(qlinear, '_packed_params'))
            self.assertTrue(hasattr(loaded_qlinear, '_packed_params'))
            self.assertTrue(hasattr(qlinear, '_weight_bias'))
            self.assertTrue(hasattr(loaded_qlinear, '_weight_bias'))
            self.assertEqual(qlinear._weight_bias(),
                             loaded_qlinear._weight_bias())
            self.assertEqual(
                qlinear._weight_bias(),
                torch.ops.quantized.linear_unpack(
                    qlinear._packed_params._packed_params))
            Z_q2 = loaded_qlinear(X_q)
            self.assertEqual(Z_q, Z_q2)

            # The below check is meant to ensure that `torch.save` and `torch.load`
            # serialization works, however it is currently broken by the following:
            # https://github.com/pytorch/pytorch/issues/24045
            #
            # Instead, we currently check that the proper exception is thrown on save.
            # <start code>
            # b = io.BytesIO()
            # torch.save(qlinear, b)
            # b.seek(0)
            # loaded = torch.load(b)
            # self.assertEqual(qlinear.weight(), loaded.weight())
            # self.assertEqual(qlinear.scale, loaded.scale)
            # self.assertEqual(qlinear.zero_point, loaded.zero_point)
            # <end code>
            with self.assertRaisesRegex(
                    RuntimeError,
                    r'torch.save\(\) is not currently supported'):
                b = io.BytesIO()
                torch.save(qlinear, b)

            # Test JIT
            self.checkScriptable(qlinear,
                                 list(zip([X_q], [Z_ref])),
                                 check_save_load=True)

            # Test from_float.
            float_linear = torch.nn.Linear(in_features, out_features).float()
            float_linear.qconfig = torch.quantization.default_qconfig
            torch.quantization.prepare(float_linear, inplace=True)
            float_linear(X.float())
            # Sequential allows swapping using "convert".
            quantized_float_linear = torch.nn.Sequential(float_linear)
            quantized_float_linear = torch.quantization.convert(
                quantized_float_linear, inplace=True)

            # Smoke test to make sure the module actually runs
            quantized_float_linear(X_q)

            # Smoke test extra_repr
            self.assertTrue('QuantizedLinear' in str(quantized_float_linear))

    def test_quant_dequant_api(self):
        r = torch.tensor([[1., -1.], [1., -1.]], dtype=torch.float)
        scale, zero_point, dtype = 1.0, 2, torch.qint8
        # testing Quantize API
        qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
        quant_m = nnq.Quantize(scale, zero_point, dtype)
        qr2 = quant_m(r)
        self.assertEqual(qr, qr2)
        # testing Dequantize API
        rqr = qr.dequantize()
        dequant_m = nnq.DeQuantize()
        rqr2 = dequant_m(qr2)
        self.assertEqual(rqr, rqr2)

    def _test_conv_api_impl(
        self,
        module_name,
        qconv_module,
        conv_module,
        batch_size,
        in_channels_per_group,
        input_feature_map_size,
        out_channels_per_group,
        groups,
        kernel_size,
        stride,
        padding,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias,
        use_fused,
        use_channelwise,
    ):
        for i in range(len(kernel_size)):
            assume(input_feature_map_size[i] + 2 * padding[i] >= dilation[i] *
                   (kernel_size[i] - 1) + 1)

        in_channels = in_channels_per_group * groups
        out_channels = out_channels_per_group * groups
        (X, X_q, W, W_q, b) = _make_conv_test_input(
            batch_size, in_channels_per_group, input_feature_map_size,
            out_channels_per_group, groups, kernel_size, X_scale, X_zero_point,
            W_scale, W_zero_point, use_bias, use_channelwise)

        qconv_module.set_weight_bias(W_q, b)
        qconv_module.scale = Y_scale
        qconv_module.zero_point = Y_zero_point

        if use_fused:
            conv_module[0].weight.data = W
            if use_bias:
                conv_module[0].bias.data = b
        else:
            conv_module.weight.data = W
            if use_bias:
                conv_module.bias.data = b

        # Test members
        self.assertTrue(module_name in str(qconv_module))
        self.assertTrue(hasattr(qconv_module, '_packed_params'))
        self.assertTrue(hasattr(qconv_module, 'scale'))
        self.assertTrue(hasattr(qconv_module, 'zero_point'))

        # Test properties
        self.assertEqual(W_q, qconv_module.weight())
        if use_bias:
            self.assertEqual(b, qconv_module.bias())
        self.assertEqual(Y_scale, qconv_module.scale)
        self.assertEqual(Y_zero_point, qconv_module.zero_point)

        # Test forward
        Y_exp = conv_module(X)
        Y_exp = torch.quantize_per_tensor(Y_exp,
                                          scale=Y_scale,
                                          zero_point=Y_zero_point,
                                          dtype=torch.quint8)
        Y_act = qconv_module(X_q)

        # Make sure the results match
        # assert_array_almost_equal compares using the following formula:
        #     abs(desired-actual) < 1.5 * 10**(-decimal)
        # (https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html)
        # We use decimal = 0 to ignore off-by-1 differences between reference
        # and test. Off-by-1 differences arise due to the order of round and
        # zero_point addition operation, i.e., if addition followed by round is
        # used by reference and round followed by addition is used by test, the
        # results may differ by 1.
        # For example, the result of round(2.5) + 1 is 3 while round(2.5 + 1) is
        # 4 assuming the rounding mode is round-to-nearest, ties-to-even.
        np.testing.assert_array_almost_equal(Y_exp.int_repr().numpy(),
                                             Y_act.int_repr().numpy(),
                                             decimal=0)

        # Test serialization of quantized Conv Module using state_dict
        model_dict = qconv_module.state_dict()
        self.assertEqual(W_q, model_dict['weight'])
        if use_bias:
            self.assertEqual(b, model_dict['bias'])
        bytes_io = io.BytesIO()
        torch.save(model_dict, bytes_io)
        bytes_io.seek(0)
        loaded_dict = torch.load(bytes_io)
        for key in loaded_dict:
            self.assertEqual(model_dict[key], loaded_dict[key])

        loaded_qconv_module = type(qconv_module)(in_channels,
                                                 out_channels,
                                                 kernel_size,
                                                 stride,
                                                 padding,
                                                 dilation,
                                                 groups,
                                                 use_bias,
                                                 padding_mode="zeros")
        loaded_qconv_module.load_state_dict(loaded_dict)

        self.assertTrue(dir(loaded_qconv_module) == dir(qconv_module))
        self.assertTrue(module_name in str(loaded_qconv_module))
        self.assertTrue(hasattr(loaded_qconv_module, '_packed_params'))
        self.assertTrue(hasattr(loaded_qconv_module, '_weight_bias'))

        self.assertEqual(qconv_module.weight(), loaded_qconv_module.weight())
        if use_bias:
            self.assertEqual(qconv_module.bias(), loaded_qconv_module.bias())
        self.assertEqual(qconv_module.scale, loaded_qconv_module.scale)
        self.assertEqual(qconv_module.zero_point,
                         loaded_qconv_module.zero_point)
        Y_loaded = loaded_qconv_module(X_q)
        np.testing.assert_array_almost_equal(Y_exp.int_repr().numpy(),
                                             Y_loaded.int_repr().numpy(),
                                             decimal=0)

        # The below check is meant to ensure that `torch.save` and `torch.load`
        # serialization works, however it is currently broken by the following:
        # https://github.com/pytorch/pytorch/issues/24045
        #
        # Instead, we currently check that the proper exception is thrown on
        # save.
        # <start code>
        # b = io.BytesIO()
        # torch.save(conv_under_test, b)
        # b.seek(0)
        # loaded_conv = torch.load(b)
        #
        # self.assertEqual(loaded_qconv_module.bias(), qconv_module.bias())
        # self.assertEqual(loaded_qconv_module.scale, qconv_module.scale)
        # self.assertEqual(loaded_qconv_module.zero_point,
        #                  qconv_module.zero_point)
        # <end code>
        with self.assertRaisesRegex(
                RuntimeError, r'torch.save\(\) is not currently supported'):
            bytes_io = io.BytesIO()
            torch.save(qconv_module, bytes_io)

        # JIT testing
        self.checkScriptable(qconv_module,
                             list(zip([X_q], [Y_exp])),
                             check_save_load=True)

        # Test from_float
        conv_module.qconfig = torch.quantization.default_qconfig
        torch.quantization.prepare(conv_module, inplace=True)
        conv_module(X.float())
        converted_qconv_module = torch.nn.Sequential(conv_module)
        torch.quantization.convert(converted_qconv_module, inplace=True)

        # Smoke test to make sure the module actually runs
        if use_bias:
            if use_fused:
                self.assertEqual(conv_module[0].bias,
                                 converted_qconv_module[0].bias())
            else:
                self.assertEqual(conv_module.bias,
                                 converted_qconv_module[0].bias())
        # Smoke test extra_repr
        self.assertTrue(module_name in str(converted_qconv_module))

    @given(batch_size=st.integers(1, 3),
           in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
           H=st.integers(4, 16),
           W=st.integers(4, 16),
           out_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
           groups=st.integers(1, 4),
           kernel_h=st.integers(1, 7),
           kernel_w=st.integers(1, 7),
           stride_h=st.integers(1, 2),
           stride_w=st.integers(1, 2),
           pad_h=st.integers(0, 2),
           pad_w=st.integers(0, 2),
           dilation=st.integers(1, 2),
           X_scale=st.floats(1.2, 1.6),
           X_zero_point=st.integers(0, 4),
           W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
           W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
           Y_scale=st.floats(4.2, 5.6),
           Y_zero_point=st.integers(0, 4),
           use_bias=st.booleans(),
           use_fused=st.booleans(),
           use_channelwise=st.booleans(),
           qengine=st.sampled_from(("qnnpack", "fbgemm")))
    def test_conv2d_api(
        self,
        batch_size,
        in_channels_per_group,
        H,
        W,
        out_channels_per_group,
        groups,
        kernel_h,
        kernel_w,
        stride_h,
        stride_w,
        pad_h,
        pad_w,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias,
        use_fused,
        use_channelwise,
        qengine,
    ):
        # Tests the correctness of the conv2d module.
        if qengine not in torch.backends.quantized.supported_engines:
            return
        if qengine == 'qnnpack':
            if IS_PPC or TEST_WITH_UBSAN:
                return
            use_channelwise = False

        in_channels = in_channels_per_group * groups
        out_channels = out_channels_per_group * groups
        input_feature_map_size = (H, W)
        kernel_size = (kernel_h, kernel_w)
        stride = (stride_h, stride_w)
        padding = (pad_h, pad_w)
        dilation = (dilation, dilation)

        with override_quantized_engine(qengine):
            if use_fused:
                module_name = "QuantizedConvReLU2d"
                qconv_module = nnq_fused.ConvReLU2d(in_channels,
                                                    out_channels,
                                                    kernel_size,
                                                    stride,
                                                    padding,
                                                    dilation,
                                                    groups,
                                                    use_bias,
                                                    padding_mode="zeros")
            else:
                module_name = "QuantizedConv2d"
                qconv_module = nnq.Conv2d(in_channels,
                                          out_channels,
                                          kernel_size,
                                          stride,
                                          padding,
                                          dilation,
                                          groups,
                                          use_bias,
                                          padding_mode="zeros")

            conv_module = nn.Conv2d(in_channels,
                                    out_channels,
                                    kernel_size,
                                    stride,
                                    padding,
                                    dilation,
                                    groups,
                                    use_bias,
                                    padding_mode="zeros")
            if use_fused:
                relu_module = nn.ReLU()
                conv_module = nni.ConvReLU2d(conv_module, relu_module)
            conv_module = conv_module.float()

            self._test_conv_api_impl(
                module_name, qconv_module, conv_module, batch_size,
                in_channels_per_group, input_feature_map_size,
                out_channels_per_group, groups, kernel_size, stride, padding,
                dilation, X_scale, X_zero_point, W_scale, W_zero_point,
                Y_scale, Y_zero_point, use_bias, use_fused, use_channelwise)

    @given(batch_size=st.integers(1, 3),
           in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16]),
           D=st.integers(3, 6),
           H=st.integers(3, 6),
           W=st.integers(3, 6),
           out_channels_per_group=st.sampled_from([2, 4, 5, 8, 16]),
           groups=st.integers(1, 4),
           kernel_d=st.integers(1, 3),
           kernel_h=st.integers(1, 3),
           kernel_w=st.integers(1, 3),
           stride_d=st.integers(1, 2),
           stride_h=st.integers(1, 2),
           stride_w=st.integers(1, 2),
           pad_d=st.integers(0, 1),
           pad_h=st.integers(0, 1),
           pad_w=st.integers(0, 1),
           dilation=st.integers(1, 2),
           X_scale=st.floats(1.2, 1.6),
           X_zero_point=st.integers(0, 4),
           W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
           W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
           Y_scale=st.floats(4.2, 5.6),
           Y_zero_point=st.integers(0, 4),
           use_bias=st.booleans(),
           use_fused=st.booleans(),
           use_channelwise=st.booleans(),
           qengine=st.sampled_from(("fbgemm", )))
    def test_conv3d_api(
        self,
        batch_size,
        in_channels_per_group,
        D,
        H,
        W,
        out_channels_per_group,
        groups,
        kernel_d,
        kernel_h,
        kernel_w,
        stride_d,
        stride_h,
        stride_w,
        pad_d,
        pad_h,
        pad_w,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias,
        use_channelwise,
        use_fused,
        qengine,
    ):
        # Tests the correctness of the conv3d module.
        if qengine not in torch.backends.quantized.supported_engines:
            return

        in_channels = in_channels_per_group * groups
        out_channels = out_channels_per_group * groups
        input_feature_map_size = (D, H, W)
        kernel_size = (kernel_d, kernel_h, kernel_w)
        stride = (stride_d, stride_h, stride_w)
        padding = (pad_d, pad_h, pad_w)
        dilation = (dilation, dilation, dilation)

        with override_quantized_engine(qengine):
            if use_fused:
                module_name = "QuantizedConvReLU3d"
                qconv_module = nnq_fused.ConvReLU3d(in_channels,
                                                    out_channels,
                                                    kernel_size,
                                                    stride,
                                                    padding,
                                                    dilation,
                                                    groups,
                                                    use_bias,
                                                    padding_mode="zeros")
            else:
                module_name = "QuantizedConv3d"
                qconv_module = nnq.Conv3d(in_channels,
                                          out_channels,
                                          kernel_size,
                                          stride,
                                          padding,
                                          dilation,
                                          groups,
                                          use_bias,
                                          padding_mode="zeros")

            conv_module = nn.Conv3d(in_channels,
                                    out_channels,
                                    kernel_size,
                                    stride,
                                    padding,
                                    dilation,
                                    groups,
                                    use_bias,
                                    padding_mode="zeros")
            if use_fused:
                relu_module = nn.ReLU()
                conv_module = nni.ConvReLU3d(conv_module, relu_module)
            conv_module = conv_module.float()

            self._test_conv_api_impl(
                module_name, qconv_module, conv_module, batch_size,
                in_channels_per_group, input_feature_map_size,
                out_channels_per_group, groups, kernel_size, stride, padding,
                dilation, X_scale, X_zero_point, W_scale, W_zero_point,
                Y_scale, Y_zero_point, use_bias, use_fused, use_channelwise)

    def test_pool_api(self):
        """Tests the correctness of the pool module.
        The correctness is defined against the functional implementation.
        """
        N, C, H, W = 10, 10, 10, 3
        kwargs = {
            'kernel_size': 2,
            'stride': None,
            'padding': 0,
            'dilation': 1
        }

        scale, zero_point = 1.0 / 255, 128

        X = torch.randn(N, C, H, W, dtype=torch.float32)
        qX = torch.quantize_per_tensor(X,
                                       scale=scale,
                                       zero_point=zero_point,
                                       dtype=torch.quint8)
        qX_expect = torch.nn.functional.max_pool2d(qX, **kwargs)

        pool_under_test = torch.nn.quantized.MaxPool2d(**kwargs)
        qX_hat = pool_under_test(qX)
        self.assertEqual(qX_expect, qX_hat)

        # JIT Testing
        self.checkScriptable(pool_under_test, list(zip([X], [qX_expect])))

    def test_batch_norm2d(self):
        """Tests the correctness of the batchnorm2d module.
        The correctness is defined against the functional implementation.
        """
        x = torch.randn((2, 4, 6, 8), dtype=torch.float)
        float_mod = torch.nn.BatchNorm2d(4)
        float_mod.training = False

        y_ref = float_mod(x)
        quant_ref = torch.quantize_per_tensor(y_ref,
                                              1.0,
                                              0,
                                              dtype=torch.quint8)

        quant_mod = nnq.BatchNorm2d(4)
        qx = torch.quantize_per_tensor(x, 1.0, 0, dtype=torch.quint8)
        qy = quant_mod(qx)

        self.assertEqual(quant_ref.int_repr().numpy(),
                         qy.int_repr().numpy(),
                         message="BatchNorm2d module API failed")

    def test_batch_norm3d(self):
        """Tests the correctness of the batchnorm3d module.
        The correctness is defined against the functional implementation.
        """
        x = torch.randn((2, 4, 6, 8, 10), dtype=torch.float)
        float_mod = torch.nn.BatchNorm3d(4)
        float_mod.training = False

        y_ref = float_mod(x)
        quant_ref = torch.quantize_per_tensor(y_ref,
                                              1.0,
                                              0,
                                              dtype=torch.quint8)

        quant_mod = nnq.BatchNorm3d(4)
        qx = torch.quantize_per_tensor(x, 1.0, 0, dtype=torch.quint8)
        qy = quant_mod(qx)

        self.assertEqual(quant_ref.int_repr().numpy(),
                         qy.int_repr().numpy(),
                         message="BatchNorm3d module API failed")
Example No. 59
class TestTags(object):
    """
    Test methods of the ``Tags`` class.
    """

    pytestmark = _UDEV_TEST(154, "TestTags")

    _device_data = [d for d in _DEVICE_DATA if d.tags]
    if len(_device_data) > 0:
        @given(
           _CONTEXT_STRATEGY,
           strategies.sampled_from(_device_data),
           settings=Settings(max_examples=5)
        )
        def test_iteration(self, a_context, device_datum):
            device = Device.from_path(a_context, device_datum.device_path)
            assert set(device.tags) == set(device_datum.tags)
            for tag in device.tags:
                assert is_unicode_string(tag)

        @given(
           _CONTEXT_STRATEGY,
           strategies.sampled_from(_device_data),
           settings=Settings(max_examples=5)
        )
        def test_contains(self, a_context, device_datum):
            device = Device.from_path(a_context, device_datum.device_path)
            for tag in device_datum.tags:
                assert tag in device.tags
    else:
        def test_iteration(self):
            pytest.skip("not enough devices with tags")

        def test_contains(self):
            pytest.skip("not enough devices with tags")

    @given(
       strategies.sampled_from(_DEVICES),
       settings=Settings(max_examples=5)
    )
    def test_iteration_mock(self, a_device):
        funcname = 'udev_device_get_tags_list_entry'
        with pytest.libudev_list(a_device._libudev, funcname,
                                 [b'spam', b'eggs']):
            tags = list(a_device.tags)
            assert tags == ['spam', 'eggs']
            func = a_device._libudev.udev_device_get_tags_list_entry
            func.assert_called_once_with(a_device)


    @_UDEV_TEST(172, "test_contains_mock")
    @given(
       strategies.sampled_from(_DEVICES),
       settings=Settings(max_examples=5)
    )
    def test_contains_mock(self, a_device):
        """
        Test that ``udev_device_has_tag`` is called if available.
        """
        funcname = 'udev_device_has_tag'
        spec = lambda d, t: None
        with mock.patch.object(a_device._libudev, funcname,
                               autospec=spec) as func:
            func.return_value = 1
            assert 'foo' in a_device.tags
            func.assert_called_once_with(a_device, b'foo')
Example No. 60
def steps(self):
    strat = tuples(just(False), integers())
    if self.elements:
        strat |= tuples(just(True), sampled_from(self.elements))
    return strat
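
For context, `|` on hypothesis strategies is shorthand for one_of, so the
steps strategy above draws either (False, some integer) or (True, an
existing element). A minimal sketch of the same composition outside the
state machine (the elements list below is a hypothetical stand-in for
self.elements):

from hypothesis.strategies import integers, just, sampled_from, tuples

elements = ['a', 'b', 'c']  # hypothetical stand-in for self.elements
strat = tuples(just(False), integers())
if elements:
    strat |= tuples(just(True), sampled_from(elements))

print(strat.example())  # e.g. (False, 17) or (True, 'b')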