Example #1
0
 def test_nested(self):
     """Empty lists/tuples are nested sequences; dicts are nested only."""
     for container_type in (list, tuple):
         self.assertTrue(nest.is_nested(container_type()))
         self.assertTrue(nest.is_sequence(container_type()))
     self.assertTrue(nest.is_nested(dict()))
     self.assertFalse(nest.is_sequence(dict()))
Example #2
0
 def assertEqual(
     self,
     first,
     second,
     msg=None,
     prec=None,
 ):
     """Assert two (possibly nested) values are element-wise equal.

     Tensors are converted to numpy arrays and compared with an absolute
     tolerance of ``prec`` (defaults to ``self.precision``); boolean
     arrays are compared exactly; nested sequences are compared pairwise.

     Parameters
     ----------
     first : Any
         The value to check.
     second : Any
         The expected value.
     msg : str, optional
         The failure message.
     prec : float, optional
         The maximum tolerated absolute difference.

     """
     if prec is None:
         prec = self.precision
     inputs = nest.flatten(first)
     num_first = len(inputs)
     inputs += nest.flatten(second)
     num_second = len(inputs) - num_first
     # Convert tensors to numpy arrays for comparison.
     # Renamed loop variable: the original shadowed the builtin <input>.
     for i, value in enumerate(inputs):
         if isinstance(value, torch.Tensor):
             inputs[i] = value.numpy()
     first = inputs[:num_first] if num_first > 1 else inputs[0]
     second = inputs[num_first:len(inputs)] if num_second > 1 else inputs[
         num_first]
     if isinstance(first, np.ndarray) and isinstance(second, np.ndarray):
         super(OpTestCase, self).assertEqual(first.shape, second.shape)
         # <np.bool> was removed in NumPy 1.24; use the <np.bool_> scalar type.
         if first.dtype == np.bool_ and second.dtype == np.bool_:
             # XOR is True exactly where the arrays disagree. The original
             # checked len(np.unique(diff)) <= 1, which also passed when
             # EVERY element differed (diff all-True is a single unique
             # value); require no disagreement at all instead.
             diff = first ^ second
             self.assertFalse(diff.any(), msg)
         else:
             diff = np.abs(first - second)
             max_err = diff.max()
             self.assertLessEqual(max_err, prec, msg)
     elif nest.is_sequence(first) and nest.is_sequence(second):
         # Recurse over paired elements of the two sequences.
         for a, b in zip(first, second):
             self.assertEqual(a, b, msg, prec)
     else:
         super(OpTestCase, self).assertEqual(first, second, msg)
Example #3
0
def normalize_paddings(value, rank):
    """Repeat the paddings according to the rank.

    Parameters
    ----------
    value : Union[int, Sequence]
        A single int, a flat sequence of ints, or a sequence of pairs.
    rank : int
        The number of spatial dimensions to pad.

    Returns
    -------
    tuple
        ``rank`` pairs of ``(before, after)`` paddings.

    """
    if isinstance(value, int):
        # A scalar pads symmetrically on every dimension.
        return ((value, value), ) * rank
    elif nest.is_sequence(value):
        if isinstance(value[0], int):
            return normalize_tuple(value, rank)
        elif nest.is_sequence(value[0]):
            value = [normalize_tuple(v, 2) for v in value]
            if len(value) > rank:
                # BUG FIX: the original returned a generator expression
                # here while every other branch returns a tuple; truncate
                # to <rank> pairs and materialize the tuple.
                return tuple(value[i] for i in range(rank))
            else:
                # Repeat the last pair to fill the remaining dimensions.
                return tuple([value[i] for i in range(len(value))] +
                             [value[-1] for _ in range(len(value), rank)])
    # NOTE(review): unsupported inputs implicitly return None, matching
    # the original behavior — confirm callers rely on this before raising.
Example #4
0
    def run(self, inputs, **kwargs):
        """Run the model.

        Parameters
        ----------
        inputs : Union[Sequence, Dict]
            The input arrays.

        Returns
        -------
        namedtuple
            The model outputs.

        """
        # Promote a single array to a one-element sequence.
        if isinstance(inputs, numpy.ndarray):
            inputs = [inputs]
        if isinstance(inputs, dict):
            # Feed inputs by name.
            for name, value in inputs.items():
                self._input_dict[name]._impl.FromNumpy(value)
        elif nest.is_sequence(inputs):
            # Feed inputs by position.
            for ref, value in zip(self._input_dict.values(), inputs):
                ref._impl.FromNumpy(value)
        else:
            # Fixed misspelled message: 'Excepted' -> 'Expected'.
            raise ValueError('Expected sequence or dict inputs.')
        self._function.callback(return_outputs=False)
        named_outputs = namedtupledict('Outputs',
                                       list(self._output_dict.keys()))
        return named_outputs(*(self._output_dict.values()))
Example #5
0
    def run(self, inputs, **kwargs):
        """Run the model.

        Parameters
        ----------
        inputs : Union[Sequence, Dict]
            The input arrays.

        Returns
        -------
        namedtuple
            The model outputs.

        """
        # Promote a single array to a one-element sequence.
        if isinstance(inputs, numpy.ndarray):
            inputs = [inputs]
        if isinstance(inputs, dict):
            # Feed inputs by name.
            for name, value in inputs.items():
                self._input_dict[name]._impl.FromNumpy(value)
        elif nest.is_sequence(inputs):
            # Feed inputs by position.
            for ref, value in zip(self._input_dict.values(), inputs):
                ref._impl.FromNumpy(value)
        else:
            # Fixed misspelled message: 'Excepted' -> 'Expected'.
            raise ValueError('Expected sequence or dict inputs.')
        self._context.run()
        return self._output_tuple(*self._output_dict.values())
Example #6
0
def split(tensor, split_size_or_sections, dim=0, copy=True):
    """Split input into chunks along the given dimension.

    Either size of every chunk or each chunk will be accepted:

    ```python
    x = torch.tensor([1, 2, 3, 4, 5, 6])
    # Shape: (6,) -> (4,), (2,)
    print(torch.split(x, split_size_or_sections=4))
    # Shape: (6,) -> (5,), (1,)
    print(torch.split(x, split_size_or_sections=(5, 1)))
    ```

    :attr:`dim` can be negative:

    ```python
    x = torch.tensor([[1, 2, 3], [4, 5, 6]])
    print(torch.split(x, 2, dim=1))
    print(torch.split(x, 2, dim=-1))  # Equivalent
    ```

    Parameters
    ----------
    tensor : dragon.vm.torch.Tensor
        The input tensor.
    split_size_or_sections : Union[int, Sequence[int]]
        The number or size of chunks.
    dim : int, optional, default=0
        The dimension to split.
    copy : bool, optional, default=True
        Copy or create the views of input.

    Returns
    -------
    Sequence[dragon.vm.torch.Tensor]
        The output tensors.

    """
    if nest.is_sequence(split_size_or_sections):
        # Explicit sizes: one output per entry.
        num_splits = len(split_size_or_sections)
        size_splits = split_size_or_sections
    else:
        # Uniform chunk size: ceil-divide, the last chunk takes the rest.
        dim_size = tensor.shape[dim]
        num_splits = -(-dim_size // split_size_or_sections)
        size_splits = [split_size_or_sections] * num_splits
        size_splits[-1] = dim_size - split_size_or_sections * (num_splits - 1)
    return Function.apply('Split',
                          tensor.device, [tensor],
                          outputs=[None] * num_splits,
                          axis=dim,
                          num_splits=num_splits,
                          split=size_splits,
                          copy=copy)
Example #7
0
def split(tensor, split_size_or_sections, dim=0):
    """Split input into chunks along the given dimension.

    Either size of every chunk or each chunk will be accepted:

    ```python
    x = torch.tensor([1, 2, 3, 4, 5, 6])
    # Shape: (6,) -> (4,), (2,)
    print(torch.split(x, split_size_or_sections=4))
    # Shape: (6,) -> (5,), (1,)
    print(torch.split(x, split_size_or_sections=(5, 1)))
    ```

    The ``dim`` can be negative representing the last-k axis:

    ```python
    x = torch.tensor([[1, 2], [3, 4], [5, 6]])
    print(torch.split(x, 2, dim=1))
    print(torch.split(x, 2, dim=-1))  # Equivalent
    ```

    Parameters
    ----------
    tensor : dragon.vm.torch.Tensor
        The input tensor.
    split_size_or_sections : Union[int, Sequence[int]]
        The number or size of chunks.
    dim : int, optional, default=0
        The dimension to split.

    Returns
    -------
    Sequence[dragon.vm.torch.Tensor]
        The output tensors.

    """
    if nest.is_sequence(split_size_or_sections):
        # Explicit sizes: one output per entry.
        size_splits = split_size_or_sections
        num_splits = len(split_size_or_sections)
    else:
        # Uniform chunk size: ceil-divide, the last chunk takes the rest.
        dim_size = tensor.shape[dim]
        num_splits = -(-dim_size // split_size_or_sections)
        size_splits = [split_size_or_sections] * num_splits
        size_splits[-1] = dim_size - split_size_or_sections * (num_splits - 1)
    op = _functions.Split.instantiate(
        tensor.device,
        axis=dim,
        size_splits=size_splits,
    )
    return op.apply(tensor, num_splits)
Example #8
0
 def _maybe_build(self, inputs):
     """Build the layer from the input shapes on first use."""
     if self.built:
         return
     input_spec.assert_input_compatibility(self.input_spec, inputs,
                                           self.name)
     flat_inputs = nest.flatten(inputs)
     shapes = None
     if all(hasattr(x, 'shape') for x in flat_inputs):
         shapes = [x.shape for x in flat_inputs]
         # A non-sequence input is built from its single shape.
         if not nest.is_sequence(inputs):
             shapes = shapes[0]
     self.build(shapes)
Example #9
0
def split(inputs, num_or_size_splits, axis=0, copy=True, **kwargs):
    """Split input into chunks along the given axis.

    Either number or size of splits will be accepted:

    ```python
    x = dragon.constant([[1, 2], [3, 4], [5, 6]])
    # Shape: (3, 2) -> (2, 2), (1, 2)
    print(dragon.split(x, num_or_size_splits=2))
    # Shape: (3, 2) -> (1, 2), (2, 2)
    print(dragon.split(x, num_or_size_splits=(1, 2)))
    ```

    :attr:`axis` can be negative:

    ```python
    x = dragon.constant([[1, 2], [3, 4], [5, 6]])
    print(dragon.split(x, 2, axis=1))
    print(dragon.split(x, 2, axis=-1))  # Equivalent
    ```

    Parameters
    ----------
    inputs : dragon.Tensor
        The input tensor.
    num_or_size_splits : Union[int, Sequence[int]]
        The number or size of chunks.
    axis : int, optional, default=0
        The axis to split.
    copy : bool, optional, default=True
        Copy or create the views of input.

    Returns
    -------
    Sequence[dragon.Tensor]
        The output tensors.

    """
    if nest.is_sequence(num_or_size_splits):
        # Explicit sizes: the split attribute carries them.
        size_splits = num_or_size_splits
        num = num_splits = len(num_or_size_splits)
    else:
        # A plain count: let the backend compute even sizes.
        size_splits, num, num_splits = None, num_or_size_splits, 0
    if context.executing_eagerly():
        return OpLib.execute(
            'Split', inputs, outputs=[None] * num, axis=axis,
            num_splits=num_splits, split=size_splits, copy=copy)
    return OpLib.add('Split', inputs, num_outputs=num, axis=axis,
                     split=size_splits, copy=copy, **kwargs)
Example #10
0
    def shape(self, value):
        """Set the tensor shape.

        Parameters
        ----------
        value : Sequence[int]
            The shape to set.

        """
        if value is None:
            # Clearing the shape is always allowed.
            self._shape = None
            return
        if not nest.is_sequence(value):
            raise TypeError(
                'The <shape> should be a sequence. Got {}.'.format(
                    type(value).__name__))
        # Store a flat, immutable copy of the dimensions.
        self._shape = tuple(nest.flatten(value))
Example #11
0
    def __call__(self, inputs, **kwargs):
        """The preprocessor for ``self.forward(...)``."""
        # Build the layer from the input shapes on first use.
        if not self._built:
            flat_inputs = nest.flatten(inputs)
            shapes = None
            if all(hasattr(x, 'shape') for x in flat_inputs):
                shapes = [x.shape for x in flat_inputs]
                # A non-sequence input is built from its single shape.
                if not nest.is_sequence(inputs):
                    shapes = shapes[0]
            self.build(shapes)

        # Delegate to the forward implementation.
        outputs = self.forward(inputs, **kwargs)

        # Track the graph nodes unless they are frozen.
        if not self._nodes_fixed:
            self._add_node(inputs, outputs)

        return outputs
Example #12
0
File: native.py  Project: ORG-MARS/dragon
    def graph_def_to_onnx_graph(
        cls,
        graph_def,
        input_names=None,
        output_names=None,
        input_shapes=None,
        constants=None,
        value_info=None,
        opset_version=None,
        workspace=None,
        verbose=True,
    ):
        """Convert a graph definition to an ONNX ``GraphProto``.

        Parameters
        ----------
        graph_def : GraphDef
            The graph definition to convert.
        input_names : Sequence[str], optional
            Aliases for the graph inputs, matched by position.
        output_names : Sequence[str], optional
            Aliases for the graph outputs, matched by position.
        input_shapes : Union[Sequence, Dict], optional
            Shapes overriding those in ``value_info`` (e.g. ``-1`` for
            dynamic dimensions in runtimes like TensorRT).
        constants : dict, optional
            Extra name -> value tensors to add as initializers.
        value_info : dict, optional
            Mapping of blob name to ``(dtype, shape)``.
        opset_version : int, optional
            The ONNX opset version to target.
        workspace : Workspace
            The workspace holding the tensor values.
        verbose : bool, optional, default=True
            Print the readable graph after conversion.

        Returns
        -------
        onnx.GraphProto
            The converted ONNX graph.

        """
        input_names = [] if input_names is None else input_names
        output_names = [] if output_names is None else output_names
        constants = {} if constants is None else constants
        value_info = {} if value_info is None else value_info

        if not nest.is_sequence(input_names):
            raise ValueError('<input_names> should be a sequence.')
        if not nest.is_sequence(output_names):
            raise ValueError('<output_names> should be a sequence.')
        if not isinstance(constants, dict):
            raise ValueError('<constants> should be a dict with name -> value.')
        if not isinstance(value_info, dict):
            raise ValueError('<value_info> should be a dict with name -> (dtype, shape).')

        # Determine the opset version to select exporters.
        # NOTE(review): the opset is only resolved when not given; an
        # explicitly passed value is never validated — confirm intended.
        if opset_version is None:
            opset_version = cls._check_opset_version(opset_version)

        # Create aliases for blobs.
        # Aliases also inherit the value_info of the blob they rename.
        blob_aliases = {}
        for i, alias in enumerate(output_names):
            blob_aliases[graph_def.output[i]] = alias
            workspace.RegisterAlias(graph_def.output[i], alias)
            if graph_def.output[i] in value_info:
                value_info[alias] = value_info[graph_def.output[i]]
        for i, alias in enumerate(input_names):
            blob_aliases[graph_def.input[i]] = alias
            workspace.RegisterAlias(graph_def.input[i], alias)
            if graph_def.input[i] in value_info:
                value_info[alias] = value_info[graph_def.input[i]]

        # Maybe rewrite the input shapes for future development.
        # A common case is that we should fill ``-1`` for dynamic dimension
        # in the inference runtime like TensorRT.
        if input_shapes is not None:
            if isinstance(input_shapes, dict):
                for k, v in input_shapes.items():
                    value_info[k] = (value_info[k][0], v)
            else:
                # Positional shapes pair with graph inputs in order.
                for k, v in zip(graph_def.input[:], input_shapes):
                    value_info[k] = (value_info[k][0], v)

        # Prepare to make the graph.
        onnx_graph = onnx.GraphProto(name=graph_def.name
                                     if len(graph_def.name) > 0
                                     else 'onnx-model')
        blob_shapes, blob_names = {}, {}
        # Seed SSA versions: every pre-existing input starts at version 1.
        blob_versions = collections.defaultdict(
            int, **dict((blob_aliases.get(k, k), 1)
                        for k in helper.collect_inputs(graph_def)))
        initializers, seen_initializers = [], set()

        # Build translator context.
        context = export_util.TranslatorContext(
            workspace=workspace,
            blob_names=blob_names,
            blob_shapes=blob_shapes,
            blob_versions=blob_versions,
            opset_version=opset_version,
        )

        # Add nodes.
        for op in graph_def.op:
            # Get the shape of inputs and outputs.
            # Prefer the live workspace tensor; fall back to value_info.
            for name in itertools.chain(op.input, op.output):
                impl = workspace.GetTensor(name)
                if impl is not None:
                    blob_shapes[name] = impl.dims
                else:
                    blob_shapes[name] = value_info[name][1]

            # Translate definition.
            nodes, const_tensors = cls._make_node(op, context)

            # Rewritten for names.
            for node in nodes:
                node.input[:] = [blob_aliases.get(e, e) for e in node.input]
                node.output[:] = [blob_aliases.get(e, e) for e in node.output]
                cls._rewrite_for_ssa(node, context)

            # Convert constant outputs if necessary.
            # NOTE(review): a ``None`` entry in <nodes> appears to be the
            # exporter's signal to fold the op into constants — confirm
            # against cls._make_node implementations.
            if None in nodes:
                const_tensors = [helper.from_tensor(name, workspace)
                                 for name in op.output]
            else:
                onnx_graph.node.extend(nodes)

            # Merge constant tensors.
            if const_tensors is not None:
                value_info = {**value_info,
                              **dict((e.name, (e.data_type, e.dims))
                                     for e in const_tensors)}
                for tensor in const_tensors:
                    if tensor.name not in seen_initializers:
                        initializers.append(tensor)
                        seen_initializers.add(tensor.name)

        # Add constants.
        if constants is not None:
            for k, v in constants.items():
                initializers.append(helper.from_array(v, name=k))

        # Add inputs.
        # Inputs without value_info are resolved from the workspace and
        # promoted to initializers; otherwise their info is required.
        for name in helper.collect_inputs(onnx_graph):
            try:
                onnx_graph.input.extend([
                    helper.make_tensor_value_info(
                        name=name,
                        elem_type=value_info[name][0],
                        shape=value_info[name][1])])
            except KeyError:
                impl = workspace.GetTensor(name)
                if impl is not None:
                    initializer = helper.from_tensor(name, workspace)
                    onnx_graph.input.extend([
                        helper.make_tensor_value_info(
                            name=name,
                            elem_type=initializer.data_type,
                            shape=initializer.dims)])
                    if name not in seen_initializers:
                        initializers.append(initializer)
                        seen_initializers.add(initializer.name)
                else:
                    raise ValueError(
                        'Info of tensor `{}` is missing, '
                        'specify it in <value_info>.'.format(name))

        # Add initializers.
        onnx_graph.initializer.extend(initializers)

        # Add outputs.
        onnx_graph.output.extend(
            helper.make_tensor_value_info(
                name=blob_names.get(name_v2, name_v2),
                elem_type=value_info[name_v2][0],
                shape=value_info[name_v2][1])
            for name_v2 in [blob_aliases.get(name, name)
                            for name in set(graph_def.output)])

        if verbose:
            print(helper.printable_graph(onnx_graph))

        return onnx_graph
Example #13
0
def checkpoint_sequential(functions, input, segments=1, **kwargs):
    """Apply functions and create segmental checkpoints.

    Parameters
    ----------
    functions : Union[torch.nn.Sequential, Sequence[callable]]
        The functions to apply sequentially.
    input : dragon.vm.torch.Tensor
        The input tensor.
    segments : Union[int, Sequence[int]], optional
        The number or size of chunked checkpoints.

    Returns
    -------
    Any
        The function outputs.

    """
    def make_segment(first, last, fns):
        # Run fns[first:last] normally; the segment's final function
        # executes outside checkpointing.
        def forward(x):
            for idx in range(first, last):
                x = fns[idx](x)
            with no_checkpoint():
                x = fns[last](x)
            return x
        return forward

    preserve_rng_state = kwargs.pop('preserve_rng_state', True)
    variable_scope = kwargs.pop('variable_scope', 'Buffer')
    if kwargs:
        raise ValueError('Unexpected keyword arguments: ' +
                         ','.join(kwargs))

    if isinstance(functions, Sequential):
        functions = list(functions.children())
    num_functions = len(functions)

    # Without gradients there is nothing to checkpoint: run straight through.
    if not grad_mode.is_grad_enabled():
        return make_segment(0, num_functions - 1, functions)(input)

    if nest.is_sequence(segments):
        size_segments = segments
        if sum(size_segments) != num_functions:
            raise ValueError('Failed to chunk {} functions into {} segments.'
                             .format(num_functions, segments))
    else:
        # Even chunks of ceil(n / segments); the last takes the remainder.
        chunk = (num_functions + segments - 1) // segments
        tail = num_functions - chunk * (segments - 1)
        if tail <= 0:
            raise ValueError('Failed to chunk {} functions into {} segments.'
                             .format(num_functions, segments))
        size_segments = [chunk] * (segments - 1) + [tail]

    start = 0
    for size in size_segments:
        end = start + size - 1
        input = checkpoint(
            make_segment(start, end, functions), input,
            preserve_rng_state=preserve_rng_state,
            variable_scope=variable_scope)
        start = end + 1
    return input