Code Example #1
File: op_spec.py  Project: seetaresearch/dragon
def reshape_spec(args, inputs, outputs):
    outputs[0]._dtype = inputs[0].dtype
    try:
        shape = args['dims']
        out_shape = []
        n_elements, n_elements_known = None, None
        try:
            # First pass: 0 copies the input dimension and -1 is
            # a placeholder to be inferred from the element count.
            for i, s in enumerate(shape):
                if s == -1:
                    out_shape.append(1)
                elif s == 0:
                    out_shape.append(inputs[0].shape[i])
                else:
                    out_shape.append(s)
        except IndexError:
            out_shape = None
        try:
            n_elements = math_util.prod(inputs[0].shape)
            n_elements_known = math_util.prod(out_shape)
        except TypeError:
            pass
        # Second pass: resolve the -1 placeholder, or fall back
        # to None when the element counts are unknown.
        for i, s in enumerate(shape):
            if s == -1:
                try:
                    out_shape[i] = n_elements // n_elements_known
                except TypeError:
                    out_shape[i] = None
    except (KeyError, TypeError):
        out_shape = None
    outputs[0]._shape = tuple(out_shape) if out_shape is not None else None
    return outputs
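
A rough usage sketch follows. _StubMathUtil and _StubTensor are hypothetical stand-ins for dragon's math_util module and its tensor objects (not part of the library), assuming math_util.prod multiplies a sequence of dimensions; the sketch presumes reshape_spec above lives in the same module:

import functools
import operator

class _StubMathUtil(object):
    # Assumed behavior of math_util.prod: multiply a sequence
    # (raises TypeError on None, which the spec functions rely on).
    @staticmethod
    def prod(seq):
        return functools.reduce(operator.mul, seq, 1)

math_util = _StubMathUtil()

class _StubTensor(object):
    # Hypothetical tensor carrying only the attributes the
    # spec functions read and write.
    def __init__(self, shape, dtype='float32'):
        self.shape, self.dtype = shape, dtype
        self._shape, self._dtype = None, None

x = _StubTensor((2, 3, 4))
y = _StubTensor(None)
reshape_spec({'dims': [0, -1]}, [x], [y])
print(y._shape)  # (2, 12): dim 0 copied, -1 inferred as 24 // 2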
Code Example #2
def flatten_spec(args, inputs, outputs):
    outputs[0].dtype = inputs[0].dtype
    keep_axes = args['keep_axes']
    axis, num_axes = args['axis'], args['num_axes']
    if keep_axes is not None:
        out_shape = [None] * keep_axes
    else:
        out_shape = None
    try:
        in_shape = list(inputs[0].shape[:])
        if keep_axes is not None:
            # Keep the leading axes and fold everything else
            # into the last kept axis.
            if len(in_shape) <= keep_axes:
                out_shape[:len(in_shape)] = in_shape
            else:
                for i in range(keep_axes - 1):
                    out_shape[i] = in_shape[i]
                try:
                    out_shape[keep_axes - 1] = \
                        math_util.prod(in_shape[keep_axes - 1:])
                except (TypeError, IndexError):
                    out_shape[keep_axes - 1] = None
        else:
            # Flatten ``num_axes`` axes starting from ``axis``.
            if num_axes == -1:
                num_axes = len(in_shape) - axis
            num_axes = max(num_axes, 1)
            try:
                num_flatten = math_util.prod(in_shape[axis:axis + num_axes])
            except TypeError:
                num_flatten = None
            out_shape = in_shape[:axis] + [num_flatten] + in_shape[axis + num_axes:]
    except (TypeError, IndexError):
        pass
    outputs[0].shape = out_shape
    return outputs
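
Reusing the hypothetical stubs from the sketch under Code Example #1, both branches can be exercised directly:

x = _StubTensor((2, 3, 4, 5))
y = _StubTensor(None)
flatten_spec({'keep_axes': 2, 'axis': 0, 'num_axes': -1}, [x], [y])
print(y.shape)  # [2, 60]: trailing axes folded into the last kept axis
flatten_spec({'keep_axes': None, 'axis': 1, 'num_axes': 2}, [x], [y])
print(y.shape)  # [2, 12, 5]: axes 1 and 2 merged into 3 * 4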
Code Example #3
    def numel(self):
        """Return the total number of elements.

        Returns
        -------
        int
            The number of elements.

        """
        return math_util.prod(self)
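
numel only needs the object to be its own sequence of dimensions. A minimal Size-like tuple subclass (hypothetical, reusing the prod stand-in from the first sketch) behaves as:

class _Size(tuple):
    def numel(self):
        return math_util.prod(self)

print(_Size((2, 3, 4)).numel())  # 24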
Code Example #4
    def size(self):
        """Return the total number of elements in this tensor.

        Returns
        -------
        number
            The total count of elements, 0 if the shape is unknown,
            or infinity if the shape is only partially known.

        """
        if self._shape is None:
            return 0
        if None in self._shape:
            return numpy.inf
        return math_util.prod(self._shape)
Code Example #5
    def size(self):
        """Return the total number of elements in this tensor.

        Returns
        -------
        number
            The total count of elements, 0 if the shape is unknown,
            or infinity if the shape is only partially known.

        """
        if self._is_variable:
            # Delegate to the backend implementation when available.
            return self._impl.size
        if self._shape is None:
            return 0
        if None in self._shape:
            return float('inf')
        return math_util.prod(self._shape)
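
The three shape branches shared by this variant and Code Example #4 are easy to check with a hypothetical minimal object (the _impl path is skipped here):

class _SymbolicTensor(object):
    _is_variable = False

    def __init__(self, shape):
        self._shape = shape

    def size(self):
        if self._shape is None:
            return 0
        if None in self._shape:
            return float('inf')
        return math_util.prod(self._shape)

print(_SymbolicTensor(None).size())       # 0 (unknown shape)
print(_SymbolicTensor((2, None)).size())  # inf (partially known)
print(_SymbolicTensor((2, 3)).size())     # 6 (fully known)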
Code Example #6
def _calculate_fan_in_and_fan_out(tensor):
    """Return the fan values according to the tensor size."""
    dimensions = tensor.ndimension()
    if dimensions < 2:
        raise ValueError("Expected 2 or higher tensor dimensions.")
    if dimensions == 2:
        fan_in = tensor.size(1)
        fan_out = tensor.size(0)
    else:
        num_input = tensor.size(1)
        num_output = tensor.size(0)
        # Convolution kernels: the spatial axes form the receptive field.
        receptive_field_size = math_util.prod(tensor.shape[2:])
        fan_in = num_input * receptive_field_size
        fan_out = num_output * receptive_field_size
    return fan_in, fan_out
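
For a conv weight of shape (64, 32, 3, 3), the receptive field is 3 * 3 = 9, giving fan_in = 32 * 9 = 288 and fan_out = 64 * 9 = 576. A hypothetical stand-in tensor makes this checkable:

class _FakeTensor(object):
    # Exposes just the methods the helper reads.
    def __init__(self, shape):
        self.shape = shape

    def ndimension(self):
        return len(self.shape)

    def size(self, i):
        return self.shape[i]

print(_calculate_fan_in_and_fan_out(_FakeTensor((64, 32, 3, 3))))
# (288, 576)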
Code Example #7
File: op_spec.py  Project: seetaresearch/dragon
def flatten_spec(args, inputs, outputs):
    outputs[0]._dtype = inputs[0].dtype
    axis = args['axis']
    end_axis = args.get('end_axis', None)
    # ``end_axis`` is inclusive and defaults to ``axis``,
    # i.e., a single axis is flattened onto itself.
    end_axis = axis if end_axis is None else end_axis
    try:
        in_shape = list(inputs[0].shape[:])
        out_shape = in_shape[:axis]
        num_axes = len(in_shape[axis:end_axis]) + 1
        try:
            out_shape += [math_util.prod(in_shape[axis:axis + num_axes])]
        except TypeError:
            out_shape += [None]
        out_shape += in_shape[axis + num_axes:]
        outputs[0]._shape = tuple(out_shape)
    except (TypeError, IndexError):
        outputs[0]._shape = None
    return outputs
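
With the same hypothetical stubs as before:

x = _StubTensor((2, 3, 4, 5))
y = _StubTensor(None)
flatten_spec({'axis': 1, 'end_axis': 2}, [x], [y])
print(y._shape)  # (2, 12, 5): axes 1 through 2 merged into 3 * 4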
Code Example #8
File: rnn_ops.py  Project: seetaresearch/dragon
    def _create_weights(self):
        """Create the flat weights."""
        gate_size = self._hidden_size * self._num_gates
        # Compute the shapes of the weights and biases.
        matrix_shapes, bias_shapes = [], []
        for layer in range(self._num_layers):
            for direction in range(self._num_directions):
                layer_input_size = self._input_size if layer == 0 \
                    else self._hidden_size * self._num_directions
                w_ih_shape = [gate_size, layer_input_size]
                w_hh_shape = [gate_size, self._hidden_size]
                b_ih_shape, b_hh_shape = [gate_size], [gate_size]
                matrix_shapes.extend([w_ih_shape, w_hh_shape])
                bias_shapes.extend([b_ih_shape, b_hh_shape])
        # Create a single flat float32 weight tensor.
        weights_count = 0
        self._weights_shapes = matrix_shapes + bias_shapes
        for shape in self._weights_shapes:
            weights_count += math_util.prod(shape)
        self._weights = Tensor([weights_count])
        self._weights.requires_grad = True
Code Example #9
File: rnn.py  Project: seetaresearch/dragon
    def flatten_parameters(self):
        """Flatten the parameters into a single weight tensor."""
        gate_size = self._num_gates * self.hidden_size
        # Compute the shapes of the weights and biases.
        matrix_shapes, bias_shapes = [], []
        for layer in range(self.num_layers):
            for direction in range(int(self.bidirectional) + 1):
                layer_input_size = self.input_size if layer == 0 \
                    else self.hidden_size * self.num_directions
                w_ih_shape = [gate_size, layer_input_size]
                w_hh_shape = [gate_size, self.hidden_size]
                b_ih_shape, b_hh_shape = [gate_size], [gate_size]
                matrix_shapes.extend([w_ih_shape, w_hh_shape])
                bias_shapes.extend([b_ih_shape, b_hh_shape])
        # Compute the total number of parameters.
        self._weights_count = 0
        self._weights_shapes = matrix_shapes + bias_shapes
        for shape in self._weights_shapes:
            self._weights_count += math_util.prod(shape)
        # Create the flat float32 weights.
        self.weights = Parameter(Tensor(self._weights_count))
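
As a worked check, take a single-layer unidirectional LSTM (4 gates) with input_size=3 and hidden_size=4, so gate_size = 16: the shapes are [16, 3], [16, 4], [16], [16], and the flat weight holds 48 + 64 + 16 + 16 = 144 parameters. With the prod stand-in from earlier:

shapes = [[16, 3], [16, 4], [16], [16]]
print(sum(math_util.prod(s) for s in shapes))  # 144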
Code Example #10
File: op_spec.py  Project: seetaresearch/dragon
def repeat_spec(args, inputs, outputs):
    outputs[0]._dtype = inputs[0].dtype
    if 'repeats_desc' in args:
        # Repeats given via a descriptor: no static shape to infer.
        return outputs
    axis, repeats = args['axis'], args['repeats']
    if axis is None:
        # Without an axis, the input is flattened and then repeated.
        try:
            num_elements = math_util.prod(inputs[0].shape[:])
            outputs[0]._shape = (num_elements * repeats,)
        except TypeError:
            outputs[0]._shape = (None,)
    else:
        try:
            out_shape = list(inputs[0].shape[:])
        except TypeError:
            return outputs
        if axis < len(out_shape):
            try:
                out_shape[axis] *= repeats
            except TypeError:
                out_shape[axis] = None
        outputs[0]._shape = tuple(out_shape)
    return outputs
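
Again with the hypothetical stubs:

x = _StubTensor((2, 3))
y = _StubTensor(None)
repeat_spec({'axis': None, 'repeats': 2}, [x], [y])
print(y._shape)  # (12,): all 6 elements flattened, then repeated twice
repeat_spec({'axis': 1, 'repeats': 2}, [x], [y])
print(y._shape)  # (2, 6)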