Example #1
def expand_dims(input, axis, name=None):
    """
    Inserts a dimension of 1 into a tensor's shape.

    Given a tensor input, this operation inserts a dimension \
    of 1 at the dimension index axis of input's shape. \
    The dimension index axis starts at zero; \
    if you specify a negative number for axis it is counted backward from the end.

    Args:
        input: A Tensor.
    """
    shape = input.shape
    rank = bt.get_rank(shape)
    axis = util.to_axis(axis, rank)[0]

    new_shape = []
    new_shape.extend(shape[:axis])
    new_shape.append(1)
    new_shape.extend(shape[axis:])

    if axis == rank:
        return _lazy_reshape(input, new_shape)

    return bt._View(input, shape=new_shape, name=name)
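
The shape arithmetic above can be checked standalone. A minimal sketch, assuming util.to_axis normalizes a negative axis by counting backward from the end (that helper is not shown here, so its exact behavior is an assumption):

def expand_dims_shape(shape, axis):
    # Hypothetical standalone mimic of the shape computation above.
    # Assumption: a negative axis counts backward from the end.
    rank = len(shape)
    if axis < 0:
        axis += rank + 1
    return tuple(shape[:axis]) + (1,) + tuple(shape[axis:])

print(expand_dims_shape((3, 4), 0))   # (1, 3, 4)
print(expand_dims_shape((3, 4), -1))  # (3, 4, 1)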
Example #2
    def __init__(self, a, perm=None, dtype=None, name=None):

        # Promote a 1-D input to a row vector so a transpose is defined.
        if bt.get_rank(a.shape) == 1:
            a_shape = (1, a.shape[0])
        else:
            a_shape = a.shape

        # Default permutation: reverse all axes (a full transpose).
        if perm is None:
            perm = tuple(reversed(range(bt.get_rank(a_shape))))
        self.perm = perm

        # The output shape is the input shape permuted by perm.
        shape = tuple(a_shape[p] for p in perm)

        bt._Operator.__init__(self, a, dtype=dtype, shape=shape, name=name)
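
A standalone illustration of the permutation logic (a hypothetical helper, not part of the library):

def transpose_shape(a_shape, perm=None):
    if len(a_shape) == 1:
        a_shape = (1, a_shape[0])  # promote 1-D input to a row vector
    if perm is None:
        perm = tuple(reversed(range(len(a_shape))))  # full reversal by default
    return tuple(a_shape[p] for p in perm)

print(transpose_shape((2, 3, 4)))             # (4, 3, 2)
print(transpose_shape((2, 3, 4), (0, 2, 1)))  # (2, 4, 3)
print(transpose_shape((5,)))                  # (5, 1)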
Example #3
    def __init__(self, values, axis, dtype=None, name=None):
        rank = bt.get_rank(values[0].shape)
        _dtype = values[0].dtype

        # All inputs must agree in rank and dtype.
        for value in values:
            r = bt.get_rank(value.shape)
            if r != rank:
                raise ValueError('all values must have the same rank: %d != %d' %
                                 (r, rank))
            d = value.dtype
            if d != _dtype:
                raise ValueError(
                    'all values must have the same dtype: %s != %s' %
                    (d, _dtype))

        if isinstance(axis, (tuple, list)):
            raise TypeError('axis must be int, not tuple or list.')

        axis = util.to_axis(axis, rank)[0]

        # The output size along axis is the sum of the inputs' sizes there;
        # every other dimension must match across all inputs.
        shape = []
        for i in range(rank):
            size = 0
            for value in values:
                if i == axis:
                    size += value.shape[i]
                else:
                    if size != 0 and size != value.shape[i]:
                        raise ValueError(
                            'all values must have the same shape, excluding axis: %d != %d'
                            % (size, value.shape[i]))
                    size = max(size, value.shape[i])

            shape.append(size)

        shape = tuple(shape)

        bt._Operator.__init__(self,
                              *values,
                              dtype=dtype,
                              shape=shape,
                              name=name)
        self.axis = axis
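
The resulting shape rule is easy to verify by hand; a minimal sketch of the same computation (hypothetical helper, validation omitted):

def concat_shape(shapes, axis):
    # Sum the sizes along axis; all other dimensions must already match.
    out = list(shapes[0])
    for s in shapes[1:]:
        out[axis] += s[axis]
    return tuple(out)

print(concat_shape([(2, 3), (2, 5)], axis=1))  # (2, 8)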
Example #4
def to_conv2d_act_shape(shape):
    # Fold an arbitrary activation shape into conv2d's
    # (batch, row, col, channel) layout: all leading dimensions collapse
    # into row, and the last dimension becomes the channels.
    bat = 1
    if bt.get_rank(shape) == 1:
        row = 1
    else:
        row = functools.reduce(lambda x, y: x * y, shape[:-1], 1)
    col = 1
    ch = shape[-1]

    return (bat, row, col, ch)
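
For instance, an (8, 16, 32) activation folds into the conv2d layout like this (plain-Python restatement of the function above):

import functools

shape = (8, 16, 32)
row = functools.reduce(lambda x, y: x * y, shape[:-1], 1)
print((1, row, 1, shape[-1]))  # (1, 128, 1, 32)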
Example #5
    def __init__(self, input_tensor,
                 axis=None, keep_dims=False, dtype=None, name=None, par=1):

        rank = bt.get_rank(input_tensor.shape)
        axis = util.to_axis(axis, rank)
        shape = util.to_reduce_shape(input_tensor.shape, axis, keep_dims)

        bt._ReductionOperator.__init__(self, input_tensor,
                                       dtype=dtype, shape=shape, name=name,
                                       axis=axis, keep_dims=keep_dims, par=par)
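
util.to_reduce_shape is not shown here; a plausible mimic of its shape rule (assumption: reduced axes become 1 when keep_dims is set and are dropped otherwise):

def reduce_shape(shape, axes, keep_dims):
    # Hypothetical stand-in for util.to_reduce_shape.
    if keep_dims:
        return tuple(1 if i in axes else s for i, s in enumerate(shape))
    return tuple(s for i, s in enumerate(shape) if i not in axes)

print(reduce_shape((2, 3, 4), {1}, keep_dims=True))   # (2, 1, 4)
print(reduce_shape((2, 3, 4), {1}, keep_dims=False))  # (2, 4)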
Example #6
def to_conv2d_weight_shape(shape):
    # expects transposed data: the leading dimensions collapse into the
    # output channels, and the last dimension holds the input channels
    if bt.get_rank(shape) == 1:
        och = 1
    else:
        och = functools.reduce(lambda x, y: x * y, shape[:-1], 1)
    row = 1
    col = 1
    ich = shape[-1]

    return (och, row, col, ich)
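
For example, a transposed (64, 32) weight matrix becomes a bank of 64 1x1 filters over 32 input channels:

import functools

shape = (64, 32)
och = functools.reduce(lambda x, y: x * y, shape[:-1], 1)
print((och, 1, 1, shape[-1]))  # (64, 1, 1, 32)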
Example #7
    def get_control_param_values(self):
        buffered = False

        # Use an intermediate buffer if any argument's dtype differs from
        # the output dtype.
        for arg in self.args:
            if self.dtype != arg.dtype:
                buffered = True

        # Also buffer when concatenating along the last axis and an
        # argument's last dimension is not word-aligned.
        if self.axis == bt.get_rank(self.shape) - 1:
            for arg in self.args:
                if arg.shape[-1] != arg.get_aligned_shape()[-1]:
                    buffered = True

        # for __str__
        self.buffered_value = buffered

        aligned_shape = self.get_aligned_shape()
        aligned_length = self.get_aligned_length()

        arg_read_sizes = [arg.shape[-1] for arg in self.args]
        arg_addr_incs = [
            bt.to_byte(
                bt.align_word(arg.shape[-1], arg.get_word_alignment()) *
                arg.get_ram_width()) for arg in self.args
        ]

        arg_chunk_sizes = [
            functools.reduce(lambda x, y: x * y, arg.shape[self.axis:-1], 1)
            for arg in self.args
        ]

        out_write_size = aligned_shape[-1]
        out_addr_inc = bt.to_byte(
            bt.align_word(self.shape[-1], self.get_word_alignment()) *
            self.get_ram_width())

        num_steps = int(math.ceil(aligned_length / out_write_size))
        if not buffered:
            num_steps *= len(self.args)

        return OrderedDict([('buffered', buffered),
                            ('arg_read_sizes', arg_read_sizes),
                            ('arg_addr_incs', arg_addr_incs),
                            ('arg_chunk_sizes', arg_chunk_sizes),
                            ('out_write_size', out_write_size),
                            ('out_addr_inc', out_addr_inc),
                            ('num_steps', num_steps)])
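
The step-count bookkeeping at the end is plain arithmetic; with hypothetical numbers (an aligned output of 1024 words written 16 at a time, fed by 3 arguments):

import math

aligned_length, out_write_size, num_args = 1024, 16, 3

num_steps = int(math.ceil(aligned_length / out_write_size))  # 64
buffered = False
if not buffered:
    # Unbuffered mode streams each argument in turn, so the step count
    # scales with the number of arguments.
    num_steps *= num_args  # 192
print(num_steps)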
Example #8
    def __init__(self,
                 value,
                 ksize,
                 strides,
                 padding='SAME',
                 dtype=None,
                 name=None,
                 par=1,
                 value_ram_size=None,
                 out_ram_size=None):

        if isinstance(padding, str) and padding not in ('SAME', 'VALID'):
            raise ValueError(
                "padding options must be 'SAME', 'VALID', int, tuple, or list."
            )
        elif isinstance(padding, (tuple, list)) and len(padding) != 4:
            raise ValueError('padding rank must be 4.')

        if bt.get_rank(value.shape) != 4:
            raise ValueError('rank of value must be 4.')

        if len(ksize) != 4:
            raise ValueError('rank of ksize must be 4.')

        if len(strides) != 4:
            raise ValueError('rank of strides must be 4.')

        if ksize[0] != 1 or ksize[3] != 1:
            raise ValueError('ksize[0] and ksize[3] must be 1.')

        if strides[0] != 1 or strides[3] != 1:
            raise ValueError('strides[0] and strides[3] must be 1.')

        if isinstance(padding, str):  # 'SAME' or 'VALID', validated above
            shape = []
            shape.append(int(math.ceil(value.shape[0] / strides[0])))
            for sh, st, fs in list(zip(value.shape, strides, ksize))[1:-1]:
                shape.append(util.pix_size(sh, fs, st, padding))
            shape.append(int(math.ceil(value.shape[3] / strides[3])))
        elif isinstance(padding, int):
            shape = []
            shape.append(int(math.ceil(value.shape[0] / strides[0])))
            for sh, st, fs in list(zip(value.shape, strides, ksize))[1:-1]:
                shape.append(util.pix_size(sh + padding * 2, fs, st, 'VALID'))
            shape.append(int(math.ceil(value.shape[3] / strides[3])))
        elif isinstance(padding, (tuple, list)):
            shape = []
            shape.append(int(math.ceil(value.shape[0] / strides[0])))
            for i, (sh, st, fs) in enumerate(
                    list(zip(value.shape, strides, ksize))[1:-1]):
                pd0 = padding[i * 2]
                pd1 = padding[i * 2 + 1]
                shape.append(util.pix_size(sh + pd0 + pd1, fs, st, 'VALID'))
            shape.append(int(math.ceil(value.shape[3] / strides[3])))
        else:
            # Reject unsupported padding types instead of falling through
            # with `shape` undefined.
            raise TypeError(
                "padding must be 'SAME', 'VALID', int, tuple, or list.")

        shape = tuple(shape)

        if value_ram_size is not None and value_ram_size < 1:
            raise ValueError('value_ram_size must be greater than 0')

        if out_ram_size is not None and out_ram_size < 1:
            raise ValueError('out_ram_size must be greater than 0')

        bt._Operator.__init__(self,
                              value,
                              dtype=dtype,
                              shape=shape,
                              name=name,
                              par=par)

        self.ksize = tuple(ksize)
        self.strides = tuple(strides)
        self.padding = padding

        # attribute
        self.value_ram_size = value_ram_size
        self.out_ram_size = out_ram_size
        _pool.attribute(self, par, value_ram_size, out_ram_size)
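
util.pix_size is not shown here; assuming it follows the usual TensorFlow conventions ('SAME': ceil(size / stride); 'VALID': ceil((size - ksize + 1) / stride), an assumption), the 'SAME' branch above reduces to:

import math

def pix_size(size, ksize, stride, padding):
    # Assumed TensorFlow-style semantics; the real util.pix_size may differ.
    if padding == 'SAME':
        return int(math.ceil(size / stride))
    return int(math.ceil((size - ksize + 1) / stride))  # 'VALID'

# 2x2 pooling with stride 2 on a (1, 28, 28, 8) activation:
value_shape, ksize, strides = (1, 28, 28, 8), (1, 2, 2, 1), (1, 2, 2, 1)
shape = [int(math.ceil(value_shape[0] / strides[0]))]
for sh, st, fs in list(zip(value_shape, strides, ksize))[1:-1]:
    shape.append(pix_size(sh, fs, st, 'SAME'))
shape.append(int(math.ceil(value_shape[3] / strides[3])))
print(tuple(shape))  # (1, 14, 14, 8)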
Example #9
    def __init__(self,
                 a,
                 b,
                 bias=None,
                 scale=None,
                 transposed_a=False,
                 transposed_b=False,
                 rshift_mul=None,
                 rshift_sum=None,
                 rshift_out=None,
                 act_func=None,
                 dtype=None,
                 mul_dtype=None,
                 sum_dtype=None,
                 name=None,
                 par_left_col=1,
                 par_left_row=1,
                 par_out_col=1,
                 concur_out_col=None,
                 stationary='right',
                 left_ram_size=None,
                 right_ram_size=None,
                 bias_ram_size=None,
                 scale_ram_size=None,
                 vshamt_mul_ram_size=None,
                 vshamt_sum_ram_size=None,
                 vshamt_out_ram_size=None,
                 out_ram_size=None,
                 disable_keep_left=False):

        if transposed_a:
            # A was given transposed: swap its last two axes back to the
            # natural layout expected by the conv2d kernel.
            perm = list(range(bt.get_rank(to_shape_2d(a.shape))))
            perm[-2], perm[-1] = perm[-1], perm[-2]
            a = basic.transpose(a, perm=perm)

        if not transposed_b:
            # matrix B must be transposed for fast computation
            perm = list(range(bt.get_rank(to_shape_2d(b.shape))))
            perm[-2], perm[-1] = perm[-1], perm[-2]
            b = basic.transpose(b, perm=perm)

        self.transposed_a = transposed_a
        self.transposed_b = transposed_b

        input_shape = to_conv2d_act_shape(a.shape)
        input = a

        filter_shape = to_conv2d_weight_shape(b.shape)
        filter = b

        strides = (1, 1, 1, 1)
        padding = 'SAME'
        out_shape = tuple(
            list(to_shape_2d(a.shape)[:-2]) +
            [to_shape_2d(a.shape)[-2],
             to_shape_2d(b.shape)[-2]])

        if stationary == 'right':
            stationary = 'filter'
        elif stationary == 'left':
            stationary = 'input'
        else:
            raise ValueError("stationary must be 'left' or 'right'")

        conv2d.conv2d.__init__(
            self, input, filter, strides, bias, scale, rshift_mul, rshift_sum,
            rshift_out, act_func, padding, dtype, mul_dtype, sum_dtype, name,
            par_left_col, par_out_col, par_left_row, 1, concur_out_col,
            stationary, left_ram_size, right_ram_size, bias_ram_size,
            scale_ram_size, vshamt_mul_ram_size, vshamt_sum_ram_size,
            vshamt_out_ram_size, out_ram_size, disable_keep_left, input_shape,
            filter_shape, out_shape)
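
Taken together with Examples #4, #6, and #10, the shape plumbing can be checked with plain numbers. A self-contained restatement for a hypothetical (128, 64) x (64, 32) matmul (B arrives pre-transposed as (32, 64)):

import functools

def prod(xs):
    return functools.reduce(lambda x, y: x * y, xs, 1)

a_shape = (128, 64)  # (M, K)
b_shape = (32, 64)   # (N, K): B after basic.transpose

input_shape = (1, prod(a_shape[:-1]), 1, a_shape[-1])   # to_conv2d_act_shape
filter_shape = (prod(b_shape[:-1]), 1, 1, b_shape[-1])  # to_conv2d_weight_shape
out_shape = (a_shape[-2], b_shape[-2])                  # (M, N)

print(input_shape)   # (1, 128, 1, 64)
print(filter_shape)  # (32, 1, 1, 64)
print(out_shape)     # (128, 32)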
Example #10
def to_shape_2d(shape):
    # Promote a 1-D shape to a row vector; higher ranks pass through.
    if bt.get_rank(shape) == 1:
        return (1, shape[0])

    return shape