Example #1
def transpose_test():
    for _ in range(10):
        for shape_len in range(2, 5):
            try:
                shape = np.random.randint( 8, size=(shape_len,) )+1
                axes_order = np.array([*range(shape_len)])
                np.random.shuffle(axes_order)

                val_n = np.random.randint( 2**8, size=shape ).astype(np.float32)
                transposed_n = np.transpose(val_n, axes_order)
                val_t = nn.Tensor_from_value(val_n)
                transposed_t = nn.transpose(val_t, axes_order)

                if transposed_n.shape != transposed_t.shape:
                    raise Exception('shape is not equal')
                if not all ( np.ndarray.flatten( transposed_t.np() == transposed_n ) ):
                    raise Exception(f'data is not equal {shape} {axes_order}')

                transposed_n_grad = np.random.randint( 2**8, size=transposed_n.shape ).astype(np.float32)

                val_t.get_grad().fill(1.0)  # pre-fill grad with 1.0 to verify backward adds to the existing gradient
                nn.backward( {transposed_t:transposed_n_grad} , grad_for_non_trainables=True )

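                # Transposing the input grad forward by axes_order must reproduce the upstream
                # grad, since transpose backprop applies the inverse permutation.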
                if not all ( np.ndarray.flatten(np.transpose(val_t.get_grad().np()-1.0, axes_order) == transposed_n_grad) ):
                    raise Exception(f'grad is not equal')
            except:
                raise Exception(f"""
shape              : {shape}
axes_order         : {axes_order}
transposed_n_shape : {transposed_n.shape}
transposed_t_shape : {transposed_t.shape}
exception          : {traceback.format_exc()}
""")

        for size in [2,3,4]:
            try:
                N = 1+np.random.randint(8)
                C = (1+np.random.randint(8))*size*size
                H = W = (1+np.random.randint(8))*size

                shape = (N,C,H,W)

                val_n = np.random.randint( 2**8, size=shape ).astype(np.float32)
                val_t = nn.Tensor_from_value(val_n)

                # assumed to live in the nn namespace like the other ops used here
                d2s_val_t = nn.depth_to_space(val_t, size)
                s2d_val_t = nn.space_to_depth(d2s_val_t, size)

                if not all ( np.ndarray.flatten( val_t.np() == s2d_val_t.np() ) ):
                    raise Exception(f'data is not equal')
            except:
                raise Exception(f"""
depth_to_space/space_to_depth

shape              : {shape}
size               : {size}
exception          : {traceback.format_exc()}
""")
Example #2
def concat_test():
    for _ in range(10):
        for shape_len in range(2, 5):
            try:
                shape = (np.random.randint(8, size=(shape_len, )) + 1).tolist()
                axis = np.random.randint(shape_len)
                count = np.random.randint(4) + 1

                shapes = tuple(
                    tuple(dim if i != axis else np.random.randint(8) + 1
                          for i, dim in enumerate(shape))
                    for _ in range(count))

                vals_n = [
                    np.random.randint(2**8, size=shape).astype(np.float32)
                    for shape in shapes
                ]
                concat_n = np.concatenate(vals_n, axis)

                vals_t = [
                    nn.Tensor_from_value(vals_n[i]) for i in range(count)
                ]
                concat_t = nn.concat(vals_t, axis)

                concat_n_grad = np.random.randint(
                    2**8, size=concat_n.shape).astype(np.float32)

                for t in vals_t:
                    t.get_grad().fill(1.0)  # Check addition to gradient

                nn.backward({concat_t: concat_n_grad},
                            grad_for_non_trainables=True)

                if not all(np.ndarray.flatten(concat_t.np() == concat_n)):
                    raise Exception(f'data is not equal')

                axis_offset = 0
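                # Each input's grad must equal its own span of the upstream grad along `axis`.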
                for n in range(count):
                    axis_size = vals_n[n].shape[axis]
                    axis_slice = slice(axis_offset, axis_offset + axis_size, 1)
                    slices = (slice(None, None, None), ) * axis + (
                        axis_slice, ) + (slice(None, None, None), ) * (
                            len(concat_n.shape) - axis - 1)
                    axis_offset += axis_size

                    if not all(
                            np.ndarray.flatten(vals_t[n].get_grad().np() -
                                               1.0 == concat_n_grad[slices])):
                        raise Exception(f'grad is not equal')

            except:
                raise Exception(f"""
shape       : {shape}
axis        : {axis}
count       : {count}
exception   : {traceback.format_exc()}
""")
Example #3
def resize2D_nearest_test():
    for n in [1, 4]:
        for ic in [1, 2, 4]:
            for iw, ih in zip(*[[4, 8, 16]] * 2):
                for size in [2, 3, 4]:
                    try:
                        input_shape = (n, ic, ih, iw)
                        input_n = np.random.randint(
                            2**4, size=input_shape).astype(np.float32)

                        input_t = nn.Tensor_from_value(input_n)

                        upsampled_t = nn.resize2D_nearest(input_t, size=size)

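                        # Numpy reference: nearest upsampling repeats every pixel
                        # size x size times via reshape + tile.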
                        upsampled_n = input_n.reshape((n, ic, ih, 1, iw, 1))
                        upsampled_n = np.tile(upsampled_n,
                                              (1, 1, 1, size, 1, size))
                        upsampled_n = upsampled_n.reshape(
                            (n, ic, ih * size, iw * size))

                        if upsampled_n.shape != upsampled_t.shape:
                            raise Exception(f'shape is not equal')

                        if not all(
                                np.ndarray.flatten(
                                    upsampled_t.np() == upsampled_n)):
                            raise Exception(f'data is not equal')

                        upsampled_n_grad = np.random.randint(
                            2**8, size=upsampled_n.shape).astype(np.float32)

                        input_t.get_grad().fill(1.0)
                        nn.backward({upsampled_t: upsampled_n_grad},
                                    grad_for_non_trainables=True)

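                        # Backprop of nearest upsampling sums the upstream grad
                        # over each size x size block.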
                        input_n_grad = upsampled_n_grad.reshape(
                            (n, ic, ih, size, iw, size))
                        input_n_grad = input_n_grad.sum((-3, -1))
                        if not all(
                                np.ndarray.flatten(input_t.get_grad().np() -
                                                   1.0) == np.ndarray.flatten(
                                                       input_n_grad)):
                            raise Exception('grad is not equal')
                    except:
                        raise Exception(f"""
input_shape    : {input_shape}
size           : {size}

upsampled_n.shape : {upsampled_n.shape}
upsampled_t.shape : {upsampled_t.shape}
{traceback.format_exc()}
""")
Example #4
def depthwise_conv2d_test():
    for padding in ['same','valid',0,1,2]:
        for dilation in [1,2]:
          for stride in [1,2,3]:
            for ks in [1,3,5,7]:
              for n in [1,4]:
                for ic in [1,2,4]:
                    for ih,iw in zip(*[[4,8,16]]*2):                        
                        if padding == 'valid' and iw < ks:
                            continue
                        try:
                            input_shape  = (n, ic, ih, iw)
                            kernel_shape = (ic, ks, ks)

                            input_n  = np.random.randint( 2**4, size=input_shape ).astype(np.float32)
                            kernel_n = np.random.randint( 2**4, size=kernel_shape ).astype(np.float32)

                            input_t  = nn.Tensor_from_value(input_n)
                            kernel_t = nn.Tensor_from_value(kernel_n)

                            conved_t = nn.depthwise_conv2D(input_t, kernel_t, stride=stride, dilation=dilation, padding=padding)
                            conved_n_grad = np.random.randint( 2**4, size=conved_t.shape).astype(np.float32)
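                            # _numpy_depthwise_conv2d is assumed to return the reference
                            # output plus input and kernel grads for the given upstream grad.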
                            conved_n, dI_val, dK_val = _numpy_depthwise_conv2d(input_n, kernel_n, conved_n_grad, STRIDE=stride, DILATION=dilation, padding=padding)

                            if conved_n.shape != conved_t.shape:
                                raise Exception(f'shape is not equal')

                            if not all ( np.ndarray.flatten( conved_t.np() == conved_n) ):
                                raise Exception(f'data is not equal')

                            input_t.get_grad().fill(1.0)
                            kernel_t.get_grad().fill(1.0)
                            nn.backward( {conved_t:conved_n_grad}, grad_for_non_trainables=True )

                            if not all ( np.ndarray.flatten( (input_t.get_grad().np()-1.0) == dI_val )):
                                raise Exception(f'dI not equal')

                            if not all ( np.ndarray.flatten( (kernel_t.get_grad().np()-1.0) == dK_val )):
                                raise Exception(f'dK not equal')
                        except:
                            raise Exception(f"""
input_shape   : {input_shape}
kernel_shape  : {kernel_shape}
padding       : {padding}
stride        : {stride}
dilation      : {dilation}
conved_n.shape : {conved_n.shape}
conved_t.shape : {conved_t.shape}
{traceback.format_exc()}
""")
Example #5
    def backward(self, stop_grad=None, grad_for_non_trainables=False):
        """
        Perform backward computation.

            stop_grad (None)

        None or Tensor or list of Tensors where backprop should stop.

            grad_for_non_trainables (False)

        If False, the backprop will stop at those branches that have no trainable tensors (not attached to any Optimizer),
        also gradient for intermediate non trainable tensors will be freed in order to reduce memory consumption.
        """
        nn.backward(self,
                    stop_grad=stop_grad,
                    grad_for_non_trainables=grad_for_non_trainables)
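
A minimal usage sketch (assumptions: `loss_t` and `branch_t` are illustrative tensors built with the nn API shown in the other examples; only `backward` and its two parameters come from the docstring above):

# loss_t is assumed to be computed from weights attached to an Optimizer
loss_t.backward()                                 # grads on non-trainable branches are freed
loss_t.backward(grad_for_non_trainables=True)     # also keep grads of intermediate tensors
loss_t.backward(stop_grad=branch_t)               # do not backprop past branch_t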
Example #6
def stack_test():
    for _ in range(10):
        for shape_len in range(1, 4):
            try:
                shape = tuple(np.random.randint(8, size=(shape_len, )) + 1)
                axis = np.random.randint(shape_len + 1)
                stack_count = np.random.randint(4) + 1

                vals_n = [
                    np.random.randint(2**8, size=shape).astype(np.float32)
                    for i in range(stack_count)
                ]
                vals_t = [
                    nn.Tensor_from_value(vals_n[i]) for i in range(stack_count)
                ]
                stack_n = np.stack(vals_n, axis)
                stack_t = nn.stack(vals_t, axis)

                if not all(np.ndarray.flatten(stack_t.np() == stack_n)):
                    raise Exception(f'data is not equal')

                stack_n_grad = np.random.randint(
                    2**8, size=stack_n.shape).astype(np.float32)
                for val_t in vals_t:
                    val_t.get_grad().fill(1.0)
                nn.backward({stack_t: stack_n_grad},
                            grad_for_non_trainables=True)

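                # Grad of input n is the upstream grad sliced at index n along the stack axis.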
                for n in range(stack_count):
                    slices = (slice(None, None, None), ) * axis + (
                        n, ) + (slice(None, None, None), ) * (
                            len(stack_n.shape) - axis - 1)
                    if not all(
                            np.ndarray.flatten(vals_t[n].get_grad().np() -
                                               1.0 == stack_n_grad[slices])):
                        raise Exception(f'grad is not equal')

            except:
                raise Exception(f"""
shape         : {shape}
axis          : {axis}
stack_count   : {stack_count}
stack_n_shape : {stack_n.shape}
stack_t_shape : {stack_t.shape}
exception     : {traceback.format_exc()}
""")
Example #7
def resize2D_bilinear_test():
    for n in [1, 4]:
        for ic in [1, 2, 4]:
            for iw, ih in zip(*[[4, 8, 16]] * 2):
                for size in [0.6, 1.0, 2.0]:
                    try:
                        input_shape = (n, ic, ih, iw)
                        input_n = np.random.randint(
                            2**4, size=input_shape).astype(np.float32)

                        input_t = nn.Tensor_from_value(input_n)

                        resized_t = nn.resize2D_bilinear(input_t, size)

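                        # Smoke test: runs forward and backward only, without
                        # comparing values against a numpy reference.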
                        nn.backward([resized_t], grad_for_non_trainables=True)
                    except:
                        raise Exception(f"""
input_shape    : {input_shape}
size           : {size}
resized_t.shape : {resized_t.shape}
{traceback.format_exc()}
""")
Example #8
def tile_test():
    for _ in range(10):
        for shape_len in range(3, 5):
            try:
                shape = tuple(np.random.randint( 8, size=(shape_len,) )+1)
                tiles = tuple(np.random.randint( 4, size=(shape_len,) )+1)

                val_n = np.random.randint( 2**8, size=shape ).astype(np.float32)
                tiled_n = np.tile(val_n, tiles)

                val_t = nn.Tensor_from_value(val_n)
                tiled_t = nn.tile(val_t, tiles)

                if tiled_n.shape != tiled_t.shape:
                    raise Exception(f'shape is not equal')

                if not all ( np.ndarray.flatten( tiled_t.np() == tiled_n ) ):
                    raise Exception(f'data is not equal')

                tiled_n_grad = np.random.randint( 2**8, size=tiled_n.shape ).astype(np.float32)

                val_t.get_grad().fill(1.0)
                nn.backward( {tiled_t:tiled_n_grad} , grad_for_non_trainables=True )

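                # Reference grad of tile: the upstream grad summed over every tile
                # placement, with the slices enumerated by nc.info.InfoTile.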
                info = nc.info.InfoTile( nc.TensorShape(shape), tiles)
                val_n_grad = sum([ tiled_n_grad[axes_slice] for axes_slice in info.axes_slices ])
                if not all ( np.ndarray.flatten(val_t.get_grad().np()-1.0 == val_n_grad) ):
                    raise Exception(f'grad is not equal')

            except:
                raise Exception(f"""
shape         : {shape}
tiles         : {tiles}
tiled_n_shape : {tiled_n.shape}
tiled_t_shape : {tiled_t.shape}
exception     : {traceback.format_exc()}
""")
Example #9
def matmul_test():
    for _ in range(10):
        try:
            BATCH = np.random.randint(8) + 1
            M = np.random.randint(8) + 1
            N = np.random.randint(32768) + 1
            K = np.random.randint(32768) + 1

            while K * N > (8000000 // BATCH):
                K = max(1, K // 2)
                N = max(1, N // 2)

            if np.random.randint(2) == 0:
                size = [2, 4, 8, 16][np.random.randint(4)]
                M = max(1, M // size) * size
                N = max(1, N // size) * size
                K = max(1, K // size) * size

            if BATCH == 1:
                A_shape = (M, K)
                B_shape = (K, N)
            else:
                A_shape = (BATCH, M, K)
                B_shape = (BATCH, K, N)

            A_n = np.random.randint(2**4, size=A_shape).astype(np.float32)
            B_n = np.random.randint(2**4, size=B_shape).astype(np.float32)

            O_n = np.matmul(A_n, B_n)

            A_t = nn.Tensor_from_value(A_n)
            B_t = nn.Tensor_from_value(B_n)
            O_t = nn.matmul(A_t, B_t)
            if O_n.shape != O_t.shape:
                raise Exception('shape is not equal')
            if not all(np.ndarray.flatten(O_t.np() == O_n)):
                raise Exception(f'data is not equal')

            O_n_grad = np.random.randint(2**3,
                                         size=O_n.shape).astype(np.float32)

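            # Reference gradients for O = A @ B: dA = dO @ B^T and dB = A^T @ dO,
            # transposing only the last two axes so batch dims are untouched.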
            b_n_axes = tuple(np.arange(len(B_n.shape)))
            B_n_T = B_n.transpose(b_n_axes[:-2] + b_n_axes[-1:] +
                                  b_n_axes[-2:-1])

            a_n_axes = tuple(np.arange(len(A_n.shape)))
            A_n_T = A_n.transpose(a_n_axes[:-2] + a_n_axes[-1:] +
                                  a_n_axes[-2:-1])

            A_n_grad = np.matmul(O_n_grad, B_n_T)
            B_n_grad = np.matmul(A_n_T, O_n_grad)

            A_t.get_grad().fill(1.0)
            B_t.get_grad().fill(1.0)

            nn.backward({O_t: O_n_grad}, grad_for_non_trainables=True)

            if not all(
                    np.ndarray.flatten(
                        (A_t.get_grad().np() - 1.0) == A_n_grad)):
                raise Exception(f'dA is not equal')

            if not all(
                    np.ndarray.flatten(
                        (B_t.get_grad().np() - 1.0) == B_n_grad)):
                raise Exception(f'dB is not equal')
        except:
            raise Exception(f"""
M  : {M}
N  : {N}
K  : {K}
O_n.shape  : {O_n.shape}
O_t.shape  : {O_t.shape}
{traceback.format_exc()}
""")
Example #10
def reduce_test():
    for _ in range(10):
        for op_type in ['sum', 'mean', 'min', 'max']:
            for shape_len in range(2, 5):
                try:
                    shape = np.random.randint( 8, size=(shape_len,) )+1

                    reduction_axes = np.array([*range(shape_len)])
                    np.random.shuffle(reduction_axes)

                    # Cut random amount of reduction_axes
                    reduction_axes = tuple(reduction_axes [:np.random.randint(shape_len+1)])
                    if len(reduction_axes) == 0:
                        reduction_axes = None

                    keepdims = np.random.randint(2) == 0

                    value_n = np.random.randint( 2**8, size=shape ).astype(np.float32)
                    value_t = nn.Tensor_from_value(value_n)

                    if op_type == 'sum':
                        reducted_t = value_t.sum(reduction_axes, keepdims=keepdims)
                        reducted_n = value_n.sum(reduction_axes, keepdims=keepdims)

                        reducted_n_keepdims_shape = value_n.sum(reduction_axes, keepdims=True).shape
                    elif op_type == 'mean':
                        reducted_t = value_t.mean(reduction_axes, keepdims=keepdims)
                        reducted_n = value_n.mean(reduction_axes, keepdims=keepdims)

                        reducted_n_keepdims_shape = value_n.mean(reduction_axes, keepdims=True).shape
                    elif op_type == 'max':
                        reducted_t = value_t.max(reduction_axes, keepdims=keepdims)
                        reducted_n = value_n.max(reduction_axes, keepdims=keepdims)

                        reducted_n_keepdims_shape = value_n.max(reduction_axes, keepdims=True).shape
                    elif op_type == 'min':
                        reducted_t = value_t.min(reduction_axes, keepdims=keepdims)
                        reducted_n = value_n.min(reduction_axes, keepdims=keepdims)

                        reducted_n_keepdims_shape = value_n.min(reduction_axes, keepdims=True).shape

                    # sum of absolute differences, so signed errors cannot cancel out
                    if np.sum( np.abs( np.ndarray.flatten( reducted_t.np() - reducted_n ) ) ) >= 1.0:
                        raise Exception(f'data is not equal')

                    value_t.get_grad().fill(1.0)

                    reducted_n_grad = np.random.randint( 2**8, size=reducted_n.shape ).astype(np.float32)
                    nn.backward( {reducted_t:reducted_n_grad}, grad_for_non_trainables=True )

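                    # For sum-reduction, the input grad is the upstream grad
                    # broadcast back to the input shape.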
                    if op_type == 'sum':
                        value_n_grad = np.broadcast_to( np.reshape(reducted_n_grad, reducted_n_keepdims_shape), value_n.shape )

                        if np.sum( np.abs( np.ndarray.flatten( value_t.get_grad().np()-1.0 - value_n_grad ) ) ) >= 1.0:
                            raise Exception(f'dI is not equal')
                except:
                    raise Exception(f"""
op_type           : {op_type}
shape             : {shape}
reduction_axes    : {reduction_axes}
keepdims          : {keepdims}
reducted_n.shape  : {reducted_n.shape}
reducted_t.shape  : {reducted_t.shape}

exception : {traceback.format_exc() }
""")
Example #11
def pool2d_test():
    for batch in [1, 4]:
        for in_ch in [1, 2, 4]:
            for w, h in zip(*[[4, 8, 16]] * 2):
                for pool_size in [2, 3]:
                    for stride in [1, 2, 3]:
                        for padding in ['same', 'valid', 0, 1, 2]:
                            for op_type in ['avg', 'max', 'min']:
                                try:
                                    input_shape = (batch, in_ch, h, w)
                                    if op_type == 'avg':
                                        input_n = np.random.randint(
                                            2**4, size=input_shape).astype(
                                                np.float32)
                                    else:
                                        # for minmax make unique values in order not to test 'same-hit'
                                        input_shape_prod = int(
                                            np.prod(input_shape))
                                        input_n = np.arange(
                                            input_shape_prod,
                                            dtype=np.float32).reshape(
                                                input_shape) / input_shape_prod

                                    input_t = nn.Tensor_from_value(input_n)

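                                    # _numpy_pool2d is assumed to return the reference
                                    # pooled output and the corresponding input grad.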
                                    pooled_n, input_n_grad = _numpy_pool2d(
                                        op_type,
                                        input_n,
                                        pool_size,
                                        STRIDE=stride,
                                        padding=padding)

                                    if op_type == 'avg':
                                        pooled_t = nn.avg_pool2D(
                                            input_t, pool_size, stride,
                                            padding)
                                    elif op_type == 'max':
                                        pooled_t = nn.max_pool2D(
                                            input_t, pool_size, stride,
                                            padding)
                                    elif op_type == 'min':
                                        pooled_t = nn.min_pool2D(
                                            input_t, pool_size, stride,
                                            padding)

                                    if pooled_n.shape != pooled_t.shape:
                                        raise Exception(f'shape is not equal')

                                    if np.sum(np.abs(pooled_t.np() -
                                                     pooled_n)) > 0.1:
                                        raise Exception(f'data is not equal')

                                    input_t.get_grad().fill(1.0)
                                    nn.backward(pooled_t,
                                                grad_for_non_trainables=True)

                                    # sum of absolute differences, so signed errors cannot cancel out
                                    if np.sum(
                                            np.abs(
                                                input_t.get_grad().np() - 1.0 -
                                                input_n_grad)) >= 1.0:
                                        raise Exception('grad is not equal')

                                except:
                                    raise Exception(f"""
op_type        : {op_type}
input_shape    : {input_shape}
pool_size      : {pool_size}
stride         : {stride}
padding        : {padding}
pooled_n.shape : {pooled_n.shape}
pooled_t.shape : {pooled_t.shape}
{traceback.format_exc()}
""")
Example #12
def slice_test():
    for iteration in range(10):
        for shape_len in range(5, 1, -1):
            try:
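                # slice_cls is assumed to alias the builtin `slice` elsewhere in the module.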
                while True:
                    shape = np.random.randint(1, 8, size=(shape_len, ))

                    if iteration == 0:
                        slices = [
                            slice_cls(None, None, None),
                        ] * shape_len
                        axis = np.random.randint(shape_len)
                        shape[axis] = 1
                        slices[axis] = 0
                    else:
                        slices = []
                        for i in range(shape_len):
                            axis_size = shape[i]
                            if np.random.randint(2) == 0:
                                v = axis_size - np.random.randint(
                                    axis_size * 2) - 1
                                slices.append(v)
                            else:
                                b = None if np.random.randint(
                                    2) == 0 else axis_size - np.random.randint(
                                        axis_size * 2)
                                e = None if np.random.randint(
                                    2) == 0 else axis_size - np.random.randint(
                                        axis_size * 2)
                                s = 1 if np.random.randint(2) == 0 else -1

                                slices.append(slice_cls(b, e, s))

                        if np.random.randint(2) == 0:
                            axis = np.random.randint(shape_len)
                            slices[axis] = Ellipsis

                    shape = tuple(shape)
                    slices = tuple(slices)

                    val_n = np.random.randint(2**8,
                                              size=shape).astype(np.float32)

                    sliced_n = val_n[slices]
                    val_t = nn.Tensor_from_value(val_n)
                    sliced_t = val_t[slices]

                    if 0 in sliced_n.shape:
                        # some cases like 0:1:-1 will produce zero shape and invalid array on numpy
                        # but nn.slice has no such behaviour, thus we have to generate new slice again
                        continue

                    if np.prod(sliced_n.shape) != sliced_t.shape.size:
                        raise Exception(f'element count is not equal')

                    if not all(
                            np.ndarray.flatten(np.array(sliced_t.np())) ==
                            np.ndarray.flatten(np.array(sliced_n))):
                        raise Exception(f'data is not equal')

                    sliced_n_grad = np.random.randint(
                        2**8, size=sliced_n.shape).astype(np.float32)

                    val_t.get_grad().fill(1.0)
                    nn.backward({sliced_t: sliced_n_grad},
                                grad_for_non_trainables=True)
                    sliced_t_grad = np.array(val_t.get_grad().np()[slices])

                    if not all(
                            np.ndarray.flatten(np.array([sliced_t_grad - 1.0]))
                            == np.ndarray.flatten(np.array([sliced_n_grad]))):
                        raise Exception(f'grad is not equal')

                    break
            except:
                raise Exception(f"""
shape          : {shape}
slices         : {slices}
sliced_n_shape : {sliced_n.shape}
sliced_t_shape : {sliced_t.shape}
exception      : {traceback.format_exc()}
""")
Example #13
def dual_wise_op_test():
    for op in [
            add, binary_crossentropy, categorical_crossentropy, sub, max, min,
            mul, div
    ]:
        print(f'{op.__name__}()')
        for _ in range(10):
            if op == categorical_crossentropy:
                shape_gen = [2]
            else:
                shape_gen = range(1, 5)

            for shape_len in shape_gen:
                try:
                    a_shape = tuple(
                        np.random.randint(8, size=(shape_len, )) + 1)

                    if op == categorical_crossentropy:
                        b_shape = a_shape
                    else:
                        if np.random.randint(2) == 0:
                            b_shape = tuple(
                                a_shape[np.random.randint(len(a_shape)):])
                            b_shape = (1, ) if len(b_shape) == 0 else b_shape
                        else:
                            b_shape = list(a_shape)
                            b_shape[np.random.randint(len(b_shape))] = 1
                            b_shape = tuple(b_shape)

                        shapes = [a_shape, b_shape]
                        if np.random.randint(2) == 0:
                            shapes = shapes[::-1]
                        a_shape, b_shape = shapes

                    a_n = np.random.randint(1, 2**8,
                                            size=a_shape).astype(np.float32)
                    b_n = np.random.randint(1, 2**8,
                                            size=b_shape).astype(np.float32)
                    a_t = nn.Tensor_from_value(a_n)
                    b_t = nn.Tensor_from_value(b_n)
                    r_t = op(a_t, b_t)

                    r_n_grad = np.random.randint(2**8, size=r_t.shape).astype(
                        np.float32)

                    a_t.get_grad().fill(1.0)
                    b_t.get_grad().fill(1.0)
                    nn.backward({r_t: r_n_grad}, grad_for_non_trainables=True)

                    if op == div:
                        # Values and gradients are verified numerically only for div
                        r_n = a_n / b_n

                        if r_n.shape != r_t.shape:
                            raise Exception(f'shapes are not equal')
                        if np.abs(np.sum(
                            (np.ndarray.flatten(r_t.np() - r_n)))) > 1:
                            raise Exception(f'data is not equal')

                        info = nc.info.InfoBroadcast(nc.TensorShape(a_shape),
                                                     nc.TensorShape(b_shape))

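                        # Reference gradients for r = a / b: dr/da = 1/b and dr/db = -a/b**2;
                        # grads on broadcast axes are summed back to each input's shape.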
                        a_n_grad = r_n_grad / b_n

                        axes = info.a_shape_reduction_axes
                        if axes.rank == 0:
                            a_n_grad = a_n_grad.reshape(a_n.shape)
                        else:
                            a_n_grad = a_n_grad.sum(tuple(axes), keepdims=True)

                        b_n_grad = r_n_grad * (-a_n / (b_n**2))

                        axes = info.b_shape_reduction_axes
                        if axes.rank == 0:
                            b_n_grad = b_n_grad.reshape(b_n.shape)
                        else:
                            b_n_grad = b_n_grad.sum(tuple(axes), keepdims=True)

                        if np.abs(
                                np.sum(
                                    (np.ndarray.flatten(a_t.get_grad().np() -
                                                        1.0 - a_n_grad)))) > 1:
                            raise Exception(f'grad A is not equal')
                        if np.abs(
                                np.sum(
                                    (np.ndarray.flatten(b_t.get_grad().np() -
                                                        1.0 - b_n_grad)))) > 1:
                            raise Exception(f'grad B is not equal')
                    else:
                        if not a_t.has_grad():
                            raise Exception(f'a_t has no grad')
                        if not b_t.has_grad():
                            raise Exception(f'b_t has no grad')

                except:
                    raise Exception(f"""
op        : {op}
a_shape   : {a_shape}
b_shape   : {b_shape}
r_t_shape : {r_t.shape}
exception : {traceback.format_exc() }
""")