Example #1
def transpose_test():
    for _ in range(10):
        for shape_len in range(2, 5):
            try:
                shape = np.random.randint( 8, size=(shape_len,) )+1
                axes_order = np.array([*range(shape_len)])
                np.random.shuffle(axes_order)

                val_n = np.random.randint( 2**8, size=shape ).astype(np.float32)
                transposed_n = np.transpose(val_n, axes_order)
                val_t = nn.Tensor_from_value(val_n)
                transposed_t = nn.transpose(val_t, axes_order)

                if transposed_n.shape != transposed_t.shape:
                    raise Exception('shape is not equal')
                if not all ( np.ndarray.flatten( transposed_t.np() == transposed_n ) ):
                    raise Exception(f'data is not equal {shape} {axes_order}')

                transposed_n_grad = np.random.randint( 2**8, size=transposed_n.shape ).astype(np.float32)

                val_t.get_grad().fill(1.0)  # Check addition to gradient
                nn.backward( {transposed_t:transposed_n_grad} , grad_for_non_trainables=True )

                if not all ( np.ndarray.flatten(np.transpose(val_t.get_grad().np()-1.0, axes_order) == transposed_n_grad) ):
                    raise Exception(f'grad is not equal')
            except:
                raise Exception(f"""
shape              : {shape}
axes_order         : {axes_order}
transposed_n_shape : {transposed_n.shape}
transposed_t_shape : {transposed_t.shape}
exception          : {traceback.format_exc()}
""")

        for size in [2,3,4]:
            try:
                N = 1+np.random.randint(8)
                C = (1+np.random.randint(8))*size*size
                H = W = (1+np.random.randint(8))*size

                shape = (N,C,H,W)

                val_n = np.random.randint( 2**8, size=shape ).astype(np.float32)
                val_t = nn.Tensor_from_value(val_n)

                d2s_val_t = depth_to_space(val_t, size)
                s2d_val_t = space_to_depth(d2s_val_t, size)

                if not all ( np.ndarray.flatten( val_t.np() == s2d_val_t.np() ) ):
                    raise Exception(f'data is not equal')
            except:
                raise Exception(f"""
depth_to_space/space_to_depth

shape              : {shape}
size               : {size}
exception          : {traceback.format_exc()}
""")
Example #2
def depthwise_conv2d_test():
    for padding in ['same','valid',0,1,2]:
        for dilation in [1,2]:
          for stride in [1,2,3]:
            for ks in [1,3,5,7]:
              for n in [1,4]:
                for ic in [1,2,4]:
                    for ih,iw in zip(*[[4,8,16]]*2):                        
                        if padding == 'valid' and iw < ks:
                            continue
                        try:
                            input_shape  = (n, ic, ih, iw)
                            kernel_shape = (ic, ks, ks)

                            input_n  = np.random.randint( 2**4, size=input_shape ).astype(np.float32)
                            kernel_n = np.random.randint( 2**4, size=kernel_shape ).astype(np.float32)

                            input_t  = nn.Tensor_from_value(input_n)
                            kernel_t = nn.Tensor_from_value(kernel_n)

                            conved_t = nn.depthwise_conv2D(input_t, kernel_t, stride=stride, dilation=dilation, padding=padding)
                            conved_n_grad = np.random.randint( 2**4, size=conved_t.shape).astype(np.float32)
                            conved_n, dI_val, dK_val = _numpy_depthwise_conv2d(input_n, kernel_n, conved_n_grad, STRIDE=stride, DILATION=dilation, padding=padding)

                            if conved_n.shape != conved_t.shape:
                                raise Exception(f'shape is not equal')

                            if not all ( np.ndarray.flatten( conved_t.np() == conved_n) ):
                                raise Exception(f'data is not equal')

                            input_t.get_grad().fill(1.0)
                            kernel_t.get_grad().fill(1.0)
                            nn.backward( {conved_t:conved_n_grad}, grad_for_non_trainables=True )

                            if not all ( np.ndarray.flatten( (input_t.get_grad().np()-1.0) == dI_val )):
                                raise Exception(f'dI not equal')

                            if not all ( np.ndarray.flatten( (kernel_t.get_grad().np()-1.0) == dK_val )):
                                raise Exception(f'dK not equal')
                        except:
                            raise Exception(f"""
input_shape   : {input_shape}
kernel_shape  : {kernel_shape}
padding       : {padding}
stride        : {stride}
dilation      : {dilation}
conved_n.shape : {conved_n.shape}
conved_t.shape : {conved_t.shape}
{traceback.format_exc()}
""")
Example #3
    def __truediv__(self, t):
        if isinstance(t, (int, float)):
            return nn.div_const(self, t)
        else:
            if not isinstance(t, Tensor):
                t = nn.Tensor_from_value(t)
            return nn.div(self, t)
Example #4
    def __init__(self, kernel_size=3, stride=2, dilation=1, padding='same'):
        super().__init__(saveables=[], trainables=[])

        self.stride = stride
        self.dilation = dilation
        self.padding = padding

        if kernel_size == 1:
            a = np.array([1.])
        elif kernel_size == 2:
            a = np.array([1., 1.])
        elif kernel_size == 3:
            a = np.array([1., 2., 1.])
        elif kernel_size == 4:
            a = np.array([1., 3., 3., 1.])
        elif kernel_size == 5:
            a = np.array([1., 4., 6., 4., 1.])
        elif kernel_size == 6:
            a = np.array([1., 5., 10., 10., 5., 1.])
        elif kernel_size == 7:
            a = np.array([1., 6., 15., 20., 15., 6., 1.])
        else:
            raise ValueError(f'Unsupported kernel_size {kernel_size}')

        a = a[:, None] * a[None, :]
        a = a / np.sum(a)
        a = a[None, :, :]
        self.a = nn.Tensor_from_value(a)
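The hardcoded arrays above are rows of Pascal's triangle, so the kernel can be generated for any size instead of being enumerated. A minimal sketch using math.comb from Python 3.8+ (binomial_kernel2d is a hypothetical helper, not part of the codebase):

import math
import numpy as np

def binomial_kernel2d(kernel_size):
    # row of Pascal's triangle: C(kernel_size - 1, i)
    a = np.array([math.comb(kernel_size - 1, i) for i in range(kernel_size)],
                 dtype=np.float32)
    a = a[:, None] * a[None, :]   # outer product -> 2D kernel
    return a / a.sum()            # normalize to unit sum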
Example #5
def ssim(img1,
         img2,
         max_val,
         filter_size=11,
         filter_sigma=1.5,
         k1=0.01,
         k2=0.03):
    """
    ssim operator.
    
    Computes per-channel structural similarity.
    """
    filter_size = int(filter_size)

    N1, CH1, H1, W1 = img1.shape
    N2, CH2, H2, W2 = img2.shape

    if N1 != N2:
        raise ValueError('Images batch must match.')
    if CH1 != CH2:
        raise ValueError('Images channels must match.')

    kernel_key = (ssim, CH1, filter_size, filter_sigma)
    kernel = nc.Cacheton.get_var(kernel_key)
    if kernel is None:
        kernel = np.arange(0, filter_size, dtype=np.float32)
        kernel -= (filter_size - 1) / 2.0
        kernel = kernel**2
        kernel *= (-0.5 / (filter_sigma**2))
        kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel, (-1, 1))
        kernel_exp = np.exp(kernel)
        kernel = kernel_exp / kernel_exp.sum()
        kernel = kernel[None, ...]
        kernel = np.tile(kernel, (CH1, 1, 1))
        nc.Cacheton.set_var(kernel_key, kernel)

    kernel_t = nn.Tensor_from_value(kernel)
    c1 = (k1 * max_val)**2
    c2 = (k2 * max_val)**2

    mean0 = nn.depthwise_conv2D(img1, kernel_t, stride=1, padding='valid')
    mean1 = nn.depthwise_conv2D(img2, kernel_t, stride=1, padding='valid')
    num0 = mean0 * mean1 * 2.0

    den0 = mean0 * mean0 + mean1 * mean1

    luminance = (num0 + c1) / (den0 + c1)

    num1 = nn.depthwise_conv2D(
        img1 * img2, kernel_t, stride=1, padding='valid') * 2.0
    den1 = nn.depthwise_conv2D(img1 * img1 + img2 * img2,
                               kernel_t,
                               stride=1,
                               padding='valid')

    cs = (num1 - num0 + c2) / (den1 - den0 + c2)

    return nn.reduce_mean(luminance * cs, axes=(-2, -1))
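A minimal usage sketch following the conventions of the tests in this listing (shapes and value ranges are illustrative assumptions):

img1_n = np.random.uniform(size=(4, 3, 64, 64)).astype(np.float32)
img2_n = np.random.uniform(size=(4, 3, 64, 64)).astype(np.float32)

score_t = ssim(nn.Tensor_from_value(img1_n),
               nn.Tensor_from_value(img2_n),
               max_val=1.0)       # -> shape (4, 3): one score per image per channel
print(score_t.np())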
Example #6
def concat_test():
    for _ in range(10):
        for shape_len in range(2, 5):
            try:
                shape = (np.random.randint(8, size=(shape_len, )) + 1).tolist()
                axis = np.random.randint(shape_len)
                count = np.random.randint(4) + 1

                # copy shape `count` times, randomizing the size along `axis`
                shapes = tuple(
                    tuple(dim if i != axis else np.random.randint(8) + 1
                          for i, dim in enumerate(s))
                    for s in [shape] * count)

                vals_n = [
                    np.random.randint(2**8, size=shape).astype(np.float32)
                    for shape in shapes
                ]
                concat_n = np.concatenate(vals_n, axis)

                vals_t = [
                    nn.Tensor_from_value(vals_n[i]) for i in range(count)
                ]
                concat_t = nn.concat(vals_t, axis)

                concat_n_grad = np.random.randint(
                    2**8, size=concat_n.shape).astype(np.float32)

                for t in vals_t:
                    t.get_grad().fill(1.0)  # Check addition to gradient

                nn.backward({concat_t: concat_n_grad},
                            grad_for_non_trainables=True)

                if not all(np.ndarray.flatten(concat_t.np() == concat_n)):
                    raise Exception(f'data is not equal')

                axis_offset = 0
                for n in range(count):
                    axis_size = vals_n[n].shape[axis]
                    axis_slice = slice(axis_offset, axis_offset + axis_size, 1)
                    slices = (slice(None, None, None), ) * axis + (
                        axis_slice, ) + (slice(None, None, None), ) * (
                            len(concat_n.shape) - axis - 1)
                    axis_offset += axis_size

                    if not all(
                            np.ndarray.flatten(vals_t[n].get_grad().np() -
                                               1.0 == concat_n_grad[slices])):
                        raise Exception(f'grad is not equal')

            except:
                raise Exception(f"""
shape       : {shape}
axis        : {axis}
count       : {count}
exception   : {traceback.format_exc()}
""")
Example #7
    def __mul__(self, t):
        if isinstance(t, (int, float)):
            return nn.mul_const(self, t)
        else:
            if not isinstance(t, Tensor):
                t = nn.Tensor_from_value(t)
            if t == self:
                return nn.square(self)
            else:
                return nn.mul(self, t)
Example #8
    def __sub__(self, t):
        if isinstance(t, (int, float)):
            return nn.sub_const(self, t)
        else:
            if not isinstance(t, Tensor):
                t = nn.Tensor_from_value(t)
            if t == self:
                return nn.mul_const(self, 0.0)
            else:
                return nn.sub(self, t)
Example #9
    def __radd__(self, t):
        if isinstance(t, (int, float)):
            return nn.add_const(self, t)
        else:
            if not isinstance(t, Tensor):
                t = nn.Tensor_from_value(t)
            if t == self:
                return nn.mul_const(self, 2.0)
            else:
                return nn.add(t, self)
Example #10
def resize2D_nearest_test():
    for n in [1, 4]:
        for ic in [1, 2, 4]:
            for iw, ih in zip(*[[4, 8, 16]] * 2):
                for size in [2, 3, 4]:
                    try:
                        input_shape = (n, ic, ih, iw)
                        input_n = np.random.randint(
                            2**4, size=input_shape).astype(np.float32)

                        input_t = nn.Tensor_from_value(input_n)

                        upsampled_t = nn.resize2D_nearest(input_t, size=size)

                        upsampled_n = input_n.reshape((n, ic, ih, 1, iw, 1))
                        upsampled_n = np.tile(upsampled_n,
                                              (1, 1, 1, size, 1, size))
                        upsampled_n = upsampled_n.reshape(
                            (n, ic, ih * size, iw * size))

                        if upsampled_n.shape != upsampled_t.shape:
                            raise Exception(f'shape is not equal')

                        if not all(
                                np.ndarray.flatten(
                                    upsampled_t.np() == upsampled_n)):
                            raise Exception(f'data is not equal')

                        upsampled_n_grad = np.random.randint(
                            2**8, size=upsampled_n.shape).astype(np.float32)

                        input_t.get_grad().fill(1.0)
                        nn.backward({upsampled_t: upsampled_n_grad},
                                    grad_for_non_trainables=True)

                        input_n_grad = upsampled_n_grad.reshape(
                            (n, ic, ih, size, iw, size))
                        input_n_grad = input_n_grad.sum((-3, -1))
                        if not all(
                                np.ndarray.flatten(input_t.get_grad().np() -
                                                   1.0) == np.ndarray.flatten(
                                                       input_n_grad)):
                            raise Exception('grad is not equal')
                    except:
                        raise Exception(f"""
input_shape    : {input_shape}
size           : {size}

upsampled_n.shape : {upsampled_n.shape}
upsampled_t.shape : {upsampled_t.shape}
{traceback.format_exc()}
""")
Example #11
def stack_test():
    for _ in range(10):
        for shape_len in range(1, 4):
            try:
                shape = tuple(np.random.randint(8, size=(shape_len, )) + 1)
                axis = np.random.randint(shape_len + 1)
                stack_count = np.random.randint(4) + 1

                vals_n = [
                    np.random.randint(2**8, size=shape).astype(np.float32)
                    for i in range(stack_count)
                ]
                vals_t = [
                    nn.Tensor_from_value(vals_n[i]) for i in range(stack_count)
                ]
                stack_n = np.stack(vals_n, axis)
                stack_t = nn.stack(vals_t, axis)

                if not all(np.ndarray.flatten(stack_t.np() == stack_n)):
                    raise Exception(f'data is not equal')

                stack_n_grad = np.random.randint(
                    2**8, size=stack_n.shape).astype(np.float32)
                for val_t in vals_t:
                    val_t.get_grad().fill(1.0)
                nn.backward({stack_t: stack_n_grad},
                            grad_for_non_trainables=True)

                for n in range(stack_count):
                    slices = (slice(None, None, None), ) * axis + (
                        n, ) + (slice(None, None, None), ) * (
                            len(stack_n.shape) - axis - 1)
                    if not all(
                            np.ndarray.flatten(vals_t[n].get_grad().np() -
                                               1.0 == stack_n_grad[slices])):
                        raise Exception(f'grad is not equal')

            except:
                raise Exception(f"""
shape         : {shape}
axis          : {axis}
stack_count   : {stack_count}
stack_n_shape : {stack_n.shape}
stack_t_shape : {stack_t.shape}
exception     : {traceback.format_exc()}
""")
Example #12
def element_wise_op_test():
    add_params = {
        add_const: [1.0],
        clip: [0.0, 1.0],
        div_const: [2.0],
        mul_const: [2.0],
        leaky_relu: [0.1],
        rdiv_const: [2.0],
        rsub_const: [2.0],
        sub_const: [1.0],
    }
    for op in [
            abs, add_const, clip, cos, div_const, exp, leaky_relu, log,
            mul_const, rdiv_const, rsub_const, relu, sigmoid, sin, softmax,
            sqrt, square, sub_const, tanh
    ]:
        print(f'{op.__name__}()')
        for _ in range(10):
            for shape_len in range(1, 3):
                try:
                    shape = (np.random.randint(8, size=(shape_len, )) +
                             1).tolist()

                    value_n = np.random.randint(128, size=shape).astype(
                        np.float32) - 64
                    value_t = nn.Tensor_from_value(value_n)

                    args = add_params.get(op, None)
                    if args is None:
                        args = []

                    result_t = op(*([value_t] + args))
                    result_t.backward(grad_for_non_trainables=True)

                    if not value_t.has_grad():
                        raise Exception('No grad.')

                except:
                    raise Exception(f"""
shape       : {shape}
op          : {op.__name__}
args        : {args}
exception   : {traceback.format_exc()}
""")
Example #13
def dropout_test():
    for _ in range(10):
        for shape_len in range(2, 5):
            try:
                shape = np.random.randint(1, 8, size=(shape_len, ))
                rate = np.random.uniform()
                val_n = np.random.randint(1, 2**8,
                                          size=shape).astype(np.float32)

                val_t = nn.Tensor_from_value(val_n)
                result_t = dropout(val_t, rate)
                result_t.backward(grad_for_non_trainables=True)

                if not all ( np.ndarray.flatten( np.argwhere( result_t.np() == 0 ) == \
                                                 np.argwhere( val_t.get_grad().np() == 0 ) )):
                    raise Exception(f'dI is wrong.')

            except:
                raise Exception(f"""
shape              : {shape}
exception          : {traceback.format_exc()}
""")
Example #14
def resize2D_bilinear_test():
    for n in [1, 4]:
        for ic in [1, 2, 4]:
            for iw, ih in zip(*[[4, 8, 16]] * 2):
                for size in [0.6, 1.0, 2.0]:
                    try:
                        input_shape = (n, ic, ih, iw)
                        input_n = np.random.randint(
                            2**4, size=input_shape).astype(np.float32)

                        input_t = nn.Tensor_from_value(input_n)

                        resized_t = nn.resize2D_bilinear(input_t, size)

                        nn.backward([resized_t], grad_for_non_trainables=True)
                    except:
                        raise Exception(f"""
input_shape    : {input_shape}
size           : {size}
resized_t.shape : {resized_t.shape}
{traceback.format_exc()}
""")
Example #15
def tile_test():
    for _ in range(10):
        for shape_len in range(3, 5):
            try:
                shape = tuple(np.random.randint( 8, size=(shape_len,) )+1)
                tiles = tuple(np.random.randint( 4, size=(shape_len,) )+1)

                val_n = np.random.randint( 2**8, size=shape ).astype(np.float32)
                tiled_n = np.tile(val_n, tiles)

                val_t = nn.Tensor_from_value(val_n)
                tiled_t = nn.tile(val_t, tiles)

                if tiled_n.shape != tiled_t.shape:
                    raise Exception(f'shape is not equal')

                if not all ( np.ndarray.flatten( tiled_t.np() == tiled_n ) ):
                    raise Exception(f'data is not equal')

                tiled_n_grad = np.random.randint( 2**8, size=tiled_n.shape ).astype(np.float32)

                val_t.get_grad().fill(1.0)
                nn.backward( {tiled_t:tiled_n_grad} , grad_for_non_trainables=True )

                info = nc.info.InfoTile( nc.TensorShape(shape), tiles)
                val_n_grad = sum([ tiled_n_grad[axes_slice] for axes_slice in info.axes_slices ])
                if not all ( np.ndarray.flatten(val_t.get_grad().np()-1.0 == val_n_grad) ):
                    raise Exception(f'grad is not equal')

            except:
                raise Exception(f"""
shape         : {shape}
tiles         : {tiles}
tiled_n_shape : {tiled_n.shape}
tiled_t_shape : {tiled_t.shape}
exception     : {traceback.format_exc()}
""")
Example #16
def backward(loss,
             stop_grad=None,
             grad_for_non_trainables=False,
             keep_gradient_for_non_trainables=False):
    """
    Perform backward computation.

    arguments:

        loss

    Tensor or list of Tensors from which backprop should start.
    If value is dict, then keys are loss, and values are numpy gradient init.

        stop_grad (None)

    None or Tensor or list of Tensors where backprop should stop.

        grad_for_non_trainables (False)

    If False, the backprop will stop at those branches that have no trainable tensors (not attached to any Optimizer),
    also gradient for intermediate non trainable tensors will be freed in order to reduce memory consumption.
    """

    loss_gradients = {}
    if isinstance(loss, dict):
        loss_gradients = loss
        loss = list(loss.keys())
    elif not isinstance(loss, (tuple, list)):
        loss = [loss]

    if stop_grad is None:
        stop_grad = []
    elif not isinstance(stop_grad, (tuple, list)):
        stop_grad = [stop_grad]
    user_stop_grad = set(stop_grad)

    key_get_seq_id = operator.methodcaller('_get_seq_id')

    # key is Tensor._get_seq_id(),
    # value is a list of Tensor._get_seq_id() values produced by the key tensor
    stop_grad = {}

    if not grad_for_non_trainables:
        # Determine which branches don't require grad
        # Fill stop_grad dict.

        # form working_list from current loss list
        working_list = loss.copy()
        for t in working_list:
            # loss tensors produce nothing, so set empty array
            stop_grad[t._get_seq_id()] = []

        # Go backward in working_list
        while len(working_list) != 0:
            # Process the tensor with the largest _get_seq_id (the latest)
            working_list = sorted(working_list, key=key_get_seq_id)
            t = working_list.pop(-1)
            t_seq_id = t._get_seq_id()

            if not t._is_trainable():
                # Tensor is not attached to Optimizer (not marked as _is_trainable)
                t_gradfns = t._get_gradfns()
                if t_gradfns is not None:
                    # Tensor is produced by input tensors, iterate over them
                    for input_t in t_gradfns:
                        input_seq_id = input_t._get_seq_id()

                        # Get/set array of next tensors for input_t key
                        i_nexts = stop_grad.get(input_seq_id, None)
                        if i_nexts is None:
                            stop_grad[input_seq_id] = i_nexts = []

                        if t_seq_id not in i_nexts:
                            # add Tensor to array of next tensors
                            i_nexts.append(t_seq_id)

                        if input_t._is_reference():
                            input_t = input_t._get_top_reference_source()

                        if input_t not in working_list:
                            # add input_t to current working_list
                            working_list.append(input_t)
            else:
                # Tensor is attached to an Optimizer (marked as _is_trainable).
                # Walk forward and remove from stop_grad all tensors that
                # produce this and subsequent tensors
                t_list = [t._get_seq_id()]
                while len(t_list) != 0:
                    t = t_list.pop(-1)
                    t_nexts = stop_grad.get(t, None)
                    if t_nexts is not None:
                        t_list += t_nexts
                    if t in stop_grad:
                        stop_grad.pop(t)

    # add user stop_grad
    for t in user_stop_grad:
        stop_grad[t._get_seq_id()] = []

    # Remove tensors from loss list if they are in stop_grad
    loss_list = [t for t in loss if t._get_seq_id() not in stop_grad]

    # Set initial gradient for tensors in loss_list
    for t in loss_list:
        t_grad = t.get_grad()

        if t in loss_gradients:
            # set value specified by user
            nc.op.add(t_grad,
                      nn.Tensor_from_value(loss_gradients[t]),
                      output_t=t_grad)
        else:
            nc.op.add_const(t_grad, 1.0, output_t=t_grad)

    # Convert reference tensors in loss_list to their source
    loss_list = [
        t._get_top_reference_source() if t._is_reference() else t
        for t in loss_list
    ]
    # Filter duplicates
    loss_list = list(set(loss_list))

    # Go backward in working loss_list
    while len(loss_list) != 0:
        # Process the tensor with the largest _get_seq_id (the latest)
        loss_list = sorted(loss_list, key=key_get_seq_id)
        t = loss_list.pop(-1)

        t_gradfns = t._get_gradfns()
        if t_gradfns is not None:
            # Tensor is produced by input tensors, iterate over them
            for input_t in t_gradfns:
                if t._is_freezed() and input_t._is_trainable():
                    # if t is frozen and its input_t is used by an Optimizer
                    # (marked as _is_trainable), then stop the gradient
                    continue
                if input_t._get_seq_id() in stop_grad:
                    continue

                # Call gradient computation
                for func in t_gradfns[input_t]:
                    func(t, t.get_grad())

                if input_t._is_reference():
                    input_t = input_t._get_top_reference_source()
                if input_t not in loss_list:
                    # add input_t to working loss_list, if not exist.
                    loss_list.append(input_t)

        if not grad_for_non_trainables and not t._is_trainable():
            # grad_for_non_trainables == False: its gradient is no longer
            # needed if t is not used by an Optimizer (marked as _is_trainable)
            t.free_grad()
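A minimal usage sketch following the conventions of the tests in this listing (nn.square and the shapes are illustrative assumptions):

x_t = nn.Tensor_from_value(np.random.rand(4, 8).astype(np.float32))
y_t = nn.square(x_t)

# dict form of loss: seed the output gradient explicitly
backward({y_t: np.ones((4, 8), dtype=np.float32)},
         grad_for_non_trainables=True)

dx = x_t.get_grad().np()   # expected 2 * x for a freshly zero-initialized grad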
Example #17
def reduce_test():
    for _ in range(10):
        for op_type in ['sum', 'mean', 'min', 'max']:
            for shape_len in range(2, 5):
                try:
                    shape = np.random.randint( 8, size=(shape_len,) )+1

                    reduction_axes = np.array([*range(shape_len)])
                    np.random.shuffle(reduction_axes)

                    # Truncate reduction_axes to a random length
                    reduction_axes = tuple(reduction_axes[:np.random.randint(shape_len+1)])
                    if len(reduction_axes) == 0:
                        reduction_axes = None

                    keepdims = np.random.randint(2) == 0

                    value_n = np.random.randint( 2**8, size=shape ).astype(np.float32)
                    value_t = nn.Tensor_from_value(value_n)

                    if op_type == 'sum':
                        reducted_t = value_t.sum(reduction_axes, keepdims=keepdims)
                        reducted_n = value_n.sum(reduction_axes, keepdims=keepdims)

                        reducted_n_keepdims_shape = value_n.sum(reduction_axes, keepdims=True).shape
                    elif op_type == 'mean':
                        reducted_t = value_t.mean(reduction_axes, keepdims=keepdims)
                        reducted_n = value_n.mean(reduction_axes, keepdims=keepdims)

                        reducted_n_keepdims_shape = value_n.mean(reduction_axes, keepdims=True).shape
                    elif op_type == 'max':
                        reducted_t = value_t.max(reduction_axes, keepdims=keepdims)
                        reducted_n = value_n.max(reduction_axes, keepdims=keepdims)

                        reducted_n_keepdims_shape = value_n.max(reduction_axes, keepdims=True).shape
                    elif op_type == 'min':
                        reducted_t = value_t.min(reduction_axes, keepdims=keepdims)
                        reducted_n = value_n.min(reduction_axes, keepdims=keepdims)

                        reducted_n_keepdims_shape = value_n.min(reduction_axes, keepdims=True).shape

                    if sum (np.ndarray.flatten( reducted_t.np() - reducted_n)) >= 1.0:
                        raise Exception(f'data is not equal')

                    value_t.get_grad().fill(1.0)

                    reducted_n_grad = np.random.randint( 2**8, size=reducted_n.shape ).astype(np.float32)
                    nn.backward( {reducted_t:reducted_n_grad}, grad_for_non_trainables=True )

                    if op_type == 'sum':
                        value_n_grad = np.broadcast_to( np.reshape(reducted_n_grad, reducted_n_keepdims_shape), value_n.shape )

                        if sum (np.ndarray.flatten( value_t.get_grad().np()-1.0 - value_n_grad )) >= 1.0:
                            raise Exception(f'dI is not equal')
                except:
                    raise Exception(f"""
op_type           : {op_type}
shape             : {shape}
reduction_axes    : {reduction_axes}
keepdims          : {keepdims}
reducted_n.shape  : {reducted_n.shape}
reducted_t.shape  : {reducted_t.shape}

exception : {traceback.format_exc() }
""")
Example #18
def pool2d_test():
    for batch in [1, 4]:
        for in_ch in [1, 2, 4]:
            for w, h in zip(*[[4, 8, 16]] * 2):
                for pool_size in [2, 3]:
                    for stride in [1, 2, 3]:
                        for padding in ['same', 'valid', 0, 1, 2]:
                            for op_type in ['avg', 'max', 'min']:
                                try:
                                    input_shape = (batch, in_ch, h, w)
                                    if op_type == 'avg':
                                        input_n = np.random.randint(
                                            2**4, size=input_shape).astype(
                                                np.float32)
                                    else:
                                        # for min/max, use unique values so
                                        # equal-value ('same-hit') ties are not tested
                                        input_shape_prod = int(
                                            np.prod(input_shape))
                                        input_n = np.arange(
                                            input_shape_prod,
                                            dtype=np.float32).reshape(
                                                input_shape) / input_shape_prod

                                    input_t = nn.Tensor_from_value(input_n)

                                    pooled_n, input_n_grad = _numpy_pool2d(
                                        op_type,
                                        input_n,
                                        pool_size,
                                        STRIDE=stride,
                                        padding=padding)

                                    if op_type == 'avg':
                                        pooled_t = nn.avg_pool2D(
                                            input_t, pool_size, stride,
                                            padding)
                                    elif op_type == 'max':
                                        pooled_t = nn.max_pool2D(
                                            input_t, pool_size, stride,
                                            padding)
                                    elif op_type == 'min':
                                        pooled_t = nn.min_pool2D(
                                            input_t, pool_size, stride,
                                            padding)

                                    if pooled_n.shape != pooled_t.shape:
                                        raise Exception(f'shape is not equal')

                                    if np.sum(np.abs(pooled_t.np() -
                                                     pooled_n)) > 0.1:
                                        raise Exception(f'data is not equal')

                                    input_t.get_grad().fill(1.0)
                                    nn.backward(pooled_t,
                                                grad_for_non_trainables=True)

                                    if sum(
                                            np.ndarray.flatten(
                                                input_t.get_grad().np() - 1.0 -
                                                input_n_grad)) >= 1.0:
                                        raise Exception('grad is not equal')

                                except:
                                    raise Exception(f"""
op_type        : {op_type}
input_shape    : {input_shape}
pool_size      : {pool_size}
stride         : {stride}
padding        : {padding}
pooled_n.shape : {pooled_n.shape}
pooled_t.shape : {pooled_t.shape}
{traceback.format_exc()}
""")
Example #19
    def extract(self,
                input_image,
                is_bgr=True,
                is_remove_intersects=False,
                min_face_size=20):
        """
        Extract faces rects

        arguments

         input_image    np.ndarray of shape
                        (HEIGHT, WIDTH)
                        (HEIGHT, WIDTH, 1)
                        (HEIGHT, WIDTH, 3)

         is_bgr(True)   channels format is BGR(opencv)

         is_remove_intersects(False)
                        remove intersecting faces

         min_face_size(20) minimum face size in pixels

        returns

         list of ints (l,t,r,b,c) sorted by maximum area first
         l - left, t - top, r - right, b - bottom, c - percent

        """

        shape_len = len(input_image.shape)
        if shape_len < 2 or shape_len > 3:
            raise ValueError(f'Wrong shape {input_image.shape}')
        if shape_len == 2:
            input_image = input_image[..., None]

        H, W, C = input_image.shape

        if C == 1:
            input_image = np.repeat(input_image, 3, -1)
            H, W, C = input_image.shape

        if C != 3:
            raise ValueError(f'Wrong shape {input_image.shape}')

        if is_bgr:
            input_image = input_image[..., ::-1]
            is_bgr = False

        WHmax = max(W, H)
        scale_to = 640 if WHmax >= 1280 else WHmax / 2
        scale_to = max(64, scale_to)

        input_scale = WHmax / scale_to

        resize_w = int(W / input_scale)
        resize_h = int(H / input_scale)

        input_image = cv2.resize(input_image, (resize_w, resize_h),
                                 interpolation=cv2.INTER_LINEAR)

        input_image = input_image.transpose((2, 0, 1))

        olist = self.module(nn.Tensor_from_value(input_image[None, ...]))

        olist = [x.np() for x in olist]

        detected_faces = []
        for ltrbc in self.refine(olist):
            l, t, r, b, c = ltrbc
            l, t, r, b = (x * input_scale for x in (l, t, r, b))

            bt = b - t
            if min(r - l, bt) < min_face_size:
                continue
            b += bt * 0.1
            detected_faces.append((
                int(l),
                int(t),
                int(r),
                int(b),
                int(c * 100),
            ))

        # sort by largest area first
        detected_faces = [[(l, t, r, b, c), (r - l) * (b - t)]
                          for (l, t, r, b, c) in detected_faces]
        detected_faces = sorted(detected_faces,
                                key=operator.itemgetter(1),
                                reverse=True)
        detected_faces = [x[0] for x in detected_faces]

        if is_remove_intersects:
            for i in range(len(detected_faces) - 1, 0, -1):
                l1, t1, r1, b1, _ = detected_faces[i]
                l0, t0, r0, b0, _ = detected_faces[i - 1]

                dx = min(r0, r1) - max(l0, l1)
                dy = min(b0, b1) - max(t0, t1)
                if (dx >= 0) and (dy >= 0):
                    detected_faces.pop(i)

        return detected_faces
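A minimal usage sketch (the detector instance and the input image are illustrative assumptions):

image = cv2.imread('photo.jpg')   # BGR uint8, shape (H, W, 3)
for l, t, r, b, c in detector.extract(image, is_bgr=True):
    print(f'face at ({l},{t})-({r},{b}), confidence {c}%')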
Example #20
def slice_test():
    for iteration in range(10):
        for shape_len in range(5, 1, -1):
            try:
                while True:
                    shape = np.random.randint(1, 8, size=(shape_len, ))

                    if iteration == 0:
                        slices = [
                            slice_cls(None, None, None),
                        ] * shape_len
                        axis = np.random.randint(shape_len)
                        shape[axis] = 1
                        slices[axis] = 0
                    else:
                        slices = []
                        for i in range(shape_len):
                            axis_size = shape[i]
                            if np.random.randint(2) == 0:
                                v = axis_size - np.random.randint(
                                    axis_size * 2) - 1
                                slices.append(v)
                            else:
                                b = None if np.random.randint(
                                    2) == 0 else axis_size - np.random.randint(
                                        axis_size * 2)
                                e = None if np.random.randint(
                                    2) == 0 else axis_size - np.random.randint(
                                        axis_size * 2)
                                s = 1 if np.random.randint(2) == 0 else -1

                                slices.append(slice_cls(b, e, s))

                        if np.random.randint(2) == 0:
                            axis = np.random.randint(shape_len)
                            slices[axis] = Ellipsis

                    shape = tuple(shape)
                    slices = tuple(slices)

                    val_n = np.random.randint(2**8,
                                              size=shape).astype(np.float32)

                    sliced_n = val_n[slices]
                    val_t = nn.Tensor_from_value(val_n)
                    sliced_t = val_t[slices]

                    if 0 in sliced_n.shape:
                        # slices like 0:1:-1 produce a zero-sized, invalid array in numpy,
                        # but nn.slice has no such behaviour, so generate a new slice
                        continue

                    if np.prod(sliced_n.shape) != sliced_t.shape.size:
                        raise Exception(f'shape is not equal')

                    if not all(
                            np.ndarray.flatten(np.array(sliced_t.np())) ==
                            np.ndarray.flatten(np.array(sliced_n))):
                        raise Exception(f'data is not equal')

                    sliced_n_grad = np.random.randint(
                        2**8, size=sliced_n.shape).astype(np.float32)

                    val_t.get_grad().fill(1.0)
                    nn.backward({sliced_t: sliced_n_grad},
                                grad_for_non_trainables=True)
                    sliced_t_grad = np.array(val_t.get_grad().np()[slices])

                    if not all(
                            np.ndarray.flatten(np.array([sliced_t_grad - 1.0]))
                            == np.ndarray.flatten(np.array([sliced_n_grad]))):
                        raise Exception(f'grad is not equal')

                    break
            except:
                raise Exception(f"""
shape          : {shape}
slices         : {slices}
sliced_n_shape : {sliced_n.shape}
sliced_t_shape : {sliced_t.shape}
exception      : {traceback.format_exc()}
""")
Example #21
def matmul_test():
    for _ in range(10):
        try:
            BATCH = np.random.randint(8) + 1
            M = np.random.randint(8) + 1
            N = np.random.randint(32768) + 1
            K = np.random.randint(32768) + 1

            while K * N > (8000000 // BATCH):
                K = max(1, K // 2)
                N = max(1, N // 2)

            if np.random.randint(2) == 0:
                size = [2, 4, 8, 16][np.random.randint(4)]
                M = max(1, M // size) * size
                N = max(1, N // size) * size
                K = max(1, K // size) * size

            if BATCH == 1:
                A_shape = (M, K)
                B_shape = (K, N)
            else:
                A_shape = (BATCH, M, K)
                B_shape = (BATCH, K, N)

            A_n = np.random.randint(2**4, size=A_shape).astype(np.float32)
            B_n = np.random.randint(2**4, size=B_shape).astype(np.float32)

            O_n = np.matmul(A_n, B_n)

            A_t = nn.Tensor_from_value(A_n)
            B_t = nn.Tensor_from_value(B_n)
            O_t = nn.matmul(A_t, B_t)
            if O_n.shape != O_t.shape:
                raise Exception('shape is not equal')
            if not all(np.ndarray.flatten(O_t.np() == O_n)):
                raise Exception(f'data is not equal')

            O_n_grad = np.random.randint(2**3,
                                         size=O_n.shape).astype(np.float32)

            b_n_axes = tuple(np.arange(len(B_n.shape)))
            B_n_T = B_n.transpose(b_n_axes[:-2] + b_n_axes[-1:] +
                                  b_n_axes[-2:-1])

            a_n_axes = tuple(np.arange(len(A_n.shape)))
            A_n_T = A_n.transpose(a_n_axes[:-2] + a_n_axes[-1:] +
                                  a_n_axes[-2:-1])

            A_n_grad = np.matmul(O_n_grad, B_n_T)
            B_n_grad = np.matmul(A_n_T, O_n_grad)

            A_t.get_grad().fill(1.0)
            B_t.get_grad().fill(1.0)

            nn.backward({O_t: O_n_grad}, grad_for_non_trainables=True)

            if not all(
                    np.ndarray.flatten(
                        (A_t.get_grad().np() - 1.0) == A_n_grad)):
                raise Exception(f'dA is not equal')

            if not all(
                    np.ndarray.flatten(
                        (B_t.get_grad().np() - 1.0) == B_n_grad)):
                raise Exception(f'dB is not equal')
        except:
            raise Exception(f"""
M  : {M}
N  : {N}
K  : {K}
O_n.shape  : {O_n.shape}
O_t.shape  : {O_t.shape}
{traceback.format_exc()}
""")
Example #22
def dual_wise_op_test():
    for op in [
            add, binary_crossentropy, categorical_crossentropy, sub, max, min,
            mul, div
    ]:
        print(f'{op.__name__}()')
        for _ in range(10):
            if op == categorical_crossentropy:
                shape_gen = [2]
            else:
                shape_gen = range(1, 5)

            for shape_len in shape_gen:
                try:
                    a_shape = tuple(
                        np.random.randint(8, size=(shape_len, )) + 1)

                    if op == categorical_crossentropy:
                        b_shape = a_shape
                    else:
                        if np.random.randint(2) == 0:
                            b_shape = tuple(
                                a_shape[np.random.randint(len(a_shape)):])
                            b_shape = (1, ) if len(b_shape) == 0 else b_shape
                        else:
                            b_shape = list(a_shape)
                            b_shape[np.random.randint(len(b_shape))] = 1
                            b_shape = tuple(b_shape)

                        shapes = [a_shape, b_shape]
                        if np.random.randint(2) == 0:
                            shapes = shapes[::-1]
                        a_shape, b_shape = shapes

                    a_n = np.random.randint(1, 2**8,
                                            size=a_shape).astype(np.float32)
                    b_n = np.random.randint(1, 2**8,
                                            size=b_shape).astype(np.float32)
                    a_t = nn.Tensor_from_value(a_n)
                    b_t = nn.Tensor_from_value(b_n)
                    r_t = op(a_t, b_t)

                    r_n_grad = np.random.randint(2**8, size=r_t.shape).astype(
                        np.float32)

                    a_t.get_grad().fill(1.0)
                    b_t.get_grad().fill(1.0)
                    nn.backward({r_t: r_n_grad}, grad_for_non_trainables=True)

                    if op == div:
                        # Validate output data and gradients only for div
                        r_n = a_n / b_n

                        if r_n.shape != r_t.shape:
                            raise Exception(f'shapes are not equal')
                        if np.abs(np.sum(
                            (np.ndarray.flatten(r_t.np() - r_n)))) > 1:
                            raise Exception(f'data is not equal')

                        info = nc.info.InfoBroadcast(nc.TensorShape(a_shape),
                                                     nc.TensorShape(b_shape))

                        a_n_grad = r_n_grad / b_n

                        axes = info.a_shape_reduction_axes
                        if axes.rank == 0:
                            a_n_grad = a_n_grad.reshape(a_n.shape)
                        else:
                            a_n_grad = a_n_grad.sum(tuple(axes), keepdims=True)

                        b_n_grad = r_n_grad * (-a_n / (b_n**2))

                        axes = info.b_shape_reduction_axes
                        if axes.rank == 0:
                            b_n_grad = b_n_grad.reshape(b_n.shape)
                        else:
                            b_n_grad = b_n_grad.sum(tuple(axes), keepdims=True)

                        if np.abs(
                                np.sum(
                                    (np.ndarray.flatten(a_t.get_grad().np() -
                                                        1.0 - a_n_grad)))) > 1:
                            raise Exception(f'grad A is not equal')
                        if np.abs(
                                np.sum(
                                    (np.ndarray.flatten(b_t.get_grad().np() -
                                                        1.0 - b_n_grad)))) > 1:
                            raise Exception(f'grad B is not equal')
                    else:
                        if not a_t.has_grad():
                            raise Exception(f'a_t has no grad')
                        if not b_t.has_grad():
                            raise Exception(f'b_t has no grad')

                except:
                    raise Exception(f"""
op        : {op}
a_shape   : {a_shape}
b_shape   : {b_shape}
r_t_shape : {r_t.shape}
exception : {traceback.format_exc() }
""")