import numpy as np    # every example below uses np and dlr_common
import dlr_common     # the dlr kernels (Linear1d, Concat2d, ...) are assumed to be in scope

def TestLinear1d(_dtype):
    """
    _dtype: specify data type of data, one of {np.int32, np.float32, np.float64}
    """
    in_size = 5
    in_data = np.empty([in_size], dtype=_dtype)
    out_size = 3
    out_data = np.empty([out_size], dtype=_dtype)
    weight_size = [out_size, in_size]
    weight = np.empty(weight_size, dtype=_dtype)
    bias = None  #bias = np.zeros([out_size], dtype=_dtype)
    v = 0
    for i in range(in_size):
        in_data[i] = v % 10
        v += 1
    for r in range(out_size):
        for c in range(in_size):  # ones on the diagonal, zeros elsewhere
            if (r == c): weight[r][c] = 1
            else:        weight[r][c] = 0
    if bias is not None:
        for b in range(out_size): bias[b] = 0

    status = Linear1d( out_data    # out_size
                     , in_data     # in_size
                     , weight      # out_size x in_size
                     , bias        # out_size
                     )
    if status:
        dlr_common.DlrPrint(f"in_data:\n{in_data}", flush=True)
        dlr_common.DlrPrint(f"weight:\n{weight}", flush=True)
        dlr_common.DlrPrint(f"bias:\n{bias}", flush=True)
        dlr_common.DlrPrint(f"out_data:\n{out_data}", flush=True)
def TestDeconvolution2d(_dtype):
    """
    _dtype: specify data type of data, one of {np.int32, np.float32, np.float64}
    """
    in_channel = 2
    in_size = 5
    in_data = np.empty([in_channel, in_size, in_size], dtype=_dtype)
    out_channel = 1
    kernel_size = 3
    kernel = np.empty([out_channel, in_channel, kernel_size, kernel_size],
                      dtype=_dtype)
    bias = None  #bias = np.zeros([out_channel], dtype=_dtype)
    stride = 1
    padding = 0
    status, out_size = GetOutputSizeOfDeconvolution2d(
        in_size, kernel_size, stride, padding)
    if not status: return
    # prepare arrays to pass to C function
    out_data = np.empty([out_channel, out_size, out_size], dtype=_dtype)
    v = 0
    for i in range(in_channel):
        for r in range(in_size):
            for c in range(in_size):
                in_data[i][r][c] = v % 10
                v += 1
    for o in range(out_channel):
        for i in range(in_channel):
            for r in range(kernel_size):
                for c in range(kernel_size):  # ones on the diagonal, zeros elsewhere
                    if (r == c): kernel[o][i][r][c] = 1
                    else:        kernel[o][i][r][c] = 0
    if bias is not None:
        for b in range(out_channel):
            bias[b] = 0

    status = Deconvolution2d( out_data  # out_channel x out_size x out_size
                            , in_data   # in_channel x in_size x in_size
                            , kernel    # out_channel x in_channel x kernel_size x kernel_size
                            , bias      # out_channel
                            , stride
                            , padding)
    if status:
        dlr_common.DlrPrint(f"in_data:\n{in_data}", flush=True)
        dlr_common.DlrPrint(f"kernel:\n{kernel}", flush=True)
        dlr_common.DlrPrint(f"bias:\n{bias}", flush=True)
        dlr_common.DlrPrint(f"out_data:\n{out_data}", flush=True)
def TestConcat2d(_dtype):
    """
    _dtype: specify data type of data, one of {np.int32, np.float32, np.float64}
    """
    dims = [0, 1]
    for dim in dims:
        if dim == 0:  # stack along rows: column counts must match
            in_rowsA = 3
            in_colsA = 4
            in_rowsB = 5
            in_colsB = in_colsA
            out_rows = in_rowsA + in_rowsB
            out_cols = in_colsA
        else:         # stack along columns: row counts must match
            in_rowsA = 3
            in_colsA = 4
            in_rowsB = in_rowsA
            in_colsB = 4
            out_rows = in_rowsA
            out_cols = in_colsA + in_colsB
        in_dataA = np.empty([in_rowsA, in_colsA], dtype=_dtype)
        in_dataB = np.empty([in_rowsB, in_colsB], dtype=_dtype)
        out_data = np.empty([out_rows, out_cols], dtype=_dtype)
        v = 0
        for r in range(in_rowsA):
            for c in range(in_colsA):
                in_dataA[r][c] = v
                v += 1
        for r in range(in_rowsB):
            for c in range(in_colsB):
                in_dataB[r][c] = v
                v -= 1

        print(f"in_dataA shape: {in_dataA.shape}")
        print(f"in_dataB shape: {in_dataB.shape}")
        print(f"out_data shape: {out_data.shape}")
        status = Concat2d( out_data
                         , in_dataA
                         , in_dataB
                         , dim
                         , rigor=True
                         , verbose=True)
        if status:
            dlr_common.DlrPrint(f"in_dataA:\n{in_dataA}")
            dlr_common.DlrPrint(f"in_dataB:\n{in_dataB}")
            dlr_common.DlrPrint(f"out_data:\n{out_data}")
def TestNorm3dBatch(_dtype):
    """
    _dtype: specify data type of data, one of {np.int32, np.float32, np.float64}
    """
    in_size = [1, 2, 3]  # [in_channel, ...]
    in_data = (100 + 100) * np.random.random(size=in_size) - 100  # uniform in [-100, 100)
    out_data = np.empty(in_size, dtype=_dtype)
    running_mean = (100 + 100) * np.random.random(size=(in_size[0])) - 100
    running_var = 100 * np.random.random(size=(in_size[0]))  # variance must be nonnegative
    scale = (10 + 10) * np.random.random(size=(in_size[0])) - 10
    bias = (10 + 10) * np.random.random(size=(in_size[0])) - 10
    if _dtype is np.int32:
        in_data = np.int32(in_data)
        running_mean = np.int32(running_mean)
        running_var = np.int32(running_var)
        scale = np.int32(scale)
        bias = np.int32(bias)
    epsilon = 1e-5
    status = Norm3dBatch(out_data,
                         in_data,
                         running_mean=running_mean,
                         running_var=running_var,
                         scale=scale,
                         bias=bias,
                         epsilon=epsilon,
                         rigor=True,
                         verbose=True)
    if status:
        dlr_common.DlrPrint(f"out_data:\n{out_data}", flush=True)
        dlr_common.DlrPrint(f"in_data:\n{in_data}", flush=True)
        dlr_common.DlrPrint(f"running_mean:\n{running_mean}", flush=True)
        dlr_common.DlrPrint(f"running_var:\n{running_var}", flush=True)
        dlr_common.DlrPrint(f"scale:\n{scale}", flush=True)
        dlr_common.DlrPrint(f"bias:\n{bias}", flush=True)
        dlr_common.DlrPrint(f"epsilon:\n{epsilon}", flush=True)
def TestPooling2dMax(_dtype):
    """
    _dtype: specify data type of data, one of {np.int32, np.float32, np.float64}
    """
    in_channel = 1
    in_size = 8
    out_channel = in_channel
    kernel_size = 2
    stride = 1
    padding = 0
    ceil_mode = False
    in_data = np.empty([in_channel, in_size, in_size], dtype=_dtype)
    status, out_size = GetOutputSizeOfPooling2dMax(in_size,
                                                   kernel_size,
                                                   stride,
                                                   padding,
                                                   rigor=True,
                                                   verbose=True)
    if not status: return
    # prepare arrays to pass to C function
    out_data = np.empty([out_channel, out_size, out_size], dtype=_dtype)
    v = 0
    for i in range(in_channel):
        for r in range(in_size):
            for c in range(in_size):
                in_data[i][r][c] = v
                v += 1

    status = Pooling2dMax( out_data     # out_channel x out_size x out_size
                         , in_data      # in_channel x in_size x in_size
                         , kernel_size  # scalar pooling window size
                         , stride
                         , padding
                         , ceil_mode
                         , rigor=True
                         , verbose=True)
    if status:
        dlr_common.DlrPrint(f"in_data:\n{in_data}")
        dlr_common.DlrPrint(f"out_data:\n{out_data}")
def TestActivations(_dtype):
    """
    _dtype: specify data type of data, one of {np.int32, np.float32, np.float64}
    """
    funcs = ["ReLu", "LeakyReLu", "Tanh", "Sigmoid"]
    for func in funcs:
        func_name = "Activation" + func
        dims = [1, 2]
        for dim in dims:
            ndim = np.random.randint(10, size=(dim))
            ndim += 1  # prevent 0 in dimension
            in_data = (100 + 100) * np.random.random(size=ndim) - 100
            if _dtype is np.int32:
                in_data = np.int32(in_data)
            out_data = np.empty(ndim, dtype=_dtype)
            #status = locals()[func_name]( out_data
            status = globals()[func_name](out_data,
                                          in_data,
                                          rigor=True,
                                          verbose=True)
            if status:
                dlr_common.DlrPrint(f"in_data\n{in_data}")
                dlr_common.DlrPrint(f"out_data\n{out_data}")
# Only the LinearNd call survived extraction here; the setup below is a
# minimal reconstruction modeled on TestLinear1d above (shapes are assumptions).
def TestLinearNd(_dtype):
    """
    _dtype: specify data type of data, one of {np.int32, np.float32, np.float64}
    """
    ndim, in_size, out_size = 2, 5, 3
    in_data = (np.arange(ndim * in_size).reshape(ndim, in_size) % 10).astype(_dtype)
    weight = np.eye(out_size, in_size, dtype=_dtype)  # ones on the diagonal
    bias = None  #bias = np.zeros([out_size], dtype=_dtype)
    out_data = np.empty([ndim, out_size], dtype=_dtype)

    status = LinearNd( out_data    # ndim x out_size
                     , in_data     # ndim x in_size
                     , weight      # out_size x in_size
                     , bias        # out_size
                     )
    if status:
        dlr_common.DlrPrint(f"in_data:\n{in_data}", flush=True)
        dlr_common.DlrPrint(f"weight:\n{weight}", flush=True)
        dlr_common.DlrPrint(f"bias:\n{bias}", flush=True)
        dlr_common.DlrPrint(f"out_data:\n{out_data}", flush=True)


#===============================================================================
if __name__ == '__main__':
    dlr_common.DlrPrint("Testing LinarNd", flush=True)
    dlr_common.DlrPrint("*********************", flush=True)
    TestLinearNd(_dtype=np.int32)
    TestLinearNd(_dtype=np.float32)
    TestLinearNd(_dtype=np.float64)

#===============================================================================
# Revision history:
#
# 2020.09.30: argument order of bias and bias_size changed
# 2020.04.25: Started by Ando Ki ([email protected])
#===============================================================================

if __name__ == '__main__':
    dlr_common.DlrPrint("Testing Concat2d", flush=True)
    dlr_common.DlrPrint("*********************", flush=True)
    TestConcat2d(_dtype=np.int32)
    #TestConcat2d(_dtype=np.float32)
    #TestConcat2d(_dtype=np.float64)

#===============================================================================
# Revision history:
#
# 2020.04.58: Started by Ando Ki ([email protected])
#===============================================================================


if __name__ == '__main__':
    dlr_common.DlrPrint("Testing Pooling2dMax", flush=True)
    dlr_common.DlrPrint("*********************", flush=True)
    TestPooling2dMax(_dtype=np.int32)
    #TestPooling2dMax(_dtype=np.float32)
    #TestPooling2dMax(_dtype=np.float64)

#===============================================================================
# Revision history:
#
# 2020.04.58: Started by Ando Ki ([email protected])
#===============================================================================


if __name__ == '__main__':
    dlr_common.DlrPrint("Testing Activations", flush=True)
    dlr_common.DlrPrint("*********************", flush=True)
    #TestActivations(_dtype=np.int32)
    TestActivations(_dtype=np.float32)
    #TestActivations(_dtype=np.float64)

#===============================================================================
# Revision history:
#
# 2020.04.58: Started by Ando Ki ([email protected])
#===============================================================================


#===============================================================================
if __name__ == '__main__':
    dlr_common.DlrPrint("Testing Norm2dBatch", flush=True)
    dlr_common.DlrPrint("*********************", flush=True)
    #TestNorm2dBatch(_dtype=np.int32)
    TestNorm2dBatch(_dtype=np.float32)
    #TestNorm2dBatch(_dtype=np.float64)

#===============================================================================
# Revision history:
#
# 2020.09.30: argument order of bias and bias_size changed
# 2020.04.25: Started by Ando Ki ([email protected])
#===============================================================================


#===============================================================================
if __name__ == '__main__':
    dlr_common.DlrPrint("Testing Deconvolution2d", flush=True)
    dlr_common.DlrPrint("*********************", flush=True)
    #TestDeconvolution2d(_dtype=np.int32)
    TestDeconvolution2d(_dtype=np.float32)
    #TestDeconvolution2d(_dtype=np.float64)

#===============================================================================
# Revision history:
#
# 2020.09.30: argument order of bias and bias_size changed
# 2020.04.25: Started by Ando Ki ([email protected])
#===============================================================================