Example #1
def layer_factory_deconv(channel_list=[512, 256]):
    layers = []
    layers.append(
        rm.Conv2d(channel=channel_list[0],
                  padding=1,
                  filter=3,
                  initializer=GlorotUniform()))
    layers.append(rm.Relu())
    # use renom's Deconv2d when it accepts ceil_mode; otherwise fall back to the
    # locally patched Deconv2d (assumed to be imported elsewhere in this module)
    if 'ceil_mode' in inspect.signature(rm.Deconv2d).parameters:
        layers.append(
            rm.Deconv2d(channel=channel_list[1],
                        padding=1,
                        filter=3,
                        stride=2,
                        initializer=GlorotUniform(),
                        ceil_mode=True))
    else:
        layers.append(
            Deconv2d(channel=channel_list[1],
                     padding=1,
                     filter=3,
                     stride=2,
                     initializer=GlorotUniform(),
                     ceil_mode=True))
    layers.append(rm.Relu())
    return rm.Sequential(layers)
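A quick smoke test of the factory (a sketch; it assumes renom is importable and only checks that the stride-2 deconvolution roughly doubles the spatial size):

import numpy as np
import renom as rm

block = layer_factory_deconv([512, 256])
x = np.zeros((1, 3, 14, 14), dtype=np.float32)
y = block(x)
# the padded 3x3 conv keeps 14x14; the deconvolution yields 27x27,
# or 28x28 when ceil_mode rounds up
print(y.shape)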
Example #2
    def __init__(self, num_class):
        self.base1 = rm.Sequential([
            InceptionV2Stem(),
            InceptionV2BlockA([64, 48, 64, 64, 96, 32]),
            InceptionV2BlockA(),
            InceptionV2BlockA(),
            InceptionV2BlockB(),
            InceptionV2BlockC([192, 128, 192, 128, 192, 192]),
            InceptionV2BlockC(),
            InceptionV2BlockC(),
            InceptionV2BlockC()])
        self.aux1 = rm.Sequential([
            rm.AveragePool2d(filter=5, stride=3),
            rm.Conv2d(128, filter=1),
            rm.Relu(),
            rm.Conv2d(768, filter=1),
            rm.Relu(),
            rm.Flatten(),
            rm.Dense(num_class)])

        self.base2 = rm.Sequential([
            InceptionV2BlockD(),
            InceptionV2BlockE(),
            InceptionV2BlockE(),
            rm.AveragePool2d(filter=8),
            rm.Flatten()])

        self.aux2 = rm.Dense(num_class)
Example #3
import numpy as np
import scipy.io
import renom as rm
from renom import Adam
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix


def main():
    mat = scipy.io.loadmat("letter.mat")
    X = mat["X"]
    y = mat["y"]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

    sequential = rm.Sequential([
        rm.Dense(32),
        rm.Relu(),
        rm.Dense(16),
        rm.Relu(),
        rm.Dense(1)
    ])
    batch_size = 128
    epoch = 500
    N = len(X_train)
    optimizer = Adam()
    for i in range(epoch):
        perm = np.random.permutation(N)
        loss = 0
        for j in range(0, N//batch_size):
            train_batch = X_train[perm[j*batch_size:(j+1)*batch_size]]
            response_batch = y_train[perm[j*batch_size:(j+1)*batch_size]]
            with sequential.train():
                l = rm.sgce(sequential(train_batch), response_batch)
            grad = l.grad()
            grad.update(optimizer)
            loss += l.as_ndarray()
        train_loss = loss / (N // batch_size)
        test_loss = rm.sgce(sequential(X_test), y_test).as_ndarray()
        print("epoch:{:03d}, train_loss:{:.4f}, test_loss:{:.4f}".format(i, float(train_loss), float(test_loss)))
    # the single-unit output was trained with sigmoid cross-entropy (rm.sgce),
    # so threshold the sigmoid score instead of taking an argmax over one column
    predictions = (rm.sigmoid(sequential(X_test)).as_ndarray() > 0.5).astype(int).ravel()
    print(confusion_matrix(y_test.ravel(), predictions))
    print(classification_report(y_test.ravel(), predictions))
Example #4
 def __init__(
         self,
         input_shape,  #(batch_size, input_size)
         latent_dim=2,
         epoch=5,
         units=1000,
         pre=None,
         dec=None,
         lr_ch=(5, 1.1),
         modeldir='model',
         outdir='result',
         cmap=plt.get_cmap('viridis'),
 ):
     self.input_shape = input_shape
     self.latent_dim = latent_dim
     self.epoch = epoch
     self.lr_ch = lr_ch
     self.shot = epoch // lr_ch[0]
     self.modeldir = modeldir
     self.outdir = outdir
     if not pre:
         pre = rm.Sequential([rm.Dense(units), rm.Relu()])
     # build the encoder from `pre` whether it was supplied or defaulted
     enc = Enc(pre, latent_dim)
     if not dec:
         dec = rm.Sequential([
             rm.Dense(units),
             rm.Relu(),
             rm.Dense(input_shape[-1]),
             rm.Sigmoid()
         ])
     self.ae = VAE(enc, dec, latent_dim)
     self.cmap = cmap
Example #5
def conv_block(growth_rate):
    return rm.Sequential([
        rm.BatchNormalize(epsilon=0.001, mode='feature'),
        rm.Relu(),
        rm.Conv2d(growth_rate * 4, 1, padding=0),
        rm.BatchNormalize(epsilon=0.001, mode='feature'),
        rm.Relu(),
        rm.Conv2d(growth_rate, 3, padding=1),
    ])
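For context, a DenseNet dense block chains several of these bottleneck blocks and concatenates each block's output onto its input along the channel axis. A minimal sketch, assuming rm.concat joins tensors on the channel dimension:

blocks = [conv_block(32) for _ in range(4)]

def dense_block(x):
    # each block contributes growth_rate (= 32) new feature maps to the running tensor
    for b in blocks:
        x = rm.concat(x, b(x))
    return x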
Example #6
def layer_factory(channel=64, conv_layer_num=2, first=None):
    layers = []
    for _ in range(conv_layer_num):
        if first is not None:
            # FCN-style trick: pad the first convolution heavily (padding=100) so
            # that later layers can crop back to the full input extent
            layers.append(rm.Conv2d(channel=channel, padding=100, filter=3))
            layers.append(rm.Relu())
            first = None
        else:
            layers.append(rm.Conv2d(channel=channel, padding=1, filter=3))
            layers.append(rm.Relu())
    # MaxPool2d (no rm. prefix) is assumed to be a local pooling class that accepts
    # ceil_mode, which renom's own rm.MaxPool2d may not support
    layers.append(MaxPool2d(filter=2, stride=2, ceil_mode=True))
    return rm.Sequential(layers)
Example #7
def layer_factory(channel=32, conv_layer_num=2):
    layers = []
    for _ in range(conv_layer_num):
        layers.append(rm.Conv2d(channel=channel, padding=1, filter=3))
        layers.append(rm.Relu())
    layers.append(rm.MaxPool2d(filter=2, stride=2))
    return rm.Sequential(layers)
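Stacking the factory gives a small VGG-style feature extractor in which the channel count grows while each max-pooling halves the resolution; a sketch:

features = rm.Sequential([
    layer_factory(channel=32, conv_layer_num=2),
    layer_factory(channel=64, conv_layer_num=2),
    layer_factory(channel=128, conv_layer_num=3),
])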
Example #8
 def __init__(self, num_classes, block, layers, cardinality):
     self.inplanes = 128
     self.cardinality = cardinality
     super(ResNeXt, self).__init__()
     self.conv1 = rm.Conv2d(64,
                            filter=7,
                            stride=2,
                            padding=3,
                            ignore_bias=True)
     self.bn1 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
     self.relu = rm.Relu()
     self.maxpool = rm.MaxPool2d(filter=3, stride=2, padding=1)
     self.layer1 = self._make_layer(block,
                                    128,
                                    layers[0],
                                    stride=1,
                                    cardinality=self.cardinality)
     self.layer2 = self._make_layer(block,
                                    256,
                                    layers[1],
                                    stride=2,
                                    cardinality=self.cardinality)
     self.layer3 = self._make_layer(block,
                                    512,
                                    layers[2],
                                    stride=2,
                                    cardinality=self.cardinality)
     self.layer4 = self._make_layer(block,
                                    1024,
                                    layers[3],
                                    stride=2,
                                    cardinality=self.cardinality)
     self.flat = rm.Flatten()
     self.fc = rm.Dense(num_classes)
Example #9
    def _gen_model(self):
        N = self.batch

        input_shape = self.arch['input_shape']
        output_shape = self.arch['output_shape']
        if 'debug' in self.arch.keys():
            debug = self.arch['debug']
        else:
            debug = False

        self.batch_input_shape = self.get_shape(N, input_shape)
        self.batch_output_shape = self.get_shape(N, output_shape)
        depth = self.arch['depth']
        unit = self.arch['unit']

        units = np.ones(depth + 1) * unit
        _unit = np.prod(output_shape)
        units[-1] = _unit
        units = units.astype('int')
        layer = [rm.Flatten()]
        for _unit in units:
            layer.append(rm.BatchNormalize())
            layer.append(rm.Relu())
            layer.append(rm.Dense(_unit))
        #layer = layer[:-1] + [rm.Dropout()] + [layer[-1]]
        self.fcnn = rm.Sequential(layer)

        if debug:
            x = np.zeros(self.batch_input_shape)
            for _layer in layer:
                x = _layer(x)
                print(x.shape, _layer.__class__.__name__)
            x = rm.reshape(x, self.batch_output_shape)
            print(x.shape)
Example #10
def transition_layer(growth_rate):
    return rm.Sequential([
        rm.BatchNormalize(epsilon=0.001, mode='feature'),
        rm.Relu(),
        rm.Conv2d(growth_rate, filter=1, padding=0, stride=1),
        rm.AveragePool2d(filter=2, stride=2)
    ])
Example #11
    def __init__(self):
        self.d1 = rm.Dense(32)
        self.d2 = rm.Dense(32)
        self.d3 = rm.Dense(32)
        self.d4 = rm.Dense(1)

        self.emb = rm.Embedding(32, 6)
        self.ad1 = rm.Dense(32)
        self.r = rm.Relu()
Example #12
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(planes, stride)
     self.bn1 = rm.BatchNormalize(mode='feature')
     self.relu = rm.Relu()
     self.conv2 = conv3x3(planes)
     self.bn2 = rm.BatchNormalize(mode='feature')
     self.downsample = downsample
     self.stride = stride
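The snippet shows only the constructor; the usual ResNet forward wiring for such a block, as a sketch (the real class may differ), is:

 def forward(self, x):
     identity = self.downsample(x) if self.downsample is not None else x
     out = self.relu(self.bn1(self.conv1(x)))
     out = self.bn2(self.conv2(out))
     # residual connection: add back the (possibly downsampled) input
     return self.relu(out + identity)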
Example #13
def layer_factory(channel_list=[64]):
    layers = []
    for i in range(len(channel_list)):
        layers.append(
            rm.Conv2d(channel=channel_list[i],
                      padding=1,
                      filter=3,
                      initializer=GlorotUniform()))
        layers.append(rm.Relu())
    return rm.Sequential(layers)
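Unlike the earlier factories, this one emits only conv+ReLU pairs, so any pooling is left to the caller; for example (a sketch):

backbone = rm.Sequential([
    layer_factory([64, 64]),
    rm.MaxPool2d(filter=2, stride=2),
    layer_factory([128, 128]),
    rm.MaxPool2d(filter=2, stride=2),
])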
Example #14
 def _gen_model(self):
     depth = self.arch['depth']
     unit = self.arch['unit']
     # excluding mini-batch size
     input_shape = self.arch['input_shape']
     output_shape = self.arch['output_shape']
     seq = []
     for i in range(depth):
         seq.append(rm.Dense(unit))
         seq.append(rm.Relu())
         if i < 1 or i == depth - 1:
             seq.append(rm.BatchNormalize())
     seq.append(rm.Dense(output_shape))
     self._model = rm.Sequential(seq)
Example #15
    def __init__(self, num_class):
        self.base1 = rm.Sequential([rm.Conv2d(64, filter=7, padding=3, stride=2),
                                    rm.Relu(),
                                    rm.MaxPool2d(filter=3, stride=2, padding=1),
                                    rm.BatchNormalize(mode='feature'),
                                    rm.Conv2d(64, filter=1, stride=1),
                                    rm.Relu(),
                                    rm.Conv2d(192, filter=3, padding=1, stride=1),
                                    rm.Relu(),
                                    rm.BatchNormalize(mode='feature'),
                                    rm.MaxPool2d(filter=3, stride=2, padding=1),
                                    InceptionV1Block(),
                                    InceptionV1Block([128, 128, 192, 32, 96, 64]),
                                    rm.MaxPool2d(filter=3, stride=2),
                                    InceptionV1Block([192, 96, 208, 16, 48, 64]),
                                    ])

        self.aux1 = rm.Sequential([rm.AveragePool2d(filter=5, stride=3),
                                   rm.Flatten(),
                                   rm.Dense(1024),
                                   rm.Dense(num_class)])

        self.base2 = rm.Sequential([InceptionV1Block([160, 112, 224, 24, 64, 64]),
                                    InceptionV1Block([128, 128, 256, 24, 64, 64]),
                                    InceptionV1Block([112, 144, 288, 32, 64, 64])])

        self.aux2 = rm.Sequential([rm.AveragePool2d(filter=5, stride=3),
                                   rm.Flatten(),
                                   rm.Dense(1024),
                                   rm.Dense(num_class)])

        self.base3 = rm.Sequential([InceptionV1Block([256, 160, 320, 32, 128, 128]),
                                    InceptionV1Block([256, 160, 320, 32, 128, 128]),
                                    InceptionV1Block([192, 384, 320, 48, 128, 128]),
                                    rm.AveragePool2d(filter=7, stride=1),
                                    rm.Flatten()])
        self.aux3 = rm.Dense(num_class)
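A plausible forward pass wiring the three heads together (a sketch; in GoogLeNet the two auxiliary classifiers are used only at training time, with down-weighted losses):

    def forward(self, x):
        t = self.base1(x)
        out1 = self.aux1(t)   # first auxiliary classifier
        t = self.base2(t)
        out2 = self.aux2(t)   # second auxiliary classifier
        t = self.base3(t)
        out3 = self.aux3(t)   # main classifier
        return out3, out1, out2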
Example #16
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = rm.Conv2d(planes, filter=1, ignore_bias=True)
     self.bn1 = rm.BatchNormalize(mode='feature')
     self.conv2 = rm.Conv2d(planes,
                            filter=3,
                            stride=stride,
                            padding=1,
                            ignore_bias=True)
     self.bn2 = rm.BatchNormalize(mode='feature')
     self.conv3 = rm.Conv2d(planes * self.expansion,
                            filter=1,
                            ignore_bias=True)
     self.bn3 = rm.BatchNormalize(mode='feature')
     self.relu = rm.Relu()
     self.downsample = downsample
     self.stride = stride
Example #17
def test_relu(tmpdir):
    model = rm.Sequential([rm.Relu()])

    x = renom.Variable(np.random.random((10, 10, 10, 10)))
    m = _run_onnx(tmpdir, model, x)

    assert m.graph.node[0].op_type == 'Relu'

    # check input
    id_input, = m.graph.node[0].input
    assert renom.utility.onnx.OBJNAMES[id(x)] == id_input
    assert id_input == m.graph.input[0].name
    assert get_shape(m.graph.input[0]) == x.shape

    # check output
    id_output, = m.graph.node[0].output
    assert id_output == m.graph.output[0].name
    assert get_shape(m.graph.output[0]) == x.shape
Example #18
 def __init__(self, planes, stride=1, downsample=None, cardinality=32):
     super(Bottleneck, self).__init__()
     self.cardinality = cardinality
     self.conv1 = rm.Conv2d(planes, filter=1, ignore_bias=True)
     self.bn1 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
     self.conv2 = rm.GroupConv2d(planes,
                                 filter=3,
                                 stride=stride,
                                 padding=1,
                                 ignore_bias=True,
                                 groups=self.cardinality)
     self.bn2 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
     self.conv3 = rm.Conv2d(planes * self.expansion,
                            filter=1,
                            ignore_bias=True)
     self.bn3 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
     self.relu = rm.Relu()
     self.downsample = downsample
     self.stride = stride
Example #19
def exp_convolution1():
    np.random.seed(10)
    # CUDNN_CONVOLUTION_FWD_ALGO_GEMM is non-deterministic, so the final loss
    # differs slightly between devices:
    #   1724.07080078 GPU
    #   1715.86767578 CPU
    cuda.set_cuda_active(True)
    a = np.random.randn(8 * 2, 64, 32, 32).astype(np.float32)
    b = np.random.randn(8 * 2, 32, 28, 28).astype(np.float32)
    layer1 = rm.Conv2d(channel=32, input_size=a.shape[1:])
    layer2 = rm.Conv2d(channel=32, input_size=(32, 30, 30))

    ga = rm.Variable(a, auto_update=False)
    gb = rm.Variable(b, auto_update=False)

    opt = Sgd(0.0001, momentum=0.0)
    start_t = time.time()
    for _ in range(100):
        loss = rm.Sum((layer2(rm.Relu(layer1(ga))) - gb)**2) / 8
        loss.ensure_cpu()
        grad = loss.grad()
        grad.update(opt)
        print(loss)
    print(time.time() - start_t)
Example #20
              check_network=debug,
              growth_factor=1.2)
    enc = Enc(pre=pre, latent_dim=latent_dim)
    dec = Dec2d(
        input_params=latent_dim,
        first_shape=pre.output_shape,
        output_shape=batch_shape,
        check_network=debug,
    )
else:  # fully connected network
    batch_shape = (batch_size, 28 * 28)
    x_train = x_train.reshape(-1, 28 * 28)
    x_test = x_test.reshape(-1, 28 * 28)
    enc_pre = rm.Sequential([
        rm.Dense(hidden),
        rm.Relu(),
        rm.BatchNormalize(),
        rm.Dense(hidden, initializer=Uniform()),
        rm.Relu()
    ])
    enc = Enc(enc_pre, latent_dim)
    dec = rm.Sequential([
        rm.Dense(hidden),
        rm.Relu(),
        rm.BatchNormalize(),
        rm.Dense(28 * 28),
        rm.Sigmoid(),
    ])
vae = VAE(enc, dec, latent_dim)
optimizer = rm.Adam()
Example #21
import re

import numpy as np
import pandas as pd
import renom as rm
from renom import Adam
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix


def main():
    df = pd.read_csv("crx.data", header=None, index_col=None)
    df = df.applymap(lambda d: np.nan if d == "?" else d)
    df = df.dropna(axis=0)
    sr_labels = df.iloc[:, -1]
    # map the "+"/"-" class labels to 1/0 (exact-value replace avoids treating
    # "+" as a regex, which str.replace would)
    labels = sr_labels.replace({"+": "1", "-": "0"}).values.astype(float)
    data = df.iloc[:, :-1].values.astype(str)

    pattern_continuous = re.compile(r"^\d+\.?\d*\Z")
    continuous_idx = {}
    for i in range(data.shape[1]):
        is_continuous = bool(pattern_continuous.match(data[0][i]))
        if is_continuous and i == 0:
            # keep X two-dimensional so the later axis=1 concatenations work
            X = data[:, i].astype(float).reshape(-1, 1)
        elif not is_continuous and i == 0:
            X = pd.get_dummies(data[:, i]).values.astype(float)
        elif is_continuous and i != 0:
            X = np.concatenate((X, data[:, i].reshape(-1, 1).astype(float)),
                               axis=1)
        elif not is_continuous and i != 0:
            X = np.concatenate(
                (X, pd.get_dummies(data[:, i]).values.astype(float)), axis=1)
    print("X:{X.shape}, y:{labels.shape}".format(**locals()))

    y = labels.reshape(-1, 1)

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.3,
                                                        random_state=0)

    sequential = rm.Sequential(
        [rm.Dense(46),
         rm.Relu(),
         rm.Dense(16),
         rm.Relu(),
         rm.Dense(1)])
    batch_size = 128
    epoch = 50
    N = len(X_train)
    optimizer = Adam()
    for i in range(epoch):
        perm = np.random.permutation(N)
        loss = 0
        for j in range(0, N // batch_size):
            train_batch = X_train[perm[j * batch_size:(j + 1) * batch_size]]
            response_batch = y_train[perm[j * batch_size:(j + 1) * batch_size]]
            with sequential.train():
                l = rm.sgce(sequential(train_batch), response_batch)
            grad = l.grad()
            grad.update(optimizer)
            loss += l.as_ndarray()
        train_loss = loss / (N // batch_size)
        test_loss = rm.sgce(sequential(X_test), y_test).as_ndarray()
        print("epoch:{:03d}, train_loss:{:.4f}, test_loss:{:.4f}".format(
            i, float(train_loss), float(test_loss)))
    # single sigmoid-cross-entropy output: threshold the sigmoid score at 0.5
    # rather than taking an argmax over one column
    predictions = (rm.sigmoid(sequential(X_test)).as_ndarray() > 0.5).astype(int).ravel()
    print(confusion_matrix(y_test.ravel(), predictions))
    print(classification_report(y_test.ravel(), predictions))
Example #22
    def __init__(
            self,
            input_params=32768,
            first_shape=(1, 32, 32, 32),
            output_shape=(1, 1, 64, 64),
            check_network=False,
            batchnormal=True,
            dropout=False,
            down_factor=1.6,
            act=rm.Relu(),
            last_act=rm.Sigmoid(),
    ):
        self.input_params = input_params
        self.latent_dim = input_params
        self.first_shape = first_shape
        self.output_shape = output_shape
        self.act = act
        self.last_act = last_act
        self.down_factor = down_factor

        def decide_factor(src, dst):
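            # how many stride-2 (doubling) steps take `dst` up to `src`:
            # ceil(log2(src / dst))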
            factor = np.log(src / dst) / np.log(2)
            if factor % 1 == 0:
                return factor
            return np.ceil(factor)

        ch = first_shape[1]
        v_factor = decide_factor(output_shape[2], first_shape[2])
        h_factor = decide_factor(output_shape[3], first_shape[3])
        v_dim, h_dim = first_shape[2], first_shape[3]
        parameters = []
        check_params = np.array(first_shape[1:]).prod()
        self.trans = False
        if input_params != check_params:
            if check_network:
                print('--- Decoder Network ---')
                print('inserting Dense({})'.format(check_params))
            self.trans = rm.Dense(check_params)
        while v_factor != 0 or h_factor != 0:
            if batchnormal:
                parameters.append(rm.BatchNormalize())
                if check_network:
                    print('BN ', end='')
            stride = (2 if v_factor > 0 else 1, 2 if h_factor > 0 else 1)
            if check_network:
                print('transpose2d ch={}, filter=2, stride={}'.format(
                    ch, stride))
            parameters.append(rm.Deconv2d(channel=ch, filter=2, stride=stride))
            if self.act:
                parameters.append(self.act)
            if ch > output_shape[1]:
                ch = int(np.ceil(ch / self.down_factor))
            v_dim = v_dim * 2 if v_factor > 0 else v_dim + 1
            h_dim = h_dim * 2 if h_factor > 0 else h_dim + 1
            v_factor = v_factor - 1 if v_factor > 0 else 0
            h_factor = h_factor - 1 if h_factor > 0 else 0
        if v_dim > output_shape[2] or h_dim > output_shape[3]:
            last_filter = (v_dim - output_shape[2] + 1,
                           h_dim - output_shape[3] + 1)
            if check_network:
                print('conv2d filter={}, stride=1'.format(last_filter))
            parameters.append(
                rm.Conv2d(channel=output_shape[1],
                          filter=last_filter,
                          stride=1))
        self.parameters = rm.Sequential(parameters)
        if check_network:
            self.forward(np.zeros((first_shape[0], input_params)),
                         print_parameter=True)
Example #23
    def __init__(
            self,
            nname='',
            batch_size=64,
            input_shape=(1, 28, 28),
            batchnormal=True,
            dropout=False,
            first_channel=8,
            growth_factor=2,
            repeats=2,
            tgt_dim=4,
            keep_vertical=False,
            check_network=False,
            act=rm.Relu(),
    ):
        self.input_shape = input_shape
        self.keep_v = keep_vertical
        self.dropout = dropout
        self.batchnormal = batchnormal
        self.act = act
        if check_network:
            print('--- {} Network ---'.format(nname))
        ch = first_channel
        in_ch, v_dim, h_dim = self.input_shape
        if check_network:
            print('Input {}'.format(input_shape))
        self.first = None

        def check_continue():
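            # keep halving until both spatial dims have shrunk to tgt_dim
            # (the vertical axis is exempt when keep_vertical is set)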
            v_deci = False if keep_vertical else v_dim > tgt_dim
            h_deci = h_dim > tgt_dim
            return v_deci or h_deci

        parameters = []
        repeat_cnt = 0
        while check_continue():
            if self.first and batchnormal:
                parameters.append(rm.BatchNormalize())
                if check_network:
                    print('BN ', end='')
            cnn_layer = rm.Conv2d(
                channel=ch,
                filter=3,
                padding=1,
            )
            if self.first:
                parameters.append(cnn_layer)
            else:
                self.first = cnn_layer
            if check_network:
                print('Conv2d -> {}'.format((batch_size, ch, v_dim, h_dim)))
            repeat_cnt += 1
            if repeat_cnt == repeats:
                repeat_cnt = 0
                ch = int(ch * growth_factor)
                stride = (1 if self.keep_v else 2, 2)
                parameters.append(
                    rm.Conv2d(
                        channel=ch,
                        filter=1,
                        stride=stride,
                    ))
                v_dim = v_dim if self.keep_v else int(np.ceil(v_dim / 2))
                h_dim = int(np.ceil(h_dim / 2))
                if check_network:
                    print('Conv2d -> {}'.format(
                        (batch_size, ch, v_dim, h_dim)))
        self.output_shape = (batch_size, ch, v_dim, h_dim)
        self.parameters = rm.Sequential(parameters)
        self.nb_parameters = ch * v_dim * h_dim
        if check_network:
            self.forward(
                np.zeros((batch_size, input_shape[0], input_shape[1],
                          input_shape[2])),
                check_network=True,
            )


def one_hot(data, size=11):
    temp = np.zeros((len(data), size))
    temp[np.arange(len(data)), data.reshape(-1)] = 1
    return temp
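
# e.g. one_hot(np.array([0, 2]), size=4) -> [[1., 0., 0., 0.], [0., 0., 1., 0.]]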


y_train_1 = one_hot(y_train)
idx = random.permutation(len(y_train))[10000:]
y_train_1[idx] = np.r_[np.zeros(10), np.ones(1)].reshape(1, 11)

# --- model configuration ---
enc_base = rm.Sequential([
    #rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.Relu(),  #rm.Dropout(),
    rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.Relu(),  #rm.Dropout(),
    # xxx rm.BatchNormalize(),
    # do not place a BatchNormalize at the end of the generator (Gen)
    rm.Dense(latent_dim, initializer=Uniform())
])
dec = rm.Sequential([
    #rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.LeakyRelu(),
    rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.LeakyRelu(),
    #rm.BatchNormalize(),
    rm.Dense(28 * 28),  # mirrors the fully connected VAE decoder above (MNIST-sized output)
    rm.Sigmoid(),
])

N = len(x_train)
for e in range(epoch):
    # only act on the last epoch of each shot period
    if e % shot_period != shot_period - 1:
        continue