Example #1
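All three examples below appear to assume a small numpy-based training framework whose import lines are not shown in the source: it provides Module, ConvLayer, FullyConnect, MaxPooling, BatchNorm, Logsoftmax, ReLU, and an Activators module (Square, LeakyReLU, Sigmoid_CE). Each layer object caches its input/output on forward and exposes a gradient method for the backward pass.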
class CryptoNet_fivelayer(Module):
    def __init__(self, in_dim, n_class):
        super(CryptoNet_fivelayer, self).__init__()

        # assuming 28*28 inputs: (N,1,28,28) -> (N,5,13,13), and 5*13*13 = 845 matches fc1 below
        self.conv = ConvLayer(in_dim, 5, 5, 5, zero_padding=1, stride=2, method='SAME')
        self.sq1 = Activators.Square()
        self.fc1 = FullyConnect(845, 100)
        self.sq2 = Activators.Square()
        self.fc2 = FullyConnect(100, n_class)
        self.logsoftmax = Logsoftmax()

    def forward(self, x):
        in_size = x.shape[0]
        out_1 = self.sq1.forward(self.conv.forward(x))
        self.conv_out_shape = out_1.shape
        out_1 = out_1.reshape(in_size, -1)  # flatten to one row per sample for the FC layers
        out_2 = self.sq2.forward(self.fc1.forward(out_1))
        out_3 = self.fc2.forward(out_2)
        
        out_logsoftmax = self.logsoftmax.forward(out_3)

        return out_logsoftmax
    
    def backward(self, dy):
        dy_logsoftmax = self.logsoftmax.gradient(dy)
        dy_f3 = self.fc2.gradient(dy_logsoftmax)
        dy_f2 = self.fc1.gradient(self.sq2.gradient(dy_f3))
        dy_f2 = dy_f2.reshape(self.conv_out_shape)
        self.conv.gradient(self.sq1.gradient(dy_f2))
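
A minimal usage sketch for this class, assuming the framework API noted above and NCHW numpy inputs; the batch size, the 28*28 input, and the dummy one-hot upstream gradient are illustrative assumptions, not part of the original:

import numpy as np

# Hypothetical smoke test: one forward/backward pass on random MNIST-shaped data.
net = CryptoNet_fivelayer(in_dim=1, n_class=10)
x = np.random.randn(4, 1, 28, 28)      # batch of 4, NCHW layout (assumed)
log_probs = net.forward(x)             # expected shape: (4, 10) log-probabilities
dy = np.zeros_like(log_probs)
dy[np.arange(4), 0] = 1.0              # dummy upstream gradient, one-hot per sample
net.backward(dy)                       # gradients are stored inside each layer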
Example #2
class Discriminator(Module):
    def __init__(self):
        super(Discriminator, self).__init__()
        # input: single-channel MNIST (nc = 1 assumed); the spatial sizes below
        # assume the 28*28 images are resized/padded to 32*32, since 28*28 would
        # not reach the 4*4 map that conv4's VALID window needs
        # 1*32*32 -> 64*16*16
        self.conv1 = ConvLayer(nc, ndf, 4, 4, zero_padding=1, stride=2, method='SAME', bias_required=False)
        self.lrelu1 = Activators.LeakyReLU(0.2)

        # 64*16*16 -> 128*8*8
        self.conv2 = ConvLayer(ndf, ndf*2, 4, 4, zero_padding=1, stride=2, method='SAME', bias_required=False)
        self.bn1 = BatchNorm(ndf*2)
        self.lrelu2 = Activators.LeakyReLU(0.2)

        # 128*8*8 -> 256*4*4
        self.conv3 = ConvLayer(ndf*2, ndf*4, 4, 4, zero_padding=1, stride=2, method='SAME', bias_required=False)
        self.bn2 = BatchNorm(ndf*4)
        self.lrelu3 = Activators.LeakyReLU(0.2)

        # 256*4*4 -> 1*1
        self.conv4 = ConvLayer(ndf*4, 1, 4, 4, zero_padding=0, stride=1, method='VALID', bias_required=False)
        self.sigmoid = Activators.Sigmoid_CE()

    def forward(self, x_input):
        l1 = self.lrelu1.forward(self.conv1.forward(x_input))

        l2 = self.lrelu2.forward(self.bn1.forward(self.conv2.forward(l1)))

        l3 = self.lrelu3.forward(self.bn2.forward(self.conv3.forward(l2)))
        
        l4 = self.conv4.forward(l3)
        output_sigmoid = self.sigmoid.forward(l4)
        return output_sigmoid
    
    def backward(self, dy):
        dy_sigmoid = self.sigmoid.gradient(dy)
        dy_l4 = self.conv4.gradient(dy_sigmoid)
        dy_l3 = self.conv3.gradient(self.bn2.gradient(self.lrelu3.gradient(dy_l4)))
        dy_l2 = self.conv2.gradient(self.bn1.gradient(self.lrelu2.gradient(dy_l3)))
        dy_l1 = self.conv1.gradient(self.lrelu1.gradient(dy_l2))
        return dy_l1
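
A usage sketch along the same lines, again assuming the framework API and NCHW numpy inputs; the values of nc and ndf, the 32*32 input size, and the dummy gradient are assumptions (32*32 matches the 16 -> 8 -> 4 -> 1 shape chain in the comments):

import numpy as np

nc, ndf = 1, 64                      # assumed globals the class reads: input channels, base width
D = Discriminator()
x = np.random.randn(4, nc, 32, 32)   # batch of 4; 32*32 so conv4's 4*4 VALID window fits
scores = D.forward(x)                # sigmoid output, one score per image
dy = scores - 1.0                    # dummy upstream gradient (real-label BCE term, assumed)
dx = D.backward(dy)                  # gradient w.r.t. the input, same shape as x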
Example #3
class Lenet_numpy(Module):
    def __init__(self, in_dim, n_class):
        super(Lenet_numpy, self).__init__()
        self.conv1 = ConvLayer(in_dim, 6, 5, 5, zero_padding=2, stride=1, method='SAME')
        self.conv2 = ConvLayer(6, 16, 5, 5, zero_padding=0, stride=1, method='VALID')
        self.conv3 = ConvLayer(16, 120, 5, 5, zero_padding=0, stride=1, method='VALID')

        self.maxpool1 = MaxPooling(pool_shape=(2,2), stride=(2,2))
        self.maxpool2 = MaxPooling(pool_shape=(2,2), stride=(2,2))
        self.relu1 = ReLU()
        self.relu2 = ReLU()
        self.relu3 = ReLU()
        self.relu4 = ReLU()
        self.fc1 = FullyConnect(120, 84)
        self.fc2 = FullyConnect(84, n_class)
        self.logsoftmax = Logsoftmax()

    # Note: a layer object must not be reused within one pass, because each object
    # caches its own input and output; reuse would corrupt the backward pass
    # (hence the separate relu1..relu4 and maxpool1/maxpool2 instances above).
    def forward(self, x):
        in_size = x.shape[0]
        out_c1s2 = self.relu1.forward(self.maxpool1.forward(self.conv1.forward(x)))
        out_c3s4 = self.relu2.forward(self.maxpool2.forward(self.conv2.forward(out_c1s2)))
        out_c5 = self.relu3.forward(self.conv3.forward(out_c3s4))
        self.conv_out_shape = out_c5.shape

        out_c5 = out_c5.reshape(in_size, -1)
        out_f6 = self.relu4.forward(self.fc1.forward(out_c5))
        out_f7 = self.fc2.forward(out_f6)
        out_logsoftmax = self.logsoftmax.forward(out_f7)

        return out_logsoftmax
    
    def backward(self, dy):
        dy_logsoftmax = self.logsoftmax.gradient(dy)
        dy_f7 = self.fc2.gradient(dy_logsoftmax)
        dy_f6 = self.fc1.gradient(self.relu4.gradient(dy_f7))

        dy_f6 = dy_f6.reshape(self.conv_out_shape)

        dy_c5 = self.conv3.gradient(self.relu3.gradient(dy_f6))
        dy_c3f4 = self.conv2.gradient(self.maxpool2.gradient(self.relu2.gradient(dy_c5)))
        self.conv1.gradient(self.maxpool1.gradient(self.relu1.gradient(dy_c3f4)))
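
And a matching sketch for the LeNet variant; with 28*28 single-channel inputs the conv/pool chain ends at 120*1*1, which is consistent with fc1's 120-unit input (batch size, class count, and the dummy gradient are illustrative assumptions):

import numpy as np

net = Lenet_numpy(in_dim=1, n_class=10)
x = np.random.randn(8, 1, 28, 28)    # batch of 8 MNIST-shaped images, NCHW (assumed)
log_probs = net.forward(x)           # expected shape: (8, 10)
dy = np.zeros_like(log_probs)
dy[np.arange(8), 3] = 1.0            # dummy one-hot upstream gradient
net.backward(dy)                     # per-layer gradients accumulate inside the layers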