Example #1
class CryptoNet_fivelayer(Module):
    def __init__(self, in_dim, n_class):
        super(CryptoNet_fivelayer, self).__init__()

        self.conv = Conv_sec(in_dim,
                             5,
                             5,
                             5,
                             zero_padding=1,
                             stride=2,
                             method='SAME',
                             bit_length=bit_length)
        self.sq1 = Activators_sec.Square(bit_length=bit_length)
        self.fc1 = FullyConnect_sec(845, 100, bit_length=bit_length)  # 845 = 5 channels * 13 * 13 conv output (28x28 input, 5x5 kernel, pad 1, stride 2)
        self.sq2 = Activators_sec.Square(bit_length=bit_length)
        self.fc2 = FullyConnect_sec(100, n_class, bit_length=bit_length)
        # self.logsoftmax = Logsoftmax()
        self.logsoftmax = Logsoftmax_sec(bit_length=bit_length)

    def forward(self, x1, x2):
        in_size = x1.shape[0]
        # start_time = time.time()
        out_c1, out_c2 = self.conv.forward(x1, x2)
        out_11, out_12 = self.sq1.forward(out_c1, out_c2)
        self.conv_out_shape = out_11.shape
        # print('out1shape: ',self.conv_out_shape)
        out_11 = out_11.reshape(in_size, -1)  # flatten the output into one row per sample
        out_12 = out_12.reshape(in_size, -1)  # flatten the output into one row per sample
        out_fc11, out_fc12 = self.fc1.forward(out_11, out_12)
        out_21, out_22 = self.sq2.forward(out_fc11, out_fc12)
        out_31, out_32 = self.fc2.forward(out_21, out_22)
        # end_time = time.time()
        # print('time consume: ', (end_time-start_time)*1000)

        out_logsoftmax_1, out_logsoftmax_2 = self.logsoftmax.forward(
            out_31, out_32)
        return out_logsoftmax_1, out_logsoftmax_2
        # out_logsoftmax = self.logsoftmax.forward(out_31+out_32)
        # return out_logsoftmax

    def backward(self, dy1, dy2):
        dy_logsoftmax_1, dy_logsoftmax_2 = self.logsoftmax.gradient(dy1, dy2)
        dy_f31, dy_f32 = self.fc2.gradient(dy_logsoftmax_1, dy_logsoftmax_2)
        dy_sq21, dy_sq22 = self.sq2.gradient(dy_f31, dy_f32)
        dy_f21, dy_f22 = self.fc1.gradient(dy_sq21, dy_sq22)

        dy_f21 = dy_f21.reshape(self.conv_out_shape)
        dy_f22 = dy_f22.reshape(self.conv_out_shape)

        dy_sq11, dy_sq12 = self.sq1.gradient(dy_f21, dy_f22)
        self.conv.gradient(dy_sq11, dy_sq12)
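
The class above consumes and produces additive secret shares. A minimal driver sketch (not part of the original source), assuming bit_length is defined at module scope (e.g. 32, as in the conv_test example below) and that the plaintext result is recovered by adding the two returned shares:

# hedged usage sketch: split an MNIST-shaped batch into two additive shares,
# run the shared forward pass, and reconstruct the log-softmax output
import numpy as np

x = np.random.randn(1, 1, 28, 28).astype(np.float32)    # plaintext batch
x_1 = np.random.randn(1, 1, 28, 28).astype(np.float32)  # random share held by party 1
x_2 = x - x_1                                            # complementary share held by party 2

model = CryptoNet_fivelayer(in_dim=1, n_class=10)
out_1, out_2 = model.forward(x_1, x_2)
log_probs = out_1 + out_2   # assumed reconstruction of the (1, 10) log-softmax scores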
Example #2
class Minionn_fivelayer(Module):
    def __init__(self, in_dim, n_class):
        super(Minionn_fivelayer, self).__init__()

        self.conv = Conv_sec(in_dim,
                             5,
                             5,
                             5,
                             zero_padding=2,
                             stride=2,
                             method='SAME',
                             bit_length=bit_length)
        self.relu1 = Activators_sec.ReLU(bit_length=bit_length)
        self.fc1 = FullyConnect_sec(980, 100, bit_length=bit_length)  # 980 = 5 channels * 14 * 14 conv output (28x28 input, 5x5 kernel, pad 2, stride 2)
        self.relu2 = Activators_sec.ReLU(bit_length=bit_length)
        self.fc2 = FullyConnect_sec(100, n_class, bit_length=bit_length)
        self.logsoftmax = Logsoftmax_sec(bit_length=bit_length)

    def forward(self, x1, x2):
        in_size = x1.shape[0]
        # start_time_sum = time.time()

        # start_conv = time.time()
        out_c1, out_c2 = self.conv.forward(x1, x2)
        # end_conv = time.time()

        out_11, out_12, dfc_time1 = self.relu1.forward(out_c1, out_c2)

        # start_fc1 = time.time()
        self.conv_out_shape = out_11.shape
        # print('out1shape: ',self.conv_out_shape)
        out_11 = out_11.reshape(in_size, -1)  # flatten the output into one row per sample
        out_12 = out_12.reshape(in_size, -1)  # flatten the output into one row per sample
        out_fc11, out_fc12 = self.fc1.forward(out_11, out_12)
        # end_fc1 = time.time()
        out_21, out_22, dfc_time2 = self.relu2.forward(out_fc11, out_fc12)
        # start_fc2 = time.time()
        out_31, out_32 = self.fc2.forward(out_21, out_22)
        # end_fc2 = time.time()

        # end_time_sum = time.time()

        # print('time consume: ', (end_conv-start_conv)*1000+(end_fc1-start_fc1)*1000+(end_fc1-start_fc1)*1000+self.relu1.offline_time+self.relu1.online_time+self.relu2.offline_time+self.relu2.online_time)
        # print('time consume sum: ', (end_time_sum-start_time_sum)*1000)
        out_logsoftmax_1, out_logsoftmax_2 = self.logsoftmax.forward(
            out_31, out_32)
        return out_logsoftmax_1, out_logsoftmax_2, dfc_time1 + dfc_time2
        # out_logsoftmax = self.logsoftmax.forward(out_31+out_32)
        # return out_logsoftmax

    def backward(self, dy1, dy2):
        dy_logsoftmax_1, dy_logsoftmax_2 = self.logsoftmax.gradient(dy1, dy2)
        dy_f31, dy_f32 = self.fc2.gradient(dy_logsoftmax_1, dy_logsoftmax_2)
        dy_relu21, dy_relu22 = self.relu2.gradient(dy_f31, dy_f32)
        dy_f21, dy_f22 = self.fc1.gradient(dy_relu21, dy_relu22)

        dy_f21 = dy_f21.reshape(self.conv_out_shape)
        dy_f22 = dy_f22.reshape(self.conv_out_shape)

        dy_relu11, dy_relu12 = self.relu1.gradient(dy_f21, dy_f22)
        self.conv.gradient(dy_relu11, dy_relu12)
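
Unlike the Square-based CryptoNet above, the ReLU layers here also return a timing value (dfc_time) that forward accumulates and passes back to the caller, and backward expects the loss gradient already split into two shares. A rough round-trip sketch (not part of the original source), under the same assumptions about bit_length and share reconstruction:

# hedged sketch: one shared forward/backward pass through Minionn_fivelayer
import numpy as np

x = np.random.randn(1, 1, 28, 28).astype(np.float32)
x_1 = np.random.randn(1, 1, 28, 28).astype(np.float32)
x_2 = x - x_1

model = Minionn_fivelayer(in_dim=1, n_class=10)
out_1, out_2, relu_time = model.forward(x_1, x_2)   # third value: accumulated ReLU timing

dy = np.ones_like(out_1)                             # placeholder loss gradient
dy_1 = np.random.randn(*dy.shape).astype(np.float32)
dy_2 = dy - dy_1
model.backward(dy_1, dy_2)                           # pushes shared gradients back to the conv layer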
Example #3
def conv_test():
    bit_length = 32
    # (1,28,28)*(5,5,5)
    # x_numpy = np.random.randn(1,1,28,28).astype(np.float32)
    # w_numpy = np.random.randn(5,1,5,5).astype(np.float32)
    # b_numpy = np.random.randn(5).astype(np.float32)
    # # (1,28,28)*(5,5,5)
    # x_numpy_1 = np.random.randn(1,1,28,28).astype(np.float32)
    # x_numpy_2 = x_numpy-x_numpy_1
    # w_numpy_1 = np.random.randn(5,1,5,5).astype(np.float32)
    # w_numpy_2 = w_numpy-w_numpy_1
    # b_numpy_1 = np.random.randn(5).astype(np.float32)
    # b_numpy_2 = b_numpy-b_numpy_1

    ## (3,32,32)*(64,2,2)
    # x_numpy = np.random.randn(1,3,32,32).astype(np.float32)
    # w_numpy = np.random.randn(64,3,2,2).astype(np.float32)
    # b_numpy = np.random.randn(64).astype(np.float32)
    # x = torch.tensor(x_numpy, requires_grad=True)

    # x_numpy_1 = np.random.randn(1,3,32,32).astype(np.float32)
    # x_numpy_2 = x_numpy-x_numpy_1
    # w_numpy_1 = np.random.randn(64,3,2,2).astype(np.float32)
    # w_numpy_2 = w_numpy-w_numpy_1
    # b_numpy_1 = np.random.randn(64).astype(np.float32)
    # b_numpy_2 = b_numpy-b_numpy_1

    x_numpy = np.random.randn(1,32,32,32).astype(np.float32)
    w_numpy = np.random.randn(128,32,3,3).astype(np.float32)
    b_numpy = np.random.randn(128).astype(np.float32)
    x = torch.tensor(x_numpy, requires_grad=True)

    x_numpy_1 = np.random.randn(1,32,32,32).astype(np.float32)
    x_numpy_2 = x_numpy-x_numpy_1
    w_numpy_1 = np.random.randn(128,32,3,3).astype(np.float32)
    w_numpy_2 = w_numpy-w_numpy_1
    b_numpy_1 = np.random.randn(128).astype(np.float32)
    b_numpy_2 = b_numpy-b_numpy_1

    print('input_shape: ', x_numpy.shape)
    print('w_shape: ', w_numpy.shape)

    # padding=0, stride=2
    # cl1 = Conv_sec(1, 5, 5, 5, zero_padding=0, stride=2, method='SAME')
    # cl1 = Conv_sec(3, 64, 2, 2, zero_padding=0, stride=2, method='SAME')
    cl1 = Conv_sec(32, 128, 3, 3, zero_padding=0, stride=2, method='SAME')
    # reference layers, aligned with the active (32 -> 128, 3x3) configuration above
    cl_ori = ConvLayer(32, 128, 3, 3, zero_padding=0, stride=2, method='SAME')
    cl_tensor = torch.nn.Conv2d(32, 128, kernel_size=3, stride=2, padding=0)
    ## set layer parameters: the reference layer gets the plaintext weights, cl1 gets the two shares
    cl_ori.set_weight(Parameter(w_numpy, requires_grad=True))
    cl_ori.set_bias(Parameter(b_numpy, requires_grad=True))
    cl1.set_weight_1(Parameter(w_numpy_1, requires_grad=True))
    cl1.set_bias_1(Parameter(b_numpy_1, requires_grad=True))
    cl1.set_weight_2(Parameter(w_numpy_2, requires_grad=True))
    cl1.set_bias_2(Parameter(b_numpy_2, requires_grad=True))

    # print('param_error: \n', w_numpy-(w_numpy_1+w_numpy_2))
    # print('param_error: \n', cl_ori.weights.data-(cl1.weights_1.data+cl1.weights_2.data))

    '''forward pass'''
    # start_time_tensor = time.time()
    # conv_out = cl_tensor(x)
    # end_time_tensor = time.time()
    # start_time = time.time()
    # conv_out = cl_ori.forward(x_numpy)
    # end_time = time.time()

    test_num = 10
    time_avg = 0
    for i in range(test_num):
        start_time_sec = time.time()
        conv_out_1, conv_out_2 = cl1.forward(x_numpy_1, x_numpy_2)
        end_time_sec = time.time()
        time_avg += (end_time_sec - start_time_sec) * 1000
    print('avg secure conv forward time (ms): \n', time_avg / test_num)
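    # --- hedged addition, not in the original source ---
    # sanity check: the two output shares should reconstruct the plaintext result,
    # assuming cl_ori (same geometry as cl1, plaintext weights/bias) is a valid reference
    conv_out_ref = cl_ori.forward(x_numpy)
    print('max reconstruction error: ',
          np.max(np.abs((conv_out_1 + conv_out_2) - conv_out_ref)))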
Example #4
class Discriminator_sec(Module):
    def __init__(self):
        super(Discriminator_sec, self).__init__()
        # input: 1*28*28 MNIST
        # 1*28*28 -> 64*16*16
        self.conv1 = Conv_sec(nc,
                              ndf,
                              4,
                              4,
                              zero_padding=1,
                              stride=2,
                              method='SAME',
                              bias_required=False,
                              bit_length=bit_length)
        self.lrelu1 = Activators_sec.LeakyReLU(0.2, bit_length=bit_length)

        # 64*16*16 -> 128*8*8
        self.conv2 = Conv_sec(ndf,
                              ndf * 2,
                              4,
                              4,
                              zero_padding=1,
                              stride=2,
                              method='SAME',
                              bias_required=False,
                              bit_length=bit_length)
        self.bn1 = BatchNorm_sec(ndf * 2, bit_length=bit_length)
        self.lrelu2 = Activators_sec.LeakyReLU(0.2, bit_length=bit_length)

        # 128*8*8 -> 256*4*4
        self.conv3 = Conv_sec(ndf * 2,
                              ndf * 4,
                              4,
                              4,
                              zero_padding=1,
                              stride=2,
                              method='SAME',
                              bias_required=False,
                              bit_length=bit_length)
        self.bn2 = BatchNorm_sec(ndf * 4, bit_length=bit_length)
        self.lrelu3 = Activators_sec.LeakyReLU(0.2, bit_length=bit_length)

        # 256*4*4 -> 1*1
        self.conv4 = Conv_sec(ndf * 4,
                              1,
                              4,
                              4,
                              zero_padding=0,
                              stride=1,
                              method='VALID',
                              bias_required=False,
                              bit_length=bit_length)
        self.sigmoid = Activators_sec.Sigmoid_CE_sec(bit_length=bit_length)

    def forward(self, x_input_1, x_input_2):
        # l1 = self.lrelu1.forward(self.conv1.forward(x_input))
        # l2 = self.lrelu2.forward(self.bn1.forward(self.conv2.forward(l1)))
        # l3 = self.lrelu3.forward(self.bn2.forward(self.conv3.forward(l2)))
        # l4 = self.conv4.forward(l3)
        # output_sigmoid = self.sigmoid.forward(l4)
        # return output_sigmoid
        start_l1 = time.time()
        conv11, conv12 = self.conv1.forward(x_input_1, x_input_2)
        # end_conv1 = time.time()
        lrelu11, lrelu12, dfc_time_1 = self.lrelu1.forward(
            conv11, conv12)  # the ReLU is still the most time-consuming step
        # end_l1 = time.time()
        # print('conv1 (ms): ', (end_conv1-start_l1)*1000)
        # print('L_1 (ms): ', (end_l1-start_l1)*1000)

        conv21, conv22 = self.conv2.forward(lrelu11, lrelu12)
        bn11, bn12 = self.bn1.forward(conv21, conv22)
        lrelu21, lrelu22, dfc_time_2 = self.lrelu2.forward(bn11, bn12)

        conv31, conv32 = self.conv3.forward(lrelu21, lrelu22)
        bn21, bn22 = self.bn2.forward(conv31, conv32)
        lrelu31, lrelu32, dfc_time_3 = self.lrelu3.forward(bn21, bn22)

        conv41, conv42 = self.conv4.forward(lrelu31, lrelu32)
        # print('conv4: ', (conv41+conv42)[0][0])
        output_sig_1, output_sig_2 = self.sigmoid.forward(conv41, conv42)
        end_sig = time.time()

        print('sigmoid input shape: ', conv41.shape)
        print('dfc_time (s): ', (dfc_time_1 + dfc_time_2 + dfc_time_3) * 60)
        print('total SD time (s): ', (end_sig - start_l1) -
              (dfc_time_1 + dfc_time_2 + dfc_time_3) * 60)
        print('total SD time real (s): ', (end_sig - start_l1))

        return output_sig_1, output_sig_2

    def backward(self, dy):
        # print('dy.shape: ', dy.shape)
        dy_sigmoid = self.sigmoid.gradient(dy)
        # print('dy_sigmoid.shape: ', dy_sigmoid.shape)
        dy_l4 = self.conv4.gradient(dy_sigmoid)
        dy_l3 = self.conv3.gradient(
            self.bn2.gradient(self.lrelu3.gradient(dy_l4)))
        dy_l2 = self.conv2.gradient(
            self.bn1.gradient(self.lrelu2.gradient(dy_l3)))
        dy_l1 = self.conv1.gradient(self.lrelu1.gradient(dy_l2))
        # print('D_backward output shape: ',dy_l1.shape)
        return dy_l1
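
The discriminator follows the same two-share convention on the forward pass, while backward takes a single, already-reconstructed gradient. A usage sketch (not part of the original source), assuming the module-level globals nc=1, ndf=64 and bit_length exist and that the MNIST images are resized/padded to 1*32*32 so the 32 -> 16 -> 8 -> 4 -> 1 shape chain in the comments holds:

# hedged usage sketch: one discriminator forward/backward on shared images
import numpy as np

imgs = np.random.randn(8, 1, 32, 32).astype(np.float32)    # small batch of (resized) MNIST images
imgs_1 = np.random.randn(*imgs.shape).astype(np.float32)
imgs_2 = imgs - imgs_1

D = Discriminator_sec()
p_1, p_2 = D.forward(imgs_1, imgs_2)    # sigmoid output shares, one value per image
dy = np.ones_like(p_1)                  # placeholder gradient of the loss w.r.t. the sigmoid output
dx = D.backward(dy)                     # note: a single reconstructed gradient, not shares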