Example #1
class CryptoNet_fivelayer(Module):
    def __init__(self, in_dim, n_class):
        super(CryptoNet_fivelayer, self).__init__()

        self.conv = ConvLayer(in_dim, 5, 5, 5, zero_padding=1, stride=2, method='SAME')
        self.sq1 = Activators.Square()
        self.fc1 = FullyConnect(845, 100)
        self.sq2 = Activators.Square()
        self.fc2 = FullyConnect(100, n_class)
        self.logsoftmax = Logsoftmax()

    def forward(self, x):
        in_size = x.shape[0]
        out_1 = self.sq1.forward(self.conv.forward(x))
        self.conv_out_shape = out_1.shape
        # print('out1shape: ',self.conv_out_shape)
        out_1 = out_1.reshape(in_size, -1) # flatten the conv output to one row per sample
        out_2 = self.sq2.forward(self.fc1.forward(out_1))
        out_3 = self.fc2.forward(out_2)
        
        out_logsoftmax = self.logsoftmax.forward(out_3)

        return out_logsoftmax
    
    def backward(self, dy):
        dy_logsoftmax = self.logsoftmax.gradient(dy)
        dy_f3 = self.fc2.gradient(dy_logsoftmax)
        dy_f2 = self.fc1.gradient(self.sq2.gradient(dy_f3))
        dy_f2 = dy_f2.reshape(self.conv_out_shape)
        self.conv.gradient(self.sq1.gradient(dy_f2))
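A minimal usage sketch for the class above (not part of the original snippet). It assumes MNIST-shaped input of (batch, 1, 28, 28), which makes the conv output 5 x 13 x 13 = 845 and so matches fc1, and it assumes a mean negative-log-likelihood loss, whose gradient with respect to the log-probabilities is -labels / batch_size.

import numpy as np

net = CryptoNet_fivelayer(in_dim=1, n_class=10)
x = np.random.randn(8, 1, 28, 28)                  # a batch of 8 MNIST-like images
log_probs = net.forward(x)                         # (8, 10) log-probabilities
labels = np.eye(10)[np.random.randint(0, 10, 8)]   # one-hot targets
dy = -labels / x.shape[0]                          # d(mean NLL)/d(log_probs)
net.backward(dy)                                   # fills each layer's gradients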
Example #2
class Minionn_fivelayer(Module):
    def __init__(self, in_dim, n_class):
        super(Minionn_fivelayer, self).__init__()

        self.conv = ConvLayer(in_dim,
                              5,
                              5,
                              5,
                              zero_padding=2,
                              stride=2,
                              method='SAME')
        self.relu1 = Activators.ReLU()
        self.fc1 = FullyConnect(980, 100)
        self.relu2 = Activators.ReLU()
        self.fc2 = FullyConnect(100, n_class)
        self.logsoftmax = Logsoftmax()
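This snippet shows only the constructor. Assuming the omitted forward pass mirrors CryptoNet_fivelayer above with ReLU in place of Square (with 28 x 28 inputs the conv output is 5 x 14 x 14 = 980, matching fc1), a sketch would be:

    def forward(self, x):
        in_size = x.shape[0]
        out_1 = self.relu1.forward(self.conv.forward(x))
        self.conv_out_shape = out_1.shape
        out_1 = out_1.reshape(in_size, -1)  # flatten to one row per sample
        out_2 = self.relu2.forward(self.fc1.forward(out_1))
        return self.logsoftmax.forward(self.fc2.forward(out_2))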
Example #3
class Secureml_fivelayer(Module):
    def __init__(self, in_dim, n_class):
        super(Secureml_fivelayer, self).__init__()

        self.fc0 = FullyConnect(784, 128)
        self.sq1 = Activators.Square()
        self.fc1 = FullyConnect(128, 128)
        self.sq2 = Activators.Square()
        self.fc2 = FullyConnect(128, n_class)
        self.logsoftmax = Logsoftmax()

    def forward(self, x):
        in_size = x.shape[0]
        x = x.reshape(in_size, -1)
        out_1 = self.sq1.forward(self.fc0.forward(x))
        out_2 = self.sq2.forward(self.fc1.forward(out_1))
        out_3 = self.fc2.forward(out_2)
        
        out_logsoftmax = self.logsoftmax.forward(out_3)

        return out_logsoftmax
    
    def backward(self, dy):
        dy_logsoftmax = self.logsoftmax.gradient(dy)
        dy_f3 = self.fc2.gradient(dy_logsoftmax)
        dy_f2 = self.fc1.gradient(self.sq2.gradient(dy_f3))
        self.fc0.gradient(self.sq1.gradient(dy_f2))
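SecureML-style networks use Square in place of ReLU so that every layer stays polynomial and therefore cheap to evaluate under secret sharing. A stand-alone finite-difference check of the derivative the backward pass relies on (d/dx x^2 = 2x), independent of the repo's Activators.Square:

import numpy as np

x = np.random.randn(4, 3)
eps = 1e-6
num_grad = ((x + eps) ** 2 - (x - eps) ** 2) / (2 * eps)  # central difference
assert np.allclose(num_grad, 2 * x, atol=1e-5)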
Example #4
class Lenet_numpy(Module):
    def __init__(self, in_dim, n_class):
        super(Lenet_numpy, self).__init__()
        self.conv1 = ConvLayer(in_dim, 6, 5, 5, zero_padding=2, stride=1, method='SAME')
        self.conv2 = ConvLayer(6, 16, 5, 5, zero_padding=0, stride=1, method='VALID')
        self.conv3 = ConvLayer(16, 120, 5, 5, zero_padding=0, stride=1, method='VALID')

        self.maxpool1 = MaxPooling(pool_shape=(2,2), stride=(2,2))
        self.maxpool2 = MaxPooling(pool_shape=(2,2), stride=(2,2))
        self.relu1 = ReLU()
        self.relu2 = ReLU()
        self.relu3 = ReLU()
        self.relu4 = ReLU()
        self.fc1 = FullyConnect(120, 84)
        self.fc2 = FullyConnect(84, n_class)
        self.logsoftmax = Logsoftmax()

    def forward(self, x): # known issue: the same layer object cannot be used more than once, since each object caches its own input and output; reusing one makes the backward pass wrong
        in_size = x.shape[0]
        out_c1s2 = self.relu1.forward(self.maxpool1.forward(self.conv1.forward(x)))
        out_c3s4 = self.relu2.forward(self.maxpool2.forward(self.conv2.forward(out_c1s2)))
        out_c5 = self.relu3.forward(self.conv3.forward(out_c3s4))
        self.conv_out_shape = out_c5.shape

        out_c5 = out_c5.reshape(in_size, -1)
        out_f6 = self.relu4.forward(self.fc1.forward(out_c5))
        out_f7 = self.fc2.forward(out_f6)
        out_logsoftmax = self.logsoftmax.forward(out_f7)

        return out_logsoftmax
    
    def backward(self, dy):
        dy_logsoftmax = self.logsoftmax.gradient(dy)
        dy_f7 = self.fc2.gradient(dy_logsoftmax)
        dy_f6 = self.fc1.gradient(self.relu4.gradient(dy_f7))

        dy_f6 = dy_f6.reshape(self.conv_out_shape)

        dy_c5 = self.conv3.gradient(self.relu3.gradient(dy_f6))
        dy_c3f4 = self.conv2.gradient(self.maxpool2.gradient(self.relu2.gradient(dy_c5)))
        self.conv1.gradient(self.maxpool1.gradient(self.relu1.gradient(dy_c3f4)))
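The comment on forward above is why relu1..relu4 are four separate objects. A toy stateful ReLU (an assumption about how the repo's layers cache state, not its actual code) shows the failure mode:

import numpy as np

class StatefulReLU:
    def forward(self, x):
        self.x = x                    # cache the input for backward
        return np.maximum(x, 0)
    def gradient(self, dy):
        return dy * (self.x > 0)      # uses the *last* cached input

relu = StatefulReLU()
a = relu.forward(np.array([-1.0, 2.0]))
b = relu.forward(np.array([3.0, -4.0]))  # overwrites the cache for `a`
# relu.gradient(...) now reflects only the second call, so gradients for the
# first use would be computed against the wrong input, hence relu1..relu4.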
Example #5
import time
import numpy as np

def fc_test():
    bit_length = 32
    batch_size = 1
    in_num = 580
    out_num = 580
    x_numpy = np.random.randn(batch_size, in_num).astype(np.float64)
    w_numpy = np.random.randn(in_num, out_num).astype(np.float64)
    b_numpy = np.random.randn(out_num).astype(np.float64)

    ## prepare the additive secret shares
    x_numpy_1 = np.random.randn(batch_size, in_num).astype(np.float64)
    x_numpy_2 = x_numpy - x_numpy_1
    w_numpy_1 = np.random.randn(in_num, out_num).astype(np.float64)
    w_numpy_2 = w_numpy - w_numpy_1
    b_numpy_1 = np.random.randn(out_num).astype(np.float64)
    b_numpy_2 = b_numpy - b_numpy_1
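    # Added sanity check (sketch, not in the original): additive shares must
    # reconstruct the plaintext exactly, since x_2 = x - x_1 by construction.
    assert np.allclose(x_numpy, x_numpy_1 + x_numpy_2)
    assert np.allclose(w_numpy, w_numpy_1 + w_numpy_2)
    assert np.allclose(b_numpy, b_numpy_1 + b_numpy_2)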

    fc = FullyConnect(in_num, out_num)
    fc_sec = FullyConnect_sec(in_num, out_num, bit_length=bit_length)

    # set the weights and biases on both layers
    fc.set_weight(Parameter(w_numpy, requires_grad=True))
    fc_sec.set_weight_1(Parameter(w_numpy_1, requires_grad=True))
    fc_sec.set_weight_2(Parameter(w_numpy_2, requires_grad=True))
    fc.set_bias(Parameter(b_numpy, requires_grad=True))
    fc_sec.set_bias_1(Parameter(b_numpy_1, requires_grad=True))
    fc_sec.set_bias_2(Parameter(b_numpy_2, requires_grad=True))

    # fc_out = fc.forward(x_numpy)
    # fc_out_1, fc_out_2 = fc_sec.forward(x_numpy_1, x_numpy_2)
    # print('error: \n', fc_out-(fc_out_1+fc_out_2))

    test_num = 10
    time_avg = 0
    for i in range(test_num):
        start_time_sec = time.time()
        fc_out_1, fc_out_2 = fc_sec.forward(x_numpy_1, x_numpy_2)
        end_time_sec = time.time()
        time_avg += (end_time_sec - start_time_sec) * 1000
    print('time avg sec: \n', time_avg/test_num)
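    # For comparison (added sketch, not in the original): time the plaintext
    # layer the same way, reusing fc.forward exactly as in the commented-out
    # correctness check above.
    time_avg = 0
    for i in range(test_num):
        start_time = time.time()
        fc_out = fc.forward(x_numpy)
        end_time = time.time()
        time_avg += (end_time - start_time) * 1000
    print('time avg plain: \n', time_avg / test_num)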
images, labels = load_mnist('./data/mnist')
test_images, test_labels = load_mnist('./data/mnist', 't10k')
print(images.shape, labels.shape)

#images = images[:1000,:]
#labels = labels[:1000]
print(images.shape, labels.shape)

batch_size = 100
conv1 = Conv2D([batch_size, 28, 28, 1], 12, 5, 1)
relu1 = Relu(conv1.output_shape)
pool1 = MaxPooling(relu1.shape_output)
conv2 = Conv2D(pool1.shape_output, 24, 3, 1)
relu2 = Relu(conv2.output_shape)
pool2 = MaxPooling(relu2.shape_output)
fc = FullyConnect(pool2.shape_output, 10)
sf = Softmax(fc.output_shape)

learning_rate = 0.0001
lmbda = 0.0004

# train_loss_record = []
# train_acc_record = []
# val_loss_record = []
# val_acc_record = []
acc = 0.7
batch_acc = 0
for epoch in range(20):
    # if epoch < 5:
    #     learning_rate = 0.00001
    # elif epoch < 10: