Example #1
    def gradient(self, eta_1, eta_2):
        # eta=[batchsize, out_num]
        self.eta_1 = eta_1
        self.eta_2 = eta_2
        bias_shape = self.bias_1.data.shape
        # print('eta.shape: \n',self.eta.shape)
        # DNN backpropagation: compute delta_W
        for i in range(0, self.eta_1.shape[0]):
            # input_col_i = self.input_col[i][:, np.newaxis]
            input_col_i_1 = self.input_col_1[i][:, np.newaxis]
            input_col_i_2 = self.input_col_2[i][:, np.newaxis]

            # eta_i = self.eta[i][:, np.newaxis].T
            eta_i_1 = self.eta_1[i][:, np.newaxis].T
            eta_i_2 = self.eta_2[i][:, np.newaxis].T
            # accumulate the gradient from each sample's output error over the batch
            # weights=[in_num, out_num]
            # self.weights.grad += np.dot(input_col_i, eta_i)
            grad_i_1, grad_i_2 = secp.SecMul_dot_3(input_col_i_1, eta_i_1,
                                                   input_col_i_2, eta_i_2,
                                                   self.bit_length)
            self.weights_1.grad += grad_i_1
            self.weights_2.grad += grad_i_2
            # self.bias.grad += eta_i.reshape(self.bias.data.shape)
            self.bias_1.grad += eta_i_1.reshape(bias_shape)
            self.bias_2.grad += eta_i_2.reshape(bias_shape)

        # print('eta shape: \n',self.eta.shape)
        # print('weight.data shape: \n',self.weights.data.shape)
        # compute the error passed to the previous layer; eta=[batch, out_num], weights=[in_num, out_num]
        # self.eta_next = np.dot(self.eta, self.weights.data.T) # eta_next=[batch, in_num]
        self.eta_next_1, self.eta_next_2 = secp.SecMul_dot_3(
            self.eta_1, self.weights_1.data.T, self.eta_2,
            self.weights_2.data.T, self.bit_length)  # eta_next=[batch, in_num]

        # return self.eta_next
        return self.eta_next_1, self.eta_next_2
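For reference, the commented-out lines show the plaintext computation this code mirrors. Since the two shares reconstruct the true values, the accumulated weight-gradient shares should sum to the plaintext gradient. A minimal sanity check of that identity in plain NumPy (shapes and names are illustrative; secp is replaced by direct computation on reconstructed values):

import numpy as np

batch, in_num, out_num = 4, 6, 3
input_col = np.random.randn(batch, in_num)   # reconstructed activations
eta = np.random.randn(batch, out_num)        # reconstructed output error

# Plaintext gradient: sum of per-sample outer products, [in_num, out_num]
grad = np.zeros((in_num, out_num))
for i in range(batch):
    grad += np.dot(input_col[i][:, np.newaxis], eta[i][np.newaxis, :])

# The per-sample loop is equivalent to a single batched matmul.
assert np.allclose(grad, np.dot(input_col.T, eta))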
Example #2
def secmul_dot_test():
    a = np.random.randn(5, 25)  # a=[5,25]
    b = np.random.randn(25, 50)  # b=[25,50]
    # print('a: \n', a)
    # print('b: \n', b)
    a1 = np.ones(a.shape)
    b1 = np.ones(b.shape)
    a2 = a - a1
    b2 = b - b1

    ab_dot = np.dot(a, b)

    # f1, f2 = secp.SecMul_dot(a1, b1, a2, b2)
    # f1, f2 = secp.SecMul_dot_2(a1, b1, a2, b2)
    f1, f2 = secp.SecMul_dot_3(a1, b1, a2, b2)

    # print('ab_dot: \n', ab_dot)
    # print('f1+f2: \n', f1+f2)
    print('error : \n', (f1 + f2) - ab_dot)
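The test exercises the additive-sharing invariant: SecMul_dot_3 receives one share of each operand per party (a1, b1 and a2, b2) and returns output shares f1, f2 such that f1 + f2 equals np.dot(a1 + a2, b1 + b2). A minimal plain-NumPy stand-in with the same input/output behavior (purely illustrative; it reconstructs the operands, which a real secure protocol never does):

import numpy as np

def secmul_dot_mock(a1, b1, a2, b2, bit_length=None):
    # Reconstruct, multiply, then re-split into fresh additive shares.
    product = np.dot(a1 + a2, b1 + b2)
    r = np.random.randn(*product.shape)  # fresh random mask
    return r, product - r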
Example #3
    def forward(self, input_array_1, input_array_2):
        self.input_shape = input_array_1.shape
        self.batchsize = self.input_shape[0]
        # apply the fully connected computation to every input in the batch
        # input_col=[batchsize, in_num], weights=[in_num, out_num]
        # this layout is convenient for batched computation
        self.input_col_1 = input_array_1.reshape([self.batchsize, -1])
        self.input_col_2 = input_array_2.reshape([self.batchsize, -1])
        # print('input_shape: \n', self.input_col.shape)
        '''
            [Z1,Z2,...Zm] = [m x n matrix] * [A1,A2,...An] + [B1,B2,...Bm]
            inputs and outputs are flattened into column vectors
        '''
        # output_array = [batchsize, out_num]
        # output_array = np.dot(self.input_col, self.weights.data) + self.bias.data
        output_array_1, output_array_2 = secp.SecMul_dot_3(
            self.input_col_1, self.weights_1.data, self.input_col_2,
            self.weights_2.data, self.bit_length)
        output_array_1 += self.bias_1.data
        output_array_2 += self.bias_2.data
        # print('output_shape: \n',output_array.shape)
        return output_array_1, output_array_2
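For comparison, a plaintext version of the same forward pass (a sketch following the commented-out line above, assuming the [batchsize, in_num] x [in_num, out_num] layout):

import numpy as np

def forward_plain(input_array, weights, bias):
    # Flatten each batch element, then one batched matmul plus bias.
    batchsize = input_array.shape[0]
    input_col = input_array.reshape([batchsize, -1])  # [batch, in_num]
    return np.dot(input_col, weights) + bias          # [batch, out_num]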
Example #4
    def forward(self, input_array_1, input_array_2):
        self.input_array_1 = input_array_1
        self.input_array_2 = input_array_2
        self.input_shape = self.input_array_1.shape  # [B,C,H,W]
        self.batchsize = self.input_shape[0]
        self.input_height = self.input_shape[2]
        self.input_width = self.input_shape[3]
        # compute the deconvolution output size
        self.output_size = Deconv_sec.compute_output_size(
            self.input_width, self.filter_size, self.zero_padding, self.stride)

        self.output_array = np.zeros((self.batchsize, self.out_channels,
                                      self.output_size, self.output_size))
        '''Base padding for deconvolution: the input is padded twice'''
        # First pass: insert zeros inside the input according to the stride;
        # (stride - 1) zeros go between neighboring elements
        input_pad_1 = self.input_array_1
        input_pad_2 = self.input_array_2
        if self.stride >= 2:
            # input_pad = padding_stride(input_pad, self.stride)
            input_pad_1 = padding_stride(input_pad_1, self.stride)
            input_pad_2 = padding_stride(input_pad_2, self.stride)
        # print('input_pad first: ',(input_pad_1+input_pad_2)[0])
        # print('first pad: ', input_pad.shape)
        # Second pass: from the output size, compute the padding an equivalent
        # stride-1 convolution needs; if padding % 2 != 0, put the extra zeros
        # on the left and top of the input [2P = (O-1)*1 + F - W]
        # input_pad = Conv.padding(input_pad, self.method, self.zero_padding) # required padding
        input_pad_1 = Conv.padding(input_pad_1, self.method,
                                   self.zero_padding)  # required padding
        input_pad_2 = Conv.padding(input_pad_2, self.method,
                                   self.zero_padding)  # required padding
        # print('second pad: ', input_pad.shape)
        # print('second pad: ', (input_pad_1+input_pad_2)[0])
        '''Padding for the stride-1 convolution inside the deconvolution: pad the input once more'''
        padding_num_2 = (self.output_size - 1 + self.filter_size -
                         input_pad_1.shape[3])
        # input_pad = Conv.padding(input_pad, 'SAME', padding_num_2//2) # required padding
        input_pad_1 = Conv.padding(input_pad_1, 'SAME',
                                   padding_num_2 // 2)  # required padding
        input_pad_2 = Conv.padding(input_pad_2, 'SAME',
                                   padding_num_2 // 2)  # required padding

        if padding_num_2 % 2 != 0:  # pad zeros on the left and top of the input
            # input_pad = padding_additional(input_pad)
            input_pad_1 = padding_additional(input_pad_1)
            input_pad_2 = padding_additional(input_pad_2)
        # print('padding_num_2: ', padding_num_2)
        # print('third pad: ', input_pad.shape)
        # print('third pad: ', (input_pad_1+input_pad_2)[0])
        '''Convert the filters to a matrix'''
        ## compute the 180-degree-rotated weight matrix, rot180(W)
        # flip_weights = self.weights.data[...,::-1,::-1]
        # weights_col = flip_weights.reshape([self.out_channels, -1])
        # if self.bias_required:
        #     bias_col = self.bias.data.reshape([self.out_channels, -1])
        flip_weights_1 = self.weights_1.data[..., ::-1, ::-1]
        flip_weights_2 = self.weights_2.data[..., ::-1, ::-1]
        weights_col_1 = flip_weights_1.reshape([self.out_channels, -1])
        weights_col_2 = flip_weights_2.reshape([self.out_channels, -1])
        if self.bias_required:
            bias_col_1 = self.bias_1.data.reshape([self.out_channels, -1])
            bias_col_2 = self.bias_2.data.reshape([self.out_channels, -1])
        # print('weight_sec: ', (weights_col_1+weights_col_2))
        '''Deconvolution forward pass'''
        self.input_col_1 = []
        self.input_col_2 = []
        deconv_out_1 = np.zeros(self.output_array.shape)
        deconv_out_2 = np.zeros(self.output_array.shape)
        for i in range(0, self.batchsize):
            # input_i = input_pad[i][np.newaxis,:] # take each batch element's input
            input_i_1 = input_pad_1[i][np.newaxis, :]
            input_i_2 = input_pad_2[i][np.newaxis, :]
            # input_col_i = Conv.img2col(input_i, self.filter_size, 1, self.zero_padding) # flatten each batch element's input into a matrix (note: stride=1 here)
            input_col_i_1 = Conv.img2col(input_i_1, self.filter_size, 1,
                                         self.zero_padding)
            input_col_i_2 = Conv.img2col(input_i_2, self.filter_size, 1,
                                         self.zero_padding)
            # print('Deconv input_i.shape: \n',input_i.shape)
            # print('Deconv input_col_i_1.shape: \n',input_col_i_1.shape)
            # print('Deconv weights_col_1.shape: \n',weights_col_1.shape)
            if self.bias_required:
                # deconv_out_i = np.dot(weights_col, input_col_i)+bias_col # matrix convolution; output is a [Cout,(H-k+1)*(W-k+1)] matrix
                deconv_out_i_1, deconv_out_i_2 = secp.SecMul_dot_3(
                    weights_col_1, input_col_i_1, weights_col_2, input_col_i_2,
                    self.bit_length)
                deconv_out_i_1 += bias_col_1
                deconv_out_i_2 += bias_col_2
            else:
                # deconv_out_i = np.dot(weights_col, input_col_i)
                deconv_out_i_1, deconv_out_i_2 = secp.SecMul_dot_3(
                    weights_col_1, input_col_i_1, weights_col_2, input_col_i_2,
                    self.bit_length)
            # print('deconv_out_i.shape: \n',deconv_out_i.shape)
            # deconv_out[i] = np.reshape(deconv_out_i, self.output_array[0].shape) # reshape to a [Cout,Hout,Wout] output
            deconv_out_1[i] = np.reshape(deconv_out_i_1,
                                         self.output_array[0].shape)
            deconv_out_2[i] = np.reshape(deconv_out_i_2,
                                         self.output_array[0].shape)

            # self.input_col_1.append(input_col_i_1)
            # self.input_col_2.append(input_col_i_2)
        # self.input_col_1 = np.array(self.input_col_1)
        # self.input_col_2 = np.array(self.input_col_2)
        # print('deconv out_shape: ',deconv_out_1.shape)
        # print('--------')

        return deconv_out_1, deconv_out_2
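The output size used above comes from Deconv_sec.compute_output_size, whose body is not shown. A sketch of the standard transposed-convolution relation it presumably implements (an assumption; the double padding above is constructed so that a stride-1 convolution over the padded input yields exactly this size):

def deconv_output_size(input_size, filter_size, zero_padding, stride):
    # Standard transposed-convolution output size (assumed):
    # O = (W - 1) * stride - 2 * P + F
    return (input_size - 1) * stride - 2 * zero_padding + filter_size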
Example #5
    def gradient(self, eta_1, eta_2):
        # print('eta_shape: ',eta_1.shape)
        # print('output_shape: ',self.output_array.shape)
        # eta is the error propagated from layer (l+1) down to layer l
        # i.e. Z_ij, eta=[batch, Cout, out_h, out_w]
        self.eta_1 = eta_1
        self.eta_2 = eta_2
        # print('eta.shape: \n', self.eta.shape)
        # eta_col=[batch,Cout,out_h*out_w]
        eta_col_1 = np.reshape(eta_1, [self.batchsize, self.out_channels, -1])
        eta_col_2 = np.reshape(eta_2, [self.batchsize, self.out_channels, -1])
        '''Gradient of W: delta_W = a^(l-1) conv delta_Z^l'''
        for i in range(0, self.batchsize):
            # self.weights.grad += np.dot(eta_col[i], self.input_col[i].T).reshape(self.weights.data.shape)
            w1_grad, w2_grad = secp.SecMul_dot_3(eta_col_1[i],
                                                 self.input_col_1[i].T,
                                                 eta_col_2[i],
                                                 self.input_col_2[i].T,
                                                 self.bit_length)
            self.weights_1.grad += w1_grad.reshape(self.weights_1.data.shape)
            self.weights_2.grad += w2_grad.reshape(self.weights_2.data.shape)
        '''Gradient of b'''
        # print('eta_col: \n',eta_col)
        # print('eta.shape: \n',self.eta.shape)
        if self.bias_required:
            # self.bias.grad += np.sum(eta_col, axis=(0, 2))
            self.bias_1.grad += np.sum(eta_col_1, axis=(0, 2))
            self.bias_2.grad += np.sum(eta_col_2, axis=(0, 2))
        """计算传输到上一层的误差"""
        ## 针对stride>=2时对误差矩阵的填充,需要在每个误差数据中间填充(stride-1) ##
        eta_pad_1 = self.eta_1
        eta_pad_2 = self.eta_2
        # eta_pad = self.eta
        if self.stride >= 2:
            # size of the error matrix after the interleaved zero padding
            # pad_size = (self.eta.shape[3]-1)*(self.stride-1)+self.eta.shape[3]
            pad_size = (self.eta_1.shape[3] - 1) * (self.stride -
                                                    1) + self.eta_1.shape[3]
            # eta_pad = np.zeros((self.eta.shape[0], self.eta.shape[1], pad_size, pad_size))
            eta_pad_1 = np.zeros(
                (self.eta_1.shape[0], self.eta_1.shape[1], pad_size, pad_size))
            eta_pad_2 = np.zeros(
                (self.eta_2.shape[0], self.eta_2.shape[1], pad_size, pad_size))
            for i in range(0, self.eta_1.shape[3]):
                for j in range(0, self.eta_1.shape[3]):
                    # eta_pad[:,:,self.stride*i,self.stride*j] = self.eta[:,:,i,j]
                    eta_pad_1[:, :, self.stride * i,
                              self.stride * j] = self.eta_1[:, :, i, j]
                    eta_pad_2[:, :, self.stride * i,
                              self.stride * j] = self.eta_2[:, :, i, j]
        # convolve the zero-padded output error with rot180(weights)
        # pad delta_Z_pad with zeros around eta_pad: 'VALID' pads (ksize - 1), 'SAME' pads ksize/2
        if self.method == 'VALID':
            # eta_pad = np.pad(eta_pad, ((0,0),(0,0),(self.filter_height-1, self.filter_height-1),(self.filter_width-1, self.filter_width-1)),'constant',constant_values = (0,0))
            eta_pad_1 = np.pad(
                eta_pad_1, ((0, 0), (0, 0),
                            (self.filter_height - 1, self.filter_height - 1),
                            (self.filter_width - 1, self.filter_width - 1)),
                'constant',
                constant_values=(0, 0))
            eta_pad_2 = np.pad(
                eta_pad_2, ((0, 0), (0, 0),
                            (self.filter_height - 1, self.filter_height - 1),
                            (self.filter_width - 1, self.filter_width - 1)),
                'constant',
                constant_values=(0, 0))

        same_pad_height = (self.input_height - 1 + self.filter_height -
                           eta_pad_1.shape[2])
        same_pad_width = (self.input_width - 1 + self.filter_width -
                          eta_pad_1.shape[3])
        if self.method == 'SAME':
            # eta_pad = np.pad(eta_pad, ((0,0),(0,0),(same_pad_height, same_pad_height),(same_pad_width, same_pad_width)),'constant',constant_values = (0,0))
            eta_pad_1 = np.pad(eta_pad_1,
                               ((0, 0), (0, 0),
                                (same_pad_height // 2, same_pad_height // 2),
                                (same_pad_width // 2, same_pad_width // 2)),
                               'constant',
                               constant_values=(0, 0))
            eta_pad_2 = np.pad(eta_pad_2,
                               ((0, 0), (0, 0),
                                (same_pad_height // 2, same_pad_height // 2),
                                (same_pad_width // 2, same_pad_width // 2)),
                               'constant',
                               constant_values=(0, 0))
        if same_pad_height % 2 != 0:  # pad zeros on the left and top of the input
            # eta_pad = padding_additional(eta_pad)
            eta_pad_1 = padding_additional(eta_pad_1)
            eta_pad_2 = padding_additional(eta_pad_2)

        ## compute the 180-degree-rotated weight matrix, rot180(W)
        ##  self.weight[Cout,depth,h,w]
        # flip_weights = self.weights.data[...,::-1,::-1]
        # flip_weights = flip_weights.swapaxes(0, 1)
        # flip_weights_col = flip_weights.reshape([self.in_channels, -1])
        flip_weights_1 = self.weights_1.data[..., ::-1, ::-1]
        flip_weights_2 = self.weights_2.data[..., ::-1, ::-1]
        flip_weights_1 = flip_weights_1.swapaxes(0, 1)
        flip_weights_2 = flip_weights_2.swapaxes(0, 1)
        flip_weights_col_1 = flip_weights_1.reshape([self.in_channels, -1])
        flip_weights_col_2 = flip_weights_2.reshape([self.in_channels, -1])

        ## compute the error eta_next propagated to the previous layer, via matrix convolution
        # in plaintext: delta_Z^(l) = delta_Z^(l+1) conv rot180(W^(l))
        # eta_next = []
        eta_next_1 = []
        eta_next_2 = []
        for i in range(0, self.batchsize):
            # eta_pad_col_i = img2col(eta_pad[i][np.newaxis,:], self.filter_width, 1, self.zero_padding)
            eta_pad_col_i_1 = img2col(eta_pad_1[i][np.newaxis, :],
                                      self.filter_width, 1, self.zero_padding)
            eta_pad_col_i_2 = img2col(eta_pad_2[i][np.newaxis, :],
                                      self.filter_width, 1, self.zero_padding)
            # eta_next_i = np.dot(flip_weights_col, eta_pad_col_i)
            eta_next_i_1, eta_next_i_2 = secp.SecMul_dot_3(
                flip_weights_col_1, eta_pad_col_i_1, flip_weights_col_2,
                eta_pad_col_i_2, self.bit_length)
            # eta_next.append(eta_next_i)
            eta_next_1.append(eta_next_i_1)
            eta_next_2.append(eta_next_i_2)
        # self.eta_next = np.array(eta_next)
        self.eta_next_1 = np.array(eta_next_1).reshape(self.input_shape)
        self.eta_next_2 = np.array(eta_next_2).reshape(self.input_shape)
        # input_shape equals the previous layer's output_shape
        # self.eta_next = self.eta_next.reshape(self.input_shape)

        return self.eta_next_1, self.eta_next_2
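The stride-padding loop above places eta[:, :, i, j] at position (stride*i, stride*j) in a zero array of side (n-1)*(stride-1)+n = (n-1)*stride+1. The same dilation can be written as one strided assignment (a sketch; padding_stride in the forward pass presumably does the same thing):

import numpy as np

def dilate_with_zeros(eta, stride):
    # Insert (stride - 1) zeros between neighboring entries along H and W.
    b, c, h, w = eta.shape
    out = np.zeros((b, c, (h - 1) * stride + 1, (w - 1) * stride + 1))
    out[:, :, ::stride, ::stride] = eta
    return out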
Example #6
    def forward(self, input_array_1, input_array_2):
        '''Initialize input/output sizes'''
        self.input_shape = input_array_1.shape
        self.batchsize = self.input_shape[0]
        self.input_height = self.input_shape[2]
        self.input_width = self.input_shape[3]
        # convolution output width
        self.output_width = Conv_sec.compute_output_size(
            self.input_width, self.filter_width, self.zero_padding,
            self.stride)
        # convolution output height
        self.output_height = Conv_sec.compute_output_size(
            self.input_height, self.filter_height, self.zero_padding,
            self.stride)
        # initialize the output array [batch, output_channel, height, width]
        self.output_array = np.zeros((self.batchsize, self.out_channels,
                                      self.output_height, self.output_width))
        '''Compute the convolution'''
        # convert the filters to a matrix: flatten each filter into one row; filter [Cout,depth,height,width]
        weights_col_1 = self.weights_1.data.reshape([self.out_channels, -1])
        weights_col_2 = self.weights_2.data.reshape([self.out_channels, -1])

        if self.bias_required:
            bias_col_1 = self.bias_1.data.reshape([self.out_channels, -1])
            bias_col_2 = self.bias_2.data.reshape([self.out_channels, -1])

        # pad the input according to the padding method
        input_pad_1 = padding(input_array_1, self.method, self.zero_padding)
        input_pad_2 = padding(input_array_2, self.method, self.zero_padding)

        # self.input_col = []
        self.input_col_1 = []
        self.input_col_2 = []

        conv_out_1 = np.zeros(self.output_array.shape)
        conv_out_2 = np.zeros(self.output_array.shape)

        # print('output_shape: \n', conv_out.shape)

        # convolve every image / feature map in the input batch
        # start_time = time.time()
        '''Convolution loop'''
        for i in range(0, self.batchsize):
            input_i_1 = input_pad_1[i][np.newaxis, :]
            input_i_2 = input_pad_2[i][np.newaxis, :]

            input_col_i_1 = img2col(input_i_1, self.filter_width, self.stride,
                                    self.zero_padding)  # flatten each batch element's input into a matrix
            input_col_i_2 = img2col(input_i_2, self.filter_width, self.stride,
                                    self.zero_padding)

            # print('weight_col shape: \n', weights_col_1.shape)
            # print('input_col_i shape: \n', input_col_i_1.shape)

            if self.bias_required:
                # conv_out_i = np.dot(weights_col, input_col_i)+bias_col # matrix convolution; output is a [Cout,(H-k+1)*(W-k+1)] matrix
                # privacy-preserving computation
                conv_out_i_1, conv_out_i_2 = secp.SecMul_dot_3(
                    weights_col_1, input_col_i_1, weights_col_2, input_col_i_2,
                    self.bit_length)
                conv_out_i_1 += bias_col_1
                conv_out_i_2 += bias_col_2
            else:
                # conv_out_i = np.dot(weights_col, input_col_i)
                ## privacy-preserving computation
                conv_out_i_1, conv_out_i_2 = secp.SecMul_dot_3(
                    weights_col_1, input_col_i_1, weights_col_2, input_col_i_2,
                    self.bit_length)
            # conv_out[i] = np.reshape(conv_out_i, self.output_array[0].shape) # reshape to a [Cout,Hout,Wout] output
            conv_out_1[i] = np.reshape(conv_out_i_1,
                                       self.output_array[0].shape)
            conv_out_2[i] = np.reshape(conv_out_i_2,
                                       self.output_array[0].shape)
        # end_time = time.time()
        # print('time consume: \n', (end_time-start_time)*1000)

        #     self.input_col_1.append(input_col_i_1)
        #     self.input_col_2.append(input_col_i_2)
        # self.input_col_1 = np.array(self.input_col_1)
        # self.input_col_2 = np.array(self.input_col_2)

        return conv_out_1, conv_out_2
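Conv_sec.compute_output_size is not shown; a sketch of the standard convolution relation it presumably implements (an assumption, consistent with the img2col and stride usage above):

def conv_output_size(input_size, filter_size, zero_padding, stride):
    # Standard convolution output size: O = (W - F + 2P) // stride + 1
    return (input_size - filter_size + 2 * zero_padding) // stride + 1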