Example #1
    def __init__(self, input_width, input_height, channel_number,
                 learning_rate, cost_function):
        self.cost_function = cost_function
        self.predict_output_list = []
        self.conv1 = ConvLayer(input_width, input_height, channel_number, 3, 3,
                               16, 1, 1, ReluActivator(), learning_rate)

        self.conv3 = MaxPoolingLayer(input_width, input_height, 16, 3, 3, 1, 2)

        self.conv4 = ConvLayer(input_width // 2, input_height // 2, 16, 3, 3,
                               32, 1, 1, ReluActivator(), learning_rate)

        self.conv5 = MaxPoolingLayer(input_width // 2, input_height // 2, 32,
                                     3, 3, 1, 2)

        self.conv6 = ConvLayer(input_width // 4, input_height // 4, 32, 3, 3,
                               32, 1, 1, ReluActivator(), learning_rate)

        self.conv8 = UpsamplingLayer(input_width // 4, input_height // 4, 32)

        self.conv9 = ConvLayer(input_width // 2, input_height // 2, 32, 3, 3,
                               16, 1, 1, ReluActivator(), learning_rate)

        self.conv10 = UpsamplingLayer(input_width // 2, input_height // 2, 16)

        self.conv12 = ConvLayer(input_width, input_height, 16, 3, 3, 2, 1, 1,
                                TanhActivator(), learning_rate)
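
For reference, a minimal sketch of how these layers could be chained in a forward pass. The wiring is an assumption (the excerpt only shows __init__) and presumes each layer exposes a forward() method that returns its output:

    def forward(self, input_array):
        out = self.conv1.forward(input_array)   # W x H, 16 channels
        out = self.conv3.forward(out)           # max-pool -> W/2 x H/2
        out = self.conv4.forward(out)           # 32 channels
        out = self.conv5.forward(out)           # max-pool -> W/4 x H/4
        out = self.conv6.forward(out)
        out = self.conv8.forward(out)           # upsample -> W/2 x H/2
        out = self.conv9.forward(out)           # 16 channels
        out = self.conv10.forward(out)          # upsample -> W x H
        return self.conv12.forward(out)         # 2-channel tanh output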
Example #2
    def __init__(self, in_dim, n_class):
        super(Lenet_numpy, self).__init__()
        self.conv1 = ConvLayer(in_dim, 6, 5, 5, zero_padding=2, stride=1, method='SAME')
        self.conv2 = ConvLayer(6, 16, 5, 5, zero_padding=0, stride=1, method='VALID')
        self.conv3 = ConvLayer(16, 120, 5, 5, zero_padding=0, stride=1, method='VALID')

        self.maxpool1 = MaxPooling(pool_shape=(2, 2), stride=(2, 2))
        self.maxpool2 = MaxPooling(pool_shape=(2, 2), stride=(2, 2))
        self.relu1 = ReLU()
        self.relu2 = ReLU()
        self.relu3 = ReLU()
        self.relu4 = ReLU()
        self.fc1 = FullyConnect(120, 84)
        self.fc2 = FullyConnect(84, n_class)
        self.logsoftmax = Logsoftmax()
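
The layers above match LeNet-5. A minimal sketch of the usual forward order, assuming each module exposes a forward() method and the 120x1x1 conv output is flattened before fc1 (the actual wiring is not shown in this excerpt):

    def forward(self, x):                                                     # x: N x 1 x 28 x 28
        x = self.maxpool1.forward(self.relu1.forward(self.conv1.forward(x)))  # N x 6 x 14 x 14
        x = self.maxpool2.forward(self.relu2.forward(self.conv2.forward(x)))  # N x 16 x 5 x 5
        x = self.relu3.forward(self.conv3.forward(x))                         # N x 120 x 1 x 1
        x = x.reshape(x.shape[0], -1)                                         # flatten to 120
        x = self.relu4.forward(self.fc1.forward(x))                           # 84
        return self.logsoftmax.forward(self.fc2.forward(x))                   # n_class log-probs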
Example #3
    def __init__(self):
        super(Discriminator, self).__init__()
        # Input: 1*28*28 MNIST
        # 1*28*28 -> 64*14*14
        self.conv1 = ConvLayer(nc,
                               ndf,
                               4,
                               4,
                               zero_padding=1,
                               stride=2,
                               method='SAME',
                               bias_required=False)
        self.lrelu1 = Activators.LeakyReLU(0.2)

        # 64*14*14 -> 128*7*7
        self.conv2 = ConvLayer(ndf,
                               ndf * 2,
                               4,
                               4,
                               zero_padding=1,
                               stride=2,
                               method='SAME',
                               bias_required=False)
        self.bn1 = BatchNorm(ndf * 2)
        self.lrelu2 = Activators.LeakyReLU(0.2)

        # 128*7*7 -> 256*4*4
        self.conv3 = ConvLayer(ndf * 2,
                               ndf * 4,
                               4,
                               4,
                               zero_padding=1,
                               stride=2,
                               method='SAME',
                               bias_required=False)
        self.bn2 = BatchNorm(ndf * 4)
        self.lrelu3 = Activators.LeakyReLU(0.2)

        # 256*4*4 -> 1*1
        self.conv4 = ConvLayer(ndf * 4,
                               1,
                               4,
                               4,
                               zero_padding=0,
                               stride=1,
                               method='VALID',
                               bias_required=False)
        self.sigmoid = Activators.Sigmoid_CE()
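
A minimal sketch of the corresponding forward pass. The wiring is an assumption (only __init__ is shown); nc and ndf are the input-channel and base-filter counts defined elsewhere in the file:

    def forward(self, x):
        x = self.lrelu1.forward(self.conv1.forward(x))
        x = self.lrelu2.forward(self.bn1.forward(self.conv2.forward(x)))
        x = self.lrelu3.forward(self.bn2.forward(self.conv3.forward(x)))
        return self.sigmoid.forward(self.conv4.forward(x))  # per-image real/fake score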
Example #4
def Conv(filters,
         kernel_size,
         input_shape,
         strides=(1, 1),
         padding="VALID",
         activation='none'):
    return ConvLayer(filters, kernel_size, input_shape, strides, padding,
                     activation)
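
A hypothetical call to this factory. The keyword names come from the signature above; the tuple formats for kernel_size and input_shape are assumptions:

    conv = Conv(filters=32,
                kernel_size=(3, 3),
                input_shape=(28, 28, 1),
                strides=(1, 1),
                padding="SAME",
                activation='relu')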
Example #5
    def __init__(self, in_dim, n_class):
        super(CryptoNet_fivelayer, self).__init__()

        self.conv = ConvLayer(in_dim, 5, 5, 5, zero_padding=1, stride=2, method='SAME')
        self.sq1 = Activators.Square()
        self.fc1 = FullyConnect(845, 100)
        self.sq2 = Activators.Square()
        self.fc2 = FullyConnect(100, n_class)
        self.logsoftmax = Logsoftmax()
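
Square activations keep the network polynomial, which is what CryptoNets-style encrypted inference requires. The 845 inputs to fc1 are consistent with a 1x28x28 input: with zero_padding=1, a 5x5 kernel and stride 2, the conv yields 5 maps of 13x13. A quick check, assuming the standard output-size formula:

    out_side = (28 + 2 * 1 - 5) // 2 + 1   # = 13
    assert 5 * out_side * out_side == 845  # matches fc1's input width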
Example #6
    def __init__(self, input_width, input_height, channel_number,
                 learning_rate, cost_function):
        self.cost_function = cost_function
        self.predict_output_list = []
        self.conv1 = ConvLayer(input_width, input_height, channel_number, 3, 3,
                               64, 1, 1, ReluActivator(), learning_rate)

        # self.conv2 = ConvLayer(input_width, input_height, 64,
        #                   3, 3, 64, input_width // 2 + 1, 2, ReluActivator(), learning_rate)

        # self.conv2 = ConvLayer(input_width, input_height, 8,
        #                   3, 3, 8, 1, 1, ReluActivator(), learning_rate)

        self.conv3 = MaxPoolingLayer(input_width, input_height, 64, 3, 3, 1, 2)

        # self.conv4 = ConvLayer(input_width, input_height, 16,
        #                   3, 3, 16, 1, 1, ReluActivator(), learning_rate)

        self.conv5 = ConvLayer(input_width // 2, input_height // 2, 64, 3, 3,
                               128, 1, 1, ReluActivator(), learning_rate)

        self.conv6 = MaxPoolingLayer(input_width // 2, input_height // 2, 128,
                                     3, 3, 1, 2)

        self.conv7 = ConvLayer(input_width // 4, input_height // 4, 128, 3, 3,
                               256, 1, 1, ReluActivator(), learning_rate)

        self.conv8 = UpsamplingLayer(input_width // 4, input_height // 4, 256,
                                     3, 3, 128, 1, 1, learning_rate)

        self.conv9 = ConvLayer(input_width // 2, input_height // 2, 128, 3, 3,
                               32, 1, 1, ReluActivator(), learning_rate)

        self.conv10 = UpsamplingLayer(input_width // 2, input_height // 2, 32,
                                      3, 3, 32, 1, 1, learning_rate)

        # self.conv11 = ConvLayer(input_width, input_height, 8,
        #                   3, 3, 4, 1, 1, ReluActivator(), learning_rate)

        self.conv12 = ConvLayer(input_width, input_height, 32, 3, 3, 2, 1, 1,
                                NoneActivator(), learning_rate)
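
The width/height arguments passed to each layer spell out the encoder-decoder shape plan. A quick trace, assuming each stride-2 pooling layer halves the spatial size and each UpsamplingLayer doubles it:

    w, h = input_width, input_height   # conv1 keeps W x H (pad 1, stride 1)
    w, h = w // 2, h // 2              # conv3: max-pool -> W/2 x H/2
    w, h = w // 2, h // 2              # conv6: max-pool -> W/4 x H/4
    w, h = w * 2, h * 2                # conv8: upsample -> W/2 x H/2
    w, h = w * 2, h * 2                # conv10: upsample -> back to W x H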
Example #7
    def __init__(self, in_dim, n_class):
        super(Minionn_fivelayer, self).__init__()

        self.conv = ConvLayer(in_dim,
                              5,
                              5,
                              5,
                              zero_padding=2,
                              stride=2,
                              method='SAME')
        self.relu1 = Activators.ReLU()
        self.fc1 = FullyConnect(980, 100)
        self.relu2 = Activators.ReLU()
        self.fc2 = FullyConnect(100, n_class)
        self.logsoftmax = Logsoftmax()
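
As in the previous example, fc1's 980 inputs line up with a 1x28x28 input: zero_padding=2 and stride 2 give 5 maps of 14x14. A quick check, assuming the same output-size formula:

    out_side = (28 + 2 * 2 - 5) // 2 + 1   # = 14
    assert 5 * out_side * out_side == 980  # matches fc1's input width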
Example #8
import time

import numpy as np
import torch

# ConvLayer, Conv_sec and Parameter come from the surrounding project;
# their import paths are omitted in this excerpt.


def conv_test():
    bit_length = 32
    # (1,28,28)*(5,5,5)
    # x_numpy = np.random.randn(1,1,28,28).astype(np.float32)
    # w_numpy = np.random.randn(5,1,5,5).astype(np.float32)
    # b_numpy = np.random.randn(5).astype(np.float32)
    # # (1,28,28)*(5,5,5)
    # x_numpy_1 = np.random.randn(1,1,28,28).astype(np.float32)
    # x_numpy_2 = x_numpy-x_numpy_1
    # w_numpy_1 = np.random.randn(5,1,5,5).astype(np.float32)
    # w_numpy_2 = w_numpy-w_numpy_1
    # b_numpy_1 = np.random.randn(5).astype(np.float32)
    # b_numpy_2 = b_numpy-b_numpy_1

    ## (3,32,32)*(64,2,2)
    # x_numpy = np.random.randn(1,3,32,32).astype(np.float32)
    # w_numpy = np.random.randn(64,3,2,2).astype(np.float32)
    # b_numpy = np.random.randn(64).astype(np.float32)
    # x = torch.tensor(x_numpy, requires_grad=True)

    # x_numpy_1 = np.random.randn(1,3,32,32).astype(np.float32)
    # x_numpy_2 = x_numpy-x_numpy_1
    # w_numpy_1 = np.random.randn(64,3,2,2).astype(np.float32)
    # w_numpy_2 = w_numpy-w_numpy_1
    # b_numpy_1 = np.random.randn(64).astype(np.float32)
    # b_numpy_2 = b_numpy-b_numpy_1

    # plaintext input, weights and bias: (1,32,32,32) * (128,32,3,3)
    x_numpy = np.random.randn(1, 32, 32, 32).astype(np.float32)
    w_numpy = np.random.randn(128, 32, 3, 3).astype(np.float32)
    b_numpy = np.random.randn(128).astype(np.float32)
    x = torch.tensor(x_numpy, requires_grad=True)

    # split each tensor into two additive shares (share_1 + share_2 == plaintext)
    x_numpy_1 = np.random.randn(1, 32, 32, 32).astype(np.float32)
    x_numpy_2 = x_numpy - x_numpy_1
    w_numpy_1 = np.random.randn(128, 32, 3, 3).astype(np.float32)
    w_numpy_2 = w_numpy - w_numpy_1
    b_numpy_1 = np.random.randn(128).astype(np.float32)
    b_numpy_2 = b_numpy - b_numpy_1

    print('input_shape: ', x_numpy.shape)
    print('w_shape: ', w_numpy.shape)

    # padding=0, stride=2
    # cl1 = Conv_sec(1, 5, 5, 5, zero_padding=0, stride=2, method='SAME')
    # cl1 = Conv_sec(3, 64, 2, 2, zero_padding=0, stride=2, method='SAME')
    cl1 = Conv_sec(32, 128, 3, 3, zero_padding=0, stride=2, method='SAME')
    # reference layers configured to match cl1, so the (128,32,3,3) weights fit
    cl_ori = ConvLayer(32, 128, 3, 3, zero_padding=0, stride=2, method='SAME')
    cl_tensor = torch.nn.Conv2d(32, 128, kernel_size=3, stride=2, padding=0)
    ## set the parameters
    cl_ori.set_weight(Parameter(w_numpy, requires_grad=True))
    cl_ori.set_bias(Parameter(b_numpy, requires_grad=True))
    cl1.set_weight_1(Parameter(w_numpy_1, requires_grad=True))
    cl1.set_bias_1(Parameter(b_numpy_1, requires_grad=True))
    cl1.set_weight_2(Parameter(w_numpy_2, requires_grad=True))
    cl1.set_bias_2(Parameter(b_numpy_2, requires_grad=True))

    # print('param_error: \n', w_numpy-(w_numpy_1+w_numpy_2))
    # print('param_error: \n', cl_ori.weights.data-(cl1.weights_1.data+cl1.weights_2.data))

    '''Forward pass'''
    # start_time_tensor = time.time()
    # conv_out = cl_tensor(x)
    # end_time_tensor = time.time()
    # start_time = time.time()
    # conv_out = cl_ori.forward(x_numpy)
    # end_time = time.time()

    test_num = 10
    time_avg = 0
    for i in range(test_num):
        start_time_sec = time.time()
        conv_out_1, conv_out_2 = cl1.forward(x_numpy_1, x_numpy_2)
        end_time_sec = time.time()
        time_avg += (end_time_sec - start_time_sec) * 1000  # accumulate in ms
    print('avg forward time (ms): \n', time_avg / test_num)
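
Because the shares are additive (x_numpy_1 + x_numpy_2 == x_numpy, and likewise for the weights and biases), the shared result can be checked against the plain layer by recombining the two outputs. A hypothetical check, assuming cl_ori.forward returns a numpy array:

    conv_out_ref = cl_ori.forward(x_numpy)
    err = np.abs((conv_out_1 + conv_out_2) - conv_out_ref).max()
    print('max reconstruction error: ', err)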