Example #1
    def forward(self, x):
        if self.type == 'scalar':
            z_conv = conv2d_scalar(x, self.conv_layer.weight,
                                   self.conv_layer.bias, self.device)
            z_pool = pool2d_scalar(z_conv, self.device)
            z_pool_reshaped = reshape_scalar(z_pool, self.device)
            z_fc1 = fc_layer_scalar(z_pool_reshaped, self.fc_layer1.weight,
                                    self.fc_layer1.bias, self.device)
            z_relu = relu_scalar(z_fc1, self.device)
            z_fc2 = fc_layer_scalar(z_relu, self.fc_layer2.weight,
                                    self.fc_layer2.bias, self.device)
        elif self.type == 'vector':
            z_conv = conv2d_vector(x, self.conv_layer.weight,
                                   self.conv_layer.bias, self.device)
            z_pool = pool2d_vector(z_conv, self.device)
            z_pool_reshaped = reshape_vector(z_pool, self.device)
            z_fc1 = fc_layer_vector(z_pool_reshaped, self.fc_layer1.weight,
                                    self.fc_layer1.bias, self.device)
            z_relu = relu_vector(z_fc1, self.device)
            z_fc2 = fc_layer_vector(z_relu, self.fc_layer2.weight,
                                    self.fc_layer2.bias, self.device)
        else:
            z_conv = self.conv_layer(x)
            z_pool = F.max_pool2d(z_conv, 2, 2)
            z_pool_reshaped = z_pool.view(-1, 20 * 12 * 12)
            z_fc1 = self.fc_layer1(z_pool_reshaped)
            z_relu = F.relu(z_fc1)
            z_fc2 = self.fc_layer2(z_relu)
        y = F.softmax(z_fc2, dim=1)
        return y
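For context, a hedged sketch of how a model with this forward might be exercised; the class name SimpleConvNet, its constructor, and the 10-class output are assumptions, not part of the original example:

    import torch

    # Assumed construction; only the forward pass above comes from the source.
    model = SimpleConvNet(type='torch')   # 'scalar'/'vector' select the hand-written kernels
    x = torch.rand(1, 1, 28, 28)          # MNIST-shaped input
    y = model(x)                          # softmax probabilities, presumably shape (1, 10)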
Example #2
    def __forward_scalar(self, x):
        z_conv = conv2d_scalar(x, self.conv_layer.weight, self.conv_layer.bias,
                               self.device)
        z_pool = pool2d_scalar(z_conv, self.device)
        z_pool_reshaped = reshape_scalar(z_pool, self.device)
        z_fc1 = fc_layer_scalar(z_pool_reshaped, self.fc_layer1.weight,
                                self.fc_layer1.bias, self.device)
        z_relu = relu_scalar(z_fc1, self.device)
        z_fc2 = fc_layer_scalar(z_relu, self.fc_layer2.weight,
                                self.fc_layer2.bias, self.device)
        y = F.softmax(z_fc2, dim=1)
        return y
    def forward(self, x):

        if self.version == 'torch':
            z_conv = self.conv_layer(x)
            z_pool = F.max_pool2d(z_conv, 2, 2)
            z_pool_reshaped = z_pool.view(-1, 20 * 12 * 12)
            z_fc1 = self.fc_layer1(z_pool_reshaped)
            z_relu = F.relu(z_fc1)
            z_fc2 = self.fc_layer2(z_relu)
        elif self.version == 'self-scalar':
            z_conv = conv2d_scalar(x,
                                   conv_weight=self.conv_layer.weight,
                                   conv_bias=self.conv_layer.bias,
                                   device=self.device)
            z_pool = pool2d_scalar(z_conv, self.device)
            z_pool_reshaped = reshape_scalar(z_pool, self.device)
            z_fc1 = fc_layer_scalar(z_pool_reshaped, self.fc_layer1.weight,
                                    self.fc_layer1.bias, self.device)
            z_relu = relu_scalar(z_fc1, self.device)
            z_fc2 = fc_layer_scalar(z_relu, self.fc_layer2.weight,
                                    self.fc_layer2.bias, self.device)
        elif self.version == 'self-vector':
            z_conv = conv2d_vector(x,
                                   conv_weight=self.conv_layer.weight,
                                   conv_bias=self.conv_layer.bias,
                                   device=self.device)
            z_pool = pool2d_vector(z_conv, self.device)
            z_pool_reshaped = reshape_vector(z_pool, self.device)
            z_fc1 = fc_layer_vector(z_pool_reshaped, self.fc_layer1.weight,
                                    self.fc_layer1.bias, self.device)
            z_relu = relu_vector(z_fc1, self.device)
            z_fc2 = fc_layer_vector(z_relu, self.fc_layer2.weight,
                                    self.fc_layer2.bias, self.device)
        else:
            raise ValueError(
                "Please choose a version from 'torch', 'self-vector' or 'self-scalar'."
            )

        y = F.softmax(z_fc2, dim=1)

        return y
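The scalar and vector kernels themselves are not included in this snippet. Purely as an illustration of the naming convention, a minimal sketch of what the ReLU pair might look like, assuming relu_scalar loops elementwise and relu_vector uses a single tensorized op; neither is the original implementation:

    import torch

    def relu_scalar(x, device):
        # assumed: explicit elementwise loop over a flattened view
        x = x.to(device)
        out = torch.zeros_like(x)
        flat_in, flat_out = x.reshape(-1), out.reshape(-1)
        for i in range(flat_in.numel()):
            if flat_in[i] > 0:
                flat_out[i] = flat_in[i]
        return out

    def relu_vector(x, device):
        # assumed: one tensorized op on the whole tensor
        return torch.clamp(x.to(device), min=0)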
    def forward(self, x):
        # Debugging variant: run each torch layer next to its hand-written
        # scalar counterpart and print the MSE between the two outputs.
        z_conv = self.conv_layer(x)
        my_conv = conv2d_scalar(x,
                                conv_weight=self.conv_layer.weight,
                                conv_bias=self.conv_layer.bias,
                                device=self.device)

        print("TEST!!! conv = ", diff_mse(my_conv, z_conv))
        z_conv = my_conv

        z_pool = F.max_pool2d(z_conv, 2, 2)
        my_pool = pool2d_scalar(z_conv, self.device)

        print("TEST!!! pool = ", diff_mse(my_pool, z_pool))
        z_pool = my_pool

        z_pool_reshaped = z_pool.view(-1, 20 * 12 * 12)

        z_fc1 = self.fc_layer1(z_pool_reshaped)
        my_fc1 = fc_layer_scalar(z_pool_reshaped, self.fc_layer1.weight,
                                 self.fc_layer1.bias, self.device)

        print("TEST!!! fc1 = ", diff_mse(z_fc1, my_fc1))
        z_fc1 = my_fc1

        z_relu = F.relu(z_fc1)
        my_relu = relu_scalar(z_fc1, self.device)

        print("TEST!!! relu = ", diff_mse(my_relu, z_relu))
        z_relu = my_relu

        z_fc2 = self.fc_layer2(z_relu)
        my_fc2 = fc_layer_scalar(z_relu, self.fc_layer2.weight,
                                 self.fc_layer2.bias, self.device)

        print("TEST!!! fc2 = ", diff_mse(my_fc2, z_fc2))
        z_fc2 = my_fc2

        y = F.softmax(z_fc2, dim=1)

        return y
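diff_mse is not defined in this snippet; judging from how it is used above, it presumably returns the mean squared error between two tensors. A minimal sketch under that assumption:

    import torch

    def diff_mse(a, b):
        # assumed behavior: MSE over all elements, returned as a Python float
        return torch.mean((a.reshape(-1) - b.reshape(-1)) ** 2).item()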
def check_consistency(model, version):
    # random MNIST-shaped tensor for the test; .to() is not in-place,
    # so the moved tensor must be reassigned
    x_in = torch.rand([1, 1, 28, 28])
    x_in = x_in.to(model.device)

    # conv weights
    conv_weight = model.conv_layer.weight
    conv_bias = model.conv_layer.bias

    # fc weights
    weight = model.fc_layer1.weight
    bias = model.fc_layer1.bias

    if version == 'self-scalar':

        # apply conv
        z_conv = model.conv_layer(x_in)
        z_conv_scalar = conv2d_scalar(x_in, conv_weight, conv_bias,
                                      model.device)
        assert diff_mse(z_conv, z_conv_scalar) < 1e-6

        # apply maxpool
        z_pool = F.max_pool2d(z_conv, 2, 2)
        z_pool_scalar = pool2d_scalar(z_conv_scalar, model.device)
        assert diff_mse(z_pool, z_pool_scalar) < 1e-6

        # apply reshape
        z_reshape = z_pool.view(-1, 20 * 12 * 12)
        z_reshape_scalar = reshape_scalar(z_pool_scalar, model.device)
        assert diff_mse(z_reshape, z_reshape_scalar) < 1e-6

        # apply fc
        z_fc = model.fc_layer1(z_reshape)
        z_fc_scalar = fc_layer_scalar(z_reshape_scalar, weight, bias,
                                      model.device)
        assert diff_mse(z_fc, z_fc_scalar) < 1e-6

        # apply relu
        z_relu = F.relu(z_fc)
        z_relu_scalar = relu_scalar(z_fc_scalar, model.device)
        assert diff_mse(z_relu, z_relu_scalar) < 1e-6

    elif version == 'self-vector':

        # apply conv
        z_conv = model.conv_layer(x_in)
        z_conv_vector = conv2d_vector(x_in, conv_weight, conv_bias,
                                      model.device)
        assert diff_mse(z_conv, z_conv_vector) < 1e-6

        # apply maxpool
        z_pool = F.max_pool2d(z_conv, 2, 2)
        z_pool_vector = pool2d_vector(z_conv_vector, model.device)
        assert diff_mse(z_pool, z_pool_vector) < 1e-6

        # apply reshape
        z_reshape = z_pool.view(-1, 20 * 12 * 12)
        z_reshape_vector = reshape_vector(z_pool_vector, model.device)
        assert diff_mse(z_reshape, z_reshape_vector) < 1e-6

        # apply fc
        z_fc = model.fc_layer1(z_reshape)
        z_fc_vector = fc_layer_vector(z_reshape_vector, weight, bias,
                                      model.device)
        assert diff_mse(z_fc, z_fc_vector) < 1e-6

        # apply relu
        z_relu = F.relu(z_fc)
        z_relu_vector = relu_vector(z_fc_vector, model.device)
        assert diff_mse(z_relu, z_relu_vector) < 1e-6
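A hedged driver for the check above; the model class and its construction are assumptions:

    if __name__ == '__main__':
        model = SimpleConvNet(version='self-scalar')   # assumed class/constructor
        check_consistency(model, 'self-scalar')
        check_consistency(model, 'self-vector')
        print('All consistency checks passed.')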