# Shared imports assumed by the snippets below; transform_conv,
# filter_characteristic, resnet and resnet_cifar are project-specific modules
# referenced as-is (their import paths are not shown in this dump).
import copy
import torch
import torch.nn as nn
import transform_conv
from transform_conv import conv_to_matrix
from filter_characteristic.graph_convolutional_network import pca

def extract_innerlayer_features(self, net):
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        singular_values = []
        means = []
        maxs = []
        stds = []
        channel_num = []
        filter_num = []
        for name, mod in net.named_modules():  # mod references the live module, hence the deepcopy below
            if isinstance(mod, nn.Conv2d) and 'downsample' not in name:
                filter_num += [mod.out_channels]
                channel_num += [mod.in_channels]
                weight = transform_conv.conv_to_matrix(copy.deepcopy(mod))
                u, s, v = torch.svd(weight)
                singular_values += [s[:self.feature_len - 5]]
                means += [torch.mean(weight, dim=1)]
                maxs += [torch.max(weight, dim=1)[0]]
                stds += [torch.std(weight, dim=1)]

        innerlayer_features = torch.zeros(
            (sum(filter_num), self.feature_len)).to(device)
        start = 0
        for i in range(len(filter_num)):
            stop = start + filter_num[i]
            innerlayer_features[start:stop, 0] = i + 1  # layer index
            innerlayer_features[start:stop, 1] = channel_num[i]  # channel_num
            innerlayer_features[start:stop, 2] = means[i]  # per-filter mean
            innerlayer_features[start:stop, 3] = maxs[i]  # per-filter max
            innerlayer_features[start:stop, 4] = stds[i]  # per-filter standard deviation
            innerlayer_features[start:stop, 5:] = singular_values[i].repeat(
                filter_num[i], 1)  # top-k singular values
            start = stop
        return innerlayer_features
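
# --- Illustrative sketch (not from the original code) ---
# Self-contained version of the per-filter feature row built above for a single
# Conv2d; feature_len and all sizes here are assumptions chosen for illustration.
def _innerlayer_feature_row_sketch():
    import torch
    import torch.nn as nn
    conv = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3)
    weight = conv.weight.data.view(conv.out_channels, -1)  # 32 x (16*3*3), the layout conv_to_matrix is assumed to produce
    u, s, v = torch.svd(weight)
    feature_len = 10  # assumed: 5 statistics + top (feature_len - 5) singular values
    rows = torch.zeros(conv.out_channels, feature_len)
    rows[:, 0] = 1                     # layer index (1-based)
    rows[:, 1] = conv.in_channels      # channel_num
    rows[:, 2] = weight.mean(dim=1)    # per-filter mean
    rows[:, 3] = weight.max(dim=1)[0]  # per-filter max
    rows[:, 4] = weight.std(dim=1)     # per-filter std
    rows[:, 5:] = s[:feature_len - 5].repeat(conv.out_channels, 1)  # shared top-k singular values
    return rows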
    def forward_resnet(self, net, rounds):
        net = copy.deepcopy(net)
        for _ in range(rounds):  # aggregate information across the convs
            first_conv = True
            for name, mod in net.named_modules():
                if first_conv and isinstance(
                        mod, nn.Conv2d):  # first conv in the ResNet
                    weight = conv_to_matrix(mod)
                    information_at_last = weight.mean(dim=1).reshape(
                        [-1, 1])  # mean of the current layer
                    first_conv = False
                if isinstance(mod, (resnet.Bottleneck, resnet_cifar.BasicBlock)):
                    _, information_at_last = self.aggregate_block(
                        mod, information_at_last)

        weight_list = []
        for name, mod in net.named_modules():
            if isinstance(mod, nn.Conv2d) and 'downsample' not in name:
                weight_list += [conv_to_matrix(mod)]

        gcn_feature_in = []
        for weight in weight_list:
            gcn_feature_in += [
                pca(weight, dim=self.in_features)
            ]  # reduce every filter to the same feature dimension

        features = torch.cat(gcn_feature_in, dim=0)

        features = self.normalization(features)
        output = self.network(features)

        return output  # one row of features per filter, grouped by conv
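
# `pca` is imported from filter_characteristic.graph_convolutional_network and
# its body is not shown in this dump. A minimal sketch of what it plausibly
# does, assuming it projects each filter's flattened weights onto the top `dim`
# principal directions (the real helper may differ):
def pca_sketch(weight, dim):
    import torch
    centered = weight - weight.mean(dim=0, keepdim=True)  # center the filters
    u, s, v = torch.svd(centered)
    return centered.matmul(v[:, :dim])  # (num_filters, dim) projection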
def pca_filter(net, feature_len):
    from transform_conv import conv_to_matrix
    from filter_characteristic.graph_convolutional_network import pca
    weight_list = []
    for name, mod in net.named_modules():
        if isinstance(mod, nn.Conv2d) and 'downsample' not in name:
            weight_list += [conv_to_matrix(mod)]

    gcn_feature_in = []
    for weight in weight_list:
        gcn_feature_in += [
            pca(weight, dim=feature_len)
        ]  # reduce every filter to the same feature dimension
    features = torch.cat(gcn_feature_in, dim=0)
    features = features.detach().cpu().numpy()
    return features
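
# Hedged usage sketch for pca_filter. vgg16 and feature_len=27 are illustrative
# choices (27 = 3*3*3, the flattened width of vgg16's first conv, which caps the
# PCA dimension for that layer); the project's own models would normally be used.
if __name__ == '__main__':
    import torchvision
    net = torchvision.models.vgg16(weights=None)  # older torchvision: pretrained=False
    feats = pca_filter(net, feature_len=27)
    print(feats.shape)  # (total number of filters across all convs, 27)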
    def forward_vgg(self, net, rounds):
        '''
        :param net: VGG-style network whose conv filters are featurized
        :param rounds: number of information-aggregation rounds over the convs
        :return: extracted features representing the cross-layer relationship of each filter
        '''
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        net = copy.deepcopy(net)
        conv_list = []
        filter_num = []
        for mod in net.modules():
            if isinstance(mod, nn.Conv2d):
                filter_num += [mod.out_channels]
                conv_list += [mod]
        for _ in range(rounds):
            mean = torch.zeros(3, 1).to(
                net.features[0].weight.device)  # initialize mean for the first layer
            self.aggregate_convs(conv_list, mean)

        weight_list = []
        for mod in net.modules():
            if isinstance(mod, nn.Conv2d):
                weight_list += [conv_to_matrix(mod)]

        gcn_feature_in = []
        for weight in weight_list:
            gcn_feature_in += [
                pca(weight, dim=self.in_features)
            ]  # reduce every filter to the same feature dimension

        features = torch.zeros((sum(filter_num), self.in_features)).to(device)
        start = 0
        for i in range(len(filter_num)):
            stop = start + filter_num[i]
            features[start:stop] = gcn_feature_in[i]
            start = stop
        features = self.normalization(features)
        gcn_feature_out = self.network(features)

        return gcn_feature_out  # one row of features per filter, grouped by conv
    def aggregate_block(self, block, information_in_front):
        '''
        aggregate a BasicBlock or Bottleneck in a ResNet
        :param block: block module
        :param information_in_front: per-filter means from the conv in front of the block
        :return: (conv modules of the block, information aggregated after the block)
        '''
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        weight_downsample = None
        zero_padding = False
        conv_list = []
        for name, mod in block.named_modules():
            if 'downsample' in name and isinstance(
                    mod, resnet_cifar.LambdaLayer):  # basic block with zero-padding shortcut
                zero_padding = True
            if isinstance(mod, nn.Conv2d):
                if 'downsample' in name:
                    weight_downsample = conv_to_matrix(mod)
                    continue
                conv_list += [mod]  # conv modules on the block's main path

        _, information_at_last = self.aggregate_convs(conv_list,
                                                      information_in_front)

        # shortcut
        if weight_downsample is not None:  # 1x1 downsample conv
            weight_downsample += information_in_front.view(-1)
            information_at_last += weight_downsample.mean(dim=1).reshape(
                [-1, 1])
        elif zero_padding:  # zero-padding shortcut
            pad_length = information_at_last.shape[
                0] - information_in_front.shape[0]
            information_at_last += torch.cat(
                (information_in_front, torch.zeros(pad_length, 1).to(device)),
                0)
        else:  # identity shortcut
            information_at_last += information_in_front

        return conv_list, information_at_last
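
# Concrete trace of the zero-padding shortcut branch above (sizes are made up):
# a block that widens 16 -> 32 channels pads the incoming means with zeros
# before adding them to the block's output means.
def _zero_padding_shortcut_sketch():
    import torch
    information_in_front = torch.ones(16, 1)  # means entering the block
    information_at_last = torch.zeros(32, 1)  # block output has 32 filters
    pad_length = information_at_last.shape[0] - information_in_front.shape[0]
    information_at_last += torch.cat(
        (information_in_front, torch.zeros(pad_length, 1)), 0)
    return information_at_last  # first 16 rows are 1, the rest stay 0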
    def aggregate_convs(self, conv_list, information_in_front):
        '''
        aggregate information for convs that are directly linked to each other
        :param conv_list: list containing conv modules.
                            Warning: this method modifies the weights of the convs, so if you don't
                            want to change the weights in the original network, pass a deepcopy of
                            the convs.
        :param information_in_front: per-filter means from the layer in front, shape (num_filters, 1)
        :return: (conv_list, information aggregated at the last conv)
        '''
        mean = information_in_front
        weight_list = []
        for conv in conv_list:
            weight_list += [conv_to_matrix(conv)]

        for i in range(len(conv_list)):
            kernel_size = conv_list[i].kernel_size[0] * conv_list[
                i].kernel_size[1]
            mean = mean.repeat(1, kernel_size).view(
                -1)  # repeat each value once per kernel position
            weight_list[i] += mean  # aggregate the means from the previous layer
            mean = weight_list[i].mean(dim=1).reshape(
                [-1, 1])  # mean of the current layer
        information_at_last = mean
        return conv_list, information_at_last  # information from the last conv
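
# Self-contained trace of the aggregation loop above for two tiny convs
# (shapes are illustrative; note that the loop mutates the conv weights, as the
# docstring warns):
def _aggregate_convs_sketch():
    import torch
    import torch.nn as nn
    convs = [nn.Conv2d(3, 4, 3), nn.Conv2d(4, 8, 3)]
    mean = torch.zeros(3, 1)  # information entering the first conv
    for conv in convs:
        weight = conv.weight.data.view(conv.out_channels, -1)  # layout conv_to_matrix is assumed to produce
        k = conv.kernel_size[0] * conv.kernel_size[1]
        weight += mean.repeat(1, k).view(-1)      # spread previous means over kernel positions
        mean = weight.mean(dim=1).reshape(-1, 1)  # summary passed to the next conv
    return mean  # shape (8, 1): one value per filter of the last conv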