Example no. 1
    def __init__(self,
                 layer,
                 layer_name,
                 rank_selection,
                 ranks=None,
                 pretrained=None,
                 vbmf_weaken_factor=None,
                 param_reduction_rate=None):
        """
        rank_selection: str, 'vbmf'/'param_reduction'/'manual'
        """

        self.layer_name = layer_name
        self.layer = layer
        self.pretrained = pretrained

        if isinstance(self.layer, nn.Sequential):
            self.cin = self.layer[0].in_channels
            self.cout = self.layer[2].out_channels

            self.kernel_size = self.layer[1].kernel_size
            self.padding = self.layer[1].padding
            self.stride = self.layer[1].stride

        else:
            if not isinstance(self.layer, nn.Conv2d):
                raise AttributeError(
                    'only convolution layer can be decomposed')
            self.cin = self.layer.in_channels
            self.cout = self.layer.out_channels

            self.kernel_size = self.layer.kernel_size
            self.padding = self.layer.padding
            self.stride = self.layer.stride

        self.weight, self.bias = self.get_weights_to_decompose()

        if rank_selection == 'vbmf':
            self.ranks = estimate_vbmf_ranks(self.weight, vbmf_weaken_factor)

        elif rank_selection == 'manual':
            # self.ranks = [rank_cout, rank_cin]
            self.ranks = ranks

        elif rank_selection == 'param_reduction':
            if isinstance(self.layer, nn.Sequential):
                self.ranks = estimate_rank_for_compression_rate(
                    (self.layer[1].out_channels, self.layer[1].in_channels,
                     *self.kernel_size),
                    rate=param_reduction_rate,
                    key='tucker2')
            else:
                self.ranks = estimate_rank_for_compression_rate(
                    (self.cout, self.cin, *self.kernel_size),
                    rate=param_reduction_rate,
                    key='tucker2')

        ##### create decomposed layers
        self.new_layers = nn.Sequential()

        for j, l in enumerate(self.create_new_layers()):
            self.new_layers.add_module('{}-{}'.format(self.layer_name, j), l)

        weights, biases = self.get_tucker_factors()

        # copy the factor weights (and biases, when present) into the new layers
        for j, (w, b) in enumerate(zip(weights, biases)):
            new_layer = getattr(self.new_layers,
                                '{}-{}'.format(self.layer_name, j))
            new_layer.weight.data = w
            if b is not None:
                new_layer.bias.data = b
            else:
                new_layer.bias = None

        self.layer = None
        self.weight = None
        self.bias = None
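For context, the Tucker-2 layout that create_new_layers is expected to build (its body is not shown here) replaces a single KxK convolution with three convolutions: a 1x1 conv that compresses cin down to rank_cin, a KxK core conv between the reduced channel spaces, and a 1x1 conv that expands rank_cout back to cout. A minimal sketch under that assumption; the function name and keeping the bias only on the last conv are illustrative choices, not the library's API:

import torch.nn as nn

def tucker2_conv_layers(cin, cout, rank_cin, rank_cout,
                        kernel_size, padding, stride):
    # 1x1 compressing conv -> KxK core conv in the reduced space -> 1x1 restoring conv
    return nn.Sequential(
        nn.Conv2d(cin, rank_cin, kernel_size=1, bias=False),
        nn.Conv2d(rank_cin, rank_cout, kernel_size=kernel_size,
                  padding=padding, stride=stride, bias=False),
        nn.Conv2d(rank_cout, cout, kernel_size=1))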
Example no. 2
    def __init__(self,
                 layer,
                 layer_name,
                 rank_selection,
                 rank=None,
                 pretrained=None,
                 param_reduction_rate=None):
        """
        rank_selection: str, 'param_reduction'/'manual'
        """

        self.layer_name = layer_name
        self.layer = layer

        self.min_rank = 2

        if isinstance(self.layer, nn.Sequential):
            self.cin = self.layer[0].in_channels
            self.cout = self.layer[-1].out_channels

            self.kernel_size = (self.layer[1].kernel_size[0],
                                self.layer[2].kernel_size[1])
            self.padding = (self.layer[1].padding[0], self.layer[2].padding[1])
            self.stride = (self.layer[1].stride[0], self.layer[2].stride[1])

        else:
            if not isinstance(self.layer, nn.Conv2d):
                raise AttributeError(
                    'only convolution layer can be decomposed')
            self.cin = self.layer.in_channels
            self.cout = self.layer.out_channels

            self.kernel_size = self.layer.kernel_size
            self.padding = self.layer.padding
            self.stride = self.layer.stride

        self.weight, self.bias = self.get_weights_to_decompose()

        if rank_selection == 'param_reduction':
            if isinstance(self.layer, nn.Sequential):
                prev_rank = self.layer[0].out_channels
            else:
                prev_rank = None

            tensor_shape = (self.cout, self.cin, *self.kernel_size)
            self.rank = estimate_rank_for_compression_rate(
                tensor_shape,
                rate=param_reduction_rate,
                key='cp4',
                prev_rank=prev_rank,
                min_rank=self.min_rank)
        elif rank_selection == 'manual':
            self.rank = rank

        ##### create decomposed layers
        self.new_layers = nn.Sequential()

        for j, l in enumerate(self.create_new_layers()):
            self.new_layers.add_module('{}-{}'.format(self.layer_name, j), l)

        weights, biases = self.get_cp_factors()

        for j, (w, b) in enumerate(zip(weights, biases)):
            new_layer = getattr(self.new_layers,
                                '{}-{}'.format(self.layer_name, j))
            new_layer.weight.data = w
            if b is not None:
                new_layer.bias.data = b
            else:
                new_layer.bias = None

        self.layer = None
        self.weight = None
        self.bias = None
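The CP4 scheme this initializer prepares (again, create_new_layers is not shown) factorizes a Kh x Kw convolution into four layers: a 1x1 pointwise conv down to rank, a depthwise (Kh, 1) conv, a depthwise (1, Kw) conv, and a 1x1 pointwise conv up to cout. A minimal sketch under that assumption; the function name is illustrative:

import torch.nn as nn

def cp4_conv_layers(cin, cout, rank, kernel_size, padding, stride):
    kh, kw = kernel_size
    # pointwise -> vertical depthwise -> horizontal depthwise -> pointwise
    return nn.Sequential(
        nn.Conv2d(cin, rank, kernel_size=1, bias=False),
        nn.Conv2d(rank, rank, kernel_size=(kh, 1), groups=rank,
                  padding=(padding[0], 0), stride=(stride[0], 1), bias=False),
        nn.Conv2d(rank, rank, kernel_size=(1, kw), groups=rank,
                  padding=(0, padding[1]), stride=(1, stride[1]), bias=False),
        nn.Conv2d(rank, cout, kernel_size=1))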
Example no. 3
    def __init__(self,
                 layer,
                 layer_name,
                 rank_selection,
                 rank=None,
                 pretrained=None,
                 vbmf_weaken_factor=None,
                 param_reduction_rate=None):
        """
        rank_selection: str, 'vbmf'/'param_reduction'/'manual'
        """

        self.layer_name = layer_name
        self.layer = layer
        self.pretrained = pretrained

        if isinstance(self.layer, nn.Sequential):
            self.in_features = self.layer[0].in_features
            self.out_features = self.layer[1].out_features
        else:
            if not isinstance(self.layer, nn.Linear):
                raise AttributeError('only linear layer can be decomposed')
            self.in_features = self.layer.in_features
            self.out_features = self.layer.out_features

        self.weight, self.bias = self.get_weights_to_decompose()

        if rank_selection == 'vbmf':
            self.rank = estimate_vbmf_ranks(self.weight, vbmf_weaken_factor)

        elif rank_selection == 'manual':
            self.rank = rank

        elif rank_selection == 'param_reduction':
            if isinstance(self.layer, nn.Sequential):
                self.rank = self.layer[0].out_features // param_reduction_rate
            else:
                self.rank = estimate_rank_for_compression_rate(
                    (self.out_features, self.in_features),
                    rate=param_reduction_rate,
                    key='svd')
        ##### create decomposed layers
        self.new_layers = nn.Sequential()

        for j, l in enumerate(self.create_new_layers()):
            self.new_layers.add_module('{}-{}'.format(self.layer_name, j), l)

        weights, biases = self.get_svd_factors()

        for j, (w, b) in enumerate(zip(weights, biases)):
            new_layer = getattr(self.new_layers,
                                '{}-{}'.format(self.layer_name, j))
            new_layer.weight.data = w
            if b is not None:
                new_layer.bias.data = b
            else:
                new_layer.bias = None

        self.layer = None
        self.weight = None
        self.bias = None
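What get_svd_factors presumably computes is a truncated SVD of the weight matrix, turning one nn.Linear into two smaller ones. A minimal, self-contained sketch; the function name and the even split of sqrt(S) between the two factors are assumptions, not necessarily what the library does:

import torch
import torch.nn as nn

def svd_linear_layers(weight, bias, rank):
    # weight: (out_features, in_features) tensor of the original nn.Linear
    out_features, in_features = weight.shape
    U, S, Vh = torch.linalg.svd(weight, full_matrices=False)
    sqrt_s = S[:rank].sqrt()
    w0 = Vh[:rank, :] * sqrt_s.unsqueeze(1)   # (rank, in_features)
    w1 = U[:, :rank] * sqrt_s.unsqueeze(0)    # (out_features, rank)

    first = nn.Linear(in_features, rank, bias=False)
    second = nn.Linear(rank, out_features, bias=bias is not None)
    first.weight.data = w0
    second.weight.data = w1
    if bias is not None:
        second.bias.data = bias
    return nn.Sequential(first, second)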
Example no. 4
    def __init__(self,
                 layer,
                 layer_name,
                 rank_selection,
                 rank=None,
                 pretrained=None,
                 param_reduction_rate=None):
        """
        rank_selection: str, 'param_reduction'/'manual'
        """

        self.layer_name = layer_name
        self.layer = layer
        self.pretrained = pretrained

        if isinstance(self.layer, nn.Sequential):
            self.cin = self.layer[0].in_channels
            self.cout = self.layer[-1].out_channels

            self.kernel_size = self.layer[1].kernel_size
            self.padding = self.layer[1].padding
            self.stride = self.layer[1].stride

            self.device = self.layer[0].weight.device

        else:
            if not isinstance(self.layer, nn.Conv2d):
                raise AttributeError(
                    'only convolution layer can be decomposed')
            self.cin = self.layer.in_channels
            self.cout = self.layer.out_channels

            self.kernel_size = self.layer.kernel_size
            self.padding = self.layer.padding
            self.stride = self.layer.stride

            self.device = self.layer.weight.device

        self.weight, self.bias = self.get_weights_to_decompose()

        #         print("KERNEL SIZE", self.kernel_size, type(self.kernel_size[0]), type(self.kernel_size[1]))

        if rank_selection == 'param_reduction':
            if isinstance(self.layer, nn.Sequential):
                self.rank = estimate_rank_for_compression_rate(
                    (self.layer[1].out_channels, self.layer[1].in_channels,
                     *self.kernel_size),
                    rate=param_reduction_rate,
                    key='cp3')
            else:
                self.rank = estimate_rank_for_compression_rate(
                    (self.cout, self.cin, *self.kernel_size),
                    rate=param_reduction_rate,
                    key='cp3')
        elif rank_selection == 'manual':
            self.rank = rank

        self.rank = int(self.rank)
        ##### create decomposed layers
        self.new_layers = nn.Sequential()

        for j, l in enumerate(self.create_new_layers()):
            self.new_layers.add_module('{}-{}'.format(self.layer_name, j), l)

        weights, biases = self.get_cp_factors()

        for j, (w, b) in enumerate(zip(weights, biases)):
            new_layer = getattr(self.new_layers,
                                '{}-{}'.format(self.layer_name, j))
            new_layer.weight.data = w
            if b is not None:
                new_layer.bias.data = b
            else:
                new_layer.bias = None

        self.layer = None
        self.weight = None
        self.bias = None
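The CP3 layout assumed here splits a KxK convolution into a 1x1 conv down to rank, a depthwise KxK conv (groups=rank) that carries the spatial filtering, stride and padding, and a 1x1 conv back up to cout. A minimal sketch under that assumption; the function name is illustrative:

import torch.nn as nn

def cp3_conv_layers(cin, cout, rank, kernel_size, padding, stride):
    # pointwise -> depthwise spatial conv -> pointwise
    return nn.Sequential(
        nn.Conv2d(cin, rank, kernel_size=1, bias=False),
        nn.Conv2d(rank, rank, kernel_size=kernel_size, groups=rank,
                  padding=padding, stride=stride, bias=False),
        nn.Conv2d(rank, cout, kernel_size=1))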
Example no. 5
    def __init__(self,
                 layer,
                 layer_name,
                 rank_selection,
                 rank=None,
                 pretrained=None,
                 vbmf_weaken_factor=None,
                 param_reduction_rate=None):
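        """
        rank_selection: str, 'vbmf'/'param_reduction'/'manual'
        """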

        self.layer_name = layer_name
        self.layer = layer
        self.pretrained = pretrained

        self.min_rank = 2

        if isinstance(self.layer, nn.Sequential):
            self.in_channels = self.layer[0].in_channels
            self.out_channels = self.layer[1].out_channels

            self.padding = self.layer[1].padding
            self.stride = self.layer[1].stride
        else:
            if not isinstance(self.layer, nn.Conv2d):
                raise AttributeError('only conv layer can be decomposed')
            self.in_channels = self.layer.in_channels
            self.out_channels = self.layer.out_channels

            self.padding = self.layer.padding
            self.stride = self.layer.stride

        self.weight, self.bias = self.get_weights_to_decompose()

        if rank_selection == 'vbmf':
            self.rank = estimate_vbmf_ranks(self.weight,
                                            vbmf_weaken_factor,
                                            min_rank=self.min_rank)
        elif rank_selection == 'manual':
            self.rank = rank
        elif rank_selection == 'param_reduction':
            if isinstance(self.layer, nn.Sequential):
                prev_rank = self.layer[0].out_channels
            else:
                prev_rank = None

            self.rank = estimate_rank_for_compression_rate(
                (self.out_channels, self.in_channels),
                rate=param_reduction_rate,
                key='svd',
                prev_rank=prev_rank,
                min_rank=self.min_rank)

        ##### create decomposed layers
        self.new_layers = nn.Sequential()

        for j, l in enumerate(self.create_new_layers()):
            self.new_layers.add_module('{}-{}'.format(self.layer_name, j), l)

        weights, biases = self.get_svd_factors()

        for j, (w, b) in enumerate(zip(weights, biases)):
            new_layer = getattr(self.new_layers,
                                '{}-{}'.format(self.layer_name, j))
            new_layer.weight.data = w
            if b is not None:
                new_layer.bias.data = b
            else:
                new_layer.bias = None

        self.layer = None
        self.weight = None
        self.bias = None
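This last initializer targets 1x1 convolutions (no kernel_size is read), which act as linear maps over channels, so a truncated SVD splits one 1x1 conv into two cheaper 1x1 convs. A minimal sketch; the function name is illustrative, and placing stride and padding on the second conv is an assumption:

import torch.nn as nn

def svd_conv1x1_layers(in_channels, out_channels, rank, padding, stride):
    # channel-reducing 1x1 conv followed by a channel-restoring 1x1 conv
    return nn.Sequential(
        nn.Conv2d(in_channels, rank, kernel_size=1, bias=False),
        nn.Conv2d(rank, out_channels, kernel_size=1,
                  padding=padding, stride=stride))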