Example #1
    def _contrib_adaptive_avg_pool2d(self, x_in, y_out, ydx, layer, threshold=0.1):
        '''
        Return the contributing synapses for a torch.nn.AdaptiveAvgPool2d layer

        Parameters
        ----------
        x_in : torch.Tensor
            dimensions: batchsize,channels,height,width
        y_out : torch.Tensor
            dimensions: batchsize,channels,height,width
        ydx : tuple
            (channel,row,column) position of an output neuron
        layer : list(str)
            list containing a single key in the self.model.available_modules() dictionary
        threshold : float
            fraction of the output value that the kept contributions must
            account for

        Returns
        -------
        neuron_counts
        synapse_counts
        synapse_weights
        '''
        neuron_counts = Counter()
        synapse_counts = Counter()
        synapse_weights = set()
        avgpool = self.model.available_modules()[layer[0]]

        # Grab the dimensions used by the adaptive pooling layer
        output_size = avgpool.output_size[0]
        input_size = x_in.shape[-1]
        stride = (input_size // output_size)
        kernel_size = input_size - (output_size - 1) * stride

        if len(ydx) == 1:
            ch, i, j = get_index(ydx[0], kernel_size)
        else:
            ch, i, j = ydx

        scalar = 1 / (kernel_size**2)  # multiplier for computing average
        goal = threshold * y_out[0, ch, i, j]

        xmat = submatrix_generator(x_in, stride, kernel_size)(i, j)[ch]
        ordsmat = sorted(
            [(v * scalar, c) for c, v in enumerate(xmat.flatten()) if v != 0],
            key=lambda t: t[0],
            reverse=True)
        if len(ordsmat) > 0:
            cumsum = torch.cumsum(torch.Tensor([v[0] for v in ordsmat]), dim=0).detach()
            assert np.allclose(
                cumsum[-1].detach().numpy(),
                y_out[0, ch, i, j].detach().numpy(),
                rtol=1e-04,
                atol=1e-4), f'avgpool failure: {cumsum[-1] - y_out[0,ch,i,j]}'

            # keep the smallest prefix of contributions whose cumulative
            # sum exceeds the threshold share of the output value
            for idx, t in enumerate(cumsum):
                if t > goal:
                    break
            totalsum = cumsum[-1]  # sum of all nonzero contributions
            for v, c in ordsmat[:idx + 1]:
                neuron = (ch, c // kernel_size + stride * i, c % kernel_size + stride * j)
                neuron_counts.update([neuron])
                synapse_counts.update([(neuron, ydx)])
                weight = round(float((v / totalsum).detach().numpy()), 6)
                synapse_weights.update([(neuron, ydx, weight)])
        return neuron_counts, synapse_counts, synapse_weights
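
The stride and kernel size recovered above assume the adaptive pooling layer divides its input evenly. As a sanity check, here is a minimal standalone sketch (not part of the original class; the tensor shapes and output size are arbitrary choices) showing that those formulas reproduce torch.nn.AdaptiveAvgPool2d with an explicit AvgPool2d when the input size is an integer multiple of the output size:

    import torch

    x = torch.randn(1, 3, 8, 8)  # batchsize, channels, height, width
    output_size = 4
    input_size = x.shape[-1]

    # same geometry recovery as in _contrib_adaptive_avg_pool2d
    stride = input_size // output_size                     # 2
    kernel_size = input_size - (output_size - 1) * stride  # 8 - 3*2 = 2

    adaptive = torch.nn.AdaptiveAvgPool2d(output_size)
    explicit = torch.nn.AvgPool2d(kernel_size, stride=stride)
    assert torch.allclose(adaptive(x), explicit(x), atol=1e-6)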
Example #2
    def _contrib_max2d(self, x_in, y_out, ydx, layer, threshold=None):
        '''
        Return the contributing synapse for a torch.nn.MaxPool2d layer

        Parameters
        ----------
        x_in : torch.Tensor
            dimensions: batchsize,channels,height,width
        y_out : torch.Tensor
            dimensions: batchsize,channels,height,width
        ydx : tuple
            (channel,row,column) position of an output neuron
        layer : list(str)
            list containing a single key in the self.model.available_modules() dictionary
        threshold : None or float
            not used; kept as a placeholder for a uniform signature.

        Returns
        -------
        neuron_counts
        synapse_counts
        synapse_weights
        '''
        neuron_counts = Counter()
        synapse_counts = Counter()
        synapse_weights = set()
        maxpool = self.model.available_modules()[layer[0]]

        # Grab dimensions of maxpool from parameters
        stride = maxpool.stride
        kernel_size = maxpool.kernel_size

        if len(ydx) == 1:
            ch, i, j = get_index(ydx[0], kernel_size)
        else:
            ch, i, j = ydx

        xmat = submatrix_generator(x_in, stride, kernel_size)(i, j)[ch]

        c, v = max(enumerate(xmat.flatten()), key=lambda x: x[1])
        assert np.allclose(
            v.detach().numpy(),
            y_out[0, ch, i, j].detach().numpy(),
            rtol=1e-04,
            atol=1e-4), f'maxpool failure: {v - y_out[0,ch,i,j]}'

        neuron = (ch, c // kernel_size + stride * i,
                  c % kernel_size + stride * j)
        neuron_counts.update([neuron])
        synapse_counts.update([(neuron, ydx)])
        synapse_weights.update([
            (neuron, ydx, 1)
        ])  # 1 is just a placeholder since it is a direct contribution
        return neuron_counts, synapse_counts, synapse_weights
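
The index arithmetic `c // kernel_size + stride * i` and `c % kernel_size + stride * j` recovers the input position of the max from its flat index within the pooling window. Here is a minimal standalone sketch (not from the original source; shapes and the probed output position are arbitrary) checking that arithmetic against MaxPool2d's own return_indices:

    import torch

    torch.manual_seed(0)
    x = torch.randn(1, 1, 4, 4)
    stride, kernel_size = 2, 2
    pool = torch.nn.MaxPool2d(kernel_size, stride=stride, return_indices=True)
    y, indices = pool(x)

    ch, i, j = 0, 1, 0  # an arbitrary output position
    window = x[0, ch, stride * i:stride * i + kernel_size,
                      stride * j:stride * j + kernel_size]
    c = int(torch.argmax(window.flatten()))
    row = c // kernel_size + stride * i  # same arithmetic as _contrib_max2d
    col = c % kernel_size + stride * j

    # MaxPool2d reports the flat index into the full input plane
    assert indices[0, ch, i, j] == row * x.shape[-1] + col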
Example #3
    def _contrib_conv2d(self, x_in, y_out, ydx, layers, threshold=0.1):
        '''
        Profile a single output neuron from a 2d conv layer

        Parameters
        ----------
        x_in : torch.Tensor
            dimensions: batchsize,channels,height,width
        y_out : torch.Tensor
            dimensions: batchsize,channels,height,width
        ydx : tuple
            (channel,row,column) 3d position of an output neuron
        layers : list([str,str])
            list containing keys in self.model.available_modules() dictionary
            for conv2d these will refer to a convolutional module and an activation module
        threshold : float
            fraction of the output value that the kept contributions must
            account for

        Returns
        -------
        neuron_counts
        synapse_counts
        synapse_weights

        Note
        ----
        Only implemented for convolutions with square filters, strides equal
        in both dimensions, and padding equal in all dimensions.

        Synapse profiles for conv2d are indexed by three tuples: one for each
        of the input and output neurons, and one for the position within the
        filter used.
        '''

        neuron_counts = Counter()
        synapse_counts = Counter()
        synapse_weights = set()
        conv, actf = layers
        conv = self.model.available_modules()[conv]
        actf = self.model.available_modules()[actf]

        # assumption is that kernel size, stride are equal in both dimensions
        # and padding preserves input size
        kernel_size = conv.kernel_size[0]
        stride = conv.stride[0]
        padding = conv.padding[0]
        W = conv._parameters['weight']
        B = conv._parameters['bias']

        if len(ydx) == 1:
            d, i, j = get_index(ydx[0], kernel_size)
        else:
            d, i, j = ydx

        # TODO make the lines below loop over a list of ydx positions
        try:
            y_true = y_out[0, d, i, j].detach().numpy()
        except IndexError:
            print(d, i, j)
            raise
        goal = threshold * y_true
        if goal <= 0:
            warnings.warn(f'output neuron at position {ydx} is not positive')
        xmat = submatrix_generator(x_in, stride, kernel_size, padding=padding)(
            i, j)  # TODO: generate the submatrix before the loop

        z = torch.mul(W[d], xmat)
        ordsmat = sorted([(v, idx)
                          for idx, v in enumerate(z.flatten()) if v != 0],
                         key=lambda t: t[0],
                         reverse=True)
        if len(ordsmat) > 0:
            cumsum = torch.cumsum(torch.Tensor([v[0] for v in ordsmat]),
                                  dim=0).detach() + B[d]
            values = [actf(v) for v in cumsum]
            assert np.allclose(
                values[-1].detach().numpy(), y_true, rtol=1e-04, atol=1e-4
            ), f'conv2d failure: {values[-1].detach().numpy() - y_true}'

            for idx, v in enumerate(values):
                if v >= goal:
                    break
            # sum of all values before bias and activation
            totalsum = cumsum[-1] - B[d]
            for v, jdx in ordsmat[:idx + 1]:
                wdx = get_index(jdx, kernel_size)
                # shift the filter-relative index back to the unpadded input index
                neuron = tuple(
                    np.array(wdx, dtype=int) +
                    np.array((0, stride * i - padding, stride * j - padding),
                             dtype=int))
                # records where the contribution sat in the original input
                neuron_counts.update([neuron])
                synapse_counts.update([(neuron, ydx, wdx)])
                weight = round(float((v / totalsum).detach().numpy()), 6)
                synapse_weights.update([(neuron, ydx, weight)])
        return neuron_counts, synapse_counts, synapse_weights
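
_contrib_adaptive_avg_pool2d and _contrib_conv2d share the same selection pattern: sort the nonzero contributions in descending order and keep the smallest prefix whose cumulative sum passes the threshold. Below is a minimal standalone sketch of that pattern (a hypothetical helper, not from the original source; the methods above compare against a share of the output value, while this sketch uses a share of the raw total for simplicity):

    import torch

    def top_contributors(contributions: torch.Tensor, threshold: float = 0.1):
        '''Indices of the largest contributions that together account
        for at least `threshold` of the total.'''
        ordered = sorted(
            [(float(v), idx) for idx, v in enumerate(contributions) if v != 0],
            key=lambda t: t[0],
            reverse=True)
        cumsum = torch.cumsum(torch.tensor([v for v, _ in ordered]), dim=0)
        goal = threshold * cumsum[-1]
        for idx, t in enumerate(cumsum):
            if t > goal:
                break
        return [jdx for _, jdx in ordered[:idx + 1]]

    print(top_contributors(torch.tensor([0.1, 4.0, 0.0, 1.5, 0.4])))  # [1]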