def _closed_form_call(self, x):
    # Closed-form gradient of the scalar two-layer output V @ activation(W @ x)
    # with respect to W: d(output)/dW[i, j] = V[i] * activation'((W @ x)[i]) * x[j].
    W = utils.safe_tensor_to_ndarray(
        self._model._layer1.weight)  # TODO: save these as self.X
    V = utils.safe_tensor_to_ndarray(self._model._layer2.weight)
    activation = self._model._activation
    Wx = np.matmul(W, x)
    y = utils.get_activation_gradient(activation, Wx)  # activation'(Wx)
    y = utils.safe_tensor_to_ndarray(y)
    y = np.outer(y, x)          # activation'(Wx) x^T, shape (hidden, input)
    y = np.multiply(V, y.T).T   # scale row i by V[i] (scalar-output case)
    return y
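A quick way to sanity-check the closed form above is to compare it against autograd on a tiny two-layer model. The sketch below is self-contained but hypothetical (it hard-codes a sigmoid activation and a bias-free model); only the gradient identity itself comes from the code above.

# Sketch: the closed-form gradient should match layer1.weight.grad from autograd.
import numpy as np
import torch
import torch.nn as nn

torch.manual_seed(0)
layer1 = nn.Linear(3, 4, bias=False)
layer2 = nn.Linear(4, 1, bias=False)
x = torch.randn(3)

y = layer2(torch.sigmoid(layer1(x)))[0]   # scalar output
y.backward()

W = layer1.weight.detach().numpy()
V = layer2.weight.detach().numpy()
x_np = x.numpy()
s = 1.0 / (1.0 + np.exp(-(W @ x_np)))     # sigmoid(Wx); its derivative is s * (1 - s)
closed_form = np.multiply(V, np.outer(s * (1.0 - s), x_np).T).T

assert np.allclose(closed_form, layer1.weight.grad.numpy(), atol=1e-6)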
def __call__(self, x, return_ndarray=True):
    # Autograd gradient of the model output (assumed scalar) w.r.t. the conv weights.
    self._model.zero_grad()
    self._model.train()
    if x.size == utils.shape_to_size(self.input_shape):
        x = np.reshape(x, self.input_shape)
    else:
        if len(x.shape) == 2:
            x = np.reshape(x, (1, 1, *x.shape))
        if len(x.shape) == 3:
            x = np.reshape(x, (1, *x.shape))
        # try to square the input while keeping the batch and channel dims
        b, c = x.shape[0], x.shape[1]
        channel_size = x.size / (b * c)
        side_size = np.sqrt(channel_size)
        assert int(side_size) == side_size, "Bad input size {}".format(
            x.shape)
        side_size = int(side_size)
        x = np.reshape(x, (b, c, side_size, side_size))

    if not isinstance(x, torch.Tensor):
        x = torch.from_numpy(x).float()
    y = self._model(x.to(utils.get_device()))
    y.backward()
    gradient_matrix = utils.safe_tensor_to_ndarray(self._model._conv.weight.grad) \
        if return_ndarray else self._model._conv.weight.grad

    return gradient_matrix
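The else-branch above reshapes an arbitrarily shaped input into a square (batch, channels, side, side) tensor. Isolating just that squaring step (the input shape below is illustrative, and the batch/channel dims are assumed to already be in place):

# Sketch of the "square the input, keep batch and channel dims" reshape.
import numpy as np

x = np.random.rand(2, 3, 64)              # (batch, channels, flattened pixels)
b, c = x.shape[0], x.shape[1]
side_size = np.sqrt(x.size / (b * c))     # 64 pixels per channel -> side 8
assert int(side_size) == side_size, "Bad input size {}".format(x.shape)
x = np.reshape(x, (b, c, int(side_size), int(side_size)))
print(x.shape)                            # (2, 3, 8, 8)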
def __call__(self, x, return_ndarray=True, closed_form=True):
    # Either use the closed-form expression above or fall back to autograd.
    if closed_form:
        return self._closed_form_call(x)

    self._model.zero_grad()
    self._model.train()
    if not isinstance(x, torch.Tensor):
        x = torch.from_numpy(x).float().to(utils.get_device())
    y = self._model(x)
    y.backward()
    gradient_matrix = utils.safe_tensor_to_ndarray(self._model._layer1.weight.grad) \
        if return_ndarray else self._model._layer1.weight.grad

    return gradient_matrix
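Both __call__ variants rely on the same autograd pattern: zero the gradients, run a forward pass to a single-element output, call backward(), and read the layer's weight.grad. A standalone sketch of that pattern (the model and shapes below are illustrative, not the repository's classes):

# Sketch of the zero_grad -> forward -> backward -> weight.grad pattern.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(5, 4), nn.ReLU(), nn.Linear(4, 1))
x = torch.randn(5)

model.zero_grad()
y = model(x)                 # single-element output, shape (1,)
y.backward()                 # populates .grad on every parameter

grad = model[0].weight.grad              # gradient w.r.t. the first layer's weights
grad_np = grad.detach().cpu().numpy()    # roughly what safe_tensor_to_ndarray presumably does
print(grad_np.shape)                     # (4, 5)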
Example #4
def evaluate_model(model, x, y, eval_f=accuracy, pred_postprocessing=None, out_dim: int = 1, batch_size: int = None):
    x = np.asarray(x)
    y = np.asarray(y)
    if isinstance(model, nn.Module):
        model.eval()
        batch_size = 1 if batch_size is None else batch_size
        pred = None
        # run the network batch by batch and collect the predictions as ndarrays
        for i in range((x.shape[0] // batch_size) + 1):
            x_for_network = x[i * batch_size:(i + 1) * batch_size]
            if x_for_network.size > 0:
                batch_pred = utils.safe_tensor_to_ndarray(model(x_for_network))
                pred = batch_pred if pred is None else np.concatenate((pred, batch_pred))
    else:  # otherwise assume an svm.SVMWrapper-style callable
        pred = np.asarray([model(x[i], single_example=True) for i in range(y.shape[0])])

    if pred_postprocessing is not None:
        pred = pred_postprocessing(pred)

    return eval_f(pred, y)
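As a usage sketch for evaluate_model (NumpyNet and the accuracy metric below are hypothetical; evaluate_model passes numpy batches straight to the module, so the module is assumed to convert them itself, and utils.safe_tensor_to_ndarray is assumed to be importable):

# Usage sketch; NumpyNet and this accuracy helper are illustrative only.
import numpy as np
import torch
import torch.nn as nn

class NumpyNet(nn.Module):
    """Tiny classifier whose forward accepts numpy batches, as evaluate_model expects."""
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)

    def forward(self, x):
        if not isinstance(x, torch.Tensor):
            x = torch.from_numpy(x).float()
        return self.fc(x)

def accuracy(pred, y):
    return float(np.mean(np.argmax(pred, axis=1) == y))

x = np.random.rand(10, 4)
y = np.random.randint(0, 2, size=10)
print(evaluate_model(NumpyNet(), x, y, eval_f=accuracy, batch_size=4))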
def get_output_shape(self, x=None):
    weight = utils.safe_tensor_to_ndarray(self._model._conv.weight)
    return weight.shape

def get_output_shape(self, x=None):
    return utils.safe_tensor_to_ndarray(
        self._model._layer1.weight.grad).shape
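All of these snippets lean on utils.safe_tensor_to_ndarray, whose implementation is not shown here. Based purely on how it is used above (converting tensors, possibly on GPU and with gradients attached, into ndarrays and passing ndarrays through), a plausible minimal version would be:

# Plausible sketch of utils.safe_tensor_to_ndarray -- not the repository's actual code.
import numpy as np
import torch

def safe_tensor_to_ndarray(t):
    """Return a numpy ndarray for a torch.Tensor; leave other array-likes untouched."""
    if isinstance(t, torch.Tensor):
        return t.detach().cpu().numpy()
    return np.asarray(t)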