Example #1
class RBFI(nn.Module):
    def __init__(self,
                 in_features,
                 out_features,
                 andor="*",
                 modinf=False,
                 regular_deriv=False,
                 min_input=0.0,
                 max_input=1.0,
                 min_slope=0.001,
                 max_slope=10.0):
        """
        Implementation of an RBF module with logloss.
        :param in_features: Number of input features.
        :param out_features: Number of output features.
        :param andor: '^' for and, 'v' for or, '*' for mixed.
        :param modinf: Whether to aggregate using max (if True) or sum (if False).
        :param regular_deriv: Whether to use regular derivatives or not.
        :param min_input: Minimum value for w (and therefore the minimum input value).
        :param max_input: Maximum value for w, as above.
        :param min_slope: Minimum value for u, defining the slope.
        :param max_slope: Maximum value for u, defining the slope.
        """
        super(RBFI, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.andor = andor
        self.modinf = modinf
        self.regular_deriv = regular_deriv
        self.w = BoundedParameter(torch.Tensor(out_features, in_features),
                                  lower_bound=min_input,
                                  upper_bound=max_input)
        self.u = BoundedParameter(torch.Tensor(out_features, in_features),
                                  lower_bound=min_slope,
                                  upper_bound=max_slope)
        if andor == 'v':
            self.andor01 = Parameter(torch.ones((1, out_features)))
        elif andor == '^':
            self.andor01 = Parameter(torch.zeros((1, out_features)))
        else:
            self.andor01 = Parameter(torch.Tensor(1, out_features))
            self.andor01.data.random_(0, 2)
        self.andor01.requires_grad = False
        self.w.data.uniform_(min_input, max_input)
        # Initialization of u.
        self.u.data.uniform_(0.2, 0.7)  # These bounds could be parameters.
        self.u.data.clamp_(min_slope, max_slope)
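Both w and u above are instances of BoundedParameter, which is not defined in any of these examples. A minimal sketch of what such a class could look like, assuming it is simply a Parameter that records box bounds and can clamp itself back into them (hypothetical; the real implementation may differ):

import torch
from torch.nn import Parameter

class BoundedParameter(Parameter):
    """Hypothetical sketch: a Parameter that remembers box bounds."""

    def __new__(cls, data, requires_grad=True, lower_bound=None, upper_bound=None):
        p = super().__new__(cls, data, requires_grad)
        p.lower_bound = lower_bound
        p.upper_bound = upper_bound
        return p

    def project_(self):
        # Clamp the data back into [lower_bound, upper_bound], e.g. after
        # each optimizer step.
        with torch.no_grad():
            self.data.clamp_(self.lower_bound, self.upper_bound)

A training loop using this sketch would call project_() on each bounded parameter after every optimizer step.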
Example #2
        def hook(o):
            meta_module, meta_class = None, o.get('meta_class')
            if meta_class in ('Datetime', 'datetime.datetime'):
                # 'Datetime' included for backward compatibility
                try:
                    # Try the format with microseconds first, then fall back
                    # to whole seconds.
                    tmp = datetime.datetime.strptime(
                        o['date'], '%Y-%m-%dT%H:%M:%S.%f')
                except Exception:
                    tmp = datetime.datetime.strptime(
                        o['date'], '%Y-%m-%dT%H:%M:%S')
                return tmp
            elif meta_class == 'set':
                # Set.
                return set(o['set'])
            # Numpy arrays.
            elif meta_class == 'numpy.ndarray':
                data = base64.b64decode(o['data'])
                dtype = o['dtype']
                shape = o['shape']
                v = numpy.frombuffer(data, dtype=dtype)
                v = v.reshape(shape)
                return v

            # Numpy numbers.
            elif meta_class == 'numpy.number':
                data = base64.b64decode(o['data'])
                dtype = o['dtype']
                v = numpy.frombuffer(data, dtype=dtype)[0]
                return v

            # Parameters
            elif meta_class == 'Parameter':
                p = Parameter(torch.tensor(o['data']), requires_grad=o['requires_grad'])
                return p.to(device)
            elif meta_class == 'BoundedParameter':
                p = BoundedParameter(torch.tensor(o['data']),
                                     requires_grad=o['requires_grad'],
                                     lower_bound=o['lower_bound'],
                                     upper_bound=o['upper_bound'])
                return p.to(device)

            elif meta_class == 'Storage':
                p = Storage(o['d'])
                return p

            elif meta_class and '.' in meta_class:
                # correct for classes that have migrated from one module to another
                meta_class = remapper.get(meta_class, meta_class)
                # separate the module name from the actual class name
                meta_module, meta_class = meta_class.rsplit('.', 1)

            if meta_class is not None:
                del o['meta_class']
                # this option is for backward compatibility in case a module is not specified
                if meta_class in fallback:
                    meta_module = fallback.get(meta_class)

                if meta_module is not None and objectify:
                    try:
                        module = importlib.import_module(meta_module)
                        cls = getattr(module, meta_class)
                        # Figures out parameters for the initializer.
                        try:
                            # inspect.getargspec was removed in Python 3.11;
                            # getfullargspec is the drop-in replacement here.
                            args = inspect.getfullargspec(cls.__init__)[0]
                            args = [x for x in args if x in o.keys()]
                            dd = [o[x] for x in args]
                            obj = cls(*dd)
                        except Exception:
                            print("Incomplete rebuild:", traceback.format_exc())
                            obj = cls()
                        obj.__dict__.update(o)
                        # Restores modules.
                        if isinstance(obj, Module):
                            for k, v in obj._modules.items():
                                setattr(obj, k, v)
                        o = obj
                    except Exception:
                        # We need to allow the case where the class is now obsolete.
                        print(traceback.format_exc())
                        print("Could not restore: %r %r" % (meta_module, meta_class))
                        o = None
            elif type(o) is dict:
                o = Storage(o)
            return o
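The hook above has the shape of a json.loads object_hook: every decoded JSON object is passed through it, and dictionaries tagged with a meta_class key are re-hydrated (device, remapper, fallback, and objectify are closed over from the enclosing method, which is not shown). A self-contained sketch of the same pattern, using only the datetime branch:

import datetime
import json

def demo_hook(o):
    # Re-hydrate dicts tagged with 'meta_class', as in the hook above.
    if o.get('meta_class') == 'datetime.datetime':
        return datetime.datetime.strptime(o['date'], '%Y-%m-%dT%H:%M:%S.%f')
    return o

s = '{"when": {"meta_class": "datetime.datetime", "date": "2020-01-02T03:04:05.000006"}}'
print(json.loads(s, object_hook=demo_hook)['when'])  # 2020-01-02 03:04:05.000006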
Example #3
class MWD_trimmable(nn.Module, Serializable):
    def __init__(
        self,
        in_features,
        out_features,
        andor="*",
        pseudo=1.0,
        min_input=0.0,
        max_input=1.0,
        min_slope=0.001,
        max_slope=10.0,
    ):
        """
        Implementation of MWD.
        :param in_features: Number of input features.
        :param out_features: Number of output features.
        :param andor: '^' for and, 'v' for or, '*' for mixed.
        :param modinf: Whether to aggregate using max (if True) of sum (if False).
        :param regular_deriv: Whether to use regular derivatives or not.
        :param min_input: minimum value for w (and therefore min value for input)
        :param max_input: max, as above.
        :param min_slope: min value for u, defining the slope.
        :param max_slope: max value for u, defining the slope.
        """
        super(MWD_trimmable, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.andor = andor
        self.pseudo = pseudo
        self.w = BoundedParameter(torch.Tensor(out_features, in_features),
                                  lower_bound=min_input,
                                  upper_bound=max_input)
        self.u = BoundedParameter(torch.Tensor(out_features, in_features),
                                  lower_bound=min_slope,
                                  upper_bound=max_slope)
        if andor == 'v':
            self.andor01 = Parameter(torch.ones((1, out_features)))
        elif andor == '^':
            self.andor01 = Parameter(torch.zeros((1, out_features)))
        else:
            self.andor01 = Parameter(torch.Tensor(1, out_features))
            self.andor01.data.random_(0, 2)
        self.andor01.requires_grad = False
        self.w.data.uniform_(min_input, max_input)
        # Initialization of u.
        self.u.data.uniform_(0., 0.7)  # These could be parameters.
        self.u.data.clamp_(min_slope, max_slope)

    def forward(self, x):
        # Let n be the input size, and m the output size.
        # The input x has shape (..., n); to make room for the m outputs,
        # we view it as (..., 1, n) so it broadcasts against w and u.
        xx = x.unsqueeze(-2)
        xuw = self.u * (xx - self.w)
        xuwsq = xuw * xuw
        # Aggregates into a modulus: take the largest square along the inputs.
        z = SharedFeedbackMaxExpPseudo.apply(xuwsq, self.pseudo)
        y = LargeAttractorExpPseudo2.apply(z, self.pseudo)
        # Takes into account and-orness.
        if self.andor == '^':
            return y
        elif self.andor == 'v':
            return 1.0 - y
        else:
            return y + self.andor01 * (1.0 - 2.0 * y)
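In the mixed '*' case, andor01 holds 0/1 entries, and y + andor01 * (1 - 2*y) returns y where the entry is 0 (an AND-like unit) and 1 - y where it is 1 (an OR-like unit). A quick check of the identity with illustrative values:

import torch

y = torch.tensor([0.2, 0.9])
a = torch.tensor([0.0, 1.0])    # 0 keeps y; 1 flips it to 1 - y
print(y + a * (1.0 - 2.0 * y))  # tensor([0.2000, 0.1000])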

    def interval_forward(self, x_min, x_max):
        xx_min = x_min.unsqueeze(-2)
        xx_max = x_max.unsqueeze(-2)
        xuw1 = self.u * (xx_min - self.w)
        xuwsq1 = xuw1 * xuw1
        xuw2 = self.u * (xx_max - self.w)
        xuwsq2 = xuw2 * xuw2
        sq_max = torch.max(xuwsq1, xuwsq2)
        sq_min = torch.min(xuwsq1, xuwsq2)
        # If w is between x_min and x_max, then sq_min should be 0.
        # So we multiply sq_min by something that is 0 if x_min < w < x_max.
        sq_min = sq_min * ((xx_min > self.w) | (self.w > xx_max)).float()

        y_min = torch.exp(-torch.max(sq_max, -1)[0])
        y_max = torch.exp(-torch.max(sq_min, -1)[0])
        # Takes into account and-orness.
        if self.andor == '^':
            return y_min, y_max
        elif self.andor == 'v':
            return 1.0 - y_max, 1.0 - y_min
        else:
            y1 = y_min + self.andor01 * (1.0 - 2.0 * y_min)
            y2 = y_max + self.andor01 * (1.0 - 2.0 * y_max)
            y_min = torch.min(y1, y2)
            y_max = torch.max(y1, y2)
            return y_min, y_max
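A quick sanity check of the interval propagation, assuming BoundedParameter and the custom autograd functions are in scope so the module can be constructed (sizes are illustrative):

import torch

m = MWD_trimmable(in_features=2, out_features=3)
lo, hi = m.interval_forward(torch.tensor([0.1, 0.2]), torch.tensor([0.4, 0.5]))
assert torch.all(lo <= hi)  # the output interval must be well-ordered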

    def overall_sensitivity(self):
        """Returns the sensitivity to adversarial examples of the layer."""
        s = torch.max(torch.max(self.u, -1)[0], -1)[0].item()
        s *= np.sqrt(2. / np.e)
        return s
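The constant np.sqrt(2. / np.e) is the maximum slope of a single unit with respect to one input. For y = e^{-(u(x-w))^2}, a short derivation:

\frac{dy}{dx} = -2u^2(x-w)\,e^{-u^2(x-w)^2},

whose magnitude is maximized at |x-w| = \frac{1}{u\sqrt{2}}, where it equals

2u^2 \cdot \frac{1}{u\sqrt{2}} \cdot e^{-1/2} = u\sqrt{2/e}.

Scaling the largest u by this factor, as the code does, therefore bounds the unit's response to an input perturbation.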

    def sensitivity(self, previous_layer):
        """Given the sensitivity of the previous layer (a vector of length equal
        to the number of inputs), it computes the sensitivity to adversarial examples
         of the current layer, as a vector of length equal to the output size of the
         layer.  If the input sensitivity of the previous layer is None, then unit
         sensitivity is assumed."""
        if previous_layer is None:
            previous_layer = self.w.new(1, self.in_features)
            previous_layer.fill_(1.)
        else:
            previous_layer = previous_layer.view(1, self.in_features)
        u_prod = previous_layer * self.u
        s = SharedFeedbackMaxExpPseudo.apply(u_prod, 1.0)
        s = s * np.sqrt(2. / np.e)
        return s
Example #4
class RBFI(nn.Module):
    def __init__(self,
                 in_features,
                 out_features,
                 andor="*",
                 modinf=False,
                 regular_deriv=False,
                 min_input=0.0,
                 max_input=1.0,
                 min_slope=0.001,
                 max_slope=10.0):
        """
        Implementation of RBF module with logloss.
        :param in_features: Number of input features.
        :param out_features: Number of output features.
        :param andor: '^' for and, 'v' for or, '*' for mixed.
        :param modinf: Whether to aggregate using max (if True) of sum (if False).
        :param regular_deriv: Whether to use regular derivatives or not.
        :param min_input: minimum value for w (and therefore min value for input)
        :param max_input: max, as above.
        :param min_slope: min value for u, defining the slope.
        :param max_slope: max value for u, defining the slope.
        """
        super(RBFI, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.andor = andor
        self.modinf = modinf
        self.regular_deriv = regular_deriv
        self.w = BoundedParameter(torch.Tensor(out_features, in_features),
                                  lower_bound=min_input,
                                  upper_bound=max_input)
        self.u = BoundedParameter(torch.Tensor(out_features, in_features),
                                  lower_bound=min_slope,
                                  upper_bound=max_slope)
        if andor == 'v':
            self.andor01 = Parameter(torch.ones((1, out_features)))
        elif andor == '^':
            self.andor01 = Parameter(torch.zeros((1, out_features)))
        else:
            self.andor01 = Parameter(torch.Tensor(1, out_features))
            self.andor01.data.random_(0, 2)
        self.andor01.requires_grad = False
        self.w.data.uniform_(min_input, max_input)
        # Initialization of u.
        self.u.data.uniform_(0.2, 0.7)  # These could be parameters.
        self.u.data.clamp_(min_slope, max_slope)

    def dumps(self):
        """Writes itself to a string."""
        # Creates a dictionary with everything needed to rebuild the layer.
        d = dict(
            in_features=self.in_features,
            out_features=self.out_features,
            min_input=self.w.lower_bound,
            max_input=self.w.upper_bound,
            min_slope=self.u.lower_bound,
            max_slope=self.u.upper_bound,
            modinf=self.modinf,
            regular_deriv=self.regular_deriv,
            andor=self.andor,
            andor01=self.andor01.cpu().numpy(),
            u=self.u.data.cpu().numpy(),
            w=self.w.data.cpu().numpy(),
        )
        return Serializable.dumps(d)

    @staticmethod
    def loads(s, device):
        """Reads itself from string s."""
        d = Serializable.loads(s)
        m = RBFI(d['in_features'],
                 d['out_features'],
                 andor=d['andor'],
                 modinf=d['modinf'],
                 regular_deriv=d['regular_deriv'],
                 min_input=d['min_input'],
                 max_input=d['max_input'],
                 min_slope=d['min_slope'],
                 max_slope=d['max_slope'])
        m.u.data = torch.from_numpy(d['u']).to(device)
        m.w.data = torch.from_numpy(d['w']).to(device)
        m.andor01.data = torch.from_numpy(d['andor01']).to(device)
        return m
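Given the dumps/loads pair above, a layer can be round-tripped through a string. A minimal usage sketch, assuming Serializable preserves numpy arrays exactly (as the hook in Example #2 suggests):

import torch

layer = RBFI(4, 3, andor='^', modinf=True)
s = layer.dumps()
restored = RBFI.loads(s, torch.device('cpu'))
assert torch.equal(layer.w.data, restored.w.data)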

    def forward(self, x):
        # Let n be the input size, and m the output size.
        # The input x has shape (..., n); to make room for the m outputs,
        # we view it as (..., 1, n) so it broadcasts against w and u.
        s = list(x.shape)
        new_s = s[:-1] + [1, s[-1]]
        xx = x.view(*new_s)  # Equivalent to x.unsqueeze(-2).
        xuw = self.u * (xx - self.w)
        xuwsq = xuw * xuw
        # Aggregates into a modulus.
        if self.modinf:
            # We want the largest square along the inputs.
            if self.regular_deriv:
                z, _ = torch.max(xuwsq, -1)
                y = torch.exp(-z)
            else:
                z = SharedFeedbackMax.apply(xuwsq)
                y = LargeAttractorExp.apply(z)
        else:
            z = torch.sum(xuwsq, -1)
            if self.regular_deriv:
                y = torch.exp(-z)
            else:
                y = LargeAttractorExp.apply(z)
        # Takes into account and-orness.
        if self.andor == '^':
            return y
        elif self.andor == 'v':
            return 1.0 - y
        else:
            return y + self.andor01 * (1.0 - 2.0 * y)
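The reshape at the top of forward inserts a singleton axis so that an input of shape (..., n) broadcasts against w and u of shape (m, n), giving (..., m, n) before the reduction over the last axis. A quick shape check with illustrative sizes:

import torch

batch, n, m = 5, 3, 7
x = torch.rand(batch, n)
w = torch.rand(m, n)
xx = x.view(batch, 1, n)  # same effect as x.unsqueeze(-2)
print((xx - w).shape)     # torch.Size([5, 7, 3]): one input-sized row per output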

    def overall_sensitivity(self):
        """Returns the sensitivity to adversarial examples of the layer."""
        if self.modinf:
            s = torch.max(torch.max(self.u, -1)[0], -1)[0].item()
        else:
            s = torch.max(torch.sqrt(torch.sum(self.u * self.u, -1))).item()
        s *= np.sqrt(2. / np.e)
        return s

    def sensitivity(self, previous_layer):
        """Given the sensitivity of the previous layer (a vector of length equal
        to the number of inputs), it computes the sensitivity to adversarial examples
         of the current layer, as a vector of length equal to the output size of the
         layer.  If the input sensitivity of the previous layer is None, then unit
         sensitivity is assumed."""
        if previous_layer is None:
            previous_layer = self.w.new(1, self.in_features)
            previous_layer.fill_(1.)
        else:
            previous_layer = previous_layer.view(1, self.in_features)
        u_prod = previous_layer * self.u
        if self.modinf:
            # s = torch.max(u_prod, -1)[0]
            s = SharedFeedbackMax.apply(u_prod)
        else:
            s = torch.sqrt(torch.sum(u_prod * u_prod, -1))
        s = s * np.sqrt(2. / np.e)
        return s
Example #5
class RBFI(nn.Module):
    def __init__(self,
                 in_features,
                 out_features,
                 andor="*",
                 min_input=0.0,
                 max_input=1.0,
                 min_slope=0.001,
                 max_slope=10.0):
        """
        Implementation of RBF module with logloss.
        :param in_features: Number of input features.
        :param out_features: Number of output features.
        :param andor: '^' for and, 'v' for or, '*' for mixed.
        :param min_input: minimum value for w (and therefore min value for input)
        :param max_input: max, as above.
        :param min_slope: min value for u, defining the slope.
        :param max_slope: max value for u, defining the slope.
        """
        super(RBFI, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.andor = andor
        self.w = BoundedParameter(torch.Tensor(out_features, in_features),
                                  lower_bound=min_input,
                                  upper_bound=max_input)
        self.u = BoundedParameter(torch.Tensor(out_features, in_features),
                                  lower_bound=min_slope,
                                  upper_bound=max_slope)
        if andor == 'v':
            self.andor01 = Parameter(torch.ones((1, out_features)))
        elif andor == '^':
            self.andor01 = Parameter(torch.zeros((1, out_features)))
        else:
            self.andor01 = Parameter(torch.Tensor(1, out_features))
            self.andor01.data.random_(0, 2)
        self.andor01.requires_grad = False
        self.w.data.uniform_(min_input, max_input)
        # Initialization of u.
        self.u.data.uniform_(0.2, 0.7)  # These could be parameters.
        self.u.data.clamp_(min_slope, max_slope)

    def forward(self, x):
        # Let n be the input size, and m the output size.
        # The input x has shape (..., n); to make room for the m outputs,
        # we view it as (..., 1, n) so it broadcasts against w and u.
        s = list(x.shape)
        new_s = s[:-1] + [1, s[-1]]
        xx = x.view(*new_s)  # Equivalent to x.unsqueeze(-2).
        xuw = self.u * (xx - self.w)
        xuwsq = xuw * xuw
        # Aggregates into a modulus.
        z = SharedFeedbackMax.apply(xuwsq)
        y = LargeAttractorExp.apply(z)
        # Takes into account and-orness.
        if self.andor == '^':
            return y
        elif self.andor == 'v':
            return 1.0 - y
        else:
            return y + self.andor01 * (1.0 - 2.0 * y)

    def sensitivity(self, previous_layer):
        """Given the sensitivity of the previous layer (a vector of length equal
        to the number of inputs), it computes the sensitivity to adversarial examples
         of the current layer, as a vector of length equal to the output size of the
         layer.  If the input sensitivity of the previous layer is None, then unit
         sensitivity is assumed."""
        if previous_layer is None:
            previous_layer = self.w.new(1, self.in_features)
            previous_layer.fill_(1.)
        else:
            previous_layer = previous_layer.view(1, self.in_features)
        u = previous_layer * self.u
        s = torch.max(u, -1)[0]
        s *= np.sqrt(2. / np.e)
        return s
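Because sensitivity maps a per-input sensitivity vector to a per-output one, the bound for a stack of layers can be computed by folding it over the layers. A hedged sketch with illustrative sizes:

# None means unit input sensitivity, as documented in the method above.
layers = [RBFI(10, 8), RBFI(8, 4), RBFI(4, 2)]
s = None
for layer in layers:
    s = layer.sensitivity(s)
print(s)  # one bound per output of the final layer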