Example 1
    def __init__(self, in_channel=3, out_dim=10, pooling=MaxPool2dInterval):
        super(IntervalCNN, self).__init__()

        # self.input = Conv2dInterval(in_channel, 32, kernel_size=3, stride=1, padding=1, input_layer=True)
        self.c1 = nn.Sequential(
            Conv2dInterval(in_channel, 32, kernel_size=3, stride=1, padding=1, input_layer=True),
            nn.ReLU(),
            Conv2dInterval(32, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            Conv2dInterval(32, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            pooling(2, stride=2, padding=0),
            IntervalDropout(0.25))
        self.c2 = nn.Sequential(
            Conv2dInterval(64, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            Conv2dInterval(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            pooling(2, stride=2, padding=0),
            IntervalDropout(0.25))
        self.c3 = nn.Sequential(
            Conv2dInterval(128, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            Conv2dInterval(128, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            pooling(2, stride=2, padding=1),
            IntervalDropout(0.25))
        self.fc1 = nn.Sequential(LinearInterval(128 * 5 * 5, 256), nn.ReLU())
        self.last = LinearInterval(256, out_dim)
        # one trainable coefficient per interval layer (7 convolutions + 2 linear layers)
        self.a = nn.Parameter(torch.zeros(9), requires_grad=True)
        self.e = torch.zeros(9)
        self.bounds = None
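A minimal usage sketch for the class above, assuming IntervalCNN and its interval layers are importable from the defining project and that its forward pass accepts a plain image batch; the 128 * 5 * 5 input of fc1 matches 32x32 images (32 -> 16 -> 8 -> 5 across the three pooling stages):

    import torch

    # Hypothetical instantiation; the class and its layers come from the project above.
    model = IntervalCNN(in_channel=3, out_dim=10)
    x = torch.randn(8, 3, 32, 32)  # CIFAR-10-sized batch
    out = model(x)                 # assumes forward() takes a standard image batch;
                                   # how lower/upper bounds are encoded in `out` is
                                   # defined by the interval layers, not shown here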
Example 2
    def __init__(self, out_dim=10, in_channel=1, img_sz=32, hidden_dim=256):
        super(IntervalMLP, self).__init__()
        self.in_dim = in_channel * img_sz * img_sz
        self.fc1 = LinearInterval(self.in_dim, hidden_dim, input_layer=True)
        self.fc2 = LinearInterval(hidden_dim, hidden_dim)
        # Subject to replacement depending on the task
        self.last = LinearInterval(hidden_dim, out_dim)
        self.a = nn.Parameter(torch.Tensor([2, 1, 0]), requires_grad=True)
        self.e = torch.zeros(3)
        self.bounds = None
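A short sketch of the input-size arithmetic used by the class above (fc1 consumes in_channel * img_sz * img_sz features), assuming images are flattened to a vector before the first layer; only the arithmetic is checked here, since the snippet does not show the forward pass:

    import torch

    model = IntervalMLP(out_dim=10, in_channel=1, img_sz=32, hidden_dim=256)
    x = torch.randn(16, 1, 32, 32)         # MNIST-like batch resized to 32x32
    x_flat = x.view(x.size(0), -1)         # (16, 1 * 32 * 32) == (16, model.in_dim)
    assert x_flat.size(1) == model.in_dim  # 1024 features feed fc1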
Example 3
    def __init__(self, eps=0):
        super().__init__()
        self.conv1 = Conv2dInterval(3, 64, 3, 1, input_layer=True)
        self.conv2 = Conv2dInterval(64, 64, 3, 1)
        self.conv3 = Conv2dInterval(64, 128, 3, 2)
        self.conv4 = Conv2dInterval(128, 128, 3, 1)
        self.conv5 = Conv2dInterval(128, 128, 3, 1)
        self.fc1 = LinearInterval(128 * 9 * 9, 200)
        self.last = LinearInterval(200, 10)

        self.a = nn.Parameter(torch.zeros(7), requires_grad=True)
        self.e = None

        self.eps = eps
        self.bounds = None
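The 128 * 9 * 9 input of fc1 is consistent with 32x32 images under the default zero padding of the convolutions above (an assumption, since the snippet only shows the constructor); a quick check of the spatial sizes:

    def conv_out(size, kernel=3, stride=1, padding=0):
        # standard output-size formula for a 2D convolution
        return (size + 2 * padding - kernel) // stride + 1

    s = 32                     # e.g. a CIFAR-10 image side
    s = conv_out(s)            # conv1: 30
    s = conv_out(s)            # conv2: 28
    s = conv_out(s, stride=2)  # conv3: 13
    s = conv_out(s)            # conv4: 11
    s = conv_out(s)            # conv5: 9
    assert 128 * s * s == 128 * 9 * 9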
Example 4
    def create_model(self):
        cfg = self.config

        # Define the backbone (MLP, LeNet, VGG, ResNet, etc.) of the model
        model = models.__dict__[cfg['model_type']].__dict__[
            cfg['model_name']]()

        # Apply network surgery to the backbone:
        # create the heads for the tasks (single-task or multi-task)
        n_feat = model.last.in_features

        # The output of the model will be a dict: {task_name1:output1, task_name2:output2 ...}
        # For a single-headed model the output will be {'All':output}
        model.last = nn.ModuleDict()
        for task, out_dim in cfg['out_dim'].items():
            model.last[task] = LinearInterval(n_feat, out_dim)

        # Redefine the task-dependent function
        def new_logits(self, x):
            outputs = {}
            for task, func in self.last.items():
                outputs[task] = func(x)
            return outputs

        # Replace the task-dependent function
        model.logits = MethodType(new_logits, model)
        # Load pre-trained weights
        if cfg['model_weights'] is not None:
            print('=> Load model weights:', cfg['model_weights'])
            model_state = torch.load(
                cfg['model_weights'],
                map_location=lambda storage, loc: storage)  # Load to CPU.
            model.load_state_dict(model_state)
            print('=> Load Done')
        return model
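A sketch of the configuration dict that create_model reads, inferred only from the keys accessed above ('model_type', 'model_name', 'out_dim', 'model_weights'); the concrete values are hypothetical:

    cfg = {
        'model_type': 'mlp',            # module looked up in models.__dict__
        'model_name': 'IntervalMLP',    # factory looked up in that module
        'out_dim': {'1': 2, '2': 2},    # one LinearInterval head per task name
        'model_weights': None,          # or a path to a saved state_dict
    }
    # self.config = cfg; model = self.create_model()   # hypothetical wiring inside the agent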