class FFNet(ObservableLayersModule):
    def __init__(self, input_shape, num_classes, h1=128, h2=64, bn=False):
        super(FFNet, self).__init__()
        self.bn = bn
        self.name = self.__class__.__name__
        if self.bn:
            self.name += "BN"

        h, w, channels = input_shape

        self.linear_size = h * w * channels

        layers = [
            Flatten(),
            nn.Linear(self.linear_size, h1),
            #bn
            nn.ELU(),
            nn.Linear(h1, h2),
            #bn
            nn.ELU(),
            nn.Linear(h2, num_classes),
            nn.LogSoftmax(dim=-1)
        ]
        if self.bn:
            layers.insert(2, nn.BatchNorm1d(h1))
            layers.insert(5, nn.BatchNorm1d(h2))

        self.fc = SequentialWithIntermediates(*layers)
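
All of these examples assume `import torch`, `import torch.nn as nn`, plus a few project-specific helpers that are not shown here: ObservableLayersModule, SequentialWithIntermediates and Flatten (later examples also reference ActivationFunction and TransformationSet). The code below is not the original implementation; it is a minimal sketch of what these helpers might look like, inferred only from how they are used in these snippets.

import torch
import torch.nn as nn


# Minimal sketch (assumed, not the original code) of the helpers used throughout.
class ObservableLayersModule(nn.Module):
    """Base class for models that can report per-layer activations."""

    def activation_names(self) -> [str]:
        raise NotImplementedError()

    def forward_intermediates(self, x) -> (object, []):
        raise NotImplementedError()


class Flatten(nn.Module):
    """Flattens each sample in the batch to a 1D vector."""

    def forward(self, x):
        return x.view(x.shape[0], -1)


class SequentialWithIntermediates(nn.Sequential):
    """nn.Sequential that can also return the output of every submodule."""

    def forward_intermediates(self, x) -> (object, []):
        outputs = []
        for layer in self:
            if isinstance(layer, (SequentialWithIntermediates, ObservableLayersModule)):
                # Nested observable containers contribute all of their intermediates.
                x, intermediates = layer.forward_intermediates(x)
                outputs.extend(intermediates)
            else:
                x = layer(x)
                outputs.append(x)
        return x, outputs

    def activation_names(self) -> [str]:
        names = []
        for layer in self:
            if isinstance(layer, (SequentialWithIntermediates, ObservableLayersModule)):
                names.extend(layer.activation_names())
            else:
                names.append(layer.__class__.__name__)
        return names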
Example #2
class ConvBNRelu(ObservableLayersModule):

    def __init__(self,input:int,output:int,bn:bool):
        super(ConvBNRelu, self).__init__()
        self.bn=bn
        if bn:
            self.name = "ConvBNElu"
            self.layers = SequentialWithIntermediates(
                nn.Conv2d(input, output, kernel_size=3, padding=1),
                nn.ELU(),
                nn.BatchNorm2d(output),
            )
        else:
            self.name = "ConvElu"
            self.layers = SequentialWithIntermediates(
                nn.Conv2d(input, output, kernel_size=3, padding=1),
                nn.ELU(),
            )

    def activation_names(self):
        return self.layers.activation_names()

    def forward(self,x):
        return self.layers.forward(x)

    def forward_intermediates(self,x):
        return self.layers.forward_intermediates(x)
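
A hypothetical usage sketch for ConvBNRelu; the channel counts and input size below are chosen only for illustration.

# Hypothetical usage; channel counts and input size are illustrative only.
layer = ConvBNRelu(input=3, output=16, bn=True)
x = torch.randn(8, 3, 32, 32)                        # batch of 8 RGB 32x32 images
y = layer(x)                                         # shape (8, 16, 32, 32)
y2, intermediates = layer.forward_intermediates(x)   # output plus per-layer activations
print(layer.activation_names())                      # e.g. ['Conv2d', 'ELU', 'BatchNorm2d']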
Example #3
class SimpleConv(ObservableLayersModule):  # base class not shown in this snippet; assumed from the sibling examples
    def __init__(self, input_shape, num_classes, conv_filters=32, fc_filters=128, bn=False, kernel_size=3, activation=ActivationFunction.ELU, max_pooling=True):
        super(SimpleConv, self).__init__()
        self.name = self.__class__.__name__
        h, w, channels = input_shape
        self.bn=bn
        self.kernel_size=kernel_size
        self.max_pooling=max_pooling
        assert (kernel_size % 2) ==1
        same_padding = (kernel_size-1)//2
        conv_filters2=conv_filters*2
        conv_filters4 = conv_filters * 4
        self.activation=activation
        activation_class= activation.get_activation_class()
        if max_pooling:
            mp_generator = lambda f: nn.MaxPool2d(stride=2, kernel_size=2)
        else:
            # Strided 3x3 conv as a learned downsampling; padding=1 matches its 3x3 kernel
            # so the spatial size is halved exactly as in the max-pooling branch.
            mp_generator = lambda f: nn.Conv2d(f, f, stride=2, kernel_size=3, padding=1)

        conv_layers = [
            nn.Conv2d(channels, conv_filters, kernel_size, padding=same_padding),
            # bn
            activation_class(),
            nn.Conv2d(conv_filters, conv_filters, kernel_size, padding=same_padding),
            # bn
            activation_class(),
            mp_generator(conv_filters),
            nn.Conv2d(conv_filters, conv_filters2, kernel_size, padding=same_padding),
            # bn
            activation_class(),
            nn.Conv2d(conv_filters2, conv_filters2, kernel_size, padding=same_padding),
            # bn
            activation_class(),
            mp_generator(conv_filters2),
            nn.Conv2d(conv_filters2, conv_filters4, 3, padding=1),
            # bn
            activation_class(),
        ]

        if self.bn:
            conv_layers.insert(1,nn.BatchNorm2d(conv_filters))
            conv_layers.insert(4, nn.BatchNorm2d(conv_filters))
            conv_layers.insert(8, nn.BatchNorm2d(conv_filters2))
            conv_layers.insert(11, nn.BatchNorm2d(conv_filters2))
            conv_layers.insert(15, nn.BatchNorm2d(conv_filters4))


        conv = SequentialWithIntermediates(*conv_layers)

        self.linear_size = h * w * (conv_filters4) // 4 // 4

        fc_layers=[
            Flatten(),
            nn.Linear(self.linear_size, fc_filters),
            # nn.BatchNorm1d(fc_filters),
            activation_class(),
            nn.Linear(fc_filters, num_classes),
            nn.LogSoftmax(dim=-1),
            ]
        if self.bn:
            fc_layers.insert(2,nn.BatchNorm1d(fc_filters))
        fc = SequentialWithIntermediates(*fc_layers)
        self.layers=SequentialWithIntermediates(conv,fc)

class TIPoolingSimpleConv(ObservableLayersModule):
    def __init__(self,
                 input_shape,
                 num_classes,
                 transformations: TransformationSet,
                 conv_filters=32,
                 fc_filters=128,
                 bn=False):
        super().__init__()
        self.name = self.__class__.__name__
        self.bn = bn
        h, w, channels = input_shape

        self.transformations = transformations

        conv_filters2 = conv_filters * 2
        conv_filters4 = conv_filters * 4
        conv_layers = [
            nn.Conv2d(channels, conv_filters, 3, padding=1),
            #bn
            nn.ELU(),
            nn.Conv2d(conv_filters, conv_filters, 3, padding=1),
            # bn
            nn.ELU(),
            nn.MaxPool2d(stride=2, kernel_size=2),
            nn.Conv2d(conv_filters, conv_filters2, 3, padding=1),
            # bn
            nn.ELU(),
            nn.Conv2d(conv_filters2, conv_filters2, 3, padding=1),
            # bn
            nn.ELU(),
            nn.MaxPool2d(stride=2, kernel_size=2),
            nn.Conv2d(conv_filters2, conv_filters4, 3, padding=1),
            # bn
            nn.ELU(),
        ]

        if self.bn:
            conv_layers.insert(1, nn.BatchNorm2d(conv_filters))
            conv_layers.insert(4, nn.BatchNorm2d(conv_filters))
            conv_layers.insert(8, nn.BatchNorm2d(conv_filters2))
            conv_layers.insert(11, nn.BatchNorm2d(conv_filters2))
            conv_layers.insert(15, nn.BatchNorm2d(conv_filters4))

        self.conv = SequentialWithIntermediates(*conv_layers)

        self.linear_size = h * w * (conv_filters4) // 4 // 4

        fc_layers = [
            nn.Linear(self.linear_size, fc_filters),
            # nn.BatchNorm1d(fc_filters),
            nn.ELU(),
            nn.Linear(fc_filters, num_classes),
            nn.LogSoftmax(dim=-1),
        ]
        if self.bn:
            fc_layers.insert(2, nn.BatchNorm1d(fc_filters))
        self.fc = SequentialWithIntermediates(*fc_layers)
Example #5
class VGG16D(ObservableLayersModule):

    def __init__(self, input_shape, num_classes, conv_filters=64, fc_filters=4096, bn=False):
        super().__init__()
        self.name = self.__class__.__name__
        self.bn = bn
        if self.bn:
            self.name+="BN"

        h, w, channels = input_shape

        # list of conv layers
        conv_layers=[]
        # Configuration, depends on conv_filters
        convs_per_block= [2,2,3,3]
        feature_maps = [conv_filters,conv_filters*2,conv_filters*4,conv_filters*8]
        # end config
        # Create blocks of layers
        input_feature_maps=channels
        for c,f in zip(convs_per_block,feature_maps):
            conv_layers+=block(input_feature_maps,f,c,bn)
            input_feature_maps=f

        # Calculate flattened output size
        max_pools=len(convs_per_block)
        hf, wf = h // (2 ** max_pools), w // (2 ** max_pools)
        flattened_output_size = hf * wf * input_feature_maps


        dense_layers=[
            Flatten(),
            nn.Dropout(0.5),
            nn.Linear(flattened_output_size, fc_filters),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(fc_filters, fc_filters),
            nn.ReLU(),
            nn.Linear(fc_filters, num_classes),
            nn.LogSoftmax(dim=-1),
        ]

        conv_layers = SequentialWithIntermediates(*conv_layers)
        dense_layers = SequentialWithIntermediates(*dense_layers)
        self.layers=SequentialWithIntermediates(conv_layers,dense_layers)

    def forward(self, x):
        return self.layers(x)

    def forward_intermediates(self,x)->(object,[]):
        return self.layers.forward_intermediates(x)

    def activation_names(self):
        return self.layers.activation_names()
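
The `block` helper used by VGG16D above is not included in this example. Based on how it is called (input feature maps, output feature maps, number of convolutions, and a BN flag), a plausible sketch is a VGG-style block: a run of 3x3 same-padding convolutions, each optionally followed by batch normalization and then ReLU, closed by a 2x2 max pool, which is what makes `max_pools = len(convs_per_block)` correct above.

# Plausible sketch of the missing `block` helper (assumed, not the original code).
def block(input_feature_maps: int, output_feature_maps: int, convs: int, bn: bool):
    layers = []
    in_maps = input_feature_maps
    for _ in range(convs):
        layers.append(nn.Conv2d(in_maps, output_feature_maps, kernel_size=3, padding=1))
        if bn:
            layers.append(nn.BatchNorm2d(output_feature_maps))
        layers.append(nn.ReLU())
        in_maps = output_feature_maps
    # Each block halves the spatial resolution, matching max_pools = len(convs_per_block).
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return layers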
Example #6
class ConvBNRelu(ObservableLayersModule):

    def __init__(self,input:int,output:int,bn:bool):
        super(ConvBNRelu, self).__init__()
        self.bn=bn
        if bn:
            self.name = "ConvBNElu"
            self.layers = SequentialWithIntermediates(
                nn.Conv2d(input, output, kernel_size=3, padding=1),
                nn.ELU(),
                nn.BatchNorm2d(output),
            )
        else:
            self.name = "ConvElu"
            self.layers = SequentialWithIntermediates(
                nn.Conv2d(input, output, kernel_size=3, padding=1),
                nn.ELU(),
            )

class FFNet(ObservableLayersModule):
    def __init__(self, input_shape, num_classes, h1=128, h2=64, bn=False):
        super(FFNet, self).__init__()
        self.bn = bn
        self.name = self.__class__.__name__
        if self.bn:
            self.name += "BN"

        h, w, channels = input_shape

        self.linear_size = h * w * channels

        layers = [
            Flatten(),
            nn.Linear(self.linear_size, h1),
            #bn
            nn.ELU(),
            nn.Linear(h1, h2),
            #bn
            nn.ELU(),
            nn.Linear(h2, num_classes),
            nn.LogSoftmax(dim=-1)
        ]
        if self.bn:
            layers.insert(2, nn.BatchNorm1d(h1))
            layers.insert(5, nn.BatchNorm1d(h2))

        self.fc = SequentialWithIntermediates(*layers)

    def forward(self, x):
        return self.fc(x)

    def activation_names(self) -> [str]:
        return self.fc.activation_names()

    def forward_intermediates(self, x) -> (object, []):
        return self.fc.forward_intermediates(x)
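
A hypothetical usage sketch for FFNet; the MNIST-like shapes below are chosen only for illustration.

# Hypothetical usage; input shape and class count are illustrative only.
model = FFNet(input_shape=(28, 28, 1), num_classes=10, bn=True)
x = torch.randn(4, 1, 28, 28)                         # batch of 4 grayscale 28x28 images
log_probs = model(x)                                  # (4, 10) log-probabilities
output, activations = model.forward_intermediates(x)
print(model.name, model.activation_names())           # e.g. FFNetBN and the per-layer names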
class TIPoolingSimpleConv(ObservableLayersModule):
    def __init__(self,
                 input_shape,
                 num_classes,
                 transformations: TransformationSet,
                 conv_filters=32,
                 fc_filters=128,
                 bn=False):
        super().__init__()
        self.name = self.__class__.__name__
        self.bn = bn
        h, w, channels = input_shape

        self.transformations = transformations

        conv_filters2 = conv_filters * 2
        conv_filters4 = conv_filters * 4
        conv_layers = [
            nn.Conv2d(channels, conv_filters, 3, padding=1),
            #bn
            nn.ELU(),
            nn.Conv2d(conv_filters, conv_filters, 3, padding=1),
            # bn
            nn.ELU(),
            nn.MaxPool2d(stride=2, kernel_size=2),
            nn.Conv2d(conv_filters, conv_filters2, 3, padding=1),
            # bn
            nn.ELU(),
            nn.Conv2d(conv_filters2, conv_filters2, 3, padding=1),
            # bn
            nn.ELU(),
            nn.MaxPool2d(stride=2, kernel_size=2),
            nn.Conv2d(conv_filters2, conv_filters4, 3, padding=1),
            # bn
            nn.ELU(),
        ]

        if self.bn:
            conv_layers.insert(1, nn.BatchNorm2d(conv_filters))
            conv_layers.insert(4, nn.BatchNorm2d(conv_filters))
            conv_layers.insert(8, nn.BatchNorm2d(conv_filters2))
            conv_layers.insert(11, nn.BatchNorm2d(conv_filters2))
            conv_layers.insert(15, nn.BatchNorm2d(conv_filters4))

        self.conv = SequentialWithIntermediates(*conv_layers)

        self.linear_size = h * w * (conv_filters4) // 4 // 4

        fc_layers = [
            nn.Linear(self.linear_size, fc_filters),
            # nn.BatchNorm1d(fc_filters),
            nn.ELU(),
            nn.Linear(fc_filters, num_classes),
            nn.LogSoftmax(dim=-1),
        ]
        if self.bn:
            fc_layers.insert(2, nn.BatchNorm1d(fc_filters))
        self.fc = SequentialWithIntermediates(*fc_layers)

    def forward(self, x):
        results = []
        for t in self.transformations:
            transformed_x = t(x)
            feature_maps = self.conv(transformed_x)
            flattened_feature_maps = feature_maps.view(feature_maps.shape[0],
                                                       -1)
            results.append(flattened_feature_maps)
        x = torch.stack(results, dim=1)
        x, _ = x.max(dim=1)
        x = self.fc(x)
        return x

    def forward_intermediates(self, x) -> (object, []):
        results = []
        conv_intermediates = []

        for t in self.transformations:
            transformed_x = t(x)
            feature_maps, intermediates = self.conv.forward_intermediates(
                transformed_x)
            conv_intermediates.extend(intermediates)
            flattened_feature_maps = feature_maps.view(feature_maps.shape[0],
                                                       -1)
            results.append(flattened_feature_maps)

        x = torch.stack(results, dim=1)

        pooled, _ = x.max(dim=1)

        x, fc_activations = self.fc.forward_intermediates(pooled)
        return x, conv_intermediates + [pooled] + fc_activations

    def layer_before_pooling_each_transformation(self) -> int:
        return len(self.original_conv_names())

    def original_conv_names(self) -> [str]:
        return self.conv.activation_names()

    def fc_names(self) -> [str]:
        return self.fc.activation_names()

    def activation_names(self):
        conv_names = []
        original_conv_names = self.original_conv_names()
        for i in range(len(self.transformations)):
            t_names = [f"t{i:03}_{n}" for n in original_conv_names]
            conv_names.extend(t_names)

        return conv_names + ["Pool+Flatten"] + self.fc.activation_names()
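
TransformationSet is not defined in these examples; from its use above, the forward pass only requires it to be iterable (each element a callable mapping a batch to a transformed batch) and to support len(). A hypothetical usage sketch with a two-element transformation set (identity and horizontal flip):

# Hypothetical usage; a plain list of callables stands in for TransformationSet here.
transformations = [
    lambda x: x,                              # identity
    lambda x: torch.flip(x, dims=[3]),        # horizontal flip over the width axis
]
model = TIPoolingSimpleConv(input_shape=(32, 32, 3), num_classes=10,
                            transformations=transformations, bn=True)
x = torch.randn(4, 3, 32, 32)                 # batch of 4 RGB 32x32 images
log_probs = model(x)                          # (4, 10); features are max-pooled over transformations
output, activations = model.forward_intermediates(x)
print(len(activations), len(model.activation_names()))  # the two counts should match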