Example #1
    def __init__(self,
                 input_dim=(3, 32, 32),
                 num_filters=32,
                 kernel_size=7,
                 stride_conv=1,
                 weight_scale=0.001,
                 pool=2,
                 stride_pool=2,
                 hidden_dim=100,
                 num_classes=10,
                 dropout=0.0):
        """
        Initialize a new network.

        Inputs:
        - input_dim: Tuple (C, H, W) giving size of input data.
        - num_filters: Number of filters to use in the convolutional layer.
        - kernel_size: Size of filters to use in the convolutional layer.
        - hidden_dim: Number of units to use in the fully-connected hidden layer.
        - num_classes: Number of scores to produce from the final affine layer.
        - stride_conv: Stride for the convolution layer.
        - stride_pool: Stride for the max pooling layer.
        - weight_scale: Scale for the initialization of the convolution weights.
        - pool: The size of the max pooling window.
        - dropout: Probability of an element to be zeroed.
        """
        super(ClassificationCNN, self).__init__()
        channels, height, width = input_dim
        ########################################################################
        # TODO: Initialize the necessary trainable layers to resemble the      #
        # ClassificationCNN architecture  from the class docstring.            #
        #                                                                      #
        # In- and output features should not be hard coded, which demands some #
        # calculations, especially for the input of the first fully            #
        # connected layer.                                                     #
        #                                                                      #
        # The convolution should use "same" padding which can be derived from  #
        # the kernel size and its weights should be scaled. Layers should have #
        # a bias if possible.                                                  #
        #                                                                      #
        # Note: Avoid using any of PyTorch's random functions or your output   #
        # will not coincide with the Jupyter notebook cell.                    #
        ########################################################################
        # Architecture: conv - relu - 2x2 max pool - fc - dropout - relu - fc
        # "Same" padding for an odd kernel size keeps the spatial dimensions.
        padding = (kernel_size - 1) // 2
        # Conv2D is presumably a custom / exercise-provided convolution layer
        # (the PyTorch built-in would be nn.Conv2d).
        self.conv1 = Conv2D(channels,
                            num_filters,
                            kernel_size,
                            stride=stride_conv,
                            padding=padding)
        # Scale down the default weight initialization of the convolution.
        self.conv1.weight.data.mul_(weight_scale)
        # Pooling parameters, presumably applied functionally in forward().
        self.pool = (pool, stride_pool)
        # Spatial size after conv ("same" padding) and max pooling; this
        # assumes the defaults stride_conv == 1 and pool == stride_pool.
        new_height = (height + 2 * padding - kernel_size + 1) // pool
        new_width = (width + 2 * padding - kernel_size + 1) // pool
        self.fc1 = nn.Linear(new_height * new_width * num_filters,
                             hidden_dim,
                             bias=True)
        # Dropout probability, presumably applied with F.dropout in forward().
        self.dropout = dropout
        self.fc2 = nn.Linear(hidden_dim, num_classes)
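
The constructor above only builds the layers. For context, here is a minimal sketch of a forward pass that matches the stated conv - relu - 2x2 max pool - fc - dropout - relu - fc architecture; it assumes torch.nn.functional is imported as F and that the stored (pool, stride_pool) tuple and dropout probability are applied functionally. None of this appears in the original snippet.

    # Sketch only: the method body, the F import and the functional
    # pooling/dropout calls are assumptions, not part of the snippet above.
    def forward(self, x):
        x = F.relu(self.conv1(x))                                   # conv - relu
        x = F.max_pool2d(x, kernel_size=self.pool[0],
                         stride=self.pool[1])                       # 2x2 max pool
        x = x.view(x.size(0), -1)                                   # flatten to (N, C*H*W)
        x = self.fc1(x)                                             # fc
        x = F.dropout(x, p=self.dropout, training=self.training)    # dropout
        x = F.relu(x)                                               # relu
        x = self.fc2(x)                                             # fc (class scores)
        return x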
Example #2
 def __init__(self, channel_in, classes):
     super(UNet, self).__init__()
     # Initial 3x3 convolution block: raise the input to 8 feature channels.
     self.input_conv = nn.Sequential(
         Conv2D(channel_in, 8, kernel_size=3, padding=1), nn.BatchNorm2d(8),
         nn.ReLU(inplace=True))
     # Encoder: three downsampling stages.
     self.down1 = Down(8, 16)
     self.down2 = Down(16, 32)
     self.down3 = Down(32, 32)
     # Decoder: the input channel counts presumably include the
     # concatenated encoder skip connections (e.g. 32 + 32 = 64 for up1).
     self.up1 = Up(64, 16)
     self.up2 = Up(32, 8)
     self.up3 = Up(16, 4)
     # 1x1 convolution mapping the final feature maps to per-class scores.
     self.output_conv = nn.Conv2d(4, classes, kernel_size=1)
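
For orientation, a forward pass consistent with these channel counts could look like the sketch below. The wiring, in particular that each Up stage concatenates its upsampled input with the matching encoder output, is an assumption based on the usual U-Net design and is not part of the original snippet.

 # Sketch only: the skip-connection wiring is assumed, not shown above.
 def forward(self, x):
     x0 = self.input_conv(x)      # 8 channels
     x1 = self.down1(x0)          # 16 channels
     x2 = self.down2(x1)          # 32 channels
     x3 = self.down3(x2)          # 32 channels
     x = self.up1(x3, x2)         # cat(32, 32) = 64 in, 16 out
     x = self.up2(x, x1)          # cat(16, 16) = 32 in, 8 out
     x = self.up3(x, x0)          # cat(8, 8) = 16 in, 4 out
     return self.output_conv(x)   # per-pixel class scores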
Example #3
 def __init__(self, channel_in, channel_out):
     super(Down, self).__init__()
     # 3x3 convolution -> batch norm -> ReLU block used at each encoder stage.
     self.conv = nn.Sequential(
         Conv2D(channel_in, channel_out, kernel_size=3, padding=1),
         nn.BatchNorm2d(channel_out), nn.ReLU(inplace=True))
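
The snippet defines only the convolution block; a plausible forward pass, assumed here, would halve the spatial resolution with 2x2 max pooling before applying it (torch.nn.functional imported as F is also assumed):

 # Sketch only: the pooling step before the conv block is an assumption.
 def forward(self, x):
     return self.conv(F.max_pool2d(x, kernel_size=2))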
Example #4
 def __init__(self, channel_in, channel_out):
     super(Up, self).__init__()
     # Double the spatial resolution with bilinear upsampling.
     self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
     # 3x3 convolution -> batch norm -> ReLU applied after upsampling.
     self.conv = nn.Sequential(
         Conv2D(channel_in, channel_out, kernel_size=3, padding=1),
         nn.BatchNorm2d(channel_out), nn.ReLU(inplace=True))
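
Again only the constructor is shown. A forward pass that fits the channel arithmetic in the UNet constructor above would upsample, concatenate the encoder skip connection along the channel axis, and then apply the conv block; this is an assumed sketch and also requires import torch.

 # Sketch only: the concatenation with a skip tensor is an assumption that
 # would explain the doubled channel_in values used in UNet above.
 def forward(self, x, skip):
     x = self.upsample(x)
     x = torch.cat([x, skip], dim=1)   # concatenate along the channel axis
     return self.conv(x)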