Example #1
    def __init__(self,
                 args,
                 kernel_sizes=[8, 5, 3],
                 out_channels=[128, 256, 128],
                 strides=[1, 1, 1]):
        """
        Create the FCNN model in PyTorch.

        :param args: the general arguments (conv type, debug mode, etc.);
            the dtype of the data/weights is also read from args.
        :param kernel_sizes: the sizes of the kernels in each conv layer.
        :param out_channels: the number of filters for each conv layer.
        :param strides: the strides for the convolutions.
        """
        super(FCNNPytorch, self).__init__()
        # input_size: the length (width) of the time series.
        self.input_size = args.input_size
        # num_classes: number of output classes.
        self.num_classes = args.num_classes
        # in_channels: number of channels in the input data.
        self.in_channels = args.in_channels
        self.dtype = args.dtype
        self.kernel_sizes = kernel_sizes
        self.out_channels = out_channels
        self.strides = strides
        self.conv_type = args.conv_type
        self.is_debug = args.is_debug
        self.preserve_energy = args.preserve_energy

        self.relu = nn.ReLU(inplace=True)
        # For the "same" mode for the convolution, pad the input.
        conv_pads = [kernel_size - 1 for kernel_size in kernel_sizes]

        conv = Conv(kernel_sizes=kernel_sizes,
                    in_channels=self.in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    padding=conv_pads,
                    args=args)

        index = 0
        self.conv0 = conv.get_conv(param_index=index)
        self.bn0 = nn.BatchNorm1d(num_features=out_channels[index])

        index = 1
        self.conv1 = conv.get_conv(param_index=index)
        self.bn1 = nn.BatchNorm1d(num_features=out_channels[index])

        index = 2
        self.conv2 = conv.get_conv(param_index=index)
        self.bn2 = nn.BatchNorm1d(num_features=out_channels[index])
        self.lin = nn.Linear(out_channels[index], self.num_classes)
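
The constructor above only defines the layers. The forward pass is not part of the snippet; a minimal sketch, assuming the standard FCN layout for time-series classification (three conv/BN/ReLU blocks followed by global average pooling), could look like this:

    # Hypothetical forward pass; a sketch, not taken from the original code.
    def forward(self, x):
        # x has shape (batch, in_channels, input_size).
        out = self.relu(self.bn0(self.conv0(x)))
        out = self.relu(self.bn1(self.conv1(out)))
        out = self.relu(self.bn2(self.conv2(out)))
        # Global average pooling over the time dimension.
        out = out.mean(dim=-1)  # (batch, out_channels[2])
        return self.lin(out)    # (batch, num_classes)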
Example #2
    def __init__(self,
                 input_size,
                 args,
                 num_classes=10,
                 in_channels=1,
                 kernel_sizes=[5, 5],
                 out_channels=None,
                 strides=[1, 1],
                 flat_size=None):
        """
        :param input_size:
        :param args: the general arguments for the program, e.g. conv type.
        :param num_classes:
        :param in_channels:
        :param dtype:
        :param kernel_sizes:
        :param out_channels:
        :param strides:
        :param batch_size:
        :param flat_size: the size of the flat vector after the conv layers.
        """
        super(LeNet, self).__init__()
        self.input_size = input_size
        self.args = args
        if out_channels is None:
            self.out_channels = [10, 20]
        else:
            self.out_channels = out_channels
        if flat_size is None:
            self.flat_size = 320  # for MNIST dataset
        else:
            self.flat_size = flat_size

        self.relu = nn.ReLU(inplace=True)
        # For the "same" mode for the convolution, pad the input.
        conv_pads = [0 for _ in kernel_sizes]

        conv = Conv(kernel_sizes=kernel_sizes,
                    in_channels=in_channels,
                    out_channels=self.out_channels,
                    strides=strides,
                    padding=conv_pads,
                    args=args)

        # self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv1 = conv.get_conv(param_index=0)
        # self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2 = conv.get_conv(param_index=1)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(self.flat_size, 50)
        self.fc2 = nn.Linear(50, num_classes)
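
Again the forward pass is omitted; a sketch assuming the classic PyTorch MNIST network that this constructor mirrors (flat_size=320 corresponds to 20 channels of size 4x4 after two conv/pool stages on 28x28 inputs), with torch.nn.functional imported as F:

    # Hypothetical forward pass; a sketch, not taken from the original code.
    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, self.flat_size)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)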
Example #3
def conv3x3(in_planes, out_planes, stride=1, args=None):
    """3x3 convolution with padding"""
    # return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
    #                      padding=1, bias=False)
    return Conv(kernel_sizes=[3], in_channels=in_planes,
                out_channels=[out_planes], strides=[stride],
                padding=[1], args=args, is_bias=False).get_conv()
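
For context, a helper like this is typically consumed by a ResNet-style block; the sketch below is hypothetical (BasicBlock and its wiring are assumptions, not part of the example):

import torch.nn as nn

# Hypothetical caller of conv3x3; a minimal ResNet-style basic block.
class BasicBlock(nn.Module):
    def __init__(self, planes, args=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(planes, planes, args=args)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, args=args)
        self.bn2 = nn.BatchNorm2d(planes)

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Identity shortcut; assumes unchanged shape (stride 1, same planes).
        return self.relu(out + x)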
Example #4
def conv7x7(in_planes, out_planes, stride=2, padding=3, args=None):
    return Conv(kernel_sizes=[7],
                in_channels=in_planes,
                out_channels=[out_planes],
                strides=[stride],
                padding=[padding],
                args=args,
                is_bias=False).get_conv()
Example #5
def conv1x1(in_planes, out_planes, args, stride=1):
    """1x1 convolution"""
    # return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
    #                  bias=False)
    # FFT convolution is unnecessary for 1x1 kernels: a 1x1 convolution is
    # already a pointwise channel mix, so the forward/inverse transforms
    # would only add overhead.
    return Conv(kernel_sizes=[1], in_channels=in_planes,
                out_channels=[out_planes], strides=[stride],
                padding=[0], args=args, is_bias=False).get_conv()
Example #6
def conv3x3(in_planes, out_planes, args, compress_rate=None,
            stride=1, padding=1):
    """3x3 convolution with padding"""
    # return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
    #                      padding=1, bias=False)
    if compress_rate is not None:
        args.compress_rate = compress_rate
    return Conv(kernel_sizes=[3], in_channels=in_planes,
                out_channels=[out_planes], strides=[stride],
                padding=[padding], args=args, is_bias=False).get_conv()
Example #7
def get_conv(args,
             in_channels,
             out_channels,
             kernel_size,
             stride=1,
             padding=0,
             bias=True):
    return Conv(kernel_sizes=[kernel_size],
                in_channels=in_channels,
                out_channels=[out_channels],
                strides=[stride],
                padding=[padding],
                args=args,
                is_bias=bias).get_conv()
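
A hypothetical call site (the channel counts and the args object are illustrative assumptions):

# Build a single 5x5 convolution through the factory above.
conv_layer = get_conv(args,
                      in_channels=3,
                      out_channels=64,
                      kernel_size=5,
                      stride=1,
                      padding=2,  # keeps the spatial size for stride 1
                      bias=False)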