def __init__(self, max_vocab=5000, seq_len=60):
        super(MIMOModel, self).__init__()

        self.embedding = nn.Embedding(max_vocab, 128)
        self.conv_sequence = nn.Conv1d(128, 128, 3, padding=1)
        self.pool_sequence = nn.AvgPool1d(seq_len)

        self.image_forward = resnet18(pretrained=False, num_classes=128)

        self.features = nn.Sequential(nn.Linear(256, 128), nn.ReLU())
        self.output_layer = nn.Sequential(nn.Linear(128, 1000),
                                          nn.Softmax(dim=1))
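A quick shape check for the text branch above, using hypothetical tensors (defaults max_vocab=5000, seq_len=60): AvgPool1d(seq_len) spans the entire sequence, so the convolutional branch reduces each sample to a single 128-dimensional vector.

    import torch
    import torch.nn as nn

    emb = nn.Embedding(5000, 128)
    conv = nn.Conv1d(128, 128, 3, padding=1)   # padding=1 keeps the length at 60
    pool = nn.AvgPool1d(60)                    # kernel covers the whole sequence

    tokens = torch.randint(0, 5000, (4, 60))   # [batch, seq_len]
    x = emb(tokens).permute(0, 2, 1)           # [4, 128, 60] for Conv1d
    x = pool(conv(x)).squeeze(-1)              # [4, 128]
    print(x.shape)                             # torch.Size([4, 128])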
Example #2
    def __init__(self,
                 input_channels=6,
                 seq_len=125,
                 output_size=2,
                 kernel_sizes=[5, 5],
                 strides=[1, 1],
                 conv_paddings=[0, 0],
                 physNodes=0):

        super(DiscriminatorConv, self).__init__()
        # For other models, do not do any pooling
        # Adapt for the given stride value.
        cOut = 256

        poolKernel = 2
        cpad = conv_paddings[0]
        kernel_sizes = list(kernel_sizes)  # copy, so the mutable default argument is not modified in place

        strides = [1, 1]  # note: overrides the strides argument
        kernel_sizes[0] = self.get_kernel_size(kernel_sizes[0], seq_len)
        lout, cpad = self.conv_len_out(seq_len, cpad, kernel_sizes[0],
                                       strides[0])
        # Target output length (ceil(lout / 2)); the pool padding needed to reach it is computed next.
        lout2 = int((lout // 2) + (lout % 2))
        pool_pad, lout = self.pool_from_len_out_to_pad(lout, lout2, poolKernel,
                                                       poolKernel)

        self.first = nn.Sequential(
            nn.Conv1d(input_channels,
                      64,
                      kernel_sizes[0],
                      stride=strides[0],
                      padding=cpad), nn.ReLU(),
            nn.AvgPool1d(poolKernel, stride=None, padding=pool_pad))
        self.conv2_bn1 = nn.BatchNorm1d(64)

        cpad = conv_paddings[1]
        kernel_sizes[1] = self.get_kernel_size(kernel_sizes[1], lout)
        lout2, cpad = self.conv_len_out(lout, cpad, kernel_sizes[1],
                                        strides[1])
        pool_pad2, lout2 = self.pool_from_len_out_to_pad(
            lout2, int((lout2 // 2) + (lout2 % 2)), poolKernel, poolKernel)

        self.dropout = nn.Dropout(0.5)
        self.set_same_scale = nn.Tanh()
        # self.set_same_scale = lambda x: x
        self.linears1 = nn.Sequential(nn.Linear(60, 64), nn.Softmax(dim=1))
        self.linears2 = nn.Sequential(
            nn.Linear(int(64 * 1 * 64) + physNodes, output_size),
            nn.Softmax(dim=1))
        self.cat = lambda x, y: x
        if physNodes > 0:
            self.cat = lambda x, y: torch.cat((x, y), 1)
Example #3
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv1d(1, 200, 10, 4)
        self.bn1 = nn.BatchNorm1d(200)
        self.pool1 = nn.MaxPool1d(4)

        self.conv2 = nn.Conv1d(200, 400, 3)
        self.bn2 = nn.BatchNorm1d(400)
        self.pool2 = nn.MaxPool1d(4)

        self.avg_pool = nn.AvgPool1d(77)
        self.fc1 = nn.Linear(400, 400)
Example #4
 def __init__(self, num_classes):
     super(Net, self).__init__()
     # M11 network from https://arxiv.org/pdf/1610.00087.pdf
     self.layers = nn.ModuleList([
         ManyConvMaxPool(1, 4, 1, 64, 80, stride=4),
         ManyConvMaxPool(2, 4, 64, 64, 3),
         ManyConvMaxPool(2, 4, 64, 128, 3),
         ManyConvMaxPool(3, 4, 128, 256, 3),
         ManyConvMaxPool(2, 4, 256, 512, 3)
     ])
     self.avgPool = nn.AvgPool1d(6)
     self.fc1 = nn.Linear(512, num_classes)
Example #5
    def __init__(self, in_channels, out_channels1, out_channels2reduce,
                 out_channels2, out_channels3reduce, out_channels3,
                 out_channels4reduce, out_channels4, out_channels5):
        super(InceptionV2ModuleA, self).__init__()

        self.branch1 = ConvBN(in_channels=in_channels,
                              out_channels=out_channels1,
                              kernel_size=1)

        self.branch2 = nn.Sequential(
            ConvBNReLU(in_channels=in_channels,
                       out_channels=out_channels2reduce,
                       kernel_size=1),
            ConvBN(in_channels=out_channels2reduce,
                   out_channels=out_channels2,
                   kernel_size=3,
                   padding=1),
        )

        self.branch3 = nn.Sequential(
            ConvBNReLU(in_channels=in_channels,
                       out_channels=out_channels3reduce,
                       kernel_size=1),
            ConvBNReLU(in_channels=out_channels3reduce,
                       out_channels=out_channels3,
                       kernel_size=5,
                       padding=2),
            ConvBN(in_channels=out_channels3,
                   out_channels=out_channels3,
                   kernel_size=5,
                   padding=2),
        )

        self.branch4 = nn.Sequential(
            ConvBNReLU(in_channels=in_channels,
                       out_channels=out_channels4reduce,
                       kernel_size=1),
            ConvBNReLU(in_channels=out_channels4reduce,
                       out_channels=out_channels4,
                       kernel_size=7,
                       padding=3),
            ConvBN(in_channels=out_channels4,
                   out_channels=out_channels4,
                   kernel_size=7,
                   padding=3),
        )

        self.branch5 = nn.Sequential(
            nn.AvgPool1d(kernel_size=3, stride=1, padding=1),
            ConvBN(in_channels=in_channels,
                   out_channels=out_channels5,
                   kernel_size=1),
        )
Example #6
 def __init__(self):
     super(ReductionCPlus, self).__init__()
     # * Def Reduction 0
     self.red0_branch0 = nn.Sequential(nn.MaxPool1d(3, stride=2),
                                       BasicConv1d(2080, 520, 1))
     self.red0_branch1 = nn.Sequential(BasicConv1d(2080, 520, 1),
                                       BasicConv1d(520, 390, 3, padding=1),
                                       BasicConv1d(390, 260, 3, stride=2))
     self.red0_branch2 = nn.Sequential(BasicConv1d(2080, 520, 1),
                                       BasicConv1d(520, 260, 3, stride=2))
     # * Def Reduction 1
     self.red1_pad = nn.ConstantPad1d((0, 1), 0)
     self.red1_branch0 = nn.Sequential(
         BasicConv1d(1040, 270, 1),
         nn.AvgPool1d(2, padding=1, count_include_pad=False))
     self.red1_branch1 = nn.Sequential(
         BasicConv1d(1040, 270, 1), nn.ConstantPad1d((0, 1), 0),
         BasicConv1d(270, 270, 3, stride=2, padding=1))
     # * Def Reduction 2
     self.red2_avg_pool8 = nn.AvgPool1d(8, count_include_pad=False)
     self.red2_bconv = BasicConv1d(540, 250, 1)
Example #7
    def __init__(self):
        super(ConvSimple, self).__init__()
        self.n_classes = 5
        self.conv1 = nn.Conv1d(1, 32, kernel_size=10, padding=1, stride=3)
        self.conv2 = nn.Conv1d(32, 32, kernel_size=10, padding=1, stride=3)
        self.pool1 = nn.AvgPool1d(2, stride=6)

        self.conv3 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        self.conv4 = nn.Conv1d(64, 64, kernel_size=3, padding=1)
        self.pool2 = nn.AvgPool1d(2, stride=2)

        self.conv5 = nn.Conv1d(64, 256, kernel_size=3, padding=1)
        self.conv6 = nn.Conv1d(256, 256, kernel_size=3, padding=1)
        self.pool_avg = nn.AvgPool1d(2)

        self.linear1 = nn.Linear(3328, 128)

        self.dropout1 = nn.Dropout(0.2)

        # LL2:   128  -->  classes
        self.linear2 = nn.Linear(128, self.n_classes)
Example #8
    def __init__(self):
        super(model_1DCNN_2_dx, self).__init__()

        self.conv0 = nn.Sequential(
            nn.Conv1d(128, 32, kernel_size=8, stride=1, padding=0),
            nn.BatchNorm1d(32), nn.ReLU(), nn.MaxPool1d(8, stride=8))

        self.conv1 = nn.Sequential(
            nn.Conv1d(32, 64, kernel_size=8, stride=1, padding=0),
            nn.BatchNorm1d(64), nn.ReLU(), nn.AvgPool1d(8, stride=2))

        self.fc0 = nn.Linear(128, 10)
Example #9
 def forward(self, x):
     r"""
     :param torch.Tensor x: [N, C, L] 初始tensor
     :return: torch.Tensor x: [N, C] avg pool后的结果
     """
     # [N,C,L] -> [N,C]
     kernel_size = x.size(2)
     pooling = nn.AvgPool1d(kernel_size=kernel_size,
                            stride=self.stride,
                            padding=self.padding)
     x = pooling(x)
     return x.squeeze(dim=-1)
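With the default stride (None, i.e. equal to kernel_size) and padding=0, this pattern is simply a global average over the length dimension; a minimal equivalence check, assuming those defaults:

    import torch
    import torch.nn as nn

    x = torch.randn(2, 8, 50)                                    # [N, C, L]
    pooled = nn.AvgPool1d(kernel_size=x.size(2))(x).squeeze(-1)  # [N, C]
    assert torch.allclose(pooled, x.mean(dim=-1), atol=1e-6)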
Example #10
    def __init__(self, args, **kwargs):
        super(AveragePool, self).__init__()
        # self.input_size  = args.lookup_input_size
        # self.hidden_size = args.lookup_hidden_size
        # self.output_size = args.lookup_output_size
        self.kernel_size = args.lookup_kernel_size
        self.stride = args.lookup_stride

        ## output = floor((input + 2*padding - kernel_size)/stride) + 1
        self.average_pooling = nn.AvgPool1d(kernel_size=self.kernel_size,
                                            stride=self.stride,
                                            padding=1)
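A sanity check of the formula in the comment, with hypothetical values kernel_size=3, stride=2, padding=1 on a length-10 input: floor((10 + 2*1 - 3)/2) + 1 = 5.

    import torch
    import torch.nn as nn

    pool = nn.AvgPool1d(kernel_size=3, stride=2, padding=1)
    out = pool(torch.randn(1, 4, 10))
    print(out.shape)  # torch.Size([1, 4, 5])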
Example #11
 def __init__(self, config):
     super().__init__()
     self.config = config
     self.pos_conv_embed = SEWPositionalConvEmbedding(config)
     self.pool = nn.AvgPool1d(config.squeeze_factor, config.squeeze_factor)
     self.layer_norm = nn.LayerNorm(config.hidden_size,
                                    eps=config.layer_norm_eps)
     self.dropout = nn.Dropout(config.hidden_dropout)
     self.layers = nn.ModuleList(
         [SEWEncoderLayer(config) for _ in range(config.num_hidden_layers)])
     self.upsample = SEWUpsampling(config)
     self.gradient_checkpointing = False
Example #12
 def forward(self, input_ids, token_type_ids, attention_mask):
     last_hidden_state = self.bert(
         input_ids=input_ids,
         token_type_ids=token_type_ids,
         attention_mask=attention_mask)['last_hidden_state']
     last_hidden_state = last_hidden_state.permute(0, 2, 1).contiguous()
     h0, last_hidden_state = last_hidden_state[:, :, 0], last_hidden_state[:, :, 1:]
     h1 = nn.MaxPool1d(last_hidden_state.shape[-1])(last_hidden_state).squeeze(-1)
     h2 = nn.AvgPool1d(last_hidden_state.shape[-1])(last_hidden_state).squeeze(-1)
     h = torch.cat([h0, h1, h2], dim=1)
     logits = self.fc(h)
     return logits
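The classifier head therefore receives 3 * hidden_size features per example (the CLS vector plus max- and average-pooled token states). A hypothetical shape walk-through with hidden_size=768 and 10 positions:

    import torch
    import torch.nn as nn

    hidden = torch.randn(2, 768, 10)                      # [N, hidden, tokens] after permute
    h0 = hidden[:, :, 0]                                  # CLS vector: [2, 768]
    h1 = nn.MaxPool1d(9)(hidden[:, :, 1:]).squeeze(-1)    # [2, 768]
    h2 = nn.AvgPool1d(9)(hidden[:, :, 1:]).squeeze(-1)    # [2, 768]
    print(torch.cat([h0, h1, h2], dim=1).shape)           # torch.Size([2, 2304])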
Example #13
    def __init__(self):
        super(Model, self).__init__()

        self.pool_0 = nn.AvgPool1d(kernel_size=3)
        self.pool_1 = nn.AvgPool1d(kernel_size=4, stride=2, padding=2)
        self.pool_2 = nn.AvgPool1d(kernel_size=3, stride=1, padding=0, ceil_mode=False, count_include_pad=True)
        self.pool_3 = nn.AvgPool1d(kernel_size=5, stride=2, padding=2, ceil_mode=True, count_include_pad=False)
        self.pool_4 = nn.AvgPool1d(kernel_size=3, stride=2, padding=1, ceil_mode=False, count_include_pad=True)
        self.pool_5 = nn.AvgPool1d(kernel_size=2, stride=1, padding=0, ceil_mode=True, count_include_pad=True)
        self.pool_6 = nn.AvgPool1d(kernel_size=4, stride=1, padding=2, ceil_mode=False, count_include_pad=False)
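ceil_mode changes how a partial window at the end of the input is handled, and count_include_pad controls whether zero padding enters the average; two small illustrations with hypothetical inputs:

    import torch
    import torch.nn as nn

    x = torch.randn(1, 1, 6)
    print(nn.AvgPool1d(3, stride=2, ceil_mode=False)(x).shape)  # [1, 1, 2]: floor((6-3)/2) + 1
    print(nn.AvgPool1d(3, stride=2, ceil_mode=True)(x).shape)   # [1, 1, 3]: ceil((6-3)/2) + 1

    y = torch.tensor([[[1., 3.]]])
    print(nn.AvgPool1d(2, stride=2, padding=1, count_include_pad=True)(y))   # [[[0.5, 1.5]]]: zeros counted
    print(nn.AvgPool1d(2, stride=2, padding=1, count_include_pad=False)(y))  # [[[1.0, 3.0]]]: real elements only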
Example #14
    def __init__(self, num_input_chars, hidden_size, output_classes):
        super(CharCNN, self).__init__()

        self.input_size = num_input_chars
        self.hidden_size = hidden_size
        self.output_size = output_classes

        self.lookup = nn.Embedding(num_input_chars, hidden_size)
        self.conv1d = nn.Conv1d(hidden_size, hidden_size,
                                2)  # in_channels, out_channels, kernel_size
        self.pool1d = nn.AvgPool1d(2)  # kernel width over which to pool
        self.decoder = nn.Linear(hidden_size, output_classes)
Example #15
    def __init__(self,
                 layers,
                 channels,
                 kernel_size,
                 readout_layers=2,
                 non_linearity=True,
                 pooling='avg',
                 dropout=0.0,
                 batch_norm=False,
                 rank=2,
                 temperature=0.05,
                 init_size=1e-3,
                 max_scale=1. - 1e-3,
                 alphabet_size=4,
                 sequence_length=128,
                 device='cpu'):
        super(HypHCCNN, self).__init__(temperature=temperature,
                                       init_size=init_size,
                                       max_scale=max_scale)

        self.alphabet_size = alphabet_size
        self.device = device
        self.embedding = nn.Linear(alphabet_size, channels)
        self.conv = torch.nn.Sequential()
        for l in range(layers):
            self.conv.add_module(
                'conv_' + str(l + 1),
                nn.Conv1d(in_channels=channels,
                          out_channels=channels,
                          kernel_size=kernel_size,
                          padding=kernel_size // 2))
            if batch_norm:
                self.conv.add_module('batchnorm_' + str(l + 1),
                                     nn.BatchNorm1d(num_features=channels))
            if non_linearity:
                self.conv.add_module('relu_' + str(l + 1), nn.ReLU())

            if pooling == 'avg':
                self.conv.add_module(pooling + '_pool_' + str(l + 1),
                                     nn.AvgPool1d(2))
            elif pooling == 'max':
                self.conv.add_module(pooling + '_pool_' + str(l + 1),
                                     nn.MaxPool1d(2))

        flat_size = channels * sequence_length if pooling == 'none' else channels * (
            sequence_length // 2**layers)
        self.readout = MLP(in_size=flat_size,
                           hidden_size=rank,
                           out_size=rank,
                           layers=readout_layers,
                           mid_activation='relu',
                           dropout=dropout,
                           device=device)
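The flattened size assumes one pooling-by-2 step per layer; with hypothetical values channels=64, sequence_length=128, layers=3 and pooling='avg', that is 64 * (128 // 2**3) = 1024.

    channels, sequence_length, layers = 64, 128, 3   # hypothetical values
    flat_size = channels * (sequence_length // 2 ** layers)
    print(flat_size)  # 1024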
Example #16
    def __init__(self, channels, kernel_size, padding, pool=False, double_channels=False):
        super(ResBlock, self).__init__()
        self.pool = pool
        self.double_channels = double_channels
        self.channels = channels

        self.conv0 = nn.Conv1d(channels, channels, kernel_size, padding=padding)
        self.conv1 = nn.Conv1d(channels, channels, kernel_size, padding=padding)
        self.leakyrelu = nn.LeakyReLU(inplace=True)
        self.pooling_layer = nn.AvgPool1d(2)
        self.layer_norm0 = None
        self.layer_norm1 = None
Example #17
 def __init__(self, alphabet_size, embedding_dim=100, hidden_dim=50, dropout=0.5, gpu=True):
     super(CharCNN, self).__init__()
     self.gpu = gpu
     self.hidden_dim = hidden_dim
     self.char_drop = nn.Dropout(dropout)
     self.char_embeddings = nn.Embedding(alphabet_size, embedding_dim)
     self.char_cnn1 = nn.Conv1d(embedding_dim, self.hidden_dim, kernel_size=3, padding=1)
     self.pool1 = nn.AvgPool1d(4)
     self.char_cnn2 = nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=3, padding=1)
     self.pool2 = nn.AvgPool1d(4)
     self.char_cnn3 = nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=3, padding=1)
     self.pool3 = nn.AvgPool1d(2)
     if self.gpu:
         self.char_drop = self.char_drop.cuda()
         self.char_embeddings = self.char_embeddings.cuda()
         self.char_cnn1 = self.char_cnn1.cuda()
         self.pool1 = self.pool1.cuda()
         self.char_cnn2 = self.char_cnn2.cuda()
         self.pool2 = self.pool2.cuda()
         self.char_cnn3 = self.char_cnn3.cuda()
         self.pool3 = self.pool3.cuda()
Example #18
 def __init__(self, num_input_features, num_output_features):
     super().__init__()
     self.add_module('norm', nn.BatchNorm1d(num_input_features))
     self.add_module('relu', nn.ReLU(inplace=True))
     self.add_module(
         'conv',
         nn.Conv1d(num_input_features,
                   num_output_features,
                   kernel_size=1,
                   stride=1,
                   bias=False))
     self.add_module('pool', nn.AvgPool1d(kernel_size=2, stride=2))
Example #19
    def __init__(self, num_D, ndf, n_layers, downsampling_factor):
        super().__init__()
        self.model = nn.ModuleDict()
        for i in range(num_D):
            self.model[f"disc_{i}"] = NLayerDiscriminator(
                ndf, n_layers, downsampling_factor)

        self.downsample = nn.AvgPool1d(4,
                                       stride=2,
                                       padding=1,
                                       count_include_pad=False)
        self.apply(weights_init)
Example #20
 def __init__(self, in_planes, out_planes):
     super(_Transition, self).__init__()
     self.add_module('norm', nn.BatchNorm1d(in_planes))
     self.add_module('relu', nn.ReLU(inplace=True))
     self.add_module(
         'conv',
         nn.Conv1d(in_planes,
                   out_planes,
                   kernel_size=1,
                   stride=1,
                   bias=False))
     self.add_module('pool', nn.AvgPool1d(kernel_size=2, stride=2))
Example #21
    def __init__(self, input_channel, layers=[1, 1, 1, 1], num_classes=10):
        self.inplanes5_1 = 64
        self.inplanes5_2 = 64
        self.inplanes5_3 = 64

        super(MSResNet, self).__init__()

        self.conv1 = nn.Conv1d(input_channel, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)


        self.layer5x5_11 = self._make_layer5_1(BasicBlock5x5_1, 64, layers[0], stride=2)
        self.layer5x5_12 = self._make_layer5_1(BasicBlock5x5_1, 128, layers[1], stride=2)
        self.layer5x5_13 = self._make_layer5_1(BasicBlock5x5_1, 256, layers[2], stride=2)
        # self.layer3x3_4 = self._make_layer3(BasicBlock3x3, 512, layers[3], stride=2)
        # pooling kernel sizes: 16, 11, 6
        self.maxpool5_1 = nn.AvgPool1d(kernel_size=11, stride=1, padding=0)


        self.layer5x5_21 = self._make_layer5_2(BasicBlock5x5_2, 64, layers[0], stride=2)
        self.layer5x5_22 = self._make_layer5_2(BasicBlock5x5_2, 128, layers[1], stride=2)
        self.layer5x5_23 = self._make_layer5_2(BasicBlock5x5_2, 256, layers[2], stride=2)
        # self.layer3x3_4 = self._make_layer3(BasicBlock3x3, 512, layers[3], stride=2)

        # pooling kernel sizes: 16, 11, 6
        self.maxpool5_2 = nn.AvgPool1d(kernel_size=11, stride=1, padding=0)

        self.layer5x5_31 = self._make_layer5_3(BasicBlock5x5_3, 64, layers[0], stride=2)
        self.layer5x5_32 = self._make_layer5_3(BasicBlock5x5_3, 128, layers[1], stride=2)
        self.layer5x5_33 = self._make_layer5_3(BasicBlock5x5_3, 256, layers[2], stride=2)
        # self.layer3x3_4 = self._make_layer3(BasicBlock3x3, 512, layers[3], stride=2)

        # pooling kernel sizes: 16, 11, 6
        self.maxpool5_3 = nn.AvgPool1d(kernel_size=11, stride=1, padding=0)

        # self.drop = nn.Dropout(p=0.2)
        self.fc = nn.Linear(256*3, num_classes)
Example #22
    def __init__(self, length=256):
        super().__init__()

        self.starter = nn.Sequential(Block(2, 16), Block(16, 20),
                                     nn.AvgPool1d(kernel_size=2),
                                     Block(20, 24), nn.AvgPool1d(kernel_size=2),
                                     Block(24, 28), nn.AvgPool1d(kernel_size=2),
                                     Block(28, 32))

        self.pass_down1 = nn.Sequential(nn.AvgPool1d(kernel_size=2),
                                        Block(32, 48))

        self.pass_down2 = nn.Sequential(nn.AvgPool1d(kernel_size=2),
                                        Block(48, 64))

        self.code = nn.Sequential(nn.AvgPool1d(kernel_size=2), Block(64, 96),
                                  nn.Upsample(scale_factor=2), Block(96, 64))

        self.pass_up2 = nn.Sequential(nn.Upsample(scale_factor=2),
                                      Block(128, 64))

        self.pass_up1 = nn.Sequential(nn.Upsample(scale_factor=2),
                                      Block(112, 48))

        self.finisher = nn.Sequential(nn.Upsample(scale_factor=2),
                                      Block(80, 64), nn.Upsample(scale_factor=2),
                                      Block(64, 32), nn.Upsample(scale_factor=2),
                                      Block(32, 16),
                                      nn.Conv1d(16, 2, 1, padding=0))
Example #23
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv1d(inplanes,
                               planes,
                               kernel_size=7,
                               bias=False,
                               padding=3)
        self.bn1 = nn.BatchNorm1d(planes)
        self.conv2 = nn.Conv1d(planes,
                               planes,
                               kernel_size=11,
                               stride=stride,
                               padding=5,
                               bias=False)
        self.bn2 = nn.BatchNorm1d(planes)
        self.conv3 = nn.Conv1d(planes,
                               planes * 4,
                               kernel_size=7,
                               bias=False,
                               padding=3)
        self.bn3 = nn.BatchNorm1d(planes * 4)
        self.relu = nn.ReLU(inplace=True)

        if planes == 64:
            self.globalAvgPool = nn.AvgPool1d(313, stride=1)
        elif planes == 128:
            self.globalAvgPool = nn.AvgPool1d(157, stride=1)
        elif planes == 256:
            self.globalAvgPool = nn.AvgPool1d(79, stride=1)
        elif planes == 512:
            self.globalAvgPool = nn.AvgPool1d(40, stride=1)

        self.fc1 = nn.Linear(in_features=planes * 4,
                             out_features=round(planes / 4))
        self.fc2 = nn.Linear(in_features=round(planes / 4),
                             out_features=planes * 4)

        self.downsample = downsample
        self.stride = stride
        self.dropout = nn.Dropout(.2)
Example #24
    def __init__(self, num_classes=12):
        super().__init__()
        self.in_channels = 12
        self.pool0 = nn.AvgPool1d(kernel_size=2)
        self.pool1 = nn.AvgPool1d(kernel_size=3)
        self.conv0 = nn.Conv1d(in_channels=self.in_channels,
                               out_channels=2*self.in_channels,
                               kernel_size=51,
                               groups=self.in_channels)
        self.bn0 = nn.BatchNorm1d(num_features=2*self.in_channels)

        self.conv1 = nn.Conv1d(in_channels=2*self.in_channels,
                               out_channels=4*self.in_channels,
                               kernel_size=26,
                               groups=self.in_channels)
        self.bn1 = nn.BatchNorm1d(num_features=4*self.in_channels)

        self.conv2 = nn.Conv1d(in_channels=4*self.in_channels,
                               out_channels=4*self.in_channels,
                               kernel_size=10,
                               groups=self.in_channels)
        self.bn2 = nn.BatchNorm1d(num_features=4*self.in_channels)

        self.conv3 = nn.Conv1d(in_channels=4*self.in_channels,
                               out_channels=5*self.in_channels,
                               kernel_size=8)
        self.bn3 = nn.BatchNorm1d(num_features=5*self.in_channels)

        self.conv4 = nn.Conv1d(in_channels=5*self.in_channels,
                               out_channels=5*self.in_channels,
                               kernel_size=4)
        self.bn4 = nn.BatchNorm1d(num_features=5*self.in_channels)

        self.conv5 = nn.Conv1d(in_channels=self.in_channels,
                               out_channels=self.in_channels // 2,
                               kernel_size=11)
        self.bn5 = nn.BatchNorm1d(num_features=self.in_channels // 2)
        self.pred = nn.Linear(in_features=6 * 730, out_features=num_classes)
        encoder_layer = nn.TransformerEncoderLayer(d_model=740, nhead=5, dim_feedforward=740*2)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
Example #25
    def __init__(self,
                 in_size,
                 out_size,
                 input_channels=1,
                 convolutions=[16, 16],
                 kernel_size=[7, 7],
                 hiddenlayer=[512, 256],
                 maxpool=4,
                 dropout=0.1):
        super(ResidualNetworkD1, self).__init__()


        convLayers = []
        
        ## Convolutional layers
        for i, convolution in enumerate(convolutions):
            if (i==0):
                convLayers.append(nn.Conv1d(in_channels = input_channels, out_channels = convolutions[0],
                                padding = kernel_size[0]//2, kernel_size = kernel_size[0]))
                convLayers.append(nn.BatchNorm1d(convolutions[0]))
                #layers.append(nn.MaxPool1d(maxpool))
                convLayers.append(nn.ReLU())
                #convLayers.append(nn.Dropout(p=dropout))          
            else:
                convLayers.append(ResnetBlock1d(in_num_filter = convolutions[i-1], out_num_filters = convolutions[i],
                                padding_size = kernel_size[i]//2, kernel_sizes = kernel_size[i]))
                #convLayers.append(nn.BatchNorm1d(convolutions[i]))
                convLayers.append(nn.AvgPool1d(maxpool))
                convLayers.append(nn.ReLU())
                convLayers.append(nn.Dropout(p=dropout))
                
        # Flatten before fully connected
        #layers.append(nn.MaxPool1d(maxpool))
        
        flatten_layers = []
        flatten_layers.append(Flatten())
        
        ## Fully Connected layers
        for i, layer in enumerate(hiddenlayer):
            if i == 0:
                flatten_layers.append(nn.Linear(in_size//(maxpool**(len(convolutions)-1))*convolutions[-1],hiddenlayer[i]))
                flatten_layers.append(nn.ReLU())
                flatten_layers.append(nn.Dropout(p=dropout))
                
            else:
                flatten_layers.append(nn.Linear(hiddenlayer[i-1], hiddenlayer[i]))
                flatten_layers.append(nn.ReLU())
                flatten_layers.append(nn.Dropout(p=dropout))
        
        flatten_layers.append(nn.Linear(hiddenlayer[-1], out_size))
        self.convnet = nn.Sequential(*convLayers)
        self.flatnet = nn.Sequential(*flatten_layers)
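The first fully connected layer's input size assumes one AvgPool1d(maxpool) after every stage except the first. A quick check with hypothetical values in_size=1024, maxpool=4, convolutions=[16, 16]:

    in_size, maxpool, convolutions = 1024, 4, [16, 16]   # hypothetical values
    flat = in_size // (maxpool ** (len(convolutions) - 1)) * convolutions[-1]
    print(flat)  # 4096 features after Flatten()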
Example #26
    def __init__(
        self,
        vocab_size,
        embedding_dim,
        len_sentence,
        channel_size=4,
        x2_size=1,  # additional data - cap ratio
        fc_dim=128,
        padding_idx=1,
        dropout=0.3,
        num_labels=7,
        batch_size=32,
        is_cuda=False,
        n_gram=5,
        additional_kernel_size=1,
    ):
        super(Net, self).__init__()
        self.embedding = nn.Embedding(vocab_size + 2,
                                      embedding_dim=embedding_dim,
                                      padding_idx=padding_idx)
        self.embedding_dim = embedding_dim
        self.vocab_size = vocab_size
        self.channel_size = channel_size
        self.len_sentence = len_sentence
        self.batch_size = batch_size
        self.x2_size = x2_size
        self.kernel_size = (n_gram, embedding_dim + additional_kernel_size)
        self.n_gram = n_gram
        self.additional_kernel_size = additional_kernel_size
        self.conv2d = nn.Conv2d(1,
                                out_channels=channel_size,
                                kernel_size=self.kernel_size,
                                stride=1)
        # output : batch x channel x (len_sentence - 2) x 1

        # -> squeeze : batch x channel x (len_sentence - 2)
        self.relu = nn.ReLU(inplace=True)
        self.dropout1d = nn.Dropout(p=dropout)
        self.pool1d = nn.AvgPool1d(kernel_size=2)
        # output : batch x channel x (len_sentence - 2) / 2

        self.bottleneck_size = channel_size * (len_sentence -
                                               (self.n_gram - 1)) / 2
        assert self.bottleneck_size.is_integer()
        self.bottleneck_size = int(self.bottleneck_size) + self.x2_size

        self.fcn1 = nn.Linear(self.bottleneck_size, fc_dim)
        self.relu1 = nn.ReLU(inplace=True)
        self.fcn2 = nn.Linear(fc_dim, num_labels)
        self.sigmoid = nn.Sigmoid()
        self.fc_dim = fc_dim
        self.num_labels = num_labels
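The assert above simply enforces that channel_size * (len_sentence - (n_gram - 1)) is even before the x2_size features are appended. A worked check with the hypothetical defaults channel_size=4 and n_gram=5:

    channel_size, n_gram, len_sentence = 4, 5, 34
    bottleneck = channel_size * (len_sentence - (n_gram - 1)) / 2
    print(bottleneck, bottleneck.is_integer())  # 60.0 True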
Example #27
    def __init__(self, num_tokens, word_emb_size, hidden_size,
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu"), num_layers=1, num_filters=3,
                 kernel_size=1, stride=5, fusion="cat", env=None,
                 condition_answer="none", attention_dim=512):
        super(PolicyLSTMBatch, self).__init__()
        self.device = device
        self.condition_answer = condition_answer
        self.num_tokens = num_tokens
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.word_embedding = nn.Embedding(num_tokens, word_emb_size, padding_idx=0)
        self.lstm = nn.LSTM(word_emb_size, self.hidden_size, batch_first=True)
        truncature = {"masked": mask_truncature, "masked_inf": mask_inf_truncature}
        self.truncate = truncature["masked_inf"]
        self.answer_embedding = nn.Embedding(env.dataset.len_vocab_answer, word_emb_size)
        self.fusion = fusion
        self.num_filters = word_emb_size if num_filters is None else num_filters
        self.stride = stride
        self.kernel_size = kernel_size
        self.word_emb_size = word_emb_size
        self.attention_dim = attention_dim
        h_out = int((14 + 2 * 0 - 1 * (self.kernel_size - 1) - 1) / self.stride + 1)
        self.conv = nn.Conv2d(in_channels=1024, out_channels=self.num_filters, kernel_size=self.kernel_size,
                              stride=self.stride)
        if self.fusion == "none":
            self.fusion_dim = self.hidden_size
        elif self.fusion == "film":
            self.gammabeta = nn.Linear(self.hidden_size, 2 * self.num_filters)
            self.film = contrib_nn.FiLM()
            self.fusion_dim = self.num_filters * h_out ** 2
        elif self.fusion == "sat":
            self.attention = Attention(101, self.hidden_size, attention_dim, word_emb_size)
            self.init_h = nn.Linear(101, hidden_size)  # linear layer to find initial hidden state of LSTMCell
            self.init_c = nn.Linear(101, hidden_size)  # linear layer to find initial cell state of LSTMCell
            self.last_states = None
            self.f_beta = nn.Linear(hidden_size, 101)  # linear layer to create a sigmoid-activated gate
            self.sigmoid = nn.Sigmoid()
            self.fc = nn.Linear(hidden_size, self.num_tokens)
            self.decode_step = nn.LSTMCell(word_emb_size + 101, hidden_size, bias=True)
            self.fusion_dim = hidden_size
        elif self.fusion == "average":
            self.projection = nn.Linear(2048, hidden_size)
            self.avg_pooling = nn.AvgPool1d(kernel_size=101)
            self.fusion_dim = 2 * hidden_size
        else:
            self.fusion_dim = self.num_filters * h_out ** 2 + self.hidden_size

        if self.condition_answer in ["after_fusion", "attention"]:
            self.fusion_dim += word_emb_size

        self.action_head = nn.Linear(self.fusion_dim, num_tokens)
        self.value_head = nn.Linear(self.fusion_dim, 1)
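The h_out expression is the standard Conv2d output-size formula applied to a 14x14 feature map; with the defaults kernel_size=1 and stride=5 it gives int((14 - 0 - 1)/5 + 1) = 3, which matches the convolution itself:

    import torch
    import torch.nn as nn

    conv = nn.Conv2d(1024, 3, kernel_size=1, stride=5)   # num_filters=3 by default
    out = conv(torch.randn(1, 1024, 14, 14))
    print(out.shape)  # torch.Size([1, 3, 3, 3])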
Example #28
    def __init__(self,
                 embedding_size: int,
                 embedding_dim: int,
                 num_classes: int = 1,
                 hidden_lstm_size: int = 64,
                 hidden_gru_size: int = 64,
                 dropout_rate: float = 0.5):

        super(LSTM_GRU, self).__init__()

        self.embedding = nn.Embedding(embedding_size, embedding_dim)
        self.embedding_dropout = EmbeddingDropout(dropout_rate)

        self.lstm = nn.LSTM(
            input_size=embedding_dim,
            hidden_size=hidden_lstm_size,
            bias=True,
            batch_first=True,
            bidirectional=True,
        )
        self.gru = nn.GRU(
            input_size=hidden_lstm_size * 2,
            hidden_size=hidden_gru_size,
            bias=True,
            batch_first=True,
            bidirectional=True,
        )
        self.max_pool = nn.MaxPool1d(2)
        self.avg_pool = nn.AvgPool1d(2)
        input_size = 2 * hidden_gru_size  # 2 pooling layers
        self.head = nn.Sequential(
            OrderedDict([
                (
                    "block1",
                    nn.Sequential(
                        nn.Linear(input_size, num_classes * 2),
                        nn.ReLU(True),
                        nn.BatchNorm1d(num_classes * 2),
                        nn.Dropout(dropout_rate),
                    ),
                ),
                (
                    "block2",
                    nn.Sequential(
                        nn.Linear(num_classes * 2, int(num_classes * 1.5)),
                        nn.ReLU(True),
                        nn.BatchNorm1d(int(num_classes * 1.5)),
                        nn.Dropout(dropout_rate),
                    ),
                ),
                ("head", nn.Linear(int(num_classes * 1.5), num_classes)),
            ]))
Example #29
    def __init__(self):
        super(Steganalysis, self).__init__()
        K = np.array([[-1, 2, -1]], dtype=float)
        K = K.reshape(1, 1, 3).astype(np.float32)

        K = torch.from_numpy(K)
        self.K = nn.Parameter(data=K, requires_grad=False)
        self.conv2 = nn.Conv1d(in_channels=1,
                               out_channels=1,
                               kernel_size=5,
                               stride=1,
                               padding=2)
        self.conv3 = nn.Conv1d(1, 8, 1, 1, 0)
        self.conv4 = nn.Conv1d(8, 8, 3, 2, 2)

        self.conv5 = nn.Conv1d(8, 8, 5, 1, 2)
        self.conv6 = nn.Conv1d(8, 16, 1, 1, 0)
        self.conv7 = nn.Conv1d(16, 16, 3, 2, 2)

        self.conv8 = nn.Conv1d(16, 16, 5, 1, 2)
        self.T1 = nn.ReLU()
        self.conv9 = nn.Conv1d(16, 32, 1, 1, 0)
        self.T2 = nn.ReLU()
        self.Max1 = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

        self.conv10 = nn.Conv1d(32, 32, 5, 1, 2)
        self.T3 = nn.ReLU()
        self.conv11 = nn.Conv1d(32, 64, 1, 1, 0)
        self.T4 = nn.ReLU()
        self.Max2 = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

        self.conv12 = nn.Conv1d(64, 64, 5, 1, 2)
        self.T5 = nn.ReLU()
        self.conv13 = nn.Conv1d(64, 128, 1, 1, 0)
        self.T6 = nn.ReLU()
        self.Max3 = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

        self.conv14 = nn.Conv1d(128, 128, 5, 1, 2)
        self.T7 = nn.ReLU()
        self.conv15 = nn.Conv1d(128, 256, 1, 1, 0)
        self.T8 = nn.ReLU()
        self.Max4 = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

        self.conv16 = nn.Conv1d(256, 256, 5, 1, 2)
        self.T9 = nn.ReLU()
        self.conv17 = nn.Conv1d(256, 512, 1, 1, 0)
        self.T10 = nn.ReLU()
        self.avg = nn.AvgPool1d(kernel_size=256, stride=256)

        self.fully_connected = nn.Linear(in_features=512, out_features=1)
        self.finall = nn.Sigmoid()
        self.init_weights()
Example #30
    def __init__(self, fixed_len=128, p=0.0):
        super(Encoder, self).__init__()

        self.fixed_len = fixed_len

        self.conv_layer1 = conv_layer(1, 64)

        self.stem = conv_stem(64, p=p)

        self.pool1 = nn.AvgPool1d(kernel_size=2)
        self.bottle_neck1 = conv_layer(64, 128, ksize=1)
        self.residual_block_1 = residual_block(128, 128, p=p)

        self.pool2 = nn.AvgPool1d(kernel_size=2)
        self.bottle_neck2 = conv_layer(128, 256, ksize=1)
        self.residual_block_2 = residual_block(256, 256, p=p)

        self.pool3 = nn.AvgPool1d(kernel_size=2)
        self.bottle_neck3 = conv_layer(256, 512, ksize=1)
        self.residual_block_3 = residual_block(512, 512, p=p)

        self.gap = nn.AdaptiveAvgPool1d(1)