Example #1
def softshrink(x, lambd, gpu_id):
    nch = 2
    nx = 161
    # xs = 128
    # ys = 160
    ws = 5

    t0 = time.time()
    One = Variable(torch.ones(1).cuda(gpu_id), requires_grad=False)
    Zero = Variable(torch.zeros(1).cuda(gpu_id), requires_grad=False)
    lambd_t = One * lambd

    x = x.view(nch, nx, 128, 160)
    poolL2 = nn.LPPool2d(2, ws, stride=ws, ceil_mode=True)
    xx_old = poolL2(x)
    xx_old[xx_old == 0] = lambd_t / 1000
    subgrad = torch.max(One - torch.div(lambd_t, xx_old), Zero)
    xs = subgrad.shape[2]
    ys = subgrad.shape[3]
    subgrad = subgrad.view(nch, nx, xs * ys, -1).repeat(1, 1, 1, ws).view(
        nch, nx, -1, ws * ys).repeat(1, 1, 1, ws).view(nch, nx, -1,
                                                       ws * ys)[:, :, :128, :]
    x = (x * subgrad).view(nch, nx, -1, 20480).squeeze()
    # print('total time per subgrad op: ', time.time()-t0)
    # print('done!')

    return x
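A hedged aside on the snippet above: nn.LPPool2d(2, ws) returns the window-wise L2 norm (root of a sum of squares, with no averaging), and the repeat/view chain is a hand-rolled nearest-neighbour upsampling of the per-window factor back to pixel resolution, cropped to 128 rows because ceil_mode over-covers. A minimal equivalence check with the shapes used above (my sketch, not part of the original source):

import torch
import torch.nn.functional as F

ws = 5
subgrad = torch.rand(2, 161, 26, 32)  # (nch, nx, xs, ys) for a 128x160 input
a = subgrad.view(2, 161, 26 * 32, -1).repeat(1, 1, 1, ws).view(
    2, 161, -1, ws * 32).repeat(1, 1, 1, ws).view(2, 161, -1,
                                                  ws * 32)[:, :, :128, :]
b = F.interpolate(subgrad, scale_factor=ws, mode='nearest')[:, :, :128, :]
print(torch.equal(a, b))  # True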
Example #2
def rn18_l4_1a_lppool(p: float):
    n = 49**(1 / p)
    model = rn18_l4_1a_nc_1k_scratch()
    model.classifier = nn.Sequential(nn.LPPool2d(p, kernel_size=7),
                                     LambdaLayer(lambda x: x / n),
                                     nn.Flatten())
    return model
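Why n = 49**(1 / p): nn.LPPool2d computes a windowed Lp norm, so the 7x7 kernel sums 49 values before taking the p-th root; dividing by 49**(1/p) turns that into a power mean, (mean of x**p)**(1/p), which approaches plain average pooling as p -> 1. A small check (my own sketch):

import torch
import torch.nn as nn

p, k = 1.5, 7
x = torch.rand(1, 8, 7, 7)  # non-negative: fractional p on negatives gives NaNs
lp = nn.LPPool2d(p, kernel_size=k)(x) / (k * k) ** (1 / p)
ref = x.pow(p).mean(dim=(-1, -2), keepdim=True).pow(1 / p)
print(torch.allclose(lp, ref, atol=1e-5))  # True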
Example #3
    def __init__(self,
                 plan,
                 conv,
                 num_classes=10,
                 dense_classifier=False,
                 path=False):
        super(VGG, self).__init__()
        layer_list = []
        filters = 3

        self.path = path

        for spec in plan:
            if spec == 'M':
                if self.path:
                    layer_list.append(nn.LPPool2d(1.0, kernel_size=2,
                                                  stride=2))
                else:
                    layer_list.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layer_list.append(conv(filters, spec))
                filters = spec

        self.layers = nn.Sequential(*layer_list)

        self.fc = layers.Linear(512, num_classes)
        if dense_classifier:
            self.fc = nn.Linear(512, num_classes)

        self._initialize_weights()
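Note on the path branch: with norm_type=1 and non-negative activations, LPPool2d reduces to plain sum pooling, which is presumably why it replaces MaxPool2d when self.path is set (a sum, unlike a max, is linear in its inputs). A quick identity check (sketch):

import torch
import torch.nn as nn

x = torch.rand(1, 4, 8, 8)  # non-negative, as after a ReLU
lp = nn.LPPool2d(1.0, kernel_size=2, stride=2)(x)
ref = nn.AvgPool2d(2, stride=2)(x) * 4  # average times window size = sum
print(torch.allclose(lp, ref, atol=1e-6))  # True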
Example #4
    def __init__(self):
        super(NNPoolingModule, self).__init__()
        self.input1d = torch.randn(1, 16, 50)
        self.module1d = nn.ModuleList([
            nn.MaxPool1d(3, stride=2),
            nn.AvgPool1d(3, stride=2),
            nn.LPPool1d(2, 3, stride=2),
            nn.AdaptiveMaxPool1d(3),
            nn.AdaptiveAvgPool1d(3),
        ])

        self.input2d = torch.randn(1, 16, 30, 10)
        self.module2d = nn.ModuleList([
            nn.MaxPool2d((3, 2), stride=(2, 1)),
            nn.AvgPool2d((3, 2), stride=(2, 1)),
            nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5)),
            nn.LPPool2d(2, 3, stride=(2, 1)),
            nn.AdaptiveMaxPool2d((5, 7)),
            nn.AdaptiveAvgPool2d(7),
        ])

        self.input3d = torch.randn(1, 16, 20, 4, 4)
        self.module3d = nn.ModuleList([
            nn.MaxPool3d(2),
            nn.AvgPool3d(2),
            nn.FractionalMaxPool3d(2, output_ratio=(0.5, 0.5, 0.5)),
            nn.AdaptiveMaxPool3d((5, 7, 9)),
            nn.AdaptiveAvgPool3d((5, 7, 9)),
        ])
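A possible smoke test for the module above (sketch; assumes the class and its torch/nn imports are in scope): run each 2d pooling layer on the stored input and print the output shapes.

m = NNPoolingModule()
for pool in m.module2d:
    print(type(pool).__name__, tuple(pool(m.input2d).shape))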
Example #5
    def __init__(self, useCuda, gpuDevice=0):
        super(netOpenFace, self).__init__()

        self.gpuDevice = gpuDevice

        self.layer1 = Conv2d(3, 64, (7, 7), (2, 2), (3, 3))
        self.layer2 = nn.BatchNorm2d(64)
        self.layer3 = nn.ReLU()
        self.layer4 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=(1, 1))
        self.layer5 = CrossMapLRN(5, 0.0001, 0.75, gpuDevice=gpuDevice)
        self.layer6 = Conv2d(64, 64, (1, 1), (1, 1), (0, 0))
        self.layer7 = nn.BatchNorm2d(64)
        self.layer8 = nn.ReLU()
        self.layer9 = Conv2d(64, 192, (3, 3), (1, 1), (1, 1))
        self.layer10 = nn.BatchNorm2d(192)
        self.layer11 = nn.ReLU()
        self.layer12 = CrossMapLRN(5, 0.0001, 0.75, gpuDevice=gpuDevice)
        self.layer13 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=(1, 1))
        self.layer14 = Inception(
            192, (3, 5), (1, 1), (128, 32), (96, 16, 32, 64),
            nn.MaxPool2d((3, 3), stride=(2, 2), padding=(0, 0)), True)
        self.layer15 = Inception(256, (3, 5), (1, 1), (128, 64),
                                 (96, 32, 64, 64),
                                 nn.LPPool2d(2, (3, 3), stride=(3, 3)), True)
        self.layer16 = Inception(
            320, (3, 5), (2, 2), (256, 64), (128, 32, None, None),
            nn.MaxPool2d((3, 3), stride=(2, 2), padding=(0, 0)), True)
        self.layer17 = Inception(640, (3, 5), (1, 1), (192, 64),
                                 (96, 32, 128, 256),
                                 nn.LPPool2d(2, (3, 3), stride=(3, 3)), True)
        self.layer18 = Inception(
            640, (3, 5), (2, 2), (256, 128), (160, 64, None, None),
            nn.MaxPool2d((3, 3), stride=(2, 2), padding=(0, 0)), True)
        self.layer19 = Inception(1024, (3, ), (1, ), (384, ), (96, 96, 256),
                                 nn.LPPool2d(2, (3, 3), stride=(3, 3)), True)
        self.layer21 = Inception(
            736, (3, ), (1, ), (384, ), (96, 96, 256),
            nn.MaxPool2d((3, 3), stride=(2, 2), padding=(0, 0)), True)
        self.layer22 = nn.AvgPool2d((3, 3), stride=(1, 1), padding=(0, 0))
        self.layer25 = nn.Linear(736, 128)

        #
        # self.eval()

        if useCuda:
            self.cuda(gpuDevice)
Example #6
    def __init__(self):
        super(Model, self).__init__()

        self.pool_0 = nn.LPPool2d(norm_type=2, kernel_size=3)
        self.pool_1 = nn.LPPool2d(norm_type=2, kernel_size=4, stride=2)
        self.pool_2 = nn.LPPool2d(norm_type=1,
                                  kernel_size=(1, 3),
                                  stride=1,
                                  ceil_mode=False)
        self.pool_3 = nn.LPPool2d(norm_type=1,
                                  kernel_size=(4, 5),
                                  stride=(1, 2),
                                  ceil_mode=True)
        self.pool_4 = nn.LPPool2d(norm_type=1.2,
                                  kernel_size=(5, 3),
                                  stride=(2, 1),
                                  ceil_mode=False)
        self.pool_5 = nn.LPPool2d(norm_type=0.5,
                                  kernel_size=2,
                                  stride=1,
                                  ceil_mode=True)
        self.pool_6 = nn.LPPool2d(norm_type=0.1,
                                  kernel_size=(5, 4),
                                  stride=1,
                                  ceil_mode=False)
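Output sizes here follow the standard pooling formula, floor (or, with ceil_mode=True, ceil) of (size - kernel) / stride + 1; note also that PyTorch accepts norm_type < 1 even though such exponents no longer define a norm. A check against pool_3 above (sketch):

import math
import torch
import torch.nn as nn

pool = nn.LPPool2d(norm_type=1, kernel_size=(4, 5), stride=(1, 2),
                   ceil_mode=True)
h, w = 20, 20
out = pool(torch.rand(1, 1, h, w))
print(tuple(out.shape[-2:]))                                   # (17, 9)
print(math.ceil((h - 4) / 1) + 1, math.ceil((w - 5) / 2) + 1)  # 17 9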
Example #7
 def forward(self, x):
     x = self.layers(x)
     if self.path:
         x = nn.LPPool2d(1.0, kernel_size=2)(x)
     else:
         x = nn.AvgPool2d(2)(x)
     x = x.view(x.size(0), -1)
     x = self.fc(x)
     return x
Example #8
def gram_nuclear_norm(y, gid):
    '''y shape:
    [m+p, n_features, T]'''
    wsize = int((y.shape[2] + 1) / 2)
    ksize = [1, wsize]
    poolL2 = nn.LPPool2d(2, ksize, stride=1).cuda(gid)  # ceil_mode?
    avgpool = nn.AvgPool2d(ksize, stride=1).cuda(gid)
    gnn = avgpool(poolL2(y)**2)  # * wsize

    return gnn
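The identity behind gnn (sketch): squaring an L2 pool undoes the root and recovers the windowed sum of squares, so avgpool(poolL2(y)**2) averages local signal energies.

import torch
import torch.nn as nn

y = torch.randn(1, 4, 6, 9)
k = (1, 5)
lhs = nn.LPPool2d(2, k, stride=1)(y) ** 2
rhs = nn.AvgPool2d(k, stride=1)(y ** 2) * (k[0] * k[1])  # mean * count = sum
print(torch.allclose(lhs, rhs, atol=1e-5))  # True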
Example #9
 def forward(self, x):
     out = F.relu(self.bn(self.conv(x)))
     out = self.blocks(out)
     if self.path:
         out = nn.LPPool2d(1.0, out.size()[3])(out)
     else:
         out = F.avg_pool2d(out, out.size()[3])
     out = out.view(out.size(0), -1)
     out = self.fc(out)
     return out
Example #10
 def __init__(self, num_classes=2):
     super(ConvNet_v1, self).__init__()
     self.features = nn.Sequential(
         nn.Conv2d(9, 32, kernel_size=11, stride=4), nn.BatchNorm2d(32),
         nn.ReLU(inplace=True), nn.LPPool2d(1, kernel_size=2, stride=2),
         nn.Conv2d(32, 64, kernel_size=5, stride=2), nn.BatchNorm2d(64),
         nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2),
         nn.Conv2d(64, 64, kernel_size=3), nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=2, stride=2))
     self.fc = nn.Sequential(nn.Linear(4 * 4 * 64, 256), nn.Sigmoid(),
                             nn.Linear(256, num_classes))
Example #11
    def __init__(self,
                 Chans,
                 kernLength1,
                 kernLength2,
                 dropoutRates=(0.25, 0.25),
                 F1=4,
                 D=2,
                 F2=8,
                 poolKern1=5,
                 poolKern2=8,
                 norm_rate=0.25,
                 dropoutType='Dropout'):
        super(EEGNet_experimental, self).__init__()
        #block1
        time_padding = int((kernLength1 // 2))
        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=F1,
                               kernel_size=(1, kernLength1),
                               padding=(0, time_padding),
                               stride=1,
                               bias=False)

        self.batchnorm1 = nn.BatchNorm2d(num_features=F1, affine=True)
        self.depthwise1 = nn.Conv2d(in_channels=F1,
                                    out_channels=F1 * D,
                                    kernel_size=(Chans, 1),
                                    groups=F1,
                                    padding=0,
                                    bias=False)
        self.lppool = nn.LPPool2d(2, (1, 20),
                                  stride=1)  # convert to power, and pool
        self.applyLog = logTransform()

        self.batchnorm2 = nn.BatchNorm2d(num_features=F1 * D, affine=True)
        self.activation_block1 = nn.ELU()
        #self.avg_pool_block1 = nn.AvgPool2d((1,poolKern1))

        self.avg_pool_block1 = nn.AdaptiveAvgPool2d((1, 32))
        self.dropout_block1 = nn.Dropout(p=dropoutRates[0])

        #block2
        self.separable_block2 = deepwise_separable_conv(nin=F1 * D,
                                                        nout=F2,
                                                        kernelSize=kernLength2)
        self.activation_block2 = nn.ELU()
        self.avg_pool_block2 = nn.AdaptiveAvgPool2d((1, 10))
        self.dropout_block2 = nn.Dropout(dropoutRates[1])
        self.flatten = nn.Flatten()

        # block 3
        self.lstm = nn.LSTM(8, 20)
        self.ln1 = nn.Linear(20, 1)
        self.relu1 = nn.ReLU()
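For reference, the lppool layer only shrinks the time axis: kernel (1, 20) with stride 1 slides along the last dimension. A shape sketch (the (batch, F1*D, 1, T) layout after depthwise1 is my reading of the kernel sizes above):

import torch
import torch.nn as nn

pool = nn.LPPool2d(2, (1, 20), stride=1)
x = torch.rand(2, 8, 1, 128)  # assumed (batch, F1*D, 1, T) with F1=4, D=2
print(pool(x).shape)  # torch.Size([2, 8, 1, 109])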
Example #12
 def __init__(self, num_classes=2):
     super(ConvNet_v6, self).__init__()
     self.layer1 = nn.Sequential(
         nn.Conv2d(9, 32, kernel_size=11, stride=4, padding=10),
         nn.ReLU(inplace=True), nn.LPPool2d(1, kernel_size=2, stride=2))
     self.layer2 = nn.Sequential(
         nn.Conv2d(32, 64, kernel_size=5, stride=2, padding=4),
         nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2))
     self.layer3 = nn.Sequential(
         nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.ReLU(inplace=True))
     self.layer4 = nn.AdaptiveAvgPool2d(4)
     self.fc = nn.Sequential(nn.Linear(4 * 4 * 64, 256), nn.Sigmoid(),
                             nn.Linear(256, num_classes))
Example #13
    def __init__(self,nb_classes=10, Chans=19, Samples=1000,
           dropoutRates=(0.25,0.25), kernLength1=500,kernLength2=500, poolKern1=5,poolKern2=8, F1=4,
           D=2, F2=8, norm_rate=0.25, dropoutType='Dropout'):
        super(EEGNet_experimental,self).__init__()
        self.Chans = Chans
        self.Samples = Samples
        self.output_sizes = {}
        #block1
        time_padding = int((kernLength1//2))
        self.conv1 = nn.Conv2d(in_channels=1,out_channels=F1,kernel_size =(1,kernLength1),padding=(0,time_padding), stride=1,bias=False)
        self.output_sizes['conv1']=convtransp_output_shape((Chans,Samples), kernel_size=(1,kernLength1), stride=1,
                                                           pad=(0,time_padding))
        self.batchnorm1 = nn.BatchNorm2d(num_features=F1, affine=True)
        self.depthwise1 = nn.Conv2d(in_channels=F1,out_channels=F1*D,kernel_size=(Chans,1),groups=F1,padding=0,bias=False)
        self.lppool=nn.LPPool2d(2,(1,20),stride=1) # convert to power, and pool
        self.applyLog=logTransform()
        self.output_sizes['depthwise1'] = convtransp_output_shape(self.output_sizes['conv1'], kernel_size=(Chans,1),
                                                                  stride=1, pad=0)
        self.batchnorm2 = nn.BatchNorm2d(num_features=F1*D, affine=True)
        self.activation_block1 = nn.ELU()
        #self.avg_pool_block1 = nn.AvgPool2d((1,poolKern1))
        self.output_sizes['avg_pool_block1'] = convtransp_output_shape(self.output_sizes['depthwise1'], kernel_size=(1, poolKern1),
                                                                   stride=(1,poolKern1), pad=0)
        self.avg_pool_block1 = nn.AdaptiveAvgPool2d((1,int(self.output_sizes['depthwise1'][1]/2))) # used to be 4
        self.avg_pool_block1 = nn.AdaptiveAvgPool2d((1, 32))
        self.output_sizes['avg_pool_block1'] = (1,32)
        self.dropout_block1 = nn.Dropout(p=dropoutRates[0])

        #block2
        self.separable_block2 = deepwise_separable_conv(nin=F1*D,nout=F2,kernelSize=kernLength2)
        self.output_sizes['separable_block2'] = self.separable_block2.get_output_size(self.output_sizes['avg_pool_block1'])
        self.activation_block2 = nn.ELU()
        # self.avg_pool_block2 = nn.AvgPool2d((1,poolKern2))
        # self.output_sizes['avg_pool_block2'] = convtransp_output_shape(self.output_sizes['separable_block2'],
        #                                                                kernel_size=(1, poolKern2),
        #                                                                stride=(1, poolKern2), pad=0)
        #self.avg_pool_block2 = nn.AdaptiveAvgPool2d((1,int(self.output_sizes['separable_block2'][1]/4)))
        # self.output_sizes['avg_pool_block2'] = (1,int(self.output_sizes['separable_block2'][1]/4))
        self.avg_pool_block2 = nn.AdaptiveAvgPool2d((1, 10))
        self.output_sizes['avg_pool_block2'] = (1, 10)

        self.dropout_block2 = nn.Dropout(dropoutRates[1])

        self.flatten = nn.Flatten()
        #n_size = self.get_features_dim(Chans,Samples)
        #self.dense = nn.Linear(n_size,nb_classes)

        # block 3
        self.lstm = nn.LSTM(8, 20)
        self.ln1 = nn.Linear(20, 1)
        self.relu1 = nn.ReLU()
Example #14
    def __init__(self):
        super(OpenFace, self).__init__()

        self.layer1 = nn.Conv2d(3, 64, (7, 7), stride=(2, 2), padding=(3, 3))
        self.layer2 = nn.BatchNorm2d(64)
        self.layer3 = nn.ReLU()
        self.layer4 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=(1, 1))
        self.layer5 = LRN(5)
        self.layer6 = nn.Conv2d(64, 64, (1, 1))
        self.layer7 = nn.BatchNorm2d(64)
        self.layer8 = nn.ReLU()
        self.layer9 = nn.Conv2d(64, 192, (3, 3), stride=(1, 1), padding=(1, 1))
        self.layer10 = nn.BatchNorm2d(192)
        self.layer11 = nn.ReLU()
        self.layer12 = LRN(5)
        self.layer13 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=(1, 1))
        self.layer14 = Inception(
            192, (3, 5), (1, 1), (128, 32), (96, 16, 32, 64),
            nn.MaxPool2d((3, 3), stride=(2, 2), padding=(0, 0)), True)
        self.layer15 = Inception(256, (3, 5), (1, 1), (128, 64),
                                 (96, 32, 64, 64),
                                 nn.LPPool2d(2, (3, 3), stride=(3, 3)), True)
        self.layer16 = Inception(
            320, (3, 5), (2, 2), (256, 64), (128, 32, None, None),
            nn.MaxPool2d((3, 3), stride=(2, 2), padding=(0, 0)), True)
        self.layer17 = Inception(640, (3, 5), (1, 1), (192, 64),
                                 (96, 32, 128, 256),
                                 nn.LPPool2d(2, (3, 3), stride=(3, 3)), True)
        self.layer18 = Inception(
            640, (3, 5), (2, 2), (256, 128), (160, 64, None, None),
            nn.MaxPool2d((3, 3), stride=(2, 2), padding=(0, 0)), True)
        self.layer19 = Inception(1024, (3, ), (1, ), (384, ), (96, 96, 256),
                                 nn.LPPool2d(2, (3, 3), stride=(3, 3)), True)
        self.layer21 = Inception(
            736, (3, ), (1, ), (384, ), (96, 96, 256),
            nn.MaxPool2d((3, 3), stride=(2, 2), padding=(0, 0)), True)
        self.layer22 = nn.AvgPool2d((3, 3), stride=(1, 1))
        self.layer25 = nn.Linear(736, 128)
Example #15
    def __init__(self, inputdim, outputdim, **kwargs):
        super().__init__()
        self.features = nn.Sequential(
            Block2D(1, 32),
            nn.LPPool2d(4, (2, 4)),
            Block2D(32, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (2, 4)),
            Block2D(128, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (1, 4)),
            nn.Dropout(0.3),
        )
        with torch.no_grad():
            rnn_input_dim = self.features(torch.randn(1, 1, 500,
                                                      inputdim)).shape
            rnn_input_dim = rnn_input_dim[1] * rnn_input_dim[-1]

        self.gru = nn.GRU(rnn_input_dim,
                          128,
                          bidirectional=True,
                          batch_first=True)
        self.temp_pool = LinearSoftPool()
        self.outputlayer = nn.Linear(256, outputdim)
Example #16
 def __init__(self, num_classes=2):
     super(ConvNet_v4, self).__init__()
     self.layer1 = nn.Sequential(nn.Conv2d(9, 16, kernel_size=5),
                                 nn.BatchNorm2d(16), nn.ReLU(inplace=True),
                                 nn.LPPool2d(1, kernel_size=2, stride=2))
     self.layer2 = nn.Sequential(nn.Conv2d(16, 24, kernel_size=5),
                                 nn.BatchNorm2d(24), nn.ReLU(inplace=True),
                                 nn.MaxPool2d(kernel_size=2, stride=2))
     self.layer3 = nn.Sequential(nn.Conv2d(72, 128, kernel_size=3),
                                 nn.ReLU(inplace=True),
                                 nn.MaxPool2d(kernel_size=2, stride=2))
     self.layer4 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3),
                                 nn.ReLU(inplace=True),
                                 nn.AdaptiveMaxPool2d(7))
     self.fc = nn.Sequential(nn.Linear(7 * 7 * 128, 256), nn.Sigmoid(),
                             nn.Linear(256, num_classes))
Example #17
 def __init__(self, channel, reduction=16):
     super(CALayer5, self).__init__()
     # global average pooling: feature --> point
     self.avg_pool = nn.AdaptiveAvgPool2d(1)
     # global maximum pooling: feature --> point
     self.max_pool = nn.AdaptiveMaxPool2d(1)
     # windowed Lp pooling (kernel_size=3; not truly global, unlike the two above)
     self.lp_pool = nn.LPPool2d(2, kernel_size=3)
     # define alpha and beta variables
     self.alpha = nn.Parameter(data=torch.Tensor(), requires_grad=True)
     # feature channel downscale and upscale --> channel weight
     self.conv_du = nn.Sequential(
         nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
         nn.ReLU(inplace=True),
         nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
         nn.Sigmoid())
Example #18
    def __init__(self, mode='max3d', ch=None, bias=False):
        super().__init__()

        if ch is not None and ch[0] != ch[1]:
            raise ValueError('input, output channels must be equal')

        if mode == 'max':
            self.down = nn.MaxPool2d(2, stride=2)
        elif mode == 'max3d':
            self.down = nn.MaxPool3d(2, stride=2)
        elif mode == 'mean':
            self.down = nn.AvgPool2d(2, stride=2)
        elif mode == 'norm2':
            self.down = nn.LPPool2d(2, 2, stride=2)
        elif mode == 'conv':
            self.down = conv1(ch, k=2, stride=2, bias=bias)
        else:
            raise ValueError("mode must be one of maxpool, norm2, conv")
Example #19
 def __init__(self, num_classes=2):
     super(ConvNet_v3, self).__init__()
     self.features = nn.Sequential(nn.Conv2d(9, 32, kernel_size=5),
                                   nn.BatchNorm2d(32),
                                   nn.ReLU(inplace=True),
                                   nn.LPPool2d(1, kernel_size=2, stride=2),
                                   nn.Conv2d(32, 64, kernel_size=5),
                                   nn.BatchNorm2d(64),
                                   nn.ReLU(inplace=True),
                                   nn.MaxPool2d(kernel_size=2, stride=2),
                                   nn.Conv2d(64, 128, kernel_size=3),
                                   nn.ReLU(inplace=True),
                                   nn.MaxPool2d(kernel_size=2, stride=2),
                                   nn.Conv2d(128, 128, kernel_size=3),
                                   nn.ReLU(inplace=True))
     self.global_pooling = nn.AdaptiveMaxPool2d(7)
     self.fc = nn.Sequential(nn.Linear(7 * 7 * 128, 256), nn.Sigmoid(),
                             nn.Linear(256, num_classes))
Example #20
 def __init__(self,
              conv_1,
              conv_3,
              conv_5,
              conv_3_max,
              use_max_pool=True,
              use_l2=False):
     super(inception_b, self).__init__()
     model_list = []
     if (len(conv_1) != 0):
         model_list.append(
             nn.Sequential(nn.Conv2d(conv_1[0], conv_1[1], kernel_size=1),
                            nn.ReLU(inplace=True)))
     if (len(conv_3) != 0):
         model_list.append(
             nn.Sequential(
                 nn.Conv2d(conv_3[0], conv_3[1], kernel_size=1),
                 nn.ReLU(inplace=True),
                 nn.Conv2d(conv_3[1], conv_3[2], kernel_size=3, padding=1),
                 nn.ReLU(inplace=True)))
     if (len(conv_5) != 0):
         model_list.append(
             nn.Sequential(
                 nn.Conv2d(conv_5[0], conv_5[1], kernel_size=1),
                 nn.ReLU(inplace=True),
                 nn.Conv2d(conv_5[1], conv_5[2], kernel_size=5, padding=2),
                 nn.ReLU(inplace=True)))
     if (use_max_pool):
         model_list.append(
             nn.Sequential(
                 nn.MaxPool2d(kernel_size=3, padding=1, stride=1),
                 nn.Conv2d(conv_3_max[0],
                           conv_3_max[1],
                           kernel_size=1,
                           padding=0), nn.ReLU(inplace=True)))
      if (use_l2):
          model_list.append(
              nn.Sequential(
                  # nn.LPPool2d takes no padding argument; zero-pad first
                  # (zeros add nothing to an L2 sum, so this is equivalent)
                  nn.ZeroPad2d(1),
                  nn.LPPool2d(2, kernel_size=3, stride=1),
                  nn.Conv2d(conv_3_max[0],
                            conv_3_max[1],
                            kernel_size=1,
                            padding=0), nn.ReLU(inplace=True)))
     self.conv = nn.ModuleList(model_list)
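A quick check of the zero-pad plus LPPool substitution in the use_l2 branch above (sketch): zeros contribute nothing to a sum of squares, so padding first reproduces a padded L2 pool and preserves the branch's 'same' spatial size.

import torch
import torch.nn as nn

x = torch.rand(1, 4, 8, 8)
out = nn.LPPool2d(2, kernel_size=3, stride=1)(nn.ZeroPad2d(1)(x))
print(out.shape)  # torch.Size([1, 4, 8, 8])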
Example #21
    def __init__(self,
                 Chans,
                 kernLength1,
                 kernLength2,
                 dropoutRates=(0.25, 0.25),
                 F1=4,
                 D=2,
                 F2=8,
                 poolKern1=5,
                 poolKern2=8,
                 norm_rate=0.25,
                 dropoutType='Dropout'):
        super(model_simplify, self).__init__()
        #block1
        time_padding = int((kernLength1 // 2))
        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=F1,
                               kernel_size=(1, kernLength1),
                               padding=(0, time_padding),
                               stride=1,
                               bias=False)

        self.batchnorm1 = nn.BatchNorm2d(num_features=F1, affine=True)
        self.depthwise1 = nn.Conv2d(in_channels=F1,
                                    out_channels=F1 * D,
                                    kernel_size=(Chans, 1),
                                    groups=F1,
                                    padding=0,
                                    bias=False)
        self.lppool = nn.LPPool2d(2, (1, 20),
                                  stride=1)  # convert to power, and pool
        self.applyLog = logTransform()

        self.batchnorm2 = nn.BatchNorm2d(num_features=F1 * D, affine=True)
        self.activation_block1 = nn.ELU()
        #self.avg_pool_block1 = nn.AvgPool2d((1,poolKern1))

        self.avg_pool_block1 = nn.AdaptiveAvgPool2d((1, 32))
        self.dropout_block1 = nn.Dropout(p=dropoutRates[0])

        # block 3
        self.lstm = nn.LSTM(8, 20, bidirectional=True)
        self.ln1 = nn.Linear(40, 1)
        self.relu1 = nn.ReLU()
Example #22
    def __init__(self):
        super(DiscriminatorNetwork, self).__init__()
        self.resblock1 = ResBlockDown(1, 16)
        self.resblock2 = ResBlockDown(16, 16)
        self.resblock3 = ResBlockDown(16, 32)
        self.resblock4 = ResBlockDown(32, 64)
        self.resblock5 = ResBlockDown(64, 128)
        self.resblock6 = ResBlockDown(128, 128)
        self.resblock7 = ResBlockDown(128, 256)
        self.resblock8 = ResBlock(256, 256)

        self.resdownblocks = [
            self.resblock1,
            self.resblock2,
            self.resblock3,
            self.resblock4,
            self.resblock5,
            self.resblock6,
            self.resblock7,
        ]

        self.self_attention = SelfAttention(32)
        self.global_sum_pooling = nn.LPPool2d(norm_type=1, kernel_size=(1, 4))
        self.dense = spectral_norm(nn.Linear(256, 1))
Example #23
 def __init__(self):
     super(HASHI, self).__init__()
     self.conv1 = nn.Conv2d(3, 256, kernel_size=8, stride=1, padding=0, bias=False)      # out 256x94x94
     self.relu = nn.ReLU(inplace=True)
     self.L2pool = nn.LPPool2d(2, 2, stride=2)        # out 256x47x47
     self.fc = nn.Linear(256*47*47, 2)
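Shape check (sketch): a 3x101x101 input is my inference from the comments, since 101 - 8 + 1 = 94 and the kernel-2, stride-2 L2 pool halves that to 47.

import torch

net = HASHI()  # assumes the class above is in scope
h = net.L2pool(net.relu(net.conv1(torch.randn(1, 3, 101, 101))))
print(h.shape)  # torch.Size([1, 256, 47, 47])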
Example #24
    def __init__(self, args, gpuDevice=0):
        super(netOpenFace, self).__init__()
        self.gpuDevice = gpuDevice

        self.layer1 = Conv2d(3, 64, (7, 7), (2, 2), (3, 3))
        self.layer2 = BatchNorm(64)
        self.layer3 = nn.ReLU()

        self.layer4 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=(1, 1))
        self.layer5 = CrossMapLRN(5, 0.0001, 0.75, gpuDevice=gpuDevice)

        # Inception (2)
        self.layer6 = Conv2d(64, 64, (1, 1), (1, 1), (0, 0))
        # self.layer6 = Conv2d(64, 64, (1,1), (1,1), (1,1))
        self.layer7 = BatchNorm(64)
        self.layer8 = nn.ReLU()
        self.layer9 = Conv2d(64, 192, (3, 3), (1, 1), (1, 1))
        self.layer10 = BatchNorm(192)
        self.layer11 = nn.ReLU()

        self.layer12 = CrossMapLRN(5, 0.0001, 0.75, gpuDevice=gpuDevice)
        self.layer13 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=(1, 1))

        # Inception (3a)
        # self.layer14 = Inception(192, (3,5), (1,1), (128,32), (96,16,32,64), nn.MaxPool2d((3,3), stride=(2,2), padding=(0,0)), True)
        self.layer14 = Inception(
            192, (3, 5), (1, 1), (128, 32), (96, 16, 32, 64),
            nn.MaxPool2d((3, 3), stride=(1, 1), padding=(1, 1)), True)

        # Inception (3b)
        # self.layer15 = Inception(256, (3,5), (1,1), (128,64), (96,32,64,64), nn.LPPool2d(2, (3,3), stride=(3,3)), True)
        self.layer15 = Inception(256, (3, 5), (1, 1), (128, 64),
                                 (96, 32, 64, 64),
                                 nn.LPPool2d(2, (3, 3), stride=(1, 1)), True)

        # Inception (3c)
        # self.layer16 = Inception(320, (3,5), (2,2), (256,64), (128,32,None,None), nn.MaxPool2d((3,3), stride=(2,2), padding=(0,0)), True)
        self.layer16 = Inception(
            320, (3, 5), (2, 2), (256, 64), (128, 32, None, None),
            nn.MaxPool2d((3, 3), stride=(2, 2), padding=(1, 1)), True)

        # Inception (4a)
        # self.layer17 = Inception(640, (3,5), (1,1), (192,64), (96,32,128,256), nn.LPPool2d(2, (3,3), stride=(3,3)), True)
        self.layer17 = Inception(640, (3, 5), (1, 1), (192, 64),
                                 (96, 32, 128, 256),
                                 nn.LPPool2d(2, (3, 3), stride=(1, 1)), True)

        # Inception (4e)
        # self.layer18 = Inception(640, (3,5), (2,2), (256,128), (160,64,None,None), nn.MaxPool2d((3,3), stride=(2,2), padding=(0,0)), True)
        self.layer18 = Inception(
            640, (3, 5), (2, 2), (256, 128), (160, 64, None, None),
            nn.MaxPool2d((3, 3), stride=(2, 2), padding=(1, 1)), True)

        # Inception (5a)
        # self.layer19 = Inception(1024, (3,), (1,), (384,), (96,96,256), nn.LPPool2d(2, (3,3), stride=(3,3)), True)
        self.layer19 = Inception(1024, (3, ), (1, ), (384, ), (96, 96, 256),
                                 nn.LPPool2d(2, (3, 3), stride=(1, 1)), True)

        # Inception (5b)
        # self.layer21 = Inception(736, (3,), (1,), (384,), (96,96,256), nn.MaxPool2d((3,3), stride=(2,2), padding=(0,0)), True)
        self.layer21 = Inception(
            736, (3, ), (1, ), (384, ), (96, 96, 256),
            nn.MaxPool2d((3, 3), stride=(1, 1), padding=(1, 1)), True)

        self.layer22 = nn.AvgPool2d((3, 3), stride=(1, 1), padding=(0, 0))
        self.layer25 = Linear(736, 128)

        #
        self.resize1 = nn.UpsamplingNearest2d(scale_factor=3)
        self.resize2 = nn.AvgPool2d(4)

        #
        # self.eval()

        if args.cuda:
            self.cuda(gpuDevice)
Example #25
    def __init__(self,
                 n_conv,
                 n_pool,
                 n_fc,
                 conv_filters,
                 kernel_sizes,
                 p_kernels,
                 strides,
                 p_strides,
                 paddings,
                 fc_dims,
                 in_channels,
                 flat_dim,
                 dropout=.0,
                 batch_norm=False):

        super(ConvNet, self).__init__()

        self.n_conv = n_conv  # integer
        self.n_pool = n_pool  # integer
        self.n_fc = n_fc  # integer
        self.conv_filters = conv_filters  # list with length n_conv
        self.kernel_sizes = kernel_sizes  # list with length n_conv (square filters)
        self.p_kernels = p_kernels  # list with length n_pool (square filters)
        self.strides = strides  # list with length n_conv
        self.p_strides = p_strides  # list with length n_pool
        self.paddings = paddings  # list with length n_conv
        self.fc_dims = fc_dims  # list with length n_fc
        self.in_channels = in_channels  # integer
        self.flat_dim = flat_dim  # integer

        # convolutional layers
        self.conv_layers = nn.ModuleList([
            nn.Conv2d(self.in_channels,
                      self.conv_filters[0],
                      self.kernel_sizes[0],
                      stride=self.strides[0],
                      padding=self.paddings[0])
        ])

        self.conv_layers.extend([
            nn.Conv2d(self.conv_filters[i - 1],
                      self.conv_filters[i],
                      self.kernel_sizes[i],
                      stride=self.strides[i],
                      padding=self.paddings[i]) for i in range(1, self.n_conv)
        ])

        # pooling layers
        self.pool_layers = nn.ModuleList(
            [nn.LPPool2d(2, self.p_kernels[0], stride=self.p_strides[0])])

        self.pool_layers.extend([
            nn.LPPool2d(2, self.p_kernels[i], stride=self.p_strides[i])
            for i in range(1, self.n_pool)
        ])

        # fully connected layers
        self.fc_layers = nn.ModuleList(
            [nn.Linear(self.flat_dim, self.fc_dims[0])])
        self.fc_layers.extend([
            nn.Linear(self.fc_dims[i - 1], self.fc_dims[i])
            for i in range(1, self.n_fc)
        ])
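A hypothetical instantiation of the builder above (sketch; all sizes are illustrative, and flat_dim must match the flattened size the conv/pool stack produces for your input, e.g. 16 * 8 * 8 for a 3x32x32 image here):

net = ConvNet(n_conv=2, n_pool=2, n_fc=2,
              conv_filters=[8, 16], kernel_sizes=[3, 3],
              p_kernels=[2, 2], strides=[1, 1], p_strides=[2, 2],
              paddings=[1, 1], fc_dims=[32, 10],
              in_channels=3, flat_dim=16 * 8 * 8)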
Example #26
 def __append_layer(self, net_style, args_dict):
     args_values_list = list(args_dict.values())
     if net_style == "Conv2d":
         self.layers.append(
             nn.Conv2d(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4], args_values_list[5],
                       args_values_list[6], args_values_list[7]))
     elif net_style == "MaxPool2d":
         self.layers.append(
             nn.MaxPool2d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4], args_values_list[5]))
     elif net_style == "Linear":
         self.layers.append(
             nn.Linear(args_values_list[0], args_values_list[1],
                       args_values_list[2]))
     elif net_style == "reshape":
         # Special case: for a reshape, append the target tensor size directly
         # print(type(args_values_list[0]))
         self.layers.append(args_values_list[0])
     elif net_style == "Conv1d":
         self.layers.append(
             nn.Conv1d(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4], args_values_list[5],
                       args_values_list[6], args_values_list[7]))
     elif net_style == "Conv3d":
         self.layers.append(
             nn.Conv3d(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4], args_values_list[5],
                       args_values_list[6], args_values_list[7]))
     elif net_style == "ConvTranspose1d":
         self.layers.append(
             nn.ConvTranspose1d(args_values_list[0], args_values_list[1],
                                args_values_list[2], args_values_list[3],
                                args_values_list[4], args_values_list[5],
                                args_values_list[6], args_values_list[7],
                                args_values_list[8]))
     elif net_style == "ConvTranspose2d":
         self.layers.append(
             nn.ConvTranspose2d(args_values_list[0], args_values_list[1],
                                args_values_list[2], args_values_list[3],
                                args_values_list[4], args_values_list[5],
                                args_values_list[6], args_values_list[7],
                                args_values_list[8]))
     elif net_style == "ConvTranspose3d":
         self.layers.append(
             nn.ConvTranspose3d(args_values_list[0], args_values_list[1],
                                args_values_list[2], args_values_list[3],
                                args_values_list[4], args_values_list[5],
                                args_values_list[6], args_values_list[7],
                                args_values_list[8]))
     elif net_style == "Unfold":
         self.layers.append(
             nn.Unfold(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3]))
     elif net_style == "Fold":
         self.layers.append(
             nn.Unfold(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4]))
     elif net_style == "MaxPool1d":
         self.layers.append(
             nn.MaxPool1d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4], args_values_list[5]))
     elif net_style == "MaxPool3d":
         self.layers.append(
             nn.MaxPool3d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4], args_values_list[5]))
     elif net_style == "MaxUnpool1d":
         self.layers.append(
             nn.MaxUnpool1d(args_values_list[0], args_values_list[1],
                            args_values_list[2]))
     elif net_style == "MaxUnpool2d":
         self.layers.append(
             nn.MaxUnpool2d(args_values_list[0], args_values_list[1],
                            args_values_list[2]))
     elif net_style == "MaxUnpool3d":
         self.layers.append(
             nn.MaxUnpool3d(args_values_list[0], args_values_list[1],
                            args_values_list[2]))
     elif net_style == "AvgPool1d":
         self.layers.append(
             nn.AvgPool1d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4]))
     elif net_style == "AvgPool2d":
         self.layers.append(
             nn.AvgPool2d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4]))
     elif net_style == "AvgPool3d":
         self.layers.append(
             nn.AvgPool3d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4]))
     elif net_style == "FractionalMaxPool2d":
         self.layers.append(
             nn.FractionalMaxPool2d(args_values_list[0],
                                    args_values_list[1],
                                    args_values_list[2],
                                    args_values_list[3],
                                    args_values_list[4]))
     elif net_style == "LPPool1d":
         self.layers.append(
             nn.LPPool1d(args_values_list[0], args_values_list[1],
                         args_values_list[2], args_values_list[3]))
     elif net_style == "LPPool2d":
         self.layers.append(
             nn.LPPool2d(args_values_list[0], args_values_list[1],
                         args_values_list[2], args_values_list[3]))
     elif net_style == "AdaptiveMaxPool1d":
         self.layers.append(
             nn.AdaptiveMaxPool1d(args_values_list[0], args_values_list[1]))
     elif net_style == "AdaptiveMaxPool2d":
         self.layers.append(
             nn.AdaptiveMaxPool2d(args_values_list[0], args_values_list[1]))
     elif net_style == "AdaptiveMaxPool3d":
         self.layers.append(
             nn.AdaptiveMaxPool3d(args_values_list[0], args_values_list[1]))
     elif net_style == "AdaptiveAvgPool1d":
         self.layers.append(nn.AdaptiveAvgPool1d(args_values_list[0]))
     elif net_style == "AdaptiveAvgPool2d":
         self.layers.append(nn.AdaptiveAvgPool2d(args_values_list[0]))
     elif net_style == "AdaptiveAvgPool3d":
         self.layers.append(nn.AdaptiveAvgPool3d(args_values_list[0]))
     elif net_style == "ReflectionPad1d":
         self.layers.append(nn.ReflectionPad1d(args_values_list[0]))
     elif net_style == "ReflectionPad2d":
         self.layers.append(nn.ReflectionPad2d(args_values_list[0]))
     elif net_style == "ReplicationPad1d":
         self.layers.append(nn.ReplicationPad1d(args_values_list[0]))
     elif net_style == "ReplicationPad2d":
         self.layers.append(nn.ReplicationPad2d(args_values_list[0]))
     elif net_style == "ReplicationPad3d":
         self.layers.append(nn.ReplicationPad3d(args_values_list[0]))
     elif net_style == "ZeroPad2d":
         self.layers.append(nn.ZeroPad2d(args_values_list[0]))
     elif net_style == "ConstantPad1d":
         self.layers.append(
             nn.ConstantPad1d(args_values_list[0], args_values_list[1]))
     elif net_style == "ConstantPad2d":
         self.layers.append(
             nn.ConstantPad2d(args_values_list[0], args_values_list[1]))
     elif net_style == "ConstantPad3d":
         self.layers.append(
             nn.ConstantPad3d(args_values_list[0], args_values_list[1]))
     elif net_style == "ELU":
         self.layers.append(nn.ELU(args_values_list[0],
                                   args_values_list[1]))
     elif net_style == "Hardshrink":
         self.layers.append(nn.Hardshrink(args_values_list[0]))
     elif net_style == "Hardtanh":
         self.layers.append(
             nn.Hardtanh(args_values_list[0], args_values_list[1],
                         args_values_list[2], args_values_list[3],
                         args_values_list[4]))
     elif net_style == "LeakyReLU":
         self.layers.append(
             nn.LeakyReLU(args_values_list[0], args_values_list[1]))
     elif net_style == "LogSigmoid":
         self.layers.append(nn.LogSigmoid())
     elif net_style == "PReLU":
         self.layers.append(
             nn.PReLU(args_values_list[0], args_values_list[1]))
     elif net_style == "ReLU":
         self.layers.append(nn.ReLU(args_values_list[0]))
     elif net_style == "ReLU6":
         self.layers.append(nn.ReLU6(args_values_list[0]))
     elif net_style == "RReLU":
         self.layers.append(
             nn.RReLU(args_values_list[0], args_values_list[1],
                      args_values_list[2]))
     elif net_style == "SELU":
         self.layers.append(nn.SELU(args_values_list[0]))
     elif net_style == "CELU":
         self.layers.append(
             nn.CELU(args_values_list[0], args_values_list[1]))
     elif net_style == "Sigmoid":
         self.layers.append(nn.Sigmoid())
     elif net_style == "Softplus":
         self.layers.append(
             nn.Softplus(args_values_list[0], args_values_list[1]))
     elif net_style == "Softshrink":
         self.layers.append(nn.Softshrink(args_values_list[0]))
     elif net_style == "Softsign":
         self.layers.append(nn.Softsign())
     elif net_style == "Tanh":
         self.layers.append(nn.Tanh())
     elif net_style == "Tanhshrink":
         self.layers.append(nn.Tanhshrink())
     elif net_style == "Threshold":
         self.layers.append(
             nn.Threshold(args_values_list[0], args_values_list[1],
                          args_values_list[2]))
     elif net_style == "Softmin":
         self.layers.append(nn.Softmin(args_values_list[0]))
     elif net_style == "Softmax":
         self.layers.append(nn.Softmax(args_values_list[0]))
     elif net_style == "Softmax2d":
         self.layers.append(nn.Softmax2d())
     elif net_style == "LogSoftmax":
         self.layers.append(nn.LogSoftmax(args_values_list[0]))
     elif net_style == "AdaptiveLogSoftmaxWithLoss":
         self.layers.append(
             nn.AdaptiveLogSoftmaxWithLoss(args_values_list[0],
                                           args_values_list[1],
                                           args_values_list[2],
                                           args_values_list[3],
                                           args_values_list[4]))
     elif net_style == "BatchNorm1d":
         self.layers.append(
             nn.BatchNorm1d(args_values_list[0], args_values_list[1],
                            args_values_list[2], args_values_list[3],
                            args_values_list[4]))
     elif net_style == "BatchNorm2d":
         self.layers.append(
             nn.BatchNorm2d(args_values_list[0], args_values_list[1],
                            args_values_list[2], args_values_list[3],
                            args_values_list[4]))
     elif net_style == "BatchNorm3d":
         self.layers.append(
             nn.BatchNorm3d(args_values_list[0], args_values_list[1],
                            args_values_list[2], args_values_list[3],
                            args_values_list[4]))
     elif net_style == "GroupNorm":
         self.layers.append(
             nn.GroupNorm(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3]))
     elif net_style == "InstanceNorm1d":
         self.layers.append(
             nn.InstanceNorm1d(args_values_list[0], args_values_list[1],
                               args_values_list[2], args_values_list[3],
                               args_values_list[4]))
     elif net_style == "InstanceNorm2d":
         self.layers.append(
             nn.InstanceNorm2d(args_values_list[0], args_values_list[1],
                               args_values_list[2], args_values_list[3],
                               args_values_list[4]))
     elif net_style == "InstanceNorm3d":
         self.layers.append(
             nn.InstanceNorm3d(args_values_list[0], args_values_list[1],
                               args_values_list[2], args_values_list[3],
                               args_values_list[4]))
     elif net_style == "LayerNorm":
         self.layers.append(
             nn.LayerNorm(args_values_list[0], args_values_list[1],
                          args_values_list[2]))
     elif net_style == "LocalResponseNorm":
         self.layers.append(
             nn.LocalResponseNorm(args_values_list[0], args_values_list[1],
                                  args_values_list[2], args_values_list[3]))
     elif net_style == "Linear":
         self.layers.append(
             nn.Linear(args_values_list[0], args_values_list[1],
                       args_values_list[2]))
     elif net_style == "Dropout":
         self.layers.append(
             nn.Dropout(args_values_list[0], args_values_list[1]))
     elif net_style == "Dropout2d":
         self.layers.append(
             nn.Dropout2d(args_values_list[0], args_values_list[1]))
     elif net_style == "Dropout3d":
         self.layers.append(
             nn.Dropout3d(args_values_list[0], args_values_list[1]))
     elif net_style == "AlphaDropout":
         self.layers.append(
             nn.AlphaDropout(args_values_list[0], args_values_list[1]))
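A more compact alternative to the dispatch chain above (my own sketch): resolve the layer class by name with getattr and splat the argument values; the "reshape" special case would still need separate handling.

import torch.nn as nn

def append_layer(layers, net_style, args_dict):
    # "LPPool2d" -> nn.LPPool2d, "Conv2d" -> nn.Conv2d, etc.
    layers.append(getattr(nn, net_style)(*args_dict.values()))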
Example #27
    def __init__(self, in_channels, init=False):
        def scaled_tanh_nonlinearity():
            return Expression(lambda x: 1.7156 * torch.tanh(2 * x / 3))

        # spatial layer with init from icp?!
        super().__init__()
        spatial_conv = nn.Sequential()
        spatial_conv.add_module(
            "spatial_conv",
            torch.nn.Conv2d(in_channels=in_channels,
                            out_channels=in_channels,
                            kernel_size=(1, 1),
                            stride=1,
                            padding=0,
                            dilation=1,
                            groups=1,
                            bias=False))
        spatial_conv.add_module("activation", scaled_tanh_nonlinearity())
        self.spatial_conv = spatial_conv

        temporal_conv = nn.Sequential()

        NUM_FILTERS = [2, 4, 8]
        DILATIONS = [1, 2, 4]
        temp_in_chans = 1
        for layer, (num_filters,
                    dilation) in enumerate(zip(NUM_FILTERS, DILATIONS)):
            temporal_layer = nn.Conv2d(temp_in_chans,
                                       num_filters,
                                       dilation=dilation,
                                       kernel_size=(1, 17),
                                       bias=False)

            # init temp layer with wavelets
            if init:
                pass

            temporal_conv.add_module(f'temporal_conv{layer}', temporal_layer)
            temporal_conv.add_module(f'activation{layer}',
                                     scaled_tanh_nonlinearity())

            temp_in_chans = num_filters

        self.temporal_conv = temporal_conv
        # pooling layer
        # FIXME: this implementation does not follow published paper
        self.l2_norm = nn.LPPool2d(2, (1, 10), stride=1)
        # THIS IS THE PAPER IMPLEMENTATION
        # 40 might be very aggressive for smaller sampling rates
        # TODO: consider passing pool size and stride as an input argument
        # self.l2_norm = nn.LPPool2d(2, (1, 40), stride=40)
        # lstm
        # FIXME: this implementation does not follow published paper
        self.lstm = nn.LSTM(
            input_size=in_channels * 8,
            hidden_size=10,
            batch_first=True,
            # THIS IS THE PAPER IMPLEMENTATION
            # self.lstm = nn.LSTM(input_size=in_channels * 8 * 25, hidden_size=10, batch_first=False,
            num_layers=1,
            bias=True)
        # linear
        # TODO: replace 1 with NxClasses for multi-task
        self.fc = SequenceWise(nn.Linear(10, 1))
Example #28
def rn18_l4_1a_lppool_p1_5():
    model = rn18_l4_1a_nc_1k_scratch()
    model.classifier = nn.Sequential(nn.LPPool2d(1.5, kernel_size=7),
                                     LambdaLayer(lambda x: x / 13.390518),
                                     nn.Flatten())
    return model
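The magic constant is just 49 ** (1 / 1.5) = 13.390518..., i.e. Example #2's n = 49**(1 / p) normalization with p = 1.5 baked in:

print(49 ** (1 / 1.5))  # 13.390518...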
Example #29
# Linear layer
class torch.nn.Linear(in_features, out_features, bias=True)
torch.nn.functional.linear(input, weight, bias=None)

# Convolutional layer
class torch.nn.Conv1d(in_channels, out_channels, kernel_size,
                      stride=1, padding=0, dilation=1, groups=1, bias=True)

# Applies 2D power-average (Lp) pooling over an input signal
class torch.nn.LPPool2d(norm_type, kernel_size, stride=None, ceil_mode=False)
torch.nn.functional.lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False)

class torch.nn.AdaptiveMaxPool1d(output_size, return_indices=False)
torch.nn.functional.adaptive_max_pool1d(input, output_size, return_indices=False)

class torch.nn.AdaptiveAvgPool1d(output_size)

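A quick demo of the functional form listed above (sketch):

import torch
import torch.nn.functional as F

x = torch.rand(1, 1, 4, 4)
y = F.lp_pool2d(x, norm_type=2, kernel_size=2, stride=2)
print(y.shape)  # torch.Size([1, 1, 2, 2])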
Example #30
    def __init__(self,
                 sampling='down',
                 in_dim=64,
                 pool_ksize=2,
                 down_ch=1,
                 dconv_dim=1):
        super(Multi_Attn, self).__init__()
        self.dim = in_dim
        self.pool_ksize = pool_ksize
        self.sampling = sampling

        # pool methods
        self.max_pool = nn.MaxPool2d(pool_ksize, pool_ksize)
        self.avg_pool = nn.AvgPool2d(pool_ksize, pool_ksize)
        self.lp_pool = nn.LPPool2d(2, pool_ksize, pool_ksize)
        self.pools = [self.max_pool, self.avg_pool, self.lp_pool]

        self.xin_conv = nn.Conv2d(in_channels=in_dim,
                                  out_channels=in_dim * 2,
                                  kernel_size=4,
                                  stride=2,
                                  padding=1)
        self.yin_conv = nn.Conv2d(in_channels=in_dim,
                                  out_channels=in_dim * 2,
                                  kernel_size=4,
                                  stride=2,
                                  padding=1)

        self.q_conv = nn.Conv2d(in_channels=in_dim,
                                out_channels=in_dim // down_ch,
                                kernel_size=1)
        self.k_conv = nn.Conv2d(in_channels=in_dim,
                                out_channels=in_dim // down_ch,
                                kernel_size=1)
        self.v1_conv = nn.Conv2d(in_channels=in_dim,
                                 out_channels=in_dim,
                                 kernel_size=1)
        self.v2_conv = nn.Conv2d(in_channels=in_dim,
                                 out_channels=in_dim,
                                 kernel_size=1)

        self.out_deconv1_1 = nn.ConvTranspose2d(in_channels=in_dim,
                                                out_channels=in_dim *
                                                dconv_dim,
                                                kernel_size=4,
                                                stride=2,
                                                padding=1)
        self.out_deconv1_2 = nn.ConvTranspose2d(in_channels=in_dim,
                                                out_channels=in_dim *
                                                dconv_dim,
                                                kernel_size=4,
                                                stride=2,
                                                padding=1)

        self.out_deconv2_1 = nn.ConvTranspose2d(in_channels=in_dim,
                                                out_channels=in_dim *
                                                dconv_dim,
                                                kernel_size=4,
                                                stride=2,
                                                padding=1)
        self.out_deconv2_2 = nn.ConvTranspose2d(in_channels=in_dim,
                                                out_channels=in_dim *
                                                dconv_dim,
                                                kernel_size=4,
                                                stride=2,
                                                padding=1)

        self.softmax1 = nn.Softmax(dim=-1)
        self.softmax2 = nn.Softmax(dim=1)

        self.gamma1 = nn.Parameter(torch.zeros(1))
        self.gamma2 = nn.Parameter(torch.zeros(1))

        self.a1 = nn.Parameter(torch.ones(1))
        self.a2 = nn.Parameter(torch.ones(1))
        self.a3 = nn.Parameter(torch.ones(1))

        self.b1 = nn.Parameter(torch.ones(1))
        self.b2 = nn.Parameter(torch.ones(1))
        self.b3 = nn.Parameter(torch.ones(1))
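Because the three pools above share kernel size and stride, their outputs align and can be combined elementwise (sketch):

import torch
import torch.nn as nn

k = 2
pools = [nn.MaxPool2d(k, k), nn.AvgPool2d(k, k), nn.LPPool2d(2, k, k)]
x = torch.rand(1, 64, 32, 32)
print([tuple(p(x).shape) for p in pools])  # three times (1, 64, 16, 16)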