def forward(self, x):
    """Channel-attention gate: pool x over time, score channels with the
    shared MLP, and rescale x by the sigmoid of the summed scores.

    Parameters
    ----------
    x : Tensor
        Input tensor; assumed (batch, channels, length) from the 1-d
        pooling over ``x.size(2)`` -- TODO confirm against callers.

    Returns
    -------
    Tensor
        ``x`` scaled per channel by the attention weights (same shape).
    """
    channel_att_sum = None
    for pool_type in self.pool_types:
        if pool_type == 'avg':
            # Global average pool over the full temporal extent.
            avg_pool = F.avg_pool1d(x, x.size(2), stride=x.size(2))
            channel_att_raw = self.mlp(avg_pool)
        elif pool_type == 'max':
            # Global max pool over the full temporal extent.
            max_pool = F.max_pool1d(x, x.size(2), stride=x.size(2))
            channel_att_raw = self.mlp(max_pool)
        elif pool_type == 'lp':
            # BUG FIX: the original call omitted the mandatory norm_type
            # argument (x.size(2) was consumed as norm_type and kernel_size
            # was missing), raising a TypeError whenever 'lp' was selected.
            # Use the L2 norm over the full temporal extent.
            lp_pool = F.lp_pool1d(x, 2, x.size(2), stride=x.size(2))
            channel_att_raw = self.mlp(lp_pool)
        elif pool_type == 'lse':
            # Log-sum-exp pooling (smooth approximation of max).
            lse_pool = logsumexp_2d(x)
            channel_att_raw = self.mlp(lse_pool)
        if channel_att_sum is None:
            channel_att_sum = channel_att_raw
        else:
            channel_att_sum = channel_att_sum + channel_att_raw
    # Broadcast the per-channel gate back over the temporal axis.
    scale = torch.sigmoid(channel_att_sum).unsqueeze(2).expand_as(x)
    return x * scale
def forward(self, inputs: 'Tensor') -> 'Tensor': """ Forward pass method for generalize LPPooling layer. Parameters ---------- inputs : Tensor input tensor. Returns ------- Tensor result of pooling operation. """ x = pad(inputs, self.pad_sizes, mode=self.padding_mode, value=self._padding_value) if self.ndims == 2: pool_args = (x, self.norm_type, self.kernel_size[0], self.stride[0]) return F.lp_pool1d(*pool_args) elif self.ndims == 3: pool_args = (x, self.norm_type, self.kernel_size, self.stride) return F.lp_pool2d(*pool_args) elif self.ndims == 4: return F.lp_pool3d(*pool_args)
def forward(self, x):
    """Apply a fixed cascade of seven 1-D power-average (LP) pools.

    The norm order, kernel size, stride, and ceil behavior vary per
    stage; the stages run in sequence on the output of the previous one.
    """
    stages = (
        dict(norm_type=2, kernel_size=3),
        dict(norm_type=2, kernel_size=4, stride=2),
        dict(norm_type=1, kernel_size=3, stride=1, ceil_mode=False),
        dict(norm_type=1, kernel_size=5, stride=1, ceil_mode=True),
        dict(norm_type=1.2, kernel_size=3, stride=2, ceil_mode=False),
        dict(norm_type=0.5, kernel_size=2, stride=1, ceil_mode=True),
        dict(norm_type=0.1, kernel_size=4, stride=1, ceil_mode=False),
    )
    out = x
    for kwargs in stages:
        out = F.lp_pool1d(out, **kwargs)
    return out
def forward(self, x):
    """Two conv/tanh/max-pool stages, triple global pooling, then a
    fully connected head.

    Parameters
    ----------
    x : Tensor
        Input of shape (batch, in_channels, length); ``self.conv2`` is
        assumed to emit 32 channels (the view below requires it).

    Returns
    -------
    Tensor
        Output of ``self.fully_connected1``, shape (batch, out_features).
    """
    # Conv -> tanh -> length-halving max pool, twice.
    x = F.max_pool1d(torch.tanh(self.conv1(x)), 2, stride=2)
    x = F.max_pool1d(torch.tanh(self.conv2(x)), 2, stride=2)
    # Pool globally three ways (avg, max, L2) over the time axis and
    # concatenate per sample.
    # BUG FIX: the original torch.cat used the default dim=0 (the batch
    # axis), so the following view(-1, 32*3) interleaved features from
    # different samples whenever batch size > 1. Concatenate along the
    # channel axis (dim=1) instead.
    time_steps = x.shape[-1]
    x = torch.cat(
        (F.avg_pool1d(x, time_steps),
         F.max_pool1d(x, time_steps),
         F.lp_pool1d(x, 2, time_steps)),
        dim=1,
    )
    x = x.view(-1, 32 * 3)
    # Fully connected head.
    x = self.fully_connected1(x)
    return x
def test_lp_pool1d(self):
    """Smoke-test F.lp_pool1d on a CUDA tensor with ceil_mode enabled."""
    sample = torch.randn(1, 32, 64, device='cuda', dtype=self.dtype)
    output = F.lp_pool1d(sample, 2, 3, stride=2, ceil_mode=True)
def forward(self, input: Tensor) -> Tensor:
    """Quantize the input, then apply 1-D power-average (LP) pooling
    with the module's configured norm, kernel, stride, and ceil mode.
    """
    quantized = self.quant_handle(input)
    return F.lp_pool1d(
        quantized,
        float(self.norm_type),
        self.kernel_size,
        self.stride,
        self.ceil_mode,
    )