def intermediate_forward(self, x, layer_index):
    """Return the pooled, flattened activation of stage ``layer_index``.

    Stages 0-2 are reduced with 32x32 max pooling, stage 3 with 16x16
    average pooling and stage 4 with 4x4 average pooling; each result is
    flattened to ``(batch, -1)``.  Replaces the original duplicated
    if-chain with a single table-driven loop (identical behavior).

    NOTE(review): assumes ``self.net`` has at least five child modules and
    that the activations are spatially large enough for the pooling
    kernels — confirm against the model definition.
    """
    # (pooling function, kernel size) per stage — same constants as the
    # original hard-coded branches.
    pool_specs = [
        (F.max_pool2d, 32),
        (F.max_pool2d, 32),
        (F.max_pool2d, 32),
        (F.avg_pool2d, 16),
        (F.avg_pool2d, 4),
    ]
    stages = list(self.net.children())
    out = x
    for idx, (pool, kernel) in enumerate(pool_specs):
        out = stages[idx](out)
        if layer_index == idx:
            return pool(out, kernel).view(out.size(0), -1)
    # The original fell off the end for out-of-range indices (implicitly
    # returning None); keep that behavior for callers.
    return None
def feature_list(self, x):
    """Run ``x`` through the six stages of ``self.net`` and collect one
    pooled, flattened feature vector per stage.

    Stages 0-2 use 32x32 max pooling, stage 3 uses 16x16 average pooling,
    stage 4 uses 4x4 average pooling.  The final stage is average-pooled
    (kernel 4), flattened, and mapped through ``self.gaussian_layer``
    before being appended.

    Returns:
        list of six tensors, each of shape ``(batch, -1)``.
    """
    # Renamed from the original local ``feature_list``, which shadowed
    # this function's own name.
    stages = list(self.net.children())
    pool_specs = [
        (F.max_pool2d, 32),
        (F.max_pool2d, 32),
        (F.max_pool2d, 32),
        (F.avg_pool2d, 16),
        (F.avg_pool2d, 4),
    ]
    out_list = []
    out = x
    for stage, (pool, kernel) in zip(stages, pool_specs):
        out = stage(out)
        out_list.append(pool(out, kernel).view(out.size(0), -1))
    # Final stage: pool, flatten, then the Gaussian layer.
    out = stages[5](out)
    out = F.avg_pool2d(out, 4).view(out.size(0), -1)
    out_list.append(self.gaussian_layer(out))
    return out_list
def forward(self, x):
    """DenseNet-style forward pass: conv stem, three dense blocks (the
    first two followed by transition layers), BN+ReLU, 8x8 global average
    pooling, linear classifier, log-softmax.

    Fixes vs. the original:
    * ``F.log_softmax`` gets an explicit ``dim=1`` — the implicit-dim form
      is deprecated and ambiguous.
    * The pooled map is flattened with ``view(batch, -1)`` instead of
      ``torch.squeeze``, which wrongly collapsed the batch dimension when
      the batch size was 1.
    """
    out = self.conv1(x)
    out = self.trans1(self.dense1(out))
    out = self.trans2(self.dense2(out))
    out = self.dense3(out)
    out = F.avg_pool2d(F.relu(self.bn1(out)), 8)
    out = out.view(out.size(0), -1)  # keep the batch dim even for batch==1
    return F.log_softmax(self.fc(out), dim=1)
def forward(self, x):
    """ResNet-style forward: stem conv + ReLU, four residual stages,
    4x4 global average pooling, flatten, final linear layer."""
    h = F.relu(self.conv1(x))
    for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
        h = stage(h)
    h = F.avg_pool2d(h, 4)
    h = h.view(h.size(0), -1)
    return self.fc1(h)
def forward(self, x):
    """Dense-block forward with squeeze-and-excitation (SE) channel
    recalibration.

    In the ``upsample`` branch only the newly produced feature maps are
    concatenated and recalibrated; otherwise the input is densely
    concatenated with every layer's output and the full stack is
    recalibrated.

    Fix vs. the original: ``F.sigmoid`` is deprecated (and removed in
    recent PyTorch releases) — replaced with ``torch.sigmoid``; the
    computation is identical.
    """
    if self.upsample:
        new_features = []
        for layer in self.layers:
            out = layer(x)
            x = torch.cat([x, out], 1)  # dense connectivity for later layers
            new_features.append(out)
        out = torch.cat(new_features, 1)  # only the new maps, not the input
        fm_size = out.size()[2]
        # Squeeze: global average pool; excite: two-layer gating MLP.
        scale_weight = F.avg_pool2d(out, fm_size)
        scale_weight = F.relu(self.SE_upsample1(scale_weight))
        scale_weight = torch.sigmoid(self.SE_upsample2(scale_weight))
        out = out * scale_weight.expand_as(out)
        return out
    else:
        for layer in self.layers:
            out = layer(x)
            x = torch.cat([x, out], 1)  # 1 = channel axis
        fm_size = x.size()[2]
        scale_weight = F.avg_pool2d(x, fm_size)
        scale_weight = F.relu(self.SE1(scale_weight))
        scale_weight = torch.sigmoid(self.SE2(scale_weight))
        x = x * scale_weight.expand_as(x)
        return x
def forward(self, x):
    """Run ``x`` through the backbone's children up to (but excluding)
    its ``avgpool`` module, then apply global average pooling and the
    optional embedding / normalization / dropout / classifier head.

    Fix vs. the original: ``self.base._module.items`` is not a valid
    attribute — the registered-children dict is ``_modules`` and
    ``items`` must be *called*.  The original raised ``AttributeError``
    on every invocation.
    """
    for name, module in self.base._modules.items():
        if name == 'avgpool':
            break  # stop before the backbone's own pooling/classifier head
        x = module(x)
    # Global average pooling over whatever spatial extent remains.
    x = F.avg_pool2d(x, x.size()[2:])
    x = x.view(x.size(0), -1)
    if self.cut_at_pooling:
        return x
    if self.has_embedding:
        x = self.feat(x)
        x = self.feat_bn(x)
    if self.norm:
        x = F.normalize(x)
    if self.dropout > 0:
        x = self.drop(x)
    # NOTE(review): comparing ``self.classifier`` to 0 only makes sense if
    # it is a numeric flag (e.g. num_classes), yet it is also called like a
    # module below — confirm the constructor's intent.
    if self.classifier > 0:
        x = self.classifier(x)
    return x
def residual(self, x):
    r"""Pre-activation residual branch: (LayerNorm -> activation -> conv)
    applied twice, followed by optional 2x2 average-pool downsampling.

    Both LayerNorm modules are created lazily on the first call because
    their normalized shape depends on the runtime spatial size of ``x``.
    """
    if self.norm1 is None:
        # Normalize over the full (C, H, W) extent of the input.
        self.norm1 = nn.LayerNorm(
            [self.in_channels, x.shape[2], x.shape[3]])
    h = self.c1(self.activation(self.norm1(x)))
    if self.norm2 is None:
        # Built after c1 so the spatial dims reflect its output.
        self.norm2 = nn.LayerNorm(
            [self.hidden_channels, h.shape[2], h.shape[3]])
    h = self.c2(self.activation(self.norm2(h)))
    if self.downsample:
        h = F.avg_pool2d(h, 2)
    return h
def gem(x, p=3, eps=1e-6):
    """Generalized-mean (GeM) pooling over the full spatial extent.

    Computes ``mean(clamp(x, min=eps) ** p) ** (1/p)`` per channel;
    ``p=1`` reduces to average pooling, large ``p`` approaches max
    pooling.  ``eps`` keeps the power well-defined for non-positive
    activations.
    """
    clamped = x.clamp(min=eps)
    spatial = (x.size(-2), x.size(-1))
    pooled = F.avg_pool2d(clamped.pow(p), spatial)
    return pooled.pow(1.0 / p)
# Tutorial notes: convolution, pooling (downsampling), upsampling, ReLU.
# NOTE(review): ``w`` and ``b`` are defined earlier in the original
# script (outside this chunk) — confirm before running standalone.
out = F.conv2d(x, w, b, stride=1, padding=1)
x = torch.randn(1, 3, 28, 28)
out = F.conv2d(x, w, b, stride=1, padding=1)

# P55: pooling/downsampling -> max/avg pooling; upsampling -> interpolate.
x = out
layer = nn.MaxPool2d(2, stride=2)  # window size and stride
out = layer(x)
out = F.avg_pool2d(x, 2, stride=2)  # functional equivalent of avg pooling

# F.interpolate
x = out
out = F.interpolate(x, scale_factor=2, mode='nearest')  # scale_factor is the upscaling factor; 'nearest' = nearest-neighbor mode
out.shape
# Fixed typo: original called F.interpolaye, which raises AttributeError.
out = F.interpolate(x, scale_factor=3, mode='nearest')
out.shape

# Common conv unit: conv2d -> batch normalization -> pool -> relu
# (the order may vary).
x.shape
layer = nn.ReLU(inplace=True)  # inplace=True writes the output into x's own storage, saving roughly half the memory