def forward(self, x):
    # Normalize x from [-1, 1] to the ImageNet channel statistics
    x = (x + 1.) / 2.0
    x = (x - self.mean) / self.std
    # Upsample if necessary
    if x.shape[2] != 299 or x.shape[3] != 299:
        x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True)
    # 299 x 299 x 3
    x = self.net.Conv2d_1a_3x3(x)
    # 149 x 149 x 32
    x = self.net.Conv2d_2a_3x3(x)
    # 147 x 147 x 32
    x = self.net.Conv2d_2b_3x3(x)
    # 147 x 147 x 64
    x = F.max_pool2d(x, kernel_size=3, stride=2)
    # 73 x 73 x 64
    x = self.net.Conv2d_3b_1x1(x)
    # 73 x 73 x 80
    x = self.net.Conv2d_4a_3x3(x)
    # 71 x 71 x 192
    x = F.max_pool2d(x, kernel_size=3, stride=2)
    # 35 x 35 x 192
    x = self.net.Mixed_5b(x)
    # 35 x 35 x 256
    x = self.net.Mixed_5c(x)
    # 35 x 35 x 288
    x = self.net.Mixed_5d(x)
    # 35 x 35 x 288
    x = self.net.Mixed_6a(x)
    # 17 x 17 x 768
    x = self.net.Mixed_6b(x)
    # 17 x 17 x 768
    x = self.net.Mixed_6c(x)
    # 17 x 17 x 768
    x = self.net.Mixed_6d(x)
    # 17 x 17 x 768
    x = self.net.Mixed_6e(x)
    # 17 x 17 x 768
    x = self.net.Mixed_7a(x)
    # 8 x 8 x 1280
    x = self.net.Mixed_7b(x)
    # 8 x 8 x 2048
    x = self.net.Mixed_7c(x)
    # 8 x 8 x 2048
    # Global average pool over the 8 x 8 spatial grid
    pool = torch.mean(x.view(x.size(0), x.size(1), -1), 2)
    # 1 x 1 x 2048
    # F.dropout with training=False is a no-op here
    logits = self.net.fc(
        F.dropout(pool, training=False).view(pool.size(0), -1))
    # 1000 (num_classes)
    return pool, logits
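# For context, a minimal sketch of the constructor this forward pass assumes.
# The class name WrapInception is an assumption (it is not shown above); the
# mean/std values are the standard ImageNet statistics implied by the
# normalization step, and self.net is assumed to be a torchvision Inception v3
# whose submodules are addressed above.
import torch
import torch.nn as nn

class WrapInception(nn.Module):
    def __init__(self, net):
        super().__init__()
        self.net = net
        # Channel statistics shaped for broadcasting over N x C x H x W input
        self.register_buffer(
            'mean', torch.tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1))
        self.register_buffer(
            'std', torch.tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1))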
def _forward(self, x):
    # N x 3 x 299 x 299
    x = self.Conv2d_1a_3x3(x)
    # N x 32 x 149 x 149
    x = self.Conv2d_2a_3x3(x)
    # N x 32 x 147 x 147
    x = self.Conv2d_2b_3x3(x)
    # N x 64 x 147 x 147
    x = F.max_pool2d(x, kernel_size=3, stride=2)
    # N x 64 x 73 x 73
    x = self.Conv2d_3b_1x1(x)
    # N x 80 x 73 x 73
    x = self.Conv2d_4a_3x3(x)
    # N x 192 x 71 x 71
    x = F.max_pool2d(x, kernel_size=3, stride=2)
    # N x 192 x 35 x 35
    x = self.Mixed_5b(x)
    # N x 256 x 35 x 35
    x = self.Mixed_5c(x)
    # N x 288 x 35 x 35
    x = self.Mixed_5d(x)
    # N x 288 x 35 x 35
    x = self.Mixed_6a(x)
    # N x 768 x 17 x 17
    x = self.Mixed_6b(x)
    # N x 768 x 17 x 17
    x = self.Mixed_6c(x)
    # N x 768 x 17 x 17
    x = self.Mixed_6d(x)
    # N x 768 x 17 x 17
    x = self.Mixed_6e(x)
    # N x 768 x 17 x 17
    aux_defined = self.training and self.aux_logits
    if aux_defined:
        aux = self.AuxLogits(x)
    else:
        aux = None
    # N x 768 x 17 x 17
    x = self.Mixed_7a(x)
    # N x 1280 x 8 x 8
    x = self.Mixed_7b(x)
    # N x 2048 x 8 x 8
    x = self.Mixed_7c(x)
    # N x 2048 x 8 x 8
    # Adaptive average pooling
    x = F.adaptive_avg_pool2d(x, (1, 1))
    # N x 2048 x 1 x 1
    x = F.dropout(x, training=self.training)
    # N x 2048 x 1 x 1
    x = torch.flatten(x, 1)
    # N x 2048
    x = self.fc(x)
    # N x 1000 (num_classes)
    return x, aux
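# A quick check of the auxiliary-head behavior above, assuming this is
# torchvision's Inception3 (consistent with the attribute names): the aux
# classifier only runs in training mode, so at eval time the second return
# value is None. Constructor arguments vary across torchvision versions, and
# _forward is a private method used here only for illustration.
import torch
from torchvision.models import inception_v3

model = inception_v3(aux_logits=True)
x = torch.randn(2, 3, 299, 299)

model.train()
out, aux = model._forward(x)   # aux head active: aux has shape (2, 1000)

model.eval()
out, aux = model._forward(x)   # aux_defined is False, so aux is None
assert aux is None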
def forward(self, x, y=None):
    # Apply convs
    theta = self.theta(x)
    phi = F.max_pool2d(self.phi(x), [2, 2])
    g = F.max_pool2d(self.g(x), [2, 2])
    # Perform reshapes
    theta = theta.view(-1, self.ch // 8, x.shape[2] * x.shape[3])
    phi = phi.view(-1, self.ch // 8, x.shape[2] * x.shape[3] // 4)
    g = g.view(-1, self.ch // 2, x.shape[2] * x.shape[3] // 4)
    # Matmul and softmax to get attention maps
    beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1)
    # Attention map times g path
    o = self.o(
        torch.bmm(g, beta.transpose(1, 2)).view(-1, self.ch // 2,
                                                x.shape[2], x.shape[3]))
    return self.gamma * o + x
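# The projections this attention block assumes, sketched with plain nn.Conv2d.
# This is a sketch, not the exact implementation: SAGAN/BigGAN-style code uses
# spectrally normalized 1x1 convolutions. The ch // 8 and ch // 2 widths and
# the zero-initialized gamma are inferred from the reshapes in forward() above.
import torch
import torch.nn as nn

class Attention(nn.Module):
    def __init__(self, ch):
        super().__init__()
        self.ch = ch
        # 1x1 convs producing the query (theta), key (phi), and value (g) maps
        self.theta = nn.Conv2d(ch, ch // 8, kernel_size=1, bias=False)
        self.phi = nn.Conv2d(ch, ch // 8, kernel_size=1, bias=False)
        self.g = nn.Conv2d(ch, ch // 2, kernel_size=1, bias=False)
        self.o = nn.Conv2d(ch // 2, ch, kernel_size=1, bias=False)
        # Learnable residual gate, zero-initialized so the block starts as identity
        self.gamma = nn.Parameter(torch.tensor(0.))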
def _forward(self, x):
    # Grid-size reduction: every branch halves the spatial resolution
    branch3x3 = self.branch3x3(x)

    branch3x3dbl = self.branch3x3dbl_1(x)
    branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
    branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

    branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

    # Branch outputs are concatenated channel-wise by the caller (see the
    # forward() sketch after the next block)
    outputs = [branch3x3, branch3x3dbl, branch_pool]
    return outputs
def _forward(self, x):
    branch3x3 = self.branch3x3_1(x)
    branch3x3 = self.branch3x3_2(branch3x3)

    branch7x7x3 = self.branch7x7x3_1(x)
    branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
    branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
    branch7x7x3 = self.branch7x7x3_4(branch7x7x3)

    branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

    # As above, the caller concatenates the branch outputs channel-wise
    outputs = [branch3x3, branch7x7x3, branch_pool]
    return outputs
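# Both reduction blocks above return their branches as a list; in torchvision
# the public forward() concatenates them along the channel dimension:
def forward(self, x):
    outputs = self._forward(x)
    return torch.cat(outputs, 1)  # channel-wise concat of all branches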