def get_heatmap(self, x, b_preprocess=True):
    """Return 0-1 normalized heatmaps for the input batch.

    The input is assumed to lie in [-1, 1] (it is remapped to [0, 1]
    before the forward pass — TODO confirm against callers).
    """
    # Inference only: no gradient flows back into the input.
    x.stop_gradient = True
    resized = F.interpolate(x, size=256, mode='bilinear')
    # Map [-1, 1] -> [0, 1] for the network.
    outputs, _ = self(resized * 0.5 + 0.5)
    # Drop the last (background) channel of the final stage's output.
    heatmaps = outputs[-1][:, :-1, :, :]
    if b_preprocess:
        # Upsample heatmaps back to the resized input resolution.
        factor = resized.shape[2] // heatmaps.shape[2]
        heatmaps = F.interpolate(heatmaps, scale_factor=factor,
                                 mode='bilinear', align_corners=True)
        heatmaps = preprocess(heatmaps)
    return heatmaps
def _residual(self, x, s):
    """Style-conditioned residual branch: norm -> act -> (optional 2x
    upsample) -> conv, applied twice."""
    h = self.norm1(x, s)
    h = self.actv(h)
    if self.upsample:
        # Nearest-neighbour upsampling happens before the first conv.
        h = F.interpolate(h, scale_factor=2, mode='nearest')
    h = self.conv1(h)
    h = self.norm2(h, s)
    h = self.actv(h)
    return self.conv2(h)
def forward(self, x):
    """Run the InceptionV3 trunk and return (pooled features, logits).

    The input is expected in [-1, 1]; it is shifted to [0, 1],
    standardized with self.mean/self.std, and resized to 299x299
    when necessary.
    """
    # Shift [-1, 1] -> [0, 1], then standardize.
    x = (x + 1.) / 2.0
    x = (x - self.mean) / self.std
    # Resize to the network's native 299x299 resolution if needed.
    if x.shape[2] != 299 or x.shape[3] != 299:
        x = F.interpolate(x,
                          size=(299, 299),
                          mode='bilinear',
                          align_corners=True)
    # Stem: 299x299x3 -> 73x73x64
    x = self.net.Conv2d_1a_3x3(x)
    x = self.net.Conv2d_2a_3x3(x)
    x = self.net.Conv2d_2b_3x3(x)
    x = F.max_pool2d(x, kernel_size=3, stride=2)
    # 73x73x64 -> 35x35x192
    x = self.net.Conv2d_3b_1x1(x)
    x = self.net.Conv2d_4a_3x3(x)
    x = F.max_pool2d(x, kernel_size=3, stride=2)
    # Inception blocks: 35x35 -> 17x17 -> 8x8.
    for block in (self.net.Mixed_5b, self.net.Mixed_5c, self.net.Mixed_5d,
                  self.net.Mixed_6a, self.net.Mixed_6b, self.net.Mixed_6c,
                  self.net.Mixed_6d, self.net.Mixed_6e,
                  self.net.Mixed_7a, self.net.Mixed_7b, self.net.Mixed_7c):
        x = block(x)
    # Global average pool over the spatial dims -> (N, C).
    pool = torch.mean(x.view(x.size(0), x.size(1), -1), 2)
    # Dropout with training=False is a no-op; kept for parity.
    logits = self.net.fc(
        F.dropout(pool, training=False).view(pool.size(0), -1))
    return pool, logits
def _forward(self, level, inp):
    """One recursive hourglass stage: full-res branch plus an upsampled
    lower-resolution branch (recursing until level 1)."""
    # Upper branch processes the input at full resolution.
    up1 = self._sub_layers['b1_' + str(level)](inp)
    # Lower branch: downsample, process, recurse, process, upsample.
    low = F.avg_pool2d(inp, 2, stride=2)
    low = self._sub_layers['b2_' + str(level)](low)
    if level > 1:
        low = self._forward(level - 1, low)
    low = self._sub_layers['b2_plus_' + str(level)](low)
    low = self._sub_layers['b3_' + str(level)](low)
    up2 = F.interpolate(low, scale_factor=2, mode='nearest')
    return up1 + up2
def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,
            split_D=False):
    """Run the generator on (z, gy) and score the result with D.

    Args:
        z, gy: generator noise and its class conditioning.
        x, dy: optional real batch and its class conditioning.
        train_G: put G in train mode (otherwise eval mode).
        return_G_z: also return G's output on the fake-only paths.
        split_D: call D separately on fake and real batches instead of
            one concatenated call.

    Returns:
        (D_fake, D_real) when real data is given, otherwise D's output
        on the fake batch (optionally paired with G_z).
    """
    # Toggle G's mode depending on whether we are training it.
    if train_G:
        self.G.train()
    else:
        self.G.eval()
    # Generator output for the given noise and class embedding.
    G_z = self.G(z, self.G.shared(gy))

    if split_D:
        # Two separate D calls: fake first, then (optionally) real.
        D_fake = self.D(G_z, gy)
        if x is not None:
            return D_fake, self.D(x, dy)
        return (D_fake, G_z) if return_G_z else D_fake

    # Batched path: fake and real share one D call for efficiency.
    if x is not None and x.shape[-1] != G_z.shape[-1]:
        # Bring real samples to the generator's spatial resolution.
        x = F.interpolate(x, size=G_z.shape[-2:])
    D_input = G_z if x is None else torch.cat([G_z, x], 0)
    D_class = gy if dy is None else torch.cat([gy, dy], 0)
    D_out = self.D(D_input, D_class)
    if x is not None:
        # Undo the batch concatenation: (D_fake, D_real).
        return torch.split(D_out, [G_z.shape[0], x.shape[0]])
    return (D_out, G_z) if return_G_z else D_out
def forward(self, x, s, masks=None):
    """Translate image x under style s; when masks are given, reinject
    masked encoder features through the high-pass filter at the
    32/64/128 spatial resolutions."""
    x = self.from_rgb(x)
    # Stash encoder features at the resolutions the decoder reinjects.
    skips = {}
    for enc in self.encode:
        if masks is not None and x.shape[2] in (32, 64, 128):
            skips[x.shape[2]] = x
        x = enc(x)
    for dec in self.decode:
        x = dec(x, s)
        if masks is not None and x.shape[2] in (32, 64, 128):
            # masks[0] drives the 32-res injection, masks[1] the others.
            m = masks[0] if x.shape[2] == 32 else masks[1]
            m = F.interpolate(m, size=x.shape[2], mode='bilinear')
            x = x + self.hpf(m * skips[x.shape[2]])
    return porch.varbase_to_tensor(self.to_rgb(x))
def _shortcut(self, x):
    """Identity path of the residual block: optional 2x nearest-neighbour
    upsample followed by an optional learned 1x1 projection."""
    if self.upsample:
        x = F.interpolate(x, scale_factor=2, mode='nearest')
    return self.conv1x1(x) if self.learned_sc else x
def forward(self, x, y=None):
    """Resize inputs to 128x128, then delegate to the base discriminator."""
    resized = F.interpolate(x, size=[128, 128])
    return super(Discriminator_Resize, self).forward(resized, y)
def forward(self, z, y):
    """Generate with the base class, then resize the fake images to the
    configured output resolution."""
    fake = super(Generator_Resize, self).forward(z, y)
    return F.interpolate(fake, size=[self.resolution, self.resolution])