import torch
import torch.nn.functional as F


def encoder(self, x):
    # Convolutional encoder; the two linear heads e61/e62 produce the two
    # latent parameter tensors (mean and log-scale) of the approximate posterior.
    h1 = F.elu(self.e1(x))
    h3 = F.elu(self.e3(self.e2(h1)))
    h5 = F.elu(self.e5(self.e4(h3)))
    h6 = h5.view(-1, 256)
    h61 = self.e61(h6)
    h62 = self.e62(h6)
    return h61, h62
def encode(self, x):
    x = F.elu(self.conv1(x))
    x = F.avg_pool2d(x, kernel_size=2, stride=2)
    x = F.elu(self.conv2(x))
    x = F.avg_pool2d(x, kernel_size=2, stride=2)
    x = F.elu(self.conv3(x))
    x = x.reshape(-1, 256)
    x = self.fc_log_sigma(x)
    # latent_dimension is assumed to be defined in the enclosing scope; the
    # first half of the output is the mean, the second half the log-scale.
    return x[:, :latent_dimension], x[:, latent_dimension:]
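Both encoders return the latent mean and a log-scale tensor. The sketch below shows the reparameterization step that would typically sit between such an encoder and the decoders further down; the helper name reparameterize is an assumption rather than part of the original code, and it assumes the second output is log(sigma) rather than log-variance (with a log-variance head the scale would be exp(0.5 * logvar)).

def reparameterize(mu, log_sigma):
    # z = mu + sigma * eps with eps ~ N(0, I); keeps the sample differentiable
    # with respect to mu and log_sigma (reparameterization trick).
    eps = torch.randn_like(mu)
    return mu + torch.exp(log_sigma) * eps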
def forward(self, x):
    out = self.conv1(x)
    out = F.elu(self.conv1_bn(out))
    out = self.conv2(out)
    out = F.elu(self.conv2_bn(out))
    out = self.conv3(out)
    out = F.elu(self.conv3_bn(out))
    out = out.view(out.size()[0], -1)
    out = F.elu(self.fc1(out))
    out = F.softmax(self.fc2(out), dim=-1)
    return out
def forward(self, x):
    out = self.conv1(x)
    conv1 = F.elu(self.conv1_bn(out))
    out = self.conv2(conv1)
    conv2 = F.elu(self.conv2_bn(out))
    out = self.conv3(conv2)
    conv3 = F.elu(self.conv3_bn(out))
    out = conv3.view(conv3.size()[0], -1)
    out = F.elu(self.fc1(out))
    out = F.softmax(self.fc2(out), dim=-1)
    # The intermediate feature maps are detached so that losses computed on
    # them do not backpropagate into this classifier.
    return out, conv1.detach(), conv2.detach(), conv3.detach()
def decode(self, z):
    x = F.elu(self.fc_decoder(z))
    x = x.reshape(z.shape[0], 256, 1, 1)
    x = F.elu(self.conv4(x))
    x = self.upsampling(x)
    x = F.elu(self.conv5(x))
    x = self.upsampling(x)
    x = F.elu(self.conv6(x))
    x = self.conv7(x)
    return torch.sigmoid(x)
def decoder(self, z):
    h1 = F.elu(self.d1(z))
    h1 = h1.view(h1.size(0), 256, 1, 1)
    h2 = F.elu(self.conv1(h1))
    h3 = self.upsample(h2)
    h4 = F.elu(self.conv2(h3))
    h5 = self.upsample(h4)
    h6 = F.elu(self.conv3(h5))
    h7 = self.conv4(h6)
    # torch.sigmoid replaces the deprecated F.sigmoid.
    h10 = torch.sigmoid(h7)
    return h10
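For context, a minimal sketch of how the pieces above might be chained in a VAE forward pass, assuming the encode/decode methods defined here and the hypothetical reparameterize helper from earlier:

def forward(self, x):
    mu, log_sigma = self.encode(x)      # posterior parameters
    z = reparameterize(mu, log_sigma)   # sampled latent code
    recon = self.decode(z)              # reconstruction in [0, 1]
    return recon, mu, log_sigma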
from torch.autograd import Variable  # legacy wrapper kept from the original code


def lovasz_loss_flat(logits, labels, error_func):
    """
    Binary Lovasz hinge loss.
    logits: [P] tensor, logits at each prediction (between -inf and +inf)
    labels: [P] tensor, binary ground-truth labels (0 or 1)
    error_func: callable mapping (logits, labels) to per-prediction errors
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    errors = error_func(logits, labels)
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    # loss = torch.dot(F.relu(errors_sorted), Variable(grad))
    loss = torch.dot(F.elu(errors_sorted) + 1, Variable(grad))
    return loss
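lovasz_loss_flat relies on a lovasz_grad helper that is not shown here. A sketch following the standard Lovasz-hinge formulation (the gradient of the Jaccard-loss extension with respect to the sorted errors), together with a plausible hinge-style error_func, is given below; both are assumptions rather than the original definitions.

def lovasz_grad(gt_sorted):
    # Gradient of the Lovasz extension of the Jaccard loss w.r.t. sorted errors.
    p = len(gt_sorted)
    gts = gt_sorted.sum()
    intersection = gts - gt_sorted.float().cumsum(0)
    union = gts + (1. - gt_sorted).float().cumsum(0)
    jaccard = 1. - intersection / union
    if p > 1:
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard


def hinge_errors(logits, labels):
    # Hinge-style errors: positive labels map to sign +1, negatives to -1.
    signs = 2. * labels.float() - 1.
    return 1. - logits * signs

# Example call: loss = lovasz_loss_flat(logits.view(-1), labels.view(-1), hinge_errors)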