def forward(self, inputs):
    inputs = F.dropout(inputs, 1 - self.dropout)
    x = inputs.transpose(0, 1)  # torch.nn.functional has no transpose; use the tensor method
    inputs = torch.matmul(inputs, x)  # (n, n) pairwise dot products
    inputs = torch.reshape(inputs, (-1,))
    outputs = torch.sigmoid(inputs)  # F.sigmoid is deprecated in favor of torch.sigmoid
    return outputs
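For context, a minimal sketch of the kind of module this first forward could belong to; the class name DotProductDecoder and the reading of self.dropout as a keep probability are assumptions inferred from the snippet, not taken from the source. It scores every pair of row embeddings with a dot product and squashes the flattened scores with a sigmoid, as in a link-prediction decoder.

import torch
import torch.nn as nn
import torch.nn.functional as F

class DotProductDecoder(nn.Module):  # hypothetical wrapper; name is an assumption
    def __init__(self, dropout=0.5):
        super().__init__()
        self.dropout = dropout  # treated as a keep probability, as 1 - self.dropout suggests

    def forward(self, inputs):
        inputs = F.dropout(inputs, 1 - self.dropout)
        x = inputs.transpose(0, 1)
        inputs = torch.matmul(inputs, x)   # (n, n) pairwise dot products
        inputs = torch.reshape(inputs, (-1,))
        return torch.sigmoid(inputs)       # n * n edge probabilities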
Example #2
def forward(self, input):
    b, c, h, w = input.size()
    F = input.view(b, c, h * w)
    G = torch.bmm(F, F.transpose(1, 2))
    # G.div_(h * w)  # Gatys et al. normalization
    G.div_(h * w * c)  # Ulyanov et al. normalization
    return G
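Most of the snippets on this page are variations of the same Gram-matrix layer, so one usage sketch covers the pattern. The wrapper class below is hypothetical (the page only shows forward methods); it shows the shape of the result and where the two normalizations differ.

import torch
import torch.nn as nn

class GramMatrix(nn.Module):  # hypothetical wrapper around the forward above
    def forward(self, input):
        b, c, h, w = input.size()
        F = input.view(b, c, h * w)
        G = torch.bmm(F, F.transpose(1, 2))
        G.div_(h * w * c)  # Ulyanov; drop the * c for the Gatys variant
        return G

features = torch.randn(1, 64, 32, 32)  # e.g. one VGG feature map
gram = GramMatrix()(features)
print(gram.shape)  # torch.Size([1, 64, 64]): one c-by-c matrix per sample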
Example #3
 def forward(self, input):
   b, c, h, w = input.size()
   F = input.view(b, c, h * w)
   #F = input.view(b, c, h * w).div(torch.sqrt(torch.Tensor([h * w])).cuda())
   G = torch.bmm(F, F.transpose(1, 2))
   G.div_(h * w)
   return G
Example #4
	def forward(self, source):
		one, nFilter, h, w = source.size()
		m = h * w
		F = source.view(nFilter, m) + self.activationShift
		G = torch.mm(F, F.transpose(0, 1))
		G.div_(nFilter * m)
		return G
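This variant adds a constant shift to the activations before the product. A sketch of a full module around it, assuming activationShift is a plain scalar hyperparameter (the class name and constructor defaults are mine, not the source's):

import torch
import torch.nn as nn

class ShiftedGram(nn.Module):  # hypothetical wrapper; name is an assumption
    def __init__(self, activationShift=0.0):
        super().__init__()
        self.activationShift = activationShift

    def forward(self, source):
        one, nFilter, h, w = source.size()  # expects a single-sample batch
        m = h * w
        F = source.view(nFilter, m) + self.activationShift
        G = torch.mm(F, F.transpose(0, 1))
        return G.div_(nFilter * m)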
Example #5
def forward(self, input):
    b, c, h, w = input.size()
    F = input.view(b, c, h * w)
    # the Gram matrix is computed by multiplying the input by its transpose
    G = torch.bmm(F, F.transpose(1, 2))
    G.div_(h * w)
    return G
Example #6
def forward(self, input):
    b, c, h, w = input.size()
    F = input.view(b, c, h * w)
    # computes the Gram matrix by multiplying the input by its own transpose
    G = torch.bmm(F, F.transpose(1, 2))
    G.div_(h * w)
    return G
Example #7
	def forward(self, source):
		one, nFilter, h, w = source.size()
		m = h * w
		F = source.view(nFilter, m)
		A = torch.mean(F, dim=1).view(-1, 1)
		G = torch.mm(F, F.transpose(0, 1)).div(m) - torch.mm(A, A.transpose(0, 1))
		G.div_(nFilter)
		return G
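Subtracting the outer product of the per-channel means makes this a covariance matrix rather than a raw Gram matrix. A small self-contained check (the function and variable names are mine, not the source's) confirming it equals explicit mean-centering followed by the normalized Gram product:

import torch

def centered_gram(source):
    one, nFilter, h, w = source.size()
    m = h * w
    F = source.view(nFilter, m)
    A = torch.mean(F, dim=1).view(-1, 1)
    G = torch.mm(F, F.transpose(0, 1)).div(m) - torch.mm(A, A.transpose(0, 1))
    return G.div_(nFilter)

x = torch.randn(1, 8, 4, 4)
feats = x.view(8, 16)
centered = feats - feats.mean(dim=1, keepdim=True)       # center each channel first
expected = torch.mm(centered, centered.t()).div(16 * 8)  # then take the normalized Gram
print(torch.allclose(centered_gram(x), expected, atol=1e-6))  # True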
Example #8
def forward(self, input):
    b, c, h, w = input.size()
    F = input.view(b, c, h * w)
    # torch.bmm performs a batch matrix-matrix product; both arguments must be
    # 3-D tensors containing the same number of matrices.
    G = torch.bmm(F, F.transpose(1, 2))
    G.div_(h * w)
    return G
Example #9
def forward(self, input):
    b, c, h, w = input.size()
    F = input.view(b, c, h * w)
    mean_ = F.mean(dim=2, keepdim=True).detach()
    F = F - mean_  # broadcasting replaces the original torch.cat(h * w * [mean_], 2) copy
    G = torch.bmm(F, F.transpose(1, 2))
    G.div_(h * w)
    return G.squeeze(0).data, mean_.squeeze().data
Example #10
def calc_loss(B, F, G, Sim, gamma1, gamma2, eta, alpha):
    theta = torch.matmul(F, G.transpose(0, 1)) / alpha
    inter_loss = torch.sum(torch.log(1 + torch.exp(theta)) - Sim * theta)
    theta_f = torch.matmul(F, F.transpose(0, 1)) / alpha
    intra_img = torch.sum(torch.log(1 + torch.exp(theta_f)) - Sim * theta_f)
    theta_g = torch.matmul(G, G.transpose(0, 1)) / alpha
    intra_txt = torch.sum(torch.log(1 + torch.exp(theta_g)) - Sim * theta_g)
    intra_loss = gamma1 * intra_img + gamma2 * intra_txt
    quan_loss = torch.sum(torch.pow(B - F, 2) + torch.pow(B - G, 2)) * eta
    # term3 = torch.sum(torch.pow(F.sum(dim=0), 2) + torch.pow(G.sum(dim=0), 2))
    # loss = term1 + gamma * term2 + eta * term3
    loss = inter_loss + intra_loss + quan_loss
    return loss
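A hedged usage sketch for calc_loss, which has the shape of a cross-modal hashing objective: F and G are real-valued image and text features, B is the binary code matrix, and Sim holds pairwise similarity labels. The sizes and hyperparameter values below are illustrative assumptions. (torch.nn.functional.softplus computes log(1 + exp(x)) with better numerical stability than the explicit form used above.)

import torch

n, k = 16, 32                            # illustrative: 16 samples, 32-bit codes
F = torch.randn(n, k)                    # image features
G = torch.randn(n, k)                    # text features
B = torch.sign(F + G)                    # binary codes in {-1, 0, +1}
Sim = (torch.rand(n, n) > 0.5).float()   # pairwise similarity labels

loss = calc_loss(B, F, G, Sim, gamma1=1.0, gamma2=1.0, eta=1.0, alpha=1.0)
print(loss.item())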
Example #11
    def forward(self, F, pred, seed):
        b, c, h, w = pred.size()

        F = self.bn(self.conv(F))
        F = nn.functional.adaptive_max_pool2d(F, (h, w))
        F = F.view(b, -1, h * w)
        W = torch.bmm(F.transpose(1, 2), F)
        P = self.softmax(W)

        if self.clamp:
            self.alpha.data = torch.clamp(self.alpha.data, 0, 1)
            self.beta.data = torch.clamp(self.beta.data, 0, 1)

        pred_vec = pred.view(b, c, -1)
        out_vec = torch.bmm(P, pred_vec.transpose(1, 2)).transpose(1, 2).contiguous()
        # exp(a) / (1 + exp(a)) == sigmoid(a) and 1 / (1 + exp(a)) == 1 - sigmoid(a)
        w_alpha = torch.sigmoid(self.alpha)
        w_beta = torch.sigmoid(self.beta)
        out = (1 - w_beta) * ((1 - w_alpha) * out_vec.view(b, c, h, w) + w_alpha * seed) + w_beta * pred
        return out, P
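For context, a sketch of the state this forward relies on. Everything in the __init__ below (class name, channel counts, kernel size, alpha and beta as scalar nn.Parameters, the softmax dimension) is an assumption reverse-engineered from the forward pass, not taken from the source:

import torch
import torch.nn as nn

class SeedPropagation(nn.Module):  # hypothetical name
    def __init__(self, in_channels=2048, mid_channels=512, clamp=False):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, mid_channels, kernel_size=1)
        self.bn = nn.BatchNorm2d(mid_channels)
        self.softmax = nn.Softmax(dim=-1)           # row-normalizes the (h*w, h*w) affinity
        self.alpha = nn.Parameter(torch.zeros(1))   # learned mixing weights
        self.beta = nn.Parameter(torch.zeros(1))
        self.clamp = clamp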
Example #12
 def forward(self, input):
     b, c, h, w = input.size()
     F = input.view(b, c, h * w)
     G = torch.bmm(F, F.transpose(1, 2))
     G.div_(h * w)
     return G
Example #13
def gram(input):
    b, c, h, w = input.size()
    F = input.view(b, c, h * w)
    G = torch.bmm(F, F.transpose(1, 2))
    G.div_(h * w)
    return G
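Because this version is a free function, the classic style-transfer usage is easy to show. A sketch (tensor names are mine) that compares the Gram matrices of two feature maps with an MSE loss, as in Gatys-style optimization; torch.nn.functional is imported as nnf here to avoid shadowing the F feature matrix used on this page:

import torch
import torch.nn.functional as nnf

gen_feat = torch.randn(1, 128, 28, 28, requires_grad=True)  # generated-image features
style_feat = torch.randn(1, 128, 28, 28)                    # style-image features

style_loss = nnf.mse_loss(gram(gen_feat), gram(style_feat))
style_loss.backward()  # gradients flow back through the Gram computation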
Example #14
def GramMatrix(input_):
    b, c, h, w = input_.shape
    F = input_.view(b, c, h * w)
    G = torch.bmm(F, F.transpose(1, 2))
    G.div_(h * w)
    return G
Example #15
def forward(self, x):
    b, c, h, w = x.size()
    matrix_f = x.view(b, c, h * w)
    matrix_g = torch.bmm(matrix_f, matrix_f.transpose(1, 2))  # transpose matrix_f itself; F.transpose was a bug
    matrix_g.div_(h * w)
    return matrix_g