Example #1
import math

import torch
from torch.nn import Module, Parameter


class FullyConnectedLayer(Module):
    """
    Simple FC layer, similar to https://arxiv.org/abs/1609.02907
    """

    def __init__(self, in_features, out_features, bias=True):
        super(FullyConnectedLayer, self).__init__()
        self.in_features = in_features
        # Only out_features // 2 raw features are produced; forward() later
        # halves the batch dimension and doubles the feature dimension again.
        self.out_features = out_features // 2
        self.weight = Parameter(torch.FloatTensor(in_features, out_features // 2))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features // 2))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform initialisation scaled by the weight's fan-out.
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input):
        # The weight is stored in single precision but cast to double here to
        # match double-precision inputs.
        support = torch.mm(input, self.weight.double())
        if self.bias is not None:
            support += self.bias.double()
        # Split the batch into two halves and place them side by side: the
        # result has half the rows and twice the columns of `support`
        # (this requires an even batch size).
        first_half, second_half = torch.split(support, support.shape[0] // 2)
        output = torch.cat((first_half, second_half), dim=1)
        return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
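
A minimal smoke test for this layer (a hedged sketch; the sizes below are illustrative assumptions, and the input must be double precision with an even batch size):

layer = FullyConnectedLayer(in_features=16, out_features=8)
x = torch.randn(4, 16, dtype=torch.float64)  # even batch of double-precision rows
out = layer(x)
print(layer)       # FullyConnectedLayer (16 -> 4)
print(out.shape)   # torch.Size([2, 8]): rows halved, columns doubled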
Example #2
import math

import torch
from torch.nn import Module, Parameter


class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        # Batched propagation: for every graph in the batch, compute
        # adj[i] @ (input[i] @ W); adj[i] may be a sparse adjacency matrix.
        output = torch.zeros(input.shape[0], input.shape[1], self.out_features)
        for i in range(input.shape[0]):
            support = torch.mm(input[i].double(), self.weight.double())
            output[i] = torch.spmm(adj[i], support)
            if self.bias is not None:
                # Broadcasting adds the bias to every node at once.
                output[i] = output[i] + self.bias

        return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
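
A minimal sketch of calling this layer (hedged: the graph sizes and the identity adjacency matrices are illustrative assumptions):

gc = GraphConvolution(in_features=5, out_features=7)
x = torch.randn(2, 3, 5)                                    # batch of 2 graphs, 3 nodes each
adj = [torch.eye(3, dtype=torch.float64).to_sparse()] * 2   # sparse identity adjacencies
out = gc(x, adj)
print(out.shape)                                            # torch.Size([2, 3, 7])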
Example #3
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import Parameter
from qpth.qp import QPFunction


class OptNet(nn.Module):
    def __init__(self, nFeatures, nHidden, nCls, bn, nineq=1, neq=0, eps=1e-4):
        super(OptNet, self).__init__()
        self.device = torch.device("cuda")
        self.nFeatures = nFeatures
        self.nHidden = nHidden
        self.bn = bn
        self.nCls = nCls
        self.nineq = nineq
        self.neq = neq
        self.eps = eps

        if bn:
            self.bn1 = nn.BatchNorm1d(nHidden)
            self.bn2 = nn.BatchNorm1d(nCls)

        self.fc1 = nn.Linear(nFeatures, nHidden)
        self.fc2 = nn.Linear(nHidden, nCls)
        X = torch.tril(torch.ones(nCls, nCls))

        # M masks L to its lower triangle so that Q = L L^T stays symmetric
        # positive semi-definite while L is being learned.
        self.M = Variable(X.cuda())
        self.L = Parameter(torch.tril(torch.rand(nCls, nCls).cuda()))
        self.p = Parameter(torch.Tensor(1, nCls).uniform_(-1, 1).cuda())
        self.G = Parameter(torch.Tensor(nineq, nCls).uniform_(-1, 1).cuda())
        #self.A =Parameter(torch.Tensor(neq,nCls).uniform_(-1,1).cuda())
        #self.b=
        # z0 and s0 define a strictly feasible point, via h = G z0 + s0.
        self.z0 = Parameter(torch.zeros(nCls).cuda())
        self.s0 = Parameter(torch.ones(nineq).cuda())

    def forward(self, x):
        nBatch = x.size(0)

        # FC-ReLU-(BN)-FC-ReLU-(BN)-QP-Softmax
        x = x.view(nBatch, -1)
        x = x.unsqueeze(0)
        x = x.float()
        x = F.relu(self.fc1(x))
        x = x.squeeze(2)

        #if self.bn:
        #x = self.bn1(x)
        #x = F.relu(self.fc2(x))
        #if self.bn:
        #x = self.bn2(x)

        # Note: the FC features computed above do not feed into the QP below;
        # all solver inputs are learned parameters.
        # Assemble the QP coefficients: Q = L L^T + eps * I is positive
        # definite, and h = G z0 + s0 makes the inequalities strictly feasible.
        L = self.M * self.L
        Q = L.mm(L.t()) + self.eps * Variable(torch.eye(self.nCls)).cuda()
        p = self.p.double()
        h = self.G.mv(self.z0) + self.s0
        G = self.G.double()
        Q = Q.double()
        h = h.double()

        # No equality constraints, so empty tensors are passed for A and b.
        e = Variable(torch.Tensor())

        x = QPFunction(verbose=True)(Q, p, G, h, e, e).cuda()
        return F.log_softmax(x, dim=1)
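
A minimal usage sketch (hedged: it assumes a CUDA device and the qpth package, which provides QPFunction; all sizes are illustrative). Because p has shape (1, nCls) and the remaining coefficients are unbatched, qpth's batch broadcasting makes the solver return a single row regardless of the input batch size:

net = OptNet(nFeatures=28 * 28, nHidden=128, nCls=10, bn=False).cuda()
x = torch.randn(16, 28 * 28, device="cuda")
log_probs = net(x)      # log-softmax over the QP solution
print(log_probs.shape)  # torch.Size([1, 10]) under qpth's batch broadcasting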