Example 1
import torch.nn as nn

# `grl` is the project's gradient-reversal module providing GradientReverseLayer;
# its import is not shown in the original snippet.


class DANNDiscriminator(nn.Module):
    """Domain discriminator for DANN, fed through a gradient reversal layer."""

    def __init__(self, feature_dim, hidden_dim):
        super(DANNDiscriminator, self).__init__()

        # Three-layer MLP that predicts the domain of a feature vector.
        self.ad_layer1 = nn.Linear(feature_dim, hidden_dim)
        self.ad_layer2 = nn.Linear(hidden_dim, hidden_dim)
        self.ad_layer3 = nn.Linear(hidden_dim, 1)

        self.relu = nn.ReLU()
        self.grl_layer = grl.GradientReverseLayer()
        self.sigmoid = nn.Sigmoid()
        self.drop_layer1 = nn.Dropout(0.5)
        self.drop_layer2 = nn.Dropout(0.5)

        # Small Gaussian initialization for the weights, zero biases.
        self.ad_layer1.weight.data.normal_(0, 0.01)
        self.ad_layer2.weight.data.normal_(0, 0.01)
        self.ad_layer3.weight.data.normal_(0, 0.3)
        self.ad_layer1.bias.data.fill_(0.0)
        self.ad_layer2.bias.data.fill_(0.0)
        self.ad_layer3.bias.data.fill_(0.0)

        # Per-layer parameter groups with a learning-rate multiplier of 10,
        # so the discriminator is updated faster than the backbone.
        self.parameter_list = [{
            "params": self.ad_layer1.parameters(),
            "lr": 10
        }, {
            "params": self.ad_layer2.parameters(),
            "lr": 10
        }, {
            "params": self.ad_layer3.parameters(),
            "lr": 10
        }]
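
The examples rely on grl.GradientReverseLayer, whose implementation is not shown. For reference only, here is a minimal sketch of a DANN-style gradient reversal layer: identity in the forward pass, gradient negation (scaled by a ramp-up coefficient) in the backward pass. The class name matches the call sites above, but the max_iter default and the sigmoid ramp schedule are assumptions for illustration, not the project's actual grl module.

import math

import torch


class _ReverseGrad(torch.autograd.Function):
    # Identity in the forward pass; multiplies incoming gradients by -coeff
    # in the backward pass.
    @staticmethod
    def forward(ctx, x, coeff):
        ctx.coeff = coeff
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return -ctx.coeff * grad_output, None


class GradientReverseLayer:
    # Callable wrapper that schedules the reversal strength by iteration
    # (assumed schedule; the real grl module may differ).
    def __init__(self, iter_num=0, max_iter=10000.0):
        self.iter_num = iter_num
        self.max_iter = max_iter

    def __call__(self, x):
        # Ramp the coefficient from 0 to 1 with the schedule used in the
        # DANN paper: 2 / (1 + exp(-10 * p)) - 1, with p = progress in [0, 1].
        p = min(self.iter_num / self.max_iter, 1.0)
        coeff = 2.0 / (1.0 + math.exp(-10.0 * p)) - 1.0
        return _ReverseGrad.apply(x, coeff)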
Example 2
    def forward(self, inputs):
        # Track the training iteration so the gradient reversal coefficient
        # can be scheduled over time.
        self.iter_count += 1
        grl_layer = grl.GradientReverseLayer(iter_num=self.iter_count)

        # Reverse gradients, then classify the domain with the 3-layer MLP.
        outputs = grl_layer(inputs)
        outputs = self.drop_layer1(self.relu(self.ad_layer1(outputs)))
        outputs = self.drop_layer2(self.relu(self.ad_layer2(outputs)))
        outputs = self.sigmoid(self.ad_layer3(outputs))
        return outputs
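
A hypothetical usage sketch for the unconditional discriminator of Examples 1 and 2, assuming iter_count is initialized to 0 in __init__ and that feature_dim=256, hidden_dim=1024, and the batch size of 32 are illustrative values:

import torch
import torch.nn as nn

discriminator = DANNDiscriminator(feature_dim=256, hidden_dim=1024)
bce = nn.BCELoss()

source_features = torch.randn(32, 256)   # features extracted from source-domain inputs
target_features = torch.randn(32, 256)   # features extracted from target-domain inputs

domain_preds = discriminator(torch.cat([source_features, target_features], dim=0))
domain_labels = torch.cat([torch.ones(32, 1), torch.zeros(32, 1)], dim=0)

# Minimizing this loss trains the discriminator; because of the gradient
# reversal layer, the same backward pass pushes the feature extractor to
# make the two domains indistinguishable.
adv_loss = bce(domain_preds, domain_labels)
adv_loss.backward()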
Example 3
    def forward(self, features, softmax_outputs):
        # Track the number of forward passes to schedule the gradient
        # reversal coefficient.
        self.forward_count += 1
        grl_layer = grl.GradientReverseLayer(iter_num=self.forward_count)

        if self.randomize:
            # Randomized multilinear map: project features and predictions
            # separately, then fuse them with an element-wise product.
            rman_out_list = self.rmm_layer.forward([features, softmax_outputs])
            rman_out = rman_out_list[0]
            for rman_single_out in rman_out_list[1:]:
                rman_out = torch.mul(rman_out, rman_single_out)
            adv_inputs = rman_out.view(-1, rman_out.size(1))
        else:
            # Explicit multilinear map: outer product of the softmax
            # predictions and the features, flattened per sample.
            op_out = torch.bmm(softmax_outputs.unsqueeze(2),
                               features.unsqueeze(1))
            adv_inputs = op_out.view(
                -1,
                softmax_outputs.size(1) * features.size(1))

        # Square-root rescaling (with a small epsilon) and L2 normalization
        # of the joint feature before it enters the discriminator.
        adv_inputs = torch.sqrt(torch.abs(adv_inputs) + 1e-12)
        adv_inputs = F.normalize(adv_inputs, p=2, dim=1)

        # Reverse gradients, then classify the domain with the 3-layer MLP.
        outputs = grl_layer(adv_inputs)
        outputs = self.drop_layer1(self.relu(self.ad_layer1(outputs)))
        outputs = self.drop_layer2(self.relu(self.ad_layer2(outputs)))
        outputs = self.sigmoid(self.ad_layer3(outputs))
        return outputs
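
For the conditional variant in Example 3, the discriminator consumes the flattened outer product of the classifier's softmax predictions and the backbone features, so with randomize=False its input dimension is num_classes * feature_dim. A hypothetical call, assuming a class named CDANDiscriminator wired up like Example 1, with forward_count, randomize, and rmm_layer set in __init__ (all names and sizes below are illustrative):

import torch
import torch.nn.functional as F

features = torch.randn(32, 256)            # backbone features, feature_dim = 256
logits = torch.randn(32, 31)               # classifier logits for 31 classes
softmax_outputs = F.softmax(logits, dim=1)

# With randomize=False, ad_layer1 must accept 31 * 256 = 7936 inputs.
discriminator = CDANDiscriminator(feature_dim=31 * 256, hidden_dim=1024)
domain_preds = discriminator(features, softmax_outputs)   # shape: (32, 1)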