import torch.nn as nn

# NOTE: GatedTrans (the gated tanh embedding block used below) is defined
# elsewhere in this repository; a minimal stand-in is sketched at the end of
# this file so the modules can be exercised in isolation.


class PAIR_MODULE(nn.Module):
    def __init__(self, config):
        super(PAIR_MODULE, self).__init__()

        # gated embedding of the history representation (bi-LSTM output, 2 * hidden -> hidden)
        self.H_embed = nn.Sequential(
            nn.Dropout(p=config["dropout_fc"]),
            GatedTrans(config["lstm_hidden_size"] * 2,
                       config["lstm_hidden_size"]),
        )
        # gated embedding of the question representation (bi-LSTM output, 2 * hidden -> hidden)
        self.Q_embed = nn.Sequential(
            nn.Dropout(p=config["dropout_fc"]),
            GatedTrans(config["lstm_hidden_size"] * 2,
                       config["lstm_hidden_size"]),
        )
        # two-layer MLP mapping a 2 * hidden feature to a scalar score
        self.MLP = nn.Sequential(
            nn.Dropout(p=config["dropout_fc"]),
            nn.Linear(config["lstm_hidden_size"] * 2,
                      config["lstm_hidden_size"]),
            nn.Dropout(p=config["dropout_fc"]),
            nn.Linear(config["lstm_hidden_size"], 1),
        )
        # fuses two scalar scores into a single attention logit
        self.att = nn.Linear(2, 1)

        # Kaiming-uniform init for all linear weights, zero init for biases
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    nn.init.constant_(m.bias.data, 0)


class ATT_MODULE(nn.Module):
    def __init__(self, config):
        super(ATT_MODULE, self).__init__()

        # gated embedding of image region features (img_feature_size -> hidden)
        self.V_embed = nn.Sequential(
            nn.Dropout(p=config["dropout_fc"]),
            GatedTrans(config["img_feature_size"], config["lstm_hidden_size"]),
        )
        # gated embedding of question word embeddings (word_embedding_size -> hidden)
        self.Q_embed = nn.Sequential(
            nn.Dropout(p=config["dropout_fc"]),
            GatedTrans(config["word_embedding_size"],
                       config["lstm_hidden_size"]),
        )
        # maps each hidden-size feature to a scalar attention logit
        self.att = nn.Sequential(nn.Dropout(p=config["dropout_fc"]),
                                 nn.Linear(config["lstm_hidden_size"], 1))

        # normalizes the attention logits along the last dimension
        self.softmax = nn.Softmax(dim=-1)

        # Kaiming-uniform init for all linear weights, zero init for biases
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    nn.init.constant_(m.bias.data, 0)
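

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module code).
# It uses a stand-in GatedTrans consistent with how the block is used above
# (a gated tanh embedding); the repository's own definition may differ, and
# the config keys below carry placeholder values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    class GatedTrans(nn.Module):
        # stand-in gated tanh embedding: tanh(W_y x) * sigmoid(W_g x)
        def __init__(self, in_dim, out_dim):
            super(GatedTrans, self).__init__()
            self.embed_y = nn.Sequential(nn.Linear(in_dim, out_dim), nn.Tanh())
            self.embed_g = nn.Sequential(nn.Linear(in_dim, out_dim), nn.Sigmoid())

        def forward(self, x):
            return self.embed_y(x) * self.embed_g(x)

    config = {
        "dropout_fc": 0.25,          # placeholder
        "lstm_hidden_size": 512,     # placeholder
        "word_embedding_size": 300,  # placeholder
        "img_feature_size": 2048,    # placeholder
    }

    # both modules build from the shared config dict
    pair_module = PAIR_MODULE(config)
    att_module = ATT_MODULE(config)
    print(pair_module)
    print(att_module)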