Example #1
  def __init__(self, wrd_vocab, pos_vocab, non_vocab, config):
    super(CMN, self).__init__()
    self.config = config
    self.dropout = self.config['dropout']
    self.hdim = self.config['n_hidden']
    self.wdim = self.config['word_dim']
    self.feat_box  = self.config['feat_box']

    self.use_outer = self.config['use_outer']
    self.fusion    = self.config['fusion']
    self.debug     = self.config['debug']
    self.evaluate  = False

    self.Wwrd = nn.Embedding(len(wrd_vocab), self.wdim)
    self.w2i  = wrd_vocab

    self.SMAX = nn.Softmax()
    self.LSMAX = nn.LogSoftmax()
    self.SIGM = nn.Sigmoid()
    self.RELU = nn.ReLU()
    self.TANH = nn.Tanh()
    self.DROP = nn.Dropout(self.dropout)

    self.WscrSUB = nn.Linear(self.hdim*4, 1)
    self.WscrOBJ = nn.Linear(self.hdim*4, 1)
    self.WscrREL = nn.Linear(self.hdim*4, 1)

    self.rnn0 = nn.LSTM(input_size=self.wdim, hidden_size=self.hdim, num_layers=1, bidirectional=True, dropout=self.dropout)
    self.rnn1 = nn.LSTM(input_size=self.hdim * 2, hidden_size=self.hdim, num_layers=1, bidirectional=True, dropout=self.dropout, bias=False)
    self.h00 = makevar(np.zeros((2, 1, self.hdim)), numpy_var=True)
    self.c00 = makevar(np.zeros((2, 1, self.hdim)), numpy_var=True)
    self.h01 = makevar(np.zeros((2, 1, self.hdim)), numpy_var=True)
    self.c01 = makevar(np.zeros((2, 1, self.hdim)), numpy_var=True)

    init_forget(self.rnn0)
    init_forget(self.rnn1)

    self.Wbox = nn.Linear(self.feat_box, self.wdim)

    if self.fusion == 'concat':
#      self.Wout0  = nn.Linear(2*self.wdim, self.wdim)
      self.Wout0  = nn.Linear(2*self.wdim, 1)
#      self.Wrel0 = nn.Linear(2*self.wdim, self.wdim)
    else:
#      self.Wout0  = nn.Linear(self.wdim, self.wdim)
      self.Wout0  = nn.Linear(self.wdim, 1)
#      self.Wrel0 = nn.Linear(self.wdim, self.hdim)
#    self.Wout1 = nn.Linear(self.wdim, 1)

    # if self.use_outer:
    #   self.Wrbox= nn.Linear(self.feat_box*2 + ((5+1)**2), self.wdim)
    # else:
    #   self.Wrbox= nn.Linear(self.feat_box*2, self.wdim)
    self.Wrbox = nn.Linear(5 * 2, self.wdim)
    self.Wrel1 = nn.Linear(self.wdim, 1)
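
The constructor above calls two helpers, makevar and init_forget, that are not shown in these listings. Below is a minimal sketch of what they plausibly do, assuming makevar wraps a NumPy array as a float tensor and init_forget biases the LSTM forget gates toward 1; the exact versions in the original repository may differ.

import numpy as np
import torch

def makevar(x, numpy_var=False):
    # Assumed behavior: wrap a NumPy array (or scalar) as a float tensor.
    if numpy_var:
        return torch.from_numpy(np.asarray(x)).float()
    return torch.tensor(x, dtype=torch.float)

def init_forget(rnn):
    # Assumed behavior: set the forget-gate bias of an nn.LSTM to 1 so the
    # gates start open; a no-op for layers built with bias=False.
    for name, param in rnn.named_parameters():
        if 'bias' in name:
            n = param.size(0)
            # PyTorch packs the gate biases as [input, forget, cell, output].
            param.data[n // 4:n // 2].fill_(1.0)
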
Example #2
    def __init__(self, wrd_vocab, pos_vocab, non_vocab, config):
        super(BOX_MLP, self).__init__()
        self.config = config
        self.dropout = self.config['dropout']
        self.hdim = self.config['n_hidden']
        self.wdim = self.config['word_dim']
        self.feat_box = self.config['feat_box']
        self.n_layer = self.config['n_layer']

        self.use_outer = self.config['use_outer']
        self.fusion = self.config['fusion']
        self.debug = self.config['debug']
        self.evaluate = False

        self.Wwrd = nn.Embedding(len(wrd_vocab), self.wdim)
        self.w2i = wrd_vocab

        self.SMAX = nn.Softmax()
        self.SIGM = nn.Sigmoid()
        self.RELU = nn.ReLU()
        self.TANH = nn.Tanh()
        self.DROP = nn.Dropout(self.dropout)

        self.WscrSUB = nn.Linear(self.hdim * 4, 1)

        self.rnn0 = nn.LSTM(input_size=self.wdim,
                            hidden_size=self.hdim,
                            num_layers=1,
                            bidirectional=True,
                            dropout=self.dropout)
        self.rnn1 = nn.LSTM(input_size=self.hdim * 2,
                            hidden_size=self.hdim,
                            num_layers=1,
                            bidirectional=True,
                            dropout=self.dropout,
                            bias=False)
        init_forget(self.rnn0)
        init_forget(self.rnn1)

        self.Wbox = nn.Linear(self.feat_box, self.wdim)

        mlist = []
        for i in range(self.n_layer):
            if i == 0:
                mlist.append(nn.Linear(self.feat_box + self.wdim, self.hdim))
            elif i == self.n_layer - 1:
                mlist.append(nn.Linear(self.hdim, 1))
            else:
                mlist.append(nn.Linear(self.hdim, self.hdim))
        self.Wff = nn.ModuleList(mlist)
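
The Wff stack above is a plain MLP over concatenated box and phrase features. The following standalone sketch (toy dimensions and an assumed forward pass; the repository's actual forward method may differ) rebuilds the same layer pattern and scores a batch of four candidate boxes:

import torch
import torch.nn as nn

# Toy sizes standing in for config['feat_box'], config['word_dim'],
# config['n_hidden'], config['n_layer'].
feat_box, wdim, hdim, n_layer = 10, 8, 16, 3

# Same layer pattern as the construction loop above.
mlist = []
for i in range(n_layer):
    if i == 0:
        mlist.append(nn.Linear(feat_box + wdim, hdim))
    elif i == n_layer - 1:
        mlist.append(nn.Linear(hdim, 1))
    else:
        mlist.append(nn.Linear(hdim, hdim))
Wff = nn.ModuleList(mlist)

# Concatenate box features with an encoded phrase vector, apply ReLU
# between hidden layers, and read off one raw score per box.
x = torch.cat([torch.randn(4, feat_box), torch.randn(4, wdim)], dim=1)
for i, layer in enumerate(Wff):
    x = layer(x) if i == n_layer - 1 else torch.relu(layer(x))
print(x.shape)  # torch.Size([4, 1])
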
Example #3
    def __init__(self, wrd_vocab, pos_vocab, non_vocab, config, cnn_feat=4096):
        super(CMN_LOC, self).__init__()
        self.config = config
        self.dropout = self.config['dropout']
        self.hdim = self.config['n_hidden']
        self.wdim = self.config['word_dim']
        self.feat_box = self.config['feat_box']
        self.cnn_feat = cnn_feat
        self.use_outer = self.config['use_outer']
        self.fusion = self.config['fusion']
        self.debug = self.config['debug']
        self.evaluate = False

        self.Wwrd = nn.Embedding(len(wrd_vocab), self.wdim)
        self.w2i = wrd_vocab

        self.SMAX = nn.Softmax()
        self.LSMAX = nn.LogSoftmax()
        self.SIGM = nn.Sigmoid()
        self.RELU = nn.ReLU()
        self.TANH = nn.Tanh()
        self.DROP = nn.Dropout(self.dropout)

        self.WscrSUB = nn.Linear(self.hdim * 4, 1)

        self.rnn0 = nn.LSTM(input_size=self.wdim,
                            hidden_size=self.hdim,
                            num_layers=1,
                            bidirectional=True,
                            dropout=self.dropout)
        self.rnn1 = nn.LSTM(input_size=self.hdim * 2,
                            hidden_size=self.hdim,
                            num_layers=1,
                            bidirectional=True,
                            dropout=self.dropout,
                            bias=False)
        init_forget(self.rnn0)
        init_forget(self.rnn1)

        if self.use_outer:
            self.Wbox = nn.Linear(
                (self.feat_box - self.cnn_feat + 1)**2 + self.feat_box,
                self.wdim)
        else:
            self.Wbox = nn.Linear(self.feat_box, self.wdim)

        self.Wout0 = nn.Linear(self.wdim, 1)
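
When use_outer is set, Wbox above is sized for box features extended by (self.feat_box - self.cnn_feat + 1)**2 extra values, i.e. the flattened outer product of the non-CNN part of the feature vector with a bias term appended: with feat_box = 4096 + 5 that is 6 * 6 = 36. A minimal, self-contained sketch of that construction follows; the 5-value spatial layout and the concatenation order are assumptions.

import torch

def augment_with_outer(box_feat, cnn_feat=4096):
    # box_feat: (cnn_feat + 5,) -- CNN activations followed by 5 spatial
    # values (e.g. normalized x1, y1, x2, y2, area); layout assumed.
    spatial = box_feat[cnn_feat:]                 # (5,)
    s = torch.cat([spatial, torch.ones(1)])       # append bias -> (6,)
    outer = torch.outer(s, s).reshape(-1)         # (36,)
    return torch.cat([outer, box_feat])           # (36 + feat_box,)

feat = augment_with_outer(torch.randn(4096 + 5))
print(feat.shape)  # torch.Size([4137]) == (5 + 1)**2 + 4101
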
Example #4
    def __init__(self, wrd_vocab, pos_vocab, non_vocab, config):
        super(GroundNET, self).__init__()
        self.config = config
        self.dropout = self.config['dropout']
        self.hdim = self.config['n_hidden']
        self.layer = self.config['n_layer']
        self.wdim = self.config['word_dim']
        self.feat_box = self.config['feat_box']
        self.use_outer = self.config['use_outer']
        self.fusion = self.config['fusion']
        self.debug = self.config['debug']
        self.encoder = self.config['encoder']
        self.only_spatial = self.config['only_spatial']

        self.Wwrd = nn.Embedding(len(wrd_vocab), self.wdim)
        self.w2i = wrd_vocab

        self.Wbox = nn.Linear(self.feat_box, self.wdim)

        if self.use_outer:
            if self.only_spatial:
                self.Wrbox = nn.Linear(5 * 2 + ((5 + 1)**2), self.wdim)
            else:
                self.Wrbox = nn.Linear(self.feat_box * 2 + ((5 + 1)**2),
                                       self.wdim)
        else:
            if self.only_spatial:
                self.Wrbox = nn.Linear(5 * 2, self.wdim)
            else:
                self.Wrbox = nn.Linear(self.feat_box * 2, self.wdim)

        self.SMAX = nn.Softmax()
        self.LSMAX = nn.LogSoftmax()
        self.SIGM = nn.Sigmoid()
        self.RELU = nn.ReLU()
        self.TANH = nn.Tanh()
        self.DROP = nn.Dropout(self.dropout)
        self.WDROP = WordDropout(self.dropout)

        if self.fusion == 'concat':
            out0_dim = self.wdim * 2
        else:
            out0_dim = self.wdim
        if self.layer == 1 and self.fusion == 'concat':
            out1_dim = self.wdim * 2
        else:
            out1_dim = self.wdim

        self.Wrel0 = nn.Linear(out0_dim, self.wdim)
        self.Wrel1 = nn.Linear(out1_dim, 1)

        self.Wout0 = nn.Linear(out0_dim, self.wdim)
        self.Wout1 = nn.Linear(out1_dim, 1)

        self.Wscr = nn.Linear(self.hdim * 4, 1)

        if self.encoder == "lstm":
            self.rnn0 = nn.LSTM(input_size=self.wdim,
                                hidden_size=self.hdim,
                                num_layers=1,
                                bidirectional=True,
                                dropout=self.dropout)
            self.rnn1 = nn.LSTM(input_size=self.hdim * 2,
                                hidden_size=self.hdim,
                                num_layers=1,
                                bidirectional=True,
                                dropout=self.dropout,
                                bias=False)
            init_forget(self.rnn0)
            init_forget(self.rnn1)

        elif self.encoder == "gru":
            self.rnn0 = nn.GRU(input_size=self.wdim,
                               hidden_size=self.hdim,
                               num_layers=1,
                               bidirectional=True,
                               dropout=self.dropout)
            self.rnn1 = nn.GRU(input_size=self.hdim * 2,
                               hidden_size=self.hdim,
                               num_layers=1,
                               bidirectional=True,
                               dropout=self.dropout)
        else:
            raise NotImplementedError()
        self.h00 = makevar(np.zeros((2, 1, self.hdim)), numpy_var=True)
        self.c00 = makevar(np.zeros((2, 1, self.hdim)), numpy_var=True)
        self.h01 = makevar(np.zeros((2, 1, self.hdim)), numpy_var=True)
        self.c01 = makevar(np.zeros((2, 1, self.hdim)), numpy_var=True)
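
All four constructors are driven by the same flat config dictionary; its keys can be read off directly from the attribute lookups above. A sketch with illustrative values follows, assuming GroundNET (or any of the other classes) is importable from the repository; every number here is an assumption, not a repository default.

config = {
    'dropout': 0.3,          # rate shared by nn.Dropout, WordDropout, and LSTM dropout
    'n_hidden': 256,         # hdim: hidden size per LSTM/GRU direction
    'n_layer': 2,            # depth of the MLP / output stack
    'word_dim': 300,         # wdim: word embedding size
    'feat_box': 4096 + 5,    # per-box feature size (CNN features + spatial values)
    'use_outer': False,      # append outer-product spatial features
    'only_spatial': False,   # relation branch sees only spatial features
    'fusion': 'concat',      # 'concat' vs. elementwise fusion
    'encoder': 'lstm',       # 'lstm' or 'gru' phrase encoder
    'debug': False,
}

# Toy vocabulary; the real one comes from the dataset preprocessing.
wrd_vocab = {'<unk>': 0, 'the': 1, 'dog': 2, 'left': 3, 'of': 4}

model = GroundNET(wrd_vocab, {}, {}, config)
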