Example #1
import torch.nn as nn

# SeqEncoder is a project-local module (not shown in this example).


class TokenEmbeder(nn.Module):
    def __init__(self, config):
        super(TokenEmbeder, self).__init__()
        self.conf = config
        self.margin = config['margin']
        self.n_token_words = config['n_token_words']
        self.n_desc_words = config['n_desc_words']
        self.emb_size = config['emb_size']
        self.n_hidden = config['n_hidden']
        self.dropout = config['dropout']

        # sequence encoders for code tokens and natural-language descriptions
        self.tok_encoder = SeqEncoder(self.n_token_words, self.emb_size,
                                      self.n_hidden)
        self.desc_encoder = SeqEncoder(self.n_desc_words, self.emb_size,
                                       self.n_hidden)

        #self.w_tok = nn.Linear(config['n_hidden'], config['n_hidden'])
        #self.w_desc = nn.Linear(config['n_hidden'], config['n_hidden'])
        #self.fuse = nn.Linear(config['n_hidden'], config['n_hidden'])

        # two-layer transform applied to the attention output
        self.linear_attn_out = nn.Sequential(
            nn.Linear(self.n_hidden, self.n_hidden), nn.Tanh(),
            nn.Linear(self.n_hidden, self.n_hidden))

        # optional extra transform applied to every modality
        if self.conf['transform_every_modal']:
            self.linear_single_modal = nn.Sequential(
                nn.Linear(self.n_hidden, self.n_hidden), nn.Tanh(),
                nn.Linear(self.n_hidden, self.n_hidden))

        # optionally keep attention weights and node masks for inspection
        if self.conf['save_attn_weight']:
            self.attn_weight_torch = []
            self.node_mask_torch = []

        # additive self-attention: hidden-state projection plus scalar scorer
        self.self_atten = nn.Linear(self.n_hidden, self.n_hidden)
        self.self_atten_scalar = nn.Linear(self.n_hidden, 1)
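A minimal instantiation sketch for the constructor above. The key names are taken directly from the code; the values, and the assumption that this is everything SeqEncoder needs, are illustrative only.

# Hypothetical config; keys match the constructor above, values are made up.
config = {
    'margin': 0.05,
    'n_token_words': 10000,
    'n_desc_words': 10000,
    'emb_size': 300,
    'n_hidden': 512,
    'dropout': 0.25,
    'transform_every_modal': False,
    'save_attn_weight': False,
}
model = TokenEmbeder(config)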
Example #2
import torch.nn as nn

# SeqEncoder and BOWEncoder are project-local modules (not shown here).


class JointEmbeder(nn.Module):
    def __init__(self, config):
        super(JointEmbeder, self).__init__()
        self.conf = config
        self.margin = config['margin']

        # sequence encoders for method names, API calls, and descriptions;
        # a bag-of-words encoder for code tokens
        self.name_encoder = SeqEncoder(config['n_words'], config['emb_size'],
                                       config['lstm_dims'])
        self.api_encoder = SeqEncoder(config['n_words'], config['emb_size'],
                                      config['lstm_dims'])
        self.tok_encoder = BOWEncoder(config['n_words'], config['emb_size'],
                                      config['n_hidden'])
        self.desc_encoder = SeqEncoder(config['n_words'], config['emb_size'],
                                       config['lstm_dims'])
        #self.fuse1=nn.Linear(config['emb_size']+4*config['lstm_dims'], config['n_hidden'])
        #self.fuse2 = nn.Sequential(
        #    nn.Linear(config['emb_size']+4*config['lstm_dims'], config['n_hidden']),
        #    nn.BatchNorm1d(config['n_hidden'], eps=1e-05, momentum=0.1),
        #    nn.ReLU(),
        #    nn.Linear(config['n_hidden'], config['n_hidden']),
        #)

        # project each modality into the shared n_hidden space
        # (2 * lstm_dims matches a bidirectional LSTM's output width)
        self.w_name = nn.Linear(2 * config['lstm_dims'], config['n_hidden'])
        self.w_api = nn.Linear(2 * config['lstm_dims'], config['n_hidden'])
        self.w_tok = nn.Linear(config['emb_size'], config['n_hidden'])
        self.fuse3 = nn.Linear(config['n_hidden'], config['n_hidden'])

        self.init_weights()  # weight initialization, defined elsewhere on the class
Example #3
import torch.nn as nn

# TreeLSTM, GGNN, and SeqEncoder are project-local modules (not shown here).


class MultiEmbeder(nn.Module):
    def __init__(self, config):
        super(MultiEmbeder, self).__init__()
        self.conf = config

        self.margin = config['margin']
        self.emb_size = config['emb_size']
        self.n_hidden = config['n_hidden']
        self.dropout = config['dropout']

        self.n_desc_words = config['n_desc_words']
        self.n_token_words = config['n_token_words']

        # one encoder per modality: AST (Tree-LSTM), control-flow graph
        # (gated graph neural network), token sequence, and description
        self.ast_encoder = TreeLSTM(self.conf)
        self.cfg_encoder = GGNN(self.conf)
        self.tok_encoder = SeqEncoder(self.n_token_words, self.emb_size,
                                      self.n_hidden)
        self.desc_encoder = SeqEncoder(self.n_desc_words, self.emb_size,
                                       self.n_hidden)

        # one additive-attention head (projection + scalar scorer) per modality
        self.tok_attn = nn.Linear(self.n_hidden, self.n_hidden)
        self.tok_attn_scalar = nn.Linear(self.n_hidden, 1)
        self.ast_attn = nn.Linear(self.n_hidden, self.n_hidden)
        self.ast_attn_scalar = nn.Linear(self.n_hidden, 1)
        self.cfg_attn = nn.Linear(self.n_hidden, self.n_hidden)
        self.cfg_attn_scalar = nn.Linear(self.n_hidden, 1)

        # fuse the three attended modality vectors into one code embedding
        self.attn_modal_fusion = nn.Linear(self.n_hidden * 3, self.n_hidden)
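The paired layers above (e.g. tok_attn plus tok_attn_scalar) are the standard shape for additive self-attention pooling over a modality's hidden states. A minimal sketch of that pattern, assuming hidden states of shape (batch, seq_len, n_hidden); the example's actual forward pass is not shown here, so this is an illustration, not the author's code.

import torch
import torch.nn.functional as F

def attention_pool(hidden, attn, attn_scalar):
    # hidden: (batch, seq_len, n_hidden)
    scores = attn_scalar(torch.tanh(attn(hidden)))  # (batch, seq_len, 1)
    weights = F.softmax(scores, dim=1)              # normalize over time steps
    return (weights * hidden).sum(dim=1)            # (batch, n_hidden)

# e.g. pooled = attention_pool(tok_hidden, self.tok_attn, self.tok_attn_scalar)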
Example #4
import os

import torch.nn as nn

# SeqEncoder and BOWEncoder are project-local modules (not shown here).


class JointEmbeder(nn.Module):
    def __init__(self, config):
        super(JointEmbeder, self).__init__()
        self.conf = config
        self.margin = config['margin']

        self.name_encoder = SeqEncoder(config['n_words'], config['emb_size'],
                                       config['lstm_dims'])
        self.api_encoder = SeqEncoder(config['n_words'], config['emb_size'],
                                      config['lstm_dims'])
        self.tok_encoder = BOWEncoder(config['n_words'], config['emb_size'],
                                      config['n_hidden'])
        self.desc_encoder = SeqEncoder(config['n_words'], config['emb_size'],
                                       config['lstm_dims'])
        # fuse the concatenated code modalities (BOW token embedding plus the
        # two 2*lstm_dims-wide sequence encodings) into one n_hidden vector
        self.fuse = nn.Linear(config['emb_size'] + 4 * config['lstm_dims'],
                              config['n_hidden'])

        # create a model path to store model info
        if not os.path.exists(config['workdir'] + 'models/'):
            os.makedirs(config['workdir'] + 'models/')
Example #5
import torch.nn as nn

# SeqEncoder, SeqEncoder2, and BOWEncoder are project-local modules (not shown here).


class JointEmbeder(nn.Module):
    def __init__(self, config):
        super(JointEmbeder, self).__init__()
        self.conf = config
        self.margin = config['margin']
        self.dropout = config['dropout']
        self.n_hidden = config['n_hidden']

        self.name_encoder = SeqEncoder(config['n_words'], config['emb_size'],
                                       config['lstm_dims'])
        self.tok_encoder = BOWEncoder(config['n_words'], config['emb_size'],
                                      config['n_hidden'])
        self.desc_encoder = SeqEncoder2(config['n_words'], config['emb_size'],
                                        config['n_hidden'])

        # project each code modality into the shared n_hidden space
        self.w_name = nn.Linear(2 * config['lstm_dims'], config['n_hidden'])
        self.w_tok = nn.Linear(config['emb_size'], config['n_hidden'])
        #self.w_desc = nn.Linear(2*config['lstm_dims'], config['n_hidden'])
        self.fuse3 = nn.Linear(config['n_hidden'], config['n_hidden'])

        # additive self-attention: projection plus scalar scorer
        self.self_attn2 = nn.Linear(self.n_hidden, self.n_hidden)
        self.self_attn_scalar2 = nn.Linear(self.n_hidden, 1)

        self.init_weights()  # weight initialization, defined elsewhere on the class
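Every constructor above stores config['margin'], which in code-search embedders of this kind typically feeds a margin ranking loss over (code, matching description, mismatched description) triples. A minimal sketch of that loss under the assumption of cosine similarity between the joint embeddings; the examples' actual loss functions are not shown, so treat this as illustrative.

import torch
import torch.nn.functional as F

def ranking_loss(code_repr, desc_pos, desc_neg, margin):
    # similarity of each code vector to its matching / mismatched description
    sim_pos = F.cosine_similarity(code_repr, desc_pos)
    sim_neg = F.cosine_similarity(code_repr, desc_neg)
    # hinge: push the positive pair ahead of the negative one by `margin`
    return (margin - sim_pos + sim_neg).clamp(min=0).mean()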