Ejemplo n.º 1
0
    def __init__(self, config, gpu_list, *args, **params):
        """Build the attention-RNN classifier from *config*.

        Reads the model hyper-parameters, constructs an LSTM or GRU
        encoder, a max-pool over paragraphs, an attention module, and the
        fully-connected output head with its loss and accuracy functions.
        """
        super(AttentionRNN, self).__init__()

        self.input_dim = 768
        self.hidden_dim = config.getint('model', 'hidden_dim')
        self.dropout_rnn = config.getfloat('model', 'dropout_rnn')
        self.dropout_fc = config.getfloat('model', 'dropout_fc')
        self.bidirectional = config.getboolean('model', 'bidirectional')
        # A bidirectional RNN doubles the effective feature width.
        self.direction = 2 if self.bidirectional else 1
        self.num_layers = config.getint("model", 'num_layers')
        self.output_dim = config.getint("model", "output_dim")
        self.max_para_q = config.getint('model', 'max_para_q')

        # Select the recurrent cell type once, then build it.
        rnn_cls = nn.LSTM if config.get('model', 'rnn') == 'lstm' else nn.GRU
        self.rnn = rnn_cls(self.input_dim, self.hidden_dim,
                           batch_first=True,
                           num_layers=self.num_layers,
                           bidirectional=self.bidirectional,
                           dropout=self.dropout_rnn)

        feature_width = self.hidden_dim * self.direction
        self.max_pool = nn.MaxPool1d(kernel_size=self.max_para_q)
        self.fc_a = nn.Linear(feature_width, feature_width)
        self.attention = Attention(config)
        self.fc_f = nn.Linear(feature_width, self.output_dim)
        self.dropout = nn.Dropout(self.dropout_fc)
        self.weight = self.init_weight(config, gpu_list)
        #self.criterion = nn.CrossEntropyLoss(weight=self.weight)
        self.criterion = nn.MSELoss()
        self.accuracy_function = init_accuracy_function(config, *args, **params)
Ejemplo n.º 2
0
    def __init__(self, config, gpu_list, *args, **params):
        """Baseline BERT classifier: a pretrained encoder followed by a
        linear head sized for the multi-label softmax loss."""
        super(BaselineYSBert, self).__init__()

        self.output_dim = config.getint("model", "output_dim")
        pretrained_path = config.get("model", "bert_path")
        self.bert = BertModel.from_pretrained(pretrained_path)
        # Two logits per output dimension — presumably one yes/no pair per
        # label for MultiLabelSoftmaxLoss; confirm against the loss impl.
        self.fc = nn.Linear(768, 2 * self.output_dim)

        self.criterion = MultiLabelSoftmaxLoss(config)
        self.accuracy_function = init_accuracy_function(config, *args, **params)
Ejemplo n.º 3
0
    def __init__(self, config, gpu_list, *args, **params):
        """Baseline BERT classifier trained with plain cross-entropy:
        pretrained encoder plus a single linear head."""
        super(BaselineZMBert, self).__init__()

        self.output_dim = config.getint("model", "output_dim")
        pretrained_path = config.get("model", "bert_path")
        self.bert = BertModel.from_pretrained(pretrained_path)
        # 768 matches the hidden size of the pretrained BERT-base encoder.
        self.fc = nn.Linear(768, self.output_dim)

        self.criterion = cross_entropy_loss
        self.accuracy_function = init_accuracy_function(config, *args, **params)
Ejemplo n.º 4
0
    def __init__(self, config, gpu_list, *args, **params):
        """Pointwise BERT model: pretrained encoder, linear head, and a
        cross-entropy criterion; output mode is taken from config."""
        super(BertPoint, self).__init__()

        self.output_dim = config.getint("model", "output_dim")
        self.output_mode = config.get('model', 'output_mode')

        pretrained_path = config.get("model", "bert_path")
        self.bert = BertModel.from_pretrained(pretrained_path)
        self.fc = nn.Linear(768, self.output_dim)
        self.criterion = nn.CrossEntropyLoss()
        self.accuracy_function = init_accuracy_function(config, *args, **params)
Ejemplo n.º 5
0
    def __init__(self, config, gpu_list, *args, **params):
        """Compose the project's BertEncoder with a linear classification
        head, exposing both as a single nn.Sequential."""
        super(BasicBert, self).__init__()

        self.output_dim = config.getint("model", "output_dim")
        self.bert = BertEncoder(config, gpu_list, *args, **params)
        self.fc = nn.Linear(768, self.output_dim)

        # Encoder then classifier, chained for one forward pass.
        self.seq = nn.Sequential(self.bert, self.fc)

        self.criterion = nn.CrossEntropyLoss()
        self.accuracy_function = init_accuracy_function(config, *args, **params)
Ejemplo n.º 6
0
    def __init__(self, config, gpu_list, *args, **params):
        """Build the DPCNN baseline text classifier.

        Vocabulary size is the number of lines in the BERT vocab file.
        The network is a region-embedding convolution, a fixed conv block,
        a stack of ResnetBlocks applied while the (repeatedly halved)
        sequence length exceeds 2, and a two-layer fully-connected head.
        """
        super(BaselineYSDPCNN, self).__init__()
        self.model_name = "DPCNN"
        self.emb_dim = 300
        self.mem_dim = 150
        # Two logits per label, matching MultiLabelSoftmaxLoss below.
        self.output_dim = config.getint("model", "output_dim") * 2

        # Vocabulary size = line count of vocab.txt.  `with` guarantees
        # the handle is closed (the original leaked the open file).
        vocab_path = os.path.join(config.get("model", "bert_path"), "vocab.txt")
        with open(vocab_path, "r") as f:
            self.word_num = sum(1 for _ in f)

        self.embedding = nn.Embedding(self.word_num, self.emb_dim)

        # region embedding
        self.region_embedding = nn.Sequential(
            nn.Conv1d(self.emb_dim, self.mem_dim,
                      kernel_size=3, padding=1),
            nn.BatchNorm1d(num_features=self.mem_dim),
            nn.ReLU(),
        )
        self.conv_block = nn.Sequential(
            nn.BatchNorm1d(num_features=self.mem_dim),
            nn.ReLU(),
            nn.Conv1d(self.mem_dim, self.mem_dim,
                      kernel_size=3, padding=1),
            nn.BatchNorm1d(num_features=self.mem_dim),
            nn.ReLU(),
            nn.Conv1d(self.mem_dim, self.mem_dim,
                      kernel_size=3, padding=1),
        )

        # One ResnetBlock per halving of the sequence length; each block
        # presumably downsamples by 2 — TODO confirm against ResnetBlock.
        self.num_seq = config.getint("data", "max_seq_length")
        resnet_block_list = []
        while self.num_seq > 2:
            resnet_block_list.append(ResnetBlock(self.mem_dim))
            self.num_seq = self.num_seq // 2
        self.resnet_layer = nn.Sequential(*resnet_block_list)
        self.fc = nn.Sequential(
            nn.Linear(self.mem_dim * self.num_seq, self.output_dim),
            nn.BatchNorm1d(self.output_dim),
            nn.ReLU(inplace=True),
            nn.Linear(self.output_dim, self.output_dim)
        )

        self.criterion = MultiLabelSoftmaxLoss(config)
        self.accuracy_function = init_accuracy_function(config, *args, **params)
Ejemplo n.º 7
0
    def __init__(self, config, gpu_list, *args, **params):
        """Build the bidirectional-GRU baseline classifier.

        Vocabulary size is the number of lines in the BERT vocab file.
        The model is an embedding layer, a 4-layer bidirectional GRU, and
        a linear head emitting two logits per label for
        MultiLabelSoftmaxLoss.
        """
        super(BaselineYSGRU, self).__init__()
        self.emb_dim = 256
        self.hidden_size = 256
        self.output_dim = config.getint("model", "output_dim") * 2

        # Vocabulary size = line count of vocab.txt.  `with` guarantees
        # the handle is closed (the original leaked the open file).
        vocab_path = os.path.join(config.get("model", "bert_path"), "vocab.txt")
        with open(vocab_path, "r") as f:
            self.word_num = sum(1 for _ in f)

        self.embedding = nn.Embedding(self.word_num, self.emb_dim)

        self.gru = nn.GRU(self.emb_dim, self.hidden_size, 4, batch_first=True, bidirectional=True)
        # Bidirectional output is 2 * hidden_size wide.
        self.fc = nn.Linear(2 * self.hidden_size, self.output_dim)

        self.criterion = MultiLabelSoftmaxLoss(config)
        self.accuracy_function = init_accuracy_function(config, *args, **params)
Ejemplo n.º 8
0
    def __init__(self, config, gpu_list, *args, **params):
        """Set up the DQN-based agent.

        Reads RL hyper-parameters from *config*, builds a BERT encoder,
        linear policy/target Q-networks over the concatenated state and
        768-dim BERT feature, a replay memory, an Adam optimizer, and
        loads a pickled LightGBM predictor from disk.
        """
        super(ZMDqn, self).__init__()

        # Best-effort flag: the eval/print_details option may be absent.
        # `except Exception` (not bare `except`) so SystemExit and
        # KeyboardInterrupt still propagate.
        try:
            self.save_details = "yes" in config.get("eval", "print_details")
        except Exception:
            self.save_details = False

        self.accuracy_function = init_accuracy_function(
            config, *args, **params)
        self.learning_rate = config.getfloat("train", "learning_rate")
        self.batch_size = config.getint("rl", "batch_size")
        # Name kept as-is ("epsilone") for config/caller compatibility.
        self.epsilone = config.getfloat("rl", "epsilone")
        self.gamma = config.getfloat("rl", "gamma")
        self.target_update = config.getint("rl", "target_update")
        self.memory_capacity = config.getint("rl", "memory_capacity")
        self.bert = BertModel.from_pretrained(config.get("model", "bert_path"))

        self.n_actions = config.getint("rl", "n_actions")
        self.n_question = config.getint("rl", "n_questions")
        self.output_dim = config.getint("model", "output_dim")
        self.n_states = self.n_actions
        self.user = UserSimulator()

        self.agent_initialized = False

        self.memory = ReplayMemory(self.memory_capacity)
        # Q-network input: state vector concatenated with the 768-dim
        # BERT feature — presumably; confirm against the forward pass.
        self.policy_net = nn.Linear(self.n_states + 768, self.n_actions)
        self.target_net = nn.Linear(self.n_states + 768, self.n_actions)
        self.i_episode = 0
        self.optimizer = torch.optim.Adam(self.policy_net.parameters(),
                                          lr=self.learning_rate)
        self.loss_function = nn.MSELoss()
        self.criterion_out = MultiLabelSoftmaxLoss(config)

        # SECURITY NOTE: pickle.load executes arbitrary code from the
        # file — only load predict_net.pkl from a trusted path.  `with`
        # closes the handle (the original leaked it).
        lgb_path = config.get("ml", "lgb_path")
        with open(os.path.join(lgb_path, "predict_net.pkl"), "rb") as fh:
            self.lgb = pickle.load(fh)