Exemplo n.º 1
0
 def _get_batch_input(self, batch_data):
     """Collate *batch_data* into tensors and wrap each one in a Variable.

     Uses ``self.args.use_char_emb`` to control character-embedding
     collation in ``batchify``; moves tensors to the GPU when
     ``self.use_cuda`` is set.
     """
     batch_input = batchify(batch_data, self.args.use_char_emb)
     if self.use_cuda:
         # `async` became a reserved keyword in Python 3.7, so
         # `x.cuda(async=True)` is a SyntaxError; PyTorch >= 0.4 renamed
         # the argument to `non_blocking`.
         batch_input = [Variable(x.cuda(non_blocking=True)) for x in batch_input]
     else:
         batch_input = [Variable(x) for x in batch_input]
     return batch_input
Exemplo n.º 2
0
    def _iter_data(self, data):
        """Generate mini-batches of model-ready tensors from *data*.

        Each batch holds up to ``self.batch_size`` examples; the last one
        may be shorter. Tensors are transferred to the GPU asynchronously
        when ``self.use_cuda`` is set.
        """
        for lo in range(0, len(data), self.batch_size):
            tensors = batchify(data[lo:lo + self.batch_size])

            # Transfer to GPU
            if self.use_cuda:
                tensors = [Variable(t.cuda(non_blocking=True)) for t in tensors]
            else:
                tensors = [Variable(t) for t in tensors]
            yield tensors
Exemplo n.º 3
0
    def _iter_data(self, data):
        """Yield mini-batches of tensors built from *data* (CPU only).

        The previous version looped over ``range(num_iter - 1)`` with an
        inline ``#hack remove -1`` note, silently dropping the final
        (possibly short) batch; the ``-1`` is removed here so every
        example is yielded. GPU transfer was commented out in the
        original and is intentionally not reintroduced.
        """
        num_iter = (len(data) + self.batch_size - 1) // self.batch_size
        for i in range(num_iter):
            start_idx = i * self.batch_size
            batch_data = data[start_idx:(start_idx + self.batch_size)]
            # batchify converts the raw examples into model-ready tensors.
            yield batchify(batch_data)
Exemplo n.º 4
0
    def _iter_data(self, data):
        """Yield batches pairing the fixed *choice_data* with slices of NLI data.

        *data* is a ``(choice_data, nli_data)`` pair; only ``nli_data`` is
        chunked into batches of ``self.batch_size``, and ``choice_data`` is
        passed whole to ``batchify`` for every batch. Tensors are moved to
        the GPU when ``self.use_cuda`` is set.
        """
        choice_data, nli_data = data
        num_iter = (len(nli_data) + self.batch_size - 1) // self.batch_size
        for i in range(num_iter):
            start_idx = i * self.batch_size
            batch_data = nli_data[start_idx:(start_idx + self.batch_size)]
            batch_input = batchify(choice_data, batch_data)

            # Transfer to GPU. `async` is a reserved keyword in Python 3.7+,
            # so `cuda(async=True)` is a SyntaxError; PyTorch >= 0.4 renamed
            # the argument to `non_blocking`.
            if self.use_cuda:
                batch_input = [Variable(x.cuda(non_blocking=True)) for x in batch_input]
            else:
                batch_input = [Variable(x) for x in batch_input]
            yield batch_input
Exemplo n.º 5
0
    def _iter_data(self, data):
        """Yield mini-batches of tensors from *data*, collated with ELMo.

        Each batch holds up to ``self.batch_size`` examples; ``self.elmo``
        is forwarded to ``batchify`` for embedding lookup. Tensors are
        moved to the GPU when ``self.use_cuda`` is set.
        """
        num_iter = (len(data) + self.batch_size - 1) // self.batch_size
        for i in range(num_iter):
            start_idx = i * self.batch_size
            batch_data = data[start_idx:(start_idx + self.batch_size)]

            # Convert the raw batch into tensors (ELMo-aware collation).
            batch_input = batchify(batch_data, self.elmo)

            # Transfer to GPU. `async` is a reserved keyword in Python 3.7+,
            # so `cuda(async=True)` is a SyntaxError; PyTorch >= 0.4 renamed
            # the argument to `non_blocking`.
            if self.use_cuda:
                batch_input = [Variable(x.cuda(non_blocking=True)) for x in batch_input]
            else:
                batch_input = [Variable(x) for x in batch_input]

            yield batch_input
Exemplo n.º 6
0
    ### create model
    # Build the classifier on CPU (use_gpu=False is hard-coded here).
    model = LSTMClassifier(embedding_dim=embedding_dim,
                           hidden_dim=hidden_dim,
                           label_size=n_label,
                           batch_size=batch_size,
                           use_gpu=False,
                           dropout_emb=dropout_emb)

    ### data processing
    train_data = load_data(processed_train)
    dev_data = load_data(processed_dev)

    # Ceiling division: number of batches needed to cover train_data.
    num_iter = (len(train_data) + batch_size - 1) // batch_size

    # NOTE(review): this loop `break`s after the first iteration, so it only
    # smoke-tests batchify on one batch — presumably leftover debugging;
    # confirm whether it should be removed.
    for i in range(num_iter):
        p, p_mask, q, q_mask, c, c_mask, y = batchify(
            train_data[i * batch_size:(i + 1) * batch_size])
        # print("passage", p, "\n", len(p), type(p))
        # print("passage mask", p_mask, "\n", len(p_mask), type(p_mask))
        # print("choice", c, "\n", len(c), type(c))
        # print("choice mask", c_mask, "\n", len(c_mask), type(c_mask))
        # print("question ", q, "\n", len(q), type(q))
        # print("question mask", q_mask, "\n", len(q_mask), type(q_mask))

        break

    for i in range(epochs):
        print('Epoch %d...' % i)
        if i == 0:
            dev_acc = model.evaluate(dev_data)
            print('Dev accuracy: %f' % dev_acc)
        start_time = time.time()