Example #1
        out = model(q)
        _, pred = torch.max(out.data, 1)  # predicted class = index of the max logit
        total += label.size(0)  # batch size
        correct += (pred == label).sum()
    acc = 100 * (correct.cpu().numpy() / total)  # accuracy in percent
    return acc


if __name__ == "__main__":
    # data processing
    assert torch.cuda.is_available(), "cuda is not available"

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    dataset = custom_dataset.Custom_dataset()
    train_data = dataset.get_data()

    val_data = train_data[:100]
    train_data = train_data[100:]

    train_loader = DataLoader(
        train_data,
        batch_size=c.batch,
        shuffle=True,
        num_workers=1,  # c.cpu_processor,
        drop_last=True)

    # test_loader = DataLoader(test_data,
    #                         batch_size=c.batch,
    #                         shuffle=False,
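The snippet above opens partway through an accuracy routine; a minimal sketch of the surrounding function, assuming it iterates a DataLoader of (q, label) batches (the name evaluate, the loader argument, and the device handling are guesses, not from the source):

import torch

def evaluate(model, loader, device="cuda"):
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():  # no gradients needed during evaluation
        for q, label in loader:
            q, label = q.to(device), label.to(device)
            out = model(q)
            _, pred = torch.max(out.data, 1)
            total += label.size(0)  # batch size
            correct += (pred == label).sum()
    acc = 100 * (correct.cpu().numpy() / total)
    return acc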
Example #2
        correct += (pred == label).sum()
    acc = 100 * (correct.cpu().numpy() / total)
    return acc


if __name__ == "__main__":
    path_csv = config.path_csv

    # data processing
    start = time.time()
    vocab = v.create_vocab(path_csv=path_csv)
    word_to_index = vocab.get_data()
    print("time vocab load : ", time.time() - start)

    start = time.time()
    dataset = custom_dataset.Custom_dataset(word_to_index, path_csv=path_csv)
    train_data = dataset.get_data()
    print("데이터 준비 완료")
    print("time data load : ", time.time() - start)

    print(len(train_data))

    train_loader = DataLoader(
        train_data,
        batch_size=config.batch,
        shuffle=True,
        # num_workers=config.cpu_processor,
        drop_last=True)

    test_dataset = custom_dataset.Custom_dataset(word_to_index,
                                                 path_csv="train_data.csv")
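The example is cut off right after the test dataset is built; a plausible continuation mirroring the train loader above (the variable names and the shuffle=False / drop_last=False choices are assumptions, not from the source):

from torch.utils.data import DataLoader

test_data = test_dataset.get_data()

test_loader = DataLoader(
    test_data,
    batch_size=config.batch,
    shuffle=False,    # keep evaluation order deterministic
    drop_last=False)  # evaluate every sample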
Example #3
def __init__(self):
    path = "./train_data.csv"
    vocab = v.create_vocab(path_csv=path)
    word_to_index = vocab.get_data()
    self.dataset = c.Custom_dataset(word_to_index, path)
    self.model = torch.load("./model.pth")  # loads the whole pickled model object
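Example #3 only shows the constructor of what looks like an inference wrapper; a hedged sketch of how such a class might expose a prediction call (the predict name, the dataset's encode() hook, and the eval/no_grad handling are all assumptions):

import torch

def predict(self, query):
    # Turn the raw query into the index tensor the model expects;
    # the encode() hook on the dataset is hypothetical.
    q = self.dataset.encode(query).unsqueeze(0)
    self.model.eval()
    with torch.no_grad():
        out = self.model(q)
        _, pred = torch.max(out, 1)
    return pred.item()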
Example #4
if __name__ == "__main__":
    # data processing

    start = time.time()
    vocab = v.create_vocab(mode=config.vocab_mode)
    vocab_list, word_to_index = vocab.get_data()
    print("time vocab load : ", time.time() - start)

    start = time.time()
    glove = custom_glove()
    embedding = glove.get_data(vocab_list)
    print("time glove emb load : ", time.time() - start)

    start = time.time()
    dataset = custom_dataset.Custom_dataset(vocab_list, word_to_index)
    train_data, test_data, dev_data = dataset.get_data()
    print("time data load : ", time.time() - start)

    train_loader = DataLoader(train_data,
                              batch_size=config.batch,
                              shuffle=True,
                              num_workers=config.cpu_processor,
                              drop_last=True)

    test_loader = DataLoader(test_data,
                             batch_size=config.batch,
                             shuffle=False,
                             num_workers=config.cpu_processor,
                             drop_last=True)
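Example #4 builds a GloVe embedding matrix but the snippet ends before the matrix is used; one common way to wire such a matrix into a model, sketched under the assumption that embedding is a (vocab_size, emb_dim) float array:

import torch
import torch.nn as nn

# Wrap the pretrained vectors in an nn.Embedding layer. freeze=False
# lets them be fine-tuned during training; that is a design choice
# here, not something the source specifies.
emb_layer = nn.Embedding.from_pretrained(
    torch.as_tensor(embedding, dtype=torch.float), freeze=False)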