Beispiel #1
0
# Preprocess the raw sentences and labels into index tensors.
preprocess = Preprocess(train_x, sen_len, w2v_path=w2v_path)
embedding = preprocess.make_embedding(load=True)   # pretrained word-vector matrix
train_x = preprocess.sentence_word2idx()           # sentences -> word-index tensors
y = preprocess.labels_to_tensor(y)

# Instantiate the network and move it onto the target device.
model = LSTM_Net(embedding,
                 embedding_dim=250,
                 hidden_dim=150,
                 num_layers=3,
                 dropout=0.5,
                 fix_embedding=fix_embedding)
# device is "cuda": the model trains on GPU, so the inputs fed in
# must be cuda tensors as well.
model = model.to(device)

# Hold out the tail of the training data as a validation split.
split = 180000
X_train, X_val = train_x[:split], train_x[split:]
y_train, y_val = y[:split], y[split:]

# Wrap the splits in Dataset objects for the DataLoaders.
train_dataset = TwitterDataset(X=X_train, y=y_train)
val_dataset = TwitterDataset(X=X_val, y=y_val)

# Batch the data; shuffle only the training set.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=8)

val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                         batch_size=batch_size,
                                         shuffle=False,
                                         num_workers=8)

# Start training; checkpoints are written to the current directory.
model_dir = './'
Beispiel #2
0
fix_embedding = True # freeze the embedding layer during training
bidirectional = True
batch_size = 16
model_num = 1   # number of saved checkpoints loaded for the ensemble below
epoch = 5
lr = 0.001

model_dir = './model' # model directory for checkpoint model


# Semi-supervised step: run the trained checkpoint(s) over the unlabeled
# training data to produce pseudo-label scores.
train_x_no_label = load_training_data(train_no_label)
preprocess = Preprocess(train_x_no_label, sen_len, w2v_path=w2v_path)
embedding = preprocess.make_embedding(load=True)  # pretrained word-vector matrix
train_x_no_label = preprocess.sentence_word2idx()  # sentences -> word-index tensors
train_dataset = TwitterDataset(X=train_x_no_label, y=None)  # y=None: unlabeled data
train_loader = torch.utils.data.DataLoader(dataset = train_dataset,
                                            batch_size = batch_size,
                                            shuffle = False,
                                            num_workers = 8)
outputs = []
for i in range(model_num):
	# NOTE(review): torch.load unpickles arbitrary objects — only load trusted checkpoints.
	model = torch.load(os.path.join(model_dir, 'ckpt'+str(i+1)+'.model'))
	outputs.append(testing(batch_size, train_loader, model, device))

# Soft-voting ensemble: accumulate each sample's outputs across all models.
results = []
for j in range(len(outputs[0])):
	avg = 0
	for i in range(model_num):
		avg += outputs[i][j]
	# (excerpt truncated here: presumably avg is divided by model_num and
	# thresholded into a pseudo-label — confirm against the full source)
Beispiel #3
0
                     embedding_dim=300,
                     hidden_dim=150,
                     num_layers=2,
                     dropout=0.5,
                     fix_embedding=fix_embedding)
    # NOTE(review): this excerpt is truncated — the LSTM_Net(...) call above
    # opens before the visible region.
    model = model.to(
        device
    )  # device is "cuda": train on the GPU (inputs fed in must be cuda tensors too)

    # Split off part of the training data as a validation set.
    # NOTE(review): hard-coded 180000 split point — assumes the dataset is larger; confirm.
    X_train, X_val, y_train, y_val = train_x[:180000], train_x[
        180000:], y[:180000], y[180000:]
    print(X_train[0])  # sanity-check one preprocessed sample

    # Wrap the splits in Dataset objects for the DataLoaders.
    train_dataset = TwitterDataset(X=X_train, y=y_train)
    val_dataset = TwitterDataset(X=X_val, y=y_val)

    # Batch the data; shuffle only the training set.
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=8)

    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=8)

    # Start training (the call is truncated in this excerpt).
    training(batch_size, epoch, lr, model_dir, train_loader, val_loader, model,
from torch import nn
from gensim.models import word2vec

# Paths and device for running prediction on the test set.
testing_data = os.path.join('./', 'testing_data.txt')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
w2v_path = os.path.join('./', 'w2v_all.model')  # pretrained word2vec model

sen_len = 35     # sentence length used by Preprocess (presumably pad/clip — confirm)
batch_size = 32
model_num = 1    # number of saved checkpoints to ensemble

# Preprocess the test data the same way as the training data.
test_x = load_testing_data(testing_data)
preprocess = Preprocess(test_x, sen_len, w2v_path=w2v_path)
embedding = preprocess.make_embedding(load=True)
test_x = preprocess.sentence_word2idx()
test_dataset = TwitterDataset(X=test_x, y=None)  # y=None: no labels at test time
test_loader = torch.utils.data.DataLoader(dataset = test_dataset,
                                            batch_size = batch_size,
                                            shuffle = False,
                                            num_workers = 8)
model_dir = './model'
outputs = []
for i in range(model_num):
	# NOTE(review): torch.load unpickles arbitrary objects — only load trusted checkpoints.
	model = torch.load(os.path.join(model_dir, 'ckpt'+str(i+1)+'.model'))
	outputs.append(testing(batch_size, test_loader, model, device))

# Soft-voting ensemble: combine each sample's outputs across models.
results = []
for j in range(len(outputs[0])):
	avg = 0
	for i in range(model_num):  # (excerpt truncated: inner loop body not visible)
Beispiel #5
0
    model = LSTM_Net(embedding,
                     embedding_dim=250,
                     hidden_dim=200,
                     num_layers=2,
                     dropout=0.5,
                     fix_embedding=fix_embedding)
    model = model.to(
        device
    )  # device is "cuda": train on the GPU (inputs fed in must be cuda tensors too)

    # Split off part of the training data as a validation set.
    X_train, X_val, y_train, y_val = train_x_2[:180000], train_x_2[
        180000:], y[:180000], y[180000:]
    #X_train, X_val, y_train, y_val = train_x_2[:20000], train_x_2[20000:40000], y[:20000], y[20000:40000]
    # Wrap the splits in Dataset objects for the DataLoaders.
    train_dataset = TwitterDataset(X=X_train, y=y_train)
    val_dataset = TwitterDataset(X=X_val, y=y_val)

    # Batch the data; num_workers=0 keeps loading in the main process.
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=0)

    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=0)

    # Start training (the call is truncated in this excerpt).
    train.training(batch_size, epoch, lr, model_dir, train_loader, val_loader,