Example 1
# coding: utf-8
import os
import sys
sys.path.append('..')
import numpy as np
from common.config import GPU
from common.config import Device as GPU_Device
from dataset import sequence
from attention_seq2seq import AttentionSeq2seq
from ckiptagger import construct_dictionary, WS, POS, NER
from common import util

# if GPU:
#     os.environ["CUDA_VISIBLE_DEVICES"] = str(GPU_Device)

# Load the training data and build the character-level vocabulary
x_train, t_train = sequence.load_data_without_test('train_300000.txt',
                                                   shuffle=False)
char_to_id, id_to_char = sequence.get_vocab()
vocab_size = len(char_to_id)

# x_test, t_test = sequence.load_data_without_test('test-sample.txt', shuffle=False)
#
# # Reverse the input sequence
# x_test = x_test[:, ::-1]

# Set the hyperparameters
# vocab_size = len(char_to_id)
wordvec_size = 16
hidden_size = 256 * 2
batch_size = 128 * 2

model = AttentionSeq2seq(vocab_size, wordvec_size, hidden_size)
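
Example 1 stops right after constructing the model. A minimal sketch of how training could continue, assuming the Trainer and Adam classes from the same common package (they are imported in Example 2) and illustrative values for max_epoch and max_grad; the saved file name is an assumption based on the file Example 2 loads:

# Hypothetical continuation, not part of the original script
from common.optimizer import Adam
from common.trainer import Trainer

max_epoch = 10   # illustrative value
max_grad = 5.0   # illustrative value

optimizer = Adam()
trainer = Trainer(model, optimizer)

# Train on the full training set with gradient clipping, then save the parameters
trainer.fit(x_train, t_train, max_epoch=max_epoch,
            batch_size=batch_size, max_grad=max_grad)
model.save_params('medical-16.pkl')  # assumed name; Example 2 loads "medical-16.pkl"
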
Example 2
# coding: utf-8
import sys
sys.path.append('..')
import numpy as np
import matplotlib.pyplot as plt
from dataset import sequence
from common.optimizer import Adam
from common.trainer import Trainer
from common.util import eval_seq2seq
from attention_seq2seq import AttentionSeq2seq

# Load the training data and build the character-level vocabulary
x_train, t_train = sequence.load_data_without_test('ner_train.txt', shuffle=False)
char_to_id, id_to_char = sequence.get_vocab()
vocab_size = len(char_to_id)

x_test, t_test = sequence.load_data_without_test('test-sample.txt', shuffle=False)

# Reverse the input sequence
x_test = x_test[:, ::-1]

# Set the hyperparameters
# vocab_size = len(char_to_id)
wordvec_size = 16
hidden_size = 256 * 2
batch_size = 128 * 2
max_epoch = 1000
max_grad = 5.0

# Build the attention-based seq2seq model and load the pre-trained parameters
model = AttentionSeq2seq(vocab_size, wordvec_size, hidden_size)
model.load_params("medical-16.pkl")
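
Example 2 ends after loading the pre-trained parameters, although it imports Adam, Trainer, eval_seq2seq and matplotlib. A minimal sketch of the training-and-evaluation loop those imports suggest, modelled on the seq2seq training scripts from the same code base; this is an assumed continuation, not the original script:

optimizer = Adam()
trainer = Trainer(model, optimizer)

acc_list = []
for epoch in range(max_epoch):
    # One pass over the training data with gradient clipping at max_grad
    trainer.fit(x_train, t_train, max_epoch=1,
                batch_size=batch_size, max_grad=max_grad)

    # Evaluate on the test samples; x_test was reversed above, so is_reverse=True
    correct_num = 0
    for i in range(len(x_test)):
        question, correct = x_test[[i]], t_test[[i]]
        verbose = i < 10  # print the first few predictions
        correct_num += eval_seq2seq(model, question, correct,
                                    id_to_char, verbose, is_reverse=True)

    acc = float(correct_num) / len(x_test)
    acc_list.append(acc)
    print('val acc %.3f%%' % (acc * 100))

# Plot validation accuracy over epochs
x = np.arange(len(acc_list))
plt.plot(x, acc_list, marker='o')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.ylim(-0.05, 1.05)
plt.show()
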