Code example #1
    def save_params(self, file_name=None):
        if file_name is None:
            file_name = self.__class__.__name__ + '.pkl'

        # Cast to half precision to shrink the saved file
        params = [p.astype(cp.float16) for p in self.params]
        # Move parameters from GPU memory to host memory before pickling
        params = [to_cpu(p) for p in params]

        with open(file_name, 'wb') as f:
            pickle.dump(params, f)
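
A matching loader restores this pickle. Below is a minimal counterpart sketch, not taken from the source; it assumes the file holds the parameters in the same order as self.params and that the model has been rebuilt with the same shapes:

    def load_params(self, file_name=None):
        if file_name is None:
            file_name = self.__class__.__name__ + '.pkl'

        with open(file_name, 'rb') as f:
            params = pickle.load(f)

        # Cast back to full precision and copy into the live parameter arrays
        for i, param in enumerate(self.params):
            param[...] = params[i].astype('f')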
Code example #2
File: train.py Project: araki214/0-Deeplearning2
# Imports assumed from the deep-learning-from-scratch-2 repository layout
import sys
sys.path.append('..')
import pickle
import numpy as np
from common import config
from common.trainer import Trainer
from common.optimizer import Adam
from common.util import create_contexts_target, to_cpu, to_gpu
from cbow import CBOW
from dataset import ptb

# Hyperparameter settings (values as in code example #4 below)
window_size = 5
hidden_size = 100
batch_size = 100
max_epoch = 10

# Load the data
corpus, wordtoid, idtoword = ptb.load_data('train')
vocab_size = len(wordtoid)

contexts, target = create_contexts_target(corpus, window_size)
if config.GPU:
    contexts, target = to_gpu(contexts), to_gpu(target)

# Build the model, optimizer, and trainer
model = CBOW(vocab_size, hidden_size, window_size, corpus)
optimizer = Adam()
trainer = Trainer(model, optimizer)

# Start training
trainer.fit(contexts, target, max_epoch, batch_size)
trainer.plot()

# Save the data needed for later use
word_vecs = model.word_vecs

if config.GPU:
    word_vecs = to_cpu(word_vecs)
params = {}
params['word_vecs'] = word_vecs.astype(np.float16)
params['wordtoid'] = wordtoid
params['idtoword'] = idtoword
pkl_file = 'cbow_params.pkl'
with open(pkl_file, 'wb') as f:
    pickle.dump(params, f, -1)
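
The saved pickle can then be reloaded later. A minimal usage sketch, assuming the key names written above:

import pickle

with open('cbow_params.pkl', 'rb') as f:
    params = pickle.load(f)

word_vecs = params['word_vecs']   # (vocab_size, hidden_size) float16 matrix
wordtoid = params['wordtoid']

# Look up the distributed representation of a single word
vec = word_vecs[wordtoid['car']]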
Code example #3
    # Hyperparameter settings (window_size and hidden_size assumed; values match code example #4)
    window_size = 5
    hidden_size = 100
    batch_size = 100
    max_epoch = 10

    # Load the data
    corpus, word_to_id, id_to_word = ptb.load_data('train')
    vocab_size = len(word_to_id)

    contexts, target = create_contexts_target(corpus, window_size)
    if config.GPU:
        contexts, target = to_gpu(contexts), to_gpu(target)

    model = CBOW(vocab_size, hidden_size, window_size, corpus)
    optimizer = Adam()
    trainer = Trainer(model, optimizer)

    # Start training
    trainer.fit(contexts, target, max_epoch, batch_size)
    trainer.plot()

    # Save the data needed for later use
    word_vecs = model.word_vecs
    if config.GPU:
        word_vecs = to_cpu(word_vecs)

    params = {}
    params['word_vecs'] = word_vecs.astype(np.float16)
    params['word_to_id'] = word_to_id
    params['id_to_word'] = id_to_word
    pkl_file = 'cbow_params.pkl'
    with open(pkl_file, 'wb') as f:
        pickle.dump(params, f, -1)
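
This is the format the book's evaluation script reads back. A short sketch of that use, assuming most_similar from the book's common/util:

import pickle
from common.util import most_similar

with open('cbow_params.pkl', 'rb') as f:
    params = pickle.load(f)
word_vecs = params['word_vecs']
word_to_id = params['word_to_id']
id_to_word = params['id_to_word']

# Print the five closest words to each query in the learned vector space
for query in ['you', 'year', 'car', 'toyota']:
    most_similar(query, word_to_id, id_to_word, word_vecs, top=5)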
Code example #4
# Imports assumed from the deep-learning-from-scratch-2 repository layout
import sys
sys.path.append('..')
from common import config
from common.trainer import Trainer
from common.optimizer import Adam
from common.util import create_contexts_target, to_cpu, to_gpu
from cbow import CBOW
from dataset import ptb

# Hyperparameter settings
window_size = 5
hidden_size = 100
batch_size = 100
max_epoch = 10

# Load the data
corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)

if not config.GPU:
    contexts, target = create_contexts_target(corpus, window_size=window_size)
else:
    # On GPU, build the context/target arrays on the CPU first, then move them over
    corpus = to_cpu(corpus)
    contexts, target = create_contexts_target(corpus, window_size=window_size)
    contexts, target = to_gpu(contexts), to_gpu(target)

# Build the model, optimizer, and trainer
model = CBOW(vocab_size, hidden_size, window_size, corpus)
# model = SkipGram(vocab_size, hidden_size, window_size, corpus)
optimizer = Adam()
trainer = Trainer(model, optimizer)

# Start training
trainer.fit(contexts, target, max_epoch, batch_size)
trainer.plot()

# Save the data needed for later use
word_vecs = model.word_vecs