def main():
    parser = argparse.ArgumentParser(description='Selective Encoding for Abstractive Sentence Summarization in DyNet')

    parser.add_argument('--gpu', type=str, default='0', help='GPU ID to use. For cpu, set -1 [default: 0]')
    parser.add_argument('--n_epochs', type=int, default=3, help='Number of epochs [default: 3]')
    parser.add_argument('--n_train', type=int, default=3803957, help='Number of training data (up to 3803957 in gigaword) [default: 3803957]')
    parser.add_argument('--n_valid', type=int, default=189651, help='Number of validation data (up to 189651 in gigaword) [default: 189651]')
    parser.add_argument('--batch_size', type=int, default=32, help='Mini batch size [default: 32]')
    parser.add_argument('--vocab_size', type=int, default=124404, help='Vocabulary size [default: 124404]')
    parser.add_argument('--emb_dim', type=int, default=256, help='Embedding size [default: 256]')
    parser.add_argument('--hid_dim', type=int, default=256, help='Hidden state size [default: 256]')
    parser.add_argument('--maxout_dim', type=int, default=2, help='Maxout size [default: 2]')
    parser.add_argument('--alloc_mem', type=int, default=10000, help='Amount of memory to allocate [mb] [default: 10000]')
    args = parser.parse_args()
    print(args)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    N_EPOCHS = args.n_epochs
    N_TRAIN = args.n_train
    N_VALID = args.n_valid
    BATCH_SIZE = args.batch_size
    VOCAB_SIZE = args.vocab_size
    EMB_DIM = args.emb_dim
    HID_DIM = args.hid_dim
    MAXOUT_DIM = args.maxout_dim
    ALLOC_MEM = args.alloc_mem

    # File paths
    TRAIN_X_FILE = './data/train.article.txt'
    TRAIN_Y_FILE = './data/train.title.txt'
    VALID_X_FILE = './data/valid.article.filter.txt'
    VALID_Y_FILE = './data/valid.title.filter.txt'

    # DyNet setting
    dyparams = dy.DynetParams()
    dyparams.set_autobatch(True)
    dyparams.set_random_seed(RANDOM_SEED)
    dyparams.set_mem(ALLOC_MEM)
    dyparams.init()

    # Build dataset
    dataset = Dataset(
        TRAIN_X_FILE,
        TRAIN_Y_FILE,
        VALID_X_FILE,
        VALID_Y_FILE,
        vocab_size=VOCAB_SIZE,
        batch_size=BATCH_SIZE,
        n_train=N_TRAIN,
        n_valid=N_VALID
    )
    VOCAB_SIZE = len(dataset.w2i)
    print('VOCAB_SIZE', VOCAB_SIZE)

    # Build model
    model = dy.Model()
    trainer = dy.AdamTrainer(model)

    V = model.add_lookup_parameters((VOCAB_SIZE, EMB_DIM))
    encoder = SelectiveBiGRU(model, EMB_DIM, HID_DIM)
    decoder = AttentionalGRU(model, EMB_DIM, HID_DIM, MAXOUT_DIM, VOCAB_SIZE)

    # Train model
    start_time = time.time()
    for epoch in range(N_EPOCHS):
        # Train
        loss_all_train = []
        dataset.reset_train_iter()
        for train_x_mb, train_y_mb in tqdm(dataset.train_iter):
            # Create a new computation graph
            dy.renew_cg()
            associate_parameters([encoder, decoder])

            losses = []
            for x, t in zip(train_x_mb, train_y_mb):
                t_in, t_out = t[:-1], t[1:]

                # Encoder
                x_embs = [dy.lookup(V, x_t) for x_t in x]
                hp, hb_1 = encoder(x_embs)

                # Decoder
                decoder.set_initial_states(hp, hb_1)
                t_embs = [dy.lookup(V, t_t) for t_t in t_in]
                y = decoder(t_embs)

                # Loss
                loss = dy.esum(
                    [dy.pickneglogsoftmax(y_t, t_t) for y_t, t_t in zip(y, t_out)]
                )
                losses.append(loss)

            mb_loss = dy.average(losses)

            # Forward prop
            loss_all_train.append(mb_loss.value())

            # Backward prop
            mb_loss.backward()
            trainer.update()

        # Valid
        loss_all_valid = []
        dataset.reset_valid_iter()
        for valid_x_mb, valid_y_mb in dataset.valid_iter:
            # Create a new computation graph
            dy.renew_cg()
            associate_parameters([encoder, decoder])

            losses = []
            for x, t in zip(valid_x_mb, valid_y_mb):
                t_in, t_out = t[:-1], t[1:]

                # Encoder
                x_embs = [dy.lookup(V, x_t) for x_t in x]
                hp, hb_1 = encoder(x_embs)

                # Decoder
                decoder.set_initial_states(hp, hb_1)
                t_embs = [dy.lookup(V, t_t) for t_t in t_in]
                y = decoder(t_embs)

                # Loss
                loss = dy.esum(
                    [dy.pickneglogsoftmax(y_t, t_t) for y_t, t_t in zip(y, t_out)]
                )
                losses.append(loss)

            mb_loss = dy.average(losses)

            # Forward prop
            loss_all_valid.append(mb_loss.value())

        print('EPOCH: %d, Train Loss: %.3f, Valid Loss: %.3f, Time: %.3f[s]' % (
            epoch+1,
            np.mean(loss_all_train),
            np.mean(loss_all_valid),
            time.time()-start_time
        ))

        # Save model
        dy.save('./model_e'+str(epoch+1), [V, encoder, decoder])
        with open('./w2i.dump', 'wb') as f_w2i, open('./i2w.dump', 'wb') as f_i2w:
            pickle.dump(dataset.w2i, f_w2i)
            pickle.dump(dataset.i2w, f_i2w)
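# `associate_parameters` is called above but not defined in this excerpt. A
# minimal sketch of what it presumably does (an assumption, mirroring the
# per-layer `associate_parameters()` methods used by the other scripts below):
# after each dy.renew_cg(), every layer re-binds its parameters to expressions
# in the fresh computation graph.
def associate_parameters(layers):
    for layer in layers:
        layer.associate_parameters()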
def main():
    parser = argparse.ArgumentParser(description='Convolutional Neural Networks for Sentence Classification in DyNet')

    parser.add_argument('--gpu', type=int, default=0, help='GPU ID to use. For cpu, set -1 [default: 0]')
    parser.add_argument('--train_x_path', type=str, default='./data/train_x.txt', help='File path of train x data [default: `./data/train_x.txt`]')
    parser.add_argument('--train_y_path', type=str, default='./data/train_y.txt', help='File path of train y data [default: `./data/train_y.txt`]')
    parser.add_argument('--valid_x_path', type=str, default='./data/valid_x.txt', help='File path of valid x data [default: `./data/valid_x.txt`]')
    parser.add_argument('--valid_y_path', type=str, default='./data/valid_y.txt', help='File path of valid y data [default: `./data/valid_y.txt`]')
    parser.add_argument('--n_epochs', type=int, default=10, help='Number of epochs [default: 10]')
    parser.add_argument('--batch_size', type=int, default=64, help='Mini batch size [default: 64]')
    parser.add_argument('--win_sizes', type=int, nargs='*', default=[3, 4, 5], help='Window sizes of filters [default: [3, 4, 5]]')
    parser.add_argument('--num_fil', type=int, default=100, help='Number of filters in each window size [default: 100]')
    parser.add_argument('--s', type=float, default=3.0, help='L2 norm constraint on w [default: 3.0]')
    parser.add_argument('--dropout_prob', type=float, default=0.5, help='Dropout probability [default: 0.5]')
    parser.add_argument('--v_strategy', type=str, default='static', help='Embedding strategy. rand: Random initialization. static: Load pretrained embeddings and do not update during the training. non-static: Load pretrained embeddings and update during the training. multichannel: Load pretrained embeddings as two channels, one static and one updated. [default: static]')
    parser.add_argument('--alloc_mem', type=int, default=4096, help='Amount of memory to allocate [mb] [default: 4096]')
    args = parser.parse_args()
    print(args)

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    N_EPOCHS = args.n_epochs
    WIN_SIZES = args.win_sizes
    BATCH_SIZE = args.batch_size
    EMB_DIM = 300
    OUT_DIM = 1
    L2_NORM_LIM = args.s
    NUM_FIL = args.num_fil
    DROPOUT_PROB = args.dropout_prob
    V_STRATEGY = args.v_strategy
    ALLOC_MEM = args.alloc_mem

    if V_STRATEGY in ['rand', 'static', 'non-static']:
        NUM_CHA = 1
    else:  # multichannel
        NUM_CHA = 2

    # File paths
    W2V_PATH = './GoogleNews-vectors-negative300.bin'
    TRAIN_X_PATH = args.train_x_path
    TRAIN_Y_PATH = args.train_y_path
    VALID_X_PATH = args.valid_x_path
    VALID_Y_PATH = args.valid_y_path

    # DyNet setting
    dyparams = dy.DynetParams()
    dyparams.set_random_seed(RANDOM_SEED)
    dyparams.set_mem(ALLOC_MEM)
    dyparams.init()

    # Load pretrained embeddings
    pretrained_model = gensim.models.KeyedVectors.load_word2vec_format(W2V_PATH, binary=True)
    vocab = pretrained_model.wv.vocab.keys()
    w2v = pretrained_model.wv

    # Build dataset
    w2c = build_w2c(TRAIN_X_PATH, vocab=vocab)
    w2i, i2w = build_w2i(TRAIN_X_PATH, w2c, unk='unk')
    train_x, train_y = build_dataset(TRAIN_X_PATH, TRAIN_Y_PATH, w2i, unk='unk')
    valid_x, valid_y = build_dataset(VALID_X_PATH, VALID_Y_PATH, w2i, unk='unk')

    train_x, train_y = sort_data_by_length(train_x, train_y)
    valid_x, valid_y = sort_data_by_length(valid_x, valid_y)

    VOCAB_SIZE = len(w2i)
    print('VOCAB_SIZE:', VOCAB_SIZE)

    V_init = init_V(w2v, w2i)

    with open(os.path.join(RESULTS_DIR, './w2i.dump'), 'wb') as f_w2i, open(os.path.join(RESULTS_DIR, './i2w.dump'), 'wb') as f_i2w:
        pickle.dump(w2i, f_w2i)
        pickle.dump(i2w, f_i2w)

    # Build model
    model = dy.Model()
    trainer = dy.AdamTrainer(model)

    # V1
    V1 = model.add_lookup_parameters((VOCAB_SIZE, EMB_DIM))
    if V_STRATEGY in ['static', 'non-static', 'multichannel']:
        V1.init_from_array(V_init)
    if V_STRATEGY in ['static', 'multichannel']:
        V1_UPDATE = False
    else:  # 'rand', 'non-static'
        V1_UPDATE = True
    make_emb_zero(V1, [w2i['<s>'], w2i['</s>']], EMB_DIM)

    # V2
    if V_STRATEGY == 'multichannel':
        V2 = model.add_lookup_parameters((VOCAB_SIZE, EMB_DIM))
        V2.init_from_array(V_init)
        V2_UPDATE = True
        make_emb_zero(V2, [w2i['<s>'], w2i['</s>']], EMB_DIM)

    layers = [
        CNNText(model, EMB_DIM, WIN_SIZES, NUM_CHA, NUM_FIL, dy.tanh, DROPOUT_PROB),
        Dense(model, 3*NUM_FIL, OUT_DIM, dy.logistic)  # 3 == number of window sizes (assumes the default [3, 4, 5])
    ]

    # Train model
    n_batches_train = math.ceil(len(train_x)/BATCH_SIZE)
    n_batches_valid = math.ceil(len(valid_x)/BATCH_SIZE)
    start_time = time.time()

    for epoch in range(N_EPOCHS):
        # Train
        loss_all_train = []
        pred_all_train = []
        for i in tqdm(range(n_batches_train)):
            # Create a new computation graph
            dy.renew_cg()
            associate_parameters(layers)

            # Create a mini batch
            start = i*BATCH_SIZE
            end = start + BATCH_SIZE
            x = build_batch(train_x[start:end], w2i, max(WIN_SIZES)).T
            t = np.array(train_y[start:end])
            sen_len = x.shape[0]

            if V_STRATEGY in ['rand', 'static', 'non-static']:
                x_embs = dy.concatenate_cols([dy.lookup_batch(V1, x_t, update=V1_UPDATE) for x_t in x])
                x_embs = dy.transpose(x_embs)
                x_embs = dy.reshape(x_embs, (sen_len, EMB_DIM, 1))
            else:  # multichannel
                x_embs1 = dy.concatenate_cols([dy.lookup_batch(V1, x_t, update=V1_UPDATE) for x_t in x])
                x_embs2 = dy.concatenate_cols([dy.lookup_batch(V2, x_t, update=V2_UPDATE) for x_t in x])
                x_embs1 = dy.transpose(x_embs1)
                x_embs2 = dy.transpose(x_embs2)
                x_embs = dy.concatenate([x_embs1, x_embs2], d=2)

            t = dy.inputTensor(t, batched=True)
            y = forwards(layers, x_embs, test=False)
            mb_loss = dy.mean_batches(dy.binary_log_loss(y, t))

            # Forward prop
            loss_all_train.append(mb_loss.value())
            pred_all_train.extend(list(binary_pred(y.npvalue().flatten())))

            # Backward prop
            mb_loss.backward()
            trainer.update()

            # L2 norm constraint
            layers[1].scale_W(L2_NORM_LIM)

            # Make padding embs zero
            if V_STRATEGY in ['rand', 'non-static']:
                make_emb_zero(V1, [w2i['<s>'], w2i['</s>']], EMB_DIM)
            elif V_STRATEGY in ['multichannel']:
                make_emb_zero(V2, [w2i['<s>'], w2i['</s>']], EMB_DIM)

        # Valid
        loss_all_valid = []
        pred_all_valid = []
        for i in range(n_batches_valid):
            # Create a new computation graph
            dy.renew_cg()
            associate_parameters(layers)

            # Create a mini batch
            start = i*BATCH_SIZE
            end = start + BATCH_SIZE
            x = build_batch(valid_x[start:end], w2i, max(WIN_SIZES)).T
            t = np.array(valid_y[start:end])
            sen_len = x.shape[0]

            if V_STRATEGY in ['rand', 'static', 'non-static']:
                x_embs = dy.concatenate_cols([dy.lookup_batch(V1, x_t, update=V1_UPDATE) for x_t in x])
                x_embs = dy.transpose(x_embs)
                x_embs = dy.reshape(x_embs, (sen_len, EMB_DIM, 1))
            else:  # multichannel
                x_embs1 = dy.concatenate_cols([dy.lookup_batch(V1, x_t, update=V1_UPDATE) for x_t in x])
                x_embs2 = dy.concatenate_cols([dy.lookup_batch(V2, x_t, update=V2_UPDATE) for x_t in x])
                x_embs1 = dy.transpose(x_embs1)
                x_embs2 = dy.transpose(x_embs2)
                x_embs = dy.concatenate([x_embs1, x_embs2], d=2)

            t = dy.inputTensor(t, batched=True)
            y = forwards(layers, x_embs, test=True)
            mb_loss = dy.mean_batches(dy.binary_log_loss(y, t))

            # Forward prop
            loss_all_valid.append(mb_loss.value())
            pred_all_valid.extend(list(binary_pred(y.npvalue().flatten())))

        print('EPOCH: %d, Train Loss: %.3f (F1: %.3f, Acc: %.3f), Valid Loss: %.3f (F1: %.3f, Acc: %.3f), Time: %.3f[s]' % (
            epoch+1,
            np.mean(loss_all_train),
            f1_score(train_y, pred_all_train),
            accuracy_score(train_y, pred_all_train),
            np.mean(loss_all_valid),
            f1_score(valid_y, pred_all_valid),
            accuracy_score(valid_y, pred_all_valid),
            time.time()-start_time,
        ))

        # Save model
        if V_STRATEGY in ['rand', 'static', 'non-static']:
            dy.save(os.path.join(RESULTS_DIR, './model_e'+str(epoch+1)), [V1] + layers)
        else:
            dy.save(os.path.join(RESULTS_DIR, './model_e'+str(epoch+1)), [V1, V2] + layers)
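# `binary_pred` and `make_emb_zero` are used above but not defined in this
# excerpt. Minimal sketches under the assumption of their usual behaviour
# (thresholding sigmoid outputs, and zeroing padding-token embeddings):
import numpy as np

def binary_pred(probs):
    # Map sigmoid outputs to hard 0/1 labels with a 0.5 threshold.
    return (probs > 0.5).astype(np.int32)

def make_emb_zero(V, pad_ids, emb_dim):
    # Reset the lookup-table rows of the padding symbols ('<s>', '</s>')
    # so that padding never contributes to the convolutions.
    for idx in pad_ids:
        V.init_row(idx, np.zeros(emb_dim))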
def save(self, fname):
    # Serialize every component parameter with DyNet's native saver.
    dy.save(fname, [v for k, v in self._parameters.items()])
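# The matching load is not shown in the excerpt; a sketch of the inverse
# operation (an assumption: the dy.Model is kept on `self._model`, and the dict
# preserves insertion order so restored objects line up with how they were saved):
def load(self, fname):
    for key, obj in zip(self._parameters.keys(), dy.load(fname, self._model)):
        self._parameters[key] = obj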
def train_network(params, ntags, train_data, dev_set, telemetry_file, randstring, very_common_tag=-1):
    global MIN_ACC
    prev_acc = 0
    m = params[0]
    # NOTE: time.clock() was removed in Python 3.8; time.perf_counter() is the
    # drop-in replacement on newer interpreters.
    t0 = time.clock()

    # train the network
    trainer = dy.SimpleSGDTrainer(m)
    total_loss = 0
    seen_instances = 0
    train_good = 0
    very_common_tag_count = 0
    for x_data, train_y in train_data:
        dy.renew_cg()
        output = build_network(params, x_data)
        # l2 regularization did not look promising at all, so it's commented out
        loss = -dy.log(output[train_y])  # + REG_LAMBDA * sum([dy.l2_norm(p) for p in params[2:]])
        if train_y == np.argmax(output.npvalue()):
            train_good += 1
        seen_instances += 1
        total_loss += loss.value()
        loss.backward()
        trainer.update()

        if seen_instances % 20000 == 0:
            # measure elapsed seconds
            secs = time.clock() - t0
            t0 = time.clock()

            good = case = 0
            max_dev_instances = 70 * 1000
            dev_instances = 0
            for x_tuple, dev_y in dev_set:
                output = build_network(params, x_tuple)
                y_hat = np.argmax(output.npvalue())
                case += 1
                if y_hat == dev_y and y_hat == very_common_tag:
                    case -= 1  # don't count this case
                    very_common_tag_count += 1
                elif y_hat == dev_y:
                    good += 1
                dev_instances += 1
                if dev_instances >= max_dev_instances:
                    break
            acc = float(good) / case
            print("iterations: {}. train_accuracy: {} accuracy: {} avg loss: {} secs per 1000: {}".format(
                seen_instances, float(train_good) / 20000, acc, total_loss / (seen_instances + 1), secs / 20))
            train_good = 0
            if acc > MIN_ACC and acc > prev_acc:
                print("saving.")
                dy.save("params_" + randstring, list(params)[1:])
                prev_acc = acc
            telemetry_file.write("{}\t{}\t{}\t{}\n".format(
                seen_instances, acc, total_loss / (seen_instances + 1), secs / 20))
    print("very common tag count: {}".format(very_common_tag_count))
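# Hypothetical invocation sketch (the helper names and data layout here are
# assumptions, not part of the original code):
# params = build_params(m, ntags)          # e.g. (model, E, W1, b1, W2, b2)
# with open("telemetry_" + randstring + ".tsv", "w") as telemetry_file:
#     train_network(params, ntags, train_data, dev_set,
#                   telemetry_file, randstring, very_common_tag=tag2idx["O"])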
def main():
    parser = argparse.ArgumentParser(description='A Neural Attention Model for Abstractive Sentence Summarization in DyNet')

    parser.add_argument('--gpu', type=str, default='0', help='GPU ID to use. For cpu, set -1 [default: 0]')
    parser.add_argument('--n_epochs', type=int, default=10, help='Number of epochs [default: 10]')
    parser.add_argument('--n_train', type=int, default=3803957, help='Number of training data (up to 3803957 in gigaword) [default: 3803957]')
    parser.add_argument('--n_valid', type=int, default=189651, help='Number of validation data (up to 189651 in gigaword) [default: 189651]')
    parser.add_argument('--batch_size', type=int, default=32, help='Mini batch size [default: 32]')
    parser.add_argument('--vocab_size', type=int, default=60000, help='Vocabulary size [default: 60000]')
    parser.add_argument('--emb_dim', type=int, default=256, help='Embedding size [default: 256]')
    parser.add_argument('--hid_dim', type=int, default=256, help='Hidden state size [default: 256]')
    parser.add_argument('--encoder_type', type=str, default='attention', help='Encoder type. bow: Bag-of-words encoder. attention: Attention-based encoder [default: attention]')
    parser.add_argument('--c', type=int, default=5, help='Window size in neural language model [default: 5]')
    parser.add_argument('--q', type=int, default=2, help='Window size in attention-based encoder [default: 2]')
    parser.add_argument('--alloc_mem', type=int, default=4096, help='Amount of memory to allocate [mb] [default: 4096]')
    args = parser.parse_args()
    print(args)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    N_EPOCHS = args.n_epochs
    N_TRAIN = args.n_train
    N_VALID = args.n_valid
    BATCH_SIZE = args.batch_size
    VOCAB_SIZE = args.vocab_size
    EMB_DIM = args.emb_dim
    HID_DIM = args.hid_dim
    ENCODER_TYPE = args.encoder_type
    C = args.c
    Q = args.q
    ALLOC_MEM = args.alloc_mem

    # File paths
    TRAIN_X_FILE = './data/train.article.txt'
    TRAIN_Y_FILE = './data/train.title.txt'
    VALID_X_FILE = './data/valid.article.filter.txt'
    VALID_Y_FILE = './data/valid.title.filter.txt'

    # DyNet setting
    dyparams = dy.DynetParams()
    dyparams.set_autobatch(True)
    dyparams.set_random_seed(RANDOM_STATE)
    dyparams.set_mem(ALLOC_MEM)
    dyparams.init()

    # Build dataset
    w2c = build_word2count(TRAIN_X_FILE, n_data=N_TRAIN)
    w2c = build_word2count(TRAIN_Y_FILE, w2c=w2c, n_data=N_TRAIN)

    train_X, w2i, i2w = build_dataset(TRAIN_X_FILE, w2c=w2c, padid=False, eos=True, unksym='<unk>', target=False, n_data=N_TRAIN, vocab_size=VOCAB_SIZE)
    train_y, _, _ = build_dataset(TRAIN_Y_FILE, w2i=w2i, target=True, n_data=N_TRAIN)
    valid_X, _, _ = build_dataset(VALID_X_FILE, w2i=w2i, target=False, n_data=N_VALID)
    valid_y, _, _ = build_dataset(VALID_Y_FILE, w2i=w2i, target=True, n_data=N_VALID)

    VOCAB_SIZE = len(w2i)
    OUT_DIM = VOCAB_SIZE
    print('VOCAB_SIZE:', VOCAB_SIZE)

    # Build model
    model = dy.Model()
    trainer = dy.AdamTrainer(model)

    rush_abs = ABS(model, EMB_DIM, HID_DIM, VOCAB_SIZE, Q, C, encoder_type=ENCODER_TYPE)

    # Padding
    train_y = [[w2i['<s>']]*(C-1) + instance_y for instance_y in train_y]
    valid_y = [[w2i['<s>']]*(C-1) + instance_y for instance_y in valid_y]

    # Train model
    n_batches_train = math.ceil(len(train_X)/BATCH_SIZE)
    n_batches_valid = math.ceil(len(valid_X)/BATCH_SIZE)
    start_time = time.time()

    for epoch in range(N_EPOCHS):
        # Train
        train_X, train_y = shuffle(train_X, train_y)
        loss_all_train = []
        for i in tqdm(range(n_batches_train)):
            # Create a new computation graph
            dy.renew_cg()
            rush_abs.associate_parameters()

            # Create a mini batch
            start = i*BATCH_SIZE
            end = start + BATCH_SIZE
            train_X_mb = train_X[start:end]
            train_y_mb = train_y[start:end]

            losses = []
            for x, t in zip(train_X_mb, train_y_mb):
                t_in, t_out = t[:-1], t[C:]

                y = rush_abs(x, t_in)
                loss = dy.esum([dy.pickneglogsoftmax(y_t, t_t) for y_t, t_t in zip(y, t_out)])
                losses.append(loss)

            mb_loss = dy.average(losses)

            # Forward prop
            loss_all_train.append(mb_loss.value())

            # Backward prop
            mb_loss.backward()
            trainer.update()

        # Valid
        loss_all_valid = []
        for i in range(n_batches_valid):
            # Create a new computation graph
            dy.renew_cg()
            rush_abs.associate_parameters()

            # Create a mini batch
            start = i*BATCH_SIZE
            end = start + BATCH_SIZE
            valid_X_mb = valid_X[start:end]
            valid_y_mb = valid_y[start:end]

            losses = []
            for x, t in zip(valid_X_mb, valid_y_mb):
                t_in, t_out = t[:-1], t[C:]

                y = rush_abs(x, t_in)
                loss = dy.esum([dy.pickneglogsoftmax(y_t, t_t) for y_t, t_t in zip(y, t_out)])
                losses.append(loss)

            mb_loss = dy.average(losses)

            # Forward prop
            loss_all_valid.append(mb_loss.value())

        print('EPOCH: %d, Train Loss: %.3f, Valid Loss: %.3f' % (
            epoch+1,
            np.mean(loss_all_train),
            np.mean(loss_all_valid)
        ))

        # Save model
        dy.save('./model_e'+str(epoch+1), [rush_abs])
        with open('./w2i.dump', 'wb') as f_w2i, open('./i2w.dump', 'wb') as f_i2w:
            pickle.dump(w2i, f_w2i)
            pickle.dump(i2w, f_i2w)
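# Example invocation (a sketch: the script name is an assumption, and the data
# paths follow the Gigaword layout hard-coded above):
#   python train_abs.py --gpu 0 --n_epochs 10 --batch_size 32 \
#       --vocab_size 60000 --encoder_type attention --c 5 --q 2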
def save(self, basefile):
    # Serialize all model components with DyNet's native saver.
    dy.save(basefile, self.params_iterable())
def main():
    parser = argparse.ArgumentParser(description='Deep Recurrent Generative Decoder for Abstractive Text Summarization in DyNet')

    parser.add_argument('--gpu', type=str, default='0', help='GPU ID to use. For cpu, set -1 [default: 0]')
    parser.add_argument('--n_epochs', type=int, default=3, help='Number of epochs [default: 3]')
    parser.add_argument('--n_train', type=int, default=3803957, help='Number of training examples (up to 3803957 in gigaword) [default: 3803957]')
    parser.add_argument('--n_valid', type=int, default=189651, help='Number of validation examples (up to 189651 in gigaword) [default: 189651]')
    parser.add_argument('--batch_size', type=int, default=32, help='Mini batch size [default: 32]')
    parser.add_argument('--emb_dim', type=int, default=256, help='Embedding size [default: 256]')
    parser.add_argument('--hid_dim', type=int, default=256, help='Hidden state size [default: 256]')
    parser.add_argument('--lat_dim', type=int, default=256, help='Latent size [default: 256]')
    parser.add_argument('--alloc_mem', type=int, default=8192, help='Amount of memory to allocate [mb] [default: 8192]')
    args = parser.parse_args()
    print(args)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    N_EPOCHS = args.n_epochs
    N_TRAIN = args.n_train
    N_VALID = args.n_valid
    BATCH_SIZE = args.batch_size
    VOCAB_SIZE = 60000
    EMB_DIM = args.emb_dim
    HID_DIM = args.hid_dim
    LAT_DIM = args.lat_dim
    ALLOC_MEM = args.alloc_mem

    # File paths
    TRAIN_X_FILE = './data/train.article.txt'
    TRAIN_Y_FILE = './data/train.title.txt'
    VALID_X_FILE = './data/valid.article.filter.txt'
    VALID_Y_FILE = './data/valid.title.filter.txt'

    # DyNet setting
    dyparams = dy.DynetParams()
    dyparams.set_autobatch(True)
    dyparams.set_random_seed(RANDOM_STATE)
    dyparams.set_mem(ALLOC_MEM)
    dyparams.init()

    # Build dataset
    w2c = build_word2count(TRAIN_X_FILE, n_data=N_TRAIN)
    w2c = build_word2count(TRAIN_Y_FILE, w2c=w2c, n_data=N_TRAIN)

    train_X, w2i, i2w = build_dataset(TRAIN_X_FILE, w2c=w2c, padid=False, eos=True, unksym='<unk>', target=False, n_data=N_TRAIN, vocab_size=VOCAB_SIZE)
    train_y, _, _ = build_dataset(TRAIN_Y_FILE, w2i=w2i, target=True, n_data=N_TRAIN)
    valid_X, _, _ = build_dataset(VALID_X_FILE, w2i=w2i, target=False, n_data=N_VALID)
    valid_y, _, _ = build_dataset(VALID_Y_FILE, w2i=w2i, target=True, n_data=N_VALID)

    VOCAB_SIZE = len(w2i)
    OUT_DIM = VOCAB_SIZE
    print('VOCAB_SIZE:', VOCAB_SIZE)

    # Build model
    model = dy.Model()
    trainer = dy.AdamTrainer(model)

    V = model.add_lookup_parameters((VOCAB_SIZE, EMB_DIM))
    encoder = BiGRU(model, EMB_DIM, 2*HID_DIM)
    decoder = RecurrentGenerativeDecoder(model, EMB_DIM, 2*HID_DIM, LAT_DIM, OUT_DIM)

    # Train model
    n_batches_train = math.ceil(len(train_X)/BATCH_SIZE)
    n_batches_valid = math.ceil(len(valid_X)/BATCH_SIZE)
    start_time = time.time()

    for epoch in range(N_EPOCHS):
        # Train
        train_X, train_y = shuffle(train_X, train_y)
        loss_all_train = []
        for i in tqdm(range(n_batches_train)):
            # Create a new computation graph
            dy.renew_cg()
            encoder.associate_parameters()
            decoder.associate_parameters()

            # Create a mini batch
            start = i*BATCH_SIZE
            end = start + BATCH_SIZE
            train_X_mb = train_X[start:end]
            train_y_mb = train_y[start:end]

            losses = []
            for x, t in zip(train_X_mb, train_y_mb):
                t_in, t_out = t[:-1], t[1:]

                # Encoder
                x_embs = [dy.lookup(V, x_t) for x_t in x]
                he = encoder(x_embs)

                # Decoder
                t_embs = [dy.lookup(V, t_t) for t_t in t_in]
                decoder.set_initial_states(he)
                y, KL = decoder(t_embs)

                loss = dy.esum([
                    dy.pickneglogsoftmax(y_t, t_t) + KL_t
                    for y_t, t_t, KL_t in zip(y, t_out, KL)
                ])
                losses.append(loss)

            mb_loss = dy.average(losses)

            # Forward prop
            loss_all_train.append(mb_loss.value())

            # Backward prop
            mb_loss.backward()
            trainer.update()

        # Valid
        loss_all_valid = []
        for i in range(n_batches_valid):
            # Create a new computation graph
            dy.renew_cg()
            encoder.associate_parameters()
            decoder.associate_parameters()

            # Create a mini batch
            start = i*BATCH_SIZE
            end = start + BATCH_SIZE
            valid_X_mb = valid_X[start:end]
            valid_y_mb = valid_y[start:end]

            losses = []
            for x, t in zip(valid_X_mb, valid_y_mb):
                t_in, t_out = t[:-1], t[1:]

                # Encoder
                x_embs = [dy.lookup(V, x_t) for x_t in x]
                he = encoder(x_embs)

                # Decoder
                t_embs = [dy.lookup(V, t_t) for t_t in t_in]
                decoder.set_initial_states(he)
                y, KL = decoder(t_embs)

                loss = dy.esum([
                    dy.pickneglogsoftmax(y_t, t_t) + KL_t
                    for y_t, t_t, KL_t in zip(y, t_out, KL)
                ])
                losses.append(loss)

            mb_loss = dy.average(losses)

            # Forward prop
            loss_all_valid.append(mb_loss.value())

        print('EPOCH: %d, Train Loss: %.3f, Valid Loss: %.3f' % (
            epoch+1,
            np.mean(loss_all_train),
            np.mean(loss_all_valid)
        ))

        # Save model
        dy.save('./model_e'+str(epoch+1), [V, encoder, decoder])
        with open('./w2i.dump', 'wb') as f_w2i, open('./i2w.dump', 'wb') as f_i2w:
            pickle.dump(w2i, f_w2i)
            pickle.dump(i2w, f_i2w)
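# Each training script above presumably ends with the usual entry point, which
# is not included in this excerpt:
if __name__ == '__main__':
    main()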