Example #1
def valid(model, valid_dataset, step):
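    """Run one pass over the validation set: average the loss, decode with beam search, and report BLEU."""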
    model.eval()
    total_loss, total = 0.0, 0

    hypothesis, references = [], []

    for batch in valid_dataset:
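        # Forward pass; .mean() reduces a possibly per-device loss (e.g. under DataParallel) to a scalar.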
        loss = model(batch.src, batch.tgt).mean()
        total_loss += loss.data
        total += 1

        predictions = parallel_beam_search(opt, model.module.model, batch, valid_dataset.fields)
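        # Convert predicted and reference token ids back to text so BLEU is computed on strings.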
        hypothesis += [valid_dataset.fields["tgt"].decode(p) for p in predictions]
        references += [valid_dataset.fields["tgt"].decode(t) for t in batch.tgt]

    # Corpus-level BLEU over every decoded sentence in the validation set.
    bleu = calculate_bleu(hypothesis, references)
    logger.info("Valid loss: %.2f\tValid Beam BLEU: %3.2f" % (total_loss / total, bleu))
    # Persist the weights together with the options used to build the model.
    checkpoint = {"model": model.module.model.state_dict(), "opt": opt}
    saver.save(checkpoint, printing_opt(opt), step, bleu, total_loss / total)
Example #2
import logging

import torch.cuda

from beaver.data import build_dataset
from beaver.infer import beam_search
from beaver.loss import WarmAdam, LabelSmoothingLoss
from beaver.model import NMTModel
from beaver.utils import SaverTest
from beaver.utils import calculate_bleu
from beaver.utils import parseopt, get_device, printing_opt
from beaver.utils.metric import calculate_rouge

logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
opt = parseopt.parse_train_args()

device = get_device()
logging.info("\n" + printing_opt(opt))

saver = SaverTest(opt)


def write_file(hypothesis, references, step):
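    """Dump hypotheses and references for this step to plain-text files, one sentence per line."""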
    with open("hypothesis_" + str(step), "w", encoding="UTF-8") as out_file:
        out_file.write("\n".join(hypothesis))
        out_file.write("\n")

    with open("references_" + str(step), "w", encoding="UTF-8") as out_file:
        out_file.write("\n".join(references))
        out_file.write("\n")


def valid(model, criterion_cn, criterion_en, valid_dataset, step):
    ...  # body truncated in the source listing
Example #3
import argparse

from beaver.utils import Saver, Loader
from beaver.utils import calculate_bleu
from beaver.utils import parseopt, get_device, get_logger, printing_opt

parser = argparse.ArgumentParser()

parseopt.data_opts(parser)
parseopt.train_opts(parser)
parseopt.model_opts(parser)

opt = parser.parse_args()

device = get_device()
logger = get_logger()

logger.info("\n" + printing_opt(opt))

saver = Saver(save_path=opt.model_path, max_to_keep=opt.max_to_keep)
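# Checkpoint writer; max_to_keep bounds how many checkpoints are retained in model_path.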
loader = Loader(opt.model_path, opt, logger)


def valid(model, valid_dataset, step):
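    """Evaluate on the validation set: accumulate the average loss and gather hypotheses/references."""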
    model.eval()
    total_loss, total = 0.0, 0

    hypothesis, references = [], []

    for batch in valid_dataset:
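        # Forward pass; .mean() reduces a possibly per-device loss to a scalar.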
        loss = model(batch.src, batch.tgt).mean()
        total_loss += loss.data
        total += 1