Example #1
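# Assumed imports from the enclosing module (not shown in this excerpt):
#   import torch
#   import torch.nn as nn
#   import torch.optim as optim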
    cfg.logger.debug('Scheduler patience: {}'.format(cfg.sch_patience))
    cfg.logger.debug('Scheduler verbose: {}'.format(cfg.sch_verbose))
    cfg.logger.debug('Device: {}'.format(cfg.device))
    cfg.logger.debug('Embedding model directory: {}'.format(cfg.emb_model_dir))
    cfg.logger.debug('Lyrics data directory: {}'.format(cfg.lyrics_dir))

    if cfg.pretrained_lm_dir:
        cfg.logger.debug('Pre-trained language model: {}'.format(
            cfg.pretrained_lm_dir))
    else:
        cfg.logger.debug('Pre-trained language model: initial training')

    # Training setup: model, loss, optimizer, and LR scheduler
    language_model = LanguageModel(wv_dict, cfg.hidden_dim).to(cfg.device)
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(language_model.parameters(), lr=cfg.lr)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     mode='min',
                                                     factor=cfg.sch_factor,
                                                     patience=cfg.sch_patience,
                                                     verbose=cfg.sch_verbose)
    if cfg.pretrained_lm_dir:
        lm_loading_res = language_model.load_state_dict(
            torch.load(cfg.pretrained_lm_dir))
        cfg.logger.debug('Loading language model: {}'.format(lm_loading_res))

    train_losses, train_accs = [], []  # losses & accuracies to save
    if cfg.test_ratio > 0:
        test_losses, test_accs = [], []

    cfg.logger.info('Training.')
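
    # A minimal sketch of the epoch loop this setup implies. train_epoch and
    # evaluate are hypothetical helpers and cfg.epochs is an assumed config
    # field; none of them appear in the excerpt above.
    for epoch in range(cfg.epochs):
        train_loss, train_acc = train_epoch(language_model, criterion, optimizer)
        train_losses.append(train_loss)
        train_accs.append(train_acc)
        if cfg.test_ratio > 0:
            test_loss, test_acc = evaluate(language_model, criterion)
            test_losses.append(test_loss)
            test_accs.append(test_acc)
            # ReduceLROnPlateau expects the monitored metric at each step
            scheduler.step(test_loss)
        else:
            scheduler.step(train_loss)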