Code example #1
File: train-ivecs-oracle.py  Project: ishine/BrnoLM
        logger = InfinityLogger(epoch, args.log_interval, lr)
        train_data_filtered = BatchFilter(train_data, args.batch_size,
                                          args.target_seq_len,
                                          args.min_batch_size)

        optim = torch.optim.SGD(lm.model.parameters(),
                                lr=lr,
                                weight_decay=args.beta)

        train(lm.model,
              train_data_filtered,
              optim,
              logger,
              clip=args.clip,
              use_ivecs=True)
        train_data_filtered.report()

        val_loss = evaluate(lm.model, valid_data, use_ivecs=True)
        print(
            epoch_summary(epoch, logger.nb_updates(),
                          logger.time_since_creation(), val_loss))

        # Save the model if the validation loss is the best we've seen so far.
        if not best_val_loss or val_loss < best_val_loss:
            with open(args.save, 'wb') as f:
                lm.save(f)
            best_val_loss = val_loss
        else:
            # No improvement: halve the learning rate before the next epoch.
            lr /= 2.0
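
This training loop uses a common "halve on plateau" learning-rate schedule: the model is saved whenever the validation loss improves, otherwise the SGD learning rate is halved before the next epoch. A minimal, self-contained sketch of that pattern (the model, loaders, and loss function below are placeholders, not BrnoLM code):

import torch

def fit(model, train_loader, valid_loader, loss_fn, nb_epochs, lr=1.0):
    best_val_loss = None
    for epoch in range(nb_epochs):
        optim = torch.optim.SGD(model.parameters(), lr=lr)
        model.train()
        for x, y in train_loader:
            optim.zero_grad()
            loss_fn(model(x), y).backward()
            optim.step()

        model.eval()
        with torch.no_grad():
            val_loss = sum(loss_fn(model(x), y).item() for x, y in valid_loader)

        if best_val_loss is None or val_loss < best_val_loss:
            best_val_loss = val_loss  # best model so far; a real script would save it here
        else:
            lr /= 2.0  # no improvement: halve the learning rate for the next epoch
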
Code example #2
File: eval-ivecs-oracle.py  Project: ishine/BrnoLM
        lm = language_model.load(f)
    if args.cuda:
        lm.model.cuda()
    print(lm.model)

    print("loading SMM iVector extractor ...")
    with open(args.ivec_extractor, 'rb') as f:
        ivec_extractor = smm_ivec_extractor.load(f)
    if args.ivec_nb_iters:
        ivec_extractor._nb_iters = args.ivec_nb_iters
    print(ivec_extractor)

    print("preparing data...")

    def ivec_ts_from_file(f):
        ts = TokenizedSplitFFBase(
            f, lm.vocab,
            lambda seq: TemporalSplits(seq, lm.model.in_len, args.target_seq_len))
        return ivec_appenders.CheatingIvecAppender(ts, ivec_extractor)

    data_ivecs = filelist_to_objects(args.file_list, ivec_ts_from_file)
    data = BatchBuilder(data_ivecs,
                        args.batch_size,
                        discard_h=not args.concat_articles)

    if args.cuda:
        data = CudaStream(data)

    loss = evaluate(lm.model, data, use_ivecs=True)
    print('loss {:5.2f} | ppl {:8.2f}'.format(loss, math.exp(loss)))
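
The last line converts the average per-token cross-entropy (a loss in nats) into perplexity with math.exp. A standalone illustration of that relationship, using a made-up loss value:

import math

loss = 4.60           # example: average cross-entropy per token, in nats
ppl = math.exp(loss)  # perplexity is the exponential of the per-token cross-entropy
print('loss {:5.2f} | ppl {:8.2f}'.format(loss, ppl))  # -> loss  4.60 | ppl    99.48
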
Code example #3
File: eval-multifile.py  Project: ishine/BrnoLM
                        type=str,
                        required=True,
                        help='where to load a model from')
    args = parser.parse_args()
    print(args)

    init_seeds(args.seed, args.cuda)

    print("loading model...")
    with open(args.load, 'rb') as f:
        lm = language_model.load(f)
    if args.cuda:
        lm.model.cuda()
    print(lm.model)

    print("preparing data...")

    def temp_splits_from_fn(fn):
        tokens = tokens_from_file(fn, lm.vocab, randomize=False)
        return TemporalSplits(tokens, lm.model.in_len, args.target_seq_len)

    tss = filelist_to_objects(args.file_list, temp_splits_from_fn)
    data = BatchBuilder(tss,
                        args.batch_size,
                        discard_h=not args.concat_articles)
    if args.cuda:
        data = CudaStream(data)

    loss = evaluate(lm.model, data, use_ivecs=False)
    print('loss {:5.2f} | ppl {:8.2f}'.format(loss, math.exp(loss)))