Example #1
def run(args):
    nnet = Nnet(**nnet_conf)

    # permutation-invariant training (PIT) resolves the speaker-output ambiguity
    trainer = PermutationTrainer(nnet,
                                 gpuid=args.gpu,
                                 checkpoint=args.checkpoint,
                                 **trainer_conf)

    data_conf = {"train_data": train_data, "dev_data": dev_data}
    confs = [nnet_conf, feats_conf, trainer_conf, data_conf]
    names = ["mdl.json", "feats.json", "trainer.json", "data.conf"]

    for conf, fname in zip(confs, names):
        dump_json(conf, args.checkpoint, fname)

    feats_conf["shuf"] = True
    train_loader = make_pitloader(train_data["linear_x"],
                                  feats_conf,
                                  train_data,
                                  batch_size=args.batch_size,
                                  cache_size=args.cache_size)
    feats_conf["shuf"] = False
    dev_loader = make_pitloader(dev_data["linear_x"],
                                feats_conf,
                                dev_data,
                                batch_size=args.batch_size,
                                cache_size=args.cache_size)

    trainer.run(train_loader, dev_loader, num_epochs=args.epochs)
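All four examples persist their configuration dictionaries with dump_json before training starts. The helper itself is not shown on this page; a minimal sketch of what it presumably does, with the argument order taken from the call sites and the JSON layout being an assumption, could look like this:

import json
import os

def dump_json(conf, checkpoint_dir, fname):
    """Write one config dict as JSON into the checkpoint directory."""
    os.makedirs(checkpoint_dir, exist_ok=True)
    with open(os.path.join(checkpoint_dir, fname), "w") as f:
        json.dump(conf, f, indent=2)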
Example #2
def run(args):
    # parse a comma-separated GPU list, e.g. "0,1" -> (0, 1)
    gpuids = tuple(map(int, args.gpus.split(",")))

    nnet = ConvTasNet(**nnet_conf)
    trainer = SiSnrTrainer(nnet,
                           gpuid=gpuids,
                           checkpoint=args.checkpoint,
                           resume=args.resume,
                           **trainer_conf)

    data_conf = {
        "train": train_data,
        "dev": dev_data,
        "chunk_size": chunk_size
    }
    for conf, fname in zip([nnet_conf, trainer_conf, data_conf],
                           ["mdl.json", "trainer.json", "data.json"]):
        dump_json(conf, args.checkpoint, fname)

    train_loader = make_dataloader(train=True,
                                   data_kwargs=train_data,
                                   batch_size=args.batch_size,
                                   chunk_size=chunk_size,
                                   num_workers=args.num_workers)
    dev_loader = make_dataloader(train=False,
                                 data_kwargs=dev_data,
                                 batch_size=args.batch_size,
                                 chunk_size=chunk_size,
                                 num_workers=args.num_workers)

    trainer.run(train_loader, dev_loader, num_epochs=args.epochs)
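Each run function only consumes an args namespace; the argument parser is not part of these snippets. A hypothetical argparse setup covering the attributes used above (all flag names and defaults here are assumptions, not the original CLI) might look like:

import argparse

def make_parser():
    parser = argparse.ArgumentParser(description="train a separation model")
    parser.add_argument("--gpus", type=str, default="0",
                        help="comma-separated GPU ids, e.g. '0,1'")
    parser.add_argument("--checkpoint", type=str, required=True,
                        help="directory for checkpoints and config dumps")
    parser.add_argument("--resume", type=str, default="",
                        help="checkpoint to resume training from")
    parser.add_argument("--batch-size", type=int, default=16)
    parser.add_argument("--num-workers", type=int, default=4)
    parser.add_argument("--epochs", type=int, default=100)
    return parser

if __name__ == "__main__":
    run(make_parser().parse_args())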
Example #3
def run(args):
    train_data["knownPercent"] = args.known_percent
    dev_data["knownPercent"] = args.known_percent
    gpuids = tuple(map(int, args.gpus.split(",")))

    nnet = ConvTasNet(**nnet_conf)
    if args.mixofmix == 0:
        logger.info("SiSnrTrainer")
        trainer = SiSnrTrainer(nnet,
                               gpuid=gpuids,
                               checkpoint=args.checkpoint,
                               resume=args.resume,
                               comment=args.comment,
                               **trainer_conf)
    else:
        logger.info("MixtureOfMixturesTrainer")
        trainer = MixtureOfMixturesTrainer(nnet,
                                           gpuid=gpuids,
                                           checkpoint=args.checkpoint,
                                           resume=args.resume,
                                           comment=args.comment,
                                           **trainer_conf)
    logger.info("Known pecents " + str(dev_data["knownPercent"]))
    data_conf = {
        "train": train_data,
        "dev": dev_data,
        "chunk_size": chunk_size
    }
    for conf, fname in zip([nnet_conf, trainer_conf, data_conf],
                           ["mdl.json", "trainer.json", "data.json"]):
        dump_json(conf, args.checkpoint, fname)

    if args.mixofmix == 0:
        train_loader = make_dataloader(train=True,
                                       data_kwargs=train_data,
                                       batch_size=args.batch_size,
                                       chunk_size=chunk_size,
                                       num_workers=args.num_workers)
        dev_loader = make_dataloader(train=False,
                                     data_kwargs=dev_data,
                                     batch_size=args.batch_size,
                                     chunk_size=chunk_size,
                                     num_workers=args.num_workers)
    else:
        train_loader = make_dataloader(train=True,
                                       data_kwargs=train_data,
                                       batch_size=args.batch_size,
                                       chunk_size=chunk_size,
                                       num_workers=args.num_workers,
                                       mixofmix=True)
        dev_loader = make_dataloader(train=False,
                                     data_kwargs=dev_data,
                                     batch_size=args.batch_size,
                                     chunk_size=chunk_size,
                                     num_workers=args.num_workers,
                                     mixofmix=True)
    trainer.run(train_loader, dev_loader, num_epochs=args.epochs)
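The two dataloader branches above differ only in the extra mixofmix flag. Assuming make_dataloader treats mixofmix=False the same as omitting the argument (not confirmed by the snippet), the branching could be collapsed into a single pair of calls:

    use_mom = args.mixofmix != 0
    train_loader = make_dataloader(train=True,
                                   data_kwargs=train_data,
                                   batch_size=args.batch_size,
                                   chunk_size=chunk_size,
                                   num_workers=args.num_workers,
                                   mixofmix=use_mom)
    dev_loader = make_dataloader(train=False,
                                 data_kwargs=dev_data,
                                 batch_size=args.batch_size,
                                 chunk_size=chunk_size,
                                 num_workers=args.num_workers,
                                 mixofmix=use_mom)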
Example #4
def run(args):
    gpuids = tuple(map(int, args.gpus.split(",")))

    logger.info("Create ConvTasNet ...")
    nnet = ConvTasNet(**nnet_conf)
    if args.loss == "nr_loss":
        logger.info("Create NoiseReconstructTrainer ...")
        trainer = NoiseReconstructTrainer(nnet,
                                          gpuid=gpuids,
                                          checkpoint=args.checkpoint,
                                          resume=args.resume,
                                          **trainer_conf)
    else:
        logger.info("Create SiSnrTrainer ...")
        trainer = SiSnrTrainer(nnet,
                               gpuid=gpuids,
                               checkpoint=args.checkpoint,
                               resume=args.resume,
                               **trainer_conf)

    logger.info("Finish ConvTasNet.")

    logger.info("Prepare data {} {}.".format(train_data, dev_data))
    data_conf = {
        "train": train_data,
        "dev": dev_data,
        "chunk_size": chunk_size
    }
    for conf, fname in zip([nnet_conf, trainer_conf, data_conf],
                           ["mdl.json", "trainer.json", "data.json"]):
        dump_json(conf, args.checkpoint, fname)

    logger.info("make_dataloader for train.")
    train_loader = make_dataloader(train=True,
                                   data_kwargs=train_data,
                                   batch_size=args.batch_size,
                                   chunk_size=chunk_size,
                                   num_workers=args.num_workers)

    logger.info("make_dataloader for dev.")
    dev_loader = make_dataloader(train=False,
                                 data_kwargs=dev_data,
                                 batch_size=args.batch_size,
                                 chunk_size=chunk_size,
                                 num_workers=args.num_workers)

    logger.info("runing...")
    trainer.run(train_loader, dev_loader, num_epochs=args.epochs)
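SiSnrTrainer, used in examples #2 through #4, trains ConvTasNet on scale-invariant SNR, the standard objective from the Conv-TasNet paper. The trainer's loss code is not shown here; below is a self-contained NumPy sketch of the metric it presumably maximizes:

import numpy as np

def si_snr(estimate, target, eps=1e-8):
    """Scale-invariant SNR in dB between an estimate and a reference signal."""
    # remove the mean so the measure is invariant to DC offsets
    estimate = estimate - estimate.mean()
    target = target - target.mean()
    # project the estimate onto the target direction; rescaling the estimate
    # leaves the ratio below unchanged, hence "scale-invariant"
    s_target = np.dot(estimate, target) * target / (np.dot(target, target) + eps)
    e_noise = estimate - s_target
    return 10 * np.log10(np.dot(s_target, s_target) / (np.dot(e_noise, e_noise) + eps))

A trainer would typically minimize the negative mean SI-SNR over each batch.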