Example #1
import paddle


def version_compare(version1, version2):
    """Return True if version1 compares greater than version2, component-wise."""
    version1 = version1.split(".")
    version2 = version2.split(".")
    num = min(len(version1), len(version2))
    for index in range(num):
        try:
            vn1 = int(version1[index])
        except ValueError:
            vn1 = 0
        try:
            vn2 = int(version2[index])
        except ValueError:
            vn2 = 0

        if vn1 > vn2:
            return True
        elif vn1 < vn2:
            return False
    return len(version1) > len(version2)


if version_compare(paddle.__version__, "1.8.0"):
    print("verison greater than 1.8")
    import paddle.fluid as fluid
else:
    print("verison is: %s" % paddle.__version__)
    import paddle.fluid.dygraph as dygraph

dygraph.enable_dygraph()
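
A quick sanity check of the helper above, using hypothetical version strings (a minimal sketch, not part of the original snippet):

# Hypothetical inputs to exercise version_compare.
assert version_compare("2.0.1", "1.8.0")
assert not version_compare("1.7.2", "1.8.0")
assert not version_compare("1.8.0", "1.8.0")  # equal versions are not "greater"
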
Example #2
                        help="path to save synthesized audio")

    args = parser.parse_args()
    with open(args.config, 'rt') as f:
        config = ruamel.yaml.safe_load(f)

    print("Command Line Args: ")
    for k, v in vars(args).items():
        print("{}: {}".format(k, v))

    if args.device == -1:
        place = fluid.CPUPlace()
    else:
        place = fluid.CUDAPlace(args.device)

    dg.enable_dygraph(place)

    model = make_model(config)
    checkpoint_dir = os.path.join(args.output, "checkpoints")
    if args.checkpoint is not None:
        iteration = io.load_parameters(model, checkpoint_path=args.checkpoint)
    else:
        iteration = io.load_parameters(model,
                                       checkpoint_dir=checkpoint_dir,
                                       iteration=args.iteration)

    # WARNING: remember to remove weight norm so that each wrapped layer's weight is recomputed;
    # removing weight norm also speeds up computation
    for layer in model.sublayers():
        if isinstance(layer, WeightNormWrapper):
            layer.remove_weight_norm()
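
The CPU/GPU selection above follows the convention that --device -1 means CPU. A minimal sketch of that pattern as a standalone helper (the helper name select_place is hypothetical, not taken from the source):

import paddle.fluid as fluid
import paddle.fluid.dygraph as dg


def select_place(device_id):
    # -1 selects the CPU; any non-negative id selects the matching CUDA device.
    return fluid.CPUPlace() if device_id == -1 else fluid.CUDAPlace(device_id)


dg.enable_dygraph(select_place(-1))  # hypothetical helper mirroring the snippet above
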
Example #3
    parser.add_argument("--config",
                        type=str,
                        required=True,
                        help="config file")
    parser.add_argument("--input",
                        type=str,
                        required=True,
                        help="text file to synthesize")
    parser.add_argument("--output",
                        type=str,
                        required=True,
                        help="path to save audio")
    parser.add_argument("--checkpoint",
                        type=str,
                        required=True,
                        help="data path of the checkpoint")
    parser.add_argument("--monotonic_layers",
                        type=str,
                        required=True,
                        help="monotonic decoder layers' indices(start from 1)")
    parser.add_argument("--vocoder",
                        type=str,
                        default="waveflow",
                        choices=['griffin-lim', 'waveflow'],
                        help="vocoder to use")
    args = parser.parse_args()
    with open(args.config, 'rt') as f:
        config = yaml.safe_load(f)

    dg.enable_dygraph(fluid.CUDAPlace(0))
    main(args, config)
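
The --monotonic_layers flag arrives as a plain string. A minimal sketch of how such 1-based indices could be parsed, assuming a comma-separated format (the separator and the helper name are assumptions, not taken from the source):

def parse_layer_indices(spec):
    # Hypothetical helper: "1,3,5" -> [0, 2, 4]; assumes comma-separated, 1-based indices.
    return [int(token) - 1 for token in spec.split(",") if token.strip()]


print(parse_layer_indices("1,3,5"))  # [0, 2, 4]
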
Example #4
    parser.add_argument("--input",
                        type=str,
                        required=True,
                        help="data path of the original data")
    args = parser.parse_args()
    with open(args.config, 'rt') as f:
        config = yaml.safe_load(f)

    print("========= Command Line Arguments ========")
    for k, v in vars(args).items():
        print("{}: {}".format(k, v))
    print("=========== Configurations ==============")
    for k in ["p_pronunciation", "batch_size"]:
        print("{}: {}".format(k, config[k]))

    ljspeech = LJSpeech(args.input)
    collate_fn = DataCollector(config["p_pronunciation"])

    dg.enable_dygraph(fluid.CPUPlace())
    sampler = PartialyRandomizedSimilarTimeLengthSampler(ljspeech.num_frames())
    cargo = DataCargo(ljspeech,
                      collate_fn,
                      batch_size=config["batch_size"],
                      sampler=sampler)
    loader = (DataLoader
              .from_generator(capacity=5, return_list=True)
              .set_batch_generator(cargo))

    for i, batch in tqdm.tqdm(enumerate(loader)):
        continue
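
The loop above only exhausts the loader. A minimal sketch of the same pass with a batch count added as a sanity check (nothing about the batch contents is assumed):

# Count batches while exhausting the loader, as a quick pipeline check.
num_batches = 0
for _ in tqdm.tqdm(loader):
    num_batches += 1
print("total batches: {}".format(num_batches))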