Example #1
def main(args):
    """
    This trains the PedalNet model to match the output data from the input data.

    When you resume training from an existing model, you can override hparams such as
        max_epochs, batch_size, or learning_rate. Note that changing num_channels,
        dilation_depth, num_repeat, or kernel_size will change the shape of the WaveNet
        model and is not advised.

    """
    if args.resume_training != "":
        model = PedalNet.load_from_checkpoint(args.resume_training)
        # Check for any hparams overridden by user and update
        for arg in sys.argv[1:]:
            arg2 = arg.split("=")[0].split("--")[1]
            if arg2 != "resume_training" and arg2 != "cpu" and arg2 != "tpu_cores":
                arg3 = arg.split("=")[1]
                if arg2 in model.hparams:
                    if arg2 == "learning_rate":
                        model.hparams[arg2] = float(arg3)
                    else:
                        model.hparams[arg2] = int(arg3)
                    print("Hparam overridden by user: "******"=", arg3, "\n")
        if args.cpu == 0:
            trainer = pl.Trainer(
                resume_from_checkpoint=args.resume_training,
                gpus=args.gpus,
                log_every_n_steps=100,
                max_epochs=args.max_epochs,
            )
        else:
            trainer = pl.Trainer(resume_from_checkpoint=args.resume_training,
                                 log_every_n_steps=100,
                                 max_epochs=args.max_epochs)
        print("\nHparams for continued model training:\n")
        print(model.hparams, "\n")
    else:
        model = PedalNet(args)
        if args.cpu == 0:
            trainer = pl.Trainer(max_epochs=args.max_epochs,
                                 gpus=args.gpus,
                                 log_every_n_steps=100)
            # The following commented-out call is for use with the Colab notebook when training on TPUs.
            # Comment out the pl.Trainer(...) call above and uncomment this one to use it:

            # trainer = pl.Trainer(max_epochs=args.max_epochs, tpu_cores=args.tpu_cores,
            #                      gpus=args.gpus, log_every_n_steps=100)
        else:
            trainer = pl.Trainer(max_epochs=args.max_epochs,
                                 log_every_n_steps=100)
    trainer.fit(model)
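
The override loop above parses raw sys.argv entries of the form --key=value, so main() is expected to be driven by an argparse command line. A minimal sketch of such an entry point is shown below; the flag names used in main() (resume_training, cpu, gpus, tpu_cores, max_epochs) come from the code above, while the defaults and the extra hparams are illustrative assumptions, not the project's actual values.

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Flags referenced by main(); defaults here are assumptions.
    parser.add_argument("--resume_training", default="")       # path to an existing *.ckpt
    parser.add_argument("--cpu", type=int, default=0)          # set to 1 to train without GPUs
    parser.add_argument("--tpu_cores", type=int, default=None)
    parser.add_argument("--gpus", type=int, default=1)
    parser.add_argument("--max_epochs", type=int, default=1500)
    # Example hparams that can be overridden when resuming (passed as --key=value).
    parser.add_argument("--learning_rate", type=float, default=3e-3)
    parser.add_argument("--batch_size", type=int, default=64)
    main(parser.parse_args())

Note that the override loop splits each argument on "=", so overrides must be passed as --learning_rate=1e-4 rather than --learning_rate 1e-4.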
Example #2
def predict(args):
    model = PedalNet.load_from_checkpoint(args.model)
    model.eval()
    with open(os.path.dirname(args.model) + "/data.pickle", "rb") as f:
        train_data = pickle.load(f)

    mean, std = train_data["mean"], train_data["std"]

    in_rate, in_data = wavfile.read(args.input)
    assert in_rate == 44100, "input data needs to be 44.1 kHz"
    sample_size = int(in_rate * args.sample_time)
    length = len(in_data) - len(in_data) % sample_size

    # split into samples
    in_data = in_data[:length].reshape((-1, 1, sample_size)).astype(np.float32)

    # standardize
    in_data = (in_data - mean) / std

    # pad each sample with previous sample
    prev_sample = np.concatenate((np.zeros_like(in_data[0:1]), in_data[:-1]), axis=0)
    pad_in_data = np.concatenate((prev_sample, in_data), axis=2)

    pred = []
    batches = pad_in_data.shape[0] // args.batch_size
    for x in tqdm(np.array_split(pad_in_data, batches)):
        # detach before converting to numpy; model.eval() does not disable autograd,
        # so the network output still carries gradient information
        pred.append(model(torch.from_numpy(x)).detach().numpy())

    pred = np.concatenate(pred)
    pred = pred[:, :, -in_data.shape[2] :]

    save(args.output, pred)
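
predict() above (and test() in Example #4) ends by calling a save() helper that is not included in these examples. A minimal sketch is shown here, assuming it simply flattens the (batch, 1, samples) prediction array back into a mono track and writes it as a 44.1 kHz float32 wav file:

import numpy as np
from scipy.io import wavfile


def save(name, data):
    # Assumed helper: flatten batched model output into one mono signal and
    # write it at the 44.1 kHz rate the rest of the pipeline expects.
    wavfile.write(name, 44100, data.flatten().astype(np.float32))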
Example #3
def main(args):
    model = PedalNet(args)
    trainer = pl.Trainer(
        max_epochs=args.max_epochs, gpus=args.gpus, row_log_interval=100
        # The following line is for use with the Colab notebook when training on TPUs.
        # Comment out the above line and uncomment the below line to use.
        
        # max_epochs=args.max_epochs, tpu_cores=args.tpu_cores, gpus=args.gpus, row_log_interval=100
    )
    trainer.fit(model)
Example #4
def test(args):
    model = PedalNet.load_from_checkpoint(args.model)
    model.eval()
    with open(args.data, "rb") as f:
        data = pickle.load(f)

    x_test = data["x_test"]
    prev_sample = np.concatenate((np.zeros_like(x_test[0:1]), x_test[:-1]),
                                 axis=0)
    pad_x_test = np.concatenate((prev_sample, x_test), axis=2)

    y_pred = []
    for x in np.array_split(pad_x_test, 10):
        # detach before converting to numpy (autograd is still active in eval mode)
        y_pred.append(model(torch.from_numpy(x)).detach().numpy())

    y_pred = np.concatenate(y_pred)
    y_pred = y_pred[:, :, -x_test.shape[2]:]

    save("y_pred.wav", y_pred)
    save("x_test.wav", data["x_test"] * data["std"] + data["mean"])
    save("y_test.wav", data["y_test"])
Example #5
def main(args):
    """
    This trains the PedalNet model to match the output data from the input data.

    When you resume training from an existing model, you can override hparams such as
        max_epochs, batch_size, or learning_rate. Note that changing num_channels,
        dilation_depth, num_repeat, or kernel_size will change the shape of the WaveNet
        model and is not advised.

    """

    prepare(args)
    model = PedalNet(vars(args))
    trainer = pl.Trainer(
        resume_from_checkpoint=args.model if args.resume else None,
        gpus=None if args.cpu or args.tpu_cores else args.gpus,
        tpu_cores=args.tpu_cores,
        log_every_n_steps=100,
        max_epochs=args.max_epochs,
    )

    trainer.fit(model)
    trainer.save_checkpoint(args.model)
Example #6
def main(args):
    model = PedalNet(args)
    trainer = pl.Trainer(max_epochs=args.max_epochs,
                         tpu_cores=args.tpu_cores,
                         row_log_interval=100)
    trainer.fit(model)
Example #7
def main(args):
    model = PedalNet(vars(args))
    trainer = pl.Trainer(
        max_epochs=args.max_epochs, gpus=args.gpus, row_log_interval=100
    )
    trainer.fit(model)
def convert(args):
    """
    Converts a *.ckpt model from PedalNet into a .json format used in WaveNetVA.

              Current changes to the original PedalNet model to match WaveNetVA include:
                1. Added CausalConv1d() to use causal padding
                2. Added an input layer, which is a Conv1d(in_channls=1, out_channels=num_channels, kernel_size=1)
                3. Instead of two conv_stacks for tanh and sigm, used a single hidden layer with input_channels=16,
                   output_channels=32, then split the matrix for tanh and sigm calculation.

                Note: The original PedalNet model was intended for use on PCM Int16 format wave files. The WaveNetVA is
                    intended as a plugin, which processes float32 audio data. The PedalNet model must be trained on wave files
                    saved as Float32 data, which has sample data in the range -1 to 1.

                Note: The WaveNetVA plugin doesn't perform the standardization step as in predict.py. With the standardization step
                       omitted, the signals match between the plugin with converted model, and the predict.py output.

              The model parameters used for conversion testing match the Wavenetva1 model (limited testing using other parameters):
              --num_channels=16, --dilation_depth=10, --num_repeat=1, --kernel_size=3
    """

    # Permute tensors to match Tensorflow format with .permute(a,b,c):
    a, b, c = (
        2,
        1,
        0,
    )  # Pytorch uses (out_channels, in_channels, kernel_size), TensorFlow uses (kernel_size, in_channels, out_channels)
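    # Illustrative example (not in the original script): with the Wavenetva1 settings a hidden
    # Conv1d weight has shape (out_channels=32, in_channels=16, kernel_size=3) in PyTorch;
    # .permute(2, 1, 0) turns it into (3, 16, 32), the layout WaveNetVA expects.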
    model = PedalNet.load_from_checkpoint(checkpoint_path=args.model)

    sd = model.state_dict()

    # Get hparams from model
    hparams = model.hparams
    residual_channels = hparams["num_channels"]
    filter_width = hparams["kernel_size"]
    dilations = [2 ** d for d in range(hparams["dilation_depth"])] * hparams["num_repeat"]

    data_out = {
        "activation": "gated",
        "output_channels": 1,
        "input_channels": 1,
        "residual_channels": residual_channels,
        "filter_width": filter_width,
        "dilations": dilations,
        "variables": [],
    }

    # Use pytorch model data to populate the json data for each layer
    for i in range(-1, len(dilations) + 1):
        # Input Layer
        if i == -1:
            data_out["variables"].append({
                "layer_idx":
                i,
                "data": [
                    str(w)
                    for w in (sd["wavenet.input_layer.weight"]
                              ).permute(a, b, c).flatten().numpy().tolist()
                ],
                "name":
                "W",
            })
            data_out["variables"].append({
                "layer_idx":
                i,
                "data": [
                    str(b) for b in (sd["wavenet.input_layer.bias"]
                                     ).flatten().numpy().tolist()
                ],
                "name":
                "b",
            })
        # Linear Mix Layer
        elif i == len(dilations):
            data_out["variables"].append({
                "layer_idx":
                i,
                "data": [
                    str(w)
                    for w in (sd["wavenet.linear_mix.weight"]
                              ).permute(a, b, c).flatten().numpy().tolist()
                ],
                "name":
                "W",
            })

            data_out["variables"].append({
                "layer_idx":
                i,
                "data": [
                    str(b)
                    for b in (sd["wavenet.linear_mix.bias"]).numpy().tolist()
                ],
                "name":
                "b",
            })
        # Hidden Layers
        else:
            data_out["variables"].append({
                "layer_idx":
                i,
                "data": [
                    str(w) for w in sd["wavenet.hidden." + str(i) +
                                       ".weight"].permute(
                                           a, b, c).flatten().numpy().tolist()
                ],
                "name":
                "W_conv",
            })
            data_out["variables"].append({
                "layer_idx":
                i,
                "data": [
                    str(b) for b in sd["wavenet.hidden." + str(i) +
                                       ".bias"].flatten().numpy().tolist()
                ],
                "name":
                "b_conv",
            })
            data_out["variables"].append({
                "layer_idx":
                i,
                "data": [
                    str(w2) for w2 in
                    sd["wavenet.residuals." + str(i) +
                       ".weight"].permute(a, b, c).flatten().numpy().tolist()
                ],
                "name":
                "W_out",
            })
            data_out["variables"].append({
                "layer_idx":
                i,
                "data": [
                    str(b2) for b2 in sd["wavenet.residuals." + str(i) +
                                         ".bias"].flatten().numpy().tolist()
                ],
                "name":
                "b_out",
            })

    # output final dictionary to json file
    with open("converted_model.json", "w") as outfile:
        json.dump(data_out, outfile)
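
As a quick, illustrative sanity check (not part of the original script), the exported file can be loaded back and its variable count compared against the dilation schedule: each hidden layer contributes four variables (W_conv, b_conv, W_out, b_out), and the input and linear mix layers contribute two each.

import json

with open("converted_model.json") as f:
    converted = json.load(f)

# 4 variables per hidden layer, plus 2 for the input layer and 2 for the linear mix layer
expected = 4 * len(converted["dilations"]) + 4
assert len(converted["variables"]) == expected, "unexpected number of exported variables"
print("dilations:", converted["dilations"])
print("variables:", len(converted["variables"]))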