Example #1
def validate_unconditional(conf):
    """Validate model for unconditional handwriting generation

    """
    t_utils.load_data(conf, validate=True)

    if not os.path.isfile(conf.unconditional_model_path):
        logs.print_red("Unconditional model does not exist. Please train one first")
        return

    # Load model
    model = torch.load(conf.unconditional_model_path)


    # Generate ten sample figures
    for counter in range(10):

        # Sample a sequence to follow progress and save the plot
        plot_data = i_utils.sample_unconditional_sequence(conf, model)
        utils.plot_stroke(plot_data.stroke, "Plots/validation/unconditional_sample_%s.png" % counter)

    logs.print_red("Results saved to figures/validation")
Example #2
def validate_conditional(conf):
    """Validate model for conditional handwriting generation

    """

    t_utils.load_data(conf, validate=True)

    if not os.path.isfile(conf.conditional_model_path):
        logs.print_red("Conditional model does not exist. Please train one first")
        return

    # Load model
    model = torch.load(conf.conditional_model_path)

    # Count figure number
    counter = 0
    while True:

        # Reset the flag for each new input text
        continue_flag = False
        input_text = input("Enter text: ")

        # Check all characters are allowed
        for char in input_text:
            if char not in conf.d_char_to_idx:
                logs.print_red("%s not in alphabet" % char)
                continue_flag = True

        # Ask for a new input text in case of failure
        if continue_flag:
            continue

        plot_data = i_utils.sample_fixed_sequence(conf, model, truth_text=input_text)
        utils.plot_stroke(plot_data.stroke, "Plots/validation/conditional_sample_%s.png" % counter)
        logs.print_red("Results saved to figures/validation")

        counter += 1
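
# A compact variant of the alphabet check above, assuming the same
# conf.d_char_to_idx mapping (a sketch, not the repo's code): collect every
# out-of-alphabet character in one pass instead of flagging and re-prompting.
def unknown_chars(text, d_char_to_idx):
    return sorted({char for char in text if char not in d_char_to_idx})

# Example: with a toy alphabet, all offending characters are reported at once.
print(unknown_chars("héllo", {ch: i for i, ch in enumerate("helo")}))  # ['é']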
Example #3
def train_conditional(conf):
    """Train model for conditional handwriting generation

    Input:
        conf: configurations for conditional training

    Output:
        model trained and saved to disk
    """

    list_data_train = t_units.load_data(conf)

    # Model specifications
    input_size = list_data_train[0][0].shape[1]
    print("Input size:", input_size)
    onehot_dim = list_data_train[-1][0].shape[-1]
    print("One-hot dimensions:", onehot_dim)
    output_size = 3 * conf.n_gaussian + 1
    print("Output size:", output_size)
    model = t_units.get_model(conf, input_size, output_size, onehot_dim=onehot_dim)
    optimizer = t_units.get_optimizer(conf, model)

    loss = ""
    d_monitor = defaultdict(list)

    # ***************** Training *************************
    logs.print_red("Starting training")
    for epoch in tqdm(range(conf.nb_epoch), desc="Training"):

        # Track the training losses over an epoch
        d_epoch_monitor = defaultdict(list)

        # Loop over batches
        desc = "Epoch: %s -- %s" % (epoch, loss)
        for batch in tqdm(range(conf.n_batch_per_epoch), desc=desc):

            X_var, Y_var, onehot_var = t_units.get_random_conditional_training_batch(conf, list_data_train)

            # Train step.
            d_loss = t_units.train_step(conf, model, X_var, Y_var, optimizer, onehot=onehot_var)

            d_epoch_monitor["bce"].append(d_loss["bce"])
            d_epoch_monitor["nll"].append(d_loss["nll"])
            d_epoch_monitor["total"].append(d_loss["total"])

        # Update d_monitor with the mean over an epoch
        for key in d_epoch_monitor.keys():
            d_monitor[key].append(np.mean(d_epoch_monitor[key]))
        # Prepare loss to update progress bar
        loss = "Total : %.3g  " % (d_monitor["total"][-1])

        plot_data = i_utils.sample_fixed_sequence(conf, model)
        v_utils.plot_stroke(plot_data.stroke, "Plots/conditional_training/epoch_%s.png" % epoch)

        # Save the model at regular intervals
        if epoch % 5 == 0:

            # Move model to cpu before saving to allow inference on cpu
            model.cpu()
            torch.save(model, conf.conditional_model_path)

    logs.print_red("Finished training")
Example #4
def train_unconditional(conf):
    """Train model for unconditional handwriting generation

    Input:
        conf: configurations for unconditional training

    Output:
        model trained and saved to disk
    """

    data = t_units.load_data(conf)

    # Model specifications
    input_dimensions = data.strokes[0].shape[-1]
    output_dimensions = 6 * conf.n_gaussian + 1
    model = t_units.get_model(conf, input_dimensions, output_dimensions)
    optimizer = t_units.get_optimizer(conf, model)

    loss = ""
    d_monitor = defaultdict(list)

    # ***************** Training *************************
    logs.print_red("Starting training")
    for epoch in tqdm(range(conf.nb_epoch), desc="Training"):

        # Track the training losses over an epoch
        d_epoch_monitor = defaultdict(list)

        # Loop over batches
        desc = "Epoch: %s -- %s" % (epoch, loss)
        for batch in tqdm(range(conf.n_batch_per_epoch), desc=desc):

            # Sample a batch (X, Y)
            X_var, Y_var = t_units.get_random_unconditional_training_batch(conf, data)

            # Train step = forward + backward + weight update
            d_loss = t_units.train_step(conf, model, X_var, Y_var, optimizer)


            d_epoch_monitor["bce"].append(d_loss["bce"])
            d_epoch_monitor["nll"].append(d_loss["nll"])
            d_epoch_monitor["total"].append(d_loss["total"])

        # Sample a sequence to follow progress and save the plot
        plot_data = i_utils.sample_unconditional_sequence(conf, model)
        v_utils.plot_stroke(plot_data.stroke, "Plots/unconditional_training/epoch_%s.png" % epoch)

        # Update d_monitor with the mean over an epoch
        for key in d_epoch_monitor.keys():
            d_monitor[key].append(np.mean(d_epoch_monitor[key]))
        # Prepare loss to update progress bar
        loss = "Total : %.3g " % (d_monitor["total"][-1])

        # Save the model at regular intervals
        if epoch % 5 == 0:

            # Move model to cpu before saving to allow inference on cpu
            model.cpu()
            torch.save(model, conf.unconditional_model_path)


    logs.print_red("Finished training")
Example #5
        print("Conditional model does not exist.")

    # Load model
    model = torch.load(
        "/Users/agupta/version-control/pytorch/Handwriting_Generation/models/conditional.pt"
    )
    print("loaded")
    input_text = "an input string"
    #print(settings)
    plot_data = i_utils.sample_fixed_sequence(settings,
                                              model,
                                              truth_text=input_text)

    return plot_data


def recognize_stroke(stroke):
    """Recognize the text written in a handwriting stroke

    Input:
        stroke: numpy 2D-array (T x 3)

    Output:
        text: str
    """
    return 'welcome to lyrebird'
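
# The stroke format used throughout these examples is a (T, 3) numpy array of
# [pen_lift, dx, dy] rows, i.e. relative pen offsets (assumed layout). A
# sketch converting the offsets to absolute coordinates, which is what a
# recognizer or plotter would typically consume:
import numpy as np

def to_absolute(stroke):
    coords = np.cumsum(stroke[:, 1:], axis=0)              # integrate the offsets
    return np.concatenate([stroke[:, :1], coords], axis=1)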


#stroke = generate_unconditionally()
#v_utils.plot_stroke(stroke)

stroke1 = generate_conditionally()
v_utils.plot_stroke(stroke1)
Example #6
        X_var, Y_var, onehot_var = get_random_conditional_training_batch(conf, data0, data1, data3)

        # Train step.
        d_loss = train_step(conf, model, X_var, Y_var, optimizer, onehot=onehot_var)

        d_epoch_monitor["bce"].append(d_loss["bce"])
        d_epoch_monitor["nll"].append(d_loss["nll"])
        d_epoch_monitor["total"].append(d_loss["total"])

    # Update d_monitor with the mean over an epoch
    for key in d_epoch_monitor.keys():
        d_monitor[key].append(np.mean(d_epoch_monitor[key]))
    # Prepare loss to update progress bar
    loss = "Total : %.3g  " % (d_monitor["total"][-1])

    plot_data = i_utils.sample_fixed_sequence(conf, model)
    v_utils.plot_stroke(plot_data.stroke, "Plots/conditional_training/epoch_%s.png" % epoch)

    # Save the model at regular intervals
    if epoch % 5 == 0:

        # Move model to cpu before saving to allow inference on cpu
        model.cpu()
        torch.save(model, conf.conditional_model_path)

print("Finished")