Example #1
def init_with_seed(pr_orch, batch_size, seed_size, N_orchestra, instru_orch,
                   instru_mapping):
    out_shape = (seed_size, N_orchestra)
    pr_orchestra_gen = np.zeros(out_shape, dtype=np.float32)
    orch_seed_beginning = {k: v[:seed_size] for k, v in pr_orch.items()}
    pr_orchestra_gen = build_data_aux.cast_small_pr_into_big_pr(
        orch_seed_beginning, instru_orch, 0, seed_size, instru_mapping,
        pr_orchestra_gen)
    # Replicate the seed across the batch dimension
    pr_orchestra_gen_stacked = [pr_orchestra_gen for _ in range(batch_size)]
    pr_orchestra_gen_stacked = np.stack(pr_orchestra_gen_stacked, axis=0)
    return pr_orchestra_gen_stacked
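
The stacking at the end of init_with_seed simply replicates one seed pianoroll across the batch dimension. Below is a minimal, self-contained sketch of that pattern in plain NumPy; the sizes and the single seeded note are hypothetical, and build_data_aux is left out.

import numpy as np

# Hypothetical sizes: a 9-frame seed over a 128-unit orchestra vector,
# replicated for a batch of 4 parallel generations.
seed_size, N_orchestra, batch_size = 9, 128, 4

pr_orchestra_gen = np.zeros((seed_size, N_orchestra), dtype=np.float32)
pr_orchestra_gen[0, 60] = 1.0  # one hypothetical note in the seed

# Same pattern as init_with_seed: one copy per batch element, stacked on axis 0.
stacked = np.stack([pr_orchestra_gen for _ in range(batch_size)], axis=0)
assert stacked.shape == (batch_size, seed_size, N_orchestra)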
Example #2
def generate_midi(config_folder_fd, config_folder_bd, config_folder_corr,
                  score_source, save_folder, initialization_type,
                  number_of_version, duration_gen, num_pass_correct,
                  logger_generate):
    """This function generate the orchestration of a midi piano score
    
    Parameters
    ----------
    config_folder : str
        Absolute path to the configuration folder, i.e. the folder containing the saved model and the results
    score_source : str
        Either a path to a folder containing two midi files (piano and orchestration) or the path toa piano midi files
    number_of_version : int
        Number of version generated in a batch manner. Since the generation process involves sampling it might be interesting to generate several versions
    duration_gen : int
        Length of the generated score (in number of events). Useful for generating only the beginning of the piece.
    logger_generate : logger
        Instanciation of logging. Can be None
    """

    logger_generate.info("#############################################")
    logger_generate.info("Orchestrating : " + score_source)

    # Load parameters
    parameters = pkl.load(
        open(config_folder_fd + '/script_parameters.pkl', 'rb'))
    model_parameters_fd = pkl.load(
        open(config_folder_fd + '/model_params.pkl', 'rb'))
    #
    parameters_bd = pkl.load(
        open(config_folder_bd + '/script_parameters.pkl', 'rb'))
    model_parameters_bd = pkl.load(
        open(config_folder_bd + '/model_params.pkl', 'rb'))
    #
    parameters_corr = pkl.load(
        open(config_folder_corr + '/script_parameters.pkl', 'rb'))
    model_parameters_corr = pkl.load(
        open(config_folder_corr + '/model_params.pkl', 'rb'))

    assert (model_parameters_fd["temporal_order"]
            == model_parameters_bd["temporal_order"]) and (
                model_parameters_fd["temporal_order"]
                == model_parameters_corr["temporal_order"]
            ), "The three models have different temporal_order"
    assert (parameters["quantization"] == parameters_bd["quantization"]) and (
        parameters["quantization"] == parameters_corr["quantization"]
    ), "The three models have different quantization"
    assert (parameters["temporal_granularity"]
            == parameters_bd["temporal_granularity"]) and (
                parameters["temporal_granularity"]
                == parameters_corr["temporal_granularity"]
            ), "The three models have different temporal_granularity"
    assert (parameters["instru_mapping"] == parameters_bd["instru_mapping"]
            ) and (parameters["instru_mapping"]
                   == parameters_corr["instru_mapping"]
                   ), "The three models have different instru_mapping"
    assert (parameters["normalizer"] == parameters_bd["normalizer"]) and (
        parameters["normalizer"] == parameters_corr["normalizer"]
    ), "The three models have different normalizer"

    # Set a minimum seed size, because for very short models you don't even see the beginning
    seed_size = max(model_parameters_fd['temporal_order'], 10) - 1

    #######################
    # Load data
    if re.search(r'mid$', score_source):
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = generation_utils.load_solo(
            score_source, parameters["quantization"],
            parameters["binarize_piano"], parameters["temporal_granularity"])
    else:
        # Align piano and orchestra only when the orchestra seed is actually used
        align_bool = (initialization_type == "seed")
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = generation_utils.load_from_pair(
            score_source,
            parameters["quantization"],
            parameters["binarize_piano"],
            parameters["binarize_orch"],
            parameters["temporal_granularity"],
            align_bool=align_bool)

    if (duration is None) or (duration < duration_gen):
        logger_generate.info("Track too short to be used")
        return
    ########################

    ########################
    # Shorten
    # Keep only the beginning of the piece (the first duration_gen events)
    pr_piano = pianoroll_processing.extract_pianoroll_part(
        pr_piano, 0, duration_gen)
    if parameters["duration_piano"]:
        duration_piano = np.asarray(duration_piano[:duration_gen])
    else:
        duration_piano = None
    if parameters["temporal_granularity"] == "event_level":
        event_piano = event_piano[:duration_gen]
    pr_orch = pianoroll_processing.extract_pianoroll_part(
        pr_orch, 0, duration_gen)
    ########################

    ########################
    # Instantiate piano pianoroll
    N_piano = parameters["instru_mapping"]['Piano']['index_max']
    pr_piano_gen = np.zeros((duration_gen, N_piano), dtype=np.float32)
    pr_piano_gen = build_data_aux.cast_small_pr_into_big_pr(
        pr_piano, {}, 0, duration_gen, parameters["instru_mapping"],
        pr_piano_gen)
    pr_piano_gen_flat = pr_piano_gen.sum(axis=1)
    silence_piano = [
        e for e in range(duration_gen) if pr_piano_gen_flat[e] == 0
    ]
    ########################

    ########################
    # Initialize orchestra pianoroll with orchestra seed (choose one)
    N_orchestra = parameters['N_orchestra']
    pr_orchestra_truth = np.zeros((duration_gen, N_orchestra),
                                  dtype=np.float32)
    pr_orchestra_truth = build_data_aux.cast_small_pr_into_big_pr(
        pr_orch, instru_orch, 0, duration_gen, parameters["instru_mapping"],
        pr_orchestra_truth)
    if initialization_type == "seed":
        pr_orchestra_seed = generation_utils.init_with_seed(
            pr_orch, number_of_version, seed_size, N_orchestra, instru_orch,
            parameters["instru_mapping"])
    elif initialization_type == "zeros":
        pr_orchestra_seed = generation_utils.init_with_zeros(
            number_of_version, seed_size, N_orchestra)
    elif initialization_type == "constant":
        const_value = 0.1
        pr_orchestra_seed = generation_utils.init_with_constant(
            number_of_version, seed_size, N_orchestra, const_value)
    elif initialization_type == "random":
        proba_activation = 0.01
        pr_orchestra_seed = generation_utils.init_with_random(
            number_of_version, seed_size, N_orchestra, proba_activation)
    ########################

    #######################################
    # Embed piano
    time_embedding = time.time()
    if parameters['embedded_piano']:
        # Load model
        embedding_path = parameters["embedding_path"]
        embedding_model = torch.load(embedding_path, map_location="cpu")

        # Build embedding (no need to batch here, len(pr_piano_gen) is sufficiently small)
        # No CUDA here either: afraid of mixing with TF, plus possibly very long piano chunks
        piano_resize_emb = np.zeros(
            (len(pr_piano_gen), 1, 128))  # The embedding model accepts size-128 inputs
        piano_resize_emb[:, 0, parameters["instru_mapping"]['Piano']
                         ['pitch_min']:parameters["instru_mapping"]['Piano']
                         ['pitch_max']] = pr_piano_gen
        piano_resize_emb_TT = torch.tensor(piano_resize_emb)
        piano_embedded_TT = embedding_model(piano_resize_emb_TT.float(), 0)
        pr_piano_gen_embedded = piano_embedded_TT.numpy()
    else:
        pr_piano_gen_embedded = pr_piano_gen
    time_embedding = time.time() - time_embedding
    #######################################

    ########################
    # Inputs' normalization
    normalizer = pkl.load(
        open(os.path.join(config_folder_fd, 'normalizer.pkl'), 'rb'))
    if parameters["embedded_piano"]:  # When using embedding, no normalization
        pr_piano_gen_norm = pr_piano_gen_embedded
    else:
        pr_piano_gen_norm = normalizer.transform(pr_piano_gen_embedded)
    ########################

    ########################
    # Store folder
    string = re.split(r'/', name_piano)[-1]
    name_track = re.sub(r'piano_solo\.mid', '', string)
    generated_folder = save_folder + '/fd_bd_corr_' + initialization_type + '_init/' + name_track
    if not os.path.isdir(generated_folder):
        os.makedirs(generated_folder)
    ########################

    ########################
    # Get trainer
    with open(os.path.join(config_folder_fd, 'which_trainer'), 'r') as ff:
        which_trainer_fd = ff.read()
    # Trainer
    trainer_fd = import_trainer(which_trainer_fd, model_parameters_fd,
                                parameters)
    #
    with open(os.path.join(config_folder_bd, 'which_trainer'), 'r') as ff:
        which_trainer_bd = ff.read()
    # Trainer
    trainer_bd = import_trainer(which_trainer_bd, model_parameters_bd,
                                parameters)
    #
    with open(os.path.join(config_folder_corr, 'which_trainer'), 'r') as ff:
        which_trainer_corr = ff.read()
    # Trainer
    trainer_corr = import_trainer(which_trainer_corr, model_parameters_corr,
                                  parameters)
    ########################

    ############################################################
    # Generate
    ############################################################
    time_generate_0 = time.time()
    model_path = 'model_accuracy'
    # Forward
    pr_orchestra_gen = generate(trainer_fd,
                                pr_piano_gen_norm,
                                silence_piano,
                                duration_piano,
                                config_folder_fd,
                                model_path,
                                pr_orchestra_seed,
                                batch_size=number_of_version)
    prefix_name = 'fd_'
    generation_utils.reconstruct_generation(pr_orchestra_gen, event_piano,
                                            generated_folder, prefix_name,
                                            parameters, seed_size)
    # Backward
    pr_orchestra_seed = pr_orchestra_gen[:, -seed_size:]
    pr_orchestra_gen = generate_backward(trainer_bd,
                                         pr_piano_gen_norm,
                                         silence_piano,
                                         duration_piano,
                                         config_folder_bd,
                                         model_path,
                                         pr_orchestra_seed,
                                         batch_size=number_of_version)
    prefix_name = 'bd_'
    generation_utils.reconstruct_generation(pr_orchestra_gen, event_piano,
                                            generated_folder, prefix_name,
                                            parameters, seed_size)
    # Correction
    for pass_index in range(num_pass_correct):
        pr_orchestra_gen = correct(trainer_corr,
                                   pr_piano_gen_norm,
                                   silence_piano,
                                   duration_piano,
                                   config_folder_corr,
                                   model_path,
                                   pr_orchestra_gen,
                                   batch_size=number_of_version)
        prefix_name = 'corr_' + str(pass_index) + '_'
        generation_utils.reconstruct_generation(pr_orchestra_gen, event_piano,
                                                generated_folder, prefix_name,
                                                parameters, seed_size)
    time_generate_1 = time.time()
    logger_generate.info(
        'TTT : Generating data took {} seconds'.format(time_generate_1 -
                                                       time_generate_0))

    ############################################################
    # Reconstruct and write
    ############################################################
    prefix_name = 'final_'
    generation_utils.reconstruct_generation(pr_orchestra_gen, event_piano,
                                            generated_folder, prefix_name,
                                            parameters, seed_size)
    generation_utils.reconstruct_original(pr_piano_gen, pr_orchestra_truth,
                                          event_piano, generated_folder,
                                          parameters)
    return
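
For context, here is a hedged sketch of how this three-model variant might be invoked. The folder paths, score path, and parameter values are hypothetical; only generate_midi itself comes from the listing above.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('generate')

# Hypothetical locations of the trained forward, backward and correction models.
generate_midi('/models/lop_forward',
              '/models/lop_backward',
              '/models/lop_correction',
              score_source='/scores/my_piece_piano_solo.mid',
              save_folder='/generations',
              initialization_type='zeros',  # or 'seed', 'constant', 'random'
              number_of_version=4,
              duration_gen=100,
              num_pass_correct=3,
              logger_generate=logger)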
Example #3
def generate_midi(config_folder, score_source, number_of_version, duration_gen,
                  logger_generate):
    """This function generate the orchestration of a midi piano score
    
    Parameters
    ----------
    config_folder : str
        Absolute path to the configuration folder, i.e. the folder containing the saved model and the results
    score_source : str
        Either a path to a folder containing two midi files (piano and orchestration) or the path toa piano midi files
    number_of_version : int
        Number of version generated in a batch manner. Since the generation process involves sampling it might be interesting to generate several versions
    duration_gen : int
        Length of the generated score (in number of events). Useful for generating only the beginning of the piece.
    logger_generate : logger
        Instanciation of logging. Can be None
    """

    logger_generate.info("#############################################")
    logger_generate.info("Orchestrating : " + score_source)
    ############################################################
    # Load model, config and data
    ############################################################

    ########################
    # Load config and model
    parameters = pkl.load(open(config_folder + '/script_parameters.pkl', 'rb'))
    model_parameters = pkl.load(open(config_folder + '/model_params.pkl',
                                     'rb'))
    # Set a minimum seed size, because for very short models you don't even see the beginning
    seed_size = max(model_parameters['temporal_order'], 10) - 1
    quantization = parameters['quantization']
    temporal_granularity = parameters['temporal_granularity']
    instru_mapping = parameters['instru_mapping']
    ########################

    #######################
    # Load data
    if re.search(r'mid$', score_source):
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = load_solo(
            score_source, quantization, parameters["binarize_piano"],
            temporal_granularity)
    else:
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = load_from_pair(
            score_source, quantization, parameters["binarize_piano"],
            parameters["binarize_orch"], temporal_granularity)

    if (duration is None) or (duration < duration_gen):
        logger_generate.info("Track too short to be used")
        return
    ########################

    ########################
    # Shorten
    # Keep only the beginning of the piece (the first duration_gen events)
    pr_piano = extract_pianoroll_part(pr_piano, 0, duration_gen)
    if parameters["duration_piano"]:
        duration_piano = np.asarray(duration_piano[:duration_gen])
    else:
        duration_piano = None
    if parameters["temporal_granularity"] == "event_level":
        event_piano = event_piano[:duration_gen]
    pr_orch = extract_pianoroll_part(pr_orch, 0, duration_gen)
    ########################

    ########################
    # Instantiate piano pianoroll
    N_piano = instru_mapping['Piano']['index_max']
    pr_piano_gen = np.zeros((duration_gen, N_piano), dtype=np.float32)
    pr_piano_gen = build_data_aux.cast_small_pr_into_big_pr(
        pr_piano, {}, 0, duration_gen, instru_mapping, pr_piano_gen)
    pr_piano_gen_flat = pr_piano_gen.sum(axis=1)
    silence_piano = [
        e for e in range(duration_gen) if pr_piano_gen_flat[e] == 0
    ]
    ########################

    ########################
    # Instantiate orchestra pianoroll with orchestra seed
    N_orchestra = parameters['N_orchestra']
    if pr_orch:
        pr_orchestra_gen = np.zeros((seed_size, N_orchestra), dtype=np.float32)
        orch_seed_beginning = {k: v[:seed_size] for k, v in pr_orch.items()}
        pr_orchestra_gen = build_data_aux.cast_small_pr_into_big_pr(
            orch_seed_beginning, instru_orch, 0, seed_size, instru_mapping,
            pr_orchestra_gen)
        pr_orchestra_truth = np.zeros((duration_gen, N_orchestra),
                                      dtype=np.float32)
        pr_orchestra_truth = build_data_aux.cast_small_pr_into_big_pr(
            pr_orch, instru_orch, 0, duration_gen, instru_mapping,
            pr_orchestra_truth)
    else:
        pr_orchestra_gen = None
        pr_orchestra_truth = None
    ########################

    #######################################
    # Embed piano
    time_embedding = time.time()
    if parameters['embedded_piano']:
        # Load model
        embedding_path = parameters["embedding_path"]
        embedding_model = torch.load(embedding_path, map_location="cpu")

        # Build embedding (no need to batch here, len(pr_piano_gen) is sufficiently small)
        # No CUDA here either: afraid of mixing with TF, plus possibly very long piano chunks
        piano_resize_emb = np.zeros(
            (len(pr_piano_gen), 1, 128))  # The embedding model accepts size-128 inputs
        piano_resize_emb[:, 0, instru_mapping['Piano']['pitch_min']:
                         instru_mapping['Piano']['pitch_max']] = pr_piano_gen
        piano_resize_emb_TT = torch.tensor(piano_resize_emb)
        piano_embedded_TT = embedding_model(piano_resize_emb_TT.float(), 0)
        pr_piano_gen_embedded = piano_embedded_TT.numpy()
    else:
        pr_piano_gen_embedded = pr_piano_gen
    time_embedding = time.time() - time_embedding
    #######################################

    ########################
    # Inputs' normalization
    normalizer = pkl.load(
        open(os.path.join(config_folder, 'normalizer.pkl'), 'rb'))
    if parameters["embedded_piano"]:  # When using embedding, no normalization
        pr_piano_gen_norm = pr_piano_gen_embedded
    else:
        pr_piano_gen_norm = normalizer.transform(pr_piano_gen_embedded)
    ########################

    ########################
    # Store folder
    string = re.split(r'/', name_piano)[-1]
    name_track = re.sub(r'piano_solo\.mid', '', string)
    generated_folder = config_folder + '/generation_reference_example/' + name_track
    if not os.path.isdir(generated_folder):
        os.makedirs(generated_folder)
    ########################

    ########################
    # Get trainer
    with open(os.path.join(config_folder, 'which_trainer'), 'r') as ff:
        which_trainer = ff.read()
    # Trainer
    trainer = import_trainer(which_trainer, model_parameters, parameters)

    ########################

    ############################################################
    # Generate
    ############################################################
    time_generate_0 = time.time()
    generated_sequences = {}
    for measure_name in parameters['save_measures']:
        model_path = 'model_' + measure_name
        generated_sequences[measure_name] = generate(
            trainer,
            pr_piano_gen_norm,
            silence_piano,
            duration_piano,
            config_folder,
            model_path,
            pr_orchestra_gen,
            batch_size=number_of_version)

    time_generate_1 = time.time()
    logger_generate.info(
        'TTT : Generating data took {} seconds'.format(time_generate_1 -
                                                       time_generate_0))

    ############################################################
    # Reconstruct and write
    ############################################################
    def reconstruct_write_aux(generated_sequences, prefix):
        for write_counter in range(generated_sequences.shape[0]):
            # To mark where the seed stops, insert a sustained note
            this_seq = generated_sequences[write_counter] * 127
            this_seq[:seed_size, 0] = 20
            # Reconstruct
            if parameters['temporal_granularity'] == 'event_level':
                pr_orchestra_rhythm = from_event_to_frame(
                    this_seq, event_piano)
                pr_orchestra_rhythm_I = instrument_reconstruction(
                    pr_orchestra_rhythm, instru_mapping)
                write_path = generated_folder + '/' + prefix + '_' + str(
                    write_counter) + '_generated_rhythm.mid'
                write_midi(pr_orchestra_rhythm_I,
                           quantization,
                           write_path,
                           tempo=80)
            pr_orchestra_event = this_seq
            pr_orchestra_event_I = instrument_reconstruction(
                pr_orchestra_event, instru_mapping)
            write_path = generated_folder + '/' + prefix + '_' + str(
                write_counter) + '_generated.mid'
            write_midi(pr_orchestra_event_I, 1, write_path, tempo=80)
        return

    for measure_name in parameters["save_measures"]:
        reconstruct_write_aux(generated_sequences[measure_name], measure_name)

    ############################################################
    ############################################################
    if parameters["temporal_granularity"] == 'event_level':
        # Write the original orchestration and piano scores in their reconstructed versions, as a sanity check
        A_rhythm = from_event_to_frame(pr_piano_gen, event_piano)
        B_rhythm = A_rhythm * 127
        piano_reconstructed_rhythm = instrument_reconstruction_piano(
            B_rhythm, instru_mapping)
        write_path = generated_folder + '/piano_reconstructed_rhythm.mid'
        write_midi(piano_reconstructed_rhythm,
                   quantization,
                   write_path,
                   tempo=80)
        # Truth
        A_rhythm = from_event_to_frame(pr_orchestra_truth, event_piano)
        B_rhythm = A_rhythm * 127
        orchestra_reconstructed_rhythm = instrument_reconstruction(
            B_rhythm, instru_mapping)
        write_path = generated_folder + '/orchestra_reconstructed_rhythm.mid'
        write_midi(orchestra_reconstructed_rhythm,
                   quantization,
                   write_path,
                   tempo=80)
        #
        A = pr_piano_gen
        B = A * 127
        piano_reconstructed = instrument_reconstruction_piano(
            B, instru_mapping)
        write_path = generated_folder + '/piano_reconstructed.mid'
        write_midi(piano_reconstructed, 1, write_path, tempo=80)
        #
        A = pr_orchestra_truth
        B = A * 127
        orchestra_reconstructed = instrument_reconstruction(B, instru_mapping)
        write_path = generated_folder + '/orchestra_reconstructed.mid'
        write_midi(orchestra_reconstructed, 1, write_path, tempo=80)
    else:
        A = pr_piano_gen
        B = A * 127
        piano_reconstructed = instrument_reconstruction_piano(
            B, instru_mapping)
        write_path = generated_folder + '/piano_reconstructed.mid'
        write_midi(piano_reconstructed, quantization, write_path, tempo=80)
        #
        A = pr_orchestra_truth
        B = A * 127
        orchestra_reconstructed = instrument_reconstruction(B, instru_mapping)
        write_path = generated_folder + '/orchestra_reconstructed.mid'
        write_midi(orchestra_reconstructed, quantization, write_path, tempo=80)
    ############################################################
    ############################################################
    return
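
The event-to-frame conversion used above (from_event_to_frame) is not part of this listing. The following is a minimal sketch of the idea, assuming event_piano holds the frame index at which each event starts in the original frame-level score; the real helper may differ.

import numpy as np

def from_event_to_frame_sketch(pr_event, event_frames):
    # Expand an event-level pianoroll to frame level by holding each event's
    # vector until the next event starts. The last event is held for one
    # frame, which is an assumption of this sketch.
    n_frames = event_frames[-1] + 1
    pr_frame = np.zeros((n_frames, pr_event.shape[1]), dtype=pr_event.dtype)
    for i, start in enumerate(event_frames):
        end = event_frames[i + 1] if i + 1 < len(event_frames) else n_frames
        pr_frame[start:end] = pr_event[i]
    return pr_frame

# Tiny usage: 3 events starting at frames 0, 2 and 5 over a 4-unit roll.
events = np.array([[1, 0, 0, 0],
                   [0, 1, 0, 0],
                   [0, 0, 1, 1]], dtype=np.float32)
print(from_event_to_frame_sketch(events, [0, 2, 5]).shape)  # (6, 4)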
Example #4
def generate_midi(config_folder, score_source, number_of_version, duration_gen,
                  rhythmic_reconstruction, logger_generate):
    """This function generate the orchestration of a midi piano score
    
    Parameters
    ----------
    config_folder : str
        Absolute path to the configuration folder, i.e. the folder containing the saved model and the results
    score_source : str
        Either a path to a folder containing two midi files (piano and orchestration) or the path toa piano midi files
    number_of_version : int
        Number of version generated in a batch manner. Since the generation process involves sampling it might be interesting to generate several versions
    duration_gen : int
        Length of the generated score (in number of events). Useful for generating only the beginning of the piece.
    rhythmic_reconstruction: bool
        Whether rythmic reconstrcution from event-level representation to frame-level reconstrcution is performed or not. If true is selected, the rhtyhmic structure of the original piano score is used.
    logger_generate : logger
        Instanciation of logging. Can be None
    """

    logger_generate.info("#############################################")
    logger_generate.info("Orchestrating piano score : " + score_source)
    ############################################################
    # Load model, config and data
    ############################################################

    ########################
    # Load config and model
    parameters = pkl.load(open(config_folder + '/script_parameters.pkl', 'rb'))
    model_parameters = pkl.load(open(config_folder + '/model_params.pkl',
                                     'rb'))
    # Set a minimum seed size, because for very short models you don't even see the beginning
    seed_size = max(model_parameters['temporal_order'], 10) - 1
    quantization = parameters['quantization']
    temporal_granularity = parameters['temporal_granularity']
    instru_mapping = parameters['instru_mapping']
    ########################

    ########################
    # Load data
    if re.search(r'mid$', score_source):
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = load_solo(
            score_source, quantization, parameters["binarize_piano"],
            temporal_granularity)
    else:
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = load_from_pair(
            score_source, quantization, parameters["binarize_piano"],
            parameters["binarize_orch"], temporal_granularity)
    ########################

    ########################
    # Shorten
    # Keep only the beginning of the piece (the first duration_gen events)
    pr_piano = extract_pianoroll_part(pr_piano, 0, duration_gen)
    if parameters["duration_piano"]:
        duration_piano = np.asarray(duration_piano[:duration_gen])
    else:
        duration_piano = None
    event_piano = event_piano[:duration_gen]
    pr_orch = extract_pianoroll_part(pr_orch, 0, duration_gen)
    ########################

    ########################
    # Instantiate piano pianoroll
    N_piano = instru_mapping['Piano']['index_max']
    pr_piano_gen = np.zeros((duration_gen, N_piano), dtype=np.float32)
    pr_piano_gen = build_data_aux.cast_small_pr_into_big_pr(
        pr_piano, {}, 0, duration_gen, instru_mapping, pr_piano_gen)
    pr_piano_gen_flat = pr_piano_gen.sum(axis=1)
    silence_piano = [
        e for e in range(duration_gen) if pr_piano_gen_flat[e] == 0
    ]
    ########################

    ########################
    # Instantiate orchestra pianoroll with orchestra seed
    N_orchestra = parameters['N_orchestra']
    if pr_orch:
        pr_orchestra_gen = np.zeros((seed_size, N_orchestra), dtype=np.float32)
        orch_seed_beginning = {k: v[:seed_size] for k, v in pr_orch.items()}
        pr_orchestra_gen = build_data_aux.cast_small_pr_into_big_pr(
            orch_seed_beginning, instru_orch, 0, seed_size, instru_mapping,
            pr_orchestra_gen)
        pr_orchestra_truth = np.zeros((duration_gen, N_orchestra),
                                      dtype=np.float32)
        pr_orchestra_truth = build_data_aux.cast_small_pr_into_big_pr(
            pr_orch, instru_orch, 0, duration_gen, instru_mapping,
            pr_orchestra_truth)
    else:
        pr_orchestra_gen = None
        pr_orchestra_truth = None
    ########################

    #######################################
    # Embed piano
    time_embedding = time.time()
    if parameters['embedded_piano']:
        # Load model
        embedding_path = parameters["embedding_path"]
        embedding_model = embedDenseNet(380, 12, (1500, 500), 100, 1500, 2, 3,
                                        12, 0.5, 0, False, True)
        embedding_model.load_state_dict(torch.load(embedding_path))

        # Build embedding (no need to batch here, len(pr_piano_gen) is sufficiently small)
        piano_resize_emb = np.zeros(
            (len(pr_piano_gen), 1, 128))  # The embedding model accepts size-128 inputs
        piano_resize_emb[:, 0, instru_mapping['Piano']['pitch_min']:
                         instru_mapping['Piano']['pitch_max']] = pr_piano_gen
        piano_resize_emb_TT = torch.tensor(piano_resize_emb)
        piano_embedded_TT = embedding_model(piano_resize_emb_TT.float(), 0)
        pr_piano_gen_embedded = piano_embedded_TT.numpy()
    else:
        pr_piano_gen_embedded = pr_piano_gen
    time_embedding = time.time() - time_embedding
    #######################################

    ########################
    # Inputs' normalization
    normalizer = pkl.load(
        open(os.path.join(config_folder, 'normalizer.pkl'), 'rb'))
    if parameters["embedded_piano"]:  # When using embedding, no normalization
        pr_piano_gen_norm = pr_piano_gen_embedded
    else:
        pr_piano_gen_norm = normalizer.transform(pr_piano_gen_embedded)
    ########################

    ########################
    # Store folder
    string = re.split(r'/', name_piano)[-1]
    name_track = re.sub(r'piano_solo\.mid', '', string)
    generated_folder = config_folder + '/generation_reference_example/' + name_track
    if not os.path.isdir(generated_folder):
        os.makedirs(generated_folder)
    ########################

    ########################
    # Get trainer
    with open(os.path.join(config_folder, 'which_trainer'), 'r') as ff:
        which_trainer = ff.read()
    # Trainer
    if which_trainer == 'standard_trainer':
        from LOP.Scripts.standard_learning.standard_trainer import Standard_trainer as Trainer
        kwargs_trainer = {'temporal_order': model_parameters["temporal_order"]}
    elif which_trainer == 'NADE_trainer':
        from LOP.Scripts.NADE_learning.NADE_trainer import NADE_trainer as Trainer
        kwargs_trainer = {
            'temporal_order': model_parameters["temporal_order"],
            'num_ordering': model_parameters["num_ordering"]
        }
    else:
        raise Exception("Undefined trainer")
    trainer = Trainer(**kwargs_trainer)
    ########################

    ############################################################
    # Generate
    ############################################################
    time_generate_0 = time.time()
    generated_sequences = {}
    for measure_name in parameters['save_measures']:
        model_path = 'model_' + measure_name
        generated_sequences[measure_name] = generate(
            trainer,
            pr_piano_gen_norm,
            silence_piano,
            duration_piano,
            config_folder,
            model_path,
            pr_orchestra_gen,
            batch_size=number_of_version)
    time_generate_1 = time.time()
    logger_generate.info(
        'TTT : Generating data took {} seconds'.format(time_generate_1 -
                                                       time_generate_0))

    ############################################################
    # Reconstruct and write
    ############################################################
    def reconstruct_write_aux(generated_sequences, prefix):
        for write_counter in range(generated_sequences.shape[0]):
            # To mark where the seed stops, insert a sustained note
            this_seq = generated_sequences[write_counter] * 127
            this_seq[:seed_size, 0] = 20
            # Reconstruct
            if rhythmic_reconstruction:
                pr_orchestra_clean = from_event_to_frame(this_seq, event_piano)
            else:
                pr_orchestra_clean = this_seq
            pr_orchestra = instrument_reconstruction(pr_orchestra_clean,
                                                     instru_mapping)
            # Write
            write_path = generated_folder + '/' + prefix + '_' + str(
                write_counter) + '_generated.mid'
            if rhythmic_reconstruction:
                write_midi(pr_orchestra, quantization, write_path, tempo=80)
            else:
                write_midi(pr_orchestra, 1, write_path, tempo=80)
        return

    for measure_name in parameters["save_measures"]:
        reconstruct_write_aux(generated_sequences[measure_name], measure_name)

    ############################################################
    ############################################################
    # Write the original orchestration and piano scores in their reconstructed versions, as a sanity check
    if rhythmic_reconstruction:
        A = from_event_to_frame(pr_piano_gen, event_piano)
    else:
        A = pr_piano_gen
    B = A * 127
    piano_reconstructed = instrument_reconstruction_piano(B, instru_mapping)
    write_path = generated_folder + '/piano_reconstructed.mid'
    if rhythmic_reconstruction:
        write_midi(piano_reconstructed, quantization, write_path, tempo=80)
    else:
        write_midi(piano_reconstructed, 1, write_path, tempo=80)
    #
    if rhythmic_reconstruction:
        A = from_event_to_frame(pr_orchestra_truth, event_piano)
    else:
        A = pr_orchestra_truth
    B = A * 127
    orchestra_reconstructed = instrument_reconstruction(B, instru_mapping)
    write_path = generated_folder + '/orchestra_reconstructed.mid'
    if rhythmic_reconstruction:
        write_midi(orchestra_reconstructed, quantization, write_path, tempo=80)
    else:
        write_midi(orchestra_reconstructed, 1, write_path, tempo=80)
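
Before writing MIDI, reconstruct_write_aux scales the binary activations to MIDI velocities and marks the seed region with a quiet sustained note in the lowest unit, so the seed/generation boundary is audible. A self-contained sketch of that preprocessing, with hypothetical sizes and a random stand-in for the generated batch:

import numpy as np

duration_gen, seed_size, n_units = 100, 9, 128

# Hypothetical binary generation (0/1 activations) for a single version.
generated = (np.random.rand(duration_gen, n_units) > 0.99).astype(np.float32)

# Same preprocessing as reconstruct_write_aux: scale activations to velocity
# 127, then hold velocity 20 in unit 0 over the seed frames so the boundary
# between seed and generation can be heard in the rendered file.
this_seq = generated * 127
this_seq[:seed_size, 0] = 20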