Code example #1
# Assumes numpy plus the project's read_midi / get_time / write_midi helpers are available.
import numpy as np

def merge_tracks(tracks, dest_path):
    # Merging is easier with pianorolls
    quantization = 64
    T = 0
    for track in tracks:
        # This track's length plus a trailing silence of four quarter notes
        T += get_time(track, quantization) + quantization * 4

    t = 0
    flag_time_increment = True
    pr = {}
    for track in tracks:
        a = read_midi(track, quantization)
        for k in a.keys():
            if flag_time_increment:
                tt = t + a[k].shape[0]
                flag_time_increment = False
            if k not in pr.keys():
                pr[k] = np.zeros((T, 128))
            pr[k][t:tt] = a[k]
        t = tt + quantization * 4
        flag_time_increment = True

    write_midi(pr, quantization, dest_path, tempo=80)
    return
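A minimal usage sketch for this helper (the file paths are hypothetical; read_midi, get_time and write_midi are assumed to come from the surrounding project):

# Hypothetical example: concatenate three piano MIDI files into one,
# separated by four quarter notes of silence.
tracks = ['intro.mid', 'theme.mid', 'coda.mid']  # placeholder paths
merge_tracks(tracks, 'merged.mid')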
Code example #2
def reconstruct_original(pr_piano_gen, pr_orchestra_truth, event_piano,
                         generated_folder, parameters):
    if parameters["temporal_granularity"] == 'event_level':
        # Write the original piano and orchestra scores in reconstructed form, as a sanity check
        A_rhythm = event_level.from_event_to_frame(pr_piano_gen, event_piano)
        B_rhythm = A_rhythm * 127
        piano_reconstructed_rhythm = reconstruct_pr.instrument_reconstruction_piano(
            B_rhythm, parameters["instru_mapping"])
        write_path = generated_folder + '/piano_reconstructed_rhythm.mid'
        write_midi(piano_reconstructed_rhythm,
                   parameters["quantization"],
                   write_path,
                   tempo=80)
        # Ground-truth rhythm reconstruction (currently disabled):
        # A_rhythm = event_level.from_event_to_frame(pr_orchestra_truth, event_piano)
        # B_rhythm = A_rhythm * 127
        # orchestra_reconstructed_rhythm = reconstruct_pr.instrument_reconstruction(B_rhythm, parameters["instru_mapping"])
        # write_path = generated_folder + '/orchestra_reconstructed_rhythm.mid'
        # write_midi(orchestra_reconstructed_rhythm, parameters["quantization"], write_path, tempo=80)
        #
        A = pr_piano_gen
        B = A * 127
        piano_reconstructed = reconstruct_pr.instrument_reconstruction_piano(
            B, parameters["instru_mapping"])
        write_path = generated_folder + '/piano_reconstructed.mid'
        write_midi(piano_reconstructed, 1, write_path, tempo=80)
        #
        A = pr_orchestra_truth
        B = A * 127
        orchestra_reconstructed = reconstruct_pr.instrument_reconstruction(
            B, parameters["instru_mapping"])
        write_path = generated_folder + '/orchestra_reconstructed.mid'
        write_midi(orchestra_reconstructed, 1, write_path, tempo=80)
    else:
        A = pr_piano_gen
        B = A * 127
        piano_reconstructed = reconstruct_pr.instrument_reconstruction_piano(
            B, parameters["instru_mapping"])
        write_path = generated_folder + '/piano_reconstructed.mid'
        write_midi(piano_reconstructed,
                   parameters["quantization"],
                   write_path,
                   tempo=80)
        #
        A = pr_orchestra_truth
        B = A * 127
        orchestra_reconstructed = reconstruct_pr.instrument_reconstruction(
            B, parameters["instru_mapping"])
        write_path = generated_folder + '/orchestra_reconstructed.mid'
        write_midi(orchestra_reconstructed,
                   parameters["quantization"],
                   write_path,
                   tempo=80)
    return
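For reference, a hedged sketch of the parameters dict this function reads (the keys are inferred from the lookups above; the values shown are purely illustrative):

parameters = {
    'temporal_granularity': 'event_level',  # any other value takes the else branch
    'instru_mapping': instru_mapping,       # instrument mapping built elsewhere in the project
    'quantization': 8,                      # illustrative value
}
reconstruct_original(pr_piano_gen, pr_orchestra_truth, event_piano,
                     '/tmp/generated', parameters)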
Code example #3
# This helper also appears nested inside generate_midi (see code example #8); it closes over
# seed_size, rhythmic_reconstruction, event_piano, instru_mapping, generated_folder and quantization.
def reconstruct_write_aux(generated_sequences, prefix):
    for write_counter in range(generated_sequences.shape[0]):
        # To mark where the seed ends, insert a sustained note
        this_seq = generated_sequences[write_counter] * 127
        this_seq[:seed_size, 0] = 20
        # Reconstruct
        if rhythmic_reconstruction:
            pr_orchestra_clean = from_event_to_frame(this_seq, event_piano)
        else:
            pr_orchestra_clean = this_seq
        pr_orchestra = instrument_reconstruction(pr_orchestra_clean,
                                                 instru_mapping)
        # Write
        write_path = generated_folder + '/' + prefix + '_' + str(
            write_counter) + '_generated.mid'
        if rhythmic_reconstruction:
            write_midi(pr_orchestra, quantization, write_path, tempo=80)
        else:
            write_midi(pr_orchestra, 1, write_path, tempo=80)
    return
Code example #4
def build_split_matrices(folder_paths, quantization, temporal_granularity):
	file_counter = 0
	train_only_files = {}
	train_and_valid_files = {}

	for folder_path in folder_paths:
		#############
		# Read file
		folder_path = folder_path.rstrip()
		print(folder_path)
		if not os.path.isdir(folder_path):
			continue

		# Is there an original piano score, or do we have to create it?
		num_music_file = max(len(glob.glob(folder_path + '/*.mid')), len(glob.glob(folder_path + '/*.xml')))
		if num_music_file == 2:
			is_piano = True
		elif num_music_file == 1:
			is_piano = False
		else:
			raise Exception("CAVAVAVAMAVAL")

		# Get pianoroll, its warped version and the duration
		# (binary_piano and binary_orch are expected to be defined in the enclosing scope)
		if is_piano:
			new_pr_piano, _, _, _, new_name_piano, _, _, _, _, _, duration\
				= build_data_aux.process_folder(folder_path, quantization, binary_piano, binary_orch, temporal_granularity, gapopen=3, gapextend=1)
		else:
			new_pr_piano, _, _, _, new_name_piano, _, _, _, _, _, duration\
				= build_data_aux_no_piano.process_folder_NP(folder_path, quantization, binary_piano, binary_orch, temporal_granularity)

		if new_pr_piano is None:
			print("FAIL !")
			continue

		split_name = re.split("/", new_name_piano)
		folder_name = "Piano_files_for_embeddings/" + split_name[-3]
		file_name = split_name[-1] + '.mid'
		if not os.path.isdir(folder_name):
			os.makedirs(folder_name)
		write_midi(new_pr_piano, 1000, folder_name + '/' + file_name, tempo=80)
	return
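A hedged call sketch (the folder list is illustrative; as noted in the comment above, binary_piano and binary_orch must already exist in the enclosing scope):

binary_piano, binary_orch = True, True  # assumed module-level flags used inside the function
with open('folder_list.txt') as f:      # hypothetical file listing one folder per line
    folder_paths = f.readlines()
build_split_matrices(folder_paths, quantization=8, temporal_granularity='event_level')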
Code example #5
def reconstruct_generation(generated_sequences, event_piano, generated_folder,
                           prefix_name, parameters, seed_size):
    for write_counter in range(generated_sequences.shape[0]):
        # To mark where the seed ends, insert a sustained note
        this_seq = generated_sequences[write_counter] * 127
        this_seq[:seed_size, 0] = 20
        # Reconstruct
        if parameters['temporal_granularity'] == 'event_level':
            pr_orchestra_rhythm = event_level.from_event_to_frame(
                this_seq, event_piano)
            pr_orchestra_rhythm_I = reconstruct_pr.instrument_reconstruction(
                pr_orchestra_rhythm, parameters["instru_mapping"])
            write_path = generated_folder + '/' + prefix_name + str(
                write_counter) + '_generated_rhythm.mid'
            write_midi(pr_orchestra_rhythm_I,
                       parameters["quantization"],
                       write_path,
                       tempo=80)
        pr_orchestra_event = this_seq
        pr_orchestra_event_I = reconstruct_pr.instrument_reconstruction(
            pr_orchestra_event, parameters["instru_mapping"])
        write_path = generated_folder + '/' + prefix_name + str(
            write_counter) + '_generated.mid'
        if parameters['temporal_granularity'] == 'event_level':
            write_midi(pr_orchestra_event_I, 1, write_path, tempo=80)
        else:
            write_midi(pr_orchestra_event_I,
                       parameters['quantization'],
                       write_path,
                       tempo=80)
    return
Code example #6
# This helper also appears nested inside generate_midi (see code example #7); it closes over
# seed_size, parameters, event_piano, instru_mapping, generated_folder and quantization.
def reconstruct_write_aux(generated_sequences, prefix):
    for write_counter in range(generated_sequences.shape[0]):
        # To mark where the seed ends, insert a sustained note
        this_seq = generated_sequences[write_counter] * 127
        this_seq[:seed_size, 0] = 20
        # Reconstruct
        if parameters['temporal_granularity'] == 'event_level':
            pr_orchestra_rhythm = from_event_to_frame(
                this_seq, event_piano)
            pr_orchestra_rhythm_I = instrument_reconstruction(
                pr_orchestra_rhythm, instru_mapping)
            write_path = generated_folder + '/' + prefix + '_' + str(
                write_counter) + '_generated_rhythm.mid'
            write_midi(pr_orchestra_rhythm_I,
                       quantization,
                       write_path,
                       tempo=80)
        pr_orchestra_event = this_seq
        pr_orchestra_event_I = instrument_reconstruction(
            pr_orchestra_event, instru_mapping)
        write_path = generated_folder + '/' + prefix + '_' + str(
            write_counter) + '_generated.mid'
        write_midi(pr_orchestra_event_I, 1, write_path, tempo=80)
    return
Code example #7
def generate_midi(config_folder, score_source, number_of_version, duration_gen,
                  logger_generate):
    """This function generate the orchestration of a midi piano score
    
    Parameters
    ----------
    config_folder : str
        Absolute path to the configuration folder, i.e. the folder containing the saved model and the results
    score_source : str
        Either a path to a folder containing two MIDI files (piano and orchestration), or the path to a piano MIDI file
    number_of_version : int
        Number of versions generated in one batch. Since the generation process involves sampling, it can be interesting to generate several versions
    duration_gen : int
        Length of the generated score (in number of events). Useful for generating only the beginning of the piece.
    logger_generate : logger
        Instance of a logging.Logger. Can be None
    """

    logger_generate.info("#############################################")
    logger_generate.info("Orchestrating : " + score_source)
    ############################################################
    # Load model, config and data
    ############################################################

    ########################
    # Load config and model
    parameters = pkl.load(open(config_folder + '/script_parameters.pkl', 'rb'))
    model_parameters = pkl.load(open(config_folder + '/model_params.pkl',
                                     'rb'))
    # Set a minimum seed size, because for very short models you don't even see the beginning
    seed_size = max(model_parameters['temporal_order'], 10) - 1
    quantization = parameters['quantization']
    temporal_granularity = parameters['temporal_granularity']
    instru_mapping = parameters['instru_mapping']
    ########################

    #######################
    # Load data
    if re.search(r'mid$', score_source):
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = load_solo(
            score_source, quantization, parameters["binarize_piano"],
            temporal_granularity)
    else:
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = load_from_pair(
            score_source, quantization, parameters["binarize_piano"],
            parameters["binarize_orch"], temporal_granularity)

    if (duration is None) or (duration < duration_gen):
        logger_generate.info("Track too short to be used")
        return
    ########################

    ########################
    # Shorten
    # Keep only the beginning of the pieces (say, 100 events)
    pr_piano = extract_pianoroll_part(pr_piano, 0, duration_gen)
    if parameters["duration_piano"]:
        duration_piano = np.asarray(duration_piano[:duration_gen])
    else:
        duration_piano = None
    if parameters["temporal_granularity"] == "event_level":
        event_piano = event_piano[:duration_gen]
    pr_orch = extract_pianoroll_part(pr_orch, 0, duration_gen)
    ########################

    ########################
    # Instantiate piano pianoroll
    N_piano = instru_mapping['Piano']['index_max']
    pr_piano_gen = np.zeros((duration_gen, N_piano), dtype=np.float32)
    pr_piano_gen = build_data_aux.cast_small_pr_into_big_pr(
        pr_piano, {}, 0, duration_gen, instru_mapping, pr_piano_gen)
    pr_piano_gen_flat = pr_piano_gen.sum(axis=1)
    silence_piano = [
        e for e in range(duration_gen) if pr_piano_gen_flat[e] == 0
    ]
    ########################

    ########################
    # Instantiate orchestra pianoroll with orchestra seed
    N_orchestra = parameters['N_orchestra']
    if pr_orch:
        pr_orchestra_gen = np.zeros((seed_size, N_orchestra), dtype=np.float32)
        orch_seed_beginning = {k: v[:seed_size] for k, v in pr_orch.items()}
        pr_orchestra_gen = build_data_aux.cast_small_pr_into_big_pr(
            orch_seed_beginning, instru_orch, 0, seed_size, instru_mapping,
            pr_orchestra_gen)
        pr_orchestra_truth = np.zeros((duration_gen, N_orchestra),
                                      dtype=np.float32)
        pr_orchestra_truth = build_data_aux.cast_small_pr_into_big_pr(
            pr_orch, instru_orch, 0, duration_gen, instru_mapping,
            pr_orchestra_truth)
    else:
        pr_orchestra_gen = None
        pr_orchestra_truth = None
    ########################

    #######################################
    # Embed piano
    time_embedding = time.time()
    if parameters['embedded_piano']:
        # Load model
        embedding_path = parameters["embedding_path"]
        embedding_model = torch.load(embedding_path, map_location="cpu")

        # Build embedding (no need to batch here, len(pr_piano_gen) is sufficiently small)
        # No CUDA here: avoid mixing with TF, and the piano chunks can be very long
        piano_resize_emb = np.zeros(
            (len(pr_piano_gen), 1, 128))  # The embedding expects a pitch dimension of 128
        piano_resize_emb[:, 0, instru_mapping['Piano']['pitch_min']:
                         instru_mapping['Piano']['pitch_max']] = pr_piano_gen
        piano_resize_emb_TT = torch.tensor(piano_resize_emb)
        piano_embedded_TT = embedding_model(piano_resize_emb_TT.float(), 0)
        pr_piano_gen_embedded = piano_embedded_TT.numpy()
    else:
        pr_piano_gen_embedded = pr_piano_gen
    time_embedding = time.time() - time_embedding
    #######################################

    ########################
    # Input normalization
    normalizer = pkl.load(
        open(os.path.join(config_folder, 'normalizer.pkl'), 'rb'))
    if parameters["embedded_piano"]:  # When using embedding, no normalization
        pr_piano_gen_norm = pr_piano_gen_embedded
    else:
        pr_piano_gen_norm = normalizer.transform(pr_piano_gen_embedded)
    ########################

    ########################
    # Store folder
    string = re.split(r'/', name_piano)[-1]
    name_track = re.sub('piano_solo.mid', '', string)
    generated_folder = config_folder + '/generation_reference_example/' + name_track
    if not os.path.isdir(generated_folder):
        os.makedirs(generated_folder)
    ########################

    ########################
    # Get trainer
    with open(os.path.join(config_folder, 'which_trainer'), 'r') as ff:
        which_trainer = ff.read()
    # Trainer
    trainer = import_trainer(which_trainer, model_parameters, parameters)

    ########################

    ############################################################
    # Generate
    ############################################################
    time_generate_0 = time.time()
    generated_sequences = {}
    for measure_name in parameters['save_measures']:
        model_path = 'model_' + measure_name
        generated_sequences[measure_name] = generate(
            trainer,
            pr_piano_gen_norm,
            silence_piano,
            duration_piano,
            config_folder,
            model_path,
            pr_orchestra_gen,
            batch_size=number_of_version)

    time_generate_1 = time.time()
    logger_generate.info(
        'TTT : Generating data took {} seconds'.format(time_generate_1 -
                                                       time_generate_0))

    ############################################################
    # Reconstruct and write
    ############################################################
    def reconstruct_write_aux(generated_sequences, prefix):
        for write_counter in range(generated_sequences.shape[0]):
            # To mark where the seed ends, insert a sustained note
            this_seq = generated_sequences[write_counter] * 127
            this_seq[:seed_size, 0] = 20
            # Reconstruct
            if parameters['temporal_granularity'] == 'event_level':
                pr_orchestra_rhythm = from_event_to_frame(
                    this_seq, event_piano)
                pr_orchestra_rhythm_I = instrument_reconstruction(
                    pr_orchestra_rhythm, instru_mapping)
                write_path = generated_folder + '/' + prefix + '_' + str(
                    write_counter) + '_generated_rhythm.mid'
                write_midi(pr_orchestra_rhythm_I,
                           quantization,
                           write_path,
                           tempo=80)
            pr_orchestra_event = this_seq
            pr_orchestra_event_I = instrument_reconstruction(
                pr_orchestra_event, instru_mapping)
            write_path = generated_folder + '/' + prefix + '_' + str(
                write_counter) + '_generated.mid'
            write_midi(pr_orchestra_event_I, 1, write_path, tempo=80)
        return

    for measure_name in parameters["save_measures"]:
        reconstruct_write_aux(generated_sequences[measure_name], measure_name)

    ############################################################
    ############################################################
    if parameters["temporal_granularity"] == 'event_level':
        # Write the original piano and orchestra scores in reconstructed form, as a sanity check
        A_rhythm = from_event_to_frame(pr_piano_gen, event_piano)
        B_rhythm = A_rhythm * 127
        piano_reconstructed_rhythm = instrument_reconstruction_piano(
            B_rhythm, instru_mapping)
        write_path = generated_folder + '/piano_reconstructed_rhythm.mid'
        write_midi(piano_reconstructed_rhythm,
                   quantization,
                   write_path,
                   tempo=80)
        # Truth
        A_rhythm = from_event_to_frame(pr_orchestra_truth, event_piano)
        B_rhythm = A_rhythm * 127
        orchestra_reconstructed_rhythm = instrument_reconstruction(
            B_rhythm, instru_mapping)
        write_path = generated_folder + '/orchestra_reconstructed_rhythm.mid'
        write_midi(orchestra_reconstructed_rhythm,
                   quantization,
                   write_path,
                   tempo=80)
        #
        A = pr_piano_gen
        B = A * 127
        piano_reconstructed = instrument_reconstruction_piano(
            B, instru_mapping)
        write_path = generated_folder + '/piano_reconstructed.mid'
        write_midi(piano_reconstructed, 1, write_path, tempo=80)
        #
        A = pr_orchestra_truth
        B = A * 127
        orchestra_reconstructed = instrument_reconstruction(B, instru_mapping)
        write_path = generated_folder + '/orchestra_reconstructed.mid'
        write_midi(orchestra_reconstructed, 1, write_path, tempo=80)
    else:
        A = pr_piano_gen
        B = A * 127
        piano_reconstructed = instrument_reconstruction_piano(
            B, instru_mapping)
        write_path = generated_folder + '/piano_reconstructed.mid'
        write_midi(piano_reconstructed, quantization, write_path, tempo=80)
        #
        A = pr_orchestra_truth
        B = A * 127
        orchestra_reconstructed = instrument_reconstruction(B, instru_mapping)
        write_path = generated_folder + '/orchestra_reconstructed.mid'
        write_midi(orchestra_reconstructed, quantization, write_path, tempo=80)
    ############################################################
    ############################################################
    return
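A minimal, hypothetical invocation of this function (all paths are placeholders; the config folder is assumed to contain the pickled parameters, normalizer and trained models the code loads):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('generate')
generate_midi(config_folder='/path/to/config',           # placeholder
              score_source='/path/to/piano_solo.mid',    # placeholder
              number_of_version=3,   # sample three orchestrations in one batch
              duration_gen=100,      # orchestrate only the first 100 events
              logger_generate=logger)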
Code example #8
def generate_midi(config_folder, score_source, number_of_version, duration_gen,
                  rhythmic_reconstruction, logger_generate):
    """This function generate the orchestration of a midi piano score
    
    Parameters
    ----------
    config_folder : str
        Absolute path to the configuration folder, i.e. the folder containing the saved model and the results
    score_source : str
        Either a path to a folder containing two MIDI files (piano and orchestration), or the path to a piano MIDI file
    number_of_version : int
        Number of versions generated in one batch. Since the generation process involves sampling, it can be interesting to generate several versions
    duration_gen : int
        Length of the generated score (in number of events). Useful for generating only the beginning of the piece.
    rhythmic_reconstruction: bool
        Whether rhythmic reconstruction from the event-level representation back to the frame-level representation is performed. If True, the rhythmic structure of the original piano score is used.
    logger_generate : logger
        Instance of a logging.Logger. Can be None
    """

    logger_generate.info("#############################################")
    logger_generate.info("Orchestrating piano score : " + score_source)
    ############################################################
    # Load model, config and data
    ############################################################

    ########################
    # Load config and model
    parameters = pkl.load(open(config_folder + '/script_parameters.pkl', 'rb'))
    model_parameters = pkl.load(open(config_folder + '/model_params.pkl',
                                     'rb'))
    # Set a minimum seed size, because for very short models you don't even see the beginning
    seed_size = max(model_parameters['temporal_order'], 10) - 1
    quantization = parameters['quantization']
    temporal_granularity = parameters['temporal_granularity']
    instru_mapping = parameters['instru_mapping']
    ########################

    ########################
    # Load data
    if re.search(r'mid$', score_source):
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = load_solo(
            score_source, quantization, parameters["binarize_piano"],
            temporal_granularity)
    else:
        pr_piano, event_piano, duration_piano, name_piano, pr_orch, instru_orch, duration = load_from_pair(
            score_source, quantization, parameters["binarize_piano"],
            parameters["binarize_orch"], temporal_granularity)
    ########################

    ########################
    # Shorten
    # Keep only the beginning of the pieces (say, 100 events)
    pr_piano = extract_pianoroll_part(pr_piano, 0, duration_gen)
    if parameters["duration_piano"]:
        duration_piano = np.asarray(duration_piano[:duration_gen])
    else:
        duration_piano = None
    event_piano = event_piano[:duration_gen]
    pr_orch = extract_pianoroll_part(pr_orch, 0, duration_gen)
    ########################

    ########################
    # Instantiate piano pianoroll
    N_piano = instru_mapping['Piano']['index_max']
    pr_piano_gen = np.zeros((duration_gen, N_piano), dtype=np.float32)
    pr_piano_gen = build_data_aux.cast_small_pr_into_big_pr(
        pr_piano, {}, 0, duration_gen, instru_mapping, pr_piano_gen)
    pr_piano_gen_flat = pr_piano_gen.sum(axis=1)
    silence_piano = [
        e for e in range(duration_gen) if pr_piano_gen_flat[e] == 0
    ]
    ########################

    ########################
    # Instantiate orchestra pianoroll with orchestra seed
    N_orchestra = parameters['N_orchestra']
    if pr_orch:
        pr_orchestra_gen = np.zeros((seed_size, N_orchestra), dtype=np.float32)
        orch_seed_beginning = {k: v[:seed_size] for k, v in pr_orch.items()}
        pr_orchestra_gen = build_data_aux.cast_small_pr_into_big_pr(
            orch_seed_beginning, instru_orch, 0, seed_size, instru_mapping,
            pr_orchestra_gen)
        pr_orchestra_truth = np.zeros((duration_gen, N_orchestra),
                                      dtype=np.float32)
        pr_orchestra_truth = build_data_aux.cast_small_pr_into_big_pr(
            pr_orch, instru_orch, 0, duration_gen, instru_mapping,
            pr_orchestra_truth)
    else:
        pr_orchestra_gen = None
        pr_orchestra_truth = None
    ########################

    #######################################
    # Embed piano
    time_embedding = time.time()
    if parameters['embedded_piano']:
        # Load model
        embedding_path = parameters["embedding_path"]
        embedding_model = embedDenseNet(380, 12, (1500, 500), 100, 1500, 2, 3,
                                        12, 0.5, 0, False, True)
        embedding_model.load_state_dict(torch.load(embedding_path))

        # Build embedding (no need to batch here, len(pr_piano_gen) is sufficiently small)
        piano_resize_emb = np.zeros(
            (len(pr_piano_gen), 1, 128))  # The embedding expects a pitch dimension of 128
        piano_resize_emb[:, 0, instru_mapping['Piano']['pitch_min']:
                         instru_mapping['Piano']['pitch_max']] = pr_piano_gen
        piano_resize_emb_TT = torch.tensor(piano_resize_emb)
        piano_embedded_TT = embedding_model(piano_resize_emb_TT.float(), 0)
        pr_piano_gen_embedded = piano_embedded_TT.numpy()
    else:
        pr_piano_gen_embedded = pr_piano_gen
    time_embedding = time.time() - time_embedding
    #######################################

    ########################
    # Input normalization
    normalizer = pkl.load(
        open(os.path.join(config_folder, 'normalizer.pkl'), 'rb'))
    if parameters["embedded_piano"]:  # When using embedding, no normalization
        pr_piano_gen_norm = pr_piano_gen_embedded
    else:
        pr_piano_gen_norm = normalizer.transform(pr_piano_gen_embedded)
    ########################

    ########################
    # Store folder
    string = re.split(r'/', name_piano)[-1]
    name_track = re.sub('piano_solo.mid', '', string)
    generated_folder = config_folder + '/generation_reference_example/' + name_track
    if not os.path.isdir(generated_folder):
        os.makedirs(generated_folder)
    ########################

    ########################
    # Get trainer
    with open(os.path.join(config_folder, 'which_trainer'), 'r') as ff:
        which_trainer = ff.read()
    # Trainer
    if which_trainer == 'standard_trainer':
        from LOP.Scripts.standard_learning.standard_trainer import Standard_trainer as Trainer
        kwargs_trainer = {'temporal_order': model_parameters["temporal_order"]}
    elif which_trainer == 'NADE_trainer':
        from LOP.Scripts.NADE_learning.NADE_trainer import NADE_trainer as Trainer
        kwargs_trainer = {
            'temporal_order': model_parameters["temporal_order"],
            'num_ordering': model_parameters["num_ordering"]
        }
    else:
        raise Exception("Undefined trainer")
    trainer = Trainer(**kwargs_trainer)
    ########################

    ############################################################
    # Generate
    ############################################################
    time_generate_0 = time.time()
    generated_sequences = {}
    for measure_name in parameters['save_measures']:
        model_path = 'model_' + measure_name
        generated_sequences[measure_name] = generate(
            trainer,
            pr_piano_gen_norm,
            silence_piano,
            duration_piano,
            config_folder,
            model_path,
            pr_orchestra_gen,
            batch_size=number_of_version)
    time_generate_1 = time.time()
    logger_generate.info(
        'TTT : Generating data took {} seconds'.format(time_generate_1 -
                                                       time_generate_0))

    ############################################################
    # Reconstruct and write
    ############################################################
    def reconstruct_write_aux(generated_sequences, prefix):
        for write_counter in range(generated_sequences.shape[0]):
            # To mark where the seed ends, insert a sustained note
            this_seq = generated_sequences[write_counter] * 127
            this_seq[:seed_size, 0] = 20
            # Reconstruct
            if rhythmic_reconstruction:
                pr_orchestra_clean = from_event_to_frame(this_seq, event_piano)
            else:
                pr_orchestra_clean = this_seq
            pr_orchestra = instrument_reconstruction(pr_orchestra_clean,
                                                     instru_mapping)
            # Write
            write_path = generated_folder + '/' + prefix + '_' + str(
                write_counter) + '_generated.mid'
            if rhythmic_reconstruction:
                write_midi(pr_orchestra, quantization, write_path, tempo=80)
            else:
                write_midi(pr_orchestra, 1, write_path, tempo=80)
        return

    for measure_name in parameters["save_measures"]:
        reconstruct_write_aux(generated_sequences[measure_name], measure_name)

    ############################################################
    ############################################################
    # Write the original piano and orchestra scores in reconstructed form, as a sanity check
    if rhythmic_reconstruction:
        A = from_event_to_frame(pr_piano_gen, event_piano)
    else:
        A = pr_piano_gen
    B = A * 127
    piano_reconstructed = instrument_reconstruction_piano(B, instru_mapping)
    write_path = generated_folder + '/piano_reconstructed.mid'
    if rhythmic_reconstruction:
        write_midi(piano_reconstructed, quantization, write_path, tempo=80)
    else:
        write_midi(piano_reconstructed, 1, write_path, tempo=80)
    #
    if rhythmic_reconstruction:
        A = from_event_to_frame(pr_orchestra_truth, event_piano)
    else:
        A = pr_orchestra_truth
    B = A * 127
    orchestra_reconstructed = instrument_reconstruction(B, instru_mapping)
    write_path = generated_folder + '/orchestra_reconstructed.mid'
    if rhythmic_reconstruction:
        write_midi(orchestra_reconstructed, quantization, write_path, tempo=80)
    else:
        write_midi(orchestra_reconstructed, 1, write_path, tempo=80)
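This variant takes one extra flag compared with the previous example; a hypothetical call (placeholders as before, logger as in the previous sketch):

generate_midi('/path/to/config', '/path/to/piano_solo.mid',
              number_of_version=3, duration_gen=100,
              rhythmic_reconstruction=True,  # map events back onto the original piano rhythm
              logger_generate=logger)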
Code example #9
        # Get midi file names
        pair_folder = IN_DB + '/' + pair
        mid_files = glob.glob(pair_folder + '/*.mid')
        csv_files = glob.glob(pair_folder + '/*.csv')
        if len(mid_files) != 2:
            raise Exception(pair_folder + " does not contain exactly 2 midi files")

        # Read midi files
        prs = [Read_midi(e, quantization).read_file() for e in mid_files]

        # Align them
        pr0_aligned, _, pr1_aligned, _, _, _ = align_pianorolls(prs[0],
                                                                prs[1],
                                                                gapopen=3,
                                                                gapextend=1)
        prs_out = [pr0_aligned, pr1_aligned]

        # Output file names
        mid_files_outS = [re.sub(IN_DB, OUT_DB, e) for e in mid_files]
        out_folder = re.sub(IN_DB, OUT_DB, pair_folder)

        # Create directory
        os.makedirs(out_folder)

        # Write aligned midi in it
        for pr, out_path in zip(prs_out, mid_files_outS):
            write_midi(pr, quantization, out_path)
        # Copy csv files
        for csv_file in csv_files:
            shutil.copy(csv_file, re.sub(IN_DB, OUT_DB, csv_file))
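This snippet is the body of a loop over piano/orchestra pairs; a hedged sketch of the enclosing scope it assumes (all names are inferred from the code, values illustrative, and Read_midi, align_pianorolls and write_midi come from the surrounding project):

import glob, os, re, shutil

IN_DB = '/path/to/input_db'    # assumed root of the source database
OUT_DB = '/path/to/output_db'  # assumed root for the aligned copy
quantization = 8               # illustrative
for pair in os.listdir(IN_DB):
    pass  # body shown above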
Code example #10
def process_folder(folder_path,
                   quantization,
                   binary_piano,
                   binary_orch,
                   temporal_granularity,
                   gapopen=3,
                   gapextend=1,
                   align_bool=True):

    # Get instruments and pianorolls from the folder path
    pr0, instru0, T0, name0, pr1, instru1, T1, name1 = get_instru_and_pr_from_folder_path(
        folder_path, quantization)

    pr_piano, instru_piano, T_piano, name_piano, pr_orch, instru_orch, T_orch, name_orch=\
            discriminate_between_piano_and_orchestra(pr0, instru0, T0, name0, pr1, instru1, T1, name1)

    pr_piano = process_data_piano(pr_piano, binary_piano)
    pr_orch = process_data_orch(pr_orch, binary_orch)

    # Temporal granularity
    if temporal_granularity == 'event_level':
        event_piano = get_event_ind_dict(pr_piano)
        event_orch = get_event_ind_dict(pr_orch)

        def get_duration(event, last_time):
            start_ind = event[:]
            end_ind = np.zeros(event.shape, dtype=int)  # np.int is removed in modern NumPy
            end_ind[:-1] = event[1:]
            end_ind[-1] = last_time
            duration_list = end_ind - start_ind
            return duration_list

        duration_piano = get_duration(event_piano, T_piano)
        duration_orch = get_duration(event_orch, T_orch)
        # Get the duration of each event
        pr_piano = warp_pr_aux(pr_piano, event_piano)
        pr_orch = warp_pr_aux(pr_orch, event_orch)
    else:
        event_piano = None
        event_orch = None
        # Also define durations so the align_bool=False branch below cannot hit a NameError
        duration_piano = None
        duration_orch = None

    # Align tracks
    if align_bool:
        piano_aligned, trace_piano, orch_aligned, trace_orch, trace_prod, total_time = align_pianorolls(
            pr_piano, pr_orch, gapopen, gapextend)
        # Clean events
        if (temporal_granularity == 'event_level'):
            if (trace_piano is None) or (trace_orch is None):
                event_piano_aligned = None
                event_orch_aligned = None
                duration_piano_aligned = None
                duration_orch_aligned = None
            else:
                event_piano_aligned = clean_event(event_piano, trace_piano,
                                                  trace_prod)
                event_orch_aligned = clean_event(event_orch, trace_orch,
                                                 trace_prod)
                duration_piano_aligned = clean_event(duration_piano,
                                                     trace_piano, trace_prod)
                duration_orch_aligned = clean_event(duration_orch, trace_orch,
                                                    trace_prod)
        else:
            event_piano_aligned = []
            event_orch_aligned = []
            duration_piano_aligned = []
            duration_orch_aligned = []
    else:
        piano_aligned = pr_piano
        event_piano_aligned = event_piano
        duration_piano_aligned = duration_piano
        orch_aligned = pr_orch
        event_orch_aligned = event_orch
        duration_orch_aligned = duration_orch
        total_time = T_piano

    return piano_aligned, event_piano_aligned, duration_piano_aligned, instru_piano, name_piano, orch_aligned, event_orch_aligned, duration_orch_aligned, instru_orch, name_orch, total_time
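Given the eleven-value return, a hedged unpacking sketch (the folder path and keyword values are illustrative):

(pr_piano, event_piano, duration_piano, instru_piano, name_piano,
 pr_orch, event_orch, duration_orch, instru_orch, name_orch,
 total_time) = process_folder('/data/pairs/0001',
                              quantization=8,
                              binary_piano=True, binary_orch=True,
                              temporal_granularity='event_level')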