Example #1
File: deepBach.py  Project: pustar/DeepBach
def create_models(model_name=None,
                  create_new=False,
                  num_dense=200,
                  num_units_lstm=[200, 200],
                  pickled_dataset=BACH_DATASET,
                  num_voices=4,
                  metadatas=None,
                  timesteps=16):
    """
    Choose one model
    :param model_name:
    :return:
    """

    _, _, _, index2notes, _, _ = pickle.load(open(pickled_dataset, 'rb'))
    num_pitches = list(map(len, index2notes))
    for voice_index in range(num_voices):
        # We only need one example for features dimensions
        gen = generator_from_raw_dataset(batch_size=1,
                                         timesteps=timesteps,
                                         voice_index=voice_index,
                                         pickled_dataset=pickled_dataset)

        ((left_features, central_features, right_features),
         (left_metas, central_metas, right_metas), labels) = next(gen)

        if 'deepbach' in model_name:
            model = deepBach(num_features_lr=left_features.shape[-1],
                             num_features_c=central_features.shape[-1],
                             num_pitches=num_pitches[voice_index],
                             num_features_meta=left_metas.shape[-1],
                             num_dense=num_dense,
                             num_units_lstm=num_units_lstm)
        elif 'skip' in model_name:
            model = deepbach_skip_connections(
                num_features_lr=left_features.shape[-1],
                num_features_c=central_features.shape[-1],
                num_features_meta=left_metas.shape[-1],
                num_pitches=num_pitches[voice_index],
                num_dense=num_dense,
                num_units_lstm=num_units_lstm,
                timesteps=timesteps)
        else:
            raise ValueError('Unknown model name: %s' % model_name)

        model_path_name = 'models/' + model_name + '_' + str(voice_index)
        if not os.path.exists(model_path_name + '.json') or create_new:
            save_model(model, model_name=model_path_name, overwrite=create_new)
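
A minimal usage sketch for this variant, assuming it is run in the same module as the function above (so that BACH_DATASET and the model builders are in scope); the argument values are illustrative:

# illustrative call; builds and saves models/deepbach_0 ... models/deepbach_3
create_models(model_name='deepbach',
              create_new=True,
              num_dense=200,
              num_units_lstm=[200, 200],
              pickled_dataset=BACH_DATASET,
              num_voices=4,
              timesteps=16)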
Example #2
def create_models(model_name=None,
                  create_new=False,
                  num_dense=200,
                  num_units_lstm=[200, 200]):
    """
    Choose one model
    :param model_name:
    :return:
    """

    _, min_pitches, max_pitches, num_voices = pickle.load(
        open(RAW_DATASET, 'rb'))
    for voice_index in range(4):
        # We only need one example for features dimensions
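        # NOTE: batch_size and timesteps are not defined in this snippet;
        # they are presumably module-level globals in the original file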
        gen = generator_from_raw_dataset(batch_size=batch_size,
                                         timesteps=timesteps,
                                         voice_index=voice_index)

        (left_features, central_features, right_features, beats, labels,
         fermatas) = next(gen)

        if 'deepbach' in model_name:
            model = deepBach(
                num_features_lr=left_features.shape[-1],
                num_features_c=central_features.shape[-1],
                num_pitches=max_pitches[voice_index] -
                min_pitches[voice_index] + 1 + 1,  # for continuation symbol
                num_dense=num_dense,
                num_units_lstm=num_units_lstm)
        elif 'maxent' in model_name:
            model = maxEnt(num_features_lr=left_features.shape[-1],
                           num_features_c=central_features.shape[-1],
                           # + 1 for the continuation symbol
                           num_pitches=max_pitches[voice_index] -
                           min_pitches[voice_index] + 1 + 1)
        elif 'mlp' in model_name:
            model = mlp(num_features_lr=left_features.shape[-1],
                        num_features_c=central_features.shape[-1],
                        # + 1 for the continuation symbol
                        num_pitches=max_pitches[voice_index] -
                        min_pitches[voice_index] + 1 + 1,
                        num_hidden=num_dense)
        else:
            raise ValueError('Unknown model name: %s' % model_name)

        model_path_name = 'models/' + model_name + '_' + str(voice_index)
        if not os.path.exists(model_path_name + '.json') or create_new:
            save_model(model, model_name=model_path_name, overwrite=create_new)
Example #3
def test_autoencoder(model_name, timesteps, pickled_dataset=BACH_DATASET):
    voice_index = 0

    num_epochs = 200
    samples_per_epoch = 1024 * 100
    batch_size = 64
    nb_val_samples = 1024

    X, X_metadatas, voice_ids, index2notes, note2indexes, metadatas = pickle.load(
        open(pickled_dataset, 'rb'))
    # sequences
    num_voices = 1
    num_pitches = list(map(len, index2notes))

    generator_train = (({
        'left_features': left_features,
        'central_features': central_features,
        'right_features': right_features,
        'left_metas': left_metas,
        'right_metas': right_metas,
        'central_metas': central_metas,
    }, {
        'pitch_prediction': labels
    }) for (
        (left_features, central_features,
         right_features), (left_metas, central_metas, right_metas),
        labels) in generator_from_raw_dataset(batch_size=batch_size,
                                              timesteps=timesteps,
                                              voice_index=voice_index,
                                              phase='train',
                                              pickled_dataset=pickled_dataset))

    generator_unitary = (({
        'left_features': left_features,
        'central_features': central_features,
        'right_features': right_features,
        'left_metas': left_metas,
        'right_metas': right_metas,
        'central_metas': central_metas,
    }, {
        'pitch_prediction': labels
    }) for (
        (left_features, central_features,
         right_features), (left_metas, central_metas, right_metas),
        labels) in generator_from_raw_dataset(batch_size=1,
                                              timesteps=timesteps,
                                              voice_index=voice_index,
                                              phase='all',
                                              pickled_dataset=pickled_dataset))

    inputs, outputs = next(generator_train)

    model = load_model(model_name)

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    hidden_repr_model = Model(inputs=model.input,
                              outputs=model.layers[-1].output)
    hidden_repr_model.compile(optimizer='adam',
                              loss='categorical_crossentropy',
                              metrics=['accuracy'])

    # create score target
    chorale_seq = chorale_onehot_to_indexed_chorale(
        onehot_chorale=inputs['left_features'][0],
        num_pitches=num_pitches,
        time_major=False)

    score = indexed_chorale_to_score(chorale_seq,
                                     pickled_dataset=pickled_dataset)
    score.show()
    nearest_chorale_inputs, intermediate_results = find_nearest(
        inputs, hidden_repr_model, generator_unitary, num_elements=20000)
    # concatenate the left_features of all intermediate results
    nearest_chorale = np.concatenate(
        [np.array(step_inputs[0]['left_features'][0])
         for step_inputs in intermediate_results],
        axis=0)

    # create score nearest
    nearest_chorale_seq = chorale_onehot_to_indexed_chorale(
        onehot_chorale=nearest_chorale,
        num_pitches=num_pitches,
        time_major=False)

    score_nearest = indexed_chorale_to_score(nearest_chorale_seq,
                                             pickled_dataset=pickled_dataset)
    score_nearest.show()
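
A minimal usage sketch for test_autoencoder, assuming a model for voice 0 has already been created and trained; the 'models/deepbach_0' path follows the naming convention used by create_models/train_models above and is illustrative:

# illustrative call; loads the saved soprano model and shows the nearest chorale
test_autoencoder(model_name='models/deepbach_0',
                 timesteps=16,
                 pickled_dataset=BACH_DATASET)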
Example #4
File: deepBach.py  Project: pustar/DeepBach
def train_models(model_name,
                 steps_per_epoch,
                 num_epochs,
                 validation_steps,
                 timesteps,
                 pickled_dataset=BACH_DATASET,
                 num_voices=4,
                 batch_size=16,
                 metadatas=None):
    """
    Train models
    :param batch_size:
    :param metadatas:

    """
    models = []
    for voice_index in range(num_voices):
        # Load appropriate generators

        generator_train = (({
            'left_features': left_features,
            'central_features': central_features,
            'right_features': right_features,
            'left_metas': left_metas,
            'right_metas': right_metas,
            'central_metas': central_metas,
        }, {
            'pitch_prediction': labels
        }) for ((left_features, central_features,
                 right_features), (left_metas, central_metas, right_metas),
                labels) in generator_from_raw_dataset(
                    batch_size=batch_size,
                    timesteps=timesteps,
                    voice_index=voice_index,
                    phase='train',
                    pickled_dataset=pickled_dataset))

        generator_val = (({
            'left_features': left_features,
            'central_features': central_features,
            'right_features': right_features,
            'left_metas': left_metas,
            'right_metas': right_metas,
            'central_metas': central_metas,
        }, {
            'pitch_prediction': labels
        }) for ((left_features, central_features,
                 right_features), (left_metas, central_metas, right_metas),
                labels) in generator_from_raw_dataset(
                    batch_size=batch_size,
                    timesteps=timesteps,
                    voice_index=voice_index,
                    phase='test',
                    pickled_dataset=pickled_dataset))

        model_path_name = 'models/' + model_name + '_' + str(voice_index)

        model = load_model(model_path_name)

        model.compile(optimizer='adam',
                      loss={'pitch_prediction': 'categorical_crossentropy'},
                      metrics=['accuracy'])

        model.fit_generator(generator_train,
                            steps_per_epoch=steps_per_epoch,
                            epochs=num_epochs,
                            verbose=1,
                            validation_data=generator_val,
                            validation_steps=validation_steps)

        models.append(model)

        save_model(model, model_path_name, overwrite=True)
    return models
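
A minimal usage sketch for this train_models variant, assuming the per-voice models were first created with create_models under the same model_name; the step and epoch counts are illustrative:

# illustrative call; loads models/deepbach_0 ... models/deepbach_3, trains and re-saves them
models = train_models(model_name='deepbach',
                      steps_per_epoch=500,
                      num_epochs=10,
                      validation_steps=50,
                      timesteps=16,
                      pickled_dataset=BACH_DATASET,
                      num_voices=4,
                      batch_size=16)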
Example #5
def train_models(model_name, samples_per_epoch, num_epochs, nb_val_samples):
    """
    Train models

    """
    models = []
    for voice_index in range(4):
        # Load appropriate generators
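        # NOTE: batch_size and timesteps are not parameters of this variant;
        # they are presumably module-level globals in the original file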

        generator_train = (({
            'left_features': left_features,
            'central_features': central_features,
            'right_features': right_features,
            'beat': beat,
            'beats_left': beats_left,
            'beats_right': beats_right,
            'fermatas_left': fermatas_left,
            'fermatas_right': fermatas_right,
            'central_fermata': central_fermata
        }, {
            'pitch_prediction': labels
        }) for (left_features, central_features, right_features,
                (beats_left, beat, beats_right), labels,
                (fermatas_left, central_fermata, fermatas_right
                 )) in generator_from_raw_dataset(batch_size=batch_size,
                                                  timesteps=timesteps,
                                                  voice_index=voice_index,
                                                  phase='train'))
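        # NOTE: generator_val below also uses phase='train'; the later variants
        # use phase='test' for validation, so this looks like a bug in the original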

        generator_val = (({
            'left_features': left_features,
            'central_features': central_features,
            'right_features': right_features,
            'beat': beat,
            'beats_left': beats_left,
            'beats_right': beats_right,
            'fermatas_left': fermatas_left,
            'fermatas_right': fermatas_right,
            'central_fermata': central_fermata
        }, {
            'pitch_prediction': labels
        }) for (left_features, central_features, right_features,
                (beats_left, beat, beats_right), labels,
                (fermatas_left, central_fermata, fermatas_right
                 )) in generator_from_raw_dataset(batch_size=batch_size,
                                                  timesteps=timesteps,
                                                  voice_index=voice_index,
                                                  phase='train'))

        model_path_name = 'models/' + model_name + '_' + str(voice_index)

        model = load_model(model_path_name)

        model.compile(optimizer='adam',
                      loss={'pitch_prediction': 'categorical_crossentropy'},
                      metrics=['accuracy'])

        model.fit_generator(generator_train,
                            samples_per_epoch=samples_per_epoch,
                            nb_epoch=num_epochs,
                            verbose=1,
                            validation_data=generator_val,
                            nb_val_samples=nb_val_samples)

        models.append(model)

        save_model(model, model_path_name, overwrite=True)
    return models
Example #6
def train_models(model_name,
                 steps_per_epoch,
                 num_epochs,
                 validation_steps,
                 timesteps,
                 pickled_dataset=ARASHI_DATASET,
                 num_voices=4,
                 batch_size=16,
                 metadatas=None):
    """
    Train models
    :param batch_size:
    :param metadatas:

    """
    models = []
    for voice_index in range(num_voices):
        # Load appropriate generators

        generator_train = (({
            'left_features': left_features,
            'central_features': central_features,
            'right_features': right_features,
            'left_metas': left_metas,
            'right_metas': right_metas,
            'central_metas': central_metas,
            'left_local_seqs': left_local_seqs,
            'right_local_seqs': right_local_seqs
        }, {
            'pitch_prediction': labels
        }) for ((left_features, central_features,
                 right_features), (left_metas, central_metas,
                                   right_metas), labels, left_local_seqs,
                right_local_seqs) in generator_from_raw_dataset(
                    batch_size=batch_size,
                    timesteps=timesteps,
                    voice_index=voice_index,
                    phase='train',
                    pickled_dataset=pickled_dataset))  # training data

        generator_val = (({
            'left_features': left_features,
            'central_features': central_features,
            'right_features': right_features,
            'left_metas': left_metas,
            'right_metas': right_metas,
            'central_metas': central_metas,
            'left_local_seqs': left_local_seqs,
            'right_local_seqs': right_local_seqs
        }, {
            'pitch_prediction': labels
        }) for ((left_features, central_features,
                 right_features), (left_metas, central_metas,
                                   right_metas), labels, left_local_seqs,
                right_local_seqs) in generator_from_raw_dataset(
                    batch_size=batch_size,
                    timesteps=timesteps,
                    voice_index=voice_index,
                    phase='test',
                    pickled_dataset=pickled_dataset))  # validation data

        model_path_name = 'models/' + model_name + '_' + str(voice_index)

        model = load_model(model_path_name)

        model.compile(optimizer='adam',
                      loss={'pitch_prediction': 'categorical_crossentropy'},
                      metrics=['accuracy'])

        model.fit_generator(generator_train,
                            steps_per_epoch=steps_per_epoch,
                            epochs=num_epochs,
                            verbose=1,
                            validation_data=generator_val,
                            validation_steps=validation_steps)

        models.append(model)  # collect the trained model for this voice

        save_model(model, model_path_name, overwrite=True)
    return models
Example #7
def create_models(model_name=None,
                  create_new=False,
                  num_dense=200,
                  num_units_lstm=[200, 200],
                  pickled_dataset=ARASHI_DATASET,
                  num_voices=4,
                  metadatas=None,
                  timesteps=16):
    """
    Choose one model
    :param model_name:
    :return:
    """

    _, _, _, index2notes, _, _, _ = pickle.load(open(pickled_dataset, 'rb'))
    # X, X_metadatas, voice_ids, index2notes, note2indexes, metadata = pickle.load(open(pickled_dataset, 'rb'))
    num_pitches = list(map(len, index2notes))  # number of distinct note symbols per voice
    for voice_index in range(num_voices):
        print('=' * 10 + ' part', voice_index, '=' * 10, timesteps)
        # We only need one example for features dimensions
        gen = generator_from_raw_dataset(batch_size=1,
                                         timesteps=timesteps,
                                         voice_index=voice_index,
                                         pickled_dataset=pickled_dataset)
        # batch_size=1 so the while loop in generator_from_raw_dataset iterates only once;
        # we only need the feature dimensions here to size the model

        ((left_features, central_features,
          right_features), (left_metas, central_metas, right_metas), labels,
         left_local_seqs, right_local_seqs) = next(gen)
        if 'deepbach' in model_name:

            #model = deepBach(num_features_lr=left_features.shape[-1],
            #                 num_features_c=central_features.shape[-1],
            #                 num_pitches=num_pitches[voice_index],
            #                 num_features_meta=left_metas.shape[-1],
            #                 num_dense=num_dense, num_units_lstm=num_units_lstm,
            #                 timesteps = timesteps)

            model = deepBach_chord(num_features_lr=left_features.shape[-1],
                                   num_features_c=central_features.shape[-1],
                                   num_pitches=num_pitches[voice_index],
                                   num_features_meta=left_metas.shape[-1],
                                   num_dense=num_dense,
                                   num_units_lstm=num_units_lstm,
                                   timesteps=timesteps,
                                   num_localseqs_lr=left_local_seqs.shape[-1])
        elif 'skip' in model_name:
            model = deepbach_skip_connections(
                num_features_lr=left_features.shape[-1],
                num_features_c=central_features.shape[-1],
                num_features_meta=left_metas.shape[-1],
                num_pitches=num_pitches[voice_index],
                num_dense=num_dense,
                num_units_lstm=num_units_lstm,
                timesteps=timesteps)
        else:
            raise ValueError('Unknown model name: %s' % model_name)

        model_path_name = 'models/' + model_name + '_' + str(voice_index)
        if not os.path.exists(model_path_name + '.json') or create_new:
            save_model(model, model_name=model_path_name, overwrite=create_new)
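
A minimal end-to-end sketch for this chord-aware variant, assuming it runs in the same module as Examples #6 and #7 (so that ARASHI_DATASET and both functions are in scope); the model name and all numbers are illustrative:

# illustrative pipeline: create the per-voice models, then train them
create_models(model_name='deepbach_chord',
              create_new=True,
              pickled_dataset=ARASHI_DATASET,
              num_voices=4,
              timesteps=16)
train_models(model_name='deepbach_chord',
             steps_per_epoch=200,
             num_epochs=5,
             validation_steps=20,
             timesteps=16,
             pickled_dataset=ARASHI_DATASET,
             num_voices=4,
             batch_size=16)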