def save_audio(self, irun, iday, ismodel):
    """Write the chosen song model's synthesized sound to a WAV file.

    irun: run index into `self.rd` / `self.conf`.
    iday: day index (row of the 'songs' series).
    ismodel: index of the song model within that day.
    """
    song_model = self.rd[irun]['songs'].iloc[iday][ismodel]
    raw_wave = song_model.gen_sound()
    # Normalization and centering make the exported sound louder.
    wav_song = normalize_and_center(raw_wave)
    out_name = "{}_song.wav".format(self.conf[irun]['name'])
    wavfile.write(filename=out_name,
                  rate=bsa.SR,
                  data=wav_song)
Example #2
0
def generate_data_struct(l_path):
    """Load each run directory in *l_path* into a data dictionary.

    For every path, the returned dict bundles the tutor song and its
    spectral derivatives, the run parameters and song log, the best song
    model with its score, generated song/alpha-beta traces, Boari's
    synthesized song read from ``../data``, song features, and the
    measures used for scoring.

    Parameters
    ----------
    l_path : iterable of str
        Run directories, each containing a 'tutor.wav' and a song log.

    Returns
    -------
    list of dict
        One dictionary per run path, in input order.
    """
    sim = []
    for path in l_path:
        d = {}
        # Spectral-analysis parameters, copied so each entry is self-contained.
        d["fft_step"] = bsa.FFT_STEP
        d["freq_range"] = bsa.FREQ_RANGE
        d["fft_size"] = bsa.FFT_SIZE
        d["sr"], d["tutor"] = wavfile.read(join(path, 'tutor.wav'))
        d["tspec"] = bsa.spectral_derivs(d["tutor"], d["freq_range"],
                                         d["fft_step"], d["fft_size"])
        d["run_param"], d["songlog"] = get_run_param_and_songlog(path)
        rd, smodel, score = get_rd_best_smodel_and_score(d["songlog"])
        d["rd"] = rd
        d["smodel"] = smodel
        d["score"] = score
        d["song"] = smodel.gen_sound()
        # Onset of each gesture (first element of the gesture tuple).
        d["starts"] = [gesture[0] for gesture in d["smodel"].gestures]
        d["smspec"] = bsa.spectral_derivs(d["song"], d["freq_range"],
                                          d["fft_step"], d["fft_size"])
        song_name = basename(d["run_param"]['tutor']).split('.')[0]
        synth_ab = np.loadtxt('../data/{}_ab.dat'.format(song_name))
        d["ab"] = d["smodel"].gen_alphabeta()
        # NaN at each gesture onset so plots show a gap between gestures.
        for start, _gesture in d["smodel"].gestures:
            d["ab"][start] = np.nan
        d["synth_ab"] = synth_ab
        nct = normalize_and_center(d["tutor"])
        param_feat = bsa.all_song_features(nct,
                                           d["sr"],
                                           freq_range=d["freq_range"],
                                           fft_step=d["fft_step"],
                                           fft_size=d["fft_size"])
        d["tfeat"] = get_features(d["tutor"], param_feat)
        d["smfeat"] = get_features(d["song"], param_feat)
        # Boari's synthesized song for the same tutor, read from disk.
        tmp = '../data/{}_out.wav'
        sr, d["synth"] = wavfile.read(tmp.format(song_name))
        d["Boari_score"] = utils.boari_synth_song_error(
            d["tutor"],
            d["synth"],
            d["run_param"]['coefs'],
            tutor_feat=param_feat)
        d["mtutor"] = bsa_measure(d["tutor"],
                                  d["sr"],
                                  coefs=d["run_param"]['coefs'],
                                  tutor_feat=param_feat)
        d["msynth"] = bsa_measure(d["synth"],
                                  d["sr"],
                                  coefs=d["run_param"]['coefs'],
                                  tutor_feat=param_feat)
        d["msong"] = bsa_measure(d["song"],
                                 d["sr"],
                                 coefs=d["run_param"]['coefs'],
                                 tutor_feat=param_feat)
        sim.append(d)
    return sim
 def tutor_sound_wave(self, irun):
     """Plot the normalized tutor sound wave of run *irun* as HTML."""
     _sr, raw = wavfile.read(join(self.run_paths[irun], 'tutor.wav'))
     centered = normalize_and_center(raw)
     fig = plt.figure(figsize=self.figsize)
     axis = fig.gca()
     axis.plot(centered, color='C0')
     axis.set_xlim(0, len(centered))
     axis.set_title("tutor sound wave (normalized)")
     # Close first so the figure is not shown twice in notebooks; the
     # HTML conversion works on the closed figure object.
     plt.close(fig)
     return plot_to_html(fig)
 def learning_curve(self, i, rescaling=False):
     """Plot the learning curve of run *i* and return it as embedded HTML.

     Also draws a horizontal reference line at the (negated) error of
     Boari's synthesized song for the same tutor.

     rescaling: boolean. If True, use the rescaling measure to calculate
     the error score.
     """
     tutor_feat = None
     fig = plt.figure(figsize=self.figsize)
     ax = fig.gca()
     try:
         ax = draw_learning_curve(self.rd[i], ax)
     except Exception as e:
         # Best-effort: on failure we still return the (possibly empty)
         # figure; the error is only printed, not raised.
         print(e)
     else:
         # Boari's synthesized version of this run's tutor, read from disk.
         sr, synth = wavfile.read('../data/{}_out.wav'.format(
             basename(self.conf[i]['tutor']).split('.')[0]))
         sr, tutor = wavfile.read(join(self.run_paths[i], 'tutor.wav'))
         if rescaling:
             # Recompute tutor features on the normalized signal so the
             # error below uses the rescaling measure.
             tutor = normalize_and_center(tutor)
             tutor_feat = bsa.all_song_features(tutor,
                                                bsa.SR,
                                                freq_range=bsa.FREQ_RANGE,
                                                fft_step=bsa.FFT_STEP,
                                                fft_size=bsa.FFT_SIZE)
         score = boari_synth_song_error(tutor, synth, self.conf[i]['coefs'],
                                        tutor_feat)
         # NOTE(review): score is negated here — presumably the curve plots
         # negated errors as well; confirm against draw_learning_curve.
         ax.axhline(-1 * score,
                    color="orange",
                    label="Erreur avec méthode de Boari")
         print("Boari score:", score)
         best = np.argmin(self.rd[i]['scores'].iloc[-1])
         best_score = self.rd[i]['scores'].iloc[-1][best]
         print("Best song model score:", best_score)
         ax.legend()
     finally:
         if self.save_fig:
             # Save before closing; runs whether or not plotting succeeded.
             fig.savefig('learning_curve_{}.png'.format(i), dpi=300)
         plt.close(fig)
     return plot_to_html(fig)
Example #5
0
def fit_song(tutor_song, conf, datasavers=None):
    """Fit a song with a day and a night phase.

    This function returns a list of SongModel.

    The fit is split in two phases: A day part and a night part. The day part
    is a simple optimisation algorithm within gesture. The night part
    is a restructuring algorithm. See details in the modules
    `song_model.SongModel`, `day_optimisers` and `night_optimisers`


    Parameters
    ----------
    tutor_song : 1D array_like
        The tutor song that the algorithm will try to reproduce.
        It will be normalized between -1 and +1 internally.
        You don't need to do it yourself.
    conf : dict
        The dictionary of all the parameters needed for the run.
        Values that are required with `fit_song` are:
            'dlm': The day learning model key from DAY_LEARNING_MODELS dict.
            'nlm': The night learning model key from NIGHT_LEARNING_MODELS dict.
            'days': The number of days for a run.
            'concurrent': The number of concurrent songs during the day.
            'comp_obj': a callable for the comparison.
            'rng_obj': a `numpy.RandomState` object for the random generation.
            'measure_obj': a callable to measure song features.

        'comp_obj', 'rng_obj' and 'measure_obj' are not importable from json
        files, but can be built easily by reading arguments like 'seed' or keys
        from the configuration files, like 'dlm' and 'nlm'.

        The required values depend on the day learning model and night
        learning model picked.
    datasavers : dict, optional
        Mapping with 'standard', 'day' and 'night' saver objects; when None,
        quiet (no-op) savers are used.

    Returns
    -------
    songmodels : List[SongModel]
        The songmodels at the end of the training.

    See also
    --------
    song_model.SongModel
    day_optimisers
    night_optimisers

    """
    tutor_song = normalize_and_center(tutor_song)

    tutor_feat = bsa.all_song_features(tutor_song,
                                       bsa.SR,
                                       freq_range=bsa.FREQ_RANGE,
                                       fft_step=bsa.FFT_STEP,
                                       fft_size=bsa.FFT_SIZE)

    conf['measure_obj'] = lambda x: bsa_measure(
        x, bsa.SR, coefs=conf['coefs'], tutor_feat=tutor_feat)

    day_optimisation = DAY_LEARNING_MODELS[conf['dlm']]
    night_optimisation = NIGHT_LEARNING_MODELS[conf['nlm']]
    nb_day = conf['days']
    nb_conc_song = conf['concurrent']
    measure = conf['measure_obj']
    comp = conf['comp_obj']
    rng = conf['rng_obj']
    nb_split = conf.get('split', 10)
    # muta_proba is a list of 3 values: [P(deletion), P(division), P(movement)]
    muta_proba = conf['muta_proba']

    songs = [
        SongModel(song=tutor_song,
                  priors=conf['prior'],
                  nb_split=nb_split,
                  rng=rng,
                  muta_proba=muta_proba) for _ in range(nb_conc_song)
    ]

    goal = measure(tutor_song)

    if datasavers is None:
        # No savers supplied: record nothing, but keep the same call sites.
        datasavers = {}
        datasavers["standard"] = QuietDataSaver()
        datasavers["day"] = QuietDataSaver()
        datasavers["night"] = QuietDataSaver()
    datasavers["standard"].add(moment='Start',
                               songs=songs,
                               scores=get_scores(goal, songs, measure, comp))

    # These day models optimise against the measured features; the others
    # (e.g. 'optimise_root_mean_square_error') target the raw waveform.
    if conf['dlm'] in ('optimise_gesture_whole',
                       'optimise_gesture_whole_local_search',
                       'optimise_proportional_training'):
        target = goal
    else:
        target = tutor_song

    for iday in range(nb_day):
        logger.info('*\t*\t*\tDay {} of {}\t*\t*\t*'.format(iday + 1, nb_day))
        with datasavers["day"].set_context('day_optim'):
            songs = day_optimisation(songs,
                                     target,
                                     conf,
                                     datasaver=datasavers["day"],
                                     iday=iday)
        # Flush incrementally: keeping all day data in memory until the end
        # is too big and causes MemoryError (the pickle file must then be
        # read back in several chunks).
        datasavers["day"].flush()
        score = get_scores(goal, songs, measure, comp)
        if iday + 1 != nb_day:  # no night phase after the last day
            logger.debug(score)
            datasavers["standard"].add(moment='before_night',
                                       songs=songs,
                                       scores=score)
            logger.info('z\tz\tz\tNight\tz\tz\tz')
            with datasavers["standard"].set_context('night_optim'):
                with datasavers["night"].set_context('replay'):
                    nlm = conf['nlm']
                    if nlm == "no_night":
                        pass
                    elif nlm == "mutate_microbial_diversity_uniform":
                        # "Multi objective diversity": minimise the number of
                        # neighbours of a song while also keeping good songs
                        # with a low error distance.
                        songs = night_optimisation(songs,
                                                   goal,
                                                   iday,
                                                   nb_day,
                                                   conf,
                                                   datasavers=datasavers)
                    elif nlm in (
                            "mutate_microbial_diversity_continuous_uniform",
                            "mutate_microbial_diversity_distance_uniform"):
                        # Diversity-only night models: maximise a (possibly
                        # non-symmetric) metric or a symmetric distance
                        # between songs. Both share the same call signature,
                        # so the two branches are merged.
                        songs = night_optimisation(songs,
                                                   conf,
                                                   i_night=iday,
                                                   datasavers=datasavers)
                    else:
                        songs = night_optimisation(
                            songs,
                            goal,
                            conf,
                            datasaver=datasavers["standard"])
            score = get_scores(goal, songs, measure, comp)
            # Same incremental flush as for the day saver (MemoryError guard).
            datasavers["night"].flush()
            datasavers["standard"].add(moment='after_night',
                                       songs=songs,
                                       scores=score)
        datasavers["standard"].write()
    datasavers["standard"].add(moment='End',
                               songs=songs,
                               scores=get_scores(goal, songs, measure, comp))
    return songs