Example #1
def generate_waveform(predict_dict, features_dict):
    # global_dict is assumed to be a module-level settings dict
    # (it is not defined in this snippet)
    pred_f0 = predict_dict['f0_predictions']
    pred_ec = predict_dict['ec_predictions']
    f0_src = features_dict['f0']
    ec_src = features_dict['ec']
    nz_idx = features_dict['nz_idx']
    straight_src = features_dict['straight']
    ap = features_dict['aperiod']
    smooth_param = global_dict['smooth_param']

    # Smooth the predicted F0 and trim the padding that smooth() adds
    trans_f0 = smooth(pred_f0.reshape(-1,), window_len=smooth_param)
    mean_smooth_f0 = np.zeros((f0_src.shape[0], 1))
    mean_smooth_f0[nz_idx] = trans_f0[smooth_param - 1:
                                      -(smooth_param - 1)].reshape(-1, 1)

    # Same treatment for the predicted energy contour
    trans_ec = smooth(pred_ec.reshape(-1,), window_len=smooth_param)
    new_ec = trans_ec[smooth_param - 1:-(smooth_param - 1)].reshape(-1, 1)

    # Fall back to the source energy wherever the prediction is non-positive
    z_idx = np.where(new_ec <= 0)[0]
    ec_src = ec_src.reshape(-1, 1)
    new_ec[z_idx] = ec_src[z_idx]

    # Rescale the STRAIGHT spectrum by the energy ratio; np.tile replaces
    # the deprecated np.matlib.repmat
    ratio_ec = np.divide(new_ec.T, ec_src.T)
    straight_src = np.multiply(straight_src, np.tile(ratio_ec, (513, 1)))
    straight_src = straight_src.T.copy(order='C')

    recon_sig = pw.synthesize(mean_smooth_f0.reshape((-1,)),
                              straight_src, ap, global_dict['fs'],
                              frame_period=int(1000 * global_dict['window_len']))
    # Min-max normalize the synthesized signal to [-1, 1]
    recon_sig = 2 * ((recon_sig - np.min(recon_sig))
                     / (np.max(recon_sig) - np.min(recon_sig))) - 1
    return {'data': recon_sig, 'fs': global_dict['fs']}
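
None of these examples define the smooth() helper itself. The slicing above
(window_len - 1 samples trimmed from each end) implies a moving-average filter
that pads its input by window_len - 1 samples per side. A minimal sketch
consistent with that contract, assuming a Hanning window, might be:

import numpy as np

def smooth(x, window_len=11):
    # Reflect-pad by window_len - 1 samples on each side, then convolve with
    # a normalized Hanning window. The output is longer than x by
    # 2 * (window_len - 1), which is why callers slice
    # [window_len - 1 : -(window_len - 1)] to recover the original length.
    pad = window_len - 1
    s = np.r_[x[pad:0:-1], x, x[-2:-window_len - 1:-1]]
    w = np.hanning(window_len)
    return np.convolve(s, w / w.sum(), mode='same')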
Example #2

def run(self, chunk=1024, channels=1, sample_rate=44100):
    controller = pyaudio.PyAudio()
    stream = controller.open(format=pyaudio.paInt32,
                             channels=channels,
                             rate=sample_rate,
                             input=True,
                             frames_per_buffer=chunk)
    old_data, curr_data = None, None
    while True:
        # old_data is unset on the first pass, so guard against it
        if not self.input_queue.empty() and old_data is not None:
            self.input_queue.get()

            # np.frombuffer replaces the deprecated np.fromstring; the stream
            # format is paInt32, i.e. signed 32-bit samples
            data = np.frombuffer(old_data, dtype=np.int32)
            data = noise_cancel(data)
            smooth_data = smooth(data)
            vol = self.volume(smooth_data)
            self.output_queue.put(vol)
        old_data, curr_data = curr_data, stream.read(chunk)
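
noise_cancel and volume are not shown here. volume presumably maps a chunk of
samples to a single loudness value; a hypothetical RMS-based method, for
illustration only:

import numpy as np

def volume(self, samples):
    # hypothetical method of the same class: RMS amplitude of one chunk
    samples = np.asarray(samples, dtype=np.float64)
    return float(np.sqrt(np.mean(samples ** 2)))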
Example #3
# data_a_y = helper.ewma(data_a_y, 0.1)
helper.offset_data(data_time,data_a_x,data_a_y,starts,ends)
# data_a_x, data_a_y = helper.set_move_period(data_a_x, data_a_y, data_a_z)

for i in range(1, len(data_time)):

    status.move(data_time[i] - data_time[i-1], data_a_x[i], data_a_y[i], data_d_z[i])

    print(status)
    xs.append(status.position.x)
    ys.append(status.position.y)
    angle.append(status.direction.angle * 180 / 3.14159)

xs2 = []
ys2 = []
for item in starts:
    xs2.append(xs[item])
    ys2.append(ys[item])
xs2.append(xs[-1])
ys2.append(ys[-1])

plt.plot(xs, ys)
plt.plot(xs2, ys2)
plt.show()

plt.plot(helper.smooth(data_d_z, 50))
plt.show()

plt.plot(helper.smooth(angle, 50))
plt.show()
Example #4
print(sample_result1.round(2), '\n')

sample2 = pm.sample_ppc(trace, samples=1, model=model)
sample_result2 = np.mean(sample2['L'], axis=0) / N_data
print(sample_result2.round(2), '\n')

sample3 = pm.sample_ppc(trace, samples=1, model=model)
sample_result3 = np.mean(sample3['L'], axis=0) / N_data
print(sample_result3.round(2), '\n')

PProb = np.random.rand(config.T)

plt.figure()

ax = plt.subplot(211)
ax.plot(smooth(PProb), label=config.ALGORITHMS[0])
ax.set_yticks(np.arange(0, 1., 0.1))
ax.set_xticks(np.arange(0, config.T, config.T / 20))
ax.grid()

ax2 = plt.subplot(212)
ax2.plot(smooth(PProb), label=config.ALGORITHMS[0])
#ax2.set_yticks(np.arange(0, 1., 0.1))
ax2.set_xticks(np.arange(0, config.T, config.T / 20))
ax2.grid()

plt.subplot(211)
plt.xlabel('t')
plt.ylabel('Probability of optimal action')
plt.legend()
Example #5
for i in range(len(algs)):

    pprob_string = save_dir + datetime + '_N=' + str(N) + '_AVE_PProb_' + algs[i] + '.npy'
    pprob = np.load(pprob_string)

    regret_string = save_dir + datetime + '_N=' + str(N) + '_AVE_Regret_' + algs[i] + '.npy'
    regret = np.load(regret_string)

    cumregret_string = save_dir + datetime + '_N=' + str(N) + '_AVE_CumRegret_' + algs[i] + '.npy'
    cumregret = np.load(cumregret_string)

    accuracy_string = save_dir + datetime + '_N=' + str(N) + '_AVE_Accuracy_' + algs[i] + '.npy'
    accuracy = np.load(accuracy_string)

    ax = plt.subplot(221)
    ax.plot(smooth(pprob), label=algs[i])

    ax = plt.subplot(222)
    ax.plot(smooth(regret), label=algs[i])

    ax = plt.subplot(223)
    ax.plot(smooth(cumregret), label=algs[i])

    ax = plt.subplot(224)
    ax.plot(smooth(accuracy), label=algs[i])


plt.subplot(221)
plt.xlabel('t')
plt.ylabel('Probability of optimal action')
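
The four load-and-plot blocks above differ only in the metric name. A small
helper, hypothetical but equivalent to the string concatenation used there,
keeps the filename pattern in one place:

import numpy as np

def load_metric(save_dir, datetime, N, metric, alg):
    # builds e.g. '<save_dir><datetime>_N=<N>_AVE_PProb_<alg>.npy'
    return np.load('{}{}_N={}_AVE_{}_{}.npy'.format(
        save_dir, datetime, N, metric, alg))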
Example #6
def main():

    # Load settings
    config = Config()

    # Get date-time stamp
    stamp = str(datetime.datetime.now()).split('.')[0]
    stamp = stamp.replace(':', '-')
    stamp = stamp.replace(' ', '-')

    # Data save folder
    save_path = 'data_folder'

    # ---------------------------------------------------------------------- #
    # Compare algorithms - Instantiate intent for all timesteps              #
    # ---------------------------------------------------------------------- #
    if config.USE_EXISTING_I_SAMPLES:  # Load I samples from existing file
        I_samples_list = np.load(os.path.join(
            save_path, config.I_SAMPLE_FILE))[:, :config.T * config.N_BATCH]
        I_samples_list = np.tile(I_samples_list, (config.N, 1))  # Tile N times
        T = eval(config.T_)  # This needs I_samples_list
        N = config.N
    else:  # Instantiate confounders and I samples to use for all timesteps
        T = eval(config.T_)
        U_samples_list = np.empty((config.N, config.N_CONFOUNDERS, T))
        I_samples_list = np.empty((config.N, T))
        N = config.N

        for i in range(config.N):
            U_samples = np.random.randint(2, size=(config.N_CONFOUNDERS, T))
            I_samples = np.array(config.intent(U_samples[0], U_samples[1]))
            U_samples_list[i] = U_samples
            I_samples_list[i] = I_samples

        if config.USE_ORDERED_I_SAMPLES:
            # Systematic pattern of intents
            I_patterns = np.zeros((config.K, config.K)).astype(int)
            for i in range(config.K):
                I_patterns[i] = np.roll(np.arange(config.K, dtype=int), -i)
            I_patterns = np.tile(I_patterns, np.ceil(T / config.K).astype(int))
            I_patterns = np.tile(I_patterns,
                                 (np.ceil(N / config.K).astype(int), 1))
            I_samples_list = I_patterns[:N, :T]

        if config.SAVE_I_SAMPLES:
            np.save(os.path.join(save_path, 'I_samples_' + stamp),
                    I_samples_list)

    # ------------------------------------------------------ #
    #  Prepare run for T timesteps N times                   #
    # ------------------------------------------------------ #

    if config.COMPARE_HYPERPARAMETERS:
        N = config.N
    else:
        # NOTE: this branch does not work with config.USE_RANDOM_I_SAMPLES
        N = (I_samples_list.shape[0]
             if config.USE_EXISTING_I_SAMPLES else config.N)

    if config.MODE == 'vanilla' and config.USE_RANDOM_DATA:
        if config.USE_SAVED_RANDOM_DATA:
            data_exp_list = np.load('data_exp_list.npy')
            data_obs_list = np.load('data_obs_list.npy')
            theta_list = np.load('theta_list.npy')
            N = len(theta_list)

        else:
            data_exp_list, data_obs_list, theta_list = generate_and_save_n_data(
                config.N, save=config.SAVE_LIST, noisy=False)

        data_lists = [data_exp_list, data_obs_list, theta_list]
    else:
        data_lists = None

    # Sums of actions, rewards, and times best action chosen.
    IntentSum = np.zeros((config.N_ALGS, T))
    ActionSum = np.zeros((config.N_ALGS, T))
    RewardSum = np.zeros((config.N_ALGS, T))
    ProbSum = np.zeros((config.N_ALGS, T))
    RegretSum = np.zeros((config.N_ALGS, T))
    CumRegretSum = np.zeros((config.N_ALGS, T))
    CumProbSum = np.zeros((config.N_ALGS, T))
    AccuracySum = np.zeros((config.N_ALGS, T))
    ExpectedRewardSum = np.zeros((config.N_ALGS, T))

    if config.PLOT:
        plt.figure()

    # -------------------------------------------------- #
    #  Run for T timesteps N times for all algorithms    #
    # -------------------------------------------------- #

    print('Running {} algorithms... \n'.format(config.N_ALGS))

    for a in range(config.N_ALGS):

        alg_name = config.ALGORITHMS[a]
        print('Running algorithm: {} \n'.format(alg_name))

        # -------------------------------- #
        #  Instantiate algorithm           #
        # -------------------------------- #

        if alg_name.startswith('MCMC'):
            # algorithm = mabuc_bayesian.MabucBayesianAlgorithm(alg_name, a)  # <-- doesn't work with random data
            algorithm = mabuc_bayesian_clean.MabucBayesianAlgorithm(
                alg_name, a)
            T = config.T * config.N_BATCH
            # N = config.N_MCMC

        else:
            # algorithm = mabuc.MabucAlgorithm(alg_name)
            algorithm = mabuc_clean.MabucAlgorithm(alg_name)
            T = config.T
            # N = config.N_IVWA

        # For calculating sample standard deviation

        Prob_log = np.zeros((N, T))
        Regret_log = np.zeros((N, T))
        CumRegret_log = np.zeros((N, T))
        ExpectedReward_log = np.zeros((N, T))
        SmoothedCumRegret_log = np.zeros((N, T))
        Accuracy_log = np.zeros((N, T))

        # -------------------------------- #
        # Run N Monte Carlo Simulations    #
        # -------------------------------- #

        print('Executing {} MC Simulations... \n'.format(N))

        for n in range(N):

            print('======  N = {} ====== \n'.format(n))

            # Run T exploration steps
            Intent, Action, Reward, Prob, CumProb, AveragePayoutAccuracy = algorithm.run_simulation(
                n, T, I_samples_list[n], data_lists)

            # Collect stats
            IntentSum[a, :] += Intent
            ActionSum[a, :] += Action
            RewardSum[a, :] += Reward

            ProbSum[a, :] += Prob
            CumProbSum[a, :] += CumProb
            AccuracySum[a, :] += AveragePayoutAccuracy

            # Save regret for this simulation (N)
            optimal_rewards = np.max(config.THETA[:, Intent.astype(int)],
                                     axis=0)
            action_rewards = config.THETA[Action.astype(int),
                                          Intent.astype(int)]
            Regret = optimal_rewards - action_rewards
            RegretSum[a, :] += Regret
            CumRegret = np.cumsum(optimal_rewards - action_rewards)
            CumRegretSum[a, :] += CumRegret
            ExpectedRewardSum[a, :] += action_rewards

            Prob_log[n, :] = Prob
            Regret_log[n, :] = Regret
            CumRegret_log[n, :] = CumRegret
            ExpectedReward_log[n, :] = action_rewards
            Accuracy_log[n, :] = AveragePayoutAccuracy

            if config.SAVE_CHECKPOINTS:

                if (config.SAVE_AT_END
                        and n == N - 1) or not config.SAVE_AT_END:

                    print('Saving checkpoint... \n')

                    folder = './' + save_path + '/' + config.MODE + '/'
                    np.save(
                        folder + stamp + '_N=' + str(n) + '_AVE_PProb' + '_' +
                        config.ALGORITHMS[a], ProbSum[a, :] / (n + 1))
                    np.save(
                        folder + stamp + '_N=' + str(n) + '_AVE_Regret' + '_' +
                        config.ALGORITHMS[a], RegretSum[a, :] / (n + 1))
                    np.save(
                        folder + stamp + '_N=' + str(n) + '_AVE_CumRegret' +
                        '_' + config.ALGORITHMS[a],
                        CumRegretSum[a, :] / (n + 1))
                    np.save(
                        folder + stamp + '_N=' + str(n) + '_AVE_Accuracy' +
                        '_' + config.ALGORITHMS[a],
                        AccuracySum[a, :] / (n + 1))

                    if n >= 1:
                        STD_PProb = np.std(Prob_log[:(n + 1)], axis=0,
                                           ddof=1)  # Sample std
                        STD_Regret = np.std(Regret_log[:(n + 1)],
                                            axis=0,
                                            ddof=1)  # Sample std
                        STD_CumRegret = np.std(CumRegret_log[:(n + 1)],
                                               axis=0,
                                               ddof=1)  # Sample std
                        STD_Accuracy = np.std(Accuracy_log[:(n + 1)],
                                              axis=0,
                                              ddof=1)  # Sample std

                        np.save(
                            folder + stamp + '_N=' + str(n) + '_STD_PProb' +
                            '_' + config.ALGORITHMS[a], STD_PProb)
                        np.save(
                            folder + stamp + '_N=' + str(n) + '_STD_Regret' +
                            '_' + config.ALGORITHMS[a], STD_Regret)
                        np.save(
                            folder + stamp + '_N=' + str(n) +
                            '_STD_CumRegret' + '_' + config.ALGORITHMS[a],
                            STD_CumRegret)
                        np.save(
                            folder + stamp + '_N=' + str(n) + '_STD_Accuracy' +
                            '_' + config.ALGORITHMS[a], STD_Accuracy)

        # --------------------------------------- #
        #  Save results to plot                   #
        # --------------------------------------- #

        if False:  # results-saving block, disabled in this example

            # Get Index of best and worst runs
            for n in range(N):
                SmoothedCumRegret_log[n] = smooth(CumRegret_log[n])
            BestRun_idx = np.argmin(SmoothedCumRegret_log[:, -1])
            WorstRun_idx = np.argmax(SmoothedCumRegret_log[:, -1])

            #  Probability of optimal action
            # -----------------------------------------------
            PProb = ProbSum[a, :] / N
            STD_PProb = np.std(Prob_log, axis=0, ddof=1)  # Sample std
            name = config.ALGORITHMS[a] + '_' + stamp + '_PProb'
            name_std = config.ALGORITHMS[a] + '_' + stamp + '_PProb_std'
            if config.SAVE_DATA:
                np.save(os.path.join(save_path, name), PProb)
                np.save(os.path.join(save_path, name_std), STD_PProb)

            #  Save cumulative probability of optimal action
            # -----------------------------------------------
            CumPProb = CumProbSum[a, :] / N
            name = config.ALGORITHMS[a] + '_' + stamp + '_CumPProb'
            if config.SAVE_DATA:
                np.save(os.path.join(save_path, name), CumPProb)

            #  Save regret
            # -----------------------------------------------
            Regret = RegretSum[a, :] / N
            STD_Regret = np.std(Regret_log, axis=0, ddof=1)  # Sample std

            Regret_Best = Regret_log[BestRun_idx]
            Regret_Worst = Regret_log[WorstRun_idx]

            if config.SAVE_DATA:
                name = config.ALGORITHMS[a] + '_' + stamp + '_Regret'
                name_std = config.ALGORITHMS[a] + '_' + stamp + '_Regret_std'
                np.save(os.path.join(save_path, name), Regret)
                np.save(os.path.join(save_path, name_std), STD_Regret)

                name_best = config.ALGORITHMS[a] + '_' + stamp + '_Regret_Best'
                name_worst = config.ALGORITHMS[a] + '_' + stamp + '_Regret_Worst'
                np.save(os.path.join(save_path, name_best), Regret_Best)
                np.save(os.path.join(save_path, name_worst), Regret_Worst)

            #  Cumulative regret
            # -----------------------------------------------
            CumRegret = CumRegretSum[a, :] / N
            STD_CumRegret = np.std(CumRegret_log, axis=0, ddof=1)  # Sample std

            CumRegret_Best = CumRegret_log[BestRun_idx]
            CumRegret_Worst = CumRegret_log[WorstRun_idx]

            if config.SAVE_DATA:
                name = config.ALGORITHMS[a] + '_' + stamp + '_CumRegret'
                name_std = config.ALGORITHMS[a] + '_' + stamp + '_CumRegret_std'
                np.save(os.path.join(save_path, name), CumRegret)
                np.save(os.path.join(save_path, name_std), STD_CumRegret)

                name_best = config.ALGORITHMS[a] + '_' + stamp + '_CumRegret_Best'
                name_worst = config.ALGORITHMS[a] + '_' + stamp + '_CumRegret_Worst'
                np.save(os.path.join(save_path, name_best), CumRegret_Best)
                np.save(os.path.join(save_path, name_worst), CumRegret_Worst)

            #  Payout estimate accuracy
            # -----------------------------------------------
            Accuracy = AccuracySum[a, :] / N
            STD_Accuracy = np.std(Accuracy_log, axis=0, ddof=1)  # Sample std
            name = config.ALGORITHMS[a] + '_' + stamp + '_Accuracy'
            name_std = config.ALGORITHMS[a] + '_' + stamp + '_Accuracy_std'
            if config.SAVE_DATA:
                np.save(os.path.join(save_path, name), Accuracy)
                np.save(os.path.join(save_path, name_std), STD_Accuracy)

            #  Action rewards
            # -----------------------------------------------
            ExpectedReward = ExpectedRewardSum[a, :] / N
            STD_ExpectedReward = np.std(ExpectedReward_log, axis=0,
                                        ddof=1)  # Sample std

            ExpectedReward_Best = ExpectedReward_log[BestRun_idx]
            ExpectedReward_Worst = ExpectedReward_log[WorstRun_idx]

            if config.SAVE_DATA:
                name = config.ALGORITHMS[a] + '_' + stamp + '_ExpectedReward'
                name_std = config.ALGORITHMS[a] + '_' + stamp + '_ExpectedReward_std'
                np.save(os.path.join(save_path, name), ExpectedReward)
                np.save(os.path.join(save_path, name_std), STD_ExpectedReward)

                name_best = config.ALGORITHMS[a] + '_' + stamp + '_ExpectedReward_Best'
                name_worst = config.ALGORITHMS[a] + '_' + stamp + '_ExpectedReward_Worst'
                np.save(os.path.join(save_path, name_best),
                        ExpectedReward_Best)
                np.save(os.path.join(save_path, name_worst),
                        ExpectedReward_Worst)

        # ---------------------------------- #
        #  Plot graphs                       #
        # ---------------------------------- #

        if config.PLOT:

            ax1 = plt.subplot(321)
            PProb = ProbSum[a, :] / N
            ax1.plot(smooth(PProb), label=config.ALGORITHMS[a])
            # ax1.set_yticks(np.arange(0, 1., 0.1))
            # ax1.set_xticks(np.arange(0, T, 50))
            ax1.grid()

            ax2 = plt.subplot(322)
            CumPProb = CumProbSum[a, :] / N
            ax2.plot(smooth(CumPProb), label=config.ALGORITHMS[a])
            #ax2.set_yticks(np.arange(0, 100., 20))
            #ax2.set_xticks(np.arange(0, T, 50))
            ax2.grid()

            ax3 = plt.subplot(323)
            Regret = RegretSum[a, :] / N
            ax3.plot(smooth(Regret), label=config.ALGORITHMS[a])
            #ax3.set_yticks(np.arange(0, 100., 20))
            #ax3.set_xticks(np.arange(0, T, 50))
            ax3.grid()

            ax4 = plt.subplot(324)
            CumRegret = CumRegretSum[a, :] / N
            ax4.plot(smooth(CumRegret), label=config.ALGORITHMS[a])
            #ax4.set_yticks(np.arange(0, 100., 20))
            #ax4.set_xticks(np.arange(0, T, 50))
            ax4.grid()

            ax5 = plt.subplot(325)
            Accuracy = AccuracySum[a, :] / N
            ax5.plot(smooth(Accuracy), label=config.ALGORITHMS[a])
            #ax5.set_yticks(np.arange(0, 100., 20))
            #ax5.set_xticks(np.arange(0, T, 50))
            ax5.grid()

            ax6 = plt.subplot(326)
            ExpectedReward = ExpectedRewardSum[a, :] / N
            ax6.plot(smooth(ExpectedReward), label=config.ALGORITHMS[a])
            #ax6.set_yticks(np.arange(0, 100., 20))
            #ax6.set_xticks(np.arange(0, T, 50))
            ax6.grid()

            #plt.subplot(326)
            #plt.fill_between(np.arange(T), smooth(ExpectedReward) - smooth(STD_ExpectedReward), smooth(ExpectedReward) + smooth(STD_ExpectedReward), color='b', alpha=0.2)
            #plt.fill_between(np.arange(T), smooth(ExpectedReward_Best), smooth(ExpectedReward_Worst), color='b', alpha=0.2)

    if config.PLOT:
        plt.subplot(321)
        plt.xlabel('t')
        plt.ylabel('Probability of optimal action')
        plt.xlim(0, T)
        plt.legend()

        plt.subplot(322)
        plt.xlabel('t')
        plt.ylabel('Cumulative Probability')
        plt.xlim(0, T)
        plt.legend()

        plt.subplot(323)
        plt.xlabel('t')
        plt.ylabel('Current Regret')
        plt.xlim(0, T)
        plt.legend()

        plt.subplot(324)
        plt.xlabel('t')
        plt.ylabel('Total regret')
        plt.xlim(0, T)
        plt.legend()

        plt.subplot(325)
        plt.xlabel('t')
        plt.ylabel('Accuracy of Payout')
        plt.xlim(0, T)
        plt.legend()

        # plt.subplot(326)
        # plt.xlabel('t')
        # plt.ylabel('Expected Reward')
        # plt.xlim(0, T)
        # plt.legend()

        plt.show()
Example #7

high_threshold = 32768
low_threshold = 5000
data_max = np.max(new_array)

# Fold values above the signed-16-bit midpoint back down
for i in range(len(new_array)):
    if new_array[i] > high_threshold:
        new_array[i] = 65535 - new_array[i]

#new_array[high] -= 65200
new_array[new_array < low_threshold] = 0
#print(find_peaks_cwt(new_array, np.arange(1, 10)))

from scipy.fftpack import rfft, irfft

new_array = smooth(new_array)
almost_derivative = np.diff(new_array)
plt.plot(new_array)
plt.show()
plt.plot(almost_derivative)
plt.show()

# Report local maxima: points where the derivative crosses from + to -
for i in range(len(almost_derivative) - 2):
    if almost_derivative[i] > 0 and almost_derivative[i + 1] < 0:
        print(i, almost_derivative[i], new_array[i])

plt.subplot(121)
plt.plot(np.concatenate(frames), 'o')
plt.subplot(122)
plt.plot(new_array, 'o')
plt.show()
plt.plot(rfft(new_array), 'o')
plt.show()
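
The sign-change loop above finds local maxima by hand; an equivalent
vectorized form with NumPy:

import numpy as np

d = np.diff(new_array)
# indices where the slope flips from rising to falling, i.e. local maxima
peaks = np.where((d[:-1] > 0) & (d[1:] < 0))[0]
print(peaks, new_array[peaks])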
Example #8
def conversion(model_path, data_dir, output_dir, no_spec=False):

    sampling_rate = 16000
    num_mcep = 23
    frame_period = 5.0

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        
    model = EncDecGen(num_mfc_features=23, pre_train=None)
    model.load(filepath=model_path)

    for file in os.listdir(data_dir):

        try:

            wav = scwav.read(os.path.join(data_dir, file))
            wav = wav[1].astype(np.float64)
            wav = preproc.wav_padding(wav=wav, sr=sampling_rate, \
                    frame_period=frame_period, multiple=4)
            f0, sp, ap = preproc.world_decompose(wav=wav, \
                    fs=sampling_rate, frame_period=frame_period)
            code_sp = preproc.world_encode_spectral_envelop(sp, \
                                    sampling_rate, dim=num_mcep)

            z_idx = np.where(f0<10.0)[0]
            f0 = scisig.medfilt(f0, kernel_size=3)
            f0 = generate_interpolation(f0)
            f0 = smooth(f0, window_len=13)
            f0 = np.reshape(f0, (1,-1,1))
            code_sp = np.reshape(code_sp, (1,-1,num_mcep))

            code_sp = np.transpose(code_sp, axes=(0,2,1))
            f0 = np.transpose(f0, axes=(0,2,1))

            # Prediction
            _, f0_conv, code_sp_conv = model.test(input_mfc=code_sp, \
                                                  input_pitch=f0)
            
            code_sp_conv = np.transpose(code_sp_conv, axes=(0,2,1))

            f0_conv = np.asarray(np.reshape(f0_conv,(-1,)), np.float64)
            code_sp_conv = np.asarray(np.squeeze(code_sp_conv), np.float64)
            code_sp_conv = np.copy(code_sp_conv, order='C')
            sp_conv = preproc.world_decode_spectral_envelop(code_sp_conv, \
                                                            sampling_rate)
            f0_conv[z_idx] = 0.0
            
            if no_spec:
                ec = np.reshape(np.sqrt(np.sum(np.square(sp), axis=1)), (-1,1))
                ec_conv = np.reshape(np.sqrt(np.sum(np.square(sp_conv), axis=1)), \
                                     (-1,1))

                # Making sure silence remains silence
                sil_zone = np.where(ec<1e-10)[0]
                ec_conv[sil_zone] = 1e-10
                
                sp = np.divide(np.multiply(sp, ec_conv), ec)
                sp = np.copy(sp, order='C')
                
                wav_transformed = preproc.world_speech_synthesis(f0=f0_conv, \
                                    decoded_sp=sp, \
                                    ap=ap, fs=sampling_rate, \
                                    frame_period=frame_period)
            else:
                wav_transformed = preproc.world_speech_synthesis(f0=f0_conv, \
                                    decoded_sp=sp_conv, \
                                    ap=ap, fs=sampling_rate, \
                                    frame_period=frame_period)
            
            librosa.output.write_wav(os.path.join(output_dir, \
                    os.path.basename(file)), wav_transformed, sampling_rate)
            print("Reconstructed file "+os.path.basename(file))
        except Exception as ex:
            print(ex)
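
generate_interpolation is not defined in this excerpt; it apparently fills
unvoiced (zero) F0 frames so that smoothing sees a continuous contour. A
hypothetical linear-interpolation version:

import numpy as np

def generate_interpolation(f0):
    # linearly interpolate F0 across unvoiced (zero-valued) frames,
    # assuming at least one voiced frame exists
    f0 = np.asarray(f0, dtype=np.float64)
    voiced = np.where(f0 > 0)[0]
    if voiced.size == 0:
        return f0
    return np.interp(np.arange(len(f0)), voiced, f0[voiced])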
Example #9

def train(emo_pair, train_dir, model_dir, model_name, \
            random_seed, validation_dir, output_dir, \
            pre_train=None, lambda_encoder=1, lambda_decoder=1, \
            lambda_generator=1):

    np.random.seed(random_seed)

    num_epochs = 1000
    mini_batch_size = 1
    encoder_learning_rate = 0.0001
    decoder_learning_rate = 0.0001
    generator_learning_rate = 0.0001

    sampling_rate = 16000
    num_mcep = 23
    frame_period = 5.0
    n_frames = 128


    le_ld_lg = "le_"+str(lambda_encoder)+"_ld_"+str(lambda_decoder) \
                +"_lg_"+str(lambda_generator)+'_'+emo_pair

    logger_file = './log/' + le_ld_lg + '.log'

    if not os.path.exists('./log'):
        os.mkdir('./log')

    if os.path.exists(logger_file):
        os.remove(logger_file)

    logging.basicConfig(filename=logger_file, level=logging.DEBUG)

    logging.info("encoder_loss - L1")
    logging.info("decoder_loss - L1")
    logging.info("generator_loss - L1")

    logging.info("lambda_encoder - {}".format(lambda_encoder))
    logging.info("lambda_decoder - {}".format(lambda_decoder))
    logging.info("lambda_generator - {}".format(lambda_generator))

    if not os.path.isdir("./generated_pitch_spect/" + le_ld_lg):
        os.makedirs("./generated_pitch_spect/" + le_ld_lg)

    logging.info('Loading Data...')

    start_time = time.time()

    data_train = scio.loadmat(os.path.join(train_dir, 'momenta_train.mat'))
    data_valid = scio.loadmat(os.path.join(train_dir, 'momenta_valid.mat'))

    pitch_A_train = np.expand_dims(data_train['src_f0_feat'], axis=-1)
    pitch_B_train = np.expand_dims(data_train['tar_f0_feat'], axis=-1)
    mfc_A_train = data_train['src_mfc_feat']
    mfc_B_train = data_train['tar_mfc_feat']
    momenta_A2B_train = np.expand_dims(data_train['momenta_f0'], axis=-1)

    pitch_A_valid = np.expand_dims(data_valid['src_f0_feat'], axis=-1)
    pitch_B_valid = np.expand_dims(data_valid['tar_f0_feat'], axis=-1)
    mfc_A_valid = data_valid['src_mfc_feat']
    mfc_B_valid = data_valid['tar_mfc_feat']
    momenta_A2B_valid = np.expand_dims(data_valid['momenta_f0'], axis=-1)

    mfc_A_valid, pitch_A_valid, mfc_B_valid, pitch_B_valid, momenta_A2B_valid \
        = preproc.sample_data(mfc_A=mfc_A_valid, pitch_A=pitch_A_valid, \
                              mfc_B=mfc_B_valid, pitch_B=pitch_B_valid, \
                              momenta_A2B=momenta_A2B_valid)

    if validation_dir is not None:
        validation_output_dir = os.path.join(output_dir, le_ld_lg)
        if not os.path.exists(validation_output_dir):
            os.makedirs(validation_output_dir)

    end_time = time.time()
    time_elapsed = end_time - start_time

    logging.info('Loading Done.')

    logging.info('Time Elapsed for Data Preprocessing: %02d:%02d:%02d' % (time_elapsed // 3600, \
                                                                   (time_elapsed % 3600 // 60), \
                                                                   (time_elapsed % 60 // 1)))

    model = EncDecGen(
        num_mfc_features=23,
        pre_train=pre_train)  #use pre_train arg to provide trained model

    for epoch in range(1, num_epochs + 1):

        logging.info('Epoch: %d' % epoch)

        start_time_epoch = time.time()

        mfc_A, pitch_A, mfc_B, \
                pitch_B, momenta_A2B = preproc.sample_data(mfc_A=mfc_A_train, \
                                        pitch_A=pitch_A_train, mfc_B=mfc_B_train, \
                                        pitch_B=pitch_B_train, momenta_A2B=momenta_A2B_train)

        n_samples = mfc_A.shape[0]

        batch_enc_loss = list()
        batch_dec_loss = list()
        batch_gen_loss = list()
        batch_tot_loss = list()

        for i in range(n_samples // mini_batch_size):

            start = i * mini_batch_size
            end = (i + 1) * mini_batch_size

            encoder_loss, decoder_loss, generator_loss, \
            gen_momenta, gen_pitch, gen_mfc \
                = model.train(input_mfc_A=mfc_A[start:end], \
                            input_mfc_B=mfc_B[start:end], \
                            input_pitch_A=pitch_A[start:end], \
                            input_pitch_B=pitch_B[start:end], \
                            input_momenta_A2B=momenta_A2B[start:end], \
                            lambda_encoder=lambda_encoder, \
                            lambda_decoder=lambda_decoder, \
                            lambda_generator=lambda_generator, \
                            encoder_learning_rate=encoder_learning_rate, \
                            decoder_learning_rate=decoder_learning_rate, \
                            generator_learning_rate = generator_learning_rate)

            batch_enc_loss.append(encoder_loss)
            batch_dec_loss.append(decoder_loss)
            batch_gen_loss.append(generator_loss)
            batch_tot_loss.append(lambda_encoder*encoder_loss \
                    + lambda_decoder*decoder_loss + lambda_generator*generator_loss)

        model.save(directory=model_dir, filename=model_name)

        logging.info("Train Encoder Loss- {}".format(np.mean(batch_enc_loss)))
        logging.info("Train Decoder Loss- {}".format(np.mean(batch_dec_loss)))
        logging.info("Train Generator Loss- {}".format(
            np.mean(batch_gen_loss)))
        logging.info("Train Total Loss- {}".format(np.mean(batch_tot_loss)))

        # Getting results on validation set

        valid_enc_loss = list()
        valid_dec_loss = list()
        valid_gen_loss = list()
        valid_tot_loss = list()

        for i in range(mfc_A_valid.shape[0]):

            gen_momenta, gen_pitch, gen_mfc, \
            enc_loss, dec_loss, gen_loss = \
                model.compute_test_loss(input_mfc_A=mfc_A_valid[i:i+1], \
                             input_pitch_A=pitch_A_valid[i:i+1], \
                             input_momenta_A2B=momenta_A2B_valid[i:i+1], \
                             input_mfc_B=mfc_B_valid[i:i+1], \
                             input_pitch_B=pitch_B_valid[i:i+1])

            valid_enc_loss.append(enc_loss)
            valid_dec_loss.append(dec_loss)
            valid_gen_loss.append(gen_loss)
            valid_tot_loss.append(lambda_encoder*enc_loss \
                    + lambda_decoder*dec_loss + lambda_generator*gen_loss)

            if epoch % 100 == 0:
                pylab.figure(figsize=(12, 12))
                pylab.plot(pitch_A_valid[i].reshape(-1, ),
                           label="Input Neutral")
                pylab.plot(pitch_B_valid[i].reshape(-1, ),
                           label="Target Angry")
                pylab.plot(gen_pitch.reshape(-1, ), label="Generated Angry")
                pylab.plot(momenta_A2B_valid[i].reshape(-1, ),
                           label="Target Momentum")
                pylab.plot(gen_momenta.reshape(-1, ),
                           label="Generated Momentum")
                pylab.legend(loc=1)
                pylab.title("Epoch " + str(epoch) + " example " + str(i + 1))
                pylab.savefig("./generated_pitch_spect/"+le_ld_lg+'/'+str(epoch)\
                                + "_"+str(i+1)+".png")
                pylab.close()

        logging.info("Valid Encoder Loss- {}".format(np.mean(valid_enc_loss)))
        logging.info("Valid Decoder Loss- {}".format(np.mean(valid_dec_loss)))
        logging.info("Valid Generator Loss- {}".format(
            np.mean(valid_gen_loss)))
        logging.info("Valid Total Loss- {}".format(np.mean(valid_tot_loss)))

        end_time_epoch = time.time()
        time_elapsed_epoch = end_time_epoch - start_time_epoch

        logging.info('Time Elapsed for This Epoch: %02d:%02d:%02d' % (time_elapsed_epoch // 3600, \
                (time_elapsed_epoch % 3600 // 60), (time_elapsed_epoch % 60 // 1)))

        if validation_dir is not None:
            if epoch % 100 == 0:
                logging.info('Generating Validation Data B from A...')
                sys.stdout.flush()
                for file in sorted(os.listdir(validation_dir)):
                    try:
                        filepath = os.path.join(validation_dir, file)
                        wav = scwav.read(filepath)
                        wav = wav[1].astype(np.float64)
                        wav = preproc.wav_padding(wav=wav, sr=sampling_rate, \
                                frame_period=frame_period, multiple=4)
                        f0, sp, ap = preproc.world_decompose(wav=wav, \
                                fs=sampling_rate, frame_period=frame_period)
                        code_sp = preproc.world_encode_spectral_envelop(sp, \
                                                sampling_rate, dim=num_mcep)

                        z_idx = np.where(f0 < 10.0)[0]
                        f0 = scisig.medfilt(f0, kernel_size=3)
                        f0 = generate_interpolation(f0)
                        f0 = smooth(f0, window_len=13)
                        f0 = np.reshape(f0, (1, -1, 1))
                        code_sp = np.reshape(code_sp, (1, -1, num_mcep))

                        code_sp = np.transpose(code_sp, axes=(0, 2, 1))
                        f0 = np.transpose(f0, axes=(0, 2, 1))

                        # Prediction
                        _, f0_conv, code_sp_conv = model.test(input_mfc=code_sp, \
                                                              input_pitch=f0)

                        code_sp_conv = np.transpose(code_sp_conv,
                                                    axes=(0, 2, 1))

                        f0_conv = np.asarray(np.reshape(f0_conv, (-1, )),
                                             np.float64)
                        code_sp_conv = np.asarray(np.squeeze(code_sp_conv),
                                                  np.float64)
                        code_sp_conv = np.copy(code_sp_conv, order='C')
                        sp_conv = preproc.world_decode_spectral_envelop(code_sp_conv, \
                                                                        sampling_rate)

                        f0_conv[z_idx] = 0.0
                        wav_transformed = preproc.world_speech_synthesis(f0=f0_conv, \
                                            decoded_sp=sp_conv, \
                                            ap=ap, fs=sampling_rate, \
                                            frame_period=frame_period)
                        librosa.output.write_wav(os.path.join(validation_output_dir, \
                                os.path.basename(file)), wav_transformed, sampling_rate)
                        logging.info("Reconstructed file " +
                                     os.path.basename(file))
                    except Exception as ex:
                        logging.info(ex)
Example #10
def conversion(model_f0_path, model_mcep_path, mcep_nmz_path, data_dir,
               conversion_direction, output_dir, emo_pair):

    num_mceps = 24
    sampling_rate = 16000
    frame_period = 5.0

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    mcep_normalization_params = np.load(mcep_nmz_path)
    mcep_mean_A = mcep_normalization_params['mean_A']
    mcep_std_A = mcep_normalization_params['std_A']
    mcep_mean_B = mcep_normalization_params['mean_B']
    mcep_std_B = mcep_normalization_params['std_B']

    for file in os.listdir(data_dir):

        try:

            filepath = os.path.join(data_dir, file)
            wav, _ = librosa.load(filepath, sr=sampling_rate, mono=True)
            wav = preproc.wav_padding(wav=wav, sr=sampling_rate, \
                    frame_period=frame_period, multiple=4)
            f0, sp, ap = preproc.world_decompose(wav=wav, \
                    fs=sampling_rate, frame_period=frame_period)

            coded_sp = preproc.world_encode_spectral_envelope(sp=sp, \
                    fs=sampling_rate, dim=num_mceps)

            coded_sp_f0 = preproc.world_encode_spectral_envelope(sp=sp, \
                    fs=sampling_rate, dim=23)

            coded_sp_transposed = coded_sp.T

            if conversion_direction == 'A2B':

                coded_sp_norm = (coded_sp_transposed -
                                 mcep_mean_A) / mcep_std_A

                # test mceps
                coded_sp_converted_norm = mcep_conversion(model_mcep_path=model_mcep_path, \
                                                features=np.array([coded_sp_norm]), \
                                                direction=conversion_direction)
                # test f0:
                f0 = scisig.medfilt(f0, kernel_size=3)
                z_idx = np.where(f0 < 10.0)[0]

                f0 = generate_interpolation(f0)
                f0 = smooth(f0, window_len=13)
                f0 = np.reshape(f0, (1, 1, -1))

                coded_sp_f0 = np.expand_dims(coded_sp_f0, axis=0)
                coded_sp_f0 = np.transpose(coded_sp_f0, (0, 2, 1))

                f0_converted = f0_conversion(model_f0_path=model_f0_path,
                                             input_mfc=coded_sp_f0,
                                             input_pitch=f0,
                                             direction='A2B')

                f0_converted = np.asarray(np.reshape(f0_converted, (-1, )),
                                          np.float64)
                f0_converted[z_idx] = 0.0
                f0_converted = np.ascontiguousarray(f0_converted)

            else:
                raise Exception("Please specify A2B as conversion direction")

            coded_sp_converted = coded_sp_converted_norm * mcep_std_B + mcep_mean_B
            coded_sp_converted = coded_sp_converted.T
            coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
            decoded_sp_converted = preproc.world_decode_spectral_envelope(coded_sp=coded_sp_converted, \
                    fs=sampling_rate)
            wav_transformed = preproc.world_speech_synthesis(f0=f0_converted, \
                    decoded_sp=decoded_sp_converted, ap=ap, fs=sampling_rate, \
                    frame_period=frame_period)
            librosa.output.write_wav(os.path.join(output_dir, \
                    os.path.basename(file)), wav_transformed, sampling_rate)

            print("Processed " + filepath)
        except Exception as ex:
            print(ex)
Example #11
def smooth(self):
    self._edited_image = helper.smooth(self._edited_image)
    self.update_pixmap()
Example #12
# save_dir, PProb_file/Regret_file and PProb_file2/Regret_file2 are defined
# earlier in the full script (not shown); only the third pair appears here.
PProb_file3 = 'PProb_MCMC+, a=100, b=10_2018-08-06 18:03:19.npy'
Regret_file3 = 'Regret_MCMC+, a=100, b=10_2018-08-06 18:03:19.npy'

PProb = np.load(os.path.join(save_dir, PProb_file))
Regret = np.load(os.path.join(save_dir, Regret_file))
PProb2 = np.load(os.path.join(save_dir, PProb_file2))
Regret2 = np.load(os.path.join(save_dir, Regret_file2))
PProb3 = np.load(os.path.join(save_dir, PProb_file3))
Regret3 = np.load(os.path.join(save_dir, Regret_file3))

plt.figure()

max_x_range = 200

ax = plt.subplot(211)
ax.plot(smooth(PProb), label='1')
ax.plot(smooth(PProb2), label='2')
ax.plot(smooth(PProb3), label='3')
ax.set_yticks(np.arange(0, 1., 0.1))
ax.set_xticks(np.arange(0, max_x_range, 10))
ax.grid()

ax2 = plt.subplot(212)
ax2.plot(smooth(Regret), label=Regret_file)
ax2.set_yticks(np.arange(0, 100, 10))
ax2.set_xticks(np.arange(0, max_x_range, 10))
ax2.grid()

plt.subplot(211)
plt.xlabel('t')
plt.ylabel('Probability of optimal action')
Example #13
def train(model, epoch_count, batch_size, z_dim, star_learning_rate, beta1, beta2, get_batches, data_shape,
          image_mode):
    input_real, input_z, lrate, k_t = model.model_inputs(
        *(data_shape[1:]), z_dim)

    d_loss, g_loss, d_real, d_fake = model.model_loss(
        input_real, input_z, data_shape[3], z_dim, k_t)

    d_opt, g_opt = model.model_opt(d_loss, g_loss, lrate, beta1, beta2)

    losses = []
    learning_rate = 0
    iter = 0

    epoch_drop = 3

    lam = 1e-3
    gamma = 0.5
    k_curr = 0.0

    test_z = np.random.uniform(-1, 1, size=(16, z_dim))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch_i in range(epoch_count):

            learning_rate = star_learning_rate * \
                math.pow(0.2, math.floor((epoch_i + 1) / epoch_drop))

            for batch_images in get_batches(batch_size):
                iter += 1

                batch_images *= 2  # rescale inputs (presumably from [-0.5, 0.5] to [-1, 1])

                batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))

                _, d_real_curr = sess.run([d_opt, d_real], feed_dict={
                                          input_z: batch_z, input_real: batch_images, lrate: learning_rate, k_t: k_curr})

                _, d_fake_curr = sess.run([g_opt, d_fake], feed_dict={
                                          input_z: batch_z, input_real: batch_images, lrate: learning_rate, k_t: k_curr})

                k_curr = k_curr + lam * (gamma * d_real_curr - d_fake_curr)

                # save convergence measure
                if iter % 100 == 0:
                    measure = d_real_curr + \
                        np.abs(gamma * d_real_curr - d_fake_curr)
                    losses.append(measure)

                    print("Epoch {}/{}...".format(epoch_i + 1, epoch_count),
                          'Convergence measure: {:.4}'.format(measure))

                # save test and batch images
                if iter % 700 == 0:
                    helper.show_generator_output(
                        sess, model.generator, input_z, batch_z, data_shape[3], image_mode, 'batch-' + str(iter))

                    helper.show_generator_output(
                        sess, model.generator, input_z, test_z, data_shape[3], image_mode, 'test-' + str(iter))

        print('Training steps: ', iter)

        losses = np.array(losses)

        helper.save_plot([losses, helper.smooth(losses)],
                         'convergence_measure.png')