def evaluate_and_save(self, sess, dataset):
    for data_id, data in enumerate(dataset):
        mean_cost, sparsity, filter_v, rfilter_v, bias_v, hidden_final, _ = self.roll_around(
            sess, data, collect_output=True
        )

        # Dump the sparse hidden representation for this input file.
        dump_fname = env.dataset("{}_sparse_acoustic_data.pkl".format(data_id))
        logging.info("Saving hidden data in {}".format(dump_fname))
        save_as_sparse(hidden_final, open(dump_fname, "wb"))  # binary mode for the pickle payload

        # Reconstruct the waveform from the hidden code and the learned filters.
        out_final = self.restore_hidden(
            hidden_final,
            rfilter_v.reshape(self.cfg.filter_len, self.cfg.filters_num)
        )
        self.save_waveform_as(out_final, data_id, env.result("{}_recovery.wav".format(data_id)))
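# For symmetry, a minimal sketch of how such a dump could be read back. It is an
# assumption that save_as_sparse pickles a scipy.sparse matrix -- the code above
# does not confirm the format -- and load_sparse is a hypothetical helper, not
# part of this repository.
import cPickle
import scipy.sparse

def load_sparse(f):
    m = cPickle.load(f)              # assumed: a scipy.sparse matrix
    return np.asarray(m.todense())   # densify for downstream numpy code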
output_norm = tf.nn.l2_normalize(output, dim=1)

# L2 reconstruction cost, normalized by sequence length and batch size.
cost = tf.nn.l2_loss(output_norm - target_norm) / c.seq_size / c.batch_size

# optimizer = tf.train.AdamOptimizer(c.lrate)
# optimizer = tf.train.RMSPropOptimizer(c.lrate)
# optimizer = tf.train.AdagradOptimizer(c.lrate)
optimizer = tf.train.GradientDescentOptimizer(c.lrate)

# Clip gradients by their global norm to keep the updates stable.
tvars = tf.trainable_variables()
grads_raw = tf.gradients(cost, tvars)
grads, _ = tf.clip_by_global_norm(grads_raw, 5.0)
apply_grads = optimizer.apply_gradients(zip(grads, tvars))

df = env.dataset("test_ts.csv")
data = np.loadtxt(df)

# fname = env.dataset(os.listdir(env.dataset())[0])
# df = env.run("test_data.pkl")
# if not os.path.exists(df):
#     song_data_raw, source_sr = lr.load(fname)
#     print "Got sampling rate {}, resampling to {} ...".format(source_sr, c.target_sr)
#     song_data = lr.resample(song_data_raw, source_sr, c.target_sr, scale=True)
#     song_data = song_data[:data_size,]
#     np.save(open(df, "w"), song_data)
# else:
#     song_data = np.load(open(df))
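# A hedged sketch of how apply_grads would typically be driven from a session.
# The placeholder names (input_ph, target_ph) and the batch variables are
# assumptions for illustration; the real feeds are defined elsewhere in this
# script, so the sketch is left commented out.
#
# sess = tf.Session()
# sess.run(tf.global_variables_initializer())
# for e in xrange(epochs):
#     cost_v, _ = sess.run(
#         [cost, apply_grads],
#         feed_dict={input_ph: batch_input, target_ph: batch_target}
#     )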
epochs = 10000
bptt_steps = 50
seq_size = 150
lrate = 0.0001
decay_rate = 1.0  # 0.999
forecast_step = 0
continuous_steps = 1

# Collect the sparse hidden-code dumps produced by the convolutional model.
source_data_file_list = []
for f in sorted(os.listdir(env.dataset())):
    if f.endswith("sparse_acoustic_data.pkl"):
        print "Considering {} as input".format(f)
        source_data_file_list.append(env.dataset(f))

data_file_list = source_data_file_list[:]

max_t, input_size = 0, None
data_corpus = None
data_ends = []
for source_id, inp_file in enumerate(data_file_list):
    print "Reading {}".format(inp_file)
def calc_mean_and_var(data, batch_size, window_size):
    # First pass: batch-wise means (mirrors the variance pass below).
    it = 0
    x_means = []
    while it + window_size < data.shape[0]:
        Rn_v, it = form_batch(it, data, batch_size, window_size)
        x_means.append(np.mean(Rn_v, 0))
    x_mean = np.sum(x_means, 0) / len(x_means)

    # Second pass: batch-wise variances around that mean.
    it = 0
    x_vars = []
    while it + window_size < data.shape[0]:
        Rn_v, it = form_batch(it, data, batch_size, window_size)
        x_vars.append(np.mean(np.square(Rn_v - x_mean), 0))
    x_var = np.sum(x_vars, 0) / len(x_vars)

    return x_mean, x_var


df = env.run("test_data.pkl")
fname = env.dataset([f for f in os.listdir(env.dataset()) if f.endswith(".wav")][0])
if not os.path.exists(df):
    song_data_raw, source_sr = lr.load(fname)
    print "Got sampling rate {}, resampling to {} ...".format(source_sr, target_sr)
    data = lr.resample(song_data_raw, source_sr, target_sr, scale=True)
    # data = song_data[:30000,]
    np.save(open(df, "wb"), data)   # binary mode for the .npy payload
else:
    data = np.load(open(df, "rb"))

x_mean, x_var = calc_mean_and_var(data, batch_size, filter_size)
dd = form_batch(0, data, data.shape[0], filter_size)[0]
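# One likely use of these statistics (an assumption -- the downstream code is
# not shown here) is to whiten each window before it reaches the model:
#
# dd_norm = (dd - x_mean) / np.sqrt(x_var + 1e-08)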
import logging
import sys

import numpy as np
import os
import tensorflow as tf
from matplotlib import pyplot as plt
from os.path import join as pj

from util import setup_logging
from conv_model import ConvModel
from env import current as env

setup_logging(logging.getLogger())

# Every .wav file in the dataset directory becomes an input source.
data_source = []
for f in sorted(os.listdir(env.dataset())):
    if f.endswith(".wav"):
        data_source.append(env.dataset(f))

cm = ConvModel(
    batch_size=30000,
    filter_len=150,
    filters_num=100,
    target_sr=3000,
    gamma=1e-03,
    strides=8,
    avg_window=5,
    lrate=1e-04
)

sess = tf.Session()
dataset = cm.form_dataset(data_source, proportion=0.1)
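# The natural next step (grounded in evaluate_and_save above, though this
# script does not show it) would be to push the dataset through the model,
# writing the sparse hidden codes and the recovered waveforms:
#
# cm.evaluate_and_save(sess, dataset)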