Code Example #1
File: conv_run.py  Project: alexeyche/alexeyche-junk
import logging
import os

import tensorflow as tf

from conv_model import ConvModel
from env import current as env
from util import setup_logging

setup_logging(logging.getLogger())


data_source = []
for f in sorted(os.listdir(env.dataset())):
    if f.endswith(".wav"):
        data_source.append(env.dataset(f))

cm = ConvModel(
    batch_size=30000,
    filter_len=150,
    filters_num=100,
    target_sr=3000,
    gamma=1e-03,
    strides=8,
    avg_window=5,
    lrate=1e-04
)

sess = tf.Session()

dataset = cm.form_dataset(data_source, proportion = 0.1)

cm.train(sess, dataset, 10000)
cm.evaluate_and_save(sess, dataset)
cm.serialize(sess)
Code Example #2
import logging
import os

import tensorflow as tf

from matplotlib import pyplot as plt
from os.path import join as pj
from util import setup_logging

from conv_model import ConvModel
from env import current as env

setup_logging(logging.getLogger())

data_source = []
for f in sorted(os.listdir(env.dataset())):
    if f.endswith(".wav"):
        data_source.append(env.dataset(f))

cm = ConvModel(batch_size=30000,
               filter_len=150,
               filters_num=100,
               target_sr=3000,
               gamma=1e-03,
               strides=8,
               avg_window=5,
               lrate=1e-04)

sess = tf.Session()

dataset = cm.form_dataset(data_source, proportion=0.1)

cm.train(sess, dataset, 10000)
cm.evaluate_and_save(sess, dataset)
cm.serialize(sess)
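
Both Code Example #1 and Code Example #2 end by calling cm.serialize(sess). The fragment below is not part of the project; it is a minimal sketch, grounded in the calls that appear in Code Example #3, of how such a serialized model could be reloaded and used to turn hidden activations back into a waveform. The shape of the hidden array and the exact signatures of restore_hidden and save_waveform_as are assumptions.

# Minimal sketch (not from the project): reload the model serialized above.
# ConvModel.deserialize, restore_hidden and save_waveform_as are the calls
# used in Code Example #3; the shape of hidden is an illustrative guess.
import numpy as np
import tensorflow as tf

from conv_model import ConvModel
from env import current as env

sess = tf.Session()
cm = ConvModel.deserialize()   # rebuild the trained model from its serialized form

# stand-in hidden activations (time steps x filters_num); random data for illustration
hidden = np.random.randn(1000, 100).astype(np.float32)

waveform = cm.restore_hidden(hidden, sess=sess)
cm.save_waveform_as(waveform, 0, env.run("restored.wav"))
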
Code Example #3
File: run.py  Project: alexeyche/alexeyche-junk
                inputs_v = seq_out[step_id:(step_id+bptt_steps)]
            elif len(seq_out) > 0:
                # scheduled sampling: with probability p_sub, replace this batch
                # row's teacher-forced inputs with the outputs the network
                # produced on the previous pass over the sequence
                for b_id in xrange(batch_size):
                    if p_sub > np.random.random_sample():
                        for seq_id in xrange(bptt_steps):
                            inputs_v[seq_id][b_id, :] = seq_out[step_id + seq_id][b_id, :]

            feed_dict = {k: v for k, v in zip(inputs, inputs_v)}
            feed_dict[state] = state_v
            feed_dict.update({k: v for k, v in zip(targets, targets_v)})

            outputs_v, state_v, loss_v, _ = sess.run([outputs, finstate, loss, train_step], feed_dict)
            new_seq_out += outputs_v
            losses.append(loss_v)
        
        seq_out = new_seq_out
        batch_ids += forecast_step
        gc.collect()

    print "Epoch {}, learning rate {}".format(e, ep_lrate), "train loss {}".format(sum(losses)/float(len(losses)))
    if e % 100 == 0 and e > 0:
        print "Generating sample"
        generated = generate()
        generated = np.asarray(generated).reshape(len(generated), input_size)
        cm = ConvModel.deserialize()
        waveform = cm.restore_hidden(generated, sess=sess)

        cm.save_waveform_as(waveform, 0, env.run("{}_generated.wav".format(e)))

print "Saving model {}".format(saver.save(sess, model_fname))
Code Example #4
import os

import librosa as lr
import tensorflow as tf

from conv_model import ConvModel
from env import current as env

model_fname = env.run("model.ckpt")

batch_size = 30000
L = 150
filters_num = 100
target_sr = 3000
gamma = 1e-03
epochs = 2000
lrate = 1e-04
k = 8 # filter strides
avg_size = 5
sel = None

cm = ConvModel(batch_size, L, filters_num, k, avg_size, lrate, gamma)

sess = tf.Session()
saver = tf.train.Saver()
if os.path.exists(model_fname):
    print "Restoring from {}".format(model_fname)
    saver.restore(sess, model_fname)
    epochs = 0
else:
    sess.run(tf.initialize_all_variables())

def read_song(source_id):
    song_data_raw, source_sr = lr.load(data_source[source_id])
    song_data = lr.resample(song_data_raw, source_sr, target_sr, scale=True)
    song_data = song_data[:song_data.shape[0]/10]
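
read_song is cut off above; the part shown loads a wav with librosa, resamples it to target_sr and keeps the first tenth of the samples. The helper below restates just that visible part as a self-contained sketch; the function name is hypothetical, and the resample call keeps the same older positional librosa form that the original snippet uses.

# Self-contained sketch of the load/resample/trim pattern from read_song.
# load_and_downsample is a hypothetical helper name, not part of the project.
import librosa as lr

def load_and_downsample(path, target_sr=3000):
    data, source_sr = lr.load(path)                             # decode at librosa's default rate
    data = lr.resample(data, source_sr, target_sr, scale=True)  # downsample to the model's rate
    return data[:data.shape[0] // 10]                           # keep only the first tenth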