# Example #1 -- score: 0
# Reg_de_train = tf.train.AdamOptimizer(0.00001).minimize(loss_Reg_de)

# Optimizer for the reconstruction objective; `loss` is defined earlier in the file.
train_step = tf.train.AdamOptimizer(0.0003).minimize(loss)

batchSize = 128
src = 'SF1'  # source-speaker id -- assumes a VCC-style corpus, TODO confirm
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tS = time.time()  # wall-clock start; read later to report total training time
e = []            # per-epoch evaluation losses
saver = tf.train.Saver()

# Phase 1: train the reconstruction network.
for i in range(10):
    # if i>2:
    #         lamb = 1
    for j in range(100):
        x_batch, label_batch = nextbatch(trainData, Label, batchSize)
        train_step.run(feed_dict={source: x_batch, label: label_batch})
        # Reg_de_train.run(feed_dict={source: x_batch, label: label_batch})
    # Evaluate on a fresh batch drawn from the same training data
    # (NOTE(review): this is not a held-out set -- confirm intent).
    x_Evabatch, label_Evabatch = nextbatch(trainData, Label, batchSize)
    # Fetch the scalar directly rather than a one-element list.
    eva_loss = sess.run(loss, feed_dict={source: x_Evabatch, label: label_Evabatch})
    e.append(eva_loss)
    print('epoch %d' % (i))
    print('reconstruct loss: %f' % (np.mean(eva_loss)))


# Phase 2: train the speaker regressor on latent codes `de` produced by the
# (now-trained) encoder output `W`.
for i in range(10):
    for j in range(100):
        x_batch, label_batch = nextbatch(trainData, Label, batchSize)
        de = sess.run(W, feed_dict={source: x_batch, label: label_batch})
        Reg_en_train.run(feed_dict={latent: de, label: label_batch})
    reg_loss, enacu = sess.run([loss_Reg_en, Acu_Reg_en],
                               feed_dict={latent: de, label: label_batch})
    # Bug fix: these per-epoch values were computed but silently discarded.
    print('epoch %d' % (i))
    print('regressor loss: %f, accuracy: %f' % (np.mean(reg_loss), enacu))
# Example #2 -- score: 0
# Optimizers: reconstruction (MSE), KL regularizer, and speaker regressor.
# The regressor optimizer is built but only exercised by the commented-out
# loop below.
train_MS = tf.train.AdamOptimizer(0.0001).minimize(loss_MS)
train_KL = tf.train.AdamOptimizer(0.00001).minimize(loss_KL)
train_Reg = tf.train.AdamOptimizer(0.0001).minimize(loss_Reg)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
saver = tf.train.Saver()
time_start = time.time()  # wall-clock start for timing
batchSize = 128
is_training = True

if is_training:
    for epoch in range(15):
        # `step` instead of `iter`: do not shadow the builtin.
        for step in range(1000):
            src_batch, label_batch = nextbatch(TrainDataPath, featureSize,
                                               batchSize)
            train_MS.run(feed_dict={source: src_batch, label: label_batch})
            train_KL.run(feed_dict={source: src_batch, label: label_batch})

        # Report losses on the last training batch of the epoch.
        ms_value, kl_value = sess.run([loss_MS, loss_KL],
                                      feed_dict={
                                          source: src_batch,
                                          label: label_batch
                                      })
        print('epoch: %d' % (epoch))
        print('MSE: %f' % (ms_value))
        print('KL: %f' % (np.mean(kl_value)))
    # for epoch in range(10):
    #     for iter in range(1000):
    #         src_batch, label_batch = nextbatch(TrainDataPath, featureSize, batchSize)
    #         train_Reg.run(feed_dict={source: src_batch, label: label_batch})
# Example #3 -- score: 0
# NOTE(review): `trainables` is collected but never used below -- kept in case
# later (unseen) code reads it.
trainables = tf.trainable_variables()
# L2 reconstruction loss between the network output and its input.
loss = tf.nn.l2_loss(recover - source)
train_L2_loss = tf.train.AdamOptimizer(0.0001).minimize(loss)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

batchSize = 128
is_training = False  # flip to True to (re)train instead of restoring
saver = tf.train.Saver()

if is_training:
    for epoch in range(20):
        # `step`, not `iter`: avoid shadowing the builtin.
        for step in range(100):
            x = nextbatch(Trainfilepath, segmentLength, batchSize)
            train_L2_loss.run(feed_dict={source: x})

        # Report loss on the last training batch of the epoch.
        loss_value = sess.run(loss, feed_dict={source: x})
        print('epoch: %d' % (epoch))
        print('loss: %f' % (loss_value))

    saver.save(sess, 'ckpt/model.ckpt')
else:
    saver.restore(sess, tf.train.latest_checkpoint('ckpt/'))
    print('Model restored.')

# Reconstruct one test utterance and write it out as a wav file.
voice, filename = pickOne(Testfilepath, 'SF1', segmentLength)
output = sess.run(recover, feed_dict={source: voice})
output = output.reshape([-1])
# NOTE(review): the dtype of `output` determines the wav sample format written
# by scipy.io.wavfile.write (e.g. float32 expected in [-1, 1]) -- confirm.
scipy.io.wavfile.write(filename + '.wav', sampleRate, output)
# Example #4 -- score: 0
# Softmax cross-entropy classifier head over logits `y`.
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=y))
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)

# Fraction of samples whose argmax prediction matches the one-hot label.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

batchSize = 128

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

# Bug fix: `tS` was read at the end of this script but never assigned in it,
# which raises NameError when the script is run standalone.
tS = time.time()

for i in range(20):
    for j in range(200):
        x_batch, y_batch = nextbatch(TrainDataPath, segmentLength, batchSize)
        train_step.run(feed_dict={source: x_batch, label: y_batch})
    x_Evabatch, y_Evabatch = nextbatch(TestDataPath, segmentLength, batchSize)
    # Train accuracy is measured on the last training batch of the epoch.
    tr_acu = sess.run(accuracy, feed_dict={source: x_batch, label: y_batch})
    te_acu = sess.run(accuracy,
                      feed_dict={
                          source: x_Evabatch,
                          label: y_Evabatch
                      })

    print('epoch %d' % (i))
    print('Train accuracy: %f' % (tr_acu))
    print('Test accuracy: %f' % (te_acu))

tE = time.time()
print("training time: %f" % (tE - tS))
# Example #5 -- score: 0
# Optimizers: KL regularizer and speaker-regressor loss.
# NOTE(review): `train_Regloss` is built but never run in the visible portion
# of this script; the commented-out loop at the end (or code past this view)
# may be where it was meant to be used.
train_KL = tf.train.AdamOptimizer(0.00001).minimize(KL)
train_Regloss = tf.train.AdamOptimizer(0.0001).minimize(speakReg_loss)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

tS = time.time()  # wall-clock start for timing
saver = tf.train.Saver()

batchSize = 64
is_training = False  # flip to True to train; restore path presumably elsewhere

if is_training:
    for epoch in range(5):
        for iter in range(100):
            x_batch, label_batch = nextbatch(TrainDataPath, segmentLength,
                                             batchSize)
            # NOTE(review): `train_re_loss` / `train_trans_loss` are not
            # defined in this view -- presumably created earlier in the file.
            train_re_loss.run(feed_dict={source: x_batch, label: label_batch})
            train_trans_loss.run(feed_dict={
                source: x_batch,
                label: label_batch
            })
            train_KL.run(feed_dict={source: x_batch, label: label_batch})
        # Report losses on the last training batch of the epoch.
        loss_value, loss_KL = sess.run([recover_loss, KL],
                                       feed_dict={
                                           source: x_batch,
                                           label: label_batch
                                       })
        print('epoch: %d' % (epoch))
        print('l2_loss: %f' % (loss_value))
    saver.save(sess, 'ckpt/model')
    # for epoch in range(50):