Example #1
# Training
# Initialize all graph variables and open a session capped at `gpu_frac`
# of GPU memory (TF1 graph/session API).
init = tf.global_variables_initializer()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_frac)
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False,
                                        gpu_options=gpu_options))
sess.run(init)

#%% Training cycle
for epoch in range(training_epochs):
    # One slot per training file, filled lazily below.
    # NOTE: the original used `[[]] * num_training_files`, which aliases a
    # single list object into every slot — a comprehension keeps slots
    # independent.
    training_data = [[] for _ in range(num_training_files)]
    for file_idx in range(num_training_files):
        # Load each file once per epoch. (The original guarded this with a
        # `reading_phase` flag that was reset to True every epoch and never
        # cleared, so the guard was always taken — removed as dead code.)
        training_data[file_idx] = data_loader(file_idx, input_dim)
        for i in range(n_batch):
            # pre_emph is zero since we do it on X if we want separately
            batch_xs = data_parser(training_data[file_idx], input_dim,
                                   batch_size, overlap=overlap)

            ################################################################
            # Update taco: one optimizer step; taco_cost_ is the batch loss.
            _, taco_cost_ = sess.run(
                [opt, taco_cost],
                feed_dict={X: batch_xs,
                           learning_rate: learning_rate_init,
                           noise_std: noise_std_init})

            # Display logs every `display_step` iterations.
            if i % display_step == 0:
                print("epoch:", '%02d' % (epoch + 1),
                        "File:", '%02d' % (file_idx),
                      "iteration:", '%04d' % (i + 1),
                      "Taco_cost =", "{:.9f}".format( (10**4) * taco_cost_))

#               ###  Early stopping!
Example #2
File: conv_ae.py — Project: HamSade/codes
# Training
# Initialize graph variables and open a default session (no GPU cap here,
# unlike example #1's fraction-limited session).
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# Training cycle
for epoch in range(training_epochs):
    # NOTE(review): the number of training files is hard-coded to 10 —
    # consider a `num_training_files` variable; confirm against the data set.
    for file_idx in range(10):
        training_data = data_loader(file_idx)

        for i in range(n_batch):
            # pre_emph is zero since we do it on X if we want separatrely
            batch_xs = data_parser(training_data, input_dim, batch_size,
                                   preemph=0.0, overlap=True)
            # One optimizer step; c is the batch cost for this step.
            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs, mode:0.0})

            # Display logs every `display_step` batches (fires per batch,
            # not once per epoch as the original comment suggested).
            if i % display_step == 0:
                print("epoch_", '%02d' % (epoch+1),
                        "File_", '%02d' % (file_idx),
                      "iteration_", '%04d' % (i+1),
                      "cost=", "{:.9f}".format(c))

print("Optimization Finished!")
#%%##########################################################################
# Training error calculation
# NOTE(review): this section continues past the visible chunk — the loop
# that accumulates into `training_error` is not shown here.

training_data = data_loader(9)  # presumably a fixed evaluation file index — confirm
training_error = 0