def runFullyTrainModel(self, epochs, batch_size, keep_probability):

        save_model_path = './image_classification'

        print('Training...')
        with tf.Session() as sess:
            # Initializing the variables
            sess.run(tf.global_variables_initializer())

            # Training cycle
            for epoch in range(epochs):
                # Loop over all batches
                n_batches = 5
                for batch_i in range(1, n_batches + 1):
                    for batch_features, batch_labels in helper.load_preprocess_training_batch(
                            batch_i, batch_size):
                        train_neural_network(sess, optimizer, keep_probability,
                                             batch_features, batch_labels)
                    print('Epoch {:>2}, CIFAR-10 Batch {}:  '.format(
                        epoch + 1, batch_i),
                          end='')
                    print_stats(sess, batch_features, batch_labels, cost,
                                accuracy)
            saver = tf.train.Saver()
            save_path = saver.save(sess, save_model_path)
        print("Model FullyTrainModel Ran Successfully")
Example #2
def single_train(x, y, keep_prob, cost, optimizer, accuracy,
                 epochs, batch_size, keep_probability,
                 valid_features, valid_labels):
    print('\nChecking the Training on a Single Batch...')
    with tf.Session() as sess:
        # Initializing the variables
        sess.run(tf.global_variables_initializer())

        # Training cycle
        for epoch in range(epochs):
            start = time.time()
            batch_i = 1
            for batch_features, batch_labels in (
                    helper.load_preprocess_training_batch(batch_i, batch_size)):
                # Train neural network
                sess.run(optimizer, feed_dict={
                    x: batch_features,
                    y: batch_labels,
                    keep_prob: keep_probability})

            print('Epoch {:>2}, '.format(epoch + 1), end='')
            print('CIFAR-10 Batch {}:  '.format(batch_i), end='')
            loss = sess.run(cost, feed_dict={
                x: batch_features,
                y: batch_labels,
                keep_prob: 1.})
            valid_acc = sess.run(accuracy, feed_dict={
                x: valid_features,
                y: valid_labels,
                keep_prob: 1.})
            end = time.time()
            print('Loss: {:>10.4f} '.format(loss), end='')
            print('Validation Accuracy: {:.6f} '.format(valid_acc), end='')
            print('({:.1f} sec)'.format(end - start))
Example #3
def train_single_batch():
    print('Checking the Training on a Single Batch...')
    with tf.Session() as sess:
        # Initializing the variables
        sess.run(tf.global_variables_initializer())
    
        # Training cycle
        for epoch in range(epochs):
            batch_i = 1
            for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
                train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
            print('Epoch {:>3}, Batch {} '.format(epoch + 1, batch_i), end='')
            print_stats(sess, batch_features, batch_labels, cost, accuracy)
    print("finished Checking the Training on a Single Batch...!!\n\n")
Example #4
def train_neural_network_full(optimizer, cost, accuracy, x, y, keep_prob,
                              keep_probability,
                              n_batches, batch_size, shuffle_data, train_phase,
                              valid_features, valid_labels,
                              epochs, load_data=False,
                              file_path='./training_progress/saved_progress'):
    
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Initializing the variables
        if load_data:
            print('Continue a started training...')
            saver.restore(sess, file_path)
        else:
            print('Starting training...')
            sess.run(tf.global_variables_initializer())
        
        start_time = time.time()
        
        # Training cycle
        for epoch in range(epochs):
            
            for batch_i in range(1, n_batches + 1):
                start_time_batch = time.time()
                
                for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size, shuffle_data=shuffle_data):
                    
                    train_neural_network_once(sess, optimizer, keep_probability, batch_features, batch_labels, x, y, keep_prob, train_phase)
                    
                end_time_batch = time.time()
                time_dif = str(timedelta(seconds=int(round(end_time_batch - start_time_batch))))
                aux_text = print_stats(sess, batch_features, batch_labels, batch_size, cost, accuracy, x, y, keep_prob, train_phase)                  
                print('Epoch {:>2}, time  {} sec, CIFAR-10 Batch {} - Training {}'.format(epoch + 1, time_dif, batch_i, aux_text))
                
            # Print validation cost and accuracy once per epoch (validation
            # uses more samples, so it is too costly to run on every batch)
            aux_text = print_stats(sess, valid_features, valid_labels, batch_size, cost, accuracy, x, y, keep_prob, train_phase)
            print('Epoch {:>2} Finished - Validation {}'.format(epoch + 1, aux_text))
            
            
        # Print the time-usage.
        end_time = time.time()
        time_dif = end_time - start_time
        print("Time usage: " + str(timedelta(seconds = int(round(time_dif)))))
        
        save_path = saver.save(sess, file_path)
Example #5
def r_train(data, valid_data, r_x, r_keep_prob):
    # r_y, valid_labels and batch_size are assumed to be defined globally
    n_batches = 5
    r_epoches = 15
    r_prediction = recurrent_neural_network(r_x, r_keep_prob)

    r_cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=r_prediction,
                                                labels=r_y))
    r_optimizer = tf.train.AdamOptimizer().minimize(r_cost)

    with tf.Session() as r_sess:
        r_sess.run(tf.global_variables_initializer())
        for epoch in range(r_epoches):
            loss = 0
            i = 0
            for batch_i in range(1, n_batches + 1):
                for batch_features, batch_labels in helper.load_preprocess_training_batch(
                        batch_i, batch_size):

                    data_in = data[i]
                    _, c = r_sess.run([r_optimizer, r_cost],
                                      feed_dict={
                                          r_x: data_in,
                                          r_y: batch_labels,
                                          r_keep_prob: 0.9
                                      })
                    loss += c
                    i += 1

            print('Epoch', epoch, 'complete out of', r_epoches, 'loss:', loss)
            #print(r_prediction.shape)
            correct = tf.equal(tf.argmax(r_prediction, 1), tf.argmax(r_y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
            print(
                'Accuracy:',
                accuracy.eval({
                    r_x: valid_data,
                    r_y: valid_labels,
                    r_keep_prob: 1.0
                }))
Example #6
    def runTrainOnSingleBatch(self):

        print('Checking the Training on a Single Batch...')
        with tf.Session() as sess:
            # Initializing the variables
            sess.run(tf.global_variables_initializer())

            # Training cycle
            for epoch in range(epochs):
                batch_i = 1
                for batch_features, batch_labels in helper.load_preprocess_training_batch(
                        batch_i, batch_size):
                    train_neural_network(sess, optimizer, keep_probability,
                                         batch_features, batch_labels)
                print('Epoch {:>2}, CIFAR-10 Batch {}:  '.format(
                    epoch + 1, batch_i),
                      end='')
                print_stats(sess, batch_features, batch_labels, cost, accuracy)
        print("Model TrainOnSingleBatch Ran Successfully")
Example #7
def full_train(x, y, keep_prob, cost, optimizer, accuracy,
               epochs, batch_size, keep_probability,
               valid_features, valid_labels):
    print('\nFull Training...')
    save_model_path = './image_classification'
    with tf.Session() as sess:
        # Initializing the variables
        sess.run(tf.global_variables_initializer())

        # Training cycle
        for epoch in range(epochs):
            # Loop over all batches
            n_batches = 5
            for batch_i in range(1, n_batches + 1):
                start = time.time()
                for batch_features, batch_labels in (
                        helper.load_preprocess_training_batch(
                            batch_i, batch_size)):
                    # Train neural network
                    sess.run(optimizer, feed_dict={
                        x: batch_features,
                        y: batch_labels,
                        keep_prob: keep_probability})

                print('Epoch {:>2}, '.format(epoch + 1), end='')
                print('CIFAR-10 Batch {}:  '.format(batch_i), end='')
                loss = sess.run(cost, feed_dict={
                    x: batch_features,
                    y: batch_labels,
                    keep_prob: 1.})
                valid_acc = sess.run(accuracy, feed_dict={
                    x: valid_features,
                    y: valid_labels,
                    keep_prob: 1.})
                end = time.time()
                print('Loss: {:>10.4f} '.format(loss), end='')
                print('Validation Accuracy: {:.6f} '.format(valid_acc), end='')
                print('({:.1f} sec)'.format(end - start))

        # Save Model
        saver = tf.train.Saver()
        save_path = saver.save(sess, save_model_path)
Example #8
def train_on_5_batches():
    print('Training...on 5')
    with tf.Session() as sess:
        # Initializing the variables
        sess.run(tf.global_variables_initializer())
        
        # Training cycle
        for epoch in range(epochs):
            # Loop over all batches
            n_batches = 5
            for batch_i in range(1, n_batches + 1):
                for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
                    train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
                print('Epoch {:>3}, Batch {} '.format(epoch + 1, batch_i), end='')
                print_stats(sess, batch_features, batch_labels, cost, accuracy)
                if no_learning:
                    break                 # no reduction in loss was recorded for "a while"
                
        # Save Model
        saver = tf.train.Saver()
        save_path = saver.save(sess, save_model_path)
    print("finished Training...on 5 - Fully Train the Model!!\n\n")
Example #9
def train_cnn_all_batches(epochs, batch_size, keep_probability):
    save_model_path = '../model/image_classification'

    print('Training...')
    sess = tf.InteractiveSession()

    # Visualize graph and merge all the summaries
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('../tmp/cifar/24/train',
                                         sess.graph)
    test_writer = tf.summary.FileWriter('../tmp/cifar/24/test')

    # Initializing the variables
    sess.run(tf.global_variables_initializer())

    # Training cycle
    i = 0
    for epoch in range(epochs):
        # Loop over all batches
        n_batches = 5
        for batch_i in range(1, n_batches + 1):
            for batch_features, batch_labels in helper.load_preprocess_training_batch(
                    batch_i, batch_size):
                train_neural_network(sess, optimizer, keep_probability,
                                     batch_features, batch_labels,
                                     train_writer, merged, i)
                i += 1
            print('Epoch {:>2}, CIFAR-10 Batch {}:  '.format(
                epoch + 1, batch_i),
                  end='')
            print_stats(sess, batch_features, batch_labels, cost, accuracy)

    # Save Model
    saver = tf.train.Saver()
    save_path = saver.save(sess, save_model_path)

    train_writer.close()
    test_writer.close()
    sess.close()  # an InteractiveSession must be closed explicitly
Example #10
def __train_and_report(self, sess, graph, batch_ids, save_model_path):
    """
    Train cycle
    :param sess: tf.Session(graph)
    :param graph: dictionary storing all the necessary tensors and ops
    :param batch_ids: list of batch ids to use as training data
    :param save_model_path: path for storing the trained session
    """
    dir_processed_data = "./ProcessedData"
    valid_features, valid_labels = pickle.load(
        open('{}/preprocess_validation.p'.format(dir_processed_data),
             mode='rb'))
    # Initializing the variables
    sess.run(tf.global_variables_initializer())
    # Training cycle
    for epoch in range(NeuralNetwork.epochs):
        # Loop over all batches
        for batch_i in batch_ids:
            for batch_features, batch_labels in \
                    helper.load_preprocess_training_batch(
                        batch_i, NeuralNetwork.batch_size):
                self.train_neural_network(sess, graph['x'], graph['y'],
                                          graph['keep_prob'],
                                          graph['optimizer'],
                                          NeuralNetwork.keep_probability,
                                          batch_features, batch_labels)
            logger.info('\nEpoch {:>2}, CIFAR-10 Batch {}:'.format(
                epoch + 1, batch_i))
            if len(batch_features) != 0 and len(batch_labels) != 0:
                self.print_stats(sess, graph['x'], graph['y'],
                                 graph['keep_prob'], batch_features,
                                 batch_labels, valid_features,
                                 valid_labels, graph['cost'],
                                 graph['accuracy'])
    # Save Model
    saver = tf.train.Saver()
    saver.save(sess, save_model_path)
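
A hypothetical driver for the method above - a sketch only: `build_graph` is an assumed helper returning the `tf.Graph` plus a dictionary of its tensors and ops, and the call is made from inside the same class, since the double-underscore name is private:

def train(self, save_model_path='./image_classification'):
    # build_graph is a hypothetical builder returning the tf.Graph and a
    # dict with 'x', 'y', 'keep_prob', 'optimizer', 'cost' and 'accuracy'
    tf_graph, graph = self.build_graph()
    with tf.Session(graph=tf_graph) as sess:
        self.__train_and_report(sess, graph,
                                batch_ids=[1, 2, 3, 4, 5],
                                save_model_path=save_model_path)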
Example #11
# In[ ]:


"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_model_path = './image_classification_sol'

with tf.Session() as sess:
    # Initializing the variables
    sess.run(tf.global_variables_initializer())

    # Training cycle
    for epoch in range(epochs):
        # Loop over all batches
        n_batches = 5
        for batch_i in range(1, n_batches + 1):
            for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
                train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
            print('Epoch {:>2}, CIFAR-10 Batch {}:  '.format(epoch + 1, batch_i), end='')
            print_stats(sess, batch_features, batch_labels, cost, accuracy)

    # Save Model
    saver = tf.train.Saver()
    save_path = saver.save(sess, save_model_path)


# # Checkpoint
# The model has been saved to disk.
# ## Test Model
# Test your model against the test dataset.  This will be your final accuracy. You should have an accuracy greater than 50%. If you don't, keep tweaking the model architecture and parameters.

# In[ ]:
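

A minimal sketch of what this test cell would contain, under two assumptions not shown in this excerpt: the preprocessing step pickled the test set to 'preprocess_test.p', and the tensors were registered under the names 'x', 'y', 'keep_prob', and 'accuracy' when the graph was built. Adjust the names to match your model.

import pickle
import tensorflow as tf

def test_model(save_model_path='./image_classification_sol', batch_size=64):
    test_features, test_labels = pickle.load(
        open('preprocess_test.p', mode='rb'))

    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        # Load the saved graph definition and weights
        loader = tf.train.import_meta_graph(save_model_path + '.meta')
        loader.restore(sess, save_model_path)

        loaded_x = loaded_graph.get_tensor_by_name('x:0')
        loaded_y = loaded_graph.get_tensor_by_name('y:0')
        loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
        loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')

        # Average the accuracy over the test set in batches
        total_acc, n = 0.0, 0
        for i in range(0, len(test_features), batch_size):
            total_acc += sess.run(loaded_acc, feed_dict={
                loaded_x: test_features[i:i + batch_size],
                loaded_y: test_labels[i:i + batch_size],
                loaded_keep_prob: 1.0})
            n += 1
        print('Testing Accuracy: {:.6f}'.format(total_acc / n))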
Example #12
def train(x, keep_prob):
    # The graph-level seed must be set before the network is built for it
    # to take effect
    tf.set_random_seed(1000)
    prediction, features = convolutional_neural_network(x, keep_prob)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    featureset = []
    #featureset_valid = []

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())

        for epoch in range(epoches):
            loss = 0
            n_batches = 5
            for batch_i in range(1, n_batches + 1):
                for batch_features, batch_labels in helper.load_preprocess_training_batch(
                        batch_i, batch_size):
                    #epoch_x = epoch_x.reshape((batch_size,chunks,chunk_size))
                    _, c = sess.run([optimizer, cost],
                                    feed_dict={
                                        x: batch_features,
                                        y: batch_labels,
                                        keep_prob: 0.9
                                    })
                    features_n = features.eval({
                        x: batch_features,
                        y: batch_labels,
                        keep_prob: 0.9
                    })

                    features_n = np.array(features_n, dtype=np.float32)

                    featureset.append(features_n)
                    loss += c
            print('Epoch', epoch, 'complete out of', epoches, 'loss:', loss)
            #print(prediction.shape)
            features_valid = features.eval({
                x: valid_features,
                y: valid_labels,
                keep_prob: 1.0
            })

            correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
            print(
                'Accuracy:',
                accuracy.eval({
                    x: valid_features,
                    y: valid_labels,
                    keep_prob: 1.0
                }))

        print(
            'Accuracy:',
            accuracy.eval({
                x: test_features,
                y: test_labels,
                keep_prob: 1.0
            }))
        #featureset = np.array(featureset[-5:])
        return featureset, features_valid
Example #13
# In[16]:


"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
    # Initializing the variables
    sess.run(tf.global_variables_initializer())
    
    # Training cycle
    for epoch in range(epochs):
        batch_i = 1
        for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
            train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
        print('Epoch {:>2}, CIFAR-10 Batch {}:  '.format(epoch + 1, batch_i), end='')
        print_stats(sess, batch_features, batch_labels, cost, accuracy)


# ### Fully Train the Model
# Now that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches.

# In[17]:


"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_model_path = './image_classification'
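
The rest of this cell is cut off in this excerpt; its body is the same five-batch training loop shown verbatim in Example #1 and Example #11 - a sketch:

print('Training...')
with tf.Session() as sess:
    # Initializing the variables
    sess.run(tf.global_variables_initializer())

    # Training cycle
    for epoch in range(epochs):
        # Loop over all batches
        n_batches = 5
        for batch_i in range(1, n_batches + 1):
            for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
                train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
            print('Epoch {:>2}, CIFAR-10 Batch {}:  '.format(epoch + 1, batch_i), end='')
            print_stats(sess, batch_features, batch_labels, cost, accuracy)

    # Save Model
    saver = tf.train.Saver()
    save_path = saver.save(sess, save_model_path)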
Example #14
if __name__ == '__main__':
    cnn_helper = CNN()
    cnn_helper.preprocess()
    cnn_helper.state_valids()

    cnn_helper.reset_graph()
    cnn_helper.initialize()

    print('Training Image Classifier')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(cnn_helper.epochs):
            for batch_i in range(1, 6):
                for batch_features, batch_labels in \
                        helper.load_preprocess_training_batch(
                            batch_i, cnn_helper.batch_size):
                    cnn_helper.train_neural_network(
                        sess, cnn_helper.optimizer,
                        cnn_helper.keep_probability, batch_features,
                        batch_labels)

                # Report stats once per CIFAR-10 batch, after all of its
                # mini-batches have been trained on
                print('Epoch {:>2}, CIFAR-10 Batch {}:  '
                      .format(epoch + 1, batch_i), end='')
                cnn_helper.print_stats(sess, batch_features, batch_labels,
                                       cnn_helper.cost, cnn_helper.accuracy)

        # Save the model while the session is still open
        print('Saving Model')
        save_path = tf.train.Saver().save(sess, save_model_path)
Example #15
def train_convergence():
    msg = "Tune Parameters:    epochs  {0:>5},  batch_size  {1:>5}, keep_probability: {2:>5.0%} "
    logging.info(msg.format(epochs, batch_size, keep_probability))
    print (msg.format(epochs, batch_size,  keep_probability))
    msg = "lowest_loss {0:>5},    stop  {1:>5},  no_learning: {2}                     i:{3}"
    logging.info(msg.format(lowest_loss, stop, no_learning, i))
    print (msg.format(lowest_loss, stop, no_learning, i))

    save_model_path = './image_classification'
    
    chp_dir = 'chapters/'
    if not os.path.exists(chp_dir):
        os.makedirs(chp_dir)
    chp_path = os.path.join(chp_dir, 'chapter')
    
    if Manual_load:                             # for "manual" chapter loads
        save_model_path = "./chapter3"
        
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Initializing the variables
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, save_model_path)
        logging.info("restored : %s", save_model_path)
        print("restored :", save_model_path)

        if Manual_load:                         # after a manual load
            save_model_path = './image_classification'
            
        # Training cycle
        print('...Continue training...')
        for epoch in range(epochs):
            # Checkpoint the session at the start of each epoch
            chp_path_num = chp_path + str(epoch)
            chapter = saver.save(sess, chp_path_num)
            msg="saved {:>30}"
            logging.info(msg.format(chp_path_num))
            print(msg.format(chp_path_num))
            # Loop over all batches
            n_batches = 5
            for batch_i in range(1, n_batches + 1):
                for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
                    train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
                msg='Epoch {:>3}, Batch {} '
                print(msg.format((epoch + 1), batch_i), end='')
                logging.info(msg.format(epoch + 1, batch_i))
                print_stats(sess, batch_features, batch_labels, cost, accuracy)
                if no_learning:
                    msg = "no_learning=={0} - no reduction in loss recorded since epoch {1}"
                    logging.info(msg.format(no_learning, best_epoch))
                    print(msg.format(no_learning, best_epoch))
                    break                 # stop the batch loop
            if no_learning:
                msg = "no_learning=={0} - no reduction in loss recorded since epoch {1}"
                logging.info(msg.format(no_learning, best_epoch))
                print(msg.format(no_learning, best_epoch))
                break                 # stop the epoch loop as well

        # Save Model (reuse the saver created above)
        save_path = saver.save(sess, save_model_path)
    logging.info("finished ...Continue training !!!!\n")
    print("finished ...Continue training - train_convergence !!!!\n\n")