Example #1
# Imports assumed by this example (TensorFlow 1.x):
import time

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image


def detect(img_path, saved_model_weights):
    sample_img = Image.open(img_path)
    plt.imshow(sample_img)
    plt.show()

    # Invert and scale pixel values into [0, 1], then add a batch dimension.
    pix = np.array(sample_img)
    norm_pix = (255 - pix) * 1.0 / 255.0
    exp = np.expand_dims(norm_pix, axis=0)

    X = tf.placeholder(tf.float32, shape=(1, 64, 64, 3))
    [logits_1, logits_2, logits_3, logits_4, logits_5] = regression_head(X)

    predict = tf.stack([  # tf.pack was renamed to tf.stack in TF 1.0
        tf.nn.softmax(logits_1),
        tf.nn.softmax(logits_2),
        tf.nn.softmax(logits_3),
        tf.nn.softmax(logits_4),
        tf.nn.softmax(logits_5)
    ])

    # argmax over the label axis gives shape [5, 1]; transpose -> [1, 5] (one class index per digit position)
    best_prediction = tf.transpose(tf.argmax(predict, 2))

    saver = tf.train.Saver()
    with tf.Session() as session:
        saver.restore(session, "regression.ckpt")
        print "Model restored."

        print "Initialized"
        feed_dict = {X: exp}
        start_time = time.time()
        predictions = session.run(best_prediction, feed_dict=feed_dict)
        pred = prediction_to_string(predictions[0])
        end_time = time.time()
        print "Best Prediction", pred, "made in", end_time - start_time
Example #3
def detect(img_path, saved_model_weights):
    #image = Image.open(img_path)
    #sample_img = tf.image.resize_images(image, (64, 64))

    image = tf.image.decode_png(tf.read_file(img_path), channels=3)
    image = tf.image.resize_images(image, [64, 64])
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    sample_img = tf.reshape(image, [1, 64, 64, 3])
    with tf.Session().as_default():
        sample_img = sample_img.eval()


#    plt.imshow(sample_img)
#    plt.show()

    pix = np.array(sample_img)
    norm_pix = (255 - pix) * 1.0 / 255.0
    exp = np.expand_dims(norm_pix, axis=0)

    print(norm_pix.shape)  # expected: (1, 64, 64, 3)

    X = tf.placeholder(tf.float32, shape=(1, 64, 64, 3))
    [logits_1, logits_2, logits_3, logits_4, logits_5] = regression_head(X)

    predict = tf.stack([
        tf.nn.softmax(logits_1),
        tf.nn.softmax(logits_2),
        tf.nn.softmax(logits_3),
        tf.nn.softmax(logits_4),
        tf.nn.softmax(logits_5)
    ])

    best_prediction = tf.transpose(tf.argmax(predict, 2))

    saver = tf.train.Saver()
    with tf.Session() as session:
        saver.restore(session, "regression.ckpt")
        print "Model restored."

        print "Initialized"
        #feed_dict = {X: exp}
        feed_dict = {X: norm_pix}

        start_time = time.time()
        predictions = session.run(best_prediction, feed_dict=feed_dict)
        pred = prediction_to_string(predictions[0])
        end_time = time.time()
        print "Best Prediction", pred, "made in", end_time - start_time
Example #4
def detect(img_path, saved_model_weights):
    # load image to be run through model (for output of model's label prediction)
    sample_img = cv.imread(img_path)
    print('sample_img.shape', sample_img.shape)
    sample_img = Image.open(img_path)  # image extension *.png,*.jpg

    # resize the image (using PIL) if it doesn't already have the shape the TF model expects: (64, 64, 3)
    img = cv.imread(img_path)
    print('img.shape', img.shape)
    if img.shape != (64, 64, 3):
        print('resizing image now, so it has shape (64,64,3)  !')
        new_width = 64
        new_height = 64
        sample_img = sample_img.resize((new_width, new_height), Image.ANTIALIAS)
        sample_img.save('output image name.png')  # output format can be whatever you want: *.png, *.jpg, *.gif

    plt.imshow(sample_img)
    plt.show()

    # normalize the pixel values for input into the model (invert and scale into [0, 1]; ideally inputs would be standardized to mean 0, variance 1)
    pix = np.array(sample_img)
    norm_pix = (255 - pix) * 1.0 / 255.0
    exp = np.expand_dims(norm_pix, axis=0)

    X = tf.placeholder(tf.float32, shape=(1, 64, 64, 3))
    [logits_1, logits_2, logits_3, logits_4, logits_5] = regression_head(
        X, dropout=False
    )  # should not use dropout for predictions (regardless of whether dropout used in training regressor)
    # logits_1.shape = [ BATCH_SIZE = 1 , NUM_LABELS = 11 ]

    predict = tf.stack([
        tf.nn.softmax(logits=logits_1),
        tf.nn.softmax(logits=logits_2),
        tf.nn.softmax(logits=logits_3),
        tf.nn.softmax(logits=logits_4),
        tf.nn.softmax(logits=logits_5)
    ])
    # prediction.shape = [NUM_LOGITS = 5 , BATCH_SIZE = 1 , NUM_LABELS = 11]

    pred_tmp = tf.argmax(predict, 2)
    # pred_tmp.shape = [NUM_LOGITS = 5 , BATCH_SIZE = 1]

    best_prediction = tf.squeeze(pred_tmp)
    # best_prediction.shape = [NUM_LOGITS = 5]
    #pred_tmp2 = tf.reshape(best_prediction,[1,999,999999])

    # restore the saved weights (the graph can be specified via the .meta file; weights come from a checkpoint (.ckpt) file)
    saver = tf.train.Saver()

    with tf.Session() as session:
        #saver = tf.train.import_meta_graph('./regression.ckpt.meta')
        #saver.restore(session, saved_model_weights)
        optimistic_restore(session=session, save_file=WEIGHTS_FILE)
        print("Model restored.")

        print("Initialized")
        feed_dict = {X: exp}
        start_time = time.time()
        predictions = session.run(best_prediction, feed_dict=feed_dict)
        print('predictions tensor:', predictions)
        pred = prediction_to_string(predictions)  # predictions is already 1-D here (after tf.squeeze), so no [0] index is needed
        end_time = time.time()
        print("Best Prediction", pred, "made in", end_time - start_time,
              "seconds.")
Example #5
def train_regressor(train_data, train_labels, valid_data, valid_labels,
                    test_data, test_labels, train_size, saved_weights_path):
    global_step = tf.Variable(0, trainable=False)
    # This is where training samples and labels are fed to the graph.
    with tf.name_scope('input'):
        images_placeholder = tf.placeholder(tf.float32,
                                            shape=(BATCH_SIZE, IMG_HEIGHT,
                                                   IMG_WIDTH, NUM_CHANNELS))

    with tf.name_scope('image'):
        tf.summary.image('input', images_placeholder, 10)

    labels_placeholder = tf.placeholder(tf.int32,
                                        shape=(BATCH_SIZE, LABELS_LEN))

    [logits_1, logits_2, logits_3, logits_4, logits_5] = regression_head(images_placeholder, True)

    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_1, labels=labels_placeholder[:, 1])) +\
        tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_2, labels=labels_placeholder[:, 2])) +\
        tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_3, labels=labels_placeholder[:, 3])) +\
        tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_4, labels=labels_placeholder[:, 4])) +\
        tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_5, labels=labels_placeholder[:, 5]))

    learning_rate = tf.train.exponential_decay(LEARN_RATE, global_step*BATCH_SIZE, train_size, DECAY_RATE)
    tf.summary.scalar('learning_rate', learning_rate)

    # Optimizer: set up a variable that's incremented once per batch
    with tf.name_scope('train'):
        optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(loss, global_step=global_step)

    prediction = tf.stack([tf.nn.softmax(regression_head(images_placeholder)[0]),
                                tf.nn.softmax(regression_head(images_placeholder)[1]),
                                tf.nn.softmax(regression_head(images_placeholder)[2]),
                                tf.nn.softmax(regression_head(images_placeholder)[3]),
                                tf.nn.softmax(regression_head(images_placeholder)[4])])

    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()

    start_time = time.time()
    # Create a local session to run the training.
    with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
        init_op = tf.global_variables_initializer()
        # Restore variables from disk.
        if saved_weights_path:
            saver.restore(sess, saved_weights_path)
            print("Model restored.")

        # Inspect the classifier checkpoint's variables (the shape map itself is not used further here).
        reader = tf.train.NewCheckpointReader("classifier.ckpt")
        reader.get_variable_to_shape_map()

        # Run all the initializers to prepare the trainable parameters.
        sess.run(init_op)

        # Add histograms for trainable variables.
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)

        with tf.name_scope('accuracy'):
            with tf.name_scope('correct_prediction'):
                best = tf.transpose(prediction, [1, 2, 0])  # permute n_steps and batch_size
                lb = tf.cast(labels_placeholder[:, 1:6], tf.int64)
                correct_prediction = tf.equal(tf.argmax(best, 1), lb)
            with tf.name_scope('accuracy'):
                accuracy = tf.reduce_sum(tf.cast(correct_prediction, tf.float32)) / prediction.get_shape().as_list()[1] / prediction.get_shape().as_list()[0]
            tf.summary.scalar('accuracy', accuracy)

        # Prepare variables for TensorBoard
        merged = tf.summary.merge_all()

        # train_writer = tf.train.SummaryWriter(TENSORBOARD_SUMMARIES_DIR + '/train', sess.graph)
        train_writer = tf.summary.FileWriter('logs/board_train_writer')  # create writer
        train_writer.add_graph(sess.graph)

        # valid_writer = tf.train.SummaryWriter(TENSORBOARD_SUMMARIES_DIR + '/validation')
        valid_writer = tf.summary.FileWriter('logs/board_valid_writer')  # create writer
        valid_writer.add_graph(sess.graph)

        # Save an initial checkpoint alongside the summaries.
        saver.save(sess, save_path=TENSORBOARD_SUMMARIES_DIR + '/train', global_step=global_step)
        saver.save(sess, save_path=TENSORBOARD_SUMMARIES_DIR + '/validation', global_step=global_step)

        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        # Loop through training steps.
        for step in range(int(NUM_EPOCHS * train_size) // BATCH_SIZE):
            duration = time.time() - start_time
            examples_per_sec = BATCH_SIZE / duration

            # Run the graph and fetch some of the nodes.
            # This dictionary maps the batch data (as a numpy array) to the node in the graph it should be fed to.
            train_feed_dict = fill_feed_dict(train_data, train_labels, images_placeholder, labels_placeholder, step)
            _, l, lr, acc, predictions = sess.run([optimizer, loss, learning_rate,
                                                  accuracy, prediction],
                                                  feed_dict=train_feed_dict)

            train_batched_labels = list(train_feed_dict.values())[1]

            if step % 1000 == 0:
                valid_feed_dict = fill_feed_dict(valid_data, valid_labels, images_placeholder, labels_placeholder, step)
                valid_batch_labels = list(valid_feed_dict.values())[1]

                valid_summary, _, l, lr, valid_acc = sess.run([merged, optimizer, loss, learning_rate, accuracy],
                feed_dict=valid_feed_dict, options=run_options, run_metadata=run_metadata)
                print('Validation Accuracy: %.2f' % valid_acc)
                valid_writer.add_run_metadata(run_metadata, 'step%03d' % step)
                valid_writer.add_summary(valid_summary, step)

                train_summary, _, l, lr, train_acc = sess.run([merged, optimizer, loss, learning_rate, accuracy],
                    feed_dict=train_feed_dict)
                train_writer.add_run_metadata(run_metadata, 'step%03d' % step)
                train_writer.add_summary(train_summary, step)
                print('Training Set Accuracy: %.2f' % train_acc)
                print('Adding run metadata for', step)

            elif step % 100 == 0:
                elapsed_time = time.time() - start_time
                start_time = time.time()

                format_str = ('%s: step %d, loss = %.2f  learning rate = %.2f  (%.1f examples/sec; %.3f ''sec/batch)')
                print (format_str % (datetime.now(), step, l, lr, examples_per_sec, duration))

                print('Minibatch accuracy2: %.2f' % acc)
                sys.stdout.flush()

        # Note: because `optimizer` is in the fetch list, this final run also performs a training update on the test batch.
        test_feed_dict = fill_feed_dict(test_data, test_labels, images_placeholder, labels_placeholder, step)
        _, l, lr, test_acc = sess.run([optimizer, loss, learning_rate, accuracy], feed_dict=test_feed_dict, options=run_options, run_metadata=run_metadata)
        print('Test accuracy: %.2f' % test_acc)

        # Save the variables to disk.
        save_path = saver.save(sess, "regression.ckpt")
        print("Model saved in file: %s" % save_path)

        train_writer.close()
        valid_writer.close()
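The train_regressor examples rely on a fill_feed_dict helper that is not reproduced on this page. A minimal sketch of what it might do, assuming the data and labels are NumPy arrays and batches are taken by stepping through the arrays with wrap-around (the name and slicing strategy are assumptions):

def fill_feed_dict(data, labels, images_placeholder, labels_placeholder, step):
    # Take the next BATCH_SIZE examples, wrapping around the dataset as `step` grows.
    offset = (step * BATCH_SIZE) % (labels.shape[0] - BATCH_SIZE)
    batch_data = data[offset:offset + BATCH_SIZE]
    batch_labels = labels[offset:offset + BATCH_SIZE]
    return {images_placeholder: batch_data, labels_placeholder: batch_labels}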
def train_regressor(train_data, train_labels, valid_data, valid_labels,
                    test_data, test_labels, train_size, saved_weights_path):
    global_step = tf.Variable(0, trainable=False)
    # This is where training samples and labels are fed to the graph.
    with tf.name_scope('input'):
        images_placeholder = tf.placeholder(tf.float32,
                                            shape=(BATCH_SIZE, IMG_HEIGHT,
                                                   IMG_WIDTH, NUM_CHANNELS))

    with tf.name_scope('image'):
        tf.image_summary('input', images_placeholder, 10)

    labels_placeholder = tf.placeholder(tf.int32,
                                        shape=(BATCH_SIZE, LABELS_LEN))

    [logits_1, logits_2, logits_3, logits_4, logits_5] = regression_head(images_placeholder, True)

    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits_1, labels_placeholder[:, 1])) +\
        tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits_2, labels_placeholder[:, 2])) +\
        tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits_3, labels_placeholder[:, 3])) +\
        tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits_4, labels_placeholder[:, 4])) +\
        tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits_5, labels_placeholder[:, 5]))

    learning_rate = tf.train.exponential_decay(LEARN_RATE, global_step*BATCH_SIZE, train_size, DECAY_RATE)
    tf.scalar_summary('learning_rate', learning_rate)

    # Optimizer: set up a variable that's incremented once per batch
    with tf.name_scope('train'):
        optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(loss, global_step=global_step)

    prediction = tf.pack([tf.nn.softmax(regression_head(images_placeholder)[0]),
                                tf.nn.softmax(regression_head(images_placeholder)[1]),
                                tf.nn.softmax(regression_head(images_placeholder)[2]),
                                tf.nn.softmax(regression_head(images_placeholder)[3]),
                                tf.nn.softmax(regression_head(images_placeholder)[4])])

    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()

    start_time = time.time()
    # Create a local session to run the training.
    with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
        init_op = tf.initialize_all_variables()
        # Restore variables from disk.
        if saved_weights_path:
            saver.restore(sess, saved_weights_path)
            print("Model restored.")

        reader = tf.train.NewCheckpointReader("classifier.ckpt")
        reader.get_variable_to_shape_map()

        # Run all the initializers to prepare the trainable parameters.
        sess.run(init_op)

        # Add histograms for trainable variables.
        for var in tf.trainable_variables():
            tf.histogram_summary(var.op.name, var)

        with tf.name_scope('accuracy'):
            with tf.name_scope('correct_prediction'):
                best = tf.transpose(prediction, [1, 2, 0])  # permute n_steps and batch_size
                lb = tf.cast(labels_placeholder[:, 1:6], tf.int64)
                correct_prediction = tf.equal(tf.argmax(best, 1), lb)
            with tf.name_scope('accuracy'):
                accuracy = tf.reduce_sum(tf.cast(correct_prediction, tf.float32)) / prediction.get_shape().as_list()[1] / prediction.get_shape().as_list()[0]
            tf.scalar_summary('accuracy', accuracy)

        # Prepare variables for TensorBoard
        merged = tf.merge_all_summaries()
        train_writer = tf.train.SummaryWriter(TENSORBOARD_SUMMARIES_DIR + '/train', sess.graph)
        valid_writer = tf.train.SummaryWriter(TENSORBOARD_SUMMARIES_DIR + '/validation')

        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        # Loop through training steps.
        for step in xrange(int(NUM_EPOCHS * train_size) // BATCH_SIZE):
            duration = time.time() - start_time
            examples_per_sec = BATCH_SIZE / duration

            # Run the graph and fetch some of the nodes.
            # This dictionary maps the batch data (as a numpy array) to the node in the graph it should be fed to.
            train_feed_dict = fill_feed_dict(train_data, train_labels, images_placeholder, labels_placeholder, step)
            _, l, lr, acc, predictions = sess.run([optimizer, loss, learning_rate,
                                                  accuracy, prediction],
                                                  feed_dict=train_feed_dict)

            train_batched_labels = train_feed_dict.values()[1]

            if step % 1000 == 0:
                valid_feed_dict = fill_feed_dict(valid_data, valid_labels, images_placeholder, labels_placeholder, step)
                valid_batch_labels = valid_feed_dict.values()[1]

                valid_summary, _, l, lr, valid_acc = sess.run([merged, optimizer, loss, learning_rate, accuracy],
                feed_dict=valid_feed_dict, options=run_options, run_metadata=run_metadata)
                print('Validation Accuracy: %.2f' % valid_acc)
                valid_writer.add_run_metadata(run_metadata, 'step%03d' % step)
                valid_writer.add_summary(valid_summary, step)

                train_summary, _, l, lr, train_acc = sess.run([merged, optimizer, loss, learning_rate, accuracy],
                    feed_dict=train_feed_dict)
                train_writer.add_run_metadata(run_metadata, 'step%03d' % step)
                train_writer.add_summary(train_summary, step)
                print('Training Set Accuracy: %.2f' % train_acc)
                print('Adding run metadata for', step)

            elif step % 100 == 0:
                elapsed_time = time.time() - start_time
                start_time = time.time()

                format_str = ('%s: step %d, loss = %.2f  learning rate = %.2f  (%.1f examples/sec; %.3f ''sec/batch)')
                print (format_str % (datetime.now(), step, l, lr, examples_per_sec, duration))

                print('Minibatch accuracy2: %.2f' % acc)
                sys.stdout.flush()

        test_feed_dict = fill_feed_dict(test_data, test_labels, images_placeholder, labels_placeholder, step)
        _, l, lr, test_acc = sess.run([optimizer, loss, learning_rate, accuracy], feed_dict=test_feed_dict, options=run_options, run_metadata=run_metadata)
        print('Test accuracy: %.2f' % test_acc)

        # Save the variables to disk.
        save_path = saver.save(sess, "regression.ckpt")
        print("Model saved in file: %s" % save_path)

        train_writer.close()
        valid_writer.close()
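The variant above is written against a pre-1.0 TensorFlow API. If it is run under TF 1.x, the deprecated calls map roughly as follows (a reference note, not part of the original example):

# pre-1.0 API                        ->  TF 1.x equivalent
# tf.pack(...)                       ->  tf.stack(...)
# tf.image_summary(...)              ->  tf.summary.image(...)
# tf.scalar_summary(...)             ->  tf.summary.scalar(...)
# tf.histogram_summary(...)          ->  tf.summary.histogram(...)
# tf.merge_all_summaries()           ->  tf.summary.merge_all()
# tf.train.SummaryWriter(...)        ->  tf.summary.FileWriter(...)
# tf.initialize_all_variables()      ->  tf.global_variables_initializer()
# tf.nn.sparse_softmax_cross_entropy_with_logits(l, y)
#                                    ->  tf.nn.sparse_softmax_cross_entropy_with_logits(logits=l, labels=y)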
def train_regressor(train_data, train_labels, valid_data, valid_labels, test_data, test_labels, train_size, saved_weights_path, num_digits_in_data_subset):
    global_step = tf.Variable(0, trainable=False)

    print('train_data.shape', train_data.shape, 'train_labels.shape', train_labels.shape,
          'valid_data.shape', valid_data.shape, 'valid_labels.shape', valid_labels.shape,
          'test_data.shape', test_data.shape)



    # This is where training samples and labels are fed to the graph.
    with tf.name_scope('input'):
        images_placeholder = tf.placeholder(tf.float32, shape=(BATCH_SIZE, IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS), name='images_placeholder')

    with tf.name_scope('image'):
        tf.summary.image('input', images_placeholder, 10)

    labels_placeholder = tf.placeholder(tf.int32, shape=(BATCH_SIZE, LABELS_LEN), name='labels_placeholder')

    logits = [0., 0., 0., 0., 0., 0.]  # don't use element 0; use elements 1-5
    [logits[1], logits[2], logits[3], logits[4], logits[5]] = regression_head(images_placeholder, dropout=False) # dropout=True
    # logits[1].shape = [ BATCH_SIZE = 32 , NUM_LABELS = 11 ]

    # The loss op uses SPARSE softmax cross-entropy because the labels are integer class indices (not one-hot 0-to-1 probability vectors).
    # Loss is evaluated by comparing the predictions (logits 1 to 5) against labels[1:6], i.e. the labels for digits 1 to 5.
    # Label [0] holds the number of digits and is not actually used by the regressor model.

    mask_for_digit = [1.,1.,1.,1.,1.,1.]
    #mask_for_digit = [0.,0.,0.,0.,0.,0.]
    #for i in range(num_digits_in_data_subset+1):
    #    mask_for_digit[i] = 1.

    with tf.name_scope('loss'):
        loss = mask_for_digit[1] * tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits[1], labels=labels_placeholder[:, 1])) +\
            mask_for_digit[2] *tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits[2], labels=labels_placeholder[:, 2])) +\
            mask_for_digit[3] *tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits[3], labels=labels_placeholder[:, 3])) +\
            mask_for_digit[4] *tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits[4], labels=labels_placeholder[:, 4])) +\
            mask_for_digit[5] *tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits[5], labels=labels_placeholder[:, 5]))
        tf.summary.scalar('loss', loss)

    # m = np.ma.masked_where(y>2, y)   # filter out values larger than 5

        # cdt TBC
        # loss = [,,,,,] # dont use element 0, use elements 1-5
        # i = 0
        # while (i+1) <= num_digits:
        #     for i in range(5):
        #         loss[i+1] = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits[i+1], labels=labels_placeholder[:, i+1]))
        #         loss_string = 'loss' + str(i+1)
        #         tf.summary.scalar(loss_string, loss[i+1])
        loss1 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits[1], labels=labels_placeholder[:, 1]))
        tf.summary.scalar('loss1', loss1)
        loss2 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits[2], labels=labels_placeholder[:, 2]))
        tf.summary.scalar('loss2', loss2)
        loss3 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits[3], labels=labels_placeholder[:, 3]))
        tf.summary.scalar('loss3', loss3)
        loss4 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits[4], labels=labels_placeholder[:, 4]))
        tf.summary.scalar('loss4', loss4)
        loss5 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits[5], labels=labels_placeholder[:, 5]))
        tf.summary.scalar('loss5', loss5)

    learning_rate = tf.train.exponential_decay(LEARN_RATE, global_step*BATCH_SIZE, train_size, DECAY_RATE)
    tf.summary.scalar('learning_rate', learning_rate)

    # Optimizer: set up a variable that's incremented once per batch
    with tf.name_scope('train'):
        optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # Build a separate prediction op, which SHOULD NOT use dropout. We therefore do not reuse [logits[1] ... logits[5]],
    # since those could have been built with dropout (although this regressor does not actually use dropout,
    # in contrast to the classifier, which does).
    prediction = tf.stack([tf.nn.softmax(regression_head(images_placeholder, dropout=False)[0]),
                                tf.nn.softmax(regression_head(images_placeholder, dropout=False)[1]),
                                tf.nn.softmax(regression_head(images_placeholder, dropout=False)[2]),
                                tf.nn.softmax(regression_head(images_placeholder, dropout=False)[3]),
                                tf.nn.softmax(regression_head(images_placeholder, dropout=False)[4])]) # note: for regression prediction, dropout=False
    # prediction.shape = [NUM_LOGITS = 5 , BATCH_SIZE = 32 , NUM_LABELS = 11]

    # tf.stack(values, axis=0, name='stack'): given a list of N tensors of shape (A, B, C), axis=0 gives an
    # output of shape (N, A, B, C); axis=1 gives (A, N, B, C).

    pred_tmp = tf.argmax(prediction, 2) # pred_tmp.shape = [ NUM_LOGITS = 5 , BATCH_SIZE = 32 ]
    # prediction for first image in the batch
    prediction_0 = pred_tmp[:,0]  # prediction_0.shape = [ NUM_LOGITS = 5 ]

    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction_tensor'):
            best = tf.transpose(prediction, [1, 2, 0])  # tf.transpose permutes n_steps and batch_size
            lb = tf.cast(labels_placeholder[:, 1:6], tf.int64)
            correct_prediction = tf.equal(tf.argmax(best, 1), lb)
        with tf.name_scope('accuracy_scalar'):
            accuracy = tf.reduce_sum(tf.cast(correct_prediction, tf.float32)) / prediction.get_shape().as_list()[1] / prediction.get_shape().as_list()[0]
            tf.summary.scalar('accuracy', accuracy)
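            # For example, with BATCH_SIZE = 32 and 5 digit positions there are 32 * 5 = 160 digit slots per batch,
            # so this accuracy scalar is (number of correctly predicted digit slots) / 160.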

    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()

    start_time = time.time()
    # Create a local session to run the training.
    with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
        init_op = tf.global_variables_initializer()
        # Run all the initializers to prepare the trainable parameters (done before the restore,
        # so that restored weights are not overwritten by the initializer).
        sess.run(init_op)

        # Restore variables from disk.
        if saved_weights_path:
            string_meta_path = saved_weights_path  # + '.meta'
            print('string_meta_path', string_meta_path)

            # optimistic_restore: restores weights from the classifier checkpoint (the regressor model re-uses
            # layers 1-3 of the classifier), since we want to start from the trained weights. For each variable
            # in the regressor graph, the value is restored only if that variable is also in the checkpoint;
            # a variable that is new to the regressor graph simply keeps the value it was given by the
            # initializer above.

            optimistic_restore(sess, saved_weights_path)  # see note above
            print("Model restored.")

        # Add histograms for trainable variables.
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)

        # Prepare variables for TensorBoard
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(TENSORBOARD_SUMMARIES_DIR + '/train', sess.graph)
        valid_writer = tf.summary.FileWriter(TENSORBOARD_SUMMARIES_DIR + '/validation')

        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        step_interval = 1000 # 1000
        try: # allow for keyboard interrupts, and always save model
            print('\nStarting TRY section of code!\n')

            # Loop through groups of batches, each group of batches has same num of digits , iterating from 1 to 5, step 1.
            # TBC


            # Loop through training steps.
            for step in range(int(NUM_EPOCHS * train_size) // BATCH_SIZE):
                duration = time.time() - start_time
                examples_per_sec = BATCH_SIZE / duration

                # Run the graph and fetch some of the nodes.
                train_feed_dict = fill_feed_dict( train_data,  train_labels,  images_placeholder,  labels_placeholder, step)
                # train_feed_dict = fill_feed_dict(data = train_data, labels = train_labels, x = images_placeholder, y_ = labels_placeholder, step = step)

                # cdt test: check that we can extract the data/labels from train_feed_dict (before feeding them
                # into the model and checking that the prediction matches the displayed image)
                if TEST_FLAG:
                    if step % step_interval == 0:
                        test_image_list = []
                        test_label_list = []
                        num_test_images = 1
                        for i  in range(num_test_images):
                            train_feed_dict_step = fill_feed_dict( train_data,  train_labels,  images_placeholder,  labels_placeholder, step)
                            train_feed_dict_keys0 = []
                            train_feed_dict_keys0 = list(train_feed_dict_step.keys())
                            #print('train_feed_dict_step.keys()',train_feed_dict_step.keys())
                            #print('\n\ntrain_feed_dict_step[key0] = image pixel values\n',train_feed_dict_step[train_feed_dict_keys0[0]])
                            print('\n\ntrain_feed_dict_step[key1] = image labels\n',train_feed_dict_step[train_feed_dict_keys0[1]])

                            test_image = train_feed_dict_step[train_feed_dict_keys0[0]][i]
                            print('\n\nnp.array(test_image).shape',np.array(test_image).shape)
                            #print('test_image',test_image)
                            test_label = train_feed_dict_step[train_feed_dict_keys0[1]][i]
                            #print('\n\nnp.array(test_label).shape',np.array(test_label).shape)
                            #print('test_label',test_label,'\n\n')

                            test_image_list.append(test_image)
                            test_label_list.append(test_label)

                _, l, lr, acc, predictions = sess.run([optimizer, loss, learning_rate, accuracy, prediction], feed_dict=train_feed_dict)
                # predictions.shape = [NUM_LOGITS = 5 , BATCH_SIZE = 32 , NUM_LABELS = 10]

                train_batched_labels = list(train_feed_dict.values())

                if step % step_interval == 0: # 1000
                    valid_feed_dict = fill_feed_dict(valid_data, valid_labels, images_placeholder, labels_placeholder, step)
                    valid_batch_labels = list(valid_feed_dict.values())

                    valid_summary, _, l, lr, valid_acc = sess.run([merged, optimizer, loss, learning_rate, accuracy],
                    feed_dict=valid_feed_dict, options=run_options, run_metadata=run_metadata)
                    print('Validation Accuracy: %.2f' % valid_acc)
                    valid_writer.add_run_metadata(run_metadata, 'step%03d' % step)
                    valid_writer.add_summary(valid_summary, step)

                    train_summary, _, l, lr, train_acc , prediction_0_test = sess.run([merged, optimizer, loss, learning_rate, accuracy, prediction_0],
                        feed_dict=train_feed_dict)
                    train_writer.add_run_metadata(run_metadata, 'step%03d' % step)
                    train_writer.add_summary(train_summary, step)
                    print('Training Set Accuracy: %.2f' % train_acc)
                    print('Adding run metadata for', step)


                    # Validation: print the image to screen to check that: # cdt
                    #   a. the labels in the model agree with the observed image
                    #   b. the model's prediction is usually correct (vs. the label) if accuracy is 'good'
                    # load the image to be run through the model (to inspect the model's label prediction)
                    if TEST_FLAG:
                        if step > -1:
                            if step < 5000:
                                if step % (10*step_interval) == 0:
                                    print('\nPrediction for first image in batch is:',prediction_0_test)
                                    print('')
                                    """
                                    sample_img = test_image
                                    print('sample_img.shape',sample_img.shape)
                                    sample_img = Image.open(img_path) # image extension *.png,*.jpg
                                    """

                                    # resize the image, using opencv.resize, if image doesn't have the right shape to fit the tf model. req'd shape = (64,64,3)
                                    test_list = test_image_list
                                    for i in range(num_test_images):
                                        print('test_label_list[i]',test_label_list[i])
                                        sample_img = test_list[i]
                                        print('sample_img.shape',sample_img.shape)
                                        if sample_img.shape != (64,64,3):
                                            print('resizing image now, so it has shape (64,64,3)  !')
                                            new_width  = 64
                                            new_height = 64
                                            sample_img = sample_img.resize((new_width, new_height), Image.ANTIALIAS)
                                            sample_img.save('output image name.png') # format may what u want ,*.png,*jpg,*.gif

                                        plt.imshow(sample_img)
                                        plt.show()

                            print("First label value is the 'num of digits in the image', remaining label values are the digit labels")


                elif step % 100 == 0:
                    elapsed_time = time.time() - start_time
                    start_time = time.time()

                    format_str = ('%s: step %d, loss = %.2f  learning rate = %.2f  (%.1f examples/sec; %.3f ''sec/batch)')
                    print (format_str % (datetime.now(), step, l, lr, examples_per_sec, duration))

                    print('Minibatch accuracy2: %.2f' % acc)
                    sys.stdout.flush()
        except KeyboardInterrupt: # allow for keyboard interrupts and always save model
            print('Keyboard interrupt detected! Saving the model and computing the final test accuracy at this interrupt point.')

        finally: # allow for keyboard interrupts and always save model
            test_feed_dict = fill_feed_dict(test_data, test_labels, images_placeholder, labels_placeholder, step)
            _, l, lr, test_acc = sess.run([optimizer, loss, learning_rate, accuracy], feed_dict=test_feed_dict, options=run_options, run_metadata=run_metadata)
            print('Test accuracy: %.2f' % test_acc)

            # Save the variables to disk.
            save_path = saver.save(sess, "regression.ckpt")
            print("Model saved in file: %s" % save_path)

            train_writer.close()
            valid_writer.close()
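None of the examples on this page include regression_head itself. For orientation only, a minimal sketch of the kind of multi-head structure the calls above imply (shared conv features feeding five per-digit heads, each returning [BATCH_SIZE, NUM_LABELS = 11] logits); this is an assumption, not the source project's architecture, and a real implementation would also need variable scoping/reuse since the examples call it more than once:

import tensorflow as tf

def regression_head(images, dropout=False):
    # Hypothetical sketch: shared convolutional trunk + five digit-position heads.
    net = tf.layers.conv2d(images, 32, 5, padding='same', activation=tf.nn.relu)
    net = tf.layers.max_pooling2d(net, 2, 2)
    net = tf.layers.conv2d(net, 64, 5, padding='same', activation=tf.nn.relu)
    net = tf.layers.max_pooling2d(net, 2, 2)
    net = tf.layers.flatten(net)
    net = tf.layers.dense(net, 256, activation=tf.nn.relu)
    if dropout:
        net = tf.nn.dropout(net, keep_prob=0.5)
    # One 11-way logits tensor per digit position (digits 0-9 plus a "blank" class).
    return [tf.layers.dense(net, 11) for _ in range(5)]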