        if epoch % checkpoint_every == 0:
            print("Saving checkpoint")
            save_path = saver.save(sess, './model/' + model_name + '.ckpt')

            # Now that the model is saved, set init to False so we reload it next time
            init = False
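
            # (the counterpart of this flag, by the pattern here, is presumably
            #  `saver.restore(sess, save_path)` on later runs when init is
            #  False, versus tf.global_variables_initializer() when it is True)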

        # init batch arrays
        batch_cv_acc = []

        # re-initialize the local (metric) variables so the metrics cover only this evaluation pass
        sess.run(tf.local_variables_initializer())
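        # (the tf.metrics-style ops used below, e.g. rec_op and prec_op, keep
        #  their running totals in TF "local" variables, which is why they can
        #  be reset here independently of the model weights)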

        print("Evaluating model...")
        # load the validation data
        X_cv, y_cv = load_validation_data(percentage=1, how="normal", which=dataset)

        # evaluate the validation data
        for X_batch, y_batch in get_batches(X_cv, y_cv, batch_size, distort=False):
            _, _, valid_acc, valid_recall, valid_precision, valid_fscore, valid_cost = sess.run(
                [update_op, extra_update_ops, accuracy, rec_op, prec_op, f1_score, mean_ce],
                feed_dict={
                    X: X_batch,
                    y: y_batch,
                    training: False
                })

            batch_cv_acc.append(valid_acc)
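
        # For reference, a helper like get_batches above is presumably a plain
        # mini-batch generator; a hypothetical sketch (the project's real
        # version also implements the distort flag):
        #
        #     def get_batches(X, y, batch_size, distort=False):
        #         for i in range(0, len(X), batch_size):
        #             yield X[i:i + batch_size], y[i:i + batch_size]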

        # Write average of validation data to summary logs
        if log_to_tensorboard:
            ...

Example #2

                save_path = saver.save(sess, './model/' + model_name + '.ckpt')

                # Now that the model is saved, set init to False so we reload it next time
                init = False

            # init batch arrays
            batch_cv_acc = []
            batch_cv_loss = []
            batch_cv_recall = []

            # re-initialize the local (metric) variables so the metrics cover only this evaluation pass
            sess.run(tf.local_variables_initializer())

            print("Evaluating model...")
            # load the validation data
            X_cv, y_cv = load_validation_data(percentage=1, how=how, which=dataset, scale=True)
            counter = 0

            # evaluate on pre-cropped images
            for X_batch, y_batch in get_batches(X_cv, y_cv, batch_size, distort=False):
                _, valid_acc, valid_recall, valid_cost = sess.run(
                    [metrics_op, accuracy, recall, mean_ce],
                    feed_dict={
                        X: X_batch,
                        y: y_batch,
                        training: False
                    })
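
                # (metrics_op above presumably groups the individual metric
                #  update ops, e.g. with tf.group, so one run call advances
                #  all of the running counts at once)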

            # one more run to read the accumulated metrics
            summary, valid_acc, valid_recall, valid_prec = sess.run(
                [merged, accuracy, recall, precision],
                feed_dict={
                    X: X_batch,
                    y: y_batch,
                    training: False
                })
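
The loop above relies on TF1 streaming metrics: each tf.metrics.* call returns a (value, update_op) pair, running the update op accumulates counts in local variables across batches, and the value tensor is read once at the end (the extra run after the loop). A minimal self-contained sketch of that pattern, with illustrative names rather than the project's:

    import tensorflow as tf  # TF 1.x-style API

    labels = tf.placeholder(tf.int64, [None])
    predictions = tf.placeholder(tf.int64, [None])

    # each tf.metrics.* call returns (value, update_op); the update op
    # accumulates counts in TF local variables across run calls
    recall, recall_update = tf.metrics.recall(labels, predictions)

    batches = [([1, 0, 1], [1, 1, 0]), ([0, 1, 1], [0, 1, 1])]  # toy data

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())  # zero the counters
        for y_true, y_pred in batches:
            sess.run(recall_update,
                     feed_dict={labels: y_true, predictions: y_pred})
        print(sess.run(recall))  # aggregated recall over all batches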
Example #3

                # Now that the model is saved, set init to False so we reload it next time
                init = False

            # init batch arrays
            batch_cv_acc = []
            batch_cv_loss = []
            batch_cv_recall = []

            # re-initialize the local (metric) variables so the metrics cover only this evaluation pass
            sess.run(tf.local_variables_initializer())

            print("Evaluating model...")
            # load the validation data
            X_cv, y_cv = load_validation_data(percentage=1,
                                              how=how,
                                              which=dataset)

            # evaluate the validation data
            for X_batch, y_batch in get_batches(X_cv,
                                                y_cv,
                                                batch_size,
                                                distort=False):
                _, _, valid_acc, valid_recall, valid_precision, valid_fscore, valid_cost = sess.run(
                    [
                        update_op, extra_update_ops, accuracy, rec_op, prec_op,
                        f1_score, mean_ce
                    ],
                    feed_dict={
                        X: X_batch,
                        y: y_batch,
                        training: False
                    })
Example #4

            # Now that the model is saved, set init to False so we reload it next time
            init = False

        # init batch arrays
        batch_cv_acc = []
        batch_cv_cost = []
        batch_cv_loss = []
        batch_cv_recall = []
        batch_cv_precision = []

        ## evaluate on validation data if it exists; otherwise skip this step
        if evaluate:
            print("Evaluating model...")
            # load the validation data
            X_cv, y_cv = load_validation_data(percentage=1, how="normal")

            # evaluate the validation data
            for X_batch, y_batch in get_batches(X_cv, y_cv, batch_size // 2, distort=False):
                summary, valid_acc, valid_recall, valid_precision, valid_cost, valid_loss = sess.run(
                    [merged, accuracy, rec_op, prec_op, mean_ce, loss],
                    feed_dict={
                        X: X_batch,
                        y: y_batch,
                        is_testing: True,
                        training: False
                    })

                batch_cv_acc.append(valid_acc)
                batch_cv_cost.append(valid_cost)
                batch_cv_loss.append(valid_loss)
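
Snippets like these typically finish by averaging the per-batch numbers and writing them to the TensorBoard logs. A minimal sketch of that step, assuming numpy is imported as np and a tf.summary.FileWriter named test_writer exists (neither appears in the snippets above):

    valid_acc_mean = float(np.mean(batch_cv_acc))
    valid_cost_mean = float(np.mean(batch_cv_cost))

    # build a scalar summary by hand and write it for the current epoch
    summary = tf.Summary(value=[
        tf.Summary.Value(tag="valid/accuracy", simple_value=valid_acc_mean),
        tf.Summary.Value(tag="valid/cross_entropy", simple_value=valid_cost_mean),
    ])
    test_writer.add_summary(summary, epoch)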