Example #1
    def __init__(self,
                 data_helper,
                 am,
                 input_component,
                 exp_name,
                 batch_size=64,
                 evaluate_every=1000,
                 checkpoint_every=5000,
                 max_to_keep=7):
        self.data_hlp = data_helper
        self.exp_name = exp_name
        self.input_component = input_component
        # the problem tag identifies the experiment setting, currently data name + experiment name
        self.am = am

        logging.warning('TrainTask instance initiated: ' + AM.get_date())
        logging.info("Logging to: " + self.am.get_exp_log_path())

        logging.info("current data is: " + self.data_hlp.problem_name)
        logging.info("current experiment is: " + self.exp_name)

        # network parameters
        self.batch_size = batch_size
        self.evaluate_every = evaluate_every
        self.checkpoint_every = checkpoint_every
        self.max_to_keep = max_to_keep

        logging.info("setting: %s is %s", "batch_size", self.batch_size)
        logging.info("setting: %s is %s", "evaluate_every",
                     self.evaluate_every)
        logging.info("setting: %s is %s", "checkpoint_every",
                     self.checkpoint_every)

        # Load data
        logging.debug("Loading data...")
        if "Six" in input_component:
            self.x_train, self.pos_train, _, self.p2_train, self.p3_train, self.s2_train, self.s3_train, self.y_train, \
            _, _, self.embed_matrix = self.data_hlp.get_train_data()
            self.pref2_vocab_size = len(self.data_hlp.p2_vocab)
            self.pref3_vocab_size = len(self.data_hlp.p3_vocab)
            self.suff2_vocab_size = len(self.data_hlp.s2_vocab)
            self.suff3_vocab_size = len(self.data_hlp.s3_vocab)
            self.pos_vocab_size = len(self.data_hlp.pos_vocab)
        elif "One" in input_component or "PAN11" in input_component:
            self.train_data = self.data_hlp.get_train_data()
        else:
            raise NotImplementedError

        logging.debug("Vocabulary Size: {:d}".format(len(self.data_hlp.vocab)))

        if "Six" in input_component:
            self.x_dev, self.pos_test, _, self.p2_test, self.p3_test, \
            self.s2_test, self.s3_test, self.y_dev, _, _, _ = self.data_hlp.get_test_data()
        elif "One" in input_component or "PAN11" in input_component:
            self.test_data = self.data_hlp.get_test_data()
        else:
            raise NotImplementedError

        logging.info("Train/Dev split: {:d}/{:d}".format(
            len(self.train_data.label_doc), len(self.test_data.label_doc)))
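
The constructor above only wires data loading and logging together; the actual batching used by every later example goes through DataHelper.batch_iter(data, batch_size, num_epochs, shuffle=...). The helper itself is not shown on this page, so the following is a minimal sketch of an iterator matching that call signature, an assumption rather than the library's real implementation:

import numpy as np

# Hypothetical stand-in for DataHelper.batch_iter: yields successive slices
# of `data`, optionally reshuffling once per epoch.
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    data = np.array(data)
    data_size = len(data)
    num_batches_per_epoch = int((data_size - 1) / batch_size) + 1
    for _ in range(num_epochs):
        epoch_data = data[np.random.permutation(data_size)] if shuffle else data
        for batch_num in range(num_batches_per_epoch):
            start = batch_num * batch_size
            end = min((batch_num + 1) * batch_size, data_size)
            yield epoch_data[start:end]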
Example #2
    def __init__(self,
                 data_helper,
                 am,
                 input_component,
                 middle_component,
                 output_component,
                 batch_size,
                 evaluate_every,
                 checkpoint_every,
                 max_to_keep,
                 restore_path=None):
        self.data_hlp = data_helper
        self.input_comp = input_component
        self.middle_comp = middle_component
        self.output_comp = output_component
        self.am = am

        logging.warning('TrainTask instance initiated: ' + AM.get_date())
        logging.info("Logging to: " + self.am.get_exp_log_path())

        self.exp_dir = self.am.get_exp_dir()

        logging.info("current data is: " + self.data_hlp.problem_name)
        logging.info("current input is: " + type(self.input_comp).__name__)
        logging.info("current middle is: " + type(self.middle_comp).__name__)
        logging.info("current output is: " + type(self.output_comp).__name__)

        self.restore_dir = restore_path
        if restore_path is not None:
            self.restore_latest = tf.train.latest_checkpoint(restore_path +
                                                             "/checkpoints/")
            logging.warning("RESTORE FROM PATH: " + self.restore_latest)

        # network parameters
        self.batch_size = batch_size
        self.evaluate_every = evaluate_every
        self.checkpoint_every = checkpoint_every
        self.max_to_keep = max_to_keep

        logging.info("setting: %s is %s", "batch_size", self.batch_size)
        logging.info("setting: %s is %s", "evaluate_every",
                     self.evaluate_every)
        logging.info("setting: %s is %s", "checkpoint_every",
                     self.checkpoint_every)

        self.train_data = self.data_hlp.get_train_data()
        self.test_data = self.data_hlp.get_test_data()

        logging.info("Vocabulary Size: {:d}".format(len(
            self.train_data.vocab)))
        logging.info("Train/Dev split (DOC): {:d}/{:d}".format(
            len(self.train_data.label_doc), len(self.train_data.label_doc)))
        logging.info("Train/Dev split (IST): {:d}/{:d}".format(
            len(self.train_data.label_instance),
            len(self.test_data.label_instance)))
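
The restore_path handling above only resolves the newest checkpoint file; consuming it is left to the training loop, which is not part of this example. A hedged TF1.x sketch of the typical pattern (the variable and directory names here are hypothetical):

import tensorflow as tf

# Build (or import) the graph first; tf.train.Saver needs variables to exist.
w = tf.get_variable("w", shape=[2, 2])

# Returns None when the directory holds no checkpoint, matching the
# restore_path is None / is not None branching in the constructor above.
restore_latest = tf.train.latest_checkpoint("runs/my_exp/checkpoints/")
saver = tf.train.Saver(max_to_keep=7)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    if restore_latest is not None:
        # Resume weights from the newest checkpoint instead of training fresh.
        saver.restore(sess, restore_latest)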
Example #3
    def evaluate(self,
                 experiment_dir,
                 checkpoint_step,
                 doc_acc=False,
                 do_is_training=True):
        if checkpoint_step is not None:
            checkpoint_file = experiment_dir + "/checkpoints/" + "model-" + str(
                checkpoint_step)
        else:
            checkpoint_file = tf.train.latest_checkpoint(experiment_dir +
                                                         "/checkpoints/",
                                                         latest_filename=None)
        file_name = os.path.basename(checkpoint_file)
        self.eval_log = open(os.path.join(experiment_dir,
                                          file_name + "_eval.log"),
                             mode="w+")
        console = logging.StreamHandler()
        logging.getLogger('').addHandler(console)

        self.eval_log.write("Evaluating: " + __file__ + "\n")
        self.eval_log.write("Test for prob: " + self.dater.problem_name + "\n")
        self.eval_log.write(checkpoint_file + "\n")
        self.eval_log.write(AM.get_time() + "\n")
        self.eval_log.write("Total number of test examples: {}\n".format(
            len(self.test_data.label_instance)))

        graph = tf.Graph()
        with graph.as_default():
            session_conf = tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)
            sess = tf.Session(config=session_conf)
            with sess.as_default():
                # Load the saved meta graph and restore variables
                saver = tf.train.import_meta_graph(
                    "{}.meta".format(checkpoint_file))
                saver.restore(sess, checkpoint_file)

                # Get the placeholders from the graph by name
                input_x = graph.get_operation_by_name("input_x").outputs[0]
                input_y = graph.get_operation_by_name("input_y").outputs[0]
                dropout_keep_prob = graph.get_operation_by_name(
                    "dropout_keep_prob").outputs[0]
                if do_is_training:
                    is_training = graph.get_operation_by_name(
                        "is_training").outputs[0]
                else:
                    is_training = None

                # Tensors we want to evaluate
                scores = graph.get_operation_by_name(
                    "output/scores").outputs[0]
                predictions = graph.get_operation_by_name(
                    "output/predictions").outputs[0]

                # Generate batches for one epoch
                x_batches = DataHelper.batch_iter(self.test_data.value,
                                                  64,
                                                  1,
                                                  shuffle=False)
                y_batches = DataHelper.batch_iter(
                    self.test_data.label_instance, 64, 1, shuffle=False)

                # Collect the predictions here
                all_score = None
                pred = None
                for [x_test_batch, y_test_batch] in zip(x_batches, y_batches):
                    if do_is_training:
                        batch_scores, batch_pred_max = sess.run(
                            [scores, predictions], {
                                input_x: x_test_batch,
                                dropout_keep_prob: 1.0,
                                is_training: 0
                            })
                    else:
                        batch_scores, batch_pred_max = sess.run(
                            [scores, predictions], {
                                input_x: x_test_batch,
                                dropout_keep_prob: 1.0
                            })

                    # softmax-normalize the raw scores; note this builds a new
                    # TF op on every batch, which is tolerable for one-off eval
                    batch_scores = tf.nn.softmax(batch_scores).eval()

                    if all_score is None:
                        all_score = batch_scores
                        pred = batch_pred_max
                    else:
                        all_score = np.concatenate([all_score, batch_scores],
                                                   axis=0)
                        pred = np.concatenate([pred, batch_pred_max], axis=0)

        mi_prec = precision_score(y_true=self.y_test_scalar,
                                  y_pred=pred,
                                  average="micro")
        self.eval_log.write("micro prec:\t" + str(mi_prec) + "\n")

        mi_recall = recall_score(y_true=self.y_test_scalar,
                                 y_pred=pred,
                                 average="micro")
        self.eval_log.write("micro recall:\t" + str(mi_recall) + "\n")

        mi_f1 = f1_score(y_true=self.y_test_scalar,
                         y_pred=pred,
                         average="micro")
        self.eval_log.write("micro f1:\t" + str(mi_f1) + "\n")

        ma_prec = precision_score(y_true=self.y_test_scalar,
                                  y_pred=pred,
                                  average='macro')
        self.eval_log.write("macro prec:\t" + str(ma_prec) + "\n")

        ma_recall = recall_score(y_true=self.y_test_scalar,
                                 y_pred=pred,
                                 average='macro')
        self.eval_log.write("macro recall:\t" + str(ma_recall) + "\n")

        ma_f1 = f1_score(y_true=self.y_test_scalar,
                         y_pred=pred,
                         average='macro')
        self.eval_log.write("macro f1:\t" + str(ma_f1) + "\n")

        jaccard = jaccard_similarity_score(y_true=self.y_test_scalar,
                                           y_pred=pred)
        self.eval_log.write("jaccard:\t" + str(jaccard) + "\n")

        hamming = hamming_loss(y_true=self.y_test_scalar, y_pred=pred)
        self.eval_log.write("hamming:\t" + str(hamming) + "\n")

        acc = accuracy_score(y_true=self.y_test_scalar, y_pred=pred)
        self.eval_log.write("acc:\t" + str(acc) + "\n")

        self.eval_log.write("\n")
        self.eval_log.write("\n")

        self.print_a_csv(exp_dir=experiment_dir,
                         file_name=file_name,
                         method_name="NORM",
                         prob=all_score,
                         pred=pred,
                         true=self.y_test_scalar)
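
Example #3 reports every metric twice, micro- and macro-averaged. The distinction matters on imbalanced label sets: micro averaging pools all individual decisions, while macro averaging scores each class separately and then takes an unweighted mean. A tiny self-contained illustration (toy labels, not data from this project):

import numpy as np
from sklearn.metrics import f1_score

y_true = np.array([0, 0, 0, 0, 1, 2])
y_pred = np.array([0, 0, 0, 1, 1, 1])

# Micro pools all decisions, so the frequent class 0 dominates the score;
# macro gives the rare classes 1 and 2 equal weight.
print(f1_score(y_true, y_pred, average="micro"))  # 0.667
print(f1_score(y_true, y_pred, average="macro"))  # ~0.452: class 2 scores 0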
Example #4
    def evaluate(self,
                 experiment_dir,
                 checkpoint_step,
                 doc_acc=True,
                 do_is_training=True):
        if checkpoint_step is not None:
            checkpoint_file = experiment_dir + "/checkpoints/" + "model-" + str(
                checkpoint_step)
        else:
            checkpoint_file = tf.train.latest_checkpoint(experiment_dir +
                                                         "/checkpoints/",
                                                         latest_filename=None)
        file_name = os.path.basename(checkpoint_file)
        self.eval_log = open(os.path.join(experiment_dir,
                                          file_name + "_eval.log"),
                             mode="w+")

        logging.info("Evaluating: " + __file__)
        self.eval_log.write("Evaluating: " + __file__ + "\n")
        logging.info("Test for prob: " + self.dater.problem_name)
        self.eval_log.write("Test for prob: " + self.dater.problem_name + "\n")
        logging.info(checkpoint_file)
        self.eval_log.write(checkpoint_file + "\n")
        logging.info(AM.get_time())
        self.eval_log.write(AM.get_time() + "\n")
        logging.info("Total number of test examples: {}".format(
            len(self.test_data.label_instance)))
        self.eval_log.write("Total number of test examples: {}\n".format(
            len(self.test_data.label_instance)))

        graph = tf.Graph()
        with graph.as_default():
            session_conf = tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)
            sess = tf.Session(config=session_conf)
            with sess.as_default():
                # Load the saved meta graph and restore variables
                saver = tf.train.import_meta_graph(
                    "{}.meta".format(checkpoint_file))
                saver.restore(sess, checkpoint_file)

                # Get the placeholders from the graph by name
                input_x = graph.get_operation_by_name("input_x").outputs[0]
                input_y = graph.get_operation_by_name("input_y").outputs[0]
                dropout_keep_prob = graph.get_operation_by_name(
                    "dropout_keep_prob").outputs[0]
                if do_is_training:
                    is_training = graph.get_operation_by_name(
                        "is_training").outputs[0]
                else:
                    is_training = None

                # Tensors we want to evaluate
                scores = graph.get_operation_by_name(
                    "output/scores").outputs[0]
                predictions_sigmoid = graph.get_operation_by_name(
                    "output/predictions_sigmoid").outputs[0]
                predictions_max = graph.get_operation_by_name(
                    "output/predictions_max").outputs[0]

                # Generate batches for one epoch
                x_batches = DataHelper.batch_iter(self.test_data.value,
                                                  64,
                                                  1,
                                                  shuffle=False)
                y_batches = DataHelper.batch_iter(
                    self.test_data.label_instance, 64, 1, shuffle=False)

                # Collect the predictions here
                all_score = None
                pred_sigmoid_value = None
                pred_max_bool = None
                pred_sigmoid_bool = None
                for [x_test_batch, y_test_batch] in zip(x_batches, y_batches):
                    if do_is_training:
                        batch_scores, batch_pred_sigmoid, batch_pred_max_index = sess.run(
                            [scores, predictions_sigmoid, predictions_max], {
                                input_x: x_test_batch,
                                dropout_keep_prob: 1.0,
                                is_training: 0
                            })
                    else:
                        batch_scores, batch_pred_sigmoid, batch_pred_max_index = sess.run(
                            [scores, predictions_sigmoid, predictions_max], {
                                input_x: x_test_batch,
                                dropout_keep_prob: 1.0
                            })

                    batch_pred_max_bool = tf.one_hot(
                        indices=batch_pred_max_index,
                        depth=self.dater.num_of_classes).eval(
                        ) == 1  # TODO temp

                    if all_score is None:
                        all_score = batch_scores
                        pred_max_bool = batch_pred_max_bool
                        pred_sigmoid_bool = batch_pred_sigmoid > 0.5
                        pred_sigmoid_value = batch_pred_sigmoid
                    else:
                        all_score = np.concatenate([all_score, batch_scores],
                                                   axis=0)
                        pred_max_bool = np.concatenate(
                            [pred_max_bool, batch_pred_max_bool], axis=0)
                        pred_sigmoid_bool = np.concatenate(
                            [pred_sigmoid_bool, batch_pred_sigmoid > 0.5],
                            axis=0)
                        pred_sigmoid_value = np.concatenate(
                            [pred_sigmoid_value, batch_pred_sigmoid], axis=0)

            # logging.info("== PRED MAX ==")
            # self.eval_log.write("== PRED MAX ==")
            # self.sent_accuracy(pred_max_bool)
            logging.info("== PRED SIGMOID ==")
            self.eval_log.write("== PRED SIGMOID ==")
            self.sent_accuracy(pred_sigmoid_bool)

            if doc_acc:
                # print("========== WITH MAX ==========")
                # self.doc_accuracy(pred_max)
                # print("========== WITH SIGMOID ==========")
                self.eval_log.write("========== WITH VOTE ==========\n\n")
                self.doc_accuracy(pred_sigmoid_bool)

                self.eval_log.write(
                    "========== WITH SIGMOID CUMU ==========\n\n")
                self.doc_accuracy_sigmoid_cumulation(pred_sigmoid_value)

            self.eval_log.write("\n")
            self.eval_log.write("\n")
Example #5
    def write_file(self,
                   experiment_dir,
                   checkpoint_step,
                   doc_acc=True,
                   do_is_training=True):
        if checkpoint_step is not None:
            checkpoint_file = experiment_dir + "/checkpoints/" + "model-" + str(
                checkpoint_step)
        else:
            checkpoint_file = tf.train.latest_checkpoint(experiment_dir +
                                                         "/checkpoints/",
                                                         latest_filename=None)
        file_name = os.path.basename(checkpoint_file)
        self.eval_log = open(os.path.join(experiment_dir,
                                          file_name + "_eval.log"),
                             mode="w+")

        logging.info("Evaluating: " + __file__)
        self.eval_log.write("Evaluating: " + __file__ + "\n")
        logging.info("Test for prob: " + self.dater.problem_name)
        self.eval_log.write("Test for prob: " + self.dater.problem_name + "\n")
        logging.info(checkpoint_file)
        self.eval_log.write(checkpoint_file + "\n")
        logging.info(AM.get_time())
        self.eval_log.write(AM.get_time() + "\n")
        logging.info("Total number of test examples: {}".format(
            len(self.test_data.label_instance)))
        self.eval_log.write("Total number of test examples: {}\n".format(
            len(self.test_data.label_instance)))

        graph = tf.Graph()
        with graph.as_default():
            session_conf = tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)
            sess = tf.Session(config=session_conf)
            with sess.as_default():
                # Load the saved meta graph and restore variables
                saver = tf.train.import_meta_graph(
                    "{}.meta".format(checkpoint_file))
                saver.restore(sess, checkpoint_file)

                # Get the placeholders from the graph by name
                input_x = graph.get_operation_by_name("input_x").outputs[0]
                input_y = graph.get_operation_by_name("input_y").outputs[0]
                dropout_keep_prob = graph.get_operation_by_name(
                    "dropout_keep_prob").outputs[0]
                if do_is_training:
                    is_training = graph.get_operation_by_name(
                        "is_training").outputs[0]
                else:
                    is_training = None

                # Tensors we want to evaluate
                scores = graph.get_operation_by_name(
                    "output/scores").outputs[0]
                predictions = graph.get_operation_by_name(
                    "output/predictions").outputs[0]

                # TRAIN ===========================================================================
                x_batches = DataHelper.batch_iter(self.train_data.value,
                                                  64,
                                                  1,
                                                  shuffle=False)
                y_batches = DataHelper.batch_iter(
                    self.train_data.label_instance, 64, 1, shuffle=False)
                all_score = None
                pred_sigmoid = None

                for [x_batch, y_batch] in zip(x_batches, y_batches):
                    if do_is_training:
                        batch_scores, batch_pred_sigmoid = sess.run(
                            [scores, predictions], {
                                input_x: x_batch,
                                dropout_keep_prob: 1.0,
                                is_training: 0
                            })
                    else:
                        batch_scores, batch_pred_sigmoid = sess.run(
                            [scores, predictions], {
                                input_x: x_batch,
                                dropout_keep_prob: 1.0
                            })

                    if all_score is None:
                        all_score = batch_scores
                        pred_sigmoid = batch_pred_sigmoid
                    else:
                        all_score = np.concatenate([all_score, batch_scores],
                                                   axis=0)
                        pred_sigmoid = np.concatenate(
                            [pred_sigmoid, batch_pred_sigmoid], axis=0)

                self.write_dist_file(doc_size_list=self.train_data.doc_size,
                                     all_sigmoids=pred_sigmoid,
                                     label=self.train_data.label_doc,
                                     experiment_dir=experiment_dir,
                                     file_name="train")

                # TEST  ===========================================================================
                all_score = None
                pred_sigmoid = None
                x_batches = DataHelper.batch_iter(self.test_data.value,
                                                  64,
                                                  1,
                                                  shuffle=False)
                y_batches = DataHelper.batch_iter(
                    self.test_data.label_instance, 64, 1, shuffle=False)

                for [x_test_batch, y_test_batch] in zip(x_batches, y_batches):
                    if do_is_training:
                        batch_scores, batch_pred_sigmoid = sess.run(
                            [scores, predictions], {
                                input_x: x_test_batch,
                                dropout_keep_prob: 1.0,
                                is_training: 0
                            })
                    else:
                        batch_scores, batch_pred_sigmoid = sess.run(
                            [scores, predictions], {
                                input_x: x_test_batch,
                                dropout_keep_prob: 1.0
                            })

                    if all_score is None:
                        all_score = batch_scores
                        pred_sigmoid = batch_pred_sigmoid
                    else:
                        all_score = np.concatenate([all_score, batch_scores],
                                                   axis=0)
                        pred_sigmoid = np.concatenate(
                            [pred_sigmoid, batch_pred_sigmoid], axis=0)

            self.write_dist_file(doc_size_list=self.test_data.doc_size,
                                 all_sigmoids=pred_sigmoid,
                                 label=self.test_data.label_doc,
                                 experiment_dir=experiment_dir,
                                 file_name="test")
Example #6
    def test(self, experiment_dir, checkpoint_step, documentAcc=True):
        if checkpoint_step is not None:
            checkpoint_file = experiment_dir + "/checkpoints/" + "model-" + str(
                checkpoint_step)
        else:
            checkpoint_file = tf.train.latest_checkpoint(experiment_dir +
                                                         "/checkpoints/",
                                                         latest_filename=None)
        eval_log = open(os.path.join(experiment_dir, "eval.log"), mode="w+")

        logging.info("Evaluating: " + __file__)
        eval_log.write("Evaluating: " + __file__ + "\n")
        logging.info("Test for prob: " + self.dater.problem_name)
        eval_log.write("Test for prob: " + self.dater.problem_name + "\n")
        logging.info(checkpoint_file)
        eval_log.write(checkpoint_file + "\n")
        logging.info(AM.get_time())
        eval_log.write(AM.get_time() + "\n")
        logging.info("Total number of test examples: {}".format(
            len(self.y_test)))
        eval_log.write(
            "Total number of test examples: {}\n".format(len(self.y_test)) +
            "\n")

        graph = tf.Graph()
        with graph.as_default():
            session_conf = tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)
            sess = tf.Session(config=session_conf)
            with sess.as_default():
                # Load the saved meta graph and restore variables
                saver = tf.train.import_meta_graph(checkpoint_file + ".meta")
                saver.restore(sess, checkpoint_file)

                # Get the placeholders from the graph by name
                input_x = graph.get_operation_by_name("input_x").outputs[0]
                input_pref2 = graph.get_operation_by_name(
                    "input_pref2").outputs[0]
                input_pref3 = graph.get_operation_by_name(
                    "input_pref3").outputs[0]
                input_suff2 = graph.get_operation_by_name(
                    "input_suff2").outputs[0]
                input_suff3 = graph.get_operation_by_name(
                    "input_suff3").outputs[0]
                input_pos = graph.get_operation_by_name("input_pos").outputs[0]
                is_training = graph.get_operation_by_name(
                    "is_training").outputs[0]
                # input_y = graph.get_operation_by_name("input_y").outputs[0]

                dropout_keep_prob = graph.get_operation_by_name(
                    "dropout_keep_prob").outputs[0]

                # Tensors we want to evaluate
                scores = graph.get_operation_by_name(
                    "output/scores").outputs[0]
                predictions = graph.get_operation_by_name(
                    "output/predictions").outputs[0]

                # Generate batches for one epoch
                x_batches = dh.DataHelperMulMol6.batch_iter(self.x_test,
                                                            64,
                                                            1,
                                                            shuffle=False)
                y_batches = dh.DataHelperMulMol6.batch_iter(self.y_test,
                                                            64,
                                                            1,
                                                            shuffle=False)
                pref2_batches = dh.DataHelperMulMol6.batch_iter(self.p2_test,
                                                                64,
                                                                1,
                                                                shuffle=False)
                pref3_batches = dh.DataHelperMulMol6.batch_iter(self.p3_test,
                                                                64,
                                                                1,
                                                                shuffle=False)
                suff2_batches = dh.DataHelperMulMol6.batch_iter(self.s2_test,
                                                                64,
                                                                1,
                                                                shuffle=False)
                suff3_batches = dh.DataHelperMulMol6.batch_iter(self.s3_test,
                                                                64,
                                                                1,
                                                                shuffle=False)
                pos_batches = dh.DataHelperMulMol6.batch_iter(self.pos_test,
                                                              64,
                                                              1,
                                                              shuffle=False)

                # Collect the predictions here
                all_score = None
                all_predictions = np.zeros([0, self.dater.num_of_classes])
                for [
                        x_test_batch, y_test_batch, pref2_batch, pref3_batch,
                        suff2_batch, suff3_batch, pos_batch
                ] in zip(x_batches, y_batches, pref2_batches, pref3_batches,
                         suff2_batches, suff3_batches, pos_batches):
                    batch_scores, batch_predictions = sess.run(
                        [scores, predictions], {
                            input_x: x_test_batch,
                            input_pref2: pref2_batch,
                            input_pref3: pref3_batch,
                            input_suff2: suff2_batch,
                            input_suff3: suff3_batch,
                            input_pos: pos_batch,
                            dropout_keep_prob: 1.0,
                            is_training: 0
                        })
                    # print batch_predictions
                    if all_score is None:
                        all_score = batch_scores
                    else:
                        all_score = np.concatenate([all_score, batch_scores],
                                                   axis=0)
                    all_predictions = np.concatenate(
                        [all_predictions, batch_predictions], axis=0)

        # Print accuracy
        np.savetxt('temp.out', all_predictions, fmt='%1.0f')
        all_predictions = all_predictions >= 0.5
        self.y_test = np.array(self.y_test)
        sentence_result_label_matrix = all_predictions == (self.y_test == 1)
        sentence_result = np.logical_and.reduce(sentence_result_label_matrix,
                                                axis=1)
        correct_predictions = float(np.sum(sentence_result))
        average_accuracy = correct_predictions / float(
            all_predictions.shape[0])

        logging.info("Sent ACC\t" + str(average_accuracy) + "\t\t(cor: " +
                     str(correct_predictions) + ")")
        eval_log.write("Sent ACC\t" + str(average_accuracy) + "\t\t(cor: " +
                       str(correct_predictions) + ")")

        if documentAcc:
            doc_prediction = []
            sum_to = 0
            for i in range(len(self.doc_size_test)):
                f_size = self.doc_size_test[i]
                p = all_predictions[sum_to:sum_to + f_size].astype(int)
                sum_to = sum_to + f_size  # increment to next file
                p = np.sum(p, axis=0).astype(float)
                p = p / f_size
                pred_class = p > 0.3
                pred_class = pred_class.astype(int)
                if 1 not in pred_class:
                    pred_class = np.zeros([self.dater.num_of_classes],
                                          dtype=int)
                    pred_class[np.argmax(p)] = 1
                doc_prediction.append(pred_class)
                logging.info("pred: " + str(pred_class) + "   " + "true: " +
                             str(self.dater.doc_labels_test[i]))
                eval_log.write("File:" + self.dater.file_id_test[i] + "\n")
                eval_log.write("pred: " + str(pred_class) + "   " + "true: " +
                               str(self.dater.doc_labels_test[i]) + "\n")

            logging.info("")
            eval_log.write("\n")

            logging.info("Document ACC")
            eval_log.write("Document ACC\n")
            total_doc = len(self.dater.file_id_test)
            correct = 0.0
            for i in range(len(doc_prediction)):
                if np.array_equal(doc_prediction[i],
                                  self.dater.doc_labels_test[i]):
                    correct += 1
            doc_acc = correct / total_doc
            logging.info("Doc ACC: " + str(doc_acc))
            eval_log.write("Doc ACC: " + str(doc_acc) + "\n")

        eval_log.write("\n")
        eval_log.write("\n")