    def run_baseline(self):
        """
        Runs the majority-class and random baselines on the configured test sets.
        """
        if self.config["path_train"] and len(self.config["path_train"]) > 0:
            data_train = []
            for path_train in self.config["path_train"].strip().split(":"):
                data_train += self.read_input_files(
                    file_paths=path_train,
                    max_sentence_length=self.config["max_train_sent_length"])

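        # The majority baselines always predict the most frequent sentence and
        # token labels observed in the training data.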
        majority_sentence_label = Counter(
            Sentence.labels_sent_dict).most_common(1)[0][0]
        majority_token_label = Counter(
            Token.labels_tok_dict).most_common(1)[0][0]

        print("Most common sentence label (as in the train set) = ",
              majority_sentence_label)
        print("Most common token label (as in the train set) = ",
              majority_token_label)

        self.label2id_sent = self.create_labels_mapping(
            Sentence.unique_labels_sent)
        self.label2id_tok = self.create_labels_mapping(Token.unique_labels_tok)
        print("Sentence labels to id: ", self.label2id_sent)
        print("Token labels to id: ", self.label2id_tok)

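        # Collect one row of metrics per baseline and per test file.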
        df_results = None

        if self.config["path_test"] is not None:
            i = 0
            for path_test in self.config["path_test"].strip().split(":"):
                data_test = self.read_input_files(path_test)
                data_test = self.convert_labels(data_test)

                # Majority baseline.
                majority_pred_sent = [
                    self.label2id_sent[majority_sentence_label]
                ] * len(data_test)
                majority_pred_tok = []
                for sentence in data_test:
                    majority_pred_tok.append(
                        [self.label2id_tok[majority_token_label]] *
                        len(sentence.tokens))

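                # The baselines involve no model, so a cost of 0.0 is passed
                # to the evaluator.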
                majority_evaluator = Evaluator(self.label2id_sent,
                                               self.label2id_tok,
                                               self.config["conll03_eval"])
                majority_evaluator.append_data(0.0, data_test,
                                               majority_pred_sent,
                                               majority_pred_tok)

                name = "majority_test" + str(i)
                results = majority_evaluator.get_results(
                    name=name,
                    token_labels_available=self.config["token_labels_available"])

                for key in results:
                    print("%s_%s: %s" % (name, key, str(results[key])))
                majority_evaluator.get_results_nice_print(
                    name=name,
                    token_labels_available=self.config["token_labels_available"])

                row = pd.DataFrame([results])
                # DataFrame.append was removed in pandas 2.0; use concat.
                df_results = row if df_results is None else pd.concat(
                    [df_results, row], ignore_index=True)

                # Random baseline.
                random_pred_sent = []
                random_pred_tok = []
                for sentence in data_test:
                    random_pred_sent.append(
                        random.randint(0,
                                       len(self.label2id_sent) - 1))
                    random_pred_tok.append([
                        random.randint(0,
                                       len(self.label2id_tok) - 1)
                        for _ in range(len(sentence.tokens))
                    ])

                random_evaluator = Evaluator(self.label2id_sent,
                                             self.label2id_tok,
                                             self.config["conll03_eval"])
                random_evaluator.append_data(0.0, data_test, random_pred_sent,
                                             random_pred_tok)

                name = "rand_test" + str(i)
                results = random_evaluator.get_results(
                    name=name,
                    token_labels_available=self.config["token_labels_available"])

                for key in results:
                    print("%s_%s: %s" % (name, key, str(results[key])))
                random_evaluator.get_results_nice_print(
                    name=name,
                    token_labels_available=self.config["token_labels_available"])

                row = pd.DataFrame([results])
                df_results = pd.concat([df_results, row], ignore_index=True)
                i += 1

        # Save a data frame with all the baseline test results.
        if df_results is not None:
            df_results.to_csv(
                ".".join(self.config["to_write_filename"].split(".")[:-1]) +
                "_df_results.txt",
                index=False,
                sep="\t",
                encoding="utf-8")

    def process_sentences(self, sentences, model, is_training, learning_rate,
                          name):
        """
        Obtains predictions and returns the evaluation metrics.
        :type sentences: List[Sentence]
        :type model: Model
        :type is_training: bool
        :type learning_rate: float
        :type name: str
        :rtype: Dict
        """
        evaluator = Evaluator(self.label2id_sent, self.label2id_tok,
                              self.config["conll03_eval"])

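        # Group sentence ids into batches according to the configured batching
        # strategy (batch_equal_size) and maximum batch size.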
        batches_of_sentence_ids = self.create_batches_of_sentence_ids(
            sentences, self.config["batch_equal_size"],
            self.config["max_batch_size"])

        if is_training:
            random.shuffle(batches_of_sentence_ids)

        all_batches, all_sentence_probs, all_token_probs = [], [], []

        for batch_of_sentence_ids in batches_of_sentence_ids:
            batch = [sentences[i] for i in batch_of_sentence_ids]

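            # Run the model on this batch; when is_training is True, the model
            # parameters are also updated using the given learning rate.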
            cost, sentence_pred, sentence_probs, token_pred, token_probs = \
                model.process_batch(batch, is_training, learning_rate)
            evaluator.append_data(cost, batch, sentence_pred, token_pred)

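            # Keep the raw probabilities so the HTML prediction plots can be
            # generated after all batches have been processed.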
            if "test" in name and self.config["plot_predictions_html"]:
                all_batches.append(batch)
                all_sentence_probs.append(sentence_probs)
                all_token_probs.append(token_probs)

            # Plot the token scores for each sentence in the batch.
            if "test" in name and self.config["plot_token_scores"]:
                for sentence, token_proba_per_sentence, sent_pred in zip(
                        batch, token_probs, sentence_pred):
                    # Only plot sentences that have a non-default gold label,
                    # are correctly predicted, and are longer than 5 tokens.
                    if (sentence.label_sent != 0
                            and sentence.label_sent == sent_pred
                            and len(sentence.tokens) > 5):
                        visualize.plot_token_scores(
                            token_probs=token_proba_per_sentence,
                            sentence=sentence,
                            id2label_tok=evaluator.id2label_tok,
                            plot_name=self.config["path_plot_token_scores"])

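            # Optionally run the garbage collector until nothing more can be
            # freed, to keep memory usage down between batches.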
            while self.config["garbage_collection"] and gc.collect() > 0:
                pass

        results = evaluator.get_results(
            name=name,
            token_labels_available=self.config["token_labels_available"])

        for key in results:
            print("%s_%s: %s" % (name, key, str(results[key])))
        evaluator.get_results_nice_print(
            name=name,
            token_labels_available=self.config["token_labels_available"])

        # Create html visualizations based on the test set predictions.
        if "test" in name and self.config["plot_predictions_html"]:
            save_name = (
                self.config["to_write_filename"].split("/")[-1]).split(".")[0]
            visualize.plot_predictions(
                all_sentences=all_batches,
                all_sentence_probs=all_sentence_probs,
                all_token_probs=all_token_probs,
                id2label_tok=evaluator.id2label_tok,
                html_name=self.config["path_plot_predictions_html"] +
                "/%s" % save_name,
                sent_binary=len(self.label2id_sent) == 2)
        return results