def __enter__(self):
    """Context-manager entry: for every supported data type, open the
    per-iteration train, eval and verbose-eval log files and register the
    handles in the corresponding per-data-type dicts.

    NOTE(review): files are opened unbuffered (``buffering=0``) in text
    mode with a ``u"w"`` mode string — a Python 2 idiom; under Python 3
    this call would raise ValueError. Closing is not done here, presumably
    handled by the matching ``__exit__``.
    """
    assert self.__log_dir is not None

    it_index = self.__get_iter_index()
    it_index_str = str(it_index)

    for dtype in self.__iter_supported_data_types():

        # Compose the three per-(iteration, data-type) log file paths.
        train_fp = join(
            self.__log_dir,
            Common.create_log_train_filename(iter_index=it_index,
                                             data_type=dtype))
        eval_fp = join(
            self.__log_dir,
            Common.create_log_eval_filename(iter_index=it_index,
                                            data_type=dtype))
        verbose_fp = join(
            self.__log_dir,
            self.__log_eval_iter_verbose_filename.format(iter=it_index_str,
                                                         dtype=dtype))

        # Ensure target locations exist before any file is opened.
        for fp in (train_fp, eval_fp, verbose_fp):
            create_dir_if_not_exists(fp)

        self.__train_iter_log_files[dtype] = open(train_fp, u"w",
                                                  buffering=0)
        self.__eval_iter_log_files[dtype] = open(eval_fp, u"w",
                                                 buffering=0)
        self.__eval_iter_verbose_log_files[dtype] = open(verbose_fp, u"w",
                                                         buffering=0)
# NOTE(review): the lines 'Esempio n. 2' / '0' below this point in the
# original paste were non-code scraping residue (an Italian "Example no. 2"
# separator between two unrelated snippets); kept here as a comment so the
# file parses.
    def __iter_files_per_iteration(self, result_type, folding_type):
        """Yield the relative file path(s) holding raw data for *result_type*.

        For per-iteration results one path is yielded per iteration; the
        iteration count is ``self.__cv_count`` under cross-validation and 1
        otherwise. Raises NotImplementedError for unsupported result types.
        """
        assert isinstance(folding_type, FoldingType)

        total_iters = self.__cv_count \
            if folding_type == FoldingType.CrossValidation else 1

        if result_type in (ResultType.F1,
                           ResultType.DSDiffF1Improvement,
                           ResultType.DSDiffAttImprovement):
            # Single experiment-level evaluation log.
            yield join(Common.log_dir, Common.log_test_eval_exp_filename)
        elif result_type in (ResultType.TrainingEpochTime,
                             ResultType.TrainingTotalTime,
                             ResultType.TrainingAccuracy,
                             ResultType.EpochsCount):
            for i in range(total_iters):
                yield join(Common.log_dir,
                           Common.create_log_train_filename(
                               data_type=DataType.Train, iter_index=i))
        elif result_type == ResultType.F1LastTrain:
            for i in range(total_iters):
                yield join(Common.log_dir,
                           Common.create_log_eval_filename(
                               data_type=DataType.Train, iter_index=i))
        elif result_type in (ResultType.F1LastTest,
                             ResultType.F1NeutLastTest,
                             ResultType.PrecNeutLastTest,
                             ResultType.RecallNeutLastTest):
            for i in range(total_iters):
                yield join(Common.log_dir,
                           Common.create_log_eval_filename(
                               data_type=DataType.Test, iter_index=i))
        elif result_type == ResultType.LearningRate:
            # Same config file each time, but yielded once per iteration —
            # presumably to stay aligned with per-iteration consumers.
            for _ in range(total_iters):
                yield join(Common.log_dir, Common.model_config_name)
        elif result_type in (ResultType.TrainingPosSamplesCount,
                             ResultType.TrainingNegSamplesCount,
                             ResultType.TrainingNeuSamplesCount):
            # returning back from the model dir to the experiment dir.
            yield u".."
        else:
            raise NotImplementedError("Not supported type: {}".format(result_type))