예제 #1
0
 def test_file_creation_correct(self):
     """
     Ensure that setting up the logger brings the log file into existence.
     """
     # The file must not exist before the logger is configured.
     self.assertFalse(os.path.exists(self.log_file),
                      'Log file already present?')
     setup_logger(log_file=self.log_file, name='test1')
     test_logger = get_logger('test1')
     # Emitting a record should create the file on disk.
     test_logger.info('Works?')
     self.assertTrue(os.path.exists(self.log_file),
                     'Failed to create log file.')
예제 #2
0
    def test_append_mode_on(self):
        """
        Verify that reconfiguring the logger with append=True preserves
        records written before the reconfiguration.
        """
        setup_logger(log_file=self.log_file, name='test2')
        log = get_logger('test2')
        log.info('Before reset')
        # Reconfigure in append mode; the earlier record must survive.
        setup_logger(log_file=self.log_file, append=True, name='test2')
        log.info('After reset')

        with open(self.log_file, 'r') as handle:
            contents = handle.readlines()

        self.assertEqual(len(contents), 2, 'Wrong number of lines in log file')
        self.assertIn('Before reset', contents[0])
        self.assertIn('After reset', contents[1])
예제 #3
0
def test_level(capsys):
    """
    Test that setting the level limits the output from the logger.

    Messages at WARNING and above must appear; INFO must be filtered out.
    Note: This relies on a pytest fixture (capsys) to capture the output.

    :param capsys: pytest fixture capturing stdout/stderr
    """
    # Use the context-manager form so the temporary directory is removed
    # deterministically, even if an assertion fails (the original leaked
    # it until garbage collection).
    with TemporaryDirectory() as temp_dir:
        log_file = os.path.join(temp_dir, 'some_log.txt')
        setup_logger(log_file=log_file, level='WARNING', append=True,
                     name='test4')
        logger = get_logger('test4')

        logger.error('An error message')
        logger.warning('A warning message')
        logger.info('An info message')  # below WARNING: must be suppressed

        out, _ = capsys.readouterr()
        output = out.splitlines()

        assert (len(output) == 2), 'Wrong number of lines in log'
        assert ('An error message' in output[0]), 'Error not captured by log'
        assert ('A warning message' in output[1]), 'Warning not captured by log'
예제 #4
0
"""
Miscellaneous functions and utilities used in fitting benchmarking.
"""

from __future__ import absolute_import, division, print_function

import glob
import os

from fitbenchmarking.utils.exceptions import NoDataError
from fitbenchmarking.utils.log import get_logger

LOGGER = get_logger()


def get_problem_files(data_dir):
    """
    Gets all the problem definition files from the specified problem
    set directory.

    :param data_dir: directory containing the problems
    :type data_dir: str 

    :return: array containing of paths to the problems
             e.g. In NIST we would have
             [low_difficulty/file1.txt, ..., ...]
    :rtype: list of str
    """

    test_data = glob.glob(data_dir + '/*.*')
    if test_data == []: