Example #1
    def test_get_logger_only_name_param(self):
        log = get_logger('test_stream')

        handler = log.handlers[0]

        assert isinstance(handler, logging.StreamHandler)
        assert logging.DEBUG == log.level
        assert logging.DEBUG == handler.level
Example #2
    def test_get_logger_returns_a_logger_which_is_opening_a_file(
            self, mock_open):
        out_file = 'test_log.log'
        log = get_logger('test_file', log_type='DISK', output_file=out_file)

        handler = log.handlers[0]

        assert isinstance(handler, logging.FileHandler)
        assert logging.DEBUG == log.level
        assert logging.DEBUG == handler.level
        mock_open.assert_called_once()
Example #3
    def test_logger_writes_to_stream(self):
        stream_name = 'test_stream'
        stream_message = 'test_message'

        log = get_logger(stream_name)

        test_stream = io.StringIO()

        # replace the real stream with a stream we have control over
        log.handlers[0].stream = test_stream

        log.debug(stream_message)

        stream_content = test_stream.getvalue()
        test_stream.close()

        assert stream_content is not None
        assert '' != stream_content

        # We are not checking the entire string, since the leading timestamp varies
        debug_message = f'{stream_name} - DEBUG - {stream_message}\n'
        assert debug_message == ' - '.join(stream_content.split(' - ')[1:])
Example #4
    def test_log_type_disk_without_output_param_raises_value_error(self):
        with pytest.raises(ValueError):
            get_logger('test_file', log_type='DISK')
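Taken together, Examples #1-4 pin down the behaviour of get_logger from mim.util.logs: it returns a DEBUG-level logger with a single StreamHandler by default, attaches a FileHandler when log_type='DISK', and raises ValueError if DISK logging is requested without an output_file. The following is only a minimal sketch consistent with those assertions, not the project's actual implementation; in particular the 'STREAM' default and the format string are assumptions.

import logging


def get_logger(name, log_type='STREAM', output_file=None):
    # Sketch inferred from the tests above; mim.util.logs.get_logger
    # may differ in details such as the formatter or handler setup.
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    if log_type == 'DISK':
        if output_file is None:
            raise ValueError('log_type DISK requires an output_file')
        handler = logging.FileHandler(output_file)
    else:
        handler = logging.StreamHandler()

    handler.setLevel(logging.DEBUG)
    # Example #3 splits the record on ' - ' and drops the leading
    # timestamp, which matches this format string.
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(handler)
    return logger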
Example #5
from abc import ABCMeta, abstractmethod

import numpy as np
import pandas as pd

from mim.util.logs import get_logger

log = get_logger('Cross Validation')


class ClassBalance(metaclass=ABCMeta):
    def balance(self, x, indices) -> np.ndarray:
        x = x.iloc[indices, :]
        positive = x[x['labels'] > 0].loc[:, 'index'].values
        negative = np.setdiff1d(indices, positive)

        balanced = self.sample(positive, negative)
        # ndarray.sort() sorts in place and returns None, so return a
        # sorted copy instead.
        return np.sort(balanced)

    def sample(self, positive, negative) -> np.ndarray:
        return np.append(positive, negative)


class NullBalance(ClassBalance):
    def balance(self, x, indices):
        return indices


class DownSample(ClassBalance):
    def sample(self, positive, negative) -> np.ndarray:
        if len(negative) >= len(positive):
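Example #5 is cut off inside DownSample.sample. A purely hypothetical completion (only the signature and the first condition appear above, and this reuses the np import and ClassBalance base class from the example) would down-sample the larger class so both end up the same size:

class DownSample(ClassBalance):
    def sample(self, positive, negative) -> np.ndarray:
        # Hypothetical completion: shrink whichever class is larger by
        # sampling without replacement, then combine as in the base class.
        if len(negative) >= len(positive):
            negative = np.random.choice(negative, len(positive), replace=False)
        else:
            positive = np.random.choice(positive, len(negative), replace=False)
        return np.append(positive, negative)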
Example #6
import re

import numpy as np
import pandas as pd
from sklearn.metrics import (confusion_matrix, precision_score, recall_score,
                             accuracy_score, f1_score)

from mim.util.logs import get_logger
from mim.util.util import ranksort
from mim.experiments.experiments import result_path
from mim.experiments.factory import experiment_from_name

log = get_logger("Presenter")


class Presenter:
    def __init__(self, name):
        self.results = dict()
        log.debug(f'Loading all experiments for {name}')

        self.experiments = experiment_from_name(name)
        for xp in self.experiments:
            path = result_path(xp)
            try:
                self.results[xp.name] = pd.read_pickle(path)
            except FileNotFoundError:
                log.debug(f"Test {xp.name} doesn't exist in path {path}")

    def describe(self, like='.*'):
        results = []
        for name, xp in self._results_that_match_pattern(like):
Example #7
File: run.py Project: Tipulidae/mim
import argparse
from time import time

import numpy as np
import pandas as pd
from tqdm import tqdm

from .factory import experiment_from_name
from mim.util.metadata import Metadata
from mim.util.logs import get_logger

log = get_logger("Run")


def run_experiments(experiments, continue_on_error=False):
    """
    Run all experiments in xps and save the results to disk.

    :param experiments: List of experiments to run
    :param continue_on_error: Whether to continue running experiments even
    if one should fail.
    """
    for experiment in experiments:
        try:
            pd.to_pickle(run_one_experiment(experiment),
                         experiment.result_path)
        except Exception as e:
            log.error(f'Something went wrong with task {experiment.name}! '
                      f'Oh no! :(')
            if not continue_on_error:
                raise e
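The docstring describes the contract of run_experiments; a minimal, hypothetical driver (the experiment name 'test' passed to experiment_from_name is an assumption, not taken from run.py) might look like:

if __name__ == '__main__':
    # Hypothetical usage: look up a set of experiments by name and run
    # them all, continuing past individual failures.
    xps = experiment_from_name('test')
    run_experiments(xps, continue_on_error=True)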