Example #1
def process():
    """Make sure the correct agents are running.

    Args:
        None

    Returns:
        None

    """
    # Get list of configured agents
    config = configuration.Config()
    agents = config.agents()

    # Process each agent
    for agent_dict in agents:
        # Get agent_name
        agent_name = agent_dict['agent_name']
        agentconfig = configuration.ConfigAgent(agent_name)

        # Start or stop the agent depending on whether it is enabled
        if agentconfig.agent_enabled():
            _check_when_enabled(agentconfig)

        else:
            # Shutdown agent if running
            _check_when_disabled(agentconfig)
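
The _check_when_enabled and _check_when_disabled helpers are not shown in this example. A minimal sketch of what they might do, assuming hypothetical daemon.query_running, daemon.start, and daemon.stop helpers and an agentconfig.agent_name() accessor (illustrative names, not confirmed by the source):

def _check_when_enabled(agentconfig):
    # Hypothetical: start the agent if it is not already running
    agent_name = agentconfig.agent_name()
    if not daemon.query_running(agent_name):
        daemon.start(agent_name)


def _check_when_disabled(agentconfig):
    # Hypothetical: stop the agent if it is currently running
    agent_name = agentconfig.agent_name()
    if daemon.query_running(agent_name):
        daemon.stop(agent_name)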
Example #2
    def __init__(self):
        """Method initializing the class."""
        # Define key variables
        app_name = 'garnet'
        levels = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL
        }

        # Get the logging directory
        config = configuration.Config()
        log_file = config.log_file()
        config_log_level = config.log_level()

        # Set logging level
        if config_log_level in levels:
            log_level = levels[config_log_level]
        else:
            log_level = levels['debug']

        # Create separate loggers for file and console output
        self.logger_file = logging.getLogger('%s_file' % app_name)
        self.logger_stdout = logging.getLogger('%s_console' % app_name)

        # Set logging levels for the file and stdout loggers
        self.logger_stdout.setLevel(log_level)
        self.logger_file.setLevel(log_level)

        # Create a file handler which logs even debug messages
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(log_level)

        # Create a console handler at the same log level
        stdout_handler = logging.StreamHandler()
        stdout_handler.setLevel(log_level)

        # Create a formatter and add it to the handlers
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        file_handler.setFormatter(formatter)
        stdout_handler.setFormatter(formatter)

        # Add each handler to its logger
        self.logger_file.addHandler(file_handler)
        self.logger_stdout.addHandler(stdout_handler)
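
The enclosing class is not shown here. Assuming it is a logger wrapper named GetLog (a hypothetical name), usage might look like this:

log = GetLog()                                # Hypothetical class name
log.logger_file.info('Agent started')         # Written to the log file
log.logger_stdout.warning('Low disk space')   # Written to the console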
Example #3
def process(agent_name, pollers):
    """Function where agents poll devices using multiprocessing.

    Args:
        agent_name: Agent name
        pollers: List of polling objects

    Returns:
        None

    """
    # Get configuration
    config = configuration.Config()
    configured_pool_size = config.agent_subprocesses()

    # Spawn processes only if we have pollers to process
    if pollers:
        # Process lock file
        lockfile = daemon.lock_file(agent_name)
        if os.path.exists(lockfile):
            # Return if lock file is present
            log_message = (
                'Agent lock file %s exists. Multiple agent daemons may be '
                'running, or the daemon may have died catastrophically in '
                'the past, in which case the lock file should be deleted. '
                'Exiting agent process. Will try again later.') % lockfile
            log.log2warning(1044, log_message)
            return
        else:
            # Create lockfile
            open(lockfile, 'a').close()

        # Create a pool of sub process resources
        pool_size = int(min(configured_pool_size, len(pollers)))
        with Pool(processes=pool_size) as pool:
            # Create sub processes from the pool
            pool.map(_process, pollers)

        # Remove the lock file now that all sub processes are done
        if os.path.exists(lockfile):
            os.remove(lockfile)
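
The _process worker passed to pool.map is not shown. A minimal sketch, assuming each poller object exposes a query() method (an assumption, not confirmed by this example):

def _process(poller):
    """Poll a single device (illustrative sketch only)."""
    # Hypothetical: each poller knows how to query its own device
    poller.query()

Because pool.map blocks until every poller has been handled, the lock file can safely be removed once the with-block exits.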
Example #4
def normalized_timestamp(timestamp=None):
    """Normalize timestamp to a multiple of 'interval' seconds.

    Args:
        timestamp: epoch timestamp in seconds

    Returns:
        value: Normalized value

    """
    # Initialize key variables
    interval = configuration.Config().interval()

    # Process data
    if timestamp is None:
        value = (int(time.time()) // interval) * interval
    else:
        value = (int(timestamp) // interval) * interval
    # Return
    return value
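
For example, assuming the configured interval is 300 seconds, the timestamp is rounded down to the nearest multiple of the interval:

>>> normalized_timestamp(1609459345)   # assuming interval == 300
1609459200
>>> normalized_timestamp(1609459200)   # already a multiple, so unchanged
1609459200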
Example #5
def validate_timestamp(timestamp):
    """Validate timestamp to be a multiple of 'interval' seconds.

    Args:
        timestamp: epoch timestamp in seconds

    Returns:
        valid: True if valid

    """
    # Initialize key variables
    valid = False
    interval = configuration.Config().interval()

    # Process data
    test = (int(timestamp) // interval) * interval
    if test == timestamp:
        valid = True

    # Return
    return valid
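
Continuing with an assumed interval of 300 seconds, only exact multiples of the interval validate:

>>> validate_timestamp(1609459200)   # multiple of 300
True
>>> validate_timestamp(1609459345)   # not a multiple of 300
False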
Example #6
import os
import sys
from os.path import join, exists
import numpy as np
import pandas as pd
from collections import defaultdict
from datetime import datetime
import copy

sys.path.append('.')
sys.path.append('src/.')
from utils import (
    load_models, load_splits, data_cleaning, parsers, hyperparameter_utils,
    sample_models_across_time, configuration, paths)
config = configuration.Config()

# def use_child_specific_wfst(fitting_dict):
#     # we should only get the results of the child-specific WFST when the child and the training data are the same
#     return((fitting_dict['training_split'] == 'Providence-Child') and (fitting_dict['training_dataset'] == fitting_dict['test_dataset']))


def call_single_across_time_model(sample_dict, all_tokens_phono,
                                  this_model_dict):
    '''
        Load the best-performing hyperparameter values for a given model
        and test dataset, run on all eval data, and save the results for
        each 6-month time period.
    '''

    # re-use the lambda bounds for Gamma
    optimal_gamma_value = [
        hyperparameter_utils.get_optimal_hyperparameter_value(
            this_model_dict, 'gamma')
    ]
    if config.fail_on_lambda_edge: