Example #1
def __init__(self, conf):
    self.conf = conf
    handler = TimedRotatingFileHandler(conf.log_file, date_format="%Y-%m-%d")
    handler.push_application()
    self.logger = Logger("Firetower-server")
    self.queue = redis_util.get_redis_conn(host=conf.redis_host, port=conf.redis_port, redis_db=conf.redis_db)
    self.classifier = classifier.Levenshtein()
    self.last_archive = None
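Note: most snippets here call push_application() without a matching pop_application(), which leaves the handler on logbook's global stack for the life of the process. When the handler should only apply to a scope, applicationbound() is the safer pattern; a minimal sketch (file name is illustrative):

from logbook import Logger, TimedRotatingFileHandler

handler = TimedRotatingFileHandler('app.log', date_format='%Y-%m-%d')
log = Logger('app')

# applicationbound() pushes the handler on entry and pops it on exit,
# so no manual pop_application() bookkeeping is needed.
with handler.applicationbound():
    log.info('written to app.log while the block is active')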
Example #2
def __init__(self):
    handler = TimedRotatingFileHandler('../logs/nodes_discovery.log')
    handler.push_application()
    self.logger = Logger(name='nodes discovery', level='info')
    self.node_hosts, self.nodes_port, self.file_sd_filename, self.nodes_file_backup_name, self.exclude_file, self.metric_filename, self.metric_store_path = self.get_conf()
    self.nodes = {}
    self.ips = {}
    self.nodes_list = []
    self.ips_list = []
Example #3
    def __init__(self):
        handler = TimedRotatingFileHandler('../logs/images_manager.log')
        handler.push_application()
        self.logger = Logger(name='Docker Images Manage Api', level=11)

        # Environment variables must be strings; assigning the float 1.39
        # here would raise a TypeError.
        os.environ['DOCKER_API_VERSION'] = '1.39'
        self.logger.debug(os.environ.get('DOCKER_API_VERSION'))
        self.docker_client = docker.from_env()
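Rather than mutating os.environ, docker-py can also pin the API version directly; a minimal sketch (same version string as above):

import docker

# from_env() honors DOCKER_API_VERSION, but the version can also be
# passed explicitly; 'auto' negotiates it with the daemon instead.
client = docker.from_env(version='1.39')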
Example #4
    def __init__(self):
        handler = TimedRotatingFileHandler('../logs/filesystem_recovery.log')
        handler.push_application()
        self.logger = Logger(name='filesystem recovery')

        self.path_lists, regex_express = self.get_conf()
        self.reg_express = re.compile(regex_express)
        self.tmp_path = os.path.join(BASE_DIR, 'data')
        self.file_title = '.walden'
Example #5
    def __init__(self):
        handler = TimedRotatingFileHandler('../logs/filesystem_recovery.log')
        handler.push_application()
        self.logger = Logger(name='filesystem recovery')

        self.node_hosts, self.nodes_port, self.file_sd_filename = self.get_conf()
        self.nodes = {}
        self.ips = {}
        self.nodes_list = []
        self.ips_list = []
Example #6
    def __init__(self):
        handler = TimedRotatingFileHandler('../logs/nodes_metrics.log')
        handler.push_application()
        self.logger = Logger(name='nodes metrics', level='info')

        self.nodes_filename, self.nodes_file_backup_name, self.exclude_file, self.url, self.query_all_ip, self.query_current_ip = self.get_conf()
        self.nodes = {}
        self.ips = {}
        self.nodes_list = []
        self.ips_list = []
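Examples #2, #5, and #6 all unpack a long positional tuple from get_conf(), so adding one config field means changing every call site in lockstep. A hedged alternative sketch (field names taken from the snippet above):

from collections import namedtuple

# Hypothetical container for what get_conf() currently returns as a bare tuple.
NodesConf = namedtuple('NodesConf', [
    'nodes_filename', 'nodes_file_backup_name', 'exclude_file',
    'url', 'query_all_ip', 'query_current_ip'])

# self.conf = NodesConf(*self.get_conf()); fields are then self.conf.url etc.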
Example #7
def __init__(self, conf):
    """conf: dict, yaml parameters."""
    self.conf = conf
    handler = TimedRotatingFileHandler(
        conf.log_file, date_format='%Y-%m-%d')
    handler.push_application()
    self.logger = Logger('Firetower-admin')
    self.queue = redis_util.get_redis_conn(
        host=conf.redis_host, port=conf.redis_port, redis_db=conf.redis_db
    )
    self.classifier = classifier.Levenshtein()
Example #8
def __init__(self):
    cp = ConfigParser.SafeConfigParser()
    with codecs.open('config/config.ini', 'r', encoding='utf-8') as f:
        cp.readfp(f)
        # self.dir = cp.get('xml', 'dir').strip()
        # self.xml_searchids_file = cp.get('files', 'xml_searchids_file').strip()
    self.pool = threadpool.ThreadPool(30)
    self.files_rights = {}
    self.lock = threading.Lock()
    self.q = Queue.Queue()
    handler = TimedRotatingFileHandler('../logs/get_rights.log')
    handler.push_application()
    self.logger = Logger(name='get rights')
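This snippet is Python 2: ConfigParser.SafeConfigParser, Queue.Queue, and the deprecated readfp(). A rough Python 3 equivalent of the config-loading part, for reference:

import configparser
import queue

cp = configparser.ConfigParser()
with open('config/config.ini', encoding='utf-8') as f:
    cp.read_file(f)  # readfp() was renamed to read_file()
q = queue.Queue()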
Example #9
def __init__(self, conf):
    self.conf = conf
    handler = TimedRotatingFileHandler(
        conf.log_file, date_format='%Y-%m-%d')
    handler.push_application()
    self.logger = Logger('Firetower-server')
    self.queue = redis_util.get_redis_conn(
        host=conf.redis_host, port=conf.redis_port, redis_db=conf.redis_db
    )
    self.classifiers = []
    for classifier_name in conf.class_order:
        self.classifiers.append(getattr(classifier, classifier_name)())
    self.last_archive = None
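Unlike Example #1's hard-coded Levenshtein classifier, this variant resolves each class named in conf.class_order via getattr on the classifier module. A toy sketch of the same lookup with a clearer failure mode (the error message is illustrative):

for classifier_name in conf.class_order:
    cls = getattr(classifier, classifier_name, None)
    if cls is None:
        raise ValueError('unknown classifier: %s' % classifier_name)
    self.classifiers.append(cls())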
Example #10
def setup():
    if not os.path.exists(LOG_FILE_DIR):
        os.mkdir(LOG_FILE_DIR)

    file_handler = TimedRotatingFileHandler(
        filename=LOG_FILE_PATH,
        backup_count=config.get_logging_backup_count())

    stream_handler = StreamHandler(sys.stdout, level='CRITICAL')
    stream_handler.format_string = '{record.level_name}: {record.channel}: {record.message}'

    file_handler.push_application()
    stream_handler.push_application()
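Caution: the last handler pushed sits on top of logbook's stack and, with the default bubble=False, stops records it handles from reaching handlers below, so here CRITICAL records go to stdout but never to the file. logbook's NestedSetup makes the stacking explicit; a minimal sketch:

import sys
from logbook import NestedSetup, StreamHandler, TimedRotatingFileHandler

setup = NestedSetup([
    TimedRotatingFileHandler('app.log', backup_count=7),
    # bubble=True lets CRITICAL records continue past the stream
    # handler down to the file handler instead of being swallowed.
    StreamHandler(sys.stdout, level='CRITICAL', bubble=True),
])
setup.push_application()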
Example #11
    def __init__(self):
        # logging.basicConfig(level=logging.INFO,
        #                     filename='./logs/nodes_discovery.log',
        #                     datefmt='%Y/%m/%d %H:%M:%S',
        #                     format='%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(module)s - %(message)s')
        # logger = logging.getLogger(__name__)
        handler = TimedRotatingFileHandler('../logs/nodes_discovery.log')
        handler.push_application()
        self.logger = Logger(name='nodes discovery')

        self.node_hosts, self.nodes_port, self.file_sd_filename = self.get_conf()
        # self.win_nodes_port, self.node_hosts, self.nodes_port, self.file_sd_filename = self.get_conf()
        self.nodes = {}
        self.ips = {}
Example #12
    def __init__(self, log_dir=BASE_DIR, log_name='log.log', backup_count=10, log_type=log_type, stdOutFlag=False):
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
            
        self.log_dir = log_dir
        self.backup_count = backup_count
        
        handler = TimedRotatingFileHandler(filename=os.path.join(self.log_dir, log_name),
                                           date_format='%Y-%m-%d',
                                           backup_count=self.backup_count)
        self.handler = handler
        if log_type is not None:
            handler.formatter = log_type
        handler.push_application()

        if not stdOutFlag:
            return
        
        handler_std = ColorizedStderrHandler(bubble=True)
        if log_type is not None:
            handler_std.formatter = log_type
        handler_std.push_application()
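Here log_type is assigned as the handler's formatter. logbook calls the formatter with (record, handler) and writes the returned string, so log_type is presumably a callable of that shape; a minimal sketch of a compatible one:

def log_type(record, handler):
    # logbook formatter contract: (record, handler) -> formatted string.
    return '[{}] {} {}: {}'.format(
        record.time, record.level_name, record.channel, record.message)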
Example #13
def register_handlers():
    try:
        sys_handler = TimedRotatingFileHandler(tpath + os.sep + 'sys_logs.txt',
                                               level="DEBUG", date_format='%Y-%m-%d', backup_count=30)
        sys_handler.push_application()

        info_handler = TimedRotatingFileHandler(tpath + os.sep + 'info_logs.txt',
                                                level="INFO", date_format='%Y-%m-%d', backup_count=30)
        info_handler.push_application()
        error_handler = TimedRotatingFileHandler(tpath + os.sep + 'error_logs.txt',
                                                 level="ERROR", date_format='%Y-%m-%d', backup_count=30)
        error_handler.push_application()

    except Exception:
        write_err("Handler registration failed, please check!")
        print("Handler registration failed, please check!")
        exit(1)
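As registered, an ERROR record is consumed by error_handler (pushed last, so consulted first; default bubble=False) and lands only in error_logs.txt. If higher-severity records should also appear in the broader logs, the handlers above the bottom one need bubble=True; a sketch for one of them:

error_handler = TimedRotatingFileHandler(tpath + os.sep + 'error_logs.txt',
                                         level="ERROR", date_format='%Y-%m-%d',
                                         backup_count=30,
                                         bubble=True)  # let ERROR records reach the info/sys logs too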
Example #14
if __name__ == "__main__":
    url = 'https://prom.demo.com/prome/api/v1/query?'

    query_current_ip = 'query=node_virtual_type'
    # query_current_ip = 'query=avg({__name__=~"node_disk_read_time_(ms|seconds_total)",job="nodes",device=~"(dm-|sd[a-z]).*$"}) by (instance_ip)'
    # query_current_ip = 'query=node_filesystem_inode_used'
    # query_all_ip = 'query=avg(up{job=~"nodes"}) by (instance_ip)'
    query_all_ip = 'query=up{job="nodes"}'

    handler = TimedRotatingFileHandler('../logs/test.log')
    handler.push_application()
    logger = Logger(name='test')
    # self.logger.add('../logs/test.log', rotation="1 day", compression="zip")

    nodes_metrics = NodesMetrics()
    #get_lost_nodes()
    #get_lost_nodes_file()
    lost_nodes_by_discovery, failed_hosts = nodes_metrics.get_lost_nodes_discovery()
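The query strings above target Prometheus's HTTP query API; a sketch of how such a query is typically issued and unpacked (response shape is the standard api/v1/query vector result):

import requests

resp = requests.get(url + query_all_ip)  # GET .../api/v1/query?query=up{job="nodes"}
for item in resp.json().get('data', {}).get('result', []):
    logger.info(item['metric'].get('instance_ip'))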
Example #15
@author: Amosun Sunday
'''
import requests
import service_config
import json
from logbook import Logger
from logbook import TimedRotatingFileHandler
import importlib
import utility.utility_functions as util_func
import ast
from multiprocessing import Pool

logger = Logger('ProcessControllerLogger')

log_handler = TimedRotatingFileHandler('ProcessController.log',  date_format='%Y-%m-%d')
log_handler.push_application()
headers = {'Content-Type': 'application/json'}
url_process = service_config.HTTP_LONG_PROCESS_REQUEST
url_process_task = service_config.HTTP_LONG_PROCESS_TASK_LIST_REQUEST
process_count = service_config.PROCESS_COUNT




def load_module(full_module_path):
    module = importlib.import_module(full_module_path)
    return module
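load_module is a thin wrapper around importlib.import_module; combined with getattr it can resolve a dotted class path, which is presumably what the truncated load_class below does. A sketch under that assumption:

def load_class(full_class_string):
    # Assumed behavior: 'package.module.ClassName' -> the ClassName object.
    module_path, class_name = full_class_string.rsplit('.', 1)
    return getattr(load_module(module_path), class_name)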


def load_class(full_class_string):
    """
Example #16
def main(run_name, settings, exec_mode):
    # File path and test data path
    fp = os.path.dirname(__file__) 
    tdp = join(fp,"..", "tests", "test_data")

    composition = settings.get("composition_file", join(tdp,"composition.fa"))
    coverage = settings.get("coverage_file", join(tdp,"coverage"))
    result_path = settings.get("results_path_base", join(fp,"..","tmp_out_test"))
    kmer_lengths = settings.get("kmer_lengths", [4])
    pcas = settings.get("total_percentage_pca", [80])
    thresholds = settings.get("length_threshold", [1000])
    cv_types = settings.get("covariance_type", ["full"])
    clusters = settings.get("clusters", "2,100,2")
    max_n_processors = settings.get("max_n_processors", 1)
    email = settings.get("email", None)

    log_path = settings.get("log_path", 
                            join(os.path.expanduser("~"),"log","concoctr.log"))
    handler = TimedRotatingFileHandler(log_path)
    logger = Logger(run_name)
    handler.push_application()

    result_rows = []
    indx = []
    
    con_ps = []

    if exec_mode == 'drmaa':
        s = drmaa.Session()
        s.initialize()

    result_dir = os.path.join(result_path, run_name)
    os.mkdir(result_dir)
    slurm_dir = os.path.join(result_dir, 'slurm')
    os.mkdir(slurm_dir)
    sbatch_dir = os.path.join(result_dir, 'sbatch')
    os.mkdir(sbatch_dir)
    concoct_dir = os.path.join(result_dir, 'concoct_output')
    os.mkdir(concoct_dir)

    for k in kmer_lengths:
        for pca in pcas:
            for thr in thresholds:
                for cv in cv_types:
                    job_name = "_".join(map(str, [k, pca, thr, cv]))
                    con_p = ConcoctParams(composition,
                                          coverage,
                                          kmer_length = k,
                                          total_percentage_pca= pca,
                                          length_threshold = thr,
                                          covariance_type = cv,
                                          basename = os.path.join(concoct_dir, job_name) + "/",
                                          max_n_processors = max_n_processors,
                                          clusters = clusters)
                    con_ps.append(con_p)

                    cr = ConcoctR()
                    if (k > 9):
                        # Throw in some extra memory
                        n_cores = 4
                    else:
                        n_cores = 1
                    if exec_mode == 'drmaa':
                        jt = s.createJobTemplate()
                        jt.nativeSpecification = '-A b2010008 -p core -n {} -t 7-00:00:00'.format(n_cores)
                        jt.email = email
                        jt.workingDirectory = result_path
                        jobid = cr.run_concoct(con_p, drmaa_s=s, drmaa_jt=jt)
                    elif exec_mode == 'sbatch':
                        script_file = os.path.join(result_dir, 'sbatch', job_name)
                        sbatch_params = ['-A b2010008', 
                                         '-p core', 
                                         '-n {}'.format(n_cores), 
                                         '-t 7-00:00:00', 
                                         "-J {}".format(job_name),
                                         "-o {}".format(os.path.join(result_dir, 'slurm', 'slurm-%j.out'))]
                        cr.generate_sbatch_script(con_p, sbatch_params, script_file)
                        jobid = cr.run_concoct(con_p, sbatch_script = script_file)
                    if jobid:
                        result_rows.append(con_p.options)
                        indx.append(jobid)
                        logger.info("Submitted jobid {0}".format(jobid))

    results_df = p.DataFrame(result_rows, index=indx)
    results_df.to_csv(os.path.join(result_path, run_name + "_all_results.csv"))

    handler.pop_application()
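This is the only example that pairs push_application() with pop_application(). A try/finally (or the applicationbound() pattern shown after Example #1) guarantees the pop even if a submission raises; a minimal sketch:

handler = TimedRotatingFileHandler(log_path)
handler.push_application()
try:
    pass  # submit jobs, collect results
finally:
    handler.pop_application()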
Example #17
    def __init__(self):
        handler = TimedRotatingFileHandler('../logs/delete_files.log')
        handler.push_application()
        self.logger = Logger(name='delete files')

        self.path_lists = self.get_conf()
Example #18
import simplejson as json

from logbook import Logger
from logbook import TimedRotatingFileHandler

import config
import redis_util

handler = TimedRotatingFileHandler('firetower-client.log',
        date_format='%Y-%m-%d')
handler.push_application()
log = Logger('Firetower-client')

class Client(object):
    """Basic Firetower Client."""

    def __init__(self, conf):
        self.conf = config.Config(conf)
        self.redis_host = self.conf.redis_host
        self.redis_port = self.conf.redis_port
        self.redis_db = self.conf.redis_db
        self.queue_key = self.conf.queue_key
        self.queue = redis_util.get_redis_conn(
                host=self.redis_host,
                port=self.redis_port,
                redis_db=self.redis_db)

    def push_event(self, event):
        self.queue.lpush(self.queue_key, event)

    def emit(self, event):
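A usage sketch for this client; the config path and event shape are assumptions, since only the Redis fields are visible here:

client = Client('firetower.yaml')  # hypothetical config file
client.push_event(json.dumps({'sig': 'disk full', 'hostname': 'web01'}))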
Example #19
def get_logger():
    # Strip the directory and extension from the script name; the original
    # [:-4] slice dropped one character too many for a '.py' suffix.
    log_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
    handler = TimedRotatingFileHandler('../logs/' + log_name + '.log')
    handler.push_application()
    return Logger(name=log_name, level=11)
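Several snippets pass level=11; that is logbook's numeric INFO level (logbook levels run TRACE=9 through CRITICAL=15). The named constant reads better; a sketch:

from logbook import INFO, Logger

logger = Logger(name='test', level=INFO)  # INFO == 11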