def log_too_many_requests_and_exit(period_start, period_end):
    get_logger().error(
        f'{MAX_REQUESTS} requests received from the period {period_start} to {period_end}.'
        + ' Some requests may not have been downloaded properly as a result.'
        + ' The period size should be decreased to ensure all requests are downloaded.')
    exit(1)
def run_query(client, start_timestamp, end_timestamp):
    response = client.start_query(
        logGroupName='matomo',
        startTime=int(start_timestamp.timestamp() * 1000),
        endTime=int(end_timestamp.timestamp() * 1000),
        queryString="""fields @message
            | sort @timestamp asc
            | filter @logStream like /matomo-nginx/
            | filter status >= '500'
            | filter user_agent != 'ELB-HealthChecker/2.0'
            | filter user_agent != 'Smokey Test'
            | filter path like /idsite=1/
            | filter path like /rec=1/""",
        limit=MAX_REQUESTS)
    queryId = response['queryId']
    status = 'Running'
    seconds_slept = 0
    while status != 'Complete':
        time.sleep(1)
        seconds_slept += 1
        if seconds_slept % 30 == 0:
            get_logger().debug(
                f'Still waiting for a request. Spent {seconds_slept} seconds waiting so far.')
        response = client.get_query_results(queryId=queryId)
        try:
            status = response['status']
        except KeyError:
            get_logger().exception(
                f"'status' not found in response: {response.keys()}")
            raise
    return start_timestamp, end_timestamp, response
def get_number_of_days():
    try:
        return int(os.getenv(NUM_OF_DAYS))
    except ValueError:
        get_logger().exception(
            'NUM_OF_DAYS has an invalid format. Please specify an integer.')
        exit(1)
def get_period_width():
    try:
        return int(os.getenv(PERIOD_WIDTH_IN_SECONDS, 60 * 5))
    except ValueError:
        get_logger().exception(
            'PERIOD_WIDTH_IN_SECONDS has an invalid format. Please specify an integer.')
        exit(1)
def get_start_datetime():
    try:
        start_date_env = os.getenv(START_DATE)
        if start_date_env == 'yesterday':
            start_of_day = datetime.combine(date.today(), datetime_time())
            return start_of_day - timedelta(days=1)
        return datetime.strptime(start_date_env, DATE_FORMAT)
    except ValueError:
        get_logger().exception(
            f'START_DATE has an invalid format. Please follow the format "{DATE_FORMAT}"'
            + ' or use the keyword "yesterday".')
        exit(1)
def write_requests_to_file(requests, output_filename):
    total_written = 0
    if os.path.exists(f'/app/logs/{output_filename}'):
        get_logger().info('Log file already exists - deleting before write')
        os.remove(f'/app/logs/{output_filename}')
    with open(f'/app/logs/{output_filename}', 'a+') as f:
        for request in sorted(
                requests,
                key=lambda request: re.findall(r'msec": "(.+?)"', request)[0]):
            total_written += 1
            f.write(request + '\n')
    return total_written
def extract_requests_from_response(response, period_start, period_end):
    messages = []
    # The query limit applies to the number of result rows, so check the row
    # count once rather than the number of fields in each row.
    if len(response['results']) >= MAX_REQUESTS:
        log_too_many_requests_and_exit(period_start, period_end)
    for message_list in response['results']:
        for message in message_list:
            if message['field'] == '@message':
                messages.append(message['value'])
                break
    get_logger().debug(
        f'extracted {len(messages)} requests for the period {period_start} to {period_end}')
    return messages
def __init__(self, **kwargs):
    Compression.__init__(self)
    self.log = _helpers.get_logger(__name__)
    self.source_path = kwargs.get('source_path', None)
    self.dest_dir = kwargs.get('dest_dir',
                               '%s/' % os.path.dirname(self.source_path))
    self.compressed_path = None
    self.remove_source = kwargs.get('remove_source', False)
    self.compress_success = None
    self.decompress_success = None
    self.decompressed_path = None

    # validations
    assert self.source_path, 'source_path missing'
    assert isinstance(self.remove_source, bool), \
        "remove_source must be True/False"
    assert os.path.exists(self.source_path), \
        '%s does not exist, skipping compression' % self.source_path

    # real paths please
    self.source_path = os.path.realpath(self.source_path)
    self.dest_dir = os.path.realpath(self.dest_dir)
    _helpers.ensure_dir(self.dest_dir)
def __init__(self):
    """
    Checks the ACTIVE alerts in the DB and processes any that have updates.
    """
    super().__init__()
    self._futures = None
    self._logger = get_logger(
        logger_name="ACTIVE_LISTENER",
        file_name=config.ACTIVE_LISTENER_LOG_PATH,
    )
    self.active_collection = get_collection(
        connection_uri=config.MONGO_CONNECTION_URI,
        db_name=config.ACTIVE_ALERTS_DB,
        collection_name=config.ACTIVE_ALERTS_COLLECTION,
    )
    self.thread_pool = ThreadPoolExecutor(
        max_workers=config.ACTIVE_LISTENER_THREAD_POOL_SIZE)
    req = Request(connect_timeout=5)
    self.bot = Bot(
        token=config.TG_TOKEN,
        request=req,
    )
    self.api_client = APIClient(
        logger_name="ACTIVE_API_CLIENT",
        logger_path=config.ACTIVE_API_CLIENT_LOG_PATH,
    )
    self._logger.info("ActiveListener created")
def __init__(self, **kwargs):
    Compression.__init__(self)
    self.log = _helpers.get_logger(__name__)
    self.source_path = kwargs.get('source_path', None)
    self.dest_dir = kwargs.get('dest_dir',
                               '%s/' % os.path.dirname(self.source_path))
    self.compressed_path = None
    self.remove_source = kwargs.get('remove_source', False)
    self.compress_success = None
    self.decompress_success = None
    self.decompressed_path = None

    # validations
    assert self.source_path, 'source_path missing'
    assert isinstance(self.remove_source, bool), \
        "remove_source must be True/False"
    assert os.path.exists(self.source_path), \
        '%s does not exist, skipping compression' % self.source_path

    # real paths please
    self.source_path = os.path.realpath(self.source_path)
    self.dest_dir = os.path.realpath(self.dest_dir)
    _helpers.ensure_dir(self.dest_dir)
def __init__(self):
    self.log = _helpers.get_logger(__name__)
    try:
        global tarfile
        import tarfile
    except ImportError:
        self.log.warning(
            "tarfile module missing, gzip compression not available")
        return None
def __init__(self, **kwargs):
    self.log = _helpers.get_logger(__name__)
    Compression.__init__(self)
    self.stream = None
    self.output_path = kwargs.get('output_path', None)
    assert not os.path.exists(self.output_path), \
        '%s already exists!' % self.output_path
    _helpers.ensure_dir(os.path.dirname(self.output_path))
def __init__(self):
    self.log = _helpers.get_logger(__name__)
    Compression.__init__(self)
    try:
        global gzip
        import gzip
    except ImportError:
        self.log.warning(
            "gzip module missing, gzip compression not available")
        return None
def __init__(self, config: MutableMapping):
    """Initialize our Imap instance with our toml config"""
    self.config = config
    self.logger = get_logger(__name__, config)
    self.hostname = self.config["imap"]["hostname"]
    self.username = self.config["imap"]["username"]
    self.password = self.config["imap"]["password"]
    self.folder = self.config["imap"].get("folder", "INBOX")
    self.timeout = int(self.config["imap"].get("timeout", "300"))
    self._client = None
def __init__(self):
    self.log = _helpers.get_logger(__name__)
    try:
        global tarfile
        import tarfile
    except ImportError:
        self.log.warning(
            "tarfile module missing, gzip compression not available")
        return None
def main():
    args = get_args()
    logpath = os.path.join(paths.root_dir, paths.logpath, 'new_project.log')
    print(logpath)
    logger = h.get_logger(logpath)
    logger.info('Creating folders for {} project'.format(args.project_name))
    create_folders(args.project_name, logger)
    logger.info('New project created')
def __init__(self):
    self.log = _helpers.get_logger(__name__)
    Compression.__init__(self)
    try:
        global gzip
        import gzip
    except ImportError:
        self.log.warning(
            "gzip module missing, gzip compression not available")
        return None
def download_failed_requests(client, start_datetime, end_datetime):
    period_width = get_period_width()
    output_filename = get_output_filename(start_datetime, end_datetime)
    period_start = datetime.utcfromtimestamp(
        start_datetime.replace(tzinfo=timezone.utc).timestamp())
    total_written = 0
    futures_list = []
    requests = []
    get_logger().info(f'Using {NUM_THREADS} threads to download event logs.')
    with futures.ThreadPoolExecutor(NUM_THREADS) as executor:
        while period_start <= end_datetime:
            period_end = period_start + timedelta(seconds=period_width,
                                                  microseconds=-1)
            get_logger().debug(
                f'Scheduling query from {period_start} to {period_end}')
            futures_list.append(
                executor.submit(run_query, client, period_start, period_end))
            period_start += timedelta(seconds=period_width)
        for future in futures.as_completed(futures_list):
            period_start, period_end, response = future.result()
            requests += extract_requests_from_response(response, period_start,
                                                       period_end)
    total_written = write_requests_to_file(requests, output_filename)
    get_logger().info(
        f'Wrote {total_written} requests to file "{output_filename}".')
    return output_filename
def replay_events(events_log_file):
    try:
        subprocess.run(
            [
                'python2', '-u', '/app/log-analytics/import_logs.py',
                f'--url={os.getenv(MATOMO_URL)}',
                f'--token-auth={os.getenv(MATOMO_API_TOKEN)}',
                '--log-format-name=nginx_json', '--replay-tracking',
                '--enable-static', '--enable-bots', '--enable-reverse-dns',
                f'./{events_log_file}'
            ],
            check=True,
            stdout=sys.stderr)
    except subprocess.CalledProcessError:
        get_logger().error("Failed to replay events")
        raise
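# A minimal sketch (not the project's actual entry point) of how the helpers above
# might be wired together; the boto3 client creation, the end-of-range arithmetic,
# and the call order are assumptions for illustration only.
import boto3
from datetime import timedelta


def main():
    client = boto3.client('logs')
    start_datetime = get_start_datetime()
    end_datetime = (start_datetime + timedelta(days=get_number_of_days())
                    - timedelta(microseconds=1))
    events_log_file = download_failed_requests(client, start_datetime, end_datetime)
    replay_events(events_log_file)


if __name__ == '__main__':
    main()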
def Connector(p_id, log_queue, control_queue, con_str, multiproc_queue):
    """Gets data for one channel from karabo and forwards it to the
    multiprocessing queue.
    """
    log = helpers.get_logger("Connector-{}".format(p_id), log_queue)
    log.info("Connector #{} connecting to Karabo bridge with: {}".format(
        p_id, con_str))

    krb_client = KaraboBridge(con_str)

    control_signal = None
    while control_signal is None:
        try:
            control_signal = control_queue.get_nowait()
            log.info("Received stopping signal. Abort.")
            break
        except Empty:
            pass

        try:
            data = krb_client.next()
            multiproc_queue.put((p_id, data))
        except Exception:
            log.error("Error in krb_client", exc_info=True)
def get_logging_object(log: bool, logFile: str, logObject):
    """
    Returns a logger object.

    :param log: Boolean that determines whether logging is enabled or not.
    :param logFile: File to log to.
    :param logObject: Log object to return if there is one already specified.
    :return: Logger object or None.
    """
    if logObject:
        return logObject
    else:
        if not log:
            return None
        else:
            return get_logger(logFile, logFile)
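# A hypothetical usage sketch for get_logging_object (the variable names and file
# name below are illustrative, not from the source): an already-created logger is
# passed straight through, disabled logging yields None, and otherwise a new file
# logger is created via get_logger.
fresh = get_logging_object(log=True, logFile="run.log", logObject=None)      # new logger
reused = get_logging_object(log=False, logFile="run.log", logObject=fresh)   # returns fresh
disabled = get_logging_object(log=False, logFile="run.log", logObject=None)  # returns None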
def __init__(self, log_queue, multiproc_queue, run_event, lock, n_connectors,
             sync_buffer_size):
    self.log = helpers.get_logger("Synchronizing", log_queue)
    self.multiproc_queue = multiproc_queue
    self.all_data = {}
    self.lock = lock
    self.n_connectors = n_connectors
    self.sync_buffer = collections.deque(maxlen=sync_buffer_size)
    self._run_event = run_event

    threading.Thread.__init__(self)
def reset(ctx, debug=False):
    """Resets the database. All sites and their related data are wiped out."""
    global logger
    logger = helpers.get_logger(debug)
    client = get_api_client()
    for site in client.sites.get():
        logger.info("Deleting site: {}".format(site))
        _reset_resources(client, site['id'])
        _reset_resources(client, site['id'])
        _reset_resources(client, site['id'])
        try:
            client.sites(site['id']).delete()
        except HttpClientError as e:
            logger.error(e)
            logger.error(e.response.json())
            sys.exit(-1)
def __init__(self):
    self.logger = get_logger('dashboard')
    self.server = SocketServer.UDPServer(
        (settings.DASHBOARD_HOST, settings.DASHBOARD_PORT),
        self.handle_request)
    self.server.timeout = settings.DASHBOARD_REQUEST_TIMEOUT
    self.raw_telem_time = 0
    self.raw_telem_bat = 0
    self.raw_telem_temp = 0
    self.raw_telem_photo = 0
    self.__init_main()
    self.__init_motor()
    self.__init_light()
    self.__init_video()
    self.__init_telem()
def __init__(self, config: MutableMapping):
    """Initializes our RulesProcessor instance from toml config"""
    self.config = config
    self.logger = get_logger(__name__, config)
    self.rules = []
    # build list of rules
    for rule_name in self.config["rules"]:
        rule_config = self.config["rules"][rule_name]
        rule = Rule(
            name=rule_name,
            from_=rule_config.get("from", None),
            subject=rule_config.get("subject", None),
            priority=rule_config.get("priority", None),
            token=rule_config.get("token", None),
            extras=rule_config.get("extras", None),
        )
        self.rules.append(rule)
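# A hedged sketch of the [rules] layout this constructor appears to expect, inferred
# from the .get() calls above; the rule name and values are illustrative, and a real
# config would also need whatever keys get_logger() reads.
import tomllib  # Python 3.11+ stdlib; the project may load its TOML differently

sample = tomllib.loads("""
[rules.github]
from = "notifications@github.com"
subject = "build failed"
priority = 1
token = "example-token"
""")
# RulesProcessor(sample) would then build a single Rule named "github".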
def __init__(self):
    """
    Listens to the queue in the DB and processes any entries that appear.
    """
    super().__init__()
    self._futures = None
    self._logger = get_logger(
        logger_name="QUEUE_LISTENER",
        file_name=config.QUEUE_LISTENER_LOG_PATH,
    )
    self.queue_collection = get_collection(
        connection_uri=config.MONGO_CONNECTION_URI,
        db_name=config.QUEUE_ALERT_DB,
        collection_name=config.QUEUE_COLLECTION,
    )
    self.frozen_collection = get_collection(
        connection_uri=config.MONGO_CONNECTION_URI,
        db_name=config.FROZEN_ALERT_DB,
        collection_name=config.FROZEN_ALERT_COLLECTION,
    )
    self.active_collection = get_collection(
        connection_uri=config.MONGO_CONNECTION_URI,
        db_name=config.ACTIVE_ALERTS_DB,
        collection_name=config.ACTIVE_ALERTS_COLLECTION,
    )
    self.airline_designator_collection = get_collection(
        connection_uri=config.MONGO_CONNECTION_URI,
        db_name=config.AIRLINE_DESIGNATOR_DB,
        collection_name=config.AIRLINE_DESIGNATOR_COLLECTION,
    )
    self.thread_pool = ThreadPoolExecutor(
        max_workers=config.QUEUE_LISTENER_THREAD_POOL_SIZE)
    req = Request(connect_timeout=5)
    self.bot = Bot(
        token=config.TG_TOKEN,
        request=req,
    )
    self.api_client = APIClient(
        logger_name="QUEUE_API_CLIENT",
        logger_path=config.QUEUE_API_CLIENT_LOG_PATH,
    )
    self._logger.info("QueueListener created")
def loopbacks(ctx, site, filename="data/services.yml", debug=False):
    """Read service definition for loopbacks service and add data to the backend."""
    global logger
    logger = helpers.get_logger(debug)
    client = get_api_client()
    with open(filename, 'r') as f:
        service = yaml.load(f.read())['loopbacks']
    logger.debug(service)
    site_id = helpers.get_site_id(client, site)
    network = _create_networks(client, service['network_ranges'], 'loopback',
                               site_id)['loopbacks']
    ifaces = []
    for device in client.sites(site_id).devices.get():
        ifaces.append(_create_interface(client, site_id, device['id'], 'lo0',
                                        'loopbacks', 'loopback', 'loopback',
                                        network))
    return ifaces
def create(ctx, name, description="", debug=False):
    """Create a site."""
    global logger
    logger = helpers.get_logger(debug)
    client = get_api_client()
    site = client.sites.get(name=name)
    site_data = {
        'name': name,
        'description': description,
    }
    if not site:
        site = client.sites.post(site_data)
    else:
        site = client.sites(site[0]['id']).put(site_data)
    logger.info(site)
    return site
def ipfabric(ctx, site, filename="data/services.yml", debug=False):
    """Read service definition for ipfabric service and add data to the backend."""
    global logger
    logger = helpers.get_logger(debug)
    client = get_api_client()
    with open(filename, 'r') as f:
        service = yaml.load(f.read())['ipfabric']
    logger.debug(service)
    site_id = helpers.get_site_id(client, site)
    supernet = _create_networks(client, service['network_ranges'], 'ipfabric',
                                site_id)['fabric_links']
    for link in service['definition']['links']:
        network = None
        logger.debug("Processing {}".format(link))
        left_id = helpers.get_host_id(client, site_id, link['left_device'])
        right_id = helpers.get_host_id(client, site_id, link['right_device'])
        logger.debug("Found hosts {} and {}".format(left_id, right_id))
        left_interface = _create_interface(client, site_id, left_id,
                                           link['left_iface'], 'fabric_links',
                                           link['right_device'],
                                           link['right_iface'])
        right_interface = _create_interface(client, site_id, right_id,
                                            link['right_iface'],
                                            'fabric_links',
                                            link['left_device'],
                                            link['left_iface'])
        if not left_interface['networks']:
            network = _create_subnet(client, site_id, supernet, 127)
            left_interface = _create_interface(client, site_id, left_id,
                                               link['left_iface'],
                                               'fabric_links',
                                               link['right_device'],
                                               link['right_iface'], network)
        if not right_interface['networks']:
            network = client.sites(site_id).networks(
                left_interface['networks'][0]).get()
            right_interface = _create_interface(client, site_id, right_id,
                                                link['right_iface'],
                                                'fabric_links',
                                                link['left_device'],
                                                link['left_iface'], network)
    return network
def __init__(self):
    self.logger = get_logger('server')
    gpio.init()
    self.serial_tty = serial.Serial(settings.TTY_ADDRESS,
                                    baudrate=settings.TTY_BAUDRATE,
                                    timeout=settings.TTY_TIMEOUT)
    self.motor_left = Motor("\xAA\x0A\x06", self.serial_tty, 'left', "\x0B",
                            "\x0A", "\x09", "\x08", self.logger)
    self.motor_right = Motor("\xAA\x0A\x07", self.serial_tty, 'right', "\x0F",
                             "\x0E", "\x0D", "\x0C", self.logger)
    self.light = Light(getattr(port, settings.LIGHT_PORT))
    self.video = Video()
    self.i2c_controller = Controller(settings.I2C_ADDRESS, settings.I2C_PORT)
    self.server = SocketServer.UDPServer(
        (settings.SERVER_HOST, settings.SERVER_PORT), self.handle_request)
    self.server.timeout = settings.MOTOR_COMMAND_TIMEOUT
    self.sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                socket.IPPROTO_UDP)
    self.sender.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    self.sender.bind(
        (settings.SOCKET_CLIENT_HOST, settings.SOCKET_CLIENT_PORT))
    self.is_running = False
    self.last_handle_request_time = 0
    self.last_light_value = 0
    self.last_light_value_time = time()
    self.last_video_value = 0
    self.last_video_value_time = time()
    # freshness date, telemetry data, temperature, photo sensor
    self.telem_values = [0, 0, 0, 0]
def add_atribbutes(ctx, site, filename="data/attributes.yml", debug=False):
    """Add attributes to a site."""
    global logger
    logger = helpers.get_logger(debug)
    client = get_api_client()
    with open(filename, 'r') as f:
        attributes = yaml.load(f.read())['attributes']
    site_id = helpers.get_site_id(client, site)
    result = {}
    for resource, attrs in attributes.items():
        for attr_name, attr_options in attrs.items():
            attribute = client.sites(site_id).attributes.get(
                resource=resource, name=attr_name)
            options = {
                'name': attr_name,
                'resource_name': resource,
            }
            options.update(attr_options)
            try:
                if not attribute:
                    logger.info("Creating attribute: {}".format(options))
                    attribute = client.sites(site_id).attributes.post(options)
                else:
                    logger.info("Updating attribute: {}".format(attribute))
                    attribute = client.sites(site_id).attributes(
                        attribute[0]['id']).put(options)
            except HttpClientError as e:
                logger.error(e)
                logger.error(e.response.json())
                sys.exit(-1)
            result[attr_name] = attribute
    return result
def add_devices(ctx, site, filename="data/devices.yml", debug=False):
    """Add devices to a site."""
    global logger
    logger = helpers.get_logger(debug)
    client = get_api_client()
    with open(filename, 'r') as f:
        devices = yaml.load(f.read())['devices']
    site_id = helpers.get_site_id(client, site)
    result = {}
    for host, data in devices.items():
        device = client.sites(site_id).devices.get(hostname=host)
        data = {'attributes': data, 'hostname': host}
        if not device:
            logger.info("Creating device: {}".format(data))
            device = client.sites(site_id).devices.post(data)
        else:
            logger.info("Updating device: {}".format(device))
            device = client.sites(site_id).devices(device[0]['id']).put(data)
        result[host] = device
    return result
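# An illustrative guess at the devices.yml shape this command consumes, based on the
# devices.items() loop above (hostnames mapping to attribute dicts); the hostnames
# and attribute values here are made up.
import yaml

example = yaml.safe_load("""
devices:
  leaf-01:
    role: leaf
    site: lab
  spine-01:
    role: spine
    site: lab
""")
# add_devices() would then create or update "leaf-01" and "spine-01", storing the
# mappings above under each device's "attributes".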
import logging
import logging.handlers
import multiprocessing
import os
import progressbar
import re
import subprocess
import sys
import time

# imports required by impersonate_user() below
import httplib2
from oauth2client.client import SignedJwtAssertionCredentials

from get_users import get_users
from settings import *
from helpers import BackupBase, timeit, get_logger

SYSTEM = "gmail"
logger = get_logger(SYSTEM)


class GmailBackup(BackupBase):
    def __init__(self, user_email):
        super(GmailBackup, self).__init__(SYSTEM, user_email)

    def impersonate_user(self, scope='https://mail.google.com/'):
        self.logger.debug("Impersonating user")
        with open(SERVICE_ACCOUNT_PKCS12_FILE_PATH, 'rb') as f:
            key = f.read()
        credentials = SignedJwtAssertionCredentials(SERVICE_ACCOUNT_EMAIL,
                                                    key,
                                                    scope=scope,
                                                    sub=self.user_email)
        http = httplib2.Http(".cache")
import helpers
import slugify

logger = helpers.get_logger(__name__)
args = helpers.get_args()


def auto_slugify(context):
    if args.action != "generate":
        return context

    for page in context["pages"]:
        if "slug" in page:
            continue
        page["slug"] = slugify.slugify(page["title"])
        logger.debug("Adding `{}` as slug for `{}` page..."
                     .format(page["slug"], page["title"]))

    return context


def inject_middlewares(middlewares):
    middlewares.add("auto_slugify", auto_slugify)
    return middlewares
import urllib
import zlib
import requests
import six

from .webtrader import NotLoginError
from .webtrader import WebTrader
from trade.util import *
import time
import helpers
import os

if six.PY2:
    import urllib2

log = helpers.get_logger(__file__)


class TraderError(Exception):
    def __init__(self, result=None):
        super(TraderError, self).__init__()
        self.result = result


class XueQiuTrader(WebTrader):
    config_path = os.path.dirname(__file__) + '/config/xq.json'

    def __init__(self):
        super(XueQiuTrader, self).__init__()
        self.cookies = {}
        self.str_cookies = ""
config = json.load(cfg)
token = config["token"]
bot_name = config["bot_name"]
bot_avatar = config["bot_avatar"]
prefix = config["command_prefix"]
log_file = config["log_file"]
log_messages = config["log_messages"]
log_commands = config["log_commands"]
cmd_on_edit = config["commands_on_edit"]

# Grab the blacklist
with open("db/blacklist.json") as bl:
    blacklist = json.load(bl)["users"]

log = get_logger(log_file)

# Set the bot and basic variables up
description = """
General purpose chat and administration bot.
"""
bot = c.Bot(c.when_mentioned_or(prefix), pm_help=True, description=description)
plugins = []
first_launch = True


# Helper function to load plugins
def load_plugins():
    for p in os.listdir("plugins"):
        if p.endswith(".py"):
            # rstrip(".py") would also strip trailing 'p'/'y' characters from
            # the module name, so slice off the extension instead.
            p = p[:-len(".py")]
            try:
def __init__(self, **kwargs):
    FileCompression.__init__(self, **kwargs)
    BZ2Compression.__init__(self)
    self.log = _helpers.get_logger(__name__)
import subprocess
import pickle
import requests
import regex
import click
import copy
import pyperclip
import lxml.html
from pynput import keyboard

import csv_utf8
import helpers

logger = helpers.get_logger()


class Jornal_da_ciencia_impresso():
    def __init__(self):
        # tabela.append([edicao, titulo + ': ' + subtitulo,
        #                publicacao, link, tags])
        # tabela2.append([edicao, titulo, subtitulo, publicacao, link, tags])
        self.publicacao = u'Jornal da Ciencia Impresso'
        self.url = u'http://jcnoticias.jornaldaciencia.org.br/'
        self.tabela = []
        self.tabela2 = []
diff_ttest
"""
import sys
import multiprocessing as mp
from itertools import product, islice, repeat

if sys.version_info <= (3, 0):
    from itertools import izip as zip

import numpy as np
from scipy import stats

from helpers import get_logger, grouper

log = get_logger(__name__)
n_cpu = mp.cpu_count()


def _diff_ttest(position, time_series, break_points, direction='+',
                n_before=None, n_after=None, phase=0, diff_length=1):
    """
    perform diff_ttest algorithm on time_series, return (position, pvalue) pair

    :position: spatial position (y, x, z)
    :time_series: (1D numpy array)
    :break_points: (tuple) index number of image when event start and end.
    :direction: ('+'/'-'/'~') '+' for stimulation region, '-' for suppression
        region, '~' for not consider direction
    :n_before: (int) how many images to consider before the event occur.
def __init__(self, **kwargs):
    self.log = _helpers.get_logger(__name__)
    IOCompressionStream.__init__(self, **kwargs)
    GzipCompression.__init__(self)