import os
import logging
from logging.handlers import RotatingFileHandler

from googleapiclient import discovery

# get_key() and get_projects() are helper functions defined elsewhere in the original script.
if os.path.isfile(get_key()):
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = get_key()

alert = False

path = os.path.expanduser('~/python-logs')
logfile = os.path.expanduser('~/python-logs/security.log')

if not os.path.isdir(path):
    os.mkdir(path)


logger = logging.getLogger("Rotating Log")
log_formatter = logging.Formatter('%(asctime)s\t %(levelname)s %(message)s')
logger.setLevel(logging.INFO)
handler = RotatingFileHandler(logfile, maxBytes=5*1024*1024, backupCount=5)
handler.setFormatter(log_formatter)
logger.addHandler(handler)

for project in get_projects():
    project_name = 'projects/' + project
    service = discovery.build('iam', 'v1')
    request = service.projects().serviceAccounts().list(name=project_name)
    response = request.execute()

    if len(response) > 0:
        accounts = response['accounts']

        for account in accounts:
Example n. 2
#! /usr/bin/env python3.6
# coding: utf-8
import logging
import os
from logging import StreamHandler

log = logging.getLogger()
formatter = logging.Formatter("%(filename)s %(levelname)s - %(message)s")

handler = StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(os.environ.get("loglevel", "INFO"))
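A minimal usage sketch (hypothetical logger name, not part of the original module) of the pattern above: setLevel accepts the level name string taken from the loglevel environment variable, so exporting loglevel=DEBUG before starting the program enables debug output without touching the code.

import logging
import os
from logging import StreamHandler

demo_log = logging.getLogger("env-level-demo")  # hypothetical name for this sketch
handler = StreamHandler()
handler.setFormatter(logging.Formatter("%(filename)s %(levelname)s - %(message)s"))
demo_log.addHandler(handler)
demo_log.setLevel(os.environ.get("loglevel", "INFO"))  # level names like "DEBUG" are accepted as strings

demo_log.debug("emitted only when loglevel=DEBUG is exported")
demo_log.info("emitted at the default INFO level")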
Example n. 3
    import shlex, requests
    import logging
    from datetime import datetime
    import sys
    import os
    sys.path.append('/home/pi/droneponics')
    from AtlasI2C import (AtlasI2C)
    import blynklib
    import blynktimer

    import subprocess
    import re

    # tune console logging
    _log = logging.getLogger('BlynkLog')
    logFormatter = logging.Formatter(
        "%(asctime)s [%(levelname)s]  %(message)s")
    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    _log.addHandler(consoleHandler)
    _log.setLevel(logging.DEBUG)

    # Initialize the sensor.
    try:
        do = AtlasI2C(97)

        _log.info("sensor created")
    except Exception:
        _log.exception("Unexpected error: Atlas")
    else:
        try:
            print(do.query("O,%,0"))
Example n. 4
import datetime
import logging
import time

now = datetime.datetime.now()
log_file = 'user360clift_' + now.strftime('%Y%m%d%H%M%S') + '.log'
#create logger
logger = logging.getLogger(log_file)
logger.setLevel(logging.DEBUG)

# create console handler and set level to debug
printout_log = logging.StreamHandler()
printout_log.setLevel(logging.INFO)
# create file handler and set level to warning
toFile_log = logging.FileHandler(log_file)
toFile_log.setLevel(logging.DEBUG)
# create formatter
formatter_printout = logging.Formatter('%(asctime)s - %(message)s')
#formatter_toFile_log = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(message)s')
formatter_toFile_log = logging.Formatter(
    '%(asctime)s - %(levelname)s - %(message)s')
# add formatter to ch and fh
printout_log.setFormatter(formatter_printout)
toFile_log.setFormatter(formatter_toFile_log)
# add ch and fh to logger
logger.addHandler(printout_log)
logger.addHandler(toFile_log)

# 'application' code
'''
logger.debug('debug message')
logger.info('info message')
logger.warning('warn message')
'''
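A standalone sketch (hypothetical names and file) of how the two handlers above split the output: the logger itself passes everything at DEBUG and above, the console handler then keeps only INFO and above, while the file handler keeps the full DEBUG stream.

import logging

demo = logging.getLogger("dual-handler-demo")
demo.setLevel(logging.DEBUG)                    # the logger lets everything through

console = logging.StreamHandler()
console.setLevel(logging.INFO)                  # console: INFO and above
to_file = logging.FileHandler("dual_demo.log")  # hypothetical log file name
to_file.setLevel(logging.DEBUG)                 # file: DEBUG and above

demo.addHandler(console)
demo.addHandler(to_file)

demo.debug("reaches dual_demo.log only")
demo.info("reaches both the console and dual_demo.log")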
# Copyright (C) 2016 Intel Corporation
# Released under the MIT license (see COPYING.MIT)

import sys
import os

import unittest
import logging
import os

logger = logging.getLogger("oeqa")
logger.setLevel(logging.INFO)
consoleHandler = logging.StreamHandler()
formatter = logging.Formatter('OEQATest: %(message)s')
consoleHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)

def setup_sys_path():
    directory = os.path.dirname(os.path.abspath(__file__))
    oeqa_lib = os.path.realpath(os.path.join(directory, '../../../'))
    if not oeqa_lib in sys.path:
        sys.path.insert(0, oeqa_lib)

class TestBase(unittest.TestCase):
    def setUp(self):
        self.logger = logger
        directory = os.path.dirname(os.path.abspath(__file__))
        self.cases_path = os.path.join(directory, 'cases')

    def _testLoader(self, d={}, modules=[], tests=[], filters={}):
        from oeqa.core.context import OETestContext
import logging
from binance_d import SubscriptionClient
from binance_d.constant.test import *
from binance_d.model import *
from binance_d.exception.binanceapiexception import BinanceApiException

from binance_d.base.printobject import *

logger = logging.getLogger("binance-client")
logger.setLevel(level=logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)

sub_client = SubscriptionClient(api_key=g_api_key, secret_key=g_secret_key)


def callback(data_type: 'SubscribeMessageType', event: 'any'):
    if data_type == SubscribeMessageType.RESPONSE:
        print("Event ID: ", event)
    elif  data_type == SubscribeMessageType.PAYLOAD:
        PrintBasic.print_obj(event)
        sub_client.unsubscribe_all()
    else:
        print("Unknown Data:")
    print()


def error(e: 'BinanceApiException'):
    print(e.error_code + e.error_message)
class Display:
    BASE_FORMAT = logging.Formatter(fmt="[%(asctime)s] %(message)s",
                                    datefmt="%d/%b/%Y %H:%M:%S")

    SH_DEFAULT = "\033[0m" if not sys.platform.startswith("win") else ""  # TODO: colors for Windows
    SH_YELLOW = "\033[33m" if not sys.platform.startswith("win") else ""
    SH_BG_RED = "\033[41m" if not sys.platform.startswith("win") else ""
    SH_BG_YELLOW = "\033[43m" if not sys.platform.startswith("win") else ""

    def __init__(self, log_file):
        self.output_dir = ""
        self.output_dir_set = False
        self.log_file = os.path.join(PATH, log_file)

        self.logger = logging.getLogger("SafariBooks")
        self.logger.setLevel(logging.INFO)
        logs_handler = logging.FileHandler(filename=self.log_file)
        logs_handler.setFormatter(self.BASE_FORMAT)
        logs_handler.setLevel(logging.INFO)
        self.logger.addHandler(logs_handler)

        self.columns, _ = shutil.get_terminal_size()

        self.logger.info("** Welcome to SafariBooks! **")

        self.book_ad_info = False
        self.css_ad_info = Value("i", 0)
        self.images_ad_info = Value("i", 0)
        self.last_request = (None, )
        self.in_error = False

        self.state_status = Value("i", 0)
        sys.excepthook = self.unhandled_exception

    def set_output_dir(self, output_dir):
        self.info("Output directory:\n    %s" % output_dir)
        self.output_dir = output_dir
        self.output_dir_set = True

    def unregister(self):
        self.logger.handlers[0].close()
        sys.excepthook = sys.__excepthook__

    def log(self, message):
        try:
            self.logger.info(str(message, "utf-8", "replace"))

        except Exception:
            self.logger.info(message)

    def out(self, put):
        pattern = "\r{!s}\r{!s}\n"
        try:
            s = pattern.format(" " * self.columns, str(put, "utf-8",
                                                       "replace"))

        except TypeError:
            s = pattern.format(" " * self.columns, put)

        sys.stdout.write(s)

    def info(self, message, state=False):
        self.log(message)
        output = (self.SH_YELLOW + "[*]" +
                  self.SH_DEFAULT if not state else self.SH_BG_YELLOW + "[-]" +
                  self.SH_DEFAULT) + " %s" % message
        self.out(output)

    def error(self, error):
        if not self.in_error:
            self.in_error = True

        self.log(error)
        output = self.SH_BG_RED + "[#]" + self.SH_DEFAULT + " %s" % error
        self.out(output)

    def fatal(self, error):
        self.error(str(error))

        if self.output_dir_set:
            output = (self.SH_YELLOW + "[+]" + self.SH_DEFAULT +
                      " Please delete the output directory '" +
                      self.output_dir + "'"
                      " and restart the program.")
            self.out(output)

        output = self.SH_BG_RED + "[!]" + self.SH_DEFAULT + " Aborting..."
        self.out(output)

        self.save_last_request()
        raise SafariBooksError(str(error))

    def unhandled_exception(self, _, o, tb):
        self.log("".join(traceback.format_tb(tb)))
        self.fatal("Unhandled Exception: %s (type: %s)" %
                   (o, o.__class__.__name__))

    def save_last_request(self):
        if any(self.last_request):
            self.log(
                "Last request done:\n\tURL: {0}\n\tDATA: {1}\n\tOTHERS: {2}\n\n\t{3}\n{4}\n\n{5}\n"
                .format(*self.last_request))

    def intro(self):
        output = self.SH_YELLOW + ("""
       ____     ___         _     
      / __/__ _/ _/__ _____(_)    
     _\ \/ _ `/ _/ _ `/ __/ /     
    /___/\_,_/_/ \_,_/_/ /_/      
      / _ )___  ___  / /__ ___    
     / _  / _ \/ _ \/  '_/(_-<    
    /____/\___/\___/_/\_\/___/    
""" if random() > 0.5 else """
 ██████╗     ██████╗ ██╗  ██╗   ██╗██████╗ 
██╔═══██╗    ██╔══██╗██║  ╚██╗ ██╔╝╚════██╗
██║   ██║    ██████╔╝██║   ╚████╔╝   ▄███╔╝
██║   ██║    ██╔══██╗██║    ╚██╔╝    ▀▀══╝ 
╚██████╔╝    ██║  ██║███████╗██║     ██╗   
 ╚═════╝     ╚═╝  ╚═╝╚══════╝╚═╝     ╚═╝                                           
""") + self.SH_DEFAULT
        output += "\n" + "~" * (self.columns // 2)

        self.out(output)

    def parse_description(self, desc):
        if not desc:
            return "n/d"

        try:
            return html.fromstring(desc).text_content()

        except (html.etree.ParseError, html.etree.ParserError) as e:
            self.log("Error parsing the description: %s" % e)
            return "n/d"

    def book_info(self, info):
        description = self.parse_description(info.get("description",
                                                      None)).replace(
                                                          "\n", " ")
        for t in [
            ("Title", info.get("title", "")),
            ("Authors",
             ", ".join(aut.get("name", "")
                       for aut in info.get("authors", []))),
            ("Identifier", info.get("identifier", "")),
            ("ISBN", info.get("isbn", "")),
            ("Publishers", ", ".join(
                pub.get("name", "") for pub in info.get("publishers", []))),
            ("Rights", info.get("rights", "")),
            ("Description", description[:500] +
             "..." if len(description) >= 500 else description),
            ("Release Date", info.get("issued", "")),
            ("URL", info.get("web_url", ""))
        ]:
            self.info(
                "{0}{1}{2}: {3}".format(self.SH_YELLOW, t[0], self.SH_DEFAULT,
                                        t[1]), True)

    def state(self, origin, done):
        progress = int(done * 100 / origin)
        bar = int(progress * (self.columns - 11) / 100)
        if self.state_status.value < progress:
            self.state_status.value = progress
            sys.stdout.write("\r    " + self.SH_BG_YELLOW + "[" +
                             ("#" * bar).ljust(self.columns - 11, "-") + "]" +
                             self.SH_DEFAULT + ("%4s" % progress) + "%" +
                             ("\n" if progress == 100 else ""))

    def done(self, epub_file):
        self.info(
            "Done: %s\n\n" % epub_file +
            "    If you like it, please * this project on GitHub to make it known:\n"
            "        https://github.com/lorenzodifuccia/safaribooks\n"
            "    and don't forget to renew your Safari Books Online subscription:\n"
            "        " + SAFARI_BASE_URL + "\n\n" + self.SH_BG_RED + "[!]" +
            self.SH_DEFAULT + " Bye!!")

    @staticmethod
    def api_error(response):
        message = "API: "
        if "detail" in response and "Not found" in response["detail"]:
            message += "book's not present in Safari Books Online.\n" \
                       "    The book identifier is the digits that you can find in the URL:\n" \
                       "    `" + SAFARI_BASE_URL + "/library/view/book-name/XXXXXXXXXXXXX/`"

        else:
            os.remove(COOKIES_FILE)
            message += "Out-of-Session%s.\n" % (
                " (%s)" % response["detail"] if "detail" in response else ""
            ) + Display.SH_YELLOW + "[+]" + Display.SH_DEFAULT + \
                " Use the `--cred` or `--login` options in order to perform the auth login to Safari."

        return message
Example n. 8
def run_dataset(paras):
    dataset, sparsity, trade_off, learning_rate, max_iter, epsilon, write_to_dir, num_nodes, deg, normalize = paras

    if not write_to_dir:
        logger = logging.getLogger('fei')
    else:
        log_fn = '{}_nodes_{}_deg_{}_sparsity_{:d}_trade_{}_lr_{}_{}.txt'.format(DATASET, num_nodes, deg, sparsity, trade_off, learning_rate, normalize)

        if os.path.isfile(os.path.join(write_to_dir, log_fn)):
            print('file exist !!!')
            return

        logger = logging.getLogger(log_fn)
        formatter = logging.Formatter('')
        file_handler = logging.FileHandler(filename=os.path.join(write_to_dir, log_fn), mode='w')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    all_performance = []
    logger.debug('-' * 5 + ' setting ' + '-' * 5)
    logger.debug('sparsity: {:d}'.format(sparsity))
    logger.debug('learning rate: {:.5f}'.format(learning_rate))
    logger.debug('trade off: {:.5f}'.format(trade_off))
    for i, instance in enumerate(dataset):
        logger.debug('instance: {:d}'.format(i))

        if normalize:
            logger.debug('feature normalized')
            instance['features'] = instance['features'] / np.max(instance['features'])

        opt_x, run_time = optimize(instance, sparsity, trade_off, learning_rate, max_iter, epsilon, logger=None)

        raw_pred_subgraph = np.nonzero(opt_x)[0]

        prec, rec, fm, iou = evaluate_block(instance['true_subgraph'], raw_pred_subgraph)

        logger.debug('-' * 5 + ' performance of raw prediction ' + '-' * 5)
        logger.debug('precision: {:.5f}'.format(prec))
        logger.debug('recall   : {:.5f}'.format(rec))
        logger.debug('f-measure: {:.5f}'.format(fm))
        logger.debug('iou      : {:.5f}'.format(iou))

        refined_pred_subgraph = post_process_block(instance['graph'], raw_pred_subgraph, dataset=DATASET)
        refined_prec, refined_rec, refined_fm, refined_iou = evaluate_block(instance['true_subgraph'],
                                                                            refined_pred_subgraph)

        logger.debug('-' * 5 + ' performance of refined prediction ' + '-' * 5)
        logger.debug('refined precision: {:.5f}'.format(refined_prec))
        logger.debug('refined recall   : {:.5f}'.format(refined_rec))
        logger.debug('refined f-measure: {:.5f}'.format(refined_fm))
        logger.debug('refined iou      : {:.5f}'.format(refined_iou))

        all_performance.append((prec, rec, fm, iou, refined_prec, refined_rec, refined_fm, refined_iou, run_time))

        # break # test 1 instance

    all_performance = np.array(all_performance)
    avg_performance = np.mean(all_performance, axis=0)
    logger.debug('-' * 5 + ' average performance ' + '-' * 5)
    logger.debug('average precision: {:.5f}'.format(avg_performance[0]))
    logger.debug('average recall   : {:.5f}'.format(avg_performance[1]))
    logger.debug('average f-measure: {:.5f}'.format(avg_performance[2]))
    logger.debug('average iou      : {:.5f}'.format(avg_performance[3]))
    logger.debug('avg refined prec : {:.5f}'.format(avg_performance[4]))
    logger.debug('avg refined rec  : {:.5f}'.format(avg_performance[5]))
    logger.debug('avg refined fm   : {:.5f}'.format(avg_performance[6]))
    logger.debug('avg refined iou  : {:.5f}'.format(avg_performance[7]))
    logger.debug('average run time : {:.5f}'.format(avg_performance[8]))
import logging
import os
import platform
import sys
from logging.handlers import RotatingFileHandler

PROGRAM_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

backup_count = 5 if platform.system() == 'Linux' else 0

# make the directory for logs if it doesn't exist
logs_dir = os.path.join(PROGRAM_DIR, 'logs')
if not os.path.isdir(logs_dir):
    os.makedirs(logs_dir)

LOG = logging.getLogger('Spellbook')

stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(logging.Formatter('%(asctime)s | %(levelname)s | %(message)s'))
LOG.addHandler(stream_handler)

file_handler = RotatingFileHandler(os.path.join(logs_dir, 'spellbook.txt'), maxBytes=10000000, backupCount=backup_count)  # Todo change to concurrent_log_handler.ConcurrentRotatingFileHandler with backupCount 5 after python3 conversion
file_handler.setFormatter(logging.Formatter('%(asctime)s | %(levelname)s | %(message)s'))
LOG.addHandler(file_handler)

LOG.setLevel(logging.DEBUG)

# Create a log file for the http requests to the REST API
REQUESTS_LOG = logging.getLogger('api_requests')

file_handler = RotatingFileHandler(os.path.join(logs_dir, 'requests.txt'), maxBytes=10000000, backupCount=backup_count)  # Todo change to concurrent_log_handler.ConcurrentRotatingFileHandler with backupCount 5 after python3 conversion
file_handler.setFormatter(logging.Formatter('%(message)s'))
REQUESTS_LOG.addHandler(file_handler)
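A self-contained sketch (temporary directory, hypothetical names) of the rotation configured above: once writing a record would push the file past maxBytes, RotatingFileHandler renames it to <name>.1, shifts older backups up to backupCount, and starts a fresh file.

import logging
import os
import tempfile
from logging.handlers import RotatingFileHandler

log_dir = tempfile.mkdtemp()
handler = RotatingFileHandler(os.path.join(log_dir, 'demo.txt'),
                              maxBytes=200, backupCount=2)
handler.setFormatter(logging.Formatter('%(asctime)s | %(levelname)s | %(message)s'))

demo_log = logging.getLogger('rotating-demo')
demo_log.addHandler(handler)
demo_log.setLevel(logging.DEBUG)

for i in range(20):
    demo_log.info("message %d", i)     # repeatedly exceeds maxBytes and triggers rollover

print(sorted(os.listdir(log_dir)))     # e.g. ['demo.txt', 'demo.txt.1', 'demo.txt.2']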
Example n. 10
'''
Created on Sep 6, 2018

@author: sedlmeier
'''

import logging

synclogger = logging.getLogger('sync')
synclogger.setLevel(logging.DEBUG)
syncfh = logging.FileHandler("log/sync.log")
syncfh.setLevel(logging.DEBUG)
syncch = logging.StreamHandler()
syncch.setLevel(logging.INFO)
syncformatter = logging.Formatter(
    "%(threadName)s %(asctime)s  %(levelname)s: %(message)s", "%H:%M:%S")
syncch.setFormatter(syncformatter)
synclogger.addHandler(syncfh)
synclogger.addHandler(syncch)
Example n. 11
import logging
import os

# DATA_PATH is defined elsewhere in the original configuration module.

# Local timezone
TIMEZONE = "UTC"

# Date format for all logs
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

# The SQLAlchemy connection string.
DATABASE_URI = "sqlite:///" + os.path.join(DATA_PATH, "data.db")

# Set verbosity of sqlalchemy
SQLALCHEMY_ECHO = False

# Logger
LOG_FILE = os.path.join(DATA_PATH, "logs.log")
LOG_FORMAT = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
LOG_LEVEL = "DEBUG"
LOGGER = logging.getLogger(__name__)

hdlr = logging.FileHandler(LOG_FILE)
hdlr.setFormatter(LOG_FORMAT)

LOGGER.addHandler(hdlr)
LOGGER.setLevel(LOG_LEVEL)

# All time spans in minutes
DEFAULT_WORKDAY_DURATION = 8 * 60
DEFAULT_EVENT_DURATION = 10


# Config overridden by local file
Example n. 12
import json
import logging
from logging import handlers
import time

from kafka import KafkaAdminClient, KafkaConsumer, KafkaProducer, admin, errors

# Logging Parameters
logger = logging.getLogger(__name__)
file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
file_handler.setFormatter(formatter)
stream_handler.setFormatter(stream_formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)

# NOTE: It is required to have global parameters for kafka objects
consumer, producer, topic = None, None, None


def create_consumer(topic_name, bootstrap_servers=None):
    global consumer

    bootstrap_servers = bootstrap_servers or ["kafka:19092"]
    # Create the kafka consumer
    tries = 30
    exit = False
    while not exit:
Example n. 13
import datetime
import logging

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim

from models import *
import test

log = logging.getLogger('default')
log.setLevel(logging.DEBUG)

formatter = logging.Formatter(
    '[%(asctime)s][%(levelname)s] (%(filename)s:%(lineno)d) > %(message)s')

fileHandler = logging.FileHandler('./log.txt')
streamHandler = logging.StreamHandler()

fileHandler.setFormatter(formatter)
streamHandler.setFormatter(formatter)

log.addHandler(fileHandler)
log.addHandler(streamHandler)

device = 'cuda' if torch.cuda.is_available() else 'cpu'


def train_model(model_file_name='model.pth', prev_train_model_file_name=''):
Example n. 14
    def __init__(self,loggername='App',filename='logfile.log',path ='/tmp/', screen=True,level=logging.WARNING):

        level = logging.DEBUG  # NOTE: overrides the level argument passed to __init__
        try:
            #path = '/home/app/log'
            filepath = path + '/' + filename

            #Make sure file path is accessible
            pathAccess = self.path_accessibility(path)
            if not pathAccess:
                raise Exception('Given path was not accessible')

            #Make sure file path has enough space to log messages, default is 90% usage
            pathSpace = self.space_constraints(path)
            if not pathSpace:
                raise Exception('Given path did not have enough space to log information')

            #Make sure file exists in the path
            fileExistance = self.file_existance(filepath)

            #If file doesn't exist, try creating it and assign it necessary permissions
            if not fileExistance:
                createFile = self.create_file(filepath)
                if not createFile:
                    raise Exception('Unable to create file. Please check if directory has write permissions')
                fileExistance = self.file_existance(filepath)

            #Make sure file has the right permissions
            filePermissions = self.file_permissions(filepath)

            #Logger starts here
            if pathAccess and fileExistance and filePermissions:
  
                self.screen = screen
                
                #This step is used to display the file/method/line that called logger methods
                #func = inspect.currentframe().f_back.f_code
                #format = '%(thread)d||%(asctime)s||%(levelname)s||%(message)s||' + '{}||{}||{}' .format(func.co_filename,func.co_name,func.co_firstlineno)
                #format = '%(thread)d||%(asctime)s||%(levelname)s||%(message)s||'
                format = '%(thread)d||%(asctime)s||%(levelname)s||%(message)s||%(filename)s||%(funcName)s||%(lineno)d'

                self.logger = logging.getLogger(loggername)
                self.logger.name = loggername

                #Makes sure Logger instance is called only once.
                if not getattr(self.logger, 'handler_set', None):

                    # add a rotating handler
                    handler = RotatingFileHandler(filepath, maxBytes=2000000,
                                  backupCount=10)
                    formatter = logging.Formatter(format)

                    handler.setFormatter(formatter)
                    self.logger.addHandler(handler) 
                    self.logger.setLevel(level)
                    self.logger.handler_set = True

            else:

                print("CRITICAL :: Please fix errors and try running again")
                sys.exit(0)

        except:

            print("CRITICAL :: Please fix errors and try running again")
            sys.exit(0)
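A compact sketch (hypothetical function name and path) of the handler_set guard used in the constructor above: logging.getLogger returns the same object for the same name, so the flag keeps repeated instantiation from attaching duplicate handlers and double-writing every record.

import logging
from logging.handlers import RotatingFileHandler

def get_app_logger(name='App', filepath='/tmp/logfile.log'):
    logger = logging.getLogger(name)
    if not getattr(logger, 'handler_set', None):
        handler = RotatingFileHandler(filepath, maxBytes=2000000, backupCount=10)
        handler.setFormatter(logging.Formatter('%(thread)d||%(asctime)s||%(levelname)s||%(message)s'))
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        logger.handler_set = True
    return logger

first = get_app_logger()
second = get_app_logger()
assert first is second and len(first.handlers) == 1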
Example n. 15
def train_lm(
        data_dir: str,
        model_dir: str,
        dataset: str,
        baseline: str,
        hyper_params: Dict[str, Any],
        loss_type: str,
        compute_train_batch_size: int,
        predict_batch_size: int,
        gpu_ids: Optional[List[int]],
        logger: Optional[logging.Logger] = None
) -> None:
    """Fine-tune a pre-trained LM baseline on a scruples dataset.

    Fine-tune ``baseline`` on ``dataset``, writing all results and
    artifacts to ``model_dir``. Return the best calibrated xentropy achieved on
    dev after any epoch.

    Parameters
    ----------
    data_dir : str
        The path to the directory containing the dataset.
    model_dir : str
        The path to the directory in which to save results.
    dataset : str
        The dataset to use when fine-tuning ``baseline``. Must be either
        "resource" or "corpus".
    baseline : str
        The pre-trained LM to fine-tune. Should be one of the keys for
        ``scruples.baselines.$dataset.FINE_TUNE_LM_BASELINES`` where
        ``$dataset`` corresponds to the ``dataset`` argument to this
        function.
    hyper_params : Dict[str, Any]
        The dictionary of hyper-parameters for the model.
    loss_type : str
        The type of loss to use. Should be one of ``"xentropy-hard"``,
        ``"xentropy-soft"``, ``"xentropy-full"`` or
        ``"dirichlet-multinomial"``.
    compute_train_batch_size : int
        The largest batch size that will fit on the hardware during
        training. Gradient accumulation will be used to make sure the
        actual size of the batch on the hardware respects this limit.
    predict_batch_size : int
        The number of instances to use in a predicting batch.
    gpu_ids : Optional[List[int]]
        A list of IDs for GPUs to use.
    logger : Optional[logging.Logger], optional (default=None)
        The logger to use when logging messages. If ``None``, then no
        messages will be logged.

    Returns
    -------
    float
        The best calibrated xentropy on dev achieved after any epoch.
    bool
        ``True`` if the training loss diverged, ``False`` otherwise.
    """
    gc.collect()
    # collect any garbage to make sure old torch objects are cleaned up (and
    # their memory is freed from the GPU). Otherwise, old tensors can hang
    # around on the GPU, causing CUDA out-of-memory errors.

    if loss_type not in settings.LOSS_TYPES:
        raise ValueError(
            f'Unrecognized loss type: {loss_type}. Please use one of'
            f' "xentropy-hard", "xentropy-soft", "xentropy-full" or'
            f' "dirichlet-multinomial".')

    # Step 1: Manage and construct paths.

    if logger is not None:
        logger.info('Creating the model directory.')

    checkpoints_dir = os.path.join(model_dir, 'checkpoints')
    tensorboard_dir = os.path.join(model_dir, 'tensorboard')
    os.makedirs(model_dir)
    os.makedirs(checkpoints_dir)
    os.makedirs(tensorboard_dir)

    config_file_path = os.path.join(model_dir, 'config.json')
    log_file_path = os.path.join(model_dir, 'log.txt')
    best_checkpoint_path = os.path.join(
        checkpoints_dir, 'best.checkpoint.pkl')
    last_checkpoint_path = os.path.join(
        checkpoints_dir, 'last.checkpoint.pkl')

    # Step 2: Setup the log file.

    if logger is not None:
        logger.info('Configuring log files.')

    log_file_handler = logging.FileHandler(log_file_path)
    log_file_handler.setLevel(logging.DEBUG)
    log_file_handler.setFormatter(logging.Formatter(settings.LOG_FORMAT))
    logging.root.addHandler(log_file_handler)

    # Step 3: Record the script's arguments.

    if logger is not None:
        logger.info(f'Writing arguments to {config_file_path}.')

    with open(config_file_path, 'w') as config_file:
        json.dump({
            'data_dir': data_dir,
            'model_dir': model_dir,
            'dataset': dataset,
            'baseline': baseline,
            'hyper_params': hyper_params,
            'loss_type': loss_type,
            'compute_train_batch_size': compute_train_batch_size,
            'predict_batch_size': predict_batch_size,
            'gpu_ids': gpu_ids
        }, config_file)

    # Step 4: Configure GPUs.

    if gpu_ids:
        if logger is not None:
            logger.info(
                f'Configuring environment to use {len(gpu_ids)} GPUs:'
                f' {", ".join(str(gpu_id) for gpu_id in gpu_ids)}.')

        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, gpu_ids))

        if not torch.cuda.is_available():
            raise EnvironmentError('CUDA must be available to use GPUs.')

        device = torch.device('cuda')
    else:
        if logger is not None:
            logger.info('Configuring environment to use CPU.')

        device = torch.device('cpu')

    # Step 5: Fetch the baseline information and training loop parameters.

    if logger is not None:
        logger.info('Retrieving baseline and related parameters.')

    if dataset == 'resource':
        Model, baseline_config, _, make_transform =\
            resource.FINE_TUNE_LM_BASELINES[baseline]
    elif dataset == 'corpus':
        Model, baseline_config, _, make_transform =\
            corpus.FINE_TUNE_LM_BASELINES[baseline]
    else:
        raise ValueError(
            f'dataset must be either "resource" or "corpus", not'
            f' {dataset}.')

    n_epochs = hyper_params['n_epochs']
    train_batch_size = hyper_params['train_batch_size']
    n_gradient_accumulation = math.ceil(
        train_batch_size / (compute_train_batch_size * len(gpu_ids)))

    # Step 6: Load the dataset.

    if logger is not None:
        logger.info(f'Loading the dataset from {data_dir}.')

    featurize = make_transform(**baseline_config['transform'])
    if dataset == 'resource':
        Dataset = ScruplesResourceDataset
        labelize = None
        labelize_scores = lambda scores: np.array(scores).astype(float)
    elif dataset == 'corpus':
        Dataset = ScruplesCorpusDataset
        labelize = lambda s: getattr(Label, s).index
        labelize_scores = lambda scores: np.array([
            score
            for _, score in sorted(
                    scores.items(),
                    key=lambda t: labelize(t[0]))
        ]).astype(float)
    else:
        raise ValueError(
            f'dataset must be either "resource" or "corpus", not'
            f' {dataset}.')

    train = Dataset(
        data_dir=data_dir,
        split='train',
        transform=featurize,
        label_transform=labelize,
        label_scores_transform=labelize_scores)
    dev = Dataset(
        data_dir=data_dir,
        split='dev',
        transform=featurize,
        label_transform=labelize,
        label_scores_transform=labelize_scores)

    train_loader = DataLoader(
        dataset=train,
        batch_size=train_batch_size // n_gradient_accumulation,
        shuffle=True,
        num_workers=len(gpu_ids),
        pin_memory=bool(gpu_ids))
    dev_loader = DataLoader(
        dataset=dev,
        batch_size=predict_batch_size,
        shuffle=False,
        num_workers=len(gpu_ids),
        pin_memory=bool(gpu_ids))

    # Step 7: Create the model, optimizer, and loss.

    if logger is not None:
        logger.info('Initializing the model.')

    model = Model(**baseline_config['model'])
    model.to(device)

    n_optimization_steps = n_epochs * math.ceil(len(train) / train_batch_size)
    parameter_groups = [
        {
            'params': [
                param
                for name, param in model.named_parameters()
                if 'bias' in name
                or 'LayerNorm.bias' in name
                or 'LayerNorm.weight' in name
            ],
            'weight_decay': 0
        },
        {
            'params': [
                param
                for name, param in model.named_parameters()
                if 'bias' not in name
                and 'LayerNorm.bias' not in name
                and 'LayerNorm.weight' not in name
            ],
            'weight_decay': hyper_params['weight_decay']
        }
    ]
    optimizer = AdamW(parameter_groups, lr=hyper_params['lr'])

    if loss_type == 'xentropy-hard':
        loss = torch.nn.CrossEntropyLoss()
    elif loss_type == 'xentropy-soft':
        loss = SoftCrossEntropyLoss()
    elif loss_type == 'xentropy-full':
        loss = SoftCrossEntropyLoss()
    elif loss_type == 'dirichlet-multinomial':
        loss = DirichletMultinomialLoss()

    xentropy = SoftCrossEntropyLoss()

    scheduler = WarmupLinearSchedule(
        optimizer=optimizer,
        warmup_steps=int(
            hyper_params['warmup_proportion']
            * n_optimization_steps
        ),
        t_total=n_optimization_steps)

    # add data parallelism support
    model = torch.nn.DataParallel(model)

    # Step 8: Run training.

    n_train_batches_per_epoch = math.ceil(len(train) / train_batch_size)
    n_dev_batch_per_epoch = math.ceil(len(dev) / predict_batch_size)

    writer = tensorboardX.SummaryWriter(log_dir=tensorboard_dir)

    best_dev_calibrated_xentropy = math.inf
    for epoch in range(n_epochs):
        # set the model to training mode
        model.train()

        # run training for the epoch
        epoch_train_loss = 0
        epoch_train_xentropy = 0
        for i, (_, features, labels, label_scores) in tqdm.tqdm(
                enumerate(train_loader),
                total=n_gradient_accumulation * n_train_batches_per_epoch,
                **settings.TQDM_KWARGS
        ):
            # move the data onto the device
            features = {k: v.to(device) for k, v in features.items()}

            # create the targets
            if loss_type == 'xentropy-hard':
                targets = labels
            elif loss_type == 'xentropy-soft':
                targets = label_scores / torch.unsqueeze(
                    torch.sum(label_scores, dim=-1), dim=-1)
            elif loss_type == 'xentropy-full':
                targets = label_scores
            elif loss_type == 'dirichlet-multinomial':
                targets = label_scores
            # create the soft labels
            soft_labels = label_scores / torch.unsqueeze(
                torch.sum(label_scores, dim=-1), dim=-1)

            # move the targets and soft labels to the device
            targets = targets.to(device)
            soft_labels = soft_labels.to(device)

            # make predictions
            logits = model(**features)[0]

            batch_loss = loss(logits, targets)
            batch_xentropy = xentropy(logits, soft_labels)

            # update training statistics
            epoch_train_loss = (
                batch_loss.item() + i * epoch_train_loss
            ) / (i + 1)
            epoch_train_xentropy = (
                batch_xentropy.item() + i * epoch_train_xentropy
            ) / (i + 1)

            # update the network
            batch_loss.backward()

            if (i + 1) % n_gradient_accumulation == 0:
                optimizer.step()
                optimizer.zero_grad()

                scheduler.step()

            # write training statistics to tensorboard

            step = n_train_batches_per_epoch * epoch + (
                (i + 1) // n_gradient_accumulation)
            if step % 100 == 0 and (i + 1) % n_gradient_accumulation == 0:
                writer.add_scalar('train/loss', epoch_train_loss, step)
                writer.add_scalar('train/xentropy', epoch_train_xentropy, step)

        # run evaluation
        with torch.no_grad():
            # set the model to evaluation mode
            model.eval()

            # run validation for the epoch
            epoch_dev_loss = 0
            epoch_dev_soft_labels = []
            epoch_dev_logits = []
            for i, (_, features, labels, label_scores) in tqdm.tqdm(
                    enumerate(dev_loader),
                    total=n_dev_batch_per_epoch,
                    **settings.TQDM_KWARGS):
                # move the data onto the device
                features = {k: v.to(device) for k, v in features.items()}

                # create the targets
                if loss_type == 'xentropy-hard':
                    targets = labels
                elif loss_type == 'xentropy-soft':
                    targets = label_scores / torch.unsqueeze(
                        torch.sum(label_scores, dim=-1), dim=-1)
                elif loss_type == 'xentropy-full':
                    targets = label_scores
                elif loss_type == 'dirichlet-multinomial':
                    targets = label_scores

                # move the targets to the device
                targets = targets.to(device)

                # make predictions
                logits = model(**features)[0]

                batch_loss = loss(logits, targets)

                # update validation statistics
                epoch_dev_loss = (
                    batch_loss.item() + i * epoch_dev_loss
                ) / (i + 1)
                epoch_dev_soft_labels.extend(
                    (
                        label_scores
                        / torch.unsqueeze(torch.sum(label_scores, dim=-1), dim=-1)
                    ).cpu().numpy().tolist()
                )
                epoch_dev_logits.extend(logits.cpu().numpy().tolist())

            # compute validation statistics
            epoch_dev_soft_labels = np.array(epoch_dev_soft_labels)
            epoch_dev_logits = np.array(epoch_dev_logits)

            calibration_factor = utils.calibration_factor(
                logits=epoch_dev_logits,
                targets=epoch_dev_soft_labels)

            epoch_dev_xentropy = utils.xentropy(
                y_true=epoch_dev_soft_labels,
                y_pred=softmax(epoch_dev_logits, axis=-1))
            epoch_dev_calibrated_xentropy = utils.xentropy(
                y_true=epoch_dev_soft_labels,
                y_pred=softmax(epoch_dev_logits / calibration_factor, axis=-1))

            # write validation statistics to tensorboard
            writer.add_scalar('dev/loss', epoch_dev_loss, step)
            writer.add_scalar('dev/xentropy', epoch_dev_xentropy, step)
            writer.add_scalar(
                'dev/calibrated-xentropy', epoch_dev_calibrated_xentropy, step)

            if logger is not None:
                logger.info(
                    f'\n\n'
                    f'  epoch {epoch}:\n'
                    f'    train loss              : {epoch_train_loss:.4f}\n'
                    f'    train xentropy          : {epoch_train_xentropy:.4f}\n'
                    f'    dev loss                : {epoch_dev_loss:.4f}\n'
                    f'    dev xentropy            : {epoch_dev_xentropy:.4f}\n'
                    f'    dev calibrated xentropy : {epoch_dev_calibrated_xentropy:.4f}\n'
                    f'    calibration factor      : {calibration_factor:.4f}\n')

        # update checkpoints

        torch.save(
            {
                'epoch': epoch,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'calibration_factor': calibration_factor
            },
            last_checkpoint_path)

        # update the current best model
        if epoch_dev_calibrated_xentropy < best_dev_calibrated_xentropy:
            shutil.copyfile(last_checkpoint_path, best_checkpoint_path)
            best_dev_calibrated_xentropy = epoch_dev_calibrated_xentropy

        # exit early if the training loss has diverged
        if math.isnan(epoch_train_loss):
            logger.info('Training loss has diverged. Exiting early.')

            return best_dev_calibrated_xentropy, True

    logger.info(
        f'Training complete. Best dev calibrated xentropy was'
        f' {best_dev_calibrated_xentropy:.4f}.')

    return best_dev_calibrated_xentropy, False
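A small worked example (hypothetical numbers, mirroring Steps 5 and 8 above) of the gradient-accumulation arithmetic: the effective batch size requested in hyper_params is split into smaller loader batches that fit on the hardware, and the optimizer only steps after that many backward passes.

import math

train_batch_size = 64          # effective batch size from hyper_params (assumed)
compute_train_batch_size = 8   # largest per-step batch that fits in memory (assumed)
n_gpus = 2                     # len(gpu_ids) (assumed)

n_gradient_accumulation = math.ceil(
    train_batch_size / (compute_train_batch_size * n_gpus))          # 4
loader_batch_size = train_batch_size // n_gradient_accumulation      # 16

# Four loader batches of 16 examples are accumulated before each optimizer.step(),
# giving the requested effective batch size of 64.
print(n_gradient_accumulation, loader_batch_size)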
Example n. 16
def importSourceHierarchies(db, filename, user):
    def findNextRidno(ridstrt):
        with DbTxn(_("Find next ridno"), db):
            prefix_save = db.get_repository_prefix()
            db.set_repository_id_prefix(ridstrt + '%04d')
            next_ridno = db.find_next_repository_gramps_id()
            LOG.debug('Next ridno = ' + next_ridno)
            db.set_repository_id_prefix(prefix_save)
        return next_ridno

    def findNextSidno(ridno):
        with DbTxn(_("Find next sidno"), db):
            prefix_save = db.get_source_prefix()
            db.set_source_id_prefix(ridno + '%04d')
            next_sidno = db.find_next_source_gramps_id()
            LOG.debug('Next sidno = ' + next_sidno)
            db.set_source_id_prefix(prefix_save)
        return next_sidno

    def addRepository(repositoryName, reftag):
        ridno = db.find_next_repository_gramps_id()
        repository = Repository()
        repositoryType = RepositoryType()
        repositoryType.set(RepositoryType.ARCHIVE)
        repository.set_type(repositoryType)
        repository.set_gramps_id(ridno)
        repository.set_name(repositoryName)
        repository.set_url_list(())
        repository.set_change_time(chgtime)
        if reftag != None:
            repository.add_tag(reftag.get_handle())
        with DbTxn(_("Add Repository"), db) as trans:
            rhandle = db.add_repository(repository, trans)
        return repository

    def addSource(sourceName, attribs, reftag, repository):
        snote = addNote(attribs[3], NoteType.SOURCE)
        sidno = db.find_next_source_gramps_id()
        source = Source()
        source.set_gramps_id(sidno)
        source.set_title(sourceName)
        source.set_author(attribs[0])
        source.set_publication_info(attribs[1])
        source.set_abbreviation(attribs[2])
        source.add_note(snote.get_handle())
        if reftag != None:
            source.add_tag(reftag.get_handle())
        repoRef = RepoRef()
        repoRef.set_reference_handle(repository.get_handle())
        source.add_repo_reference(repoRef)
        source.set_change_time(chgtime)
        with DbTxn(_("Add Source"), db) as trans:
            shandle = db.add_source(source, trans)
        return source

    def addNote(ntext, ntype):
        nidno = db.find_next_note_gramps_id()
        note = Note(ntext)
        note.set_gramps_id(nidno)
        note.set_type(ntype)
        if reftag != None:
            note.add_tag(reftag.get_handle())
        note.set_change_time(chgtime)
        with DbTxn(_("Add Note"), db) as trans:
            nhandle = db.add_note(note, trans)
            LOG.debug('Note added: ' + ntext + ' ' + nhandle)
        return note

    def checkTagExistence(otext):
        with DbTxn(_("Read Tag"), db):
            tag = db.get_tag_from_name(otext)
        if tag != None:
            LOG.debug('Tag found by name, no duplicates: ' + otext + ' ' +
                      tag.get_name())
        else:
            tag = Tag()
            tag.set_name(otext)
            tag.set_color("#EF2929")
            with DbTxn(_("Add Tag"), db) as trans:
                thandle = db.add_tag(tag, trans)
                LOG.debug('Tag added: ' + tag.get_name() + ' ' + thandle)
        return tag

    fdir = os.path.dirname(filename)

    fh = logging.FileHandler(os.path.join(fdir, 'sourceimport.log'))
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    LOG.addHandler(fh)

    LOG.info("   fdir = " + fdir)
    LOG.debug('ini file handling')

    config = configman.register_manager("importsources")
    '''
    config.register("options.repositoryidrng", "1000")    
    config.register("options.repositoryincr", "1") 
    config.register("options.sourceidrng", "1000")    
    config.register("options.sourceidincr", "1") 
    '''
    config.register("options.refstring", "r")
    config.load()
    config.save()

    r_count = 0
    s_count = 0

    reftag = checkTagExistence('Referenssi')
    chgtime = int(time.time())
    LOG.info("   chgtime = " + str(chgtime))

    try:
        currRepoName = ''
        with open(filename, 'r', encoding="utf-8-sig") as t_in:
            t_dialect = csv.Sniffer().sniff(t_in.read(1024))
            t_dialect.delimiter = ";"
            t_in.seek(0)
            t_reader = csv.reader(t_in, t_dialect)
            LOG.info('CSV input file delimiter is ' + t_dialect.delimiter)
            global repository
            for row in t_reader:
                repoName = row[0].strip()  # Repository name
                if repoName != currRepoName:
                    currRepoName = repoName
                    LOG.debug('New repository: ' + currRepoName)
                    repository = addRepository(currRepoName, reftag)
                    r_count += 1
                sourceName = repoName + " " + row[1].strip(
                ) + " " + row[2].strip()
                attribs = (repoName, "", row[3], row[4])
                LOG.debug('New source: ' + sourceName)
                addSource(sourceName, attribs, reftag, repository)
                s_count += 1
#                        LOG.debug('Unknown rectype: ' + rectype)
#                        raise GrampsImportError('Unknown record type ' + rectype)

    except:
        exc = sys.exc_info()[0]
        LOG.error('*** Something went really wrong! %s', exc)
        return ImportInfo({_('Results'): _('Something went really wrong  ')})

    results = {
        _('Results'): _('Input file handled.'),
        _('    Repositories   '): str(r_count),
        _('    Sources        '): str(s_count)
    }

    LOG.info('Input file handled.')
    LOG.info('    Repositories   ' + str(r_count))
    LOG.info('    Sources        ' + str(s_count))

    db.enable_signals()
    db.request_rebuild()

    return ImportInfo(results)
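A minimal standalone sketch (fabricated sample rows) of the CSV handling above: the dialect is sniffed from the first kilobyte, the delimiter is then forced to ';', and the file is re-read from the start row by row.

import csv
import io

sample = ("Repo A;Series;1800-1850;Author A;Note one\n"
          "Repo A;Series;1851-1900;Author B;Note two\n")
t_in = io.StringIO(sample)
t_dialect = csv.Sniffer().sniff(t_in.read(1024))
t_dialect.delimiter = ";"
t_in.seek(0)
for row in csv.reader(t_in, t_dialect):
    print([col.strip() for col in row])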
Example n. 17
import datetime
import logging
from collections import namedtuple

# Project and find_project_tags come from the application's own modules (imports omitted in the original snippet).

logger = logging.getLogger(__name__)

# logging.basicConfig(level=logging.INFO,
#                 # format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
#                 format='%(asctime)s [line:%(lineno)d] %(levelname)s %(message)s',
#                 datefmt='%a, %d %b %Y %H:%M:%S',
#                 # filename='global_subscribe_error.log',
#                 filemode='a')

ch = logging.StreamHandler()
fh = logging.FileHandler('global_subscribe_error.log')
logger.addHandler(ch)
logger.addHandler(fh)
formatter = logging.Formatter('%(asctime)s [line:%(lineno)d] %(levelname)s %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.setLevel(logging.INFO)

def run_subscribe():
    # select user and user's match_table and get industry, keyword and tags
    start_time = datetime.date.today() + datetime.timedelta(hours=-624)
    end_time = datetime.date.today()
    projects = Project.objects.filter(add_time__range=(start_time, end_time), status=2)
    if len(projects) == 0:
        return
    new_projects = []
    new_project = namedtuple("new_project", ["project", "project_tags", "project_industries"])
    for project in projects:
        project_industries, project_tags = find_project_tags(project)
Example n. 18
     import build_scripts_2to3 as build_scripts
except ImportError:
    # python 2.x
    from distutils.command.build_py import build_py
    from distutils.command.build_scripts import build_scripts
import os
from os.path import isfile, join, isdir
import sys
import warnings
from glob import glob

if setuptools and "test" in sys.argv:
    import logging
    logSys = logging.getLogger("fail2ban")
    hdlr = logging.StreamHandler(sys.stdout)
    fmt = logging.Formatter("%(asctime)-15s %(message)s")
    hdlr.setFormatter(fmt)
    logSys.addHandler(hdlr)
    if set(["-q", "--quiet"]) & set(sys.argv):
        logSys.setLevel(logging.CRITICAL)
        warnings.simplefilter("ignore")
        sys.warnoptions.append("ignore")
    elif set(["-v", "--verbose"]) & set(sys.argv):
        logSys.setLevel(logging.DEBUG)
    else:
        logSys.setLevel(logging.INFO)
elif "test" in sys.argv:
    print("python distribute required to execute fail2ban tests")
    print("")

longdesc = '''
Example n. 19
except ImportError:
    try:
        ModuleNotFoundError
        try:
            import tango as pt
        except ModuleNotFoundError:
            pass
    except NameError:
        pt = None


logger = logging.getLogger("Task")
while len(logger.handlers):
    logger.removeHandler(logger.handlers[0])

f = logging.Formatter("%(asctime)s - %(name)s.   %(funcName)s - %(levelname)s - %(message)s")
fh = logging.StreamHandler()
fh.setFormatter(f)
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)


# From: https://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread
def _async_raise(tid, exctype):
    """Raises an exception in the threads with id tid"""
    if not inspect.isclass(exctype):
        raise TypeError("Only types can be raised (not instances)")
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),
                                                     ctypes.py_object(exctype))
    if res == 0:
        raise ValueError("invalid thread id")
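A brief sketch (hypothetical logger name) of the handler-reset pattern at the top of this example: draining logger.handlers before attaching a fresh StreamHandler keeps repeated imports or reloads in an interactive session from stacking duplicate handlers.

import logging

def reset_task_logger():
    lg = logging.getLogger("TaskDemo")
    while len(lg.handlers):
        lg.removeHandler(lg.handlers[0])
    fh = logging.StreamHandler()
    fh.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
    lg.addHandler(fh)
    lg.setLevel(logging.DEBUG)
    return lg

reset_task_logger()
reset_task_logger()
assert len(logging.getLogger("TaskDemo").handlers) == 1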
Example n. 20
                        dest="dict",
                        default="",
                        help='dictionary')
    parser.add_argument("-p",
                        "--punc",
                        action="store",
                        dest="punc",
                        default="puncs.list",
                        help='punctuation lists')

    args = parser.parse_args()
    options = vars(args)

    logger = logging.getLogger()
    formatter = logging.Formatter(
        '[%(asctime)s][*%(levelname)s*][%(filename)s:%(lineno)d|%(funcName)s] - %(message)s',
        '%Y%m%d-%H:%M:%S')
    file_handler = logging.FileHandler('LOG-selectWord.txt',
                                       'w',
                                       encoding='utf-8')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    logger.setLevel(logging.INFO)

    allStartTP = time.time()
    appInst = SelectWord(options, logger)
    appInst.process()
Example n. 21
import logging
import subprocess
# sys.path.insert(0,'/usr/local/lib/python3.7/site-packages')
# print(sys.path)
import cv2
from datetime import datetime
file_name='Video'+datetime.now().strftime('_%d_%m_%H_%M_%S')+'.avi'

from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
#import os

logger = logging.getLogger('TfPoseEstimator-WebCam')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

movement_tracker_x = {}
movement_tracker_y = {}

status_tracker = {"SITTING" : [], "STANDING": [], "LAYING": [], "COOKING": [], "NOTHING": []}
status = "NOTHING"
factor = None
pose = ' '

timer = 0

#------------------ OBJECT DETECTION & CLASSIFICATION ------------------------------
import pandas as pd
Example n. 22
def info():
    logger = logging.getLogger('discord')
    logger.setLevel(logging.INFO)
    handler = logging.FileHandler(filename=f'Logs/{datetime.date.today()}-info.log', encoding='utf-8')
    handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
    logger.addHandler(handler)
Example n. 23
def add_syslog_handler(address):
    from logging.handlers import SysLogHandler
    handler = SysLogHandler(address, 514)
    handler.setFormatter(logging.Formatter(Logger.SYSLOG_FORMAT))
    Logger.logger.addHandler(handler)
Example n. 24
def main():
    banner = '''
   ________              __      ____
  / ____/ /_  ___  _____/ /__   / __ \___  ____
 / /   / __ \/ _ \/ ___/ //_/  / /_/ / _ \/ __ \ 
/ /___/ / / /  __/ /__/ ,<    / _, _/  __/ /_/ /
\____/_/ /_/\___/\___/_/|_|  /_/ |_|\___/ .___/
                                       /_/
'''

    print(Fore.CYAN + banner + Style.RESET_ALL)
    print("Check IP and Domain Reputation")

    parser = argparse.ArgumentParser(
        description='Check IP or Domain Reputation',
        formatter_class=argparse.RawTextHelpFormatter,
        epilog='''
    Options
    --------------------
    freegeoip [freegeoip.live]  - free/opensource geolocation service     
    virustotal [virustotal.com] - online multi-antivirus scan engine            
    
    * NOTE: 
    Use of the VirusTotal option requires an API key.  
    The service is "free" to use, however you must register 
    for an account to receive an API key.''')

    optional = parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    required.add_argument('query', help='query ip address or domain')
    optional.add_argument('--log',
                          action='store_true',
                          help='log results to file')
    optional.add_argument('--vt', action='store_true', help='check virustotal')

    group = optional.add_mutually_exclusive_group()
    group.add_argument('--fg',
                       action='store_true',
                       help='use freegeoip for geolocation')  # nopep8
    group.add_argument(
        '--mx',
        nargs='+',
        metavar='FILE',
        help='geolocate multiple ip addresses or domains')  # nopep8

    parser._action_groups.append(optional)
    args = parser.parse_args()
    QRY = args.query

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    # Initialize utilities
    workers = Workers(QRY)

    print("\n" + Fore.GREEN + "[+] Running checks..." + Style.RESET_ALL)

    if args.log:
        if not os.path.exists('logfile'):
            os.mkdir('logfile')
        dt_stamp = datetime.now().strftime("%Y-%m-%d_%H%M%S")
        file_log = logging.FileHandler(f"logfile/logfile_{dt_stamp}.txt")
        file_log.setFormatter(
            logging.Formatter("[%(asctime)s %(levelname)s] %(message)s",
                              datefmt="%m/%d/%Y %I:%M:%S"))  # nopep8
        logger.addHandler(file_log)

    if args.fg:
        map_free_geo(QRY)

    if args.mx:
        print(
            colored.stylize("\n--[ Processing Geolocation Map ]--",
                            colored.attr("bold")))  # nopep8
        multi_map(input_file=args.mx[0])
        print(colored.stylize("\n--[ GeoIP Map File ]--",
                              colored.attr("bold")))  # nopep8
        try:
            multi_map_file = Path('multi_map.html').resolve(strict=True)
        except FileNotFoundError:
            logger.info(
                "[-] Geolocation map file was not created or does not exist."
            )  # nopep8
        else:
            logger.info(f"[>] Geolocation map file saved to: {multi_map_file}")
        sys.exit(1)

    if args.vt:
        print(
            colored.stylize("\n--[ VirusTotal Detections ]--",
                            colored.attr("bold")))  # nopep8
        if not config['VIRUS-TOTAL']['api_key']:
            logger.warning(
                "Please add VirusTotal API key to the 'settings.yml' file, or add it below"
            )  # nopep8
            user_vt_key = input("Enter key: ")
            config['VIRUS-TOTAL']['api_key'] = user_vt_key

            with open('settings.yml', 'w') as output:
                yaml.dump(config, output)

        api_key = config['VIRUS-TOTAL']['api_key']
        virustotal = VirusTotalChk(api_key)
        if DOMAIN.findall(QRY):
            virustotal.vt_run('domains', QRY)
        elif IP.findall(QRY):
            virustotal.vt_run('ip_addresses', QRY)
        elif URL.findall(QRY):
            virustotal.vt_run('urls', QRY)
        else:
            virustotal.vt_run('files', QRY)
            print(
                colored.stylize("\n--[ Team Cymru Detection ]--",
                                colored.attr("bold")))  # nopep8
            workers.tc_query(qry=QRY)
            sys.exit("\n")

    if DOMAIN.findall(QRY) and not EMAIL.findall(QRY):
        print(
            colored.stylize("\n--[ Querying Domain Blacklists ]--",
                            colored.attr("bold")))  # nopep8
        workers.spamhaus_dbl_worker()
        workers.blacklist_dbl_worker()
        print(
            colored.stylize(f"\n--[ WHOIS for {QRY} ]--",
                            colored.attr("bold")))  # nopep8
        workers.whois_query(QRY)

    elif IP.findall(QRY):
        # Check if cloudflare ip
        print(
            colored.stylize("\n--[ Using Cloudflare? ]--",
                            colored.attr("bold")))  # nopep8
        if workers.cflare_results(QRY):
            logger.info("Cloudflare IP: Yes")
        else:
            logger.info("Cloudflare IP: No")

        print(
            colored.stylize("\n--[ Querying DNSBL Lists ]--",
                            colored.attr("bold")))  # nopep8
        workers.dnsbl_mapper()
        workers.spamhaus_ipbl_worker()
        print(
            colored.stylize("\n--[ Querying IP Blacklists ]--",
                            colored.attr("bold")))  # nopep8
        workers.blacklist_ipbl_worker()

    elif NET.findall(QRY):
        print(
            colored.stylize("\n--[ Querying NetBlock Blacklists ]--",
                            colored.attr("bold")))  # nopep8
        workers.blacklist_netblock_worker()

    else:
        print(Fore.YELLOW +
              "[!] Please enter a valid query -- Domain or IP address" +
              Style.RESET_ALL)  # nopep8
        print("=" * 60, "\n")
        parser.print_help()
        parser.exit()

    # ---[ Results output ]-------------------------------
    print(colored.stylize("\n--[ Results ]--", colored.attr("bold")))
    TOTALS = workers.DNSBL_MATCHES + workers.BL_MATCHES
    BL_TOTALS = workers.BL_MATCHES
    if TOTALS == 0:
        logger.info(f"[-] {QRY} is not listed in any Blacklists\n")
    else:
        _QRY = Fore.YELLOW + QRY + Style.BRIGHT + Style.RESET_ALL
        _DNSBL_MATCHES = Fore.WHITE + Back.RED + str(
            workers.DNSBL_MATCHES) + Style.BRIGHT + Style.RESET_ALL  # nopep8
        _BL_TOTALS = Fore.WHITE + Back.RED + str(
            BL_TOTALS) + Style.BRIGHT + Style.RESET_ALL  # nopep8
        logger.info(
            f"[>] {_QRY} is listed in {_DNSBL_MATCHES} DNSBL lists and {_BL_TOTALS} Blacklists\n"
        )  # nopep8

    # ---[ Geo Map output ]-------------------------------
    if args.fg or args.mx:
        print(colored.stylize("--[ GeoIP Map File ]--",
                              colored.attr("bold")))  # nopep8
        time_format = "%d %B %Y %H:%M:%S"
        try:
            ip_map_file = prog_root.joinpath('geomap/ip_map.html').resolve(
                strict=True)  # nopep8
        except FileNotFoundError:
            logger.warning(
                "[-] Geolocation map file was not created/does not exist.\n"
            )  # nopep8
        else:
            ip_map_timestamp = datetime.fromtimestamp(
                os.path.getctime(ip_map_file))  # nopep8
            logger.info(
                f"[>] Geolocation map file created: {ip_map_file} [{ip_map_timestamp.strftime(time_format)}]\n"
            )  # nopep8
Esempio n. 25
0
'''Tasks:
1) Find the multiples of 3 or 5 below 10.
2) Find the sum of all the multiples found.'''
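# Worked example of the task above: the multiples of 3 or 5 below 10 are
# 3, 5, 6 and 9, so the expected result is 3 + 5 + 6 + 9 = 23, i.e.
#   sum(n for n in range(10) if n % 3 == 0 or n % 5 == 0)  # -> 23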

import abc
import logging
logger = logging.getLogger(__name__)  #step1: create logger object

#step2: create handler object and configure (formatter and level for channel handler)
console_stream = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_stream.setFormatter(formatter)
console_stream.setLevel(logging.DEBUG)

#step3: config logger object by adding channel handler and set logging level
logger.addHandler(console_stream)
logger.setLevel(logging.DEBUG)
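# The logger is now fully configured; e.g. logger.debug("...") would be
# written to the console through the stream handler added above.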

class FactorComputeMachineInterface(object):
    ''' Interface for factor's multiples computing machine under a given
    upperlimit.'''

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def add_base_factor(self, value_object):
        raise NotImplementedError("Implement add factors to a container.")

    @abc.abstractmethod
    def add_super_limit(self, value_object):
        raise NotImplementedError("Implement to set upperlimit values for \
Esempio n. 26
0
import logging
import time

from shadowsocksr_cli.init_utils import *

# Initialize the utility class init_utils.InitConfig
init_config = InitConfig()

# Initialize the global logger's logging format and level
logger = logging.getLogger("shadowsocksr-cli")
logger.setLevel(logging.DEBUG)

# Initialize the console logging format and level for the global logger
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(filename)s[line:%(lineno)d] - [%(funcName)s] - %(levelname)s: %('
                              'message)s')
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)

# Initialize the file logging format and level for the global logger
log_file_handler = logging.FileHandler(init_config.log_file)
log_file_handler.setLevel(logging.DEBUG)
log_file_handler.setFormatter(formatter)
logger.addHandler(log_file_handler)
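# Note: with the logger at DEBUG, the stream handler at INFO and the file
# handler at DEBUG, DEBUG records go only to the log file, while INFO and
# above also appear on the console.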

# Initialize the global ssr_logger logging format and level
ssr_logger = logging.getLogger("shadowsocksr")
ssr_logger.setLevel(logging.INFO)

# Initialize the file logging format and level for the global ssr_logger
Esempio n. 27
0
import logging
import sys

from logging.handlers import TimedRotatingFileHandler

from regression_model.config import config

# Multiple calls to logging.getLogger('someLogger') return a
# reference to the same logger object.  This is true not only
# within the same module, but also across modules as long as
# it is in the same Python interpreter process.
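# For illustration, that caching can be checked directly:
#   >>> logging.getLogger("regression_model") is logging.getLogger("regression_model")
#   True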

FORMATTER = logging.Formatter(
    "%(asctime)s — %(name)s — %(levelname)s —"
    "%(funcName)s:%(lineno)d — %(message)s")
LOG_FILE = config.LOG_DIR / 'ml_models.log'


def get_console_handler():
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(FORMATTER)
    return console_handler

Esempio n. 28
0
if not os.path.exists('logs'):
    os.mkdir('logs')

if not os.path.exists('outputs'):
    os.mkdir('outputs')

# create logger named 'collision'
logger = logging.getLogger('collision')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
file_handler = logging.FileHandler('logs/{}'.format(Config.LOG_FILENAME))
file_handler.setLevel(Config.FILE_LOGGING_LEVEL)
# create console handler with a higher log level
console_handler = logging.StreamHandler()
console_handler.setLevel(Config.CONSOLE_LOGGING_LEVEL)
# create formatter and add it to the handlers
file_formatter = logging.Formatter(
    '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
console_formatter = logging.Formatter('%(levelname)s: %(message)s')
# console_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
file_handler.setFormatter(file_formatter)
console_handler.setFormatter(console_formatter)
# add the handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(console_handler)

logger.debug('Collision app has started')


# Signal handler To exit on Ctrl+C
def signal_handler(sig, frame):
    print('\n\nYou pressed Ctrl+C! .. exiting..')
    sys.exit('Bye!')
Esempio n. 29
0
import logging

LOG_LEVEL = logging.INFO
LOG_FILE = 'scrader.log'
LOG_FORMAT = '%(asctime)s %(levelname)-8s %(filename)s:%(lineno)-4d: %(message)s'
LOG_DATE_FORMAT = '%H:%M:%S'
log_formatter = logging.Formatter(LOG_FORMAT)
root_logger = logging.getLogger('scrader')
root_logger.setLevel(LOG_LEVEL)
file_handler = logging.FileHandler(LOG_FILE)
file_handler.setFormatter(log_formatter)
root_logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
root_logger.addHandler(console_handler)
LOG = root_logger
LOG.info('Logger initialized')
Esempio n. 30
0
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')

args = parser.parse_args()

args.save = 'eval-{}-{}-{}'.format(args.dataset, args.save, time.strftime("%Y%m%d-%H%M%S"))
if args.random:
  args.save = 'random_' + args.save
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))

log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
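# The root logger now writes each record both to stdout (via basicConfig) and
# to the run directory's log.txt through the extra file handler.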

# Get normalized dataset name
dataset = DSET_NAME_TBL[args.dataset.lower().strip()]

# If the default set of primitives is requested, use the normalized name of the dataset
primitives_name = dataset if args.primitives == 'Default' else args.primitives

def main():
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)