Example #1
            train_loss_sed.append(float(ss[-3].replace(',', '')))
            train_loss_doa.append(float(ss[-1].replace(',', '')))
            flg = True
        elif flg:
            valid_loss_sed.append(float(ss[3].replace(',', '')))
            valid_loss_doa.append(float(ss[5].replace(',', '')))
            flg = False

    return [train_loss_sed, train_loss_doa, valid_loss_sed, valid_loss_doa]


if __name__ == '__main__':
    logger.setLevel(INFO)
    handler = StreamHandler()
    handler.setFormatter(
        Formatter(
            '%(asctime)s %(name)s,l%(lineno)03d[%(levelname)s]%(message)s'))
    logger.addHandler(handler)
    args = parser.parse_args()

    experiment_id = args.eid
    dirpath = os.path.join('./article_figure/', experiment_id)
    try:
        os.mkdir(dirpath)
    except FileExistsError:
        logger.info('Output directory ({:s}) already exists.'.format(dirpath))

    exp_conds = [
        # Enumerate experimental conditions and the corresponding training log file paths here like below:
        # ('Base.', './result/2020-08-05_18-42-39_017557.log'),
        # ('Base. (w time equiv.)', './result/2020-08-05_18-39-45_357551.log'),
        # ('Base. (w rot. equiv.)', './result/2020-08-05_18-41-56_214096.log'),
Example #2
import json
import glob
import os
from logging import getLogger, StreamHandler, Formatter, DEBUG, INFO

logger = getLogger(__name__)
logger.setLevel(DEBUG)
handler = StreamHandler()
formatter = Formatter('{} - %(levelname)s - %(message)s'.format(__file__))
handler.setFormatter(formatter)
handler.setLevel(INFO)
logger.addHandler(handler)

# should include '/' at the end.
GITHUB_LINK = 'OpenJij/OpenJijTutorial/blob/master/'


def add_google_colab_link(nb_name, github_link, output_nb):
    with open(nb_name, "r", encoding='utf-8') as f:
        nb_json = json.load(f)

    # Check if the second cell has a Colab link
    def check_colab_link(cell):
        if cell['cell_type'] != 'markdown':
            return False
        elif ('[![Open in Colab]' in cell['source'][0]
              or '<a href="https://colab' in cell['source'][0]):
            return True
        else:
            return False
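
A usage sketch for add_google_colab_link (the notebook glob and the in-place rewrite are illustrative assumptions, not part of the original):

# Hypothetical driver: rewrite every tutorial notebook in place so the
# Colab badge points at GITHUB_LINK.
for nb_path in glob.glob(os.path.join('source', '*.ipynb')):
    add_google_colab_link(nb_path, GITHUB_LINK, nb_path)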
Example #3
#!/usr/bin/env python
# coding:utf-8

from logging import getLogger, Formatter, FileHandler, StreamHandler, DEBUG, INFO
import logging.config
import mysql.connector

# Logging configuration
logger = getLogger(__name__)
loggerJSON = getLogger(__name__ + '.json')  # distinct name; getLogger(__name__) would return the same logger
if not logger.handlers and not loggerJSON.handlers:
    fileHandler = FileHandler('twitter.log')
    fileHandlerJSON = FileHandler('json.log')
    formatter = Formatter(
        '%(asctime)s [%(levelname)s] [%(filename)s: %(funcName)s: %(lineno)d] %(message)s'
    )

    fileHandler.setFormatter(formatter)
    fileHandler.setLevel(INFO)
    streamHandler = StreamHandler()
    streamHandler.setLevel(INFO)
    logger.setLevel(INFO)
    logger.addHandler(fileHandler)
    logger.addHandler(streamHandler)

    fileHandlerJSON.setFormatter(formatter)
    fileHandlerJSON.setLevel(DEBUG)
    loggerJSON.setLevel(DEBUG)
    loggerJSON.addHandler(fileHandlerJSON)
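
    # Illustrative usage (an assumption, not part of the original script):
    # human-readable events go to twitter.log and stderr at INFO, while raw
    # JSON payloads go to json.log at DEBUG.
    logger.info('stream connected')
    loggerJSON.debug('{"id": 1, "text": "example tweet"}')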

# Twitter connection keys
Example #4
    df2.columns = ['question']
    df_que2 = pandas.concat([df1, df2], ignore_index=True)
    df_que2 = df_que2.drop_duplicates().fillna('')
    logger.info('df_que2 {}'.format(df_que2.shape))
    df_que2['qid'] = numpy.arange(df_que2.shape[0])

    map_test = dict(zip(df_que2['question'], range(df_que2.shape[0])))

    return map_train, map_test, train_num


if __name__ == '__main__':
    from logging import StreamHandler, DEBUG, INFO, Formatter, FileHandler

    log_fmt = Formatter(
        '%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s '
    )
    handler = FileHandler('doc2vec.py.log', 'w')
    handler.setLevel(DEBUG)
    handler.setFormatter(log_fmt)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)

    handler = StreamHandler()
    handler.setLevel(INFO)
    handler.setFormatter(log_fmt)
    # keep the logger itself at DEBUG so the file handler above still
    # receives DEBUG records; only the console handler is limited to INFO
    logger.addHandler(handler)

    # load_data()
    train()
Example #5
app.register_blueprint(root.root, url_prefix="")

sockets = Sockets(app)

app.config.from_pyfile('flask_config.py')

if not os.path.exists("logs"):
    os.mkdir("logs")

file_handler = RotatingFileHandler('logs/hive-discovery.log',
                                   maxBytes=10000,
                                   backupCount=1)
file_handler.setLevel(logging.DEBUG)

file_handler.setFormatter(
    Formatter(
        "%(asctime)s %(levelname)s [%(module)s:%(lineno)d]: %(message)s"))

app.logger.addHandler(file_handler)
print(app.url_map)


@sockets.route('/echo')
def echo_socket(ws):
    while True:
        message = ws.receive()
        ws.send(message)


# TODO: add authorization to the ws endpoint
@sockets.route('/hive')
def hive_socket(ws):
Example #6
    def format(self, record):
        now = time()
        prev_time = self.prev_time
        self.prev_time = max(self.prev_time, now)
        record.delta_secs = now - prev_time
        record.relative_created_secs = record.relativeCreated / 1000
        return super(DeltaSecondsFormatter, self).format(record)


if boolify(os.environ.get('CONDA_TIMED_LOGGING')):
    _FORMATTER = DeltaSecondsFormatter(
        "%(relative_created_secs) 7.2f %(delta_secs) 7.2f "
        "%(levelname)s %(name)s:%(funcName)s(%(lineno)d): %(message)s")
else:
    _FORMATTER = Formatter(
        "%(levelname)s %(name)s:%(funcName)s(%(lineno)d): %(message)s")


def dashlist(iterable, indent=2):
    return ''.join('\n' + ' ' * indent + '- ' + str(x) for x in iterable)


class ContextDecorator(object):
    """Base class for a context manager class (implementing __enter__() and __exit__()) that also
    makes it a decorator.
    """

    # TODO: figure out how to improve this pattern so e.g. swallow_broken_pipe doesn't have to be instantiated  # NOQA

    def __call__(self, f):
        @wraps(f)
Example #7
import json
import logging
import os
import sys
from logging import Formatter, StreamHandler

import semver
import docker
from docker import APIClient

handlers = []
formatter = Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

stream_handler = StreamHandler(sys.stderr)
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.INFO)
handlers.append(stream_handler)

logging.basicConfig(level=logging.DEBUG, handlers=handlers)

CONFIG_FILENAME = 'config.json'
HERE = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(HERE, CONFIG_FILENAME)
dockerfile_path = HERE

DOCKER_USERNAME = os.environ.get('DOCKER_USERNAME')
DOCKER_PASSWORD = os.environ.get('DOCKER_PASSWORD')
BUMP_VERSION = os.environ['BUMP_VERSION']

with open(config_path, encoding='utf-8') as data_file:
    config = json.loads(data_file.read())
Example #8
'''
Deprecated: old app startup file.
[email protected]
'''
from app import app

import logging
from logging import Formatter, FileHandler

file_handler = FileHandler('app.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(Formatter('%(asctime)s %(levelname)s: %(message)s'))

app.logger.addHandler(file_handler)


################ Flask-APScheduler #################
## http://stackoverflow.com/questions/32424148/how-to-use-flask-apscheduler-in-existed-flask-app
class Config(object):
    JOBS = [{
        'id': 'job1',
        'func': '__main__:job1',
        'args': (1, 2),
        'trigger': 'interval',
        'seconds': 3600
    }, {
        'id': 'startup_job',
        'func': '__main__:startup_job',
        'args': ["Hello"]
    }]
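
The JOBS entries reference callables by dotted path, so matching functions must exist in __main__; a minimal sketch following the Stack Overflow recipe linked above (the function bodies are illustrative assumptions):

from flask_apscheduler import APScheduler


def job1(a, b):
    # Hypothetical body for the hourly interval job (receives args=(1, 2)).
    app.logger.info('job1 ran with %s, %s', a, b)


def startup_job(greeting):
    # Hypothetical body for the one-shot startup job (receives args=["Hello"]).
    app.logger.info('startup_job says %s', greeting)


if __name__ == '__main__':
    app.config.from_object(Config())
    scheduler = APScheduler()
    scheduler.init_app(app)
    scheduler.start()
    app.run()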
Example #9
import datetime
from logging import (getLogger, FileHandler, StreamHandler, Formatter, INFO,
                     DEBUG)

from daskperiment.config import _LOG_DIR

_TIME_FMT = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
_FILE_FMT = 'log_{}.log'

_FORMATTER = Formatter('%(asctime)s [%(name)s] [%(levelname)s] %(message)s')


def set_config(logger, level='INFO'):
    """
    Add config to logger
    """
    if not _LOG_DIR.exists():
        _LOG_DIR.mkdir()
    log_file = _LOG_DIR / _FILE_FMT.format(_TIME_FMT)

    # for file output
    fout = FileHandler(filename=str(log_file), mode='w')
    fout.setFormatter(_FORMATTER)
    logger.addHandler(fout)

    # for stdout
    stdout = StreamHandler()
    stdout.setFormatter(_FORMATTER)
    logger.addHandler(stdout)

    if level == 'INFO':
Example #10
            # rollback the whole transaction
            db.session.rollback()

            app.logger.error("DB exception: {}".format(sys.exc_info()[0]))
            response = Response(response="Error in the Party DB.", status=500, mimetype="text/html")
            return response

        collection_path = response.headers["location"] = "/respondents/" + str(new_respondent.id)
        etag = hashlib.sha1(collection_path.encode('utf-8')).hexdigest()  # sha1 requires bytes on Python 3
        response.set_etag(etag)

        response.headers["id"] = "/respondents/" + str(new_respondent.id)
        return response, 201

    return jsonify({"message": "Please provide a valid Json object.",
                    "hint": "you may need to pass a content-type: application/json header"}), 400


if __name__ == '__main__':
    # Create a file handler to handle our logging
    handler = RotatingFileHandler('application.log', maxBytes=10000, backupCount=1)
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)
    handler.setFormatter(Formatter('%(asctime)s %(levelname)s: %(message)s ' '[in %(pathname)s:%(lineno)d]'))
    # Initialise SqlAlchemy configuration here to avoid circular dependency
    db.init_app(app)

    # Run
    PORT = int(os.environ.get('PORT', 5062))
    app.run(host='0.0.0.0', port=PORT, debug=False)
Example #11
def is_url(url):
    """ Checks if the URL exists """
    # https://stackoverflow.com/a/13641613
    try:
        if head(url).status_code == 200:
            return True
    except requests.ConnectionError as e:
        logger.warning("Error %s while connecting to %s" % (e, url))
    return False


config = ConfigObj("config")

handler = FileHandler(config["dir"]["log"])
logger = getLogger("audio_date_formatter")
formatter = Formatter("%(asctime)s - %(lineno)s: %(levelname)s %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(INFO)

logger.info("START %s" % (argv[0]))

past_date_start = (date.today() -
                   timedelta(days=int(config["days_back_start"]))).strftime(
                       config["broadcast_date_format"])
past_date_end = (date.today() -
                 timedelta(days=int(config["days_back_end"]))).strftime(
                     config["broadcast_date_format"])

files_to_upload = []
Example #12
def main():
    """Entry point."""
    debug = False
    try:
        argparser = ArgumentParser(description=modules[__name__].__doc__)
        argparser.add_argument('device',
                               nargs='?',
                               default='ftdi:///?',
                               help='serial port device name')
        argparser.add_argument('-S',
                               '--no-smb',
                               action='store_true',
                               default=False,
                               help='use regular I2C mode vs. SMBus scan')
        argparser.add_argument('-P',
                               '--vidpid',
                               action='append',
                               help='specify a custom VID:PID device ID, '
                               'may be repeated')
        argparser.add_argument('-V',
                               '--virtual',
                               type=FileType('r'),
                               help='use a virtual device, specified as YaML')
        argparser.add_argument('-v',
                               '--verbose',
                               action='count',
                               default=0,
                               help='increase verbosity')
        argparser.add_argument('-d',
                               '--debug',
                               action='store_true',
                               help='enable debug mode')
        args = argparser.parse_args()
        debug = args.debug

        if not args.device:
            argparser.error('Serial device not specified')

        loglevel = max(DEBUG, ERROR - (10 * args.verbose))
        loglevel = min(ERROR, loglevel)
        if debug:
            formatter = Formatter(
                '%(asctime)s.%(msecs)03d %(name)-20s '
                '%(message)s', '%H:%M:%S')
        else:
            formatter = Formatter('%(message)s')
        FtdiLogger.log.addHandler(StreamHandler(stderr))
        FtdiLogger.set_formatter(formatter)
        FtdiLogger.set_level(loglevel)

        if args.virtual:
            from pyftdi.usbtools import UsbTools
            # Force PyUSB to use PyFtdi test framework for USB backends
            UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
            # Ensure the virtual backend can be found and is loaded
            backend = UsbTools.find_backend()
            loader = backend.create_loader()()
            loader.load(args.virtual)

        try:
            add_custom_devices(Ftdi, args.vidpid)
        except ValueError as exc:
            argparser.error(str(exc))

        I2cBusScanner.scan(args.device, not args.no_smb)

    except (ImportError, IOError, NotImplementedError, ValueError) as exc:
        print('\nError: %s' % exc, file=stderr)
        if debug:
            print(format_exc(chain=False), file=stderr)
        exit(1)
    except KeyboardInterrupt:
        exit(2)
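
For reference, the verbosity clamp above (loglevel = max(DEBUG, ERROR - 10 * verbose), then capped at ERROR) maps the -v count onto the standard logging levels: no flag gives ERROR (40), -v gives WARNING (30), -vv gives INFO (20), and -vvv or more gives DEBUG (10).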
Example #13
# -*- coding: utf-8 -*-
"""
"""
import json
from logging import getLogger, StreamHandler, Formatter, INFO
import xml.etree.ElementTree as ET
from urllib import request as urllib

from googleapiclient.discovery import build
from tqdm import tqdm

logger = getLogger(__name__)
logger.setLevel(INFO)
handler = StreamHandler()
handler_format = Formatter('[%(asctime)s] - %(message)s')
handler.setFormatter(handler_format)
handler.setLevel(INFO)
logger.addHandler(handler)


# FIXME: gonna be duplicated.
class Search(object):
    """
    Search class for YouTube

    Args:
        * developer_key (string): developer key to access Google YouTube API(v3)
        * caption_mode (string): select mode for caption type. ['standard', 'ASR', 'both']
                - standard -> handmade caption
                - ASR -> captioned by ASR(Auto Speech Recognition)
Example #14
LOGGER_NAME = 'JapaneseTokenizer'

import logging
import sys
from logging import getLogger, Formatter, Logger, StreamHandler

# Formatter
customFormatter = Formatter(
    fmt='[%(asctime)s]%(levelname)s - %(filename)s#%(funcName)s:%(lineno)d: %(message)s',
    datefmt='%Y/%m/%d %H:%M:%S')

# StreamHandler
STREAM_LEVEL = logging.DEBUG
STREAM_FORMATTER = customFormatter
STREAM = sys.stderr

st_handler = StreamHandler(stream=STREAM)
st_handler.setLevel(STREAM_LEVEL)
st_handler.setFormatter(STREAM_FORMATTER)


def init_logger(logger):
    # type: (logging.Logger) -> logging.Logger
    logger.addHandler(st_handler)
    logger.propagate = False

    return logger
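
A quick usage sketch for init_logger (the message is illustrative):

# Hypothetical usage: attach the shared stderr handler to the package logger.
tokenizer_logger = init_logger(getLogger(LOGGER_NAME))
tokenizer_logger.setLevel(logging.DEBUG)  # the logger's own level still gates records
tokenizer_logger.debug('tokenizer ready')  # emitted on sys.stderr via st_handler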
Example #15
    def __init__(
            self,
            message_number=default_message_number,
            exception_number=default_exception_number,
            permanent_progressbar_slots=default_permanent_progressbar_slots,
            redraw_frequency_millis=default_redraw_frequency_millis,
            console_level=default_level,
            task_millis_to_removal=default_task_millis_to_removal,
            console_format_strftime=default_console_format_strftime,
            console_format=default_console_format,
            file_handlers=None,
            application_name=None):
        """
        Initializes a new logger and starts its process immediately using given configuration.
        :param message_number:              [Optional] Number of simultaneously displayed messages below progress bars.
        :param exception_number:            [Optional] Number of simultaneously displayed exceptions below messages.
        :param permanent_progressbar_slots: [Optional] The amount of vertical space (bar slots) to keep at all times,
                                            so the message logger will not move anymore if the bar number is equal to
                                            or lower than this parameter.
        :param redraw_frequency_millis:     [Optional] Minimum time lapse in milliseconds between two redraws. It may be
                                            more because the redraw rate depends upon time AND method calls.
        :param console_level:               [Optional] The logging level (from standard logging module).
        :param task_millis_to_removal:      [Optional] Minimum time lapse in milliseconds at maximum completion before
                                            a progress bar is removed from display. The progress bar may vanish at a
                                            further time as the redraw rate depends upon time AND method calls.
        :param console_format_strftime:     [Optional] Specify the time format for console log lines using python
                                            strftime format. Defaults to format: '29 november 2016 21:52:12'.
        :param console_format:              [Optional] Specify the format of the console log lines. There are two
                                            variables available: {T} for timestamp, {L} for level. Will then add some
                                            tabulations in order to align text beginning for all levels.
                                            Defaults to format: '{T} [{L}]'
                                            Which will produce: '29 november 2016 21:52:12 [INFO]      my log text'
                                                                '29 november 2016 21:52:13 [WARNING]   my log text'
                                                                '29 november 2016 21:52:14 [DEBUG]     my log text'
        :param file_handlers:               [Optional] Specify the file handlers to use. Each file handler will use its
                                            own regular formatter and level. Console logging is distinct from file
                                            logging. Console logging uses custom stdout formatting, while file logging
                                            uses regular python logging rules. All handlers are permitted except
                                            StreamHandler if used with stdout or stderr which are reserved by this
                                            library for custom console output.
        :param application_name:            [Optional] Used only if 'file_handlers' parameter is ignored. Specifies the
                                            application name to use to format the default file logger using format:
                                            application_%Y-%m-%d_%H-%M-%S.log
        """
        super(FancyLogger, self).__init__()

        # Define default file handlers
        if not file_handlers:
            if not application_name:
                app_name = 'application'
            else:
                app_name = application_name

            handler = RotatingFileHandler(
                filename=os.path.join(
                    os.getcwd(),
                    '{}_{}.log'.format(app_name,
                                       strftime('%Y-%m-%d_%H-%M-%S'))),
                encoding='utf8',
                maxBytes=5242880,  # 5 MB
                backupCount=10,
                delay=True)
            handler.setLevel(logging.INFO)
            handler.setFormatter(
                fmt=Formatter(fmt='%(asctime)s [%(levelname)s]\t%(message)s',
                              datefmt=self.default_console_format_strftime))
            self.default_file_handlers.append(handler)

            file_handlers = self.default_file_handlers

        if not self.queue:
            self.queue = Queue()
            self.process = MultiprocessingLogger(
                queue=self.queue,
                console_level=console_level,
                message_number=message_number,
                exception_number=exception_number,
                permanent_progressbar_slots=permanent_progressbar_slots,
                redraw_frequency_millis=redraw_frequency_millis,
                task_millis_to_removal=task_millis_to_removal,
                console_format_strftime=console_format_strftime,
                console_format=console_format,
                file_handlers=file_handlers)
            self.process.start()
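
Given the docstring above, a minimal instantiation sketch (the keyword values are illustrative assumptions):

# Hypothetical usage: console at INFO, two progress-bar slots reserved,
# and the default rotating file handler named after the application.
log = FancyLogger(console_level=logging.INFO,
                  permanent_progressbar_slots=2,
                  application_name='demo')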
Example #16
from logging import Formatter

LOG_FILENAME = "log.txt"
METADATA_FILENAME = "metadata.json"
ACTIONS_FILENAME = "actions.json"
ACTIONSHTML_FILENAME = "actions.html"
LOGFORMAT = Formatter(
    fmt='%(levelname)-8s %(asctime)-8s.%(msecs)03d: %(message)s',
    datefmt="%H:%M:%S")
DEFAULT_CONFIG_FILENAME = "default.config.json"
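
A minimal sketch of how these constants might be consumed elsewhere (the handler wiring is an assumption, not part of this constants module):

from logging import DEBUG, FileHandler, getLogger

# Hypothetical wiring: timestamped records into LOG_FILENAME via LOGFORMAT.
handler = FileHandler(LOG_FILENAME)
handler.setFormatter(LOGFORMAT)
log = getLogger('session')  # hypothetical logger name
log.setLevel(DEBUG)
log.addHandler(handler)
log.info('run started')  # -> "INFO     21:52:12.123: run started"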
Example #17
 def setUpClass(cls):
     """
     Perform class setup before running the testcase
     Remove shared memory files, start vpp and connect the vpp-api
     """
     gc.collect()  # run garbage collection first
     random.seed()
     cls.logger = getLogger(cls.__name__)
     cls.tempdir = tempfile.mkdtemp(prefix='vpp-unittest-%s-' %
                                    cls.__name__)
     cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
     cls.file_handler.setFormatter(
         Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
                   datefmt="%H:%M:%S"))
     cls.file_handler.setLevel(DEBUG)
     cls.logger.addHandler(cls.file_handler)
     cls.shm_prefix = cls.tempdir.split("/")[-1]
     cls.punt_socket_path = '%s/%s' % (cls.tempdir, vpp_uds_socket_name)
     os.chdir(cls.tempdir)
     cls.logger.info("Temporary dir is %s, shm prefix is %s", cls.tempdir,
                     cls.shm_prefix)
     cls.setUpConstants()
     cls.reset_packet_infos()
     cls._captures = []
     cls._zombie_captures = []
     cls.verbose = 0
     cls.vpp_dead = False
     cls.registry = VppObjectRegistry()
     cls.vpp_startup_failed = False
     cls.reporter = KeepAliveReporter()
     # need to catch exceptions here because if we raise, then the cleanup
     # doesn't get called and we might end with a zombie vpp
     try:
         cls.run_vpp()
         cls.reporter.send_keep_alive(cls)
         cls.vpp_stdout_deque = deque()
         cls.vpp_stderr_deque = deque()
         cls.pump_thread_stop_flag = Event()
         cls.pump_thread_wakeup_pipe = os.pipe()
         cls.pump_thread = Thread(target=pump_output, args=(cls, ))
         cls.pump_thread.daemon = True
         cls.pump_thread.start()
         cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls)
         if cls.step:
             hook = StepHook(cls)
         else:
             hook = PollHook(cls)
         cls.vapi.register_hook(hook)
         cls.sleep(0.1, "after vpp startup, before initial poll")
         try:
             hook.poll_vpp()
         except:
             cls.vpp_startup_failed = True
             cls.logger.critical(
                 "VPP died shortly after startup, check the"
                 " output to standard error for possible cause")
             raise
         try:
             cls.vapi.connect()
         except:
             if cls.debug_gdbserver:
                 print(
                     colorize(
                         "You're running VPP inside gdbserver but "
                         "VPP-API connection failed, did you forget "
                         "to 'continue' VPP from within gdb?", RED))
             raise
     except:
         t, v, tb = sys.exc_info()
         try:
             cls.quit()
         except:
             pass
         raise t, v, tb
Example #18
 def set_formatter(self, fmt):
     self.__formatter = Formatter(fmt)
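
For context, a minimal sketch of the kind of wrapper this two-line mutator could belong to (the class name and surrounding methods are assumptions, not taken from the original):

from logging import Formatter, StreamHandler


class FormattedHandler(StreamHandler):
    """Hypothetical handler that keeps its formatter in a private attribute."""

    def __init__(self, fmt='%(message)s'):
        super(FormattedHandler, self).__init__()
        self.__formatter = Formatter(fmt)

    def set_formatter(self, fmt):
        self.__formatter = Formatter(fmt)

    def format(self, record):
        # Delegate formatting to the privately held Formatter instance.
        return self.__formatter.format(record)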
Example #19
from contextlib import closing

# Local imports
from . import giop
from . import zmqforward

# Logging import
import logging
from logging import getLogger, Formatter, StreamHandler

# Create logger
logger = getLogger("Tango gateway")
# Create console handler
log_handler = StreamHandler()
# Create formatter
log_format = Formatter('%(levelname)s - %(message)s')
log_handler.setFormatter(log_format)
logger.addHandler(log_handler)
logger.setLevel(logging.INFO)

# Tokens

IMPORT_DEVICE = b'DbImportDevice'
GET_CSDB_SERVER = b'DbGetCSDbServerList'
ZMQ_SUBSCRIPTION_CHANGE = b'ZmqEventSubscriptionChange'

# Enumerations


class Patch(Enum):
    NONE = 0
Example #20
import time
import sys
import glob
import serial
import re
import threading
import queue
import os
from logging import getLogger, StreamHandler, FileHandler, Formatter, DEBUG

logger = getLogger(__name__)
logger.setLevel(DEBUG)

stream_formatter = Formatter('%(message)s')
stream_handler = StreamHandler()
stream_handler.setLevel(DEBUG)
stream_handler.setFormatter(stream_formatter)
logger.addHandler(stream_handler)

os.makedirs("./log", exist_ok=True)
log_file_name = "./log/log-" + time.strftime(
    "%Y%m%d-%H%M%S", time.strptime(time.ctime())) + ".txt"
file_handler = FileHandler(log_file_name)
file_handler.setLevel(DEBUG)
file_formatter = Formatter('[%(asctime)s] %(message)s')
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
logger.propagate = False

stm_available = False
allarduino_available = False
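
After this setup, a single call produces two renderings (the message is illustrative): bare text on the console via stream_formatter, and '[timestamp] text' in the dated file via file_formatter.

logger.info("serial scanner starting")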
Example #21
    #app.logger.debug(userid)
    dao = UserDao()
    #app.logger.debug(userid)
    use = dao.selectByUserid(userid)
    if use is None:
        return True
    else:
        return False


def nottaken(userid, password):
    #app.logger.debug(userid)
    dao = UserDao()
    #app.logger.debug(userid)
    use = dao.selectByUserid(userid)
    if use is None:
        return True
    else:
        return False


if __name__ == "__main__":
    app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
    #    context = ('ssl.cert', 'ssl.key')
    handler = RotatingFileHandler('output.log', maxBytes=10000, backupCount=1)
    handler.setFormatter(
        Formatter("[%(filename)s:%(lineno)s - %(funcName)10s() ] %(message)s"))
    app.logger.addHandler(handler)
    app.logger.setLevel(logging.DEBUG)
    app.run(host='0.0.0.0')
Example #22
from multiprocessing import Process
import asyncio
import datetime
import hashlib
import importlib
import os
import re
import signal
from sys import argv
from logging import (getLogger, handlers, basicConfig, Formatter,
                     StreamHandler, DEBUG, INFO, CRITICAL)

logpath = os.getcwd() + '/logs/'
log_path = os.path.dirname(logpath)
if not os.path.exists(log_path):
    print('log directory created')
    os.makedirs(log_path)

log_fmt = '%(asctime)s [%(filename)-12.12s] [%(levelname)-5.5s]  %(message)s'
logFormatter = Formatter(log_fmt)
nowtime = datetime.datetime.now()
nowtimestr = '{0:%Y-%m-%d_%H-%M-%S}'.format(nowtime)
logfile = logpath + nowtimestr + '_raidscan.log'
basicConfig(filename=logfile, format=log_fmt, level=DEBUG)

LOG = getLogger('')
console = StreamHandler()
console.setLevel(INFO)
console.setFormatter(logFormatter)
LOG.addHandler(console)

rfh = handlers.RotatingFileHandler(filename=logfile,
                                   maxBytes=16384,
                                   backupCount=3)
rfh.setLevel(CRITICAL)
Example #23
def main():
    """Main routine"""
    debug = False
    try:
        argparser = ArgumentParser(description=modules[__name__].__doc__)
        argparser.add_argument('device',
                               nargs='?',
                               default='ftdi:///?',
                               help='serial port device name')
        argparser.add_argument('-x',
                               '--hexdump',
                               action='store_true',
                               help='dump EEPROM content as ASCII')
        argparser.add_argument('-o',
                               '--output',
                               type=FileType('wt'),
                               help='output ini file to save EEPROM content')
        argparser.add_argument('-s',
                               '--serial-number',
                               help='set serial number')
        argparser.add_argument('-m',
                               '--manufacturer',
                               help='set manufacturer name')
        argparser.add_argument('-p', '--product', help='set product name')
        argparser.add_argument('-e',
                               '--erase',
                               action='store_true',
                               help='erase the whole EEPROM content')
        argparser.add_argument('-u',
                               '--update',
                               action='store_true',
                               help='perform actual update, use w/ care')
        argparser.add_argument('-v',
                               '--verbose',
                               action='count',
                               default=0,
                               help='increase verbosity')
        argparser.add_argument('-d',
                               '--debug',
                               action='store_true',
                               help='enable debug mode')
        args = argparser.parse_args()
        debug = args.debug

        if not args.device:
            argparser.error('Serial device not specified')

        loglevel = max(DEBUG, ERROR - (10 * args.verbose))
        loglevel = min(ERROR, loglevel)
        if debug:
            formatter = Formatter(
                '%(asctime)s.%(msecs)03d %(name)-20s '
                '%(message)s', '%H:%M:%S')
        else:
            formatter = Formatter('%(message)s')
        FtdiLogger.set_formatter(formatter)
        FtdiLogger.set_level(loglevel)
        FtdiLogger.log.addHandler(StreamHandler(stderr))

        eeprom = FtdiEeprom()
        eeprom.open(args.device)
        if args.erase:
            eeprom.erase()
        if args.serial_number:
            eeprom.set_serial_number(args.serial_number)
        if args.manufacturer:
            eeprom.set_manufacturer_name(args.manufacturer)
        if args.product:
            eeprom.set_product_name(args.product)
        if args.hexdump:
            print(hexdump(eeprom.data))
        if args.update:
            eeprom.commit(False)
        if args.verbose > 0:
            eeprom.dump_config()
        if args.output:
            eeprom.save_config(args.output)

    except (IOError, ValueError) as exc:
        print('\nError: %s' % exc, file=stderr)
        if debug:
            print(format_exc(chain=False), file=stderr)
        exit(1)
    except KeyboardInterrupt:
        exit(2)
Example #24
    def __init__(self,
                 config_file=None,
                 key_value_store_class_list=None,
                 create_message_producer=False,
                 async_message_producer=False,
                 create_zookeeper_client=False):
        assert config_file is None or PanoptesValidators.valid_nonempty_string(config_file), \
            'config_file must be a non-empty string'
        assert key_value_store_class_list is None or isinstance(
            key_value_store_class_list,
            list), 'key_value_store_class_list must be a list'

        self.__redis_connections = dict()
        self.__kv_stores = dict()
        self.__message_producer = None
        """
        Setup a default root logger so that in case configuration parsing or logger hierarchy creation fails, we have
        a place to send the error messages for failures
        """
        if not self.__class__.__rootLogger:
            try:
                self.__class__.__rootLogger = logging.getLogger(
                    const.DEFAULT_ROOT_LOGGER_NAME)
                self.__class__.__rootLogger.setLevel(logging.INFO)
                handler = StreamHandler()
                handler.setFormatter(Formatter(fmt=const.DEFAULT_LOG_FORMAT))
                self.__class__.__rootLogger.addHandler(handler)
            except Exception as e:
                raise PanoptesContextError('Could not create root logger: %s' %
                                           str(e))

        if not config_file:
            if const.CONFIG_FILE_ENVIRONMENT_VARIABLE in os.environ:
                config_file = os.environ[
                    const.CONFIG_FILE_ENVIRONMENT_VARIABLE]
            else:
                config_file = const.DEFAULT_CONFIG_FILE_PATH

        try:
            self.__logger = self.__class__.__rootLogger
            self.__config = self._get_panoptes_config(config_file)
            self.__logger = self._get_panoptes_logger()
        except Exception as e:
            raise PanoptesContextError('Could not create PanoptesContext: %s' %
                                       str(e))

        self.__redis_pool = self.get_redis_connection(
            const.DEFAULT_REDIS_GROUP_NAME)
        """
        Instantiate the KeyValueStore classes provide in the list (if any) and store the reference to the objects
        created in an object dictionary called __kv_stores
        """
        if key_value_store_class_list is not None:
            for key_value_store_class in key_value_store_class_list:
                if not inspect.isclass(key_value_store_class):
                    raise PanoptesContextError(
                        'Current item in key_value_store_class_list is not a class'
                    )
                if not issubclass(key_value_store_class,
                                  PanoptesKeyValueStore):
                    raise PanoptesContextError(
                        key_value_store_class.__name__ +
                        " in key_value_store_class_list does not subclass PanoptesKeyValueStore"
                    )

            for key_value_store_class in key_value_store_class_list:
                self.__kv_stores[
                    key_value_store_class.__name__] = self._get_kv_store(
                        key_value_store_class)

        if create_message_producer:
            self._kafka_client = self._get_kafka_client()
            self.__message_producer = self._get_message_producer(
                async_message_producer)

        if create_zookeeper_client:
            self.__zookeeper_client = self._get_zookeeper_client()
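
A hedged instantiation sketch based on the assertions above (the config path is an illustrative assumption):

# Hypothetical usage: build a context with one KV store, no Kafka or ZooKeeper.
context = PanoptesContext(
    config_file='/etc/panoptes/panoptes.ini',
    key_value_store_class_list=[PanoptesKeyValueStore],
    create_message_producer=False,
    create_zookeeper_client=False)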
Example #25
FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv"
FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv"
FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv"
FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv"

# ---------------------------------------------------------
# Prediction & polygon result
FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv"
FN_SOLUTION_CSV = "/data/{}.csv".format(MODEL_NAME)

# Logger
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", FutureWarning)
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler(".{}.log".format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger = getLogger('spacenet2')
logger.setLevel(INFO)

if __name__ == '__main__':
    logger.addHandler(handler)
    logger.addHandler(fh_handler)


def directory_name_to_area_id(datapath):
    """
    Directory name to AOI number

    Usage:
Example #26
# @app.before_request
# def before_request():
# 	g.db = DB( 'MONGOLAB_URI' )
#
# @app.teardown_request
# def teardown_request( exception ):
# 	del g.db

# Log to stderr (so heroku logs will pick'em up)

stderr_handler = StreamHandler()
stderr_handler.setLevel(DEBUG)
stderr_handler.setFormatter(
    Formatter(
        '%(asctime)s [%(process)s] [%(levelname)s] [Flask: %(name)s] %(message)s',
        '%Y-%m-%d %H:%M:%S'))
app.logger.addHandler(stderr_handler)
app.logger.setLevel(DEBUG)

# Helpers


def textify(text, code=200):
    response = make_response(text + '\n', code)
    response.headers['Content-Type'] = 'text/plain; charset=UTF-8'
    return response


def ident2email(ident):
    if '@' in ident: email = ident
Example #27
import os
import sys
import logging
import httplib
import traceback
from flask import jsonify
from logging import Formatter, StreamHandler
from logging.handlers import SysLogHandler

if not app.config.get('TESTING'):
    newrelic.agent.initialize('newrelic.ini')
os.environ['TZ'] = 'US/Eastern'

# Initialize logging
streamhandler = StreamHandler(sys.stdout)
sysloghandler = SysLogHandler(address=(PAPERTRAIL_URL, PAPERTRAIL_PORT))
formatter = Formatter(LOG_FORMAT)
streamhandler.setFormatter(formatter)
sysloghandler.setFormatter(formatter)
app.logger.addHandler(sysloghandler)
app.logger.addHandler(streamhandler)
app.logger.setLevel(logging.DEBUG)


def get_credentials(request_info):
    """Get credentials from request."""
    try:
        return getcredentials.get_credentials(request_info.get('args'))
    except ValueError as err:
        print "ValueError in credentials: " + err.message
        if DEBUG:
            print traceback.format_exc()
Example #28
import argparse
import os

# third party lib
import cv2

# your lib

# logger setup
import logging
from logging import getLogger, StreamHandler, Formatter
logger_name = "Pokeca-AR"
logger = getLogger(logger_name)
logger.setLevel(logging.DEBUG)
stream_handler = StreamHandler()
stream_handler.setLevel(logging.DEBUG)
handler_format = Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
stream_handler.setFormatter(handler_format)
logger.addHandler(stream_handler)

# parse args
parser = argparse.ArgumentParser(description='This script is XXX')
parser.add_argument('--XXX', help='XXX')
parser.add_argument('--YYY', type=int, default=12345, help='YYY')
parser.add_argument('-Z', '--ZZZ', default="012345", help='ZZZ')
args = parser.parse_args()

# global
cap = cv2.VideoCapture(0)


def cap_image():
Example #29
@app.errorhandler(404)
def not_found_error(error):
    return render_template('errors/404.html'), 404


@app.errorhandler(500)
def server_error(error):
    return render_template('errors/500.html'), 500


if not app.debug:
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(
        Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
        ))
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')

#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#

# Default port:
if __name__ == '__main__':
    app.run()

# Or specify port manually:
Example #30
import logging
import sys
from logging import Formatter
from logging.handlers import BufferingHandler, RotatingFileHandler, SysLogHandler
from typing import Any, Dict

from finrl.exceptions import OperationalException


logger = logging.getLogger(__name__)
LOGFORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

# Initialize bufferhandler - will be used for /log endpoints
bufferHandler = BufferingHandler(1000)
bufferHandler.setFormatter(Formatter(LOGFORMAT))


def _set_loggers(verbosity: int = 0, api_verbosity: str = 'info') -> None:
    """
    Set the logging level for third party libraries
    :return: None
    """

    logging.getLogger('requests').setLevel(
        logging.INFO if verbosity <= 1 else logging.DEBUG
    )
    logging.getLogger("urllib3").setLevel(
        logging.INFO if verbosity <= 1 else logging.DEBUG
    )
    logging.getLogger('ccxt.base.exchange').setLevel(
        logging.INFO if verbosity <= 2 else logging.DEBUG