示例#1
0
import praw,re,requests,pprint,importlib,os, time, datetime, urllib.request,string, sys
from bs4 import BeautifulSoup
from PIL import Image
from InstagramAPI import InstagramAPI
from AuthenticationInfo import *

# Change the current working directory to the script's own directory so that
# relative paths (log file, data files) resolve next to the script.
os.chdir(sys.path[0])

import logging
from logging.handlers import RotatingFileHandler

# Module logger writing to a small rotating log file (100 KB, one backup).
logger = logging.getLogger(__name__)
handler = RotatingFileHandler('InstagramBot.log', maxBytes=100000, backupCount=1)
logger.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
logger.info("Program started.")

# The Hashtags module is optional; fall back to an empty list when it is
# missing.  Catch ImportError only -- the original bare `except:` also hid
# unrelated errors raised while importing the module (and even SystemExit).
try:
    from Hashtags import Hashtags
except ImportError:
    Hashtags = []

# Initial wait time when starting the program.  Else, the bot starts posting
# during the scheduled time.
if len(sys.argv) == 2:
    WaitTime = int(sys.argv[1])


def SavePosts(posts):
    try:
        os.unlink('Posts.py')
示例#2
0
            auth = (app.config["MAIL_USERNAME"], app.config["MAIL_PASSWORD"])
        secure = None
        if app.config["MAIL_USE_TLS"]:
            secure = ()
        mail_handler = SMTPHandler(
            mailhost=(app.config["MAIL_SERVER"], app.config["MAIL_PORT"]),
            fromaddr="no-reply@" + app.config["MAIL_SERVER"],
            toaddrs=app.config["ADMINS"],
            subject="Falha no Microblog",
            credentials=auth,
            secure=secure,
        )
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)

    if not os.path.exists("logs"):
        os.mkdir("logs")
    file_handler = RotatingFileHandler("logs/microblog.log",
                                       maxBytes=10240,
                                       backupCount=10)
    file_handler.setFormatter(
        logging.Formatter(
            "%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"
        ))
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.setLevel(logging.INFO)
    app.logger.info("Microblog Iniciando!!!")

from app import routes, models, errors
            secure = ()
        mail_handler = SMTPHandler(
            mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
            fromaddr='noreply@' + app.config['MAIL_SERVER'],
            toaddrs=app.config['ADMINS'],
            subject='Live Traffic Chatbot Failure',
            credentials=auth,
            secure=secure)
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)
    if app.config['LOG_TO_STDOUT']:
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.INFO)
        app.logger.addHandler(stream_handler)
    else:
        if not os.path.exists('logs'):
            os.mkdir('logs')
        file_handler = RotatingFileHandler('logs/live_traffic_chatbot.log',
                                           maxBytes=10240,
                                           backupCount=10)
        file_handler.setFormatter(
            logging.Formatter(
                '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
            ))
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)
        app.logger.setLevel(logging.INFO)
        app.logger.info('Live WhatsApp Traffic Chatbot')

from app import routes
示例#4
0
# -*- coding:utf-8 -*-
from logging.handlers import RotatingFileHandler
from flask import Flask
import logging
"""
    设置日志
"""
# Root logging config: INFO level, plus a rotating file handler (100 MB per
# file, 10 backups) writing UTF-8 to a fixed Windows path.
# NOTE(review): assumes D:\logs exists at startup -- confirm on deployment.
logging.basicConfig(level=logging.INFO)
file_log_handler = RotatingFileHandler("D:\\logs\\vm_server",
                                       encoding="UTF-8",
                                       maxBytes=1024 * 1024 * 100,
                                       backupCount=10)
# Log line layout: level, source file and line number, then the message.
formatter = logging.Formatter(
    '%(levelname)s %(filename)s:%(lineno)d %(message)s')
file_log_handler.setFormatter(formatter)
# Attach to the root logger so records from every module reach the file.
logging.getLogger().addHandler(file_log_handler)


def register_blueprints(app):
    """Attach all API blueprints to *app* (currently only v1 under /v1)."""
    from app.api.v1 import create_blueprint_v1
    blueprint_v1 = create_blueprint_v1()
    app.register_blueprint(blueprint_v1, url_prefix='/v1')


def create_app():
    """Application factory: build and return the Flask app with blueprints."""
    flask_app = Flask(__name__)
    # Config loading and plugin registration are currently disabled:
    # flask_app.config.from_object('app.configs.config')
    register_blueprints(flask_app)
    # register_plugin(flask_app)
    return flask_app
示例#5
0
    if app.config['MAIL_SERVER']:
        auth = None
        if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
            auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
        secure = None
        if app.config['MAIL_USE_TLS']:
            secure = ()
        mail_handler = SMTPHandler(
            mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
            fromaddr='no-reply@' + app.config['MAIL_SERVER'],
            toaddrs=app.config['ADMINS'], subject='Secret Santa Failure',
            credentials=auth, secure=secure)
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)

    # log file
    if not os.path.exists('logs'):
        os.mkdir('logs')
    file_handler = RotatingFileHandler('logs/secretsanta.log', maxBytes=10240,
                                       backupCount=10)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)

    app.logger.setLevel(logging.INFO)
    app.logger.info('Secret Santa startup')

from service import models
from service.main import service
    session.pop('user', None)
    return redirect(url_for('show_login'))


@app.route('/demo')
def dem():
    """Serve the demo calendar to anonymous visitors; send logged-in users to login."""
    if 'user' in session:
        return redirect(url_for('show_login'))
    return render_template("DemoCal.html")


if __name__ == '__main__':
    # initialize the log handler: rotating file, 100 KB per file, up to 100
    # backups.  NOTE(review): 'Logs\\UserActivity.log' is a Windows-style
    # path literal; on other platforms it names a single odd file -- confirm.
    formatter = logging.Formatter("%(asctime)s %(levelname)s - %(message)s")
    logHandler = RotatingFileHandler('Logs\\UserActivity.log',
                                     maxBytes=100000,
                                     backupCount=100)
    # set the log handler level
    logHandler.setLevel(logging.INFO)
    logHandler.setFormatter(formatter)
    # set the app logger level
    app.logger.setLevel(logging.INFO)

    app.logger.addHandler(logHandler)
    # schedule.every(1).minutes.do(automatedExcelSheet)

    # Daily export job; the scheduler thread that would run it is disabled.
    schedule.every().day.at("16:36").do(automatedExcelSheet)
    # t = Thread(target=scheduler)
    # t.start()

    # Listen on all interfaces, port 7070.
    app.run(host='0.0.0.0', port=7070)
示例#7
0
from flask_wtf import CSRFProtect
from logging.handlers import RotatingFileHandler
import redis
import logging

# Create the database object (bound to the app later via init_app).
db = SQLAlchemy()

# Redis connection object; created during application setup.
redis_store = None

# Set the base logging level.
logging.basicConfig(level=logging.DEBUG)
# Create the rotating log handler: path, per-file size (100 MB) and backups (10).
file_log_handler = RotatingFileHandler("logs/log",
                                       maxBytes=1024 * 1024 * 100,
                                       backupCount=10)
# Record format: level, source file name, line number and message.
formatter = logging.Formatter(
    "%(levelname)s %(filename)s: %(lineno)d %(message)s")
# Apply the format to the file handler.
file_log_handler.setFormatter(formatter)
# Attach the handler to the global (root) logger used by the Flask app.
logging.getLogger().addHandler(file_log_handler)


# 工厂模式
def create_app(config_name):
    """
    创建flask的应用对象
    :param config:  配置模式的名字(“develop”, "product")
import boto3
import argparse
import logging
from logging.handlers import RotatingFileHandler
import json
import requests
import sys

# Base config: INFO level with the root logger's default stream handler.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
# handler = logging.StreamHandler()
# Rotating file log (20 MB per file, 5 backups) under the terraform template
# directory.  NOTE(review): relative path assumes a specific working directory.
handler = RotatingFileHandler(
    "../templates/terraform/log-archive-account-new-S3-bucket-with-new-trail/account_register.log",
    maxBytes=20971520,
    backupCount=5)
formatter = logging.Formatter('%(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)


def register_falcon_discover_account(payload) -> bool:
    url = "https://api.crowdstrike.com/cloud-connect-aws/entities/accounts/v1?mode=manual"
    auth_token = get_auth_token()
    if auth_token:
        auth_header = get_auth_header(auth_token)
    else:
        print("Failed to auth token")
        sys.exit(1)
    headers = {
        'Content-Type': 'application/json',
示例#9
0
    os.mkdir(path_logs)
# Global log format for the root logger.
logging.basicConfig(
    level=logger_level,
    format=
    '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
# Define a dedicated logger for the package.
logger = logging.getLogger("macropodus")
logger.setLevel(level=logger_level)
# Log file name: the date at startup time.
log_file_name = time.strftime('macropodus-%Y-%m-%d', time.localtime(
    time.time())) + ".log"
log_name_day = os.path.join(path_logs, log_file_name)
# File output: a RotatingFileHandler keeping at most 32 backup files, 32 KB each.
fHandler = RotatingFileHandler(log_name_day,
                               maxBytes=32 * 1024,
                               backupCount=32)
fHandler.setLevel(logger_level)
# Log record format.
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fHandler.setFormatter(formatter)
# Console output.
console = logging.StreamHandler()
console.setLevel(logger_level)
console.setFormatter(formatter)
# Attach both handlers to the logger.
logger.addHandler(fHandler)
logger.addHandler(console)

示例#10
0
# Command line usage
if __name__ == "__main__":

    # Print the ASCII header
    header = """ \
__  __  __            
|__)|  \|__)|\/| _  _  
|   |__/|__)|  |(_||_) 
                   |   """
    print header

    root = logging.getLogger()
    root.setLevel(logging.INFO)

    fh = RotatingFileHandler("pdbmap.log",
                             maxBytes=(1048576 * 5),
                             backupCount=7)
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')
    fh.setFormatter(formatter)
    fh.setLevel(logging.WARNING)
    root.addHandler(fh)

    # Setup the Config File Parser
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c",
                             "--conf_file",
                             help="Specify config file",
                             metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args()
    defaults = {
示例#11
0
def setup_logging(level,
                  console_stream=None,
                  log_dir=None,
                  scope=None,
                  log_name=None):
    """Configures logging for a given scope, by default the global scope.

    :param str level: The logging level to enable, must be one of the level names listed here:
                      https://docs.python.org/2/library/logging.html#levels
    :param file console_stream: The stream to use for default (console) logging. If None (default),
                                this will disable console logging.
    :param str log_dir: An optional directory to emit logs files in.  If unspecified, no disk logging
                        will occur.  If supplied, the directory will be created if it does not already
                        exist and all logs will be tee'd to a rolling set of log files in that
                        directory.
    :param str scope: A logging scope to configure.  The scopes are hierarchichal logger names, with
                      the '.' separator providing the scope hierarchy.  By default the root logger is
                      configured.
    :param str log_name: The base name of the log file (defaults to 'pants.log').
    :returns: The full path to the main log file if file logging is configured or else `None`.
    :rtype: str
    """

    # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # TODO(John Sirois): Support logging.config.fileConfig so a site can setup fine-grained
    # logging control and we don't need to be the middleman plumbing an option for each python
    # standard logging knob.

    log_filename = None
    log_stream = None

    logger = logging.getLogger(scope)
    # Iterate over a snapshot: removing handlers from the live list while
    # iterating it skips every other handler and can leave stale handlers.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)

    if console_stream:
        console_handler = StreamHandler(stream=console_stream)
        console_handler.setFormatter(
            Formatter(fmt='%(levelname)s] %(message)s'))
        console_handler.setLevel(level)
        logger.addHandler(console_handler)

    if log_dir:
        safe_mkdir(log_dir)
        log_filename = os.path.join(log_dir, log_name or 'pants.log')
        file_handler = RotatingFileHandler(log_filename,
                                           maxBytes=10 * 1024 * 1024,
                                           backupCount=4)
        log_stream = file_handler.stream

        class GlogFormatter(Formatter):
            """Formats records glog-style: Lmmdd hh:mm:ss.uuuuuu pid file:line] msg."""

            LEVEL_MAP = {
                logging.FATAL: 'F',
                logging.ERROR: 'E',
                logging.WARN: 'W',
                logging.INFO: 'I',
                logging.DEBUG: 'D'
            }

            def format(self, record):
                datetime = time.strftime('%m%d %H:%M:%S',
                                         time.localtime(record.created))
                micros = int((record.created - int(record.created)) * 1e6)
                # Emit the record's source file name; the previous format
                # string hard-coded '(unknown)' and silently dropped the
                # filename= kwarg that was already being passed below.
                return '{levelchar}{datetime}.{micros:06d} {process} {filename}:{lineno}] {msg}'.format(
                    levelchar=self.LEVEL_MAP[record.levelno],
                    datetime=datetime,
                    micros=micros,
                    process=record.process,
                    filename=record.filename,
                    lineno=record.lineno,
                    msg=record.getMessage())

        file_handler.setFormatter(GlogFormatter())
        file_handler.setLevel(level)
        logger.addHandler(file_handler)

    logger.setLevel(level)

    # This routes warnings through our loggers instead of straight to raw stderr.
    logging.captureWarnings(True)

    return LoggingSetupResult(log_filename, log_stream)
示例#12
0
        app.logger.info('Info: request /todo/api/v1.0/nltk_entities')
        result = object_extractor.get_entities(data)
        return jsonify(result)
    except Exception as e:
        app.logger.error(e)
        abort(400)
    return jsonify({})


@app.route('/todo/api/v1.0/nltk_process', methods=['GET', 'POST'])
def nlkt_process():
    """Run NLTK entity mapping over the request's JSON payload; 400 on failure."""
    try:
        payload = request.get_json()
        extractor = NltkExtract()
        app.logger.info('Info: request /todo/api/v1.0/nltk_process')
        mapped = extractor.map_entities(payload)
        return jsonify(mapped)
    except Exception as err:
        app.logger.error(err)
        abort(400)
    # Not normally reached: abort() raises.  Kept as a defensive fallback.
    return jsonify({})


if __name__ == '__main__':
    # Local entry point: log API activity to a small rotating file
    # (10 KB per file, one backup) before serving in debug mode.
    handler = RotatingFileHandler('api_rest.log',
                                  maxBytes=10000,
                                  backupCount=1)
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)
    app.run(debug=True)
示例#13
0
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import logging
from logging.handlers import RotatingFileHandler
import json
from insurance.EligibilityStatus.Prime.PatientNewRequestData import PatientNewRequestData
import os.path
from random import *

# Log every run to a rotating file (500 KB per file, 4 backups), appending
# across runs.  The datefmt previously read '%m/%d/%Y%I:%M:%S %p', which glued
# the year to the hour (e.g. '01/02/202303:04:05 PM'); a space between the
# date and the time was clearly intended.
logging.basicConfig(handlers=[RotatingFileHandler(filename='insurance/logs/SystemLog.log', mode='a', maxBytes=512000, backupCount=4)],
                    level='DEBUG',
                    format='%(asctime)s %(levelname)s %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p')

logger = logging.getLogger('my_logger')

# XPath locators for the insurance portal's patient-eligibility screens.
search_patient_path = '//*[@id="Form1"]/div[4]/div[2]/a[4]'
member_id_path = '//*[@id="txtMemberID"]'
search_member_path = '//*[@id="btnSearch"]'
member_name_path = '//*[@id="tblMain"]/tbody/tr[3]/td[2]/a'
eligibility_value_path = '//*[@id="tblMain"]/tbody/tr[3]/td[10]'
status_element_path = '//*[@id="gvEligHistoryStatus"]/tbody/tr[2]/td[5]'
tru_element_path = '//*[@id="gvEligHistoryStatus"]/tbody/tr[2]/td[2]'


class EligibilityCheck:
    download_directory = '\\data\\'
    input_file_path = 'HPSJ AUTH Project.xlsx'
示例#14
0
def ml_exception_handler(job, exc_type, exc_value, traceback):
    """RQ failure hook: log the failed job and flag the account's queue entry.

    The account is the prefix of the job id (up to the first '-').  Always
    returns False.
    """
    account = job.id.split('-')[0].strip()
    message = '[{}] job {} execution failed. status: {}'.format(
        account, job.id, job.get_status())
    logger.error(message)
    dbHandler.update_queue_status(account, 3, dbHandler.FAILED)
    return False


if __name__ == '__main__':
    # TODO: move log file name to config file
    logger = logging.getLogger('rq.worker')

    # configure file handler: rotating log, 10 MB per file, 10 backups
    rfh = RotatingFileHandler('logs/worker_ml.log',
                              maxBytes=10 * 1024 * 1024,
                              backupCount=10)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    rfh.setFormatter(formatter)
    logger.addHandler(rfh)

    # configure stream handler on the root logger so console output covers
    # all loggers, not just 'rq.worker'
    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    logging.getLogger().addHandler(sh)
    #logging.config.dictConfig(yaml.load(open('./config/logging-workers.conf')))

    # Build an RQ worker over the configured queues, routing job failures
    # through ml_exception_handler.
    with Connection(conn3):
        worker = Worker(list(map(Queue, listen)),
                        exception_handlers=[ml_exception_handler])
示例#15
0
File: app.py  Project: Lasithih/vortex
log_level = logging.DEBUG
ytdl_update_last_checked = None
run_updater = True

# Quieter logging when running in production.
if config.config_get_env() == Env.Production:
    log_level = logging.INFO

# Initializing logger
logFormatter = logging.Formatter(
    '%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s')
rootLogger = logging.getLogger()
rootLogger.setLevel(log_level)

# Rotating file log: logs/vortex.log, 50 MB per file, 10 backups.
fileHandler = RotatingFileHandler("{0}/{1}.log".format('logs', 'vortex'),
                                  maxBytes=50000000,
                                  backupCount=10)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)

# Mirror everything to the console with the same format.
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)

#start ytdl-auto updater
thread_ytdl_updater = threading.Thread(target=run_auto_updater)
thread_ytdl_updater.start()

# start worker
thread_downloader = threading.Thread(target=run_downloader)
thread_downloader.start()
示例#16
0
mail = Mail(app)

from app import views, models

# send mail to ADMINS when error occurs in production
if not app.debug:
    import logging
    from logging.handlers import SMTPHandler
    credentials = None
    if MAIL_USERNAME or MAIL_PASSWORD:
        credentials = (MAIL_USERNAME, MAIL_PASSWORD)
    mail_handler = SMTPHandler((MAIL_SERVER, MAIL_PORT),
                               'no-reply@' + MAIL_SERVER, ADMINS,
                               'microblog failure', credentials)
    mail_handler.setLevel(logging.ERROR)
    app.logger.addHandler(mail_handler)

# in production, also keep a rotating on-disk log (1 MB per file, 10 backups)
if not app.debug:
    import logging
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler('tmp/microblog.log', 'a',
                                       1 * 1024 * 1024, 10)
    file_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
        ))
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('microblog startup')
示例#17
0
#SAVE_PATH = '/home/nelley/casperPractice/PTT/mecab_' + datetime.now().strftime('%Y%m%d') + '/'

# HTML tags stripped from scraped PTT pages.
invalid_tags = ['html', 'body', 'head', 'title',  'iframe', 'span', 'div', 'link', 'a', 'meta']
# PTT article header labels (author / time / title) and the negative-comment marker.
AUTHOR_ptt = u'作者'
CREATETIME_ptt = u'時間'
TITLE_ptt = u'標題'
NEGATIVE_ptt= u'噓'

# Buffers for posts awaiting DB insert/update, and the JSON summary for stdout.
wait_insert_posts=[]
wait_update_posts=[]
stdout_json={}
# init logger
logging.basicConfig(level=logging.DEBUG)

# Crawler log: rotating file, 5 MB per file, one backup.
logger = logging.getLogger('pttParser_mecab')
handler = RotatingFileHandler('/home/nelley/casperPractice/PTT/log_PTT_crawler.txt', maxBytes=1024*1024*5, backupCount=1)

fmt = logging.Formatter('[%(asctime)s %(msecs)d][%(name)s][%(levelname)s]: %(message)s',datefmt='%Y/%m/%d %H:%M:%S')
handler.setFormatter(fmt)

logger.addHandler(handler)

'''insert streaming logs into db'''
def updateLog(c, logs):
    """Store one batch of streaming crawl logs for category *c* in the DB."""
    db_conn = get_db()
    record = {'category': c,
              'logs': logs['logs'],
              'date': datetime.now().strftime('%Y/%m/%d %H:%M:%S')}
    db_conn.update_content_Logs.insert(record)
示例#18
0
def create_app(config_class=Config):
    """Application factory: build a Flask app from *config_class*.

    Initialises the shared extensions, registers all blueprints, and -- when
    running neither in debug nor in testing mode -- wires up admin error
    e-mails plus stdout or rotating-file logging.
    """
    app = Flask(__name__)
    app.config.from_object(config_class)

    # Bind the module-level extension objects to this app instance.
    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)

    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')

    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    from app.augmd import bp as augmd_bp
    app.register_blueprint(augmd_bp, url_prefix='/augmd')

    from app.visualize import bp as visualize_bp
    app.register_blueprint(visualize_bp, url_prefix='/viz')

    from app.optimize import bp as optimize_bp
    app.register_blueprint(optimize_bp, url_prefix='/optimize')

    if not app.debug and not app.testing:
        # E-mail ERROR-level records to the configured admins.
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                # Empty tuple = use TLS with no certfile/keyfile.
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'],
                subject='Flaskapp Failure',
                credentials=auth,
                secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        if app.config['LOG_TO_STDOUT']:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            app.logger.addHandler(stream_handler)

        else:
            # Rotating on-disk log: 10 KB per file, 10 backups.
            if not os.path.exists('logs'):
                os.mkdir('logs')
            file_handler = RotatingFileHandler('logs/flaskapp.log',
                                               maxBytes=10240,
                                               backupCount=10)
            file_handler.setFormatter(
                logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
                                  '[in %(pathname)s:%(lineno)d]'))
            file_handler.setLevel(logging.INFO)
            app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('Flaskapp startup')

    return app
示例#19
0
def init_logger() -> logging.Logger:
    """Build and return the 'matchmaking' logger.

    Wires up rotating file logs (all.log at DEBUG, errors.log at WARNING), a
    colourised console handler chosen per platform, and a quieter colourised
    console handler for the 'discord' logger.
    """
    # Create the logger

    base_logger = logging.getLogger("matchmaking")
    base_logger.setLevel(logging.DEBUG)

    # noinspection SpellCheckingInspection
    formatter = logging.Formatter(
        '%(asctime)s :: %(levelname)s :: %(message)s')

    # Logging to a file
    from logging.handlers import RotatingFileHandler

    # all.log: everything at DEBUG and above (10 MB per file, one backup).
    file_handler = RotatingFileHandler('all.log', 'a', 10000000, 1)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.DEBUG)
    base_logger.addHandler(file_handler)

    # errors.log: WARNING and above only (same rotation policy).
    file_handler = RotatingFileHandler('errors.log', 'a', 10000000, 1)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.WARNING)
    base_logger.addHandler(file_handler)

    # And to console

    # You can probably collapse the following two StreamHandlers.
    # They list the colors codes for windows and unix systems

    class _AnsiColorStreamHandler(logging.StreamHandler):
        """StreamHandler that wraps each formatted record in ANSI colour escapes."""
        DEFAULT = '\x1b[0m'
        RED = '\x1b[31m'
        GREEN = '\x1b[32m'
        YELLOW = '\x1b[33m'
        CYAN = '\x1b[36m'

        CRITICAL = RED
        ERROR = RED
        WARNING = YELLOW
        INFO = GREEN
        DEBUG = CYAN

        @classmethod
        def _get_color(cls, level):
            # Map a numeric level to its colour code, falling back to DEFAULT.
            if level >= logging.CRITICAL:
                return cls.CRITICAL
            elif level >= logging.ERROR:
                return cls.ERROR
            elif level >= logging.WARNING:
                return cls.WARNING
            elif level >= logging.INFO:
                return cls.INFO
            elif level >= logging.DEBUG:
                return cls.DEBUG
            else:
                return cls.DEFAULT

        def __init__(self, stream=None):
            logging.StreamHandler.__init__(self, stream)

        def format(self, record):
            text = logging.StreamHandler.format(self, record)
            color = self._get_color(record.levelno)
            return color + text + self.DEFAULT

    # noinspection SpellCheckingInspection
    class _WinColorStreamHandler(logging.StreamHandler):
        """StreamHandler that colours output via the Windows console API (ctypes)."""
        # wincon.h
        FOREGROUND_BLACK = 0x0000
        FOREGROUND_BLUE = 0x0001
        FOREGROUND_GREEN = 0x0002
        FOREGROUND_CYAN = 0x0003
        FOREGROUND_RED = 0x0004
        FOREGROUND_MAGENTA = 0x0005
        FOREGROUND_YELLOW = 0x0006
        FOREGROUND_GREY = 0x0007
        FOREGROUND_INTENSITY = 0x0008  # foreground color is intensified.
        FOREGROUND_WHITE = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED

        BACKGROUND_BLACK = 0x0000
        BACKGROUND_BLUE = 0x0010
        BACKGROUND_GREEN = 0x0020
        BACKGROUND_CYAN = 0x0030
        BACKGROUND_RED = 0x0040
        BACKGROUND_MAGENTA = 0x0050
        BACKGROUND_YELLOW = 0x0060
        BACKGROUND_GREY = 0x0070
        BACKGROUND_INTENSITY = 0x0080  # background color is intensified.

        DEFAULT = FOREGROUND_WHITE
        CRITICAL = BACKGROUND_YELLOW | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY
        ERROR = FOREGROUND_RED | FOREGROUND_INTENSITY
        WARNING = FOREGROUND_YELLOW | FOREGROUND_INTENSITY
        INFO = FOREGROUND_GREEN
        DEBUG = FOREGROUND_CYAN

        @classmethod
        def _get_color(cls, level):
            if level >= logging.CRITICAL:
                return cls.CRITICAL
            elif level >= logging.ERROR:
                return cls.ERROR
            elif level >= logging.WARNING:
                return cls.WARNING
            elif level >= logging.INFO:
                return cls.INFO
            elif level >= logging.DEBUG:
                return cls.DEBUG
            else:
                return cls.DEFAULT

        def _set_color(self, code):
            import ctypes
            ctypes.windll.kernel32.SetConsoleTextAttribute(self._outhdl, code)

        def __init__(self, stream=None):
            logging.StreamHandler.__init__(self, stream)
            # get file handle for the stream
            import ctypes.util
            # for some reason find_msvcrt() sometimes doesn't find msvcrt.dll on my system?
            crtname = ctypes.util.find_msvcrt()
            if not crtname:
                crtname = ctypes.util.find_library("msvcrt")
            crtlib = ctypes.cdll.LoadLibrary(crtname)
            # noinspection PyProtectedMember
            self._outhdl = crtlib._get_osfhandle(self.stream.fileno())

        def emit(self, record):
            # Switch the console colour, emit, then restore the default.
            color = self._get_color(record.levelno)
            self._set_color(color)
            logging.StreamHandler.emit(self, record)
            self._set_color(self.FOREGROUND_WHITE)

    # select ColorStreamHandler based on platform
    import platform

    if platform.system() == 'Windows':
        # noinspection PyPep8Naming
        ColorStreamHandler = _WinColorStreamHandler
    else:
        # noinspection PyPep8Naming
        ColorStreamHandler = _AnsiColorStreamHandler

    # NOTE(review): 'steam_handler' is presumably a typo for 'stream_handler'.
    steam_handler = ColorStreamHandler()
    steam_handler.setLevel(logging.DEBUG)

    steam_handler.setFormatter(formatter)
    base_logger.addHandler(steam_handler)

    # Keep discord.py's own logger quieter (WARNING), console at INFO.
    discord_logger = logging.getLogger('discord')
    discord_logger.setLevel(logging.WARNING)

    # noinspection SpellCheckingInspection
    discord_formatter = logging.Formatter(
        '%(asctime)s :: %(levelname)s :: %(message)s')

    discord_steam_handler = ColorStreamHandler()
    discord_steam_handler.setLevel(logging.INFO)
    discord_steam_handler.setFormatter(discord_formatter)
    discord_logger.addHandler(discord_steam_handler)

    return base_logger
示例#20
0
import asyncio
import aiohttp
from collections import Counter
from pytz import timezone
import discord
from discord.ext import commands
import loadconfig

__version__ = '1.4.4'

# discord.py's logger: WARNING and above, written to a small rotating file
# (5 KB per file, 2 backups, truncated on start via mode='w').
logger = logging.getLogger('discord')
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.WARNING)
handler = RotatingFileHandler(filename='discordbot.log',
                              maxBytes=1024 * 5,
                              backupCount=2,
                              encoding='utf-8',
                              mode='w')
handler.setFormatter(
    logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)

description = '''Der-Eddys anime discord bot, developed with discord.py\n
                 A full list of all commands are available here: https://github.com/Der-Eddy/discord_bot#commands-list'''
# Opt in to the presence and member gateway intents, then build the bot with
# the prefix from loadconfig.
intents = discord.Intents.default()
intents.presences = True
intents.members = True
bot = commands.Bot(command_prefix=loadconfig.__prefix__,
                   description=description,
                   intents=intents)
示例#21
0
# Flask-Login setup: anonymous user class, login view and flash message.
lm.anonymous_user = AnonUser
lm.init_app(app)
lm.login_view = 'login'
lm.login_message = 'Please log in to access this page.'
mail = Mail(app)
babel = Babel(app)
csrf = CSRFProtect(app)

# Optional debug toolbar, enabled by passing "debug" on the command line.
if "debug" in sys.argv:
    print("Installing debug toolbar!")
    toolbar = DebugToolbarExtension(app)

# Production logging: rotating file, 1 MB per file, 10 backups.
if not app.debug:
    import logging
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler('tmp/tob2.log', 'a', 1 * 1024 * 1024,
                                       10)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
        ))
    app.logger.addHandler(file_handler)
    app.logger.setLevel(logging.INFO)
    app.logger.info('tob2 startup')

from app import views
from app import models
from app import tag_definitions
# from .models import Users, Translators

CACHE_SIZE = 5000
            logging.info(json.loads(request.data)["admin"])
            self.addAccountsInQueue(self.accountInfo, self.availableAccQu, li)
            return request.data
        return json.dumps({"error": "request error"})


if __name__ == '__main__':
    # Base config: DEBUG level into <sys.path[0]>/log.log, truncated each run.
    logging.basicConfig(
        level=logging.DEBUG,
        format=
        '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S',
        filename=sys.path[0] + '/log.log',
        filemode='w')

    # Additional rotating handler (2 MB per file, 5 backups) on the root
    # logger.  NOTE(review): 'log.log' here resolves relative to the CWD,
    # which may or may not be the same file as sys.path[0] + '/log.log'.
    Rthandler = RotatingFileHandler('log.log',
                                    maxBytes=2 * 1024 * 1024,
                                    backupCount=5)
    Rthandler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    Rthandler.setFormatter(formatter)
    logging.getLogger('').addHandler(Rthandler)

    # Mirror DEBUG output to the console with the same format.
    streamHandler = logging.StreamHandler()
    streamHandler.setLevel(logging.DEBUG)
    streamHandler.setFormatter(formatter)
    logging.getLogger('').addHandler(streamHandler)

    obj = master()
    obj.run()
示例#23
0
def main():
    """Entry point of the myems-modbus-tcp gateway.

    Sets up error logging, loads the list of modbus-tcp data sources from the
    system database (retrying every minute until at least one is found), then
    forks one acquisition worker process per valid data source.
    """
    # Logger handles only ERROR and CRITICAL; lower severities are ignored.
    logger = logging.getLogger('myems-modbus-tcp')
    logger.setLevel(logging.ERROR)
    # Rotating file handler: 1 MB per file, one backup kept.
    fh = RotatingFileHandler('myems-modbus-tcp.log',
                             maxBytes=1024 * 1024,
                             backupCount=1)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    logger.addHandler(fh)

    # Get Data Sources
    while True:
        # TODO: This service has to RESTART to reload latest data sources and this should be fixed
        cnx_system_db = None
        cursor_system_db = None
        try:
            cnx_system_db = mysql.connector.connect(**config.myems_system_db)
            cursor_system_db = cnx_system_db.cursor()
        except Exception as e:
            logger.error("Error in main process " + str(e))
            if cursor_system_db:
                cursor_system_db.close()
            if cnx_system_db:
                cnx_system_db.close()
            # sleep several minutes and continue the outer loop to reload points
            time.sleep(60)
            continue

        try:
            query = (
                " SELECT ds.id, ds.name, ds.connection "
                " FROM tbl_data_sources ds, tbl_gateways g "
                " WHERE ds.protocol = 'modbus-tcp' AND g.id = %s AND g.token = %s "
                " ORDER BY ds.id ")
            cursor_system_db.execute(query, (
                config.gateway['id'],
                config.gateway['token'],
            ))
            rows_data_source = cursor_system_db.fetchall()
        except Exception as e:
            logger.error("Error in main process " + str(e))
            # sleep several minutes and continue the outer loop to reload points
            time.sleep(60)
            continue
        finally:
            # Always release DB resources, success or failure.
            if cursor_system_db:
                cursor_system_db.close()
            if cnx_system_db:
                cnx_system_db.close()

        if rows_data_source is None or len(rows_data_source) == 0:
            logger.error("Data Source Not Found, Wait for minutes to retry.")
            # wait for a while and retry
            time.sleep(60)
            continue
        else:
            # Stop to connect these data sources
            break

    for row_data_source in rows_data_source:
        print("Data Source: ID=%s, Name=%s, Connection=%s " %
              (row_data_source[0], row_data_source[1], row_data_source[2]))

        if row_data_source[2] is None or len(row_data_source[2]) == 0:
            logger.error("Data Source Connection Not Found.")
            continue

        try:
            # BUG FIX: json.loads() no longer accepts the 'encoding' keyword
            # (deprecated in 3.1, removed in Python 3.9 — it raised TypeError
            # and was ignored anyway for str input).
            server = json.loads(row_data_source[2])
        except Exception as e:
            logger.error("Data Source Connection JSON error " + str(e))
            continue

        # Validate host/port before forking a worker for this source.
        if 'host' not in server.keys() \
                or 'port' not in server.keys() \
                or server['host'] is None \
                or server['port'] is None \
                or len(server['host']) == 0 \
                or not isinstance(server['port'], int) \
                or server['port'] < 1:
            logger.error("Data Source Connection Invalid.")
            continue

        # fork worker process for each data source
        # todo: how to restart the process if the process terminated unexpectedly
        Process(target=acquisition.process,
                args=(logger, row_data_source[0], server['host'],
                      server['port'])).start()
示例#24
0
    "IS_BLACK_LIST_ED_MESSAGE_TEXT",
    ("You have been <b>banned</b> forever.\n\n"
     "<u>Reason</u>: <code>{reason}</code>"))
# Separator placed between stacked reason strings — presumably joins multiple
# ban/unban reasons; confirm against callers (original tag: IDEKWBYRW).
REASON_DE_LIMIT_ER = get_config("REASON_DE_LIMIT_ER", "\n\n")
# message to show when user is unbanned
IS_UN_BANED_MESSAGE_TEXT = get_config("IS_UN_BANED_MESSAGE_TEXT",
                                      ("You have been <b>un-banned</b>.\n\n"
                                       "<u>Reason</u>: <code>{reason}</code>"))
# message to show if bot was blocked by user
BOT_WS_BLOCKED_BY_USER = get_config("BOT_WS_BLOCKED_BY_USER",
                                    "Bot was blocked by the user.")
# path to store LOG files
LOG_FILE_ZZGEVC = get_config("LOG_FILE_ZZGEVC", "NoPMsBot.log")

# Root logging: INFO+ to both a ~50 MB rotating file (10 backups) and stderr.
logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s - %(levelname)s] - %(name)s - %(message)s",
    datefmt='%d-%b-%y %H:%M:%S',
    handlers=[
        RotatingFileHandler(LOG_FILE_ZZGEVC, maxBytes=50000000,
                            backupCount=10),
        logging.StreamHandler()
    ])
# Quiet pyrogram's chatty INFO output; only WARNING+ passes through.
logging.getLogger("pyrogram").setLevel(logging.WARNING)

def LOGGER(name: str) -> logging.Logger:
    """Return the logging.Logger registered under *name*."""
    logger_obj = logging.getLogger(name)
    return logger_obj
    # Email ERROR-level records to the ADMINS list when a mail server is set.
    if app.config['MAIL_SERVER']:
        auth = None
        if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
            auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
        secure = None
        if app.config['MAIL_USE_TLS']:
            # An empty tuple makes SMTPHandler call starttls() with defaults.
            secure = ()
        mail_handler = SMTPHandler(
            mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
            fromaddr='no-reply@' + app.config['MAIL_SERVER'],
            toaddrs=app.config['ADMINS'], subject='Project Tracker Failure',
            credentials=auth, secure=secure)
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)

    # File logging: INFO+ to logs/ProjectTracker.log, 10 KB x 10 backups.
    if not os.path.exists('logs'):
        os.mkdir('logs')
    file_handler = RotatingFileHandler('logs/ProjectTracker.log', maxBytes=10240,
                                       backupCount=10)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)

    app.logger.setLevel(logging.INFO)
    app.logger.info('ProjectTracker startup')



from app import routes, models, errors
示例#26
0
# Watermark crop margins, in pixels (translated from Russian: "mark cropping").
CROP_WIDTH = 7
CROP_HEIGHT = 5

# All working paths are resolved relative to this script's own directory.
MAIN_PATH = Path(os.path.dirname(os.path.realpath(__file__)))
INPUT_PATH = Path(os.path.dirname(os.path.realpath(__file__))) / 'input'
OUTPUT_PATH = Path(os.path.dirname(os.path.realpath(__file__))) / 'output'
WATERMARKS_PATH = Path(os.path.dirname(os.path.realpath(__file__))) / 'watermarks'
# Shrink the placement indent by the crop size (INDENT_X/Y defined earlier).
INDENT_X -= CROP_WIDTH
INDENT_Y -= CROP_HEIGHT


# Module logger: INFO+ to both a rotating UTF-8 file and the console.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

file_handler = RotatingFileHandler(u'' + str(MAIN_PATH / 'createwmark.log'),
                                   maxBytes=1000000, backupCount=10, encoding='utf8')
file_handler.setLevel(logging.INFO)

con_handler = logging.StreamHandler()
con_handler.setLevel(logging.INFO)

formatter = logging.Formatter(u'[%(asctime)s] %(levelname)s - (%(filename)s:%(lineno)d) %(message)s')
file_handler.setFormatter(formatter)
con_handler.setFormatter(formatter)

logger.addHandler(file_handler)
logger.addHandler(con_handler)

def get_relative_path(path):
    """Return *path* rewritten relative to the tool's base directory MAIN_PATH."""
    absolute = Path(path)
    return absolute.relative_to(MAIN_PATH)
示例#27
0
from flask import Flask
import logging, sys
from logging.handlers import RotatingFileHandler

from blueprints import app, manager

if __name__ == '__main__':
    # Attach a rotating file handler (10 KB x 10 backups) under storage/log.
    formatter = logging.Formatter(
        "[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
    log_handler = RotatingFileHandler(
        "%s/%s" % (app.root_path, '../storage/log/app.log'),
        maxBytes=10000,
        backupCount=10)
    log_handler.setLevel(logging.INFO)
    log_handler.setFormatter(formatter)
    app.logger.addHandler(log_handler)

    # BUG FIX: the old try/except IndexError also wrapped app.run(), so an
    # IndexError escaping the running server silently restarted it (and the
    # bound exception variable was unused). Test argv length explicitly.
    if len(sys.argv) > 1 and sys.argv[1] == 'db':
        # `python app.py db` runs the Flask-Script migration manager instead.
        manager.run()
    else:
        app.run(debug=True, host='0.0.0.0', port=5000)
示例#28
0
import logging
from logging.handlers import RotatingFileHandler

from flask import Flask

# Module-level Flask application; the __main__ block attaches file logging.
app = Flask(__name__)


@app.route('/')
def foo():
    """Root endpoint: emit one record at each demo level, then return "foo"."""
    app.logger.warning('A warning occurred (%d apples)', 42)
    app.logger.error('An error occurred')
    app.logger.info('Info')
    return "foo"


if __name__ == '__main__':
    # Route DEBUG+ records from app.logger to foo.log (10 KB, one backup).
    handler = RotatingFileHandler('foo.log', maxBytes=10000, backupCount=1)
    handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    handler.setFormatter(formatter)
    app.logger.addHandler(handler)
    # Removed commented-out werkzeug-logger experiments (dead code).
    app.run()
示例#29
0
    def create_logger(
            self,
            log_file_name='{0}/e2etest_running.log'.format(config.BASE_DIR),
            log_level=logging.DEBUG,
            log_date_format='%Y-%m-%d %H:%M:%S%z',
            log_formater='%(asctime)s %(filename)s:%(funcName)s %(levelname)s [line:%(lineno)d] %(message)s',
            max_log_files=3,
            one_day_one_file=True,
            max_log_file_size=10485760,
            log_to_standard_output=False):
        '''
        @summary: create the logger
        @param log_file_name: the log file name, should be absolute path.
                              default value is {BASE_DIR}/e2etest_running.log;
                              if the value is None or "", print the log to standard output
        @param log_level: Integer of the log level. default value is logging.DEBUG
        @param max_log_files: the max number of files. It is valid when one_day_one_file equal False. default value is 3
        @param one_day_one_file: whether only create a file in one day. default value is True, one day one log file
        @param max_log_file_size: the max size of the log file. unit is byte. default value is 10 MB
        @param log_date_format: String of log date format. default value is '%Y-%m-%d %H:%M:%S%z', like 2017-06-01 11:44:06+0000
        @param log_to_standard_output: whether print logs into standard output, this argument will ignore log_file_name value
        @return: the logger (a logging.Logger, or the logging module itself for
                 the standard-output fallback — both expose .info/.error/etc.)
        '''
        # initialize log file: resolve to an absolute path and create its dir
        if log_file_name:
            log_file_name = os.path.abspath(log_file_name)
            log_dir = os.path.dirname(log_file_name)
            if not os.path.exists(log_dir):
                os.makedirs(log_dir)

        # FIX: use isinstance() instead of the `type(x) == type('')`
        # anti-pattern, and drop the redundant `!= ''` check (an empty
        # string is already falsy under the leading truthiness test).
        if log_file_name and isinstance(log_file_name, str):
            # write log to file; the logger is keyed by the file name
            logger = logging.getLogger(log_file_name)
            logger.setLevel(log_level)
            # NOTE(review): handlers accumulate if this is called repeatedly
            # with the same file name (duplicate log lines) — confirm callers
            # invoke this only once per file.

            # write a new log file every day, or rotate by size
            if one_day_one_file:
                Rthandler = TimedRotatingFileHandler(log_file_name,
                                                     when='D',
                                                     backupCount=max_log_files)
            else:
                Rthandler = RotatingFileHandler(log_file_name,
                                                maxBytes=max_log_file_size,
                                                backupCount=max_log_files)
            formatter = logging.Formatter(fmt=log_formater,
                                          datefmt=log_date_format)
            Rthandler.setFormatter(formatter)
            logger.addHandler(Rthandler)

            # optionally mirror the same records to standard output
            if log_to_standard_output:
                console = logging.StreamHandler()
                console.setLevel(log_level)
                console.setFormatter(formatter)
                logger.addHandler(console)

        # no usable file name: configure root logging to standard output and
        # return the logging module itself (duck-types as a logger)
        else:
            logging.basicConfig(level=log_level,
                                format=log_formater,
                                datefmt=log_date_format)
            logger = logging

        return logger
示例#30
0
        # Email ERROR-level records to ADMINS via SMTP (TLS if configured).
        auth = None
        if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
            auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
        secure = None
        if app.config['MAIL_USE_TLS']:
            # An empty tuple makes SMTPHandler call starttls() with defaults.
            secure = ()
        mail_handler = SMTPHandler(
            mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
            fromaddr='no-reply@' + app.config['MAIL_SERVER'],
            toaddrs=app.config['ADMINS'],
            subject='nayra Failure',
            credentials=auth,
            secure=secure)
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)

    # File logging: INFO+ to logs/nayra.log, rotating at 10 KB, 10 backups.
    if not os.path.exists('logs'):
        os.mkdir('logs')
    file_handler = RotatingFileHandler('logs/nayra.log',
                                       maxBytes=10240,
                                       backupCount=10)
    file_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
        ))
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)

    app.logger.setLevel(logging.INFO)
    app.logger.info('nayra startup')