Example #1
app = Flask(__name__)
app.template_folder = "tpl"
app.config["MAKO_DEFAULT_FILTERS"] = ["h"]
mako.init_app(app)


class RegexConverter(BaseConverter):
    def __init__(self, url_map, *items):
        super(RegexConverter, self).__init__(url_map)
        self.regex = items[0]


app.url_map.converters["regex"] = RegexConverter
logger = getLogger("newtrackon_logger")
logger.setLevel(INFO)
handler = FileHandler("data/trackon.log")
logger_format = Formatter("%(asctime)s - %(message)s")
handler.setFormatter(logger_format)
logger.addHandler(handler)
logger.info("Server started")


@app.route("/")
def main():
    trackers_list = db.get_all_data()
    trackers_list = utils.format_uptime_and_downtime_time(trackers_list)
    return render_template("main.mako", trackers=trackers_list, active="main")


@app.route("/", methods=["POST"])
def new_trackers():
Example #2
import pandas as pd
from logging import getLogger, StreamHandler, DEBUG, Formatter, FileHandler

TRAIN_DATA = 'input/train.csv'
TEST_DATA = 'input/test.csv'

logger = getLogger(__name__)
DIR = 'log/'

log_fmt = Formatter(
    '%(asctime)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s ')
handler = StreamHandler()
handler.setLevel('INFO')
handler.setFormatter(log_fmt)
logger.addHandler(handler)

handler = FileHandler(DIR + 'load_data.py.log', 'a')
handler.setLevel(DEBUG)
handler.setFormatter(log_fmt)
logger.setLevel(DEBUG)
logger.addHandler(handler)


def load_data(path):
    logger.debug('enter')
    df = pd.read_csv(path)
    logger.debug('exit')
    return df


def load_train_data():
    logger.debug('enter')
Example #3
import math
import pandas as pd
from logging import getLogger, StreamHandler, FileHandler, Formatter, DEBUG

logger = getLogger(__name__)
pd.options.mode.chained_assignment = None  # default='warn'

DIR = './result_tmp/'

log_fmt = Formatter(
    '%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s '
)
handler = StreamHandler()
handler.setLevel('INFO')
handler.setFormatter(log_fmt)
logger.addHandler(handler)

handler = FileHandler(DIR + 'train.py.log', 'a')
handler.setLevel(DEBUG)
handler.setFormatter(log_fmt)
logger.setLevel(DEBUG)
logger.addHandler(handler)

logger.info('start')


def create_feature_map(features):
    outfile = open('xgb.fmap', 'w')
    for i, feat in enumerate(features):
        outfile.write('{0}\t{1}\tq\n'.format(i, feat))
    outfile.close()

Example #4
"""
Created on Sat Nov  7 09:35:46 2020

@author: Lina
"""
import sqlite3
import logging
from logging import Formatter, FileHandler
from flask import Flask
import pandas as pd

app = Flask(__name__)

# this is for logging ---------------------------
LOGGER = logging.getLogger('whatever')
file_handler = FileHandler('test.log')
handler = logging.StreamHandler()
file_handler.setFormatter(
    Formatter('%(asctime)s %(levelname)s: %(message)s '
              '[in %(pathname)s:%(lineno)d]'))
handler.setFormatter(
    Formatter('%(asctime)s %(levelname)s: %(message)s '
              '[in %(pathname)s:%(lineno)d]'))
LOGGER.addHandler(file_handler)
LOGGER.addHandler(handler)
LOGGER.setLevel(logging.INFO)
#------------------------------------------------


@app.route('/loadNutrients/<inputValue>')
def getAllNutrients(inputValue):
Example #5
# Init app


class FlaskHTTPSaltStackClient(HTTPSaltStackClient):
    def get_token(self):
        return session.get('user_token')


app = Flask("SaltPad", template_folder="templates")
app.config.from_object('settings')

# Setup logging
if not app.debug:
    from logging import FileHandler
    app.logger.addHandler(FileHandler(app.config['LOG_FILE']))

# Setup sentry
try:
    from raven.contrib.flask import Sentry
    sentry = Sentry(app, dsn=app.config['SENTRY_DSN'])
except ImportError:
    if app.config.get('SENTRY_DSN'):
        install_cmd = "pip install raven[flask]"
        print "Couldn't import raven, please install it with '%s'" % install_cmd
        sys.exit(1)

client = FlaskHTTPSaltStackClient(app.config['API_URL'],
                                  app.config.get('VERIFY_SSL', True))

from flask_wtf import Form
Example #6
def doXVal(folds,
           percent,
           verbose,
           multicore,
           noisy,
           predName,
           domain,
           mlnfile,
           dbfiles,
           logicLearn,
           logicInfer,
           inverse=False,
           testSetCount=1):
    startTime = time.time()

    directory = time.strftime(
        "%a_%d_%b_%Y_%H:%M:%S_K=" + str(folds) + "_TSC=" + str(testSetCount),
        time.localtime())
    os.mkdir(directory)
    os.mkdir(os.path.join(directory, 'FOL'))
    os.mkdir(os.path.join(directory, 'FUZZY'))
    # set up the logger
    log = logging.getLogger('xval')
    fileLogger = FileHandler(os.path.join(directory, 'xval.log'))
    fileLogger.setFormatter(praclog.formatter)
    log.addHandler(fileLogger)

    log.info('Results will be written into %s' % directory)

    # preparations: Read the MLN and the databases
    mln_ = readMLNFromFile(mlnfile,
                           verbose=verbose,
                           logic='FuzzyLogic',
                           grammar='PRACGrammar')
    log.info('Read MLN %s.' % mlnfile)
    dbs = []
    for dbfile in dbfiles:
        db = readDBFromFile(mln_, dbfile)
        if type(db) is list:
            dbs.extend(db)
        else:
            dbs.append(db)
    log.info('Read %d databases.' % len(dbs))

    cwpreds = [pred for pred in mln_.predicates if pred != predName]

    # create the partition of data
    subsetLen = int(math.ceil(len(dbs) * percent / 100.0))
    if subsetLen < len(dbs):
        log.info('Using only %d of %d DBs' % (subsetLen, len(dbs)))
    dbs = sample(dbs, subsetLen)

    if len(dbs) < folds:
        log.error(
            'Cannot do %d-fold cross validation with only %d databases.' %
            (folds, len(dbs)))
        exit(0)

    shuffle(dbs)
    partSize = int(math.ceil(len(dbs) / float(folds)))
    partition = []
    for i in range(folds):
        partition.append(dbs[i * partSize:(i + 1) * partSize])

    foldRunnables = []
    for foldIdx in range(folds):
        partion_ = list(partition)
        params = XValFoldParams()
        params.mln = mln_.duplicate()
        params.testDBs = []
        params.learnDBs = []

        for i in range(0, testSetCount):
            if (foldIdx >= len(partion_)):
                params.testDBs.extend(partion_[0])
                del partion_[0]
            else:
                params.testDBs.extend(partion_[foldIdx])
                del partion_[foldIdx]

        for part in partion_:
            params.learnDBs.extend(part)
        print('LEARN DBS :' + str(len(params.learnDBs)))
        print('TEST DBS :' + str(len(params.testDBs)))

        params.foldIdx = foldIdx
        params.foldCount = folds
        params.noisyStringDomains = noisy
        params.directory = directory
        params.queryPred = predName
        params.queryDom = domain
        params.logicInfer = logicInfer
        foldRunnables.append(XValFold(params))

    if multicore:
        # set up a pool of worker processes
        try:
            workerPool = Pool()
            log.info('Starting %d-fold Cross-Validation in %d processes.' %
                     (folds, workerPool._processes))
            result = workerPool.map_async(runFold, foldRunnables).get()
            workerPool.close()
            workerPool.join()
            cm = ConfusionMatrix()
            for r in result:
                cm.combine(r.confMatrix)
            elapsedTimeMP = time.time() - startTime
            prepareResults(directory, 'FOL')
            prepareResults(directory, 'FUZZY')
        except (KeyboardInterrupt, SystemExit, SystemError):
            log.critical("Caught KeyboardInterrupt, terminating workers")
            workerPool.terminate()
            workerPool.join()
            exit(1)
        except:
            log.error('\n' +
                      ''.join(traceback.format_exception(*sys.exc_info())))
            exit(1)


#     startTime = time.time()
    else:
        log.info('Starting %d-fold Cross-Validation in 1 process.' % (folds))

        for fold in foldRunnables:
            runFold(fold)

        prepareResults(directory, 'FOL')
        prepareResults(directory, 'FUZZY')

        elapsedTimeSP = time.time() - startTime

    if multicore:
        log.info('%d-fold crossvalidation (MP) took %.2f min' %
                 (folds, elapsedTimeMP / 60.0))
    else:
        log.info('%d-fold crossvalidation (SP) took %.2f min' %
                 (folds, elapsedTimeSP / 60.0))
Example #7
import os
import time
from logging import getLogger, StreamHandler, FileHandler, Formatter, DEBUG

logger = getLogger(__name__)
logger.setLevel(DEBUG)

stream_formatter = Formatter('%(message)s')
stream_handler = StreamHandler()
stream_handler.setLevel(DEBUG)
stream_handler.setFormatter(stream_formatter)
logger.addHandler(stream_handler)

os.makedirs("./log", exist_ok=True)
log_file_name = "./log/log-" + time.strftime(
    "%Y%m%d-%H%M%S", time.strptime(time.ctime())) + ".txt"
file_handler = FileHandler(log_file_name)
file_handler.setLevel(DEBUG)
file_formatter = Formatter('[%(asctime)s] %(message)s')
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
logger.propagate = False

stm_available = False
allarduino_available = False
legs = 6
scenario_repeat = 2

if legs == 4:
    motor_id_mapping = {0: "2", 1: "3", 2: "5", 3: "6"}

#
Example #8
from datetime import datetime
from logging import FileHandler, StreamHandler, basicConfig, getLogger, INFO, WARNING
from os import path, mkdir
from sys import exit as sysexit
from sys import stdout, version_info
from time import time

from aioredis import create_redis_pool

LOG_DATETIME = datetime.now().strftime("%d_%m_%Y-%H_%M_%S")
LOGDIR = f"{__name__}/logs"

# Make the logs directory if it does not exist
if not path.isdir(LOGDIR):
    mkdir(LOGDIR)

LOGFILE = f"{LOGDIR}/{__name__}_{LOG_DATETIME}.txt"

file_handler = FileHandler(filename=LOGFILE)
stdout_handler = StreamHandler(stdout)

basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    level=INFO,
    handlers=[file_handler, stdout_handler],
)

getLogger("pyrogram").setLevel(WARNING)
LOGGER = getLogger(__name__)

# If the Python version is below 3.7, stop the bot.
if version_info < (3, 7):
    LOGGER.error(("You MUST have a Python Version of at least 3.7!\n"
                  "Multiple features depend on this. Bot quitting."), )
Example #9
import logging
from logging import FileHandler

from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.babel import Babel
from flask_oauth import OAuth
from flask.ext.login import LoginManager
from flask.ext.principal import Principal

handler = FileHandler('/var/log/jeto/debug.log')
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)

app = Flask(__name__)
app.config.from_object('jeto.settings')
app.url_map.strict_slashes = False
app.logger.addHandler(handler)

db = SQLAlchemy(app)
babel = Babel(app)
oauth = OAuth()
principal = Principal(app)

google = oauth.remote_app(
    'google',
    base_url='https://www.google.com/accounts/',
    authorize_url='https://accounts.google.com/o/oauth2/auth',
    request_token_url=None,
    request_token_params={
        'scope': 'openid profile email',
Example #10
        rate_svo,
        rate_s,
        rate_v,
        rate_o
    ]

    return list_ret


if __name__ == '__main__':
    from logging import StreamHandler, DEBUG, Formatter, FileHandler

    log_fmt = Formatter(
        '%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s '
    )
    handler = FileHandler('svo.py.log', 'w')
    handler.setLevel(DEBUG)
    handler.setFormatter(log_fmt)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)

    handler = StreamHandler()
    handler.setLevel('INFO')
    handler.setFormatter(log_fmt)
    logger.setLevel('INFO')
    logger.addHandler(handler)
    p = Pool()

    df = pandas.read_csv('../data/train_clean2_rev.csv',
                         usecols=['question1', 'question2']).values
Example #11
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from os import path, mkdir
from datetime import datetime
from logging import getLogger, FileHandler

from waitress import serve
from paste.translogger import TransLogger

import app
from conf import conf

LOG_PATH = path.join("log")

if __name__ == "__main__":
    if not path.isdir(path.join(LOG_PATH)):
        mkdir(LOG_PATH)

    logger = getLogger(name="wsgi")

    logger.addHandler(hdlr=FileHandler(filename=path.join(
        LOG_PATH, f"{datetime.today().strftime('%Y-%m-%d %Hh %Mm %Ss')}.log")))

    app = app.create_app()
    serve(app=TransLogger(application=app, setup_console_handler=True),
          port=conf['server']['port'])
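The file handler attached above has no formatter, so each record is written as the bare message. If timestamps are wanted in that log, a formatter could be set on the handler before it is attached; a minimal sketch, assuming the same "wsgi" logger name (the file name below is a placeholder, not the one built in the snippet above):

from os import makedirs
from logging import Formatter, FileHandler, getLogger

makedirs("log", exist_ok=True)  # FileHandler does not create missing directories
logger = getLogger("wsgi")
handler = FileHandler("log/wsgi.log")  # placeholder file name
handler.setFormatter(Formatter("%(asctime)s %(levelname)s %(message)s"))
logger.addHandler(handler)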
Example #12

@app.route('/messages')
@login_required()
def messages_list():
    return render_template('messages/messages_list.html',
                           active_nav='messages')


@app.route('/messages/<message_id>')
@login_required()
def message_detail(message_id):
    return render_template('messages/message_detail.html',
                           active_nav='messages')


userClass = user.User(app.config)
courseClass = course.Course(app.config)

if not app.config['DEBUG']:
    import logging
    from logging import FileHandler
    file_handler = FileHandler(app.config['LOG_FILE'])
    file_handler.setLevel(logging.WARNING)
    app.logger.addHandler(file_handler)

if __name__ == '__main__':
    app.run(host="0.0.0.0",
            port=int(os.environ.get("PORT", 8080)),
            debug=app.config['DEBUG'],
            threaded=True)
Example #13
# -*- coding: utf-8 -*-

import os
import json
import paho.mqtt.client as mqtt
import camera_mode_selector
from logging import getLogger, FileHandler, StreamHandler, DEBUG
logger = getLogger(__name__)
if not logger.handlers:
    fileHandler = FileHandler(r'./log/camera_runner.log')
    fileHandler.setLevel(DEBUG)
    streamHander = StreamHandler()
    streamHander.setLevel(DEBUG)
    logger.setLevel(DEBUG)
    logger.addHandler(fileHandler)
    logger.addHandler(streamHander)


# MQTT broker server
host = os.getenv('SSS_MQTT_HOST')
port = int(os.getenv('SSS_MQTT_PORT'))

# subscribe topic
sub_topic = 'sensor/event'
# publish topic
pub_topic = 'sensor/feedback/result/'


def on_connect(client, data, flags, response_code):
    logger.info('status {0}'.format(response_code))
    camera_mode_selector.change_mode('2')
Example #14
    return grad, hess


if __name__ == '__main__':

    from logging import StreamHandler, DEBUG, Formatter, FileHandler

    log_fmt = Formatter(
        '%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s '
    )
    handler = StreamHandler()
    handler.setLevel('INFO')
    handler.setFormatter(log_fmt)
    logger.addHandler(handler)

    handler = FileHandler('train_rank.py.log', 'a')
    handler.setLevel(DEBUG)
    handler.setFormatter(log_fmt)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)
    all_params = {
        'max_depth': [5],
        'learning_rate': [0.1],  # [0.06, 0.1, 0.2],
        'n_estimators': [5],
        'min_child_weight': [10],
        'colsample_bytree': [0.7],
        #'boosting_type': ['rf'],  # ['gbdt'],
        #'xgboost_dart_mode': [False],
        #'num_leaves': [96],
        'subsample': [0.9],
        #'min_child_samples': [10],
Example #15
import os
from app import app
from logging import FileHandler

if not app.debug:
    import logging

    file_handler = FileHandler(app.config['LOG_PATH'], 'a+', encoding="utf-8")
    file_handler.setLevel(logging.DEBUG)
    app.logger.addHandler(file_handler)

Example #16
def is_url(url):
    """ Checks if the URL exists """
    # https://stackoverflow.com/a/13641613
    try:
        if head(url).status_code == 200:
            return True
    except requests.ConnectionError as e:
        logger.warning("Error %s while connecting to %s" % (e, url))
    return False


config = ConfigObj("config")

handler = FileHandler(config["dir"]["log"])
logger = getLogger("audio_date_formatter")
formatter = Formatter("%(asctime)s - %(lineno)s: %(levelname)s %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(INFO)

logger.info("START %s" % (argv[0]))

past_date_start = (date.today() -
                   timedelta(days=int(config["days_back_start"]))).strftime(
                       config["broadcast_date_format"])
past_date_end = (date.today() -
                 timedelta(days=int(config["days_back_end"]))).strftime(
                     config["broadcast_date_format"])
Example #17
 def open_logfile():
     if not options.logfile:
         options.logfile = deluge.configmanager.get_config_dir("deluged.log")
     file_handler = FileHandler(options.logfile)
     log.addHandler(file_handler)
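The snippet above depends on Deluge's option parsing and config manager, so it does not run on its own. A self-contained sketch of the same pattern (fall back to a default path, then attach the handler) could look like the following; the default file name and logger name here are assumptions, not Deluge's actual values:

import logging
from logging import FileHandler

def open_logfile(logfile=None):
    if not logfile:
        logfile = "deluged.log"  # placeholder default path
    file_handler = FileHandler(logfile)
    file_handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logging.getLogger("deluge").addHandler(file_handler)

open_logfile()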
Example #18
'''
Deprecated: old app startup file.
[email protected]
'''
from app import app

import logging
from logging import Formatter, FileHandler

file_handler = FileHandler('app.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(Formatter('%(asctime)s %(levelname)s: %(message)s'))

app.logger.addHandler(file_handler)


################ Flask-APScheduler #################
## http://stackoverflow.com/questions/32424148/how-to-use-flask-apscheduler-in-existed-flask-app
class Config(object):
    JOBS = [{
        'id': 'job1',
        'func': '__main__:job1',
        'args': (1, 2),
        'trigger': 'interval',
        'seconds': 3600
    }, {
        'id': 'startup_job',
        'func': '__main__:startup_job',
        'args': ["Hello"]
    }]
Example #19
# Login manager for frontend
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "login"

# User Auth DB file - create if not existing
if not os.path.exists(app.config.get('USER_DB_FILE')):
    from glider_util.bdb import UserDB
    UserDB.init_db(app.config.get('USER_DB_FILE'))

# Create logging
if app.config.get('LOG_FILE') == True:
    import logging
    from logging import FileHandler
    file_handler = FileHandler('logs/glider_dac.txt')
    formatter = logging.Formatter(
        '%(asctime)s - %(process)d - %(name)s - %(module)s:%(lineno)d - %(levelname)s - %(message)s'
    )
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('Application Process Started')


# Create datetime jinja2 filter
def datetimeformat(value, format='%a, %b %d %Y at %I:%M%p'):
    if isinstance(value, datetime.datetime):
        return value.strftime(format)
    return value
Example #20
    def __add_handlers(self):
        pass

        # REMIND: add the console (stream) handler
        if not (self._judge_logger_has_handler_type(ColorHandler)
                or self._judge_logger_has_handler_type(
                    logging.StreamHandler)) and self._is_add_stream_handler:
            # Use the custom colored handler unless it is disabled, in which case fall back to a plain StreamHandler.
            handler = ColorHandler() if not self._do_not_use_color_handler else logging.StreamHandler()
            # handler = logging.StreamHandler()
            self.__add_a_hanlder(handler)

        # REMIND: add the multiprocess-safe rotating file handler
        if not (self._judge_logger_has_handler_type(
                ConcurrentRotatingFileHandler)
                or self._judge_logger_has_handler_type(
                    ConcurrentRotatingFileHandlerWithBufferInitiativeWindwos)
                or self._judge_logger_has_handler_type(
                    ConcurrentRotatingFileHandlerWithBufferInitiativeLinux)
                or self._judge_logger_has_handler_type(
                    ConcurrentDayRotatingFileHandler)
                or self._judge_logger_has_handler_type(FileHandler)
                or self._judge_logger_has_handler_type(
                    ConcurrentRotatingFileHandler)) and all(
                        [self._log_path, self._log_filename]):
            if not os.path.exists(self._log_path):
                os.makedirs(self._log_path)
            log_file = os.path.join(self._log_path, self._log_filename)
            file_handler = None
            if self._log_file_handler_type == 1:
                if os_name == 'nt':
                    # On Windows, ConcurrentRotatingFileHandler gives multiprocess-safe rotation, but at a heavy performance cost:
                    # 10 processes each writing 100k records to the same file take about 15 minutes, roughly 100x slower than writing without rotation.
                    file_handler = ConcurrentRotatingFileHandlerWithBufferInitiativeWindwos(
                        log_file,
                        maxBytes=self._log_file_size * 1024 * 1024,
                        backupCount=nb_log_config_default.
                        LOG_FILE_BACKUP_COUNT,
                        encoding="utf-8")
                elif os_name == 'posix':
                    # On Linux, ConcurrentRotatingFileHandler provides process-safe logging:
                    # 10 processes each writing 100k records to the same file take about 100 seconds, still ~10x slower than non-rotating writes, because of the per-write size check and file lock.
                    file_handler = ConcurrentRotatingFileHandlerWithBufferInitiativeLinux(
                        log_file,
                        maxBytes=self._log_file_size * 1024 * 1024,
                        backupCount=nb_log_config_default.
                        LOG_FILE_BACKUP_COUNT,
                        encoding="utf-8")

            elif self._log_file_handler_type == 4:
                file_handler = WatchedFileHandler(log_file)
            elif self._log_file_handler_type == 2:
                file_handler = ConcurrentDayRotatingFileHandler(
                    self._log_filename,
                    self._log_path,
                    back_count=nb_log_config_default.LOG_FILE_BACKUP_COUNT)
            elif self._log_file_handler_type == 3:
                file_handler = FileHandler(log_file,
                                           mode='a',
                                           encoding='utf-8')
            elif self._log_file_handler_type == 5:
                file_handler = ConcurrentRotatingFileHandler(
                    log_file,
                    maxBytes=self._log_file_size * 1024 * 1024,
                    backupCount=nb_log_config_default.LOG_FILE_BACKUP_COUNT,
                    encoding="utf-8")
            self.__add_a_hanlder(file_handler)

        # REMIND: add the MongoDB handler.
        if not self._judge_logger_has_handler_type(
                MongoHandler) and self._mongo_url:
            self.__add_a_hanlder(MongoHandler(self._mongo_url))

        if not self._judge_logger_has_handler_type(
                ElasticHandler
        ) and self._is_add_elastic_handler and nb_log_config_default.RUN_ENV == 'test':  # go through Kafka; do not write to Elasticsearch directly.
            """
            In production, Alibaba Cloud OSS logging is used instead of this handler.
            """
            self.__add_a_hanlder(
                ElasticHandler([nb_log_config_default.ELASTIC_HOST],
                               nb_log_config_default.ELASTIC_PORT))

        # REMIND: add the Kafka handler.
        # if self._is_add_kafka_handler:
        if not self._judge_logger_has_handler_type(
            KafkaHandler) and nb_log_config_default.RUN_ENV == 'test' \
            and nb_log_config_default.ALWAYS_ADD_KAFKA_HANDLER_IN_TEST_ENVIRONENT:
            self.__add_a_hanlder(
                KafkaHandler(nb_log_config_default.KAFKA_BOOTSTRAP_SERVERS, ))

        # REMIND: add the DingTalk handler.
        if not self._judge_logger_has_handler_type(
                DingTalkHandler) and self._ding_talk_token:
            self.__add_a_hanlder(
                DingTalkHandler(self._ding_talk_token,
                                self._ding_talk_time_interval))

        if not self._judge_logger_has_handler_type(
                CompatibleSMTPSSLHandler) and self._is_add_mail_handler:
            self.__add_a_hanlder(
                CompatibleSMTPSSLHandler(
                    **self._mail_handler_config.get_dict()))
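The rotating file handlers used above are custom or third-party classes rather than standard-library ones. For a single-process program, a minimal sketch with the standard library's RotatingFileHandler covers the same size-based rotation; the file name and sizes below are placeholders, and it is not safe for several processes writing to the same file:

import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger("app")
logger.setLevel(logging.DEBUG)

# Rotate at roughly 10 MB and keep 3 backups.
rotating_handler = RotatingFileHandler("app.log", maxBytes=10 * 1024 * 1024,
                                       backupCount=3, encoding="utf-8")
rotating_handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
logger.addHandler(rotating_handler)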
Example #21
@app.route('/favicon.ico')
def favicon():
    return static_file('favicon.ico', root='static/imgs')


@app.hook('after_request')
def check_host_http_header():
    accepted_hosts = {
        'localhost:8080', 'localhost', '127.0.0.1:8080', '127.0.0.1'
    }
    if request.headers['host'] not in accepted_hosts:
        redirect('http://localhost:8080/', 301)


update_status = threading.Thread(target=trackon.update_outdated_trackers)
update_status.daemon = True
update_status.start()

get_trackerlist_project_list = threading.Thread(
    target=trackerlist_project.main)
get_trackerlist_project_list.daemon = True
get_trackerlist_project_list.start()

handlers = [
    FileHandler('access.log'),
]
app = WSGILogger(app, handlers, ApacheFormatter())

if __name__ == '__main__':
    run(app, host='0.0.0.0', port=8080, server='waitress')
Example #22
from services import root_dir, nice_json
from flask import Flask
from werkzeug.exceptions import NotFound
import json
from logging import FileHandler, WARNING

app = Flask(__name__)

file_handler = FileHandler('logs/showtimes_log.log')
file_handler.setLevel(WARNING)

app.logger.addHandler(file_handler)

with open("{}/database/showtimes.json".format(root_dir()), "r") as f:
    showtimes = json.load(f)


@app.route("/", methods=['GET'])
def hello():
    return nice_json({
        "uri": "/",
        "subresource_uris": {
            "showtimes": "/showtimes",
            "showtime": "/showtimes/<date>"
        }
    })


@app.route("/showtimes", methods=['GET'])
def showtimes_list():
    return nice_json(showtimes)
        flash("An error occurred. Show could not be listed.")
    return render_template("pages/home.html")


@app.errorhandler(404)
def not_found_error(error):
    return render_template("errors/404.html"), 404


@app.errorhandler(500)
def server_error(error):
    return render_template("errors/500.html"), 500


if not app.debug:
    file_handler = FileHandler("error.log")
    file_handler.setFormatter(
        Formatter(
            "%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"
        ))
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info("errors")

# ----------------------------------------------------------------------------#
# Launch.
# ----------------------------------------------------------------------------#

# Default port:
if __name__ == "__main__":
Example #24
 def setFileHandler(self, filename="ObjectDetection.log", log_level=10):
     handler = FileHandler(filename=filename)
     handler.setLevel(log_level)
     # handler.setFormatter(self.formatter)
     self.root_logger.addHandler(handler)
Example #25
 def setUpClass(cls):
     """
     Perform class setup before running the testcase
     Remove shared memory files, start vpp and connect the vpp-api
     """
     gc.collect()  # run garbage collection first
     random.seed()
     cls.logger = getLogger(cls.__name__)
     cls.tempdir = tempfile.mkdtemp(prefix='vpp-unittest-%s-' %
                                    cls.__name__)
     cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
     cls.file_handler.setFormatter(
         Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
                   datefmt="%H:%M:%S"))
     cls.file_handler.setLevel(DEBUG)
     cls.logger.addHandler(cls.file_handler)
     cls.shm_prefix = cls.tempdir.split("/")[-1]
     cls.punt_socket_path = '%s/%s' % (cls.tempdir, vpp_uds_socket_name)
     os.chdir(cls.tempdir)
     cls.logger.info("Temporary dir is %s, shm prefix is %s", cls.tempdir,
                     cls.shm_prefix)
     cls.setUpConstants()
     cls.reset_packet_infos()
     cls._captures = []
     cls._zombie_captures = []
     cls.verbose = 0
     cls.vpp_dead = False
     cls.registry = VppObjectRegistry()
     cls.vpp_startup_failed = False
     cls.reporter = KeepAliveReporter()
     # need to catch exceptions here because if we raise, then the cleanup
     # doesn't get called and we might end with a zombie vpp
     try:
         cls.run_vpp()
         cls.reporter.send_keep_alive(cls)
         cls.vpp_stdout_deque = deque()
         cls.vpp_stderr_deque = deque()
         cls.pump_thread_stop_flag = Event()
         cls.pump_thread_wakeup_pipe = os.pipe()
         cls.pump_thread = Thread(target=pump_output, args=(cls, ))
         cls.pump_thread.daemon = True
         cls.pump_thread.start()
         cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls)
         if cls.step:
             hook = StepHook(cls)
         else:
             hook = PollHook(cls)
         cls.vapi.register_hook(hook)
         cls.sleep(0.1, "after vpp startup, before initial poll")
         try:
             hook.poll_vpp()
         except:
             cls.vpp_startup_failed = True
             cls.logger.critical(
                 "VPP died shortly after startup, check the"
                 " output to standard error for possible cause")
             raise
         try:
             cls.vapi.connect()
         except:
             if cls.debug_gdbserver:
                 print(
                     colorize(
                         "You're running VPP inside gdbserver but "
                         "VPP-API connection failed, did you forget "
                         "to 'continue' VPP from within gdb?", RED))
             raise
     except:
         t, v, tb = sys.exc_info()
         try:
             cls.quit()
         except:
             pass
         raise v.with_traceback(tb)
    parser.add_argument("--label_ratio", type=float, default=1.0)
    parser.add_argument("--sparse_ratio", type=float, default=1.0)

    parser.add_argument("--suffix", type=str, default="tmp")
    parser.add_argument("--exp_number", type=int, default=0)
    parser.add_argument("--gpu", type=int, default=-1)
    parser.add_argument("--save_score", action="store_true")

    parser.add_argument("--data", type=str, default="wiki.data.json")

    args = parser.parse_args()

    logger = getLogger("main")
    logger.setLevel(INFO)
    handler = FileHandler("logs/ExpLog_{}_{}.log".format(args.suffix, args.exp_number))
    handler.setLevel(INFO)
    logger.addHandler(handler)

    logger.info(str(args))

    # Load Data.
    # Load data.
    if (args.label_ratio < 1.0) or (args.sparse_ratio < 1.0):
        items, predicates = load_raw_data(args.data)

    if args.label_ratio < 1.0:
        # Randomly filter out label information.
        # Iterate over all train data, and count each predicate's frequency
        print("Counting predicate frequency...")
        pred_keys = {} #predicate id -> list of keys
Example #27
    return render_template('pages/home.html')


@app.errorhandler(404)
def not_found_error(error):
    return render_template('errors/404.html'), 404


@app.errorhandler(500)
def server_error(error):
    return render_template('errors/500.html'), 500


if not app.debug:
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(
        Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
        ))
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')

#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#

# Default port:
if __name__ == '__main__':
Example #28
# all the imports
import sqlite3
import myconfig
from flask import Flask, request, session, g, redirect, url_for, \
        abort, render_template, flash

# create our little application :)
app = Flask(__name__)
app.config.from_object(myconfig.developmentConfig)
app.config.from_envvar('FLASKR_SETTINGS', silent=True)

import logging
from logging import FileHandler
file_handler = FileHandler('./logs/test.log', 'a')
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)


def connect_db():
    return sqlite3.connect(app.config['DATABASE'])


@app.before_request
def before_request():
    g.db = connect_db()


@app.teardown_request
def teardown_request(exception):
    db = getattr(g, 'db', None)
    if db is not None:
Example #29
# Instance of flask app
app = Flask(__name__)

# load Environment configurations
if app.config["ENV"] == "production":
    app.config.from_object(ProductionConfig())
elif app.config["ENV"] == "testing":
    app.config.from_object(TestingConfig())
else:
    app.config.from_object(DevelopmentConfig())

from .utils.constants import LOGS_FILE

# Error Handle
file_handler = FileHandler(app.config[LOGS_FILE])
file_handler.setLevel(WARNING)

# Add Error Handler to App
app.logger.addHandler(file_handler)

# Middle Ware
app.wsgi_app = MiddleWare(app.wsgi_app)

db = SQLAlchemy(app)
from rq import Queue
from worker import conn

app.queue = Queue(connection=conn)
from app.routes import conversion
from app.routes import user
Example #30
    return x_train, y_train, cv


if __name__ == '__main__':

    from logging import StreamHandler, DEBUG, Formatter, FileHandler

    log_fmt = Formatter(
        '%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s '
    )
    handler = StreamHandler()
    handler.setLevel('INFO')
    handler.setFormatter(log_fmt)
    logger.addHandler(handler)

    handler = FileHandler(DIR + 'train_cont.py.log', 'a')
    handler.setLevel(DEBUG)
    handler.setFormatter(log_fmt)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)

    all_params = {
        'min_child_weight': [10],
        'subsample': [0.7],
        'seed': [114],
        'application': ['binary'],
        'colsample_bytree': [0.9],
        'verbose': [0],
        'learning_rate': [0.01],
        'max_depth': [5],
        'min_data_in_bin': [8],