Пример #1
0
 def __init__(self,template_dir):
     """Build a Jinja2 environment that loads templates from *template_dir*.

     Reads 'autoescape' (default True) and 'encoding' (default 'utf8')
     from the module-level ``config`` mapping.
     """
     # Autoescaping defaults to on; deployments can disable it via config.
     auto_escape = config.get('autoescape',True)
     # NOTE(review): self.encoding is stored but not passed to jinja2 here —
     # presumably consumed elsewhere when rendering; confirm against callers.
     self.encoding = config.get('encoding','utf8')
     self._env = jinja2.Environment(
         loader=jinja2.FileSystemLoader(template_dir
         ),
         autoescape=auto_escape)
Пример #2
0
def get_moves_data(move_id):
    """Return the data for *move_id* from the locale-specific moves JSON file.

    The parsed JSON is cached on the function object after the first call,
    so the file is read at most once per process.
    """
    # Bug fix: the cache guard checked a never-set 'en' attribute, so the
    # file was re-opened and re-parsed on every call. Check the attribute
    # that is actually assigned: 'moves'.
    if not hasattr(get_moves_data, 'moves'):
        file_path = "static/moves_{}.json".format(config.get('locale', 'en'))
        with open(file_path, 'r') as f:
            # json.load(f) replaces json.loads(f.read()) — same result.
            get_moves_data.moves = json.load(f)

    return get_moves_data.moves[str(move_id)]
Пример #3
0
def server(options):
    """Configure logging, then create and start the FakeAP.

    DEBUG level is used when --debug or config['debug'] is set; log output
    goes to stderr when options.log_stderr is set, otherwise to
    config['logfile'].
    """
    # Collapse the former four near-identical basicConfig() calls: format
    # and datefmt were the same in every branch, only the level and the
    # destination varied.
    level = logging.DEBUG if (options.debug or config.get('debug', False)) else logging.INFO
    if options.log_stderr:
        dest = {'stream': sys.stderr}
    else:
        dest = {'filename': config['logfile']}
    logging.basicConfig(level=level,
                        format='%(asctime)s [%(threadName)s] %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        **dest)

    # initialize the FakeAP
    fakeAP = FakeAP(config['interface'], config['ESSID'], config['BSSID'], config['channel'], config['beacon_interval_sec'], packet_callback)

    # start the FakeAP
    fakeAP.start()
Пример #4
0
def drain_queues(transport, loop):
    """Poll Redis forever, forwarding queued MQTT-SN messages to clients.

    Old-style asyncio coroutine (``yield from``), intended to be scheduled
    on *loop*; *transport* is the UDP transport used to send to clients.
    """

    conn = yield from aioredis.create_connection(
                    (config.get('redis', 'host'), 6379), loop=loop)
    while True:
        # check redis for list of my clients
        clients = yield from conn.execute('zrange', '%s:clients' % myid(), 0, -1)

        # for each client, check redis if there is a message in their queue
        for client in clients:
            next_message_in_queue = yield from conn.execute('lpop', '%s:queue' % client)

            if next_message_in_queue is not None:
                # not needed but makes nice debug
                msg = message.MQTTSNMessage()
                msg.parse(next_message_in_queue)
                logger.debug("Sending %s to %s" % (msg, client,))

                # get the socket from redis
                # SECURITY NOTE(review): unpickling a value read back from
                # Redis; safe only if nothing untrusted can write these keys.
                socket = pickle.loads(client)

                # if I want more info about this, it's here
                # client_info = r.hget('%s:socket' % client, 'socket'))

                # get transport somehow
                transport.sendto(next_message_in_queue, socket)

    # NOTE(review): unreachable — the while-loop above never exits, so the
    # connection is never closed here.
    conn.close()
Пример #5
0
def server(options):
    """Configure logging and start the HTTP server.

    DEBUG level (whose log format additionally includes the thread name) is
    used when --debug or config['debug'] is set; output goes to stderr when
    options.log_stderr is set, otherwise to config['logfile'].
    """
    # The four former basicConfig() branches differed only in level, format
    # and destination — compute each once.
    debug = options.debug or config.get('debug', False)
    if debug:
        level = logging.DEBUG
        fmt = '%(asctime)s [%(threadName)s] %(message)s'
    else:
        level = logging.INFO
        fmt = '%(asctime)s %(message)s'
    if options.log_stderr:
        dest = {'stream': sys.stderr}
    else:
        dest = {'filename': config['logfile']}
    logging.basicConfig(level=level, format=fmt,
                        datefmt='%Y-%m-%d %H:%M:%S', **dest)

    # start http server
    http_server = HttpServer(config)
    http_server.start(config['http_host'], config['http_port'])
Пример #6
0
def main():
    """Set up logging, start the event loop"""
    # Log to both a file and the console.
    file_handler = logging.FileHandler(config.get('logging', 'path'))
    console_handler = logging.StreamHandler()

    # One configured level and one configured format, applied everywhere.
    level = config.get('logging', 'level')
    fmt = logging.Formatter(config.get('logging', 'format'))

    logger.setLevel(level)
    for handler in (file_handler, console_handler):
        handler.setLevel(level)
        handler.setFormatter(fmt)
        logger.addHandler(handler)

    # ready to go
    logger.info("pymqttsn broker started")

    # start the asyncio loop
    loop = asyncio.get_event_loop()
    if signal is not None:
        loop.add_signal_handler(signal.SIGINT, loop.stop)

    listen_host = config.get('mqtt_sn', 'listen_host')
    listen_port = config.getint('mqtt_sn', 'listen_port')

    server = start_server(loop, (listen_host, listen_port))

    try:
        loop.run_forever()  # and ever and ever
    finally:
        server.close()
        loop.close()
        logger.info('Goodnight, sweet prince')
Пример #7
0
def i18n(word):
    """Translate *word* using the configured locale's dictionary.

    Returns *word* unchanged for the 'en' locale or when no translation
    exists. The locale dictionary is loaded once and cached on the
    function object.
    """
    locale = config.get('locale', 'en')
    if locale == "en":
        return word

    # Lazy-load and cache the locale dictionary on first use.
    if not hasattr(i18n, 'dictionary'):
        file_path = 'static/locales/{}.json'.format(locale)
        with open(file_path, 'r') as f:
            i18n.dictionary = json.load(f)

    # dict.get with a default replaces the explicit membership test + index.
    return i18n.dictionary.get(word, word)
Пример #8
0
def meerkatd(options):
    """Configure logging, start the scheduler and HTTP server, register with
    mission control, then run the scheduler (initially paused).

    DEBUG level (with thread name in the format) is used when --debug or
    config['debug'] is set; output goes to stderr when options.log_stderr
    is set, otherwise to config['logfile'].
    """
    # One basicConfig() call replaces the four former branches that
    # differed only in level, format and destination.
    if options.debug or config.get('debug', False):
        level = logging.DEBUG
        fmt = '%(asctime)s [%(threadName)s] %(message)s'
    else:
        level = logging.INFO
        fmt = '%(asctime)s %(message)s'
    dest = {'stream': sys.stderr} if options.log_stderr else {'filename': config['logfile']}
    logging.basicConfig(level=level, format=fmt,
                        datefmt='%Y-%m-%d %H:%M:%S', **dest)

    # initialize the scheduler
    scheduler = Scheduler(config["datafile"], config["probespath"],
                          config["probes"])

    # start http server
    http_server = HttpServer(scheduler, config)
    http_server.start()

    # register with mission control
    #r = requests.post(config["mission_control"]["register_url"], data=http_server.info_json())
    r = http_server.register_control_json()
    logging.info("Registered with mission control: %s" % r)

    # start the scheduler
    scheduler.start(paused=True)
Пример #9
0
def meerkatd(options):
    """Configure logging, then bring up the scheduler, the HTTP server and
    the mission-control registration; finally start the scheduler paused.
    """
    # Deduplicate the former four near-identical basicConfig() branches:
    # only level, format and destination varied.
    debug = options.debug or config.get('debug', False)
    level = logging.DEBUG if debug else logging.INFO
    # DEBUG output additionally carries the thread name.
    fmt = ('%(asctime)s [%(threadName)s] %(message)s' if debug
           else '%(asctime)s %(message)s')
    if options.log_stderr:
        logging.basicConfig(level=level, stream=sys.stderr, format=fmt,
                            datefmt='%Y-%m-%d %H:%M:%S')
    else:
        logging.basicConfig(level=level, filename=config['logfile'],
                            format=fmt, datefmt='%Y-%m-%d %H:%M:%S')

    # initialize the scheduler
    scheduler = Scheduler(config["datafile"], config["probespath"], config["probes"])

    # start http server
    http_server = HttpServer(scheduler, config)
    http_server.start()

    # register with mission control
    #r = requests.post(config["mission_control"]["register_url"], data=http_server.info_json())
    r = http_server.register_control_json()
    logging.info("Registered with mission control: %s" % r)

    # start the scheduler
    scheduler.start(paused=True)
Пример #10
0
    def __init__(self, queue):
        """Read bot credentials and notification filters from config, then
        kick off the processing loop via gevent spawn."""
        self.__bot_token = config['bot_token']
        self.__chat_id = config['chat_id']
        self.__client = TelegramBot(self.__bot_token)

        # Hour offset applied to timestamps (default 0) plus notify filters.
        self.__timezone = config.get('timezone', 0)
        self.__notify_levels = config['notify_levels']
        self.__notify_pokemon = config['notify_pokemon']

        self.__queue = queue
        self.__raids = {}
        self.__messages = {}

        retry_time = 1
        try:
            # NOTE(review): self.__run() is CALLED here, so spawn() receives
            # its return value rather than the bound method — gevent.spawn
            # normally expects spawn(self.__run). Confirm intended behavior.
            spawn(self.__run())
            retry_time = 1
        except Exception as e:
            log.exception("Exception during runtime spawn: {}".format(repr(e)))
            # NOTE(review): retry_time is local and this except can run at
            # most once (there is no loop), so this backoff has no effect.
            retry_time *= 2
            sleep(retry_time)
            pass
Пример #11
0
def senders(message):
    """Send *message* to the configured Telegram chat via the Bot API.

    Returns the decoded JSON response from Telegram.
    """
    bot_token = config.get("TOK_TELEGRAM")
    bot_chatid = config.get("CHAT_ID")
    url = 'https://api.telegram.org/bot' + bot_token + '/sendMessage'
    # Bug fix: the message (and chat id) used to be concatenated straight
    # into the URL, so spaces, '&', '#' etc. in the text corrupted the
    # request. Pass them as query parameters so requests URL-encodes them.
    response = requests.get(url, params={
        'chat_id': bot_chatid,
        'parse_mode': 'Markdown',
        'text': message,
    })
    return response.json()
Пример #12
0
from threading import Thread
from gevent import monkey
from gevent import wsgi
from flask import Flask, request

# Custom files and packages
from config.config import config
from teleraid.teleraid import TeleRaid

# Patch the stdlib with gevent-cooperative versions before anything uses it.
monkey.patch_all()
logging.basicConfig(
    format='%(asctime)s [%(threadName)18s][%(module)14s][%(levelname)8s] ' +
    '%(message)s')
log = logging.getLogger()
# Config-driven verbosity on the root logger.
if config.get('debug', False):
    log.setLevel(logging.DEBUG)
else:
    log.setLevel(logging.INFO)

app = Flask(__name__)
# Webhook payloads are queued here for the TeleRaid worker to consume.
data_queue = Queue.Queue()


@app.route('/', methods=['POST'])
def accept_webhook():
    try:
        data = json.loads(request.data)
        data_queue.put(data)
    except Exception as e:
        log.exception(
Пример #13
0
# -*- coding: utf-8 -*-
__author__ = 'baio'

import requests as req
import simplejson as json
import yaml
from config.config import config

_elastic_host_url = config.get("ES_URI", None)

def _req_hits(q):
    """GET *q* against the Elasticsearch host and return its hits list
    (an empty list when ES is unconfigured or there are no hits)."""
    # ES_URI may legitimately be unset — treat as "no results".
    if not _elastic_host_url: return []
    res = req.get(_elastic_host_url + "/" + q)
    # SECURITY NOTE(review): yaml.load without an explicit Loader is unsafe
    # on untrusted input and rejected by modern PyYAML; here it parses the
    # (JSON-compatible) ES response body.
    hits = yaml.load(res.content)["hits"]
    return hits["hits"] if len(hits) > 0 else []

def _req_hits_multi(index_data):
    """
    index_data - list of buckets:
    index : index/type for es request
    data : [requests]
    """
    if not _elastic_host_url: return []
    index_data = filter(lambda x: len(x[1]) > 0, index_data)
    d = "".join(map(lambda x: u"".join(
                    map(
                        lambda y: "{}\n{}\n".format(json.dumps({"index" : x[0]}),json.dumps(y)),
                        x[1]))
        , index_data))
    res = req.get(_elastic_host_url + "/_msearch", data=d)
    content = yaml.load(res.content)
Пример #14
0
        for message in messages:
            await message.delete()
    # print(nombre[0].isdigit())
    elif nombre[0].isdigit():
        messages = await ctx.channel.history(limit=int(nombre[0]) + 1).flatten()
        for message in messages:
            await message.delete()
    elif nombre[0] == 'all':
        await channel.purge(limit=None)
    elif nombre[0] == 'on':
        print(nombre[1])
        messages = await ctx.channel.history().flatten()
        i = 0
        for _ in reversed(messages):
            await _.delete()
            if i == int(nombre[1] - 1):
                break
            i += 1
    elif nombre[0] == 'off':
        messages = await ctx.channel.history().flatten()
        print(len(messages))
        i = 0
        for _ in messages:
            if i == 0 or i > int(nombre[1]):
                await _.delete()
            i += 1
    print(f'Fin de suppression des messages du channel {channel}')


# Start the bot with the configured token; blocks until the process stops.
bot.run(config.get('TOKEN'))
Пример #15
0
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from redis import StrictRedis

from config.config import config
app = Flask(__name__)
db = SQLAlchemy()


# Initialize configuration (note: rebinds the imported `config` mapping to
# its 'default' entry, shadowing the original name).
config = config.get('default')
app.config.from_object(config)
# NOTE(review): Redis connection parameters are hard-coded here rather than
# taken from the config object — confirm this is intentional.
xredis = StrictRedis(host='127.0.0.1', port=6379, db=2, charset='utf-8', decode_responses=True)

# Initialize the database
db.init_app(app)
Пример #16
0
import asyncio
import logging
import redis
import aioredis
import os
import time
import pickle

from config.config import config

from broker import message
from broker import actions

logger = logging.getLogger('broker.socketio')
r = redis.Redis(config.get('redis', 'host'))

class MQTTSNBrokerProtocol:

    def connection_made(self, transport):
        """Keep a reference to the transport once the endpoint is created."""
        logger.info(('start', transport,))
        self.transport = transport

    def datagram_received(self, data, addr):
        logger.debug(('Data received:', data, addr,))
        msg = message.MQTTSNMessage()

        if not msg.parse(data):
            logger.warning('Message parse error!')
            return

        logger.debug('Parsed: %s' % (msg, ))
Пример #17
0
def myid():
    """Return this broker's internal identifier string.

    Format: 'broker:<configured broker name>:<pid>'.
    """
    return 'broker:{}:{}'.format(config.get('redis', 'broker_name'), os.getpid())
Пример #18
0
import concurrent.futures
import sys
import time

from binance_api import BinanceAPI
from config.config import config
from algo import follow

# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Retry forever: rebuild the API client and fan the follow() work out
    # over all assets; on any failure wait 5 seconds and start over.
    while True:
        try:
            test = BinanceAPI(key=config.get("KEY"), secret=config.get("SECRET"),
                              recv_windows=config.get("recv_windows"), percent=config.get('percent'), loop_time=5)
            with concurrent.futures.ThreadPoolExecutor(len(test.assets)) as executor:
                results = executor.map(follow, test.assets)
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt, making the loop impossible to stop with Ctrl-C.
        except Exception:
            print(f"erreur {sys.exc_info()[0]}")
            time.sleep(5)
Пример #19
0
# -*- coding: utf-8 -*-
"""
exam app runserver
"""
import os

from flask_migrate import MigrateCommand, Migrate
from etl import create_app, db, celery  # noqa
from config.config import config
from flask_script import Manager, Server

# Which configuration to load; defaults to "testing" when the env var is unset.
envirement = os.environ.get("ETL_ENVIREMENT", "testing")

app = create_app(config.get(envirement, config["testing"]))
migrate = Migrate(app, db, compare_type=True)

manager = Manager(app)
# Auto-reload only in the dev/testing environments.
manager.add_command(
    "runserver",
    Server(host="0.0.0.0",
           use_reloader=True if envirement in ("dev", "testing") else False),
)
manager.add_command("db", MigrateCommand)

if __name__ == "__main__":
    manager.run()
Пример #20
0
from etl.etl import create_app, celery
from config.config import config
import os

# Which configuration to load; defaults to 'dev' when the env var is unset.
envirement = os.environ.get('ETL_ENVIREMENT', 'dev')

app = create_app(config.get(envirement, config["dev"]))
# Smoke check that the celery integration imported correctly.
print(celery.Task)
Пример #21
0
# -*- coding: utf-8 -*-
__author__ = 'baio'

import requests as req
import simplejson as json
import yaml
from config.config import config

_elastic_host_url = config.get("ES_URI", None)


def _req_hits(q):
    """Issue a GET for *q* against the ES host and return its hits list
    (empty when ES is unconfigured or nothing matched)."""
    if not _elastic_host_url:
        return []
    response = req.get("{}/{}".format(_elastic_host_url, q))
    hits = yaml.load(response.content)["hits"]
    if len(hits) > 0:
        return hits["hits"]
    return []


def _req_hits_multi(index_data):
    """
    index_data - list of buckets:
    index : index/type for es request
    data : [requests]
    """
    if not _elastic_host_url: return []
    index_data = filter(lambda x: len(x[1]) > 0, index_data)
    d = "".join(
        map(
            lambda x: u"".join(
                map(
                    lambda y: "{}\n{}\n".format(json.dumps({"index": x[0]}),
Пример #22
0
# -*- coding:utf-8-*-
import redis
from config.config import config

# 因为这里用的是多个单线程的进程起的服务器,因此连接池应该是不需要的
# _pool = redis.Connection(
#     host=config['redis_host'],
#     port=6379,
#     decode_responses=True
# )

password = config.get('redis_pswd')
# Bug fix: the original conditional was inverted — when a password WAS
# configured it built the client WITHOUT one, and when none was configured
# it passed password=None. Add the password only when it is actually set.
_redis_kwargs = dict(
    host=config['redis_host'],
    port=6379,
    decode_responses=True,
)
if password:
    _redis_kwargs['password'] = password
redis_client = redis.Redis(**_redis_kwargs)
Пример #23
0
import time
from datetime import datetime
import re
from etl.tasks.tasks import task_warehouse, task_extract_data
import json
from traceback import print_exc
from flask import current_app
from etl.etl import celery
from etl.tasks.unload_s3 import UploadS3

# Service singletons shared by the rollback machinery below.
sql_service = DatasourceSqlService()
datasource_service = DatasourceService()
cleaninfo_service = ExtCleanInfoService()
log_service = ExtLogSqlService()

# Environment-specific settings; defaults to the "dev" configuration.
setting = config.get(os.getenv("ETL_ENVIREMENT", "dev"))
S3 = boto3.resource("s3")
# Bucket holding the extracted ETL data.
S3_BUCKET = "ext-etl-data"


class RollbackError(Exception):
    """Raised when a warehouse task must be rolled back.

    Instantiating it also records *content* through the log service.
    """

    def __init__(self, content: dict) -> None:
        # Side effect: persist the failure details as a log entry.
        log_service.add_log(**content)
        self.content = content

    def __str__(self):
        return '{}'.format(self.content)


class RollbackTaskSet:
    upsert_tables = {"chain_store", "chain_goods", "chain_category"}
Пример #24
0
import datetime
import random
import time

from binance_api import BinanceAPI
from config.config import config
from telegram import senders

# Module-level Binance client shared by follow(); credentials and tuning
# come from the config mapping. loop_time bounds each follow window
# (used as minutes in follow() below).
ba = BinanceAPI(key=config.get("KEY"),
                secret=config.get("SECRET"),
                recv_windows=config.get("recv_windows"),
                percent=config.get('percent'),
                loop_time=5)


def follow(asset):
    _nowt = datetime.datetime.now()
    while True:
        if datetime.datetime.now() > _nowt + datetime.timedelta(
                minutes=ba.loop_time):
            break
        # print(f"Début suivi sur {asset}")
        i = ba.portfolio
        # print(i)
        # print(asset[:-3])
        if asset[:-3] in i:
            # print(f"{asset[:-3]} {asset} présent !!!!")
            # print(f"j : {j}, asset : {asset}")
            if ba.portfolio[asset[:-3]]['free'] != 0:
                # print("ok")
                for k in ba.products['symbols']: