Example #1
def __init__(self, cfg=None):
    self.blocked_set_name = cfg['setname']
    try:
        self.redis_connection = Walrus(host=cfg['host'],
                                       port=cfg['port'],
                                       db=cfg['db'])
    except Exception as e:
        print("Exception occurred when connecting to Redis:", e)
Example #2
    def __init__(self):
        self.logger = retrieve_logger("messages_processor")

        self.walrus = Walrus(host=REDIS_HOSTNAME, port=REDIS_PORT)
        self.devices_messages = self.walrus.List('devices_messages')

        self.thread_pool_executor = ThreadPoolExecutor(
            max_workers=self.MAX_WORKERS)

        self.devices_service = DevicesService(logger=self.logger)
        self.rules_service = RulesService(logger=self.logger)
        self.rules_executor = RulesExecutor(logger=self.logger)
        self.protocol_filter_handler = ProtocolFilterHandler(self.logger)
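
For context, the walrus List container used here wraps a plain Redis list; a minimal producer/consumer sketch (host and payload are illustrative), mirroring how start() in Example #7 drains the same queue:

import json

from walrus import Walrus

db = Walrus(host='localhost', port=6379)
queue = db.List('devices_messages')

queue.append('{"id": "sensor-1", "v": 20}')  # RPUSH onto the list
raw = queue.bpopleft(timeout=5)              # blocking LPOP; bytes, or None on timeout
if raw is not None:
    message = json.loads(raw.decode())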
Example #3
def transform_and_roll_out():
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    sys.excepthook = uncaught_ex_handler

    parser = create_argparser()
    args = parser.parse_args()

    configuration = get_bot_defaults()

    if args.conf:
        with open(args.conf) as cfile:
            configuration.update(parse_config(cfile))

    # Environment variables override configuration file settings
    env_config = get_environment_configuration()
    configuration.update(env_config)

    if ROLLBAR_ACCESS_TOKEN in configuration:
        rollbar.init(configuration[ROLLBAR_ACCESS_TOKEN],
                     configuration[ROLLBAR_ENVIRONMENT])
        rollbar_handler = RollbarHandler()
        rollbar_handler.setLevel(logging.ERROR)
        logging.getLogger('').addHandler(rollbar_handler)

    # This is hack-city, but since we're constructing the redis data
    # after the fact, we'll now bolt the database back into the baseclass
    walrus = Walrus(host=configuration[REDIS_URL],
                    port=configuration[REDIS_PORT],
                    password=configuration[REDIS_PASSWORD])
    AutoBotBaseModel.set_database(walrus)

    bot = AutoBot(configuration)
    bot.run(args.forever, args.interval)
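
The get_environment_configuration helper is not shown in this listing; a plausible sketch (not the project's actual implementation), assuming the REDIS_* and ROLLBAR_* constants are plain string keys that double as environment-variable names:

import os

def get_environment_configuration():
    # Only keys actually present in the environment make it into the dict,
    # so configuration.update() never clobbers file settings with None.
    keys = (REDIS_URL, REDIS_PORT, REDIS_PASSWORD,
            ROLLBAR_ACCESS_TOKEN, ROLLBAR_ENVIRONMENT)
    return {key: os.environ[key] for key in keys if key in os.environ}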
Example #4
def get_conn():
    """Get a redis connection, reusing one if it exists."""
    global __redis
    if __redis.get("walrus") is None:
        conf = current_app.config
        try:
            __redis["walrus"] = Walrus(host=conf["REDIS_ADDR"],
                                       port=conf["REDIS_PORT"],
                                       password=conf["REDIS_PW"],
                                       db=conf["REDIS_DB_NUMBER"])
        except Exception as error:
            raise PicoException('Internal server error. ' +
                                'Please contact a system administrator.',
                                data={'original_error': error})
    return __redis["walrus"]
Example #5
class ProtocolFilterHandler(object):
    PROTOCOLS_FILTER_QUEUES_NAME = {
        "MQTT": "devices/mqtt",
        "COAP": "devices/coap",
    }

    def __init__(self, logger):
        self.logger = logger

        self.walrus = Walrus(REDIS_HOSTNAME, REDIS_PORT)

    def parse(self, message):
        protocol = message["device_info"]["protocol"]
        protocol_queue_name = self.PROTOCOLS_FILTER_QUEUES_NAME[protocol]
        protocol_queue = self.walrus.List(protocol_queue_name)
        protocol_queue.append(json.dumps(message))

        self.logger.info("Pushed the message {} to queue {}".format(
            message, protocol_queue_name))
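
An illustrative call; the message shape mirrors exactly what parse() reads:

import logging

handler = ProtocolFilterHandler(logging.getLogger("protocol_filter"))
handler.parse({
    "device_info": {"protocol": "MQTT", "ip": "10.0.0.5", "port": 1883},
    "value": 21.5,
})  # JSON-encodes the message and appends it to the 'devices/mqtt' list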
Example #6
class RedisManager(object):
    def __init__(self, cfg=None):
        self.blocked_set_name = cfg['setname']
        try:
            self.redis_connection = Walrus(host=cfg['host'],
                                           port=cfg['port'],
                                           db=cfg['db'])
        except Exception as e:
            print("Exception occurred when connecting to Redis:", e)

    @property
    def connection(self):
        return self.redis_connection

    def setup(self):
        # We can set up all DB-related pre-operations here. For simplicity,
        # we only try to connect and display errors, if any.
        try:
            with open('src/model/data/blocked_words.json', 'r') as f:
                members = json.loads(f.read())
            return self.redis_connection.Set(
                self.blocked_set_name).add(*members)
        except Exception as e:
            print('Unable to Setup Redis: {}'.format(e))
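
A usage sketch with a hypothetical cfg dict; the keys are exactly the ones the constructor and setup() read:

cfg = {
    'host': 'localhost',  # illustrative connection settings
    'port': 6379,
    'db': 0,
    'setname': 'blocked_words',
}
manager = RedisManager(cfg=cfg)
manager.setup()  # seeds the blocked-words Set from the bundled JSON file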
Example #7
class DevicesMessagesProcessor(object):
    MAX_WORKERS = 50

    GASS_SERVER_GATEWAY_DEVICES_PERFORMED_ACTIONS = "{}/gateways/{}/devices/actions" \
        .format(GASS_SERVER_API_URL, GATEWAY_UUID)
    GASS_SERVER_GATEWAY_DEVICES_RECEIVED_ACTIONS = "{}/gateways/{}/devices/actions/test" \
        .format(GASS_SERVER_API_URL, GATEWAY_UUID)

    def __init__(self):
        self.logger = retrieve_logger("messages_processor")

        self.walrus = Walrus(host=REDIS_HOSTNAME, port=REDIS_PORT)
        self.devices_messages = self.walrus.List('devices_messages')

        self.thread_pool_executor = ThreadPoolExecutor(
            max_workers=self.MAX_WORKERS)

        self.devices_service = DevicesService(logger=self.logger)
        self.rules_service = RulesService(logger=self.logger)
        self.rules_executor = RulesExecutor(logger=self.logger)
        self.protocol_filter_handler = ProtocolFilterHandler(self.logger)

    def _rule_is_inside_activation_interval(self, interval_start,
                                            interval_end):
        current_date = datetime.datetime.utcnow()
        weekday_inside_interval = (interval_start["weekday"]
                                   <= current_date.weekday()
                                   <= interval_end["weekday"])
        if not weekday_inside_interval:
            return False

        hour_inside_interval = (interval_start["hour"]
                                <= current_date.hour
                                <= interval_end["hour"])
        if not hour_inside_interval:
            return False

        minute_inside_interval = (interval_start["minute"]
                                  <= current_date.minute
                                  <= interval_end["minute"])
        if not minute_inside_interval:
            return False

        return True

    def _filter_rules_by_active_interval(self, rules):
        if not rules:
            return []

        valid_rules = []
        for rule in rules:
            if not rule.get("interval"):
                valid_rules.append(rule)
                continue

            interval_start = rule["interval"]["start"]
            interval_end = rule["interval"]["end"]
            if not self._rule_is_inside_activation_interval(
                    interval_start, interval_end):
                self.logger.error(
                    "The rule {} should not be verified because it is not active at the moment"
                    .format(str(rule["_id"])))
                continue

            valid_rules.append(rule)

        return valid_rules

    def _send_performed_actions(self, performed_actions):
        body = performed_actions
        attempts = 0
        while True:
            self.logger.info(
                "Attempt {} to send the performed actions to the GaaS Server".
                format(attempts))
            try:
                response = requests.post(
                    self.GASS_SERVER_GATEWAY_DEVICES_PERFORMED_ACTIONS,
                    json=body)
                if response.status_code == HTTPStatusCodes.CREATED:
                    self.logger.debug(
                        "The performed actions have been sent successfully to the GaaS Server"
                    )
                    return

                attempts += 1
            except Exception as err:
                self.logger.error(
                    "Failed to send the performed actions to the GaaS Server. Reason: {}"
                    .format(err), )

                self.logger.info("Sleeping...")
                time.sleep(5)
                attempts += 1
                self.logger.info(
                    "Retry sending the performed actions to the GaaS Server")

    def _test_forward_receive_actions(self, actions):
        body = actions
        attempts = 0

        while True:
            self.logger.info(
                "Attempt {} to send the received actions to the GaaS Server".
                format(attempts))
            try:
                response = requests.post(
                    self.GASS_SERVER_GATEWAY_DEVICES_RECEIVED_ACTIONS,
                    json=body)
                if response.status_code == HTTPStatusCodes.CREATED:
                    self.logger.debug(
                        "The received actions have been sent successfully to the GaaS Server"
                    )
                    return

                attempts += 1
            except Exception as err:
                self.logger.error(
                    "Failed to send the received actions to the GaaS Server. Reason: {}"
                    .format(err),
                    exc_info=True,
                )

                self.logger.info("Sleeping...")
                time.sleep(5)
                attempts += 1
                self.logger.info(
                    "Retry sending the received actions to the GaaS Server")

    def _send_devices_new_values(self, devices_new_values):
        devices_ids = [device['id'] for device in devices_new_values]
        devices = list(self.devices_service.find_multiple_devices(devices_ids))
        device_id_to_device_info = {device["id"]: device for device in devices}

        for device_new_value in devices_new_values:
            device_id = device_new_value["id"]
            device_value = device_new_value["value"]
            device_info = device_id_to_device_info[device_id]
            message = {
                "device_info": {
                    "device_uuid": device_id,
                    "protocol": device_info["protocol"],
                    "ip": device_info["ip"],
                    "port": device_info.get("port", 0)
                },
                "value": device_value
            }
            self.protocol_filter_handler.parse(message)

    def _parse_message(self, message):
        try:
            self.logger.debug("Received message: {}".format(message))

            device_id, new_value = message["id"], message["v"]
            updated = self.devices_service.update(device_id, new_value)
            if not updated:
                self.logger.debug("Failed to update the sensor's value")
                return

            performed_actions = [{
                "type": ACTIONS_TYPES.CHANGE_VALUE,
                "device": device_id,
                "value": new_value,
                "timestamp": get_utc_timestamp(),
            }]
            self.logger.debug("Device has been updated")

            rules_to_check = list(
                self.rules_service.find_that_involves_devices([device_id]))
            rules_to_check = self._filter_rules_by_active_interval(
                rules_to_check)
            self.logger.debug("Have to check {} rules".format(
                len(rules_to_check)))

            devices_new_values, performed_actions_by_rules = self.rules_executor.execute(
                rules_to_check)
            performed_actions.extend(performed_actions_by_rules)
            self.logger.debug(
                "Performed actions: {}".format(performed_actions))

            self.logger.debug("Send the performed actions to the server")
            self._send_performed_actions(performed_actions)

            # Send the new values to devices to update their state
            self.logger.debug(
                "Devices new values: {}".format(devices_new_values))
            if not devices_new_values:
                return

            self._send_devices_new_values(devices_new_values)
            self.logger.debug("The new values have been sent to the devices")

        except Exception as err:
            self.logger.error(
                "An error occurred while parsing the message received from the device. Reason: {}"
                .format(err),
                exc_info=True)

    def _write_request_latency(self, emit_time):
        now = time.time()
        latency = now - emit_time

        with open("latency.txt", mode="a") as file_handler:
            file_handler.write("{}\n".format(latency))

        self.logger.info("Current Average Latency: {}".format(latency))

    def start(self):
        self.logger.debug("Waiting for messages from devices")

        while True:
            message = self.devices_messages.bpopleft(timeout=120)
            if not message:
                continue

            message = json.loads(message.decode())
            self.logger.info("Received message: {}".format(message))
            self.thread_pool_executor.submit(self._parse_message, message)
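
Note that both retry loops in this class count attempts but never give up; a bounded variant with the same shape (MAX_ATTEMPTS, the helper name, and the URL handling are illustrative):

import time

import requests

MAX_ATTEMPTS = 5

def post_with_retries(url, body):
    for attempt in range(1, MAX_ATTEMPTS + 1):
        try:
            response = requests.post(url, json=body)
            if response.status_code == 201:  # HTTP 201 Created
                return response
        except requests.RequestException:
            time.sleep(5)  # fixed backoff between attempts
    raise RuntimeError("giving up after {} attempts".format(MAX_ATTEMPTS))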
Example #8
from walrus import Walrus

_db = None  # module-level cache for the connection


def get_db():
    global _db
    if _db is None:
        _db = Walrus(host='redis', port=6379, db=1)
    return _db
Example #9
import time

from walrus import Walrus

if __name__ == '__main__':
    db = Walrus(host='localhost', port=6379, db=0)

    # setting and getting values
    db['titulo'] = 'Python a fondo'
    db['calorias'] = 8987
    print(db['titulo'])
    print(db.get('calorias'))
    print(db.get('nombre_perro'))

    # containers / data types
    h = db.Hash('planetas')
    h.update(mercurio=3.303e+23, venus=4.869e+24)
    print(h)
    for key, value in h:
        print(f'Nombre: {key} -> {value}')
    del h['tierra']
    print(h)
    print('venus' in h)

    # autocomplete
    ac = db.autocomplete()
    textos = [
        'Python es el mejor lenguaje del mundo',
        'La comunidad de python usa mucho software libre',
        'La mayoría de módulos de python son software libre',
        'Redis está construido usando Ruby',
    ]
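
The listing is cut off here; a sketch of how a walrus autocomplete index is typically fed and queried from a list like textos (the search term is illustrative):

    for texto in textos:
        ac.store(texto)  # index each phrase; title and data default to the id

    # search() matches on word prefixes, so 'python' finds the Python phrases
    for resultado in ac.search('python'):
        print(resultado)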
Example #10
DL_USERNAME = os.getenv("DL_USERNAME")
DL_PASSWORD = os.getenv("DL_PASSWORD")
DL_ARL = os.getenv("DL_ARL")

# Load APIs
THECATAPI = os.getenv("THECATAPI")

# Admin IDs
ADMINS = os.getenv("ADMINS")

# Threads
MAXIMUM_WORKERS = int(os.getenv("MAXIMUM_WORKERS"))

# YT ACCOUNT
YT_USERNAME = os.getenv("YT_USERNAME")
YT_PASSWORD = os.getenv("YT_PASSWORD")

# Disable terrorist content
DISABLE_TERRORIST_CONTENT = bool(os.getenv("DISABLE_TERRORIST_CONTENT"))

# Setup client
BOT = Client(session_name=SESSION_NAME,
             api_id=API_ID,
             api_hash=API_HASH,
             bot_token=BOT_TOKEN)

# Setup redis
db = Walrus(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)

# Setup executor
executor = concurrent.futures.ProcessPoolExecutor(max_workers=MAXIMUM_WORKERS)
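
One caveat in the snippet above: bool(os.getenv(...)) is True for any non-empty value, including the string "false". A stricter parse, if that distinction matters:

DISABLE_TERRORIST_CONTENT = os.getenv("DISABLE_TERRORIST_CONTENT", "").lower() in ("1", "true", "yes")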
Example #11
import os
import jwt

from datetime import datetime, timedelta

from flask import Flask, jsonify, request
from flask_cors import CORS

from werkzeug.security import generate_password_hash, check_password_hash
from walrus import Walrus

app = Flask(__name__)
app.config['SECRET_KEY'] = 'Herocoders'
CORS(app, resources={r"/data/*": {"origins": "*"}})

dbw = Walrus(host='127.0.0.1', port=6379, db=0)
cache = dbw.cache()

data = model.Data
user = model.Users


def check_character(character):
    """Danh sách các ký tự không mong muốn"""
    special = '''`~!#$%^&*()_=+-*/\[]{}|:;<>'"?'''
    for i in range(len(special)):
        if character == special[i]:
            return 0
    return 1
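
Since Python's in operator already performs this membership test, the loop can collapse to a single expression; an equivalent sketch that preserves the 0/1 return convention:

def check_character(character):
    """Return 0 if the character is unwanted, else 1."""
    special = '''`~!#$%^&*()_=+-*/\[]{}|:;<>'"?'''
    return 0 if character in special else 1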

Example #12
                    result = self.queue.get_result(self.task_id)
                    if result is None:
                        time.sleep(0.1)
                    else:
                        break

            if result is not None:
                self._result = result

        if self._result is not None and isinstance(self._result, TaskError):
            raise Exception('task failed: %s' % self._result.error)

        return self._result

if __name__ == '__main__':
    db = Walrus()  # roughly equivalent to db = Redis().
    queue = TaskQueue(db)

    @queue.task
    def object_detect(task_data):
        node_name = task_data['name']
        node_video_url = task_data['url']
        node_type = task_data['type']
        check_all_frames = False

        # fetch the video file from the node's IP,
        # then use opencv to do object detection
        if node_type == 'jpg':
            downloaded_video_path = "/tmp/video.jpg"
            check_all_frames = True
        else:
Example #13
    def __init__(self, logger):
        self.logger = logger

        self.walrus = Walrus(REDIS_HOSTNAME, REDIS_PORT)
Example #14
from flask_mongoengine import MongoEngine
from walrus import Walrus

DB_ENGINE = MongoEngine()
WALRUS_DB = Walrus(host='127.0.0.1', port=6379, db=5)
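
A sketch of how these module-level handles might be wired up; the Flask app and the Mongo settings are illustrative:

from flask import Flask

app = Flask(__name__)
app.config['MONGODB_SETTINGS'] = {'db': 'example'}

DB_ENGINE.init_app(app)    # bind the MongoEngine instance to the app
cache = WALRUS_DB.cache()  # walrus cache layered on the same Redis server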
Example #15
import redis
from flask import Flask
from rq import Queue
from rq_scheduler import Scheduler
from walrus import Walrus

from models import push, pull, export
from validate import validate

app = Flask(__name__)

conn = redis.from_url('redis://127.0.0.1:6379')
q = Queue(connection=conn)

high_queue = Queue('high', connection=conn)
scheduler = Scheduler(queue=high_queue, connection=conn)

wr = Walrus(host='localhost', port=6379, db=0)


def remove_submission(submission):
    for job in wr.List(submission + '_tasks'):
        job = q.fetch_job(job.decode('UTF-8'))
        if job:
            job.cancel()
    wr.Set('submission_ids').remove(submission)


def cron():
    s = wr.Set('submission_ids')
    print(f'[*] Total submissions: {len(s)}', flush=True)
    for submission in wr.Set('submission_ids'):
        print(f'[*] Active submission: {submission}', flush=True)
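
The cron function is presumably run on a schedule; one way to wire it up with the Scheduler constructed above (the 60-second interval is illustrative):

from datetime import datetime

scheduler.schedule(
    scheduled_time=datetime.utcnow(),  # first run as soon as possible
    func=cron,
    interval=60,                       # then every 60 seconds
    repeat=None,                       # repeat indefinitely
)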
Example #16
config: Munch = None

env = jinja2.Environment(loader=jinja2.PackageLoader('m0rkcoin_explorer'))


def load_config():
    with open(config_file_path) as config_file:
        _config = yaml.safe_load(config_file)
        globals()['config'] = Munch.fromDict(_config)
    try:
        with open(config_override_file_path) as config_override_file:
            _config_override = yaml.safe_load(config_override_file)
            globals()['config'].update(_config_override)
    except Exception:
        pass


load_config()

cache_client = Walrus(host=config.redis.host, port=config.redis.port)
cache = cache_client.cache()

CACHE_HOUR = 60 * 60
CACHE_DAY = CACHE_HOUR * 24
CACHE_WEEK = CACHE_DAY * 7
CACHE_MONTH = CACHE_DAY * 30

M0RKCOIN_PREV_HEIGHT = 'm0rkcoin:prev_height'
M0RKCOIN_EMISSION_KEY = 'm0rkcoin:emission'
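
The walrus cache object supports a cached decorator (the same pattern appears in Example #19); a sketch using one of the TTL constants above, with a hypothetical lookup:

@cache.cached(timeout=CACHE_HOUR)
def current_emission():
    # hypothetical expensive call, memoized in Redis for an hour
    return cache_client.get(M0RKCOIN_EMISSION_KEY)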
Example #17
from os import getenv
from os.path import dirname, abspath, join as path_join
from importlib import import_module
from datetime import datetime

from walrus import Walrus
from huey import RedisHuey

settings = import_module(getenv("CONFIG", "config.local"))

db = Walrus(
    host=getenv("REDIS-HOST", "localhost"),
    port=getenv("REDIS-PORT", 6379),
    db=getenv("REDIS-DB", 1),
)

Huey = RedisHuey(
    host=getenv("REDIS-HOST", "localhost"),
    port=getenv("REDIS-PORT", 6379),
    db=getenv("REDIS-DB", 2),
)

ROOT_DIR = dirname(abspath(__file__))
LOG_PATH = path_join(ROOT_DIR, "log")
# from blinker import signal

# from scheduler import HEARTBEAT

# SCHEDULER_SIGNAL = signal('SCHEDULER')
# heartbeat_meta = {
#     'last_checkpoint': None
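
A caveat: os.getenv returns a string whenever the variable is set, so port is a str in that case but an int when the 6379 default applies. A sketch that normalizes both paths:

db = Walrus(
    host=getenv("REDIS-HOST", "localhost"),
    port=int(getenv("REDIS-PORT", 6379)),  # coerce so both paths yield an int
    db=int(getenv("REDIS-DB", 1)),
)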
Example #18
import json
import os
from walrus import Model, Walrus, IntegerField, ListField, TextField, DateTimeField, BooleanField

_config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                            'config.json')
with open(_config_path) as _config_file:
    _config = json.load(_config_file)
redis_db = Walrus(host=_config['redis']['host'],
                  port=_config['redis']['port'],
                  db=0)


class UploadCache(Model):
    __database__ = redis_db

    _id = 0
    id = IntegerField(primary_key=True)
    cache = ListField()
    sequence_number = IntegerField(default=0)
    max_sequence_number = IntegerField(default=0)
    # store upload file information
    size = IntegerField(default=0)
    path = TextField(default='/')
    md5 = TextField()
    upload_date = DateTimeField()
    file_name = TextField()
    filetype = IntegerField()
    user_id = IntegerField()
    # store saving task information
    saving_task_start = BooleanField(default=False)

    @classmethod
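
The listing breaks off at a decorator, but the model above is already usable; a sketch of typical walrus Model calls (field values are illustrative, and datetime needs importing):

import datetime

entry = UploadCache.create(
    id=1,
    file_name='report.pdf',
    md5='d41d8cd98f00b204e9800998ecf8427e',
    upload_date=datetime.datetime.utcnow(),
    user_id=42,
)
entry = UploadCache.load(1)  # fetch back by primary key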
Example #19
import re

import requests
from pydash import py_
from pyquery import PyQuery as pq
from sanic import Sanic
from sanic.response import json
from sanic.exceptions import InvalidUsage
from sanic_cors import cross_origin
from walrus import Walrus

from kekette import get_env

config = get_env(prefix='ILEARN_DEV_')
app = Sanic()

wdb = Walrus()
cache = wdb.cache()

parse_number = lambda x: float(re.findall(r'[\d\.]+', x)[0])


@cache.cached(timeout=1200)
def fetch_data(endpoint, **params):
    remote_url = f'http://{config.x_server}/{endpoint}'
    headers = {'Host': config.x_server}

    return requests.get(remote_url, params=params, headers=headers)


def proxy_keywords(url):
    r = fetch_data('extract_keywords', jsdata=url)