Example 1
from tinygraph.tinygraphd.signals import pre_poll, post_poll, poll_error, \
    value_change, poll_start, poll_stop
from tinygraph.apps.rules.models import PackageInstanceMembership
from functools import wraps
from tinygraph.apps.data.models import Poll
from django.conf import settings
import django.dispatch
import socket
import logging
import datetime

SNMP_GETBULK_SIZE = getattr(settings, 'TINYGRAPH_SNMP_GETBULK_SIZE', 25)
DEVICE_TRANSPORT_ERROR_MESSAGE = getattr(settings, 
    'TINYGRAPH_DEVICE_TRANSPORT_ERROR_MESSAGE', 
    'Could not connect to device.')

logger = logging.getLogger('tinygraph.tinygraphd.PollDaemon')

VARIABLE_DATA_VALUE_TYPES = (
    'time_ticks',
    'counter',
    'gauge',
)

def watch_for_exceptions(f):
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            logger.critical('%s() threw an exception: %s' % (f.__name__, e))
        
    return wraps(f)(wrapper)
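
This decorator lets the poll daemon log unexpected exceptions instead of letting them crash the process. A minimal usage sketch (poll_device and the address are illustrative, not part of the original source):

@watch_for_exceptions
def poll_device(host):
    # Any exception raised here is caught by the wrapper and logged via
    # logger.critical; the call then returns None instead of propagating.
    raise socket.timeout('no SNMP response from %s' % host)

poll_device('192.0.2.1')  # logs "poll_device() threw an exception: ..." and returns None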
Example 2
import paho.mqtt.client as mqtt
import socket
import os
import getpass
import time

from utils import logging
logger = logging.getLogger(__name__)


class Singleton(type):
    """Generic singleton pattern for classes."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton,
                                        cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class MQTT(metaclass=Singleton):
    """Create and hold connection through MQTT."""
    QOS = 0

    def __init__(self, broker_url="mqtt.tinker.haus", broker_port=1883):
        self.__client = mqtt.Client()
        self.__client.enable_logger()
        self.__client.on_connect = self.on_connect

        self.__broker_url = broker_url
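
The Singleton metaclass above caches one instance per class in _instances, so constructing the class again returns the cached object and skips __init__. A minimal usage sketch with a toy class (Config is illustrative, not part of the original):

class Config(metaclass=Singleton):
    def __init__(self, name):
        self.name = name


first = Config("prod")
second = Config("dev")        # returns the cached instance; __init__ does not run again
assert first is second
assert first.name == "prod"   # the later constructor arguments are ignored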
Example 3
    'right': [
        {'title': 'Login', 'endpoint': 'admin.login', 'login_required': False, 'icon': 'fa fa-sign-in'},
        {'title': 'Logout', 'endpoint': 'admin.logout', 'login_required': True, 'icon': 'fa fa-sign-in'},
        {'title': 'Register', 'endpoint': 'admin.register', 'login_required': False, 'icon': 'fa fa-user'},
    ]
}

app = spa.SinglePageApp(app, navitems=NAV_BAR_ITEMS)

app.register_blueprint(welcome, url_prefix='/')
app.register_blueprint(demo, url_prefix='/demo')
app.register_blueprint(user, url_prefix='/user')

app.register_blueprint(admin, url_prefix='/admin')
app.enable_login_manager(login_manager, login_view='admin.login')

if __name__ == '__main__':

    # Turn off werkzeug logging as it's very noisy

    aps_log = logging.getLogger('werkzeug')
    aps_log.setLevel(logging.ERROR)

    # Set SPA logging level (if needed)

    log.setLevel(logging.INFO)

    print('\nvisit http://localhost:8050/\n')

    app.run_server(debug=False, threaded=False)
Example 4
truncfile(debuglog)

# for logging things in main
mainlog = make_logger('main_log',
    frmt=frmt,
    fpath=debuglog,
    stdout=True,
    # lvl=logging.DEBUG, # use one of the following to easily set the stdout log level
    lvl=logging.INFO,
    # lvl=logging.WARNING,
    # lvl=logging.ERROR,
    # lvl=logging.CRITICAL,
    flvl=logging.INFO
    )

man_log = logging.getLogger("main_log.managers")

'''
man_log = make_logger('managers',
    frmt=frmt,
    fpath=debuglog,
    stdout=True,
    # lvl=logging.DEBUG, # use one of the following to easily set the stdout log level
    lvl=logging.INFO,
    # lvl=logging.WARNING,
    # lvl=logging.ERROR,
    # lvl=logging.CRITICAL,
    flvl = logging.DEBUG
    )
'''
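
make_logger itself is not part of this excerpt. As a hypothetical reconstruction consistent with the comments above (lvl controls the stdout handler, flvl the file handler; frmt, debuglog, and truncfile come from elsewhere in the source file), such a factory might look roughly like this:

import logging

def make_logger(name, frmt, fpath, stdout=True, lvl=logging.INFO, flvl=logging.DEBUG):
    # Hypothetical sketch, not the original implementation.
    logger = logging.getLogger(name)
    logger.setLevel(min(lvl, flvl))          # let the handlers do the filtering
    formatter = logging.Formatter(frmt)

    file_handler = logging.FileHandler(fpath)
    file_handler.setLevel(flvl)              # flvl sets the file log level
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    if stdout:
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(lvl)         # lvl sets the stdout log level
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)

    return logger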
Example 5
import os
from typing import List

import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader

from utils import logging

logger = logging.getLogger()

SAVED_MODELS_PATH = 'output/saved_models'


class Trainer():
    """Parent to the trainers. Implement methods that are common across trainers.
    """
    def __init__(self, model: torch.nn.Module, classes: List[int]):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = model.to(self.device)
        self.classes = classes

    def _get_optimizer(self, opt, model, lr, betas):
        # NOTE: Might need to set more parameters
        if opt == "adam":
            return optim.Adam(model.parameters(), lr, betas)
        elif opt == "sgd":
            return optim.SGD(model.parameters(), lr)
        else:
            logger.warning(f'Optimizer "{opt}" not recognized. Falling back to adam by default')
            return optim.Adam(model.parameters(), lr, betas)
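
A concrete trainer would typically call _get_optimizer when building its training loop. A hypothetical subclass sketch (ClassifierTrainer, the loss choice, and the loop details are assumptions, not from the original source):

class ClassifierTrainer(Trainer):
    """Hypothetical subclass showing how _get_optimizer is meant to be used."""

    def train(self, loader: DataLoader, epochs: int = 1, lr: float = 1e-3):
        optimizer = self._get_optimizer("adam", self.model, lr, betas=(0.9, 0.999))
        criterion = nn.CrossEntropyLoss()
        self.model.train()
        for _ in range(epochs):
            for inputs, targets in loader:
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                optimizer.zero_grad()
                loss = criterion(self.model(inputs), targets)
                loss.backward()
                optimizer.step()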
Example 6
import torch
import torch.distributed as dist
import torch.optim as optim
from dataset import Dataset
from neural_network import NeuralNetwork
from torch.autograd import Variable
from torch.nn.functional import nll_loss
from torch.optim.optimizer import Optimizer
from utils.logging import getLogger

from . import DEFAULT_NUM_EPOCHS, DistTraining
from .epoch import Epoch
from .results import TrainingResults

logger = getLogger(__name__)


class SyncedSGD(DistTraining):
    def __init__(self) -> None:
        self.loss_fn = nll_loss

    def train(
        self,
        rank: int,
        world_size: int,
        dataset: Dataset,
        neural_network: NeuralNetwork,
        num_epochs: int = DEFAULT_NUM_EPOCHS,
        verbose: bool = False,
    ) -> TrainingResults:
Example 7
# import etcd
from etcd import Client
from utils import logging

logger = logging.getLogger('data')


class Data(Client):
    def __init__(self, host=None, port=None):
        logger.debug('Initiate Data')
        self._host = host or 'localhost'
        self._port = port or 2379
        super(Data, self).__init__(host=self._host, port=self._port)

    # def write_key(self, data, key='default_key'):
    #     self.write(key, data)

    # def read_key(self, key='default_key'):
    #     o = self.get(key).value
    #     return o

    @staticmethod
    def dump(nodes, file='/tmp/nodes.txt'):
        with open(file, 'w+') as f:
            f.writelines(['%s\n' % item for item in nodes])


def main():
    d = Data()
    d.write('{"aa": 1, "bb": 2}')
    logger.info(d.get())
Example 8
#!/usr/bin/env python3
# coding: utf-8

import os
import re
import json
import string
import html
import operator
from lxml import etree

from utils import logging
log = logging.getLogger(__file__)
# parsefiles.py
# 1. parse existing json files and extract the article content
# 2. save the article content to database

def handler(articlesdir):
    # articlesdir = os.path.join(os.path.dirname(__file__), '/articles/')
    if not os.path.exists(articlesdir):
        log.error("Articles folder does not exist!")
    
    wDict = {}
    for dirpath, _, files in os.walk(articlesdir):
        for file in files:
            jsonfile = os.path.join(dirpath, file)
            with open(jsonfile, 'r') as f:
                jd = json.load(f)
                ndata = _extract_json(jd)
                # save2db(ndata, db)
                _freq(ndata, wDict)
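
Neither _extract_json nor _freq is included in this excerpt. As a labeled guess based on the name and the shared wDict argument, _freq presumably accumulates word counts from the extracted article text, roughly:

def _freq(ndata, wdict):
    # Hypothetical sketch of the missing helper: strip punctuation, tokenize
    # the extracted article text, and accumulate per-word counts.
    cleaned = ndata.lower().translate(str.maketrans('', '', string.punctuation))
    for word in cleaned.split():
        wdict[word] = wdict.get(word, 0) + 1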
Example 9
#!/usr/bin/env python

import os
import json
# from deploy import Compose
# from play import Playbook
# from run_etcd_cmd import Deploy
from set_auth_cmd import Auth
from data import Data
from utils import logging

PATH = os.path.dirname(os.path.abspath(__file__))

logger = logging.getLogger('ssh_auth')


class SSHAuth(object):
    def __init__(self):
        self.server = 'localhost'
        self.port = 12379
        self.key = 'nodes'
        self.former = set()

    def watch(self, inst):
        inst.watch(key=self.key, timeout=0)
        cur_nodes = set(inst.get(key=self.key).value.split(','))
        logger.debug(cur_nodes)
        diff = cur_nodes.difference(self.former)
        self.former = self.former.union(cur_nodes)
        logger.debug('diff is {}'.format(diff))
        return diff
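
A caller would typically pair SSHAuth with the etcd-backed Data client from Example 7 and poll for membership changes. A hypothetical loop (the pairing and the log message are assumptions, not from the original source):

def main():
    auth = SSHAuth()
    inst = Data(host=auth.server, port=auth.port)
    while True:
        # watch() blocks until the 'nodes' key changes, then returns only
        # the node names that have not been seen before.
        for node in auth.watch(inst):
            logger.info('new node: %s', node)


if __name__ == '__main__':
    main()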
Example 10
#!/usr/bin/env python3

from utils import logging, configuration
from crawl import crawler
from graph import G

log = logging.getLogger('root')

if __name__ == '__main__':
    
    G.start()
    
    crawler.start_crawl("http://www.fluierul.ro")
else:
    print(configuration.get_prop('prop1'))
Example 11
truncfile(debuglog)

# for logging things in main
mainlog = make_logger(
    'main_log',
    frmt=frmt,
    fpath=debuglog,
    stdout=True,
    # lvl=logging.DEBUG, # use one of the following to easily set the stdout log level
    lvl=logging.INFO,
    # lvl=logging.WARNING,
    # lvl=logging.ERROR,
    # lvl=logging.CRITICAL,
    flvl=logging.INFO)

man_log = logging.getLogger("main_log.managers")
'''
man_log = make_logger('managers',
    frmt=frmt,
    fpath=debuglog,
    stdout=True,
    # lvl=logging.DEBUG, # use one of the following to easily set the stdout log level
    lvl=logging.INFO,
    # lvl=logging.WARNING,
    # lvl=logging.ERROR,
    # lvl=logging.CRITICAL,
    flvl = logging.DEBUG
    )
'''

map_log = logging.getLogger("main_log.mappings")
Example 12
from utils import logging
from graph.graph import Graph

log = logging.getLogger('graph')

G = Graph()
Example 13
    def __init__(self):
        self.logger = logging.getLogger('HttpReqHdl')
Example 14
                # Reset the form fields. If we don't do this the
                # field content will be preserved in the DOM and will be
                # re-presented when the form is next shown. This may or may not
                # be desirable. If sensitive data fields are present in the form
                # then this will return them to the default values.

                form_fields = formFields()

        return redirect, form_fields, flash

    redirect = dhc.Location(id="signin-redirect", refresh=False)
    form = form_container("Sign In", formFields(), id="form")

    return html.Div([redirect, form])

#
# python -m examples.form.signin_form
#
# http://localhost:8050
#

if __name__ == "__main__":
    print("\nvisit: http://localhost:8050\n")

    aps_log = logging.getLogger("werkzeug")
    aps_log.setLevel(logging.ERROR)

    app.layout = layout()
    app.run_server(host='0.0.0.0', debug=False, threaded=False)
Example 15
from tinygraph.tinygraphd.signals import pre_poll, post_poll, poll_error, \
    value_change, poll_start, poll_stop
from tinygraph.apps.rules.models import PackageInstanceMembership
from functools import wraps
from tinygraph.apps.data.models import Poll
from django.conf import settings
import django.dispatch
import socket
import logging
import datetime

SNMP_GETBULK_SIZE = getattr(settings, 'TINYGRAPH_SNMP_GETBULK_SIZE', 25)
DEVICE_TRANSPORT_ERROR_MESSAGE = getattr(
    settings, 'TINYGRAPH_DEVICE_TRANSPORT_ERROR_MESSAGE',
    'Could not connect to device.')

logger = logging.getLogger('tinygraph.tinygraphd.PollDaemon')

VARIABLE_DATA_VALUE_TYPES = (
    'time_ticks',
    'counter',
    'gauge',
)


def watch_for_exceptions(f):
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            logger.critical('%s() threw an exception: %s' % (f.__name__, e))

    return wraps(f)(wrapper)
Example 16
    def __init__(self, cfg_path):
        """
        This is a TCP relay with a WebSocket client part (not RFC-compliant in the
        data phase) when configured to use WebSocket mode for outbound connections.
        The relay needs three URIs to work; a URI looks like this:
            'tcp://18.220.245.147:1080'
            The first part of the URI is the connection type; 'tcp' and 'ws' are supported.
            The second part is the host, either an address or a hostname.
            The last part is the port.
        self.debug = True
        self.logger = logging.getLogger('RelayLocal')
        self.configs = load_relay_cfg(cfg_path)
        if self.configs["EnableSSL"]:
            #######################
            rand_str = ''.join(
                random.choice(string.ascii_uppercase + string.digits)
                for _ in range(16))
            self.communication_key = hashlib.sha256(
                rand_str.encode('utf8')).digest()

            self.IV = ''.join(
                random.choice(string.ascii_uppercase + string.digits)
                for _ in range(16)).encode('utf8')
            # Initialization vector should always be 16 bytes
            self.aes_obj = AES.new(self.communication_key, AES.MODE_CFB,
                                   self.IV)
            self.context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
            self.context.check_hostname = False
            self.context.load_verify_locations(cafile='cacert.crt')
        #######################
        self.tbb_ctrl_hdl = TBBCtrlPortHDL(self.configs["TBBAuthCookiePath"])
        self.local_listen_conn_uri = URI(self.configs["LocalListenConnURI"])
        self.local_listen_config_uri = URI(
            self.configs["LocalListenConfigURI"])
        self.local_listen_ws_url = URI(self.configs["LocalListenWSURI"])
        self.ticket_hdl = TicketHDL(self.configs['BrokerTicketKeyUrl'])
        self.remote_uri = None
        self.listen_data_socket = self.init_listen_socket(
            self.local_listen_conn_uri.host, self.local_listen_conn_uri.port)

        self.listen_config_socket = self.init_listen_socket(
            self.local_listen_config_uri.host,
            self.local_listen_config_uri.port)
        self.listen_ws_socket = self.init_listen_socket(
            self.local_listen_ws_url.host, self.local_listen_ws_url.port)
        self.long_live_sockets = []
        self.http_req_hdl = HttpReqHdl()
        # auxiliary data structures
        self.connection_list = [
            self.listen_data_socket, self.listen_config_socket,
            self.listen_ws_socket
        ]
        self.ws_helper_dict = {}
        self.config_sockets = []
        self.lr_dict = {}
        self.rl_dict = {}
        self.remote_end_failed_cnt = 0
        self.socks5_parser = Socks5ProtocolHdl()
        self.socks5_parser.enable_socks5_routing = self.configs[
            "EnableSocks5Routing"]
        self.socks5_parser.enable_per_tab_instance = self.configs[
            "EnablePerTabPerHostInstance"]

        self.hebtor_helper = HebtorProtoHdl()
        self.stop = False
        self.cfg_timestamp = 0
        self.url_to_jump = ""
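
The URI helper used throughout __init__ is not shown in this excerpt. A hypothetical sketch consistent with the docstring's 'type://host:port' format, built on urllib.parse (the attribute names are assumptions):

from urllib.parse import urlsplit

class URI:
    """Hypothetical sketch of the URI helper referenced above: splits a string
    like 'tcp://18.220.245.147:1080' into connection type, host, and port."""

    def __init__(self, uri_string):
        parts = urlsplit(uri_string)
        self.scheme = parts.scheme      # connection type: 'tcp' or 'ws'
        self.host = parts.hostname      # address or hostname
        self.port = parts.port          # integer port


uri = URI('tcp://18.220.245.147:1080')
assert (uri.scheme, uri.host, uri.port) == ('tcp', '18.220.245.147', 1080)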