Example #1
 def __init__(self, database_config):
     connection_string = self.__create_connection_string(database_config)
     engine = create_engine(connection_string)
     Base.metadata.create_all(engine)
     session = sessionmaker(bind=engine)
     self.session = session()
     self.logger = structlog.get_logger()
Example #2
    def __init__(self, notifier_config):
        self.logger = structlog.get_logger()
        self.twilio_configured = self.__validate_required_config('twilio', notifier_config)
        if self.twilio_configured:
            self.twilio_client = TwilioNotifier(
                twilio_key=notifier_config['twilio']['required']['key'],
                twilio_secret=notifier_config['twilio']['required']['secret'],
                twilio_sender_number=notifier_config['twilio']['required']['sender_number'],
                twilio_receiver_number=notifier_config['twilio']['required']['receiver_number']
            )

        self.slack_configured = self.__validate_required_config('slack', notifier_config)
        if self.slack_configured:
            self.slack_client = SlackNotifier(
                slack_key=notifier_config['slack']['required']['key'],
                slack_channel=notifier_config['slack']['required']['channel']
            )

        self.gmail_configured = self.__validate_required_config('gmail', notifier_config)
        if self.gmail_configured:
            self.gmail_client = GmailNotifier(
                username=notifier_config['gmail']['required']['username'],
                password=notifier_config['gmail']['required']['password'],
                destination_addresses=notifier_config['gmail']['required']['destination_emails']
            )

        self.integram_configured = self.__validate_required_config('integram', notifier_config)
        if self.integram_configured:
            self.integram_client = IntegramNotifier(
                url=notifier_config['integram']['required']['url']
            )
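The constructor above assumes a nested notifier_config mapping. A minimal sketch of the expected shape, with keys taken directly from the lookups above and placeholder values:

notifier_config = {
    'twilio': {'required': {
        'key': '<key>', 'secret': '<secret>',
        'sender_number': '<sender>', 'receiver_number': '<receiver>'}},
    'slack': {'required': {'key': '<key>', 'channel': '#alerts'}},
    'gmail': {'required': {
        'username': '<user>', 'password': '<password>',
        'destination_emails': ['<recipient@example.com>']}},
    'integram': {'required': {'url': '<webhook-url>'}},
}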
Example #3
File: server.py Project: insolite/mergeit
def run(host, port, shell_host, shell_port, project_config, log,
        application_factory=web.Application,
        config_factory=Config,
        config_source_factory=YamlFileConfigSource,
        push_handler_factory=PushHandler,
        repo_manager_factory=RepoManager,
        cmd_factory=MergeitShell,
        telnet_shell_factory=CmdTelsh,
        telnet_server_factory=TelnetServer):
    logger = get_logger()
    loop = asyncio.get_event_loop()
    app = application_factory()
    config = config_factory(config_source_factory(project_config))
    init_logging(log, config.get('name'))
    app.router.add_route('POST', '/push', lambda request: gitlab_push(request, config))
    logger.info('application_start')
    loop.run_until_complete(loop.create_server(app.make_handler(), host, port))
    shell = cmd_factory(config=config,
                        push_handler_factory=push_handler_factory,
                        repo_manager_factory=repo_manager_factory,
                        forward=True)
    shell_factory = (lambda server, stream=TelnetShellStream, log=logging:
                     telnet_shell_factory(server, stream, log, cmd=shell))
    loop.run_until_complete(
        loop.create_server(lambda: telnet_server_factory(log=logging.getLogger(telnetlib3.__name__), shell=shell_factory),
                           shell_host, shell_port)
    )
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        logger.info('application_interrupt')
Example #4
def test_mozdef(app):
    sent = []
    orig_MozDefEvent = mozdef_client.MozDefEvent
    with mock.patch('mozdef_client.MozDefEvent') as MozDefEvent:
        def constructor(target):
            msg = orig_MozDefEvent(target)
            msg.send = lambda: sent.append(msg)
            return msg
        MozDefEvent.side_effect = constructor

        logger = structlog.get_logger(__name__)
        logger.warn("unseen")
        logger.warn("test message", mozdef=True)
        logger.warn("with attr", attr="foo", mozdef=True)

    # check that 'unseen' wasn't seen, since mozdef was not true
    eq_({m.summary for m in sent}, {"test message", "with attr"})

    # check a few other fields in one of the messages
    msg = sent[0]
    eq_(msg.source, __name__)
    eq_(msg._severity, orig_MozDefEvent.SEVERITY_WARNING)
    eq_(msg.tags, ['relengapi'])

    # and verify that the attribute showed up in details
    assert any([m.details.get('attr') == 'foo' for m in sent])
Example #5
 def __init__(self, username, password, destination_addresses):
     self.logger = structlog.get_logger()
     smtp_server = 'smtp.gmail.com:587'
     self.smtp_handler = smtplib.SMTP(smtp_server)
     self.username = username
     self.password = password
     self.destination_addresses = ','.join(destination_addresses)
Example #6
def attach_console(request):

    event_log = request.app.get('smartmob.event_log') or structlog.get_logger()

    # Must connect here with a WebSocket.
    if request.headers.get('Upgrade', '').lower() != 'websocket':
        pass

    # Resolve the process.
    processes = request.app.setdefault('smartmob.processes', {})
    slug = request.match_info['slug']
    if slug not in processes:
        raise web.HTTPNotFound

    # WebSocket handshake.
    stream = web.WebSocketResponse()
    yield from stream.prepare(request)

    # TODO: retrieve data from the process and pipe it to the WebSocket.
    #       Strawboss implementation doesn't provide anything for this at the
    #       moment, so we'll have to do this later.

    # Log the request.
    event_log.info('process.attach', slug=slug)

    # Close the WebSocket.
    yield from stream.close()

    # Required by the framework, but I don't know why.
    return stream
Example #7
def main():
    """Initializes the application
    """
    # Load settings and create the config object
    config = Configuration()
    settings = config.settings

    # Set up logger
    logs.configure_logging(settings['log_level'], settings['log_mode'])
    logger = structlog.get_logger()

    # Configure and run configured behaviour.
    exchange_interface = ExchangeInterface(config.exchanges)
    notifier = Notifier(config.notifiers)

    behaviour = Behaviour(
        config,
        exchange_interface,
        notifier
    )

    while True:
        behaviour.run(settings['market_pairs'], settings['output_mode'])
        logger.info("Sleeping for %s seconds", settings['update_interval'])
        time.sleep(settings['update_interval'])
Example #8
def backup():
    logger = get_logger(__name__).bind(
        action='backup_db'
    )
    logger.info('starting')
    command = 'pg_dump -U ' \
              '{username} -h {host} -p {port} -F tar -O -d {db_name} -f {file} -w'
    if settings.POSTGRES_PASSWORD:
        command = 'PGPASSWORD={password} ' + command

    file_path = '/usr/src/app/core/dump_file_{unique_id}.tar'.format(unique_id=uuid.uuid4())

    full_command = command.format(password=settings.POSTGRES_PASSWORD,
                                  username=settings.POSTGRES_USER,
                                  host=settings.POSTGRES_HOST,
                                  port=settings.POSTGRES_PORT,
                                  file=file_path,
                                  db_name=settings.POSTGRES_DB
                                  )

    logger.debug('full_command', command=full_command)
    os.system(full_command)

    logger.debug('finished_successfully')

    return file_path
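Note that the return value of os.system above is ignored, so a failed pg_dump still logs 'finished_successfully'. A hedged sketch (not part of the original code) that surfaces failures via subprocess instead:

import subprocess

def run_dump(full_command):
    # shell=True keeps the original single-string command; check=True raises
    # subprocess.CalledProcessError when pg_dump exits non-zero.
    subprocess.run(full_command, shell=True, check=True)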
Example #9
def restore_db(file_name):
    logger = get_logger(__name__).bind(
        action='restore_db'
    )
    logger.info('starting')
    command = "pg_restore --verbose --clean --no-acl --no-owner" \
              " -h {host}  -p {port}  -U {username} -d {db_name} {file_path}"

    if settings.POSTGRES_PASSWORD:
        command = 'PGPASSWORD={password} ' + command

    file_path = "/usr/src/app/core/{}".format(file_name)

    full_command = command.format(
        password=settings.POSTGRES_PASSWORD,
        username=settings.POSTGRES_USER,
        host=settings.POSTGRES_HOST,
        port=settings.POSTGRES_PORT,
        file_path=file_path,
        db_name=settings.POSTGRES_DB
    )
    logger.debug('full_command', command=full_command)

    os.system(full_command)

    logger.debug('finished successfully')


#'PGPASSWORD=mysecretpassword pg_dump -U postgres -h db -p 5432 -d slash_air -w'
# pg_dump -h db -p 5432 -F c -O -U postgres postgres > backup.dump
# pg_restore --verbose --clean --no-acl --no-owner -h db  -p 5432  -U postgres -d postgres backup.dump
# /usr/src/app/core
Example #10
File: gh2rtm.py Project: zoni/rtm
def main(args):
    init_logger(level=args.log_level.upper())

    GITHUB_ACCESS_TOKEN = os.environ.get('GITHUB_ACCESS_TOKEN', None)
    if GITHUB_ACCESS_TOKEN is None:
        print("Missing GITHUB_ACCESS_TOKEN!", file=sys.stderr)
        sys.exit(1)

    RTM_API_KEY = os.environ.get('RTM_API_KEY', None)
    RTM_SHARED_SECRET = os.environ.get('RTM_SHARED_SECRET', None)
    RTM_TOKEN = os.environ.get('RTM_TOKEN', None)

    if None in (RTM_API_KEY, RTM_SHARED_SECRET, RTM_TOKEN):
        print("Missing RTM_API_KEY, RTM_SHARED_SECRET or RTM_TOKEN!", file=sys.stderr)
        sys.exit(1)

    log = get_logger()
    log = log.bind(component="main")
    log.info("gh.init")

    gh = github.GitHub(access_token=GITHUB_ACCESS_TOKEN)
    log.info("gh.get_issues")
    issues = get_github_issues(gh, GITHUB_ISSUE_QUERIES)

    log.info("rtm.init")
    rtm = createRTM(RTM_API_KEY, RTM_SHARED_SECRET, RTM_TOKEN)

    log.info("complete_missing_issues.start")
    complete_missing_issues(rtm, issues)
    log.info("complete_missing_issues.finish")
    log.info("add_new_issues.start")
    add_new_issues(rtm, issues)
    log.info("add_new_issues.finish")
Example #11
File: log.py Project: Gifflen/fleece
def get_logger(level=logging.DEBUG, name=None, stream=DEFAULT_STREAM):
    """Configure and return a logger with structlog and stdlib."""
    wrap_dict_class = structlog.threadlocal.wrap_dict(dict)
    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.add_log_level,
            structlog.stdlib.add_logger_name,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.TimeStamper(fmt='iso'),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            structlog.processors.JSONRenderer(sort_keys=True)
        ],
        context_class=wrap_dict_class,
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True)
    log = structlog.get_logger(name)
    if not _has_streamhandler(logging.getLogger(name),
                              level=level, stream=stream):
        streamhandler = logging.StreamHandler(stream)
        streamhandler.setLevel(level)
        streamhandler.setFormatter(logging.Formatter(fmt=LOG_FORMAT))
        log.addHandler(streamhandler)
    log.setLevel(level)
    return log
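A minimal usage sketch for the helper above (the logger name and stream are assumptions):

import logging
import sys

log = get_logger(level=logging.INFO, name='myservice', stream=sys.stdout)
log.info('service_started', version='1.2.3')  # emitted as sorted-key JSON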
Example #12
    def __init__(self, core, logical_device):
        try:
            self.core = core
            self.local_handler = core.get_local_handler()
            self.logical_device_id = logical_device.id

            self.root_proxy = core.get_proxy('/')
            self.flows_proxy = core.get_proxy(
                '/logical_devices/{}/flows'.format(logical_device.id))
            self.groups_proxy = core.get_proxy(
                '/logical_devices/{}/flow_groups'.format(logical_device.id))
            self.self_proxy = core.get_proxy(
                '/logical_devices/{}'.format(logical_device.id))

            self.flows_proxy.register_callback(
                CallbackType.POST_UPDATE, self._flow_table_updated)
            self.groups_proxy.register_callback(
                CallbackType.POST_UPDATE, self._group_table_updated)
            self.self_proxy.register_callback(
                CallbackType.POST_ADD, self._port_added)
            self.self_proxy.register_callback(
                CallbackType.POST_REMOVE, self._port_removed)

            self.port_proxy = {}

            self.event_bus = EventBusClient()
            self.packet_in_subscription = self.event_bus.subscribe(
                topic='packet-in:{}'.format(logical_device.id),
                callback=self.handle_packet_in_event)

            self.log = structlog.get_logger(logical_device_id=logical_device.id)

            self._routes = None
        except Exception as e:
            self.log.exception('init-error', e=e)
Example #13
File: gh2rtm.py Project: zoni/rtm
def get_github_issues(gh, queries):
    """
    Return a dict of issues found matching specified filters.

    :param gh: github.GitHub client
    :param queries: A list containing individual search queries to perform
    :return: A dictionary whose elements are in the form of
        {'repo#000: Summary': {..data from github}}
    """
    log = get_logger()
    log = log.bind(component='get_github_issues')

    issues = {}
    results = []
    for query in queries:
        log.debug("gh.issues.get", q=query)
        items = gh.issues.get(**query)
        log.debug("gh.issues.result", q=query, result_count=len(items))
        results += items

    for issue in results:
        title = "{repository[name]}#{number}: {title}".format(**issue).strip()
        log.debug("gh.parse_issue", title=title)
        if title not in issues:
            issues[title] = issue
    return issues
Example #14
    def send_invite_email(self):
        from sentry.utils.email import MessageBuilder

        context = {
            'email': self.email,
            'organization': self.organization,
            'url': absolute_uri(reverse('sentry-accept-invite', kwargs={
                'member_id': self.id,
                'token': self.token,
            })),
        }

        msg = MessageBuilder(
            subject='Join %s in using Sentry' % self.organization.name,
            template='sentry/emails/member-invite.txt',
            html_template='sentry/emails/member-invite.html',
            type='organization.invite',
            context=context,
        )

        try:
            msg.send_async([self.get_email()])
        except Exception as e:
            logger = get_logger(name='sentry.mail')
            logger.exception(e)
Example #15
def delete_process(request):

    event_log = request.app.get('smartmob.event_log') or structlog.get_logger()

    # Resolve the process.
    processes = request.app.setdefault('smartmob.processes', {})
    slug = request.match_info['slug']
    try:
        process = processes[slug]
    except KeyError:
        raise web.HTTPNotFound

    # Log the request.
    event_log.info('process.delete', slug=slug)

    # Kill the process and wait for it to complete.
    process['stop'].set_result(None)
    try:
        yield from process['task']
    except Exception:  # TODO: be more accurate!
        pass

    # Erase bookkeeping.
    del processes[slug]

    # Format the response.
    return web.Response(
        content_type='application/json',
        body=json.dumps({
            # ...
        }).encode('utf-8'),
    )
Example #16
File: rmq.py Project: meantheory/wrabbit
    def __init__(self, **options):

        #: logging facility
        self.logger = get_logger()

        #: keep track of retry attempts
        self.retry_attempt = 0

        #: retry limit
        self.retry_limit = options.get('retry_limit', 30)

        #: retry interval
        self.retry_interval = options.get('retry_interval', 3)

        #: rabbit mq connection
        self.connection = None

        #: rabbit mq channnels
        self.channels = dict()

        #: services callback
        self.services_callback = None

        #: keep track of whether or not we are retrying
        self.retry_mode = False
Example #17
    def emit(self, record, logger=get_logger()):
        # If anyone wants to use the 'extra' kwarg to provide context within
        # structlog, we have to strip all of the default attributes from
        # a record because the RootLogger will take the 'extra' dictionary
        # and just turn them into attributes.
        kwargs = {
            k: v for k, v in six.iteritems(vars(record)) if k not in throwaways and v is not None
        }
        kwargs.update({
            'level': record.levelno,
            'event': record.msg,
        })

        if record.args:
            # record.args inside of LogRecord.__init__ gets unrolled
            # if it's the shape `({},)`, a single item dictionary.
            # so we need to check for this, and re-wrap it because
            # down the line of structlog, it's expected to be this
            # original shape.
            if isinstance(record.args, (tuple, list)):
                kwargs['positional_args'] = record.args
            else:
                kwargs['positional_args'] = (record.args, )

        logger.log(**kwargs)
Example #18
def update_logging(instance_id, vcore_id):
    """
    Add the instance id and vcore id to the structured logger
    :param instance_id: The assigned instance id
    :param vcore_id: The assigned vcore id
    :return: structured logger
    """
    def add_exc_info_flag_for_exception(_, name, event_dict):
        if name == 'exception':
            event_dict['exc_info'] = True
        return event_dict

    def add_instance_id(_, __, event_dict):
        event_dict['instance_id'] = instance_id
        return event_dict

    def add_vcore_id(_, __, event_dict):
        event_dict['vcore_id'] = vcore_id
        return event_dict

    processors = [
        add_exc_info_flag_for_exception,
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        add_instance_id,
        add_vcore_id,
        FluentRenderer(),
    ]
    structlog.configure(processors=processors)

    # Mark first line of log
    log = structlog.get_logger()
    log.info("updated-logger")
    return log
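A usage sketch for the function above (the ids are placeholders):

log = update_logging('instance-1', 'vcore-0001')
log.info('heartbeat')  # event dict now carries instance_id and vcore_id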
Example #19
 def __init__(self, handler, alloc_id, traffic_descriptor, entity_id,
              name=None, vont_ani=None, is_mock=False):
     super(OnuTCont, self).__init__(alloc_id, traffic_descriptor,
                                    name=name, vont_ani=vont_ani)
     self._handler = handler
     self._is_mock = is_mock
     self._entity_id = entity_id
     self.log = structlog.get_logger(device_id=handler.device_id, alloc_id=alloc_id)
Example #20
 def __init__(self, namespace=None, source=None):
     if not namespace:
         try:
             frame = inspect.currentframe(1)
             namespace = frame.f_globals['__name__']
         except:
             namespace = 'unknown'
     self._log = structlog.get_logger(namespace=namespace)
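inspect.currentframe(1) only accepts a depth argument on CPython 2, where inspect.currentframe is an alias for sys._getframe; on Python 3 it takes no arguments. A minimal Python 3 sketch of the same caller-namespace lookup:

import sys
import structlog

def _caller_namespace():
    # sys._getframe(1) is the frame that called this helper; read its
    # module name, falling back to 'unknown' as the original does.
    try:
        return sys._getframe(1).f_globals.get('__name__', 'unknown')
    except ValueError:
        return 'unknown'

log = structlog.get_logger(namespace=_caller_namespace())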
Example #21
    def initialize(self, dispatcher, logger=None):
        '''Initialize the handler.

        :param dispatcher: a :class:`~utils.dispatch.Dispatcher` instance
        :param logger: logger instance
        '''
        self.dispatcher = dispatcher
        self.logger = logger or structlog.get_logger(__name__)
Example #22
File: log.py Project: bugrevelio/Anvil
def obtain_logger(name):
    """Get's a logger and attaches the correct DCC compatible Handler.
    Args:
        name (str): Name of the logger to get / create.
    Returns:
        Logger: Logger.
    """
    return structlog.get_logger(name)
Example #23
 def __init__(self, survey, ftpconn, base_url, endpoint_name):
     self.logger = get_logger()
     self.survey = survey
     self.tx_id = ""
     self._setup_logger()
     self.ftp = ftpconn
     self._base_url = base_url
     self._endpoint_name = endpoint_name
Example #24
def _create_logger(request):
    logger = structlog.get_logger("warehouse.request")

    # This has to use **{} instead of just a kwarg because request.id is not
    # an allowed kwarg name.
    logger = logger.bind(**{"request.id": request.id})

    return logger
Example #25
    def __init__(self, parent, **kwargs):
        # TODO: Weed out those properties supported by common 'Port' object
        assert parent, 'parent is None'
        assert 'port_no' in kwargs, 'Port number not found'

        self.log = structlog.get_logger(port_no=kwargs.get('port_no'))
        self.log.info('creating')

        self._port_no = kwargs.get('port_no')
        self._name = kwargs.get('name', 'nni-{}'.format(self._port_no))
        self._port = None
        self._logical_port = None
        self._parent = parent

        self._sync_tick = 10.0
        self._sync_deferred = None

        self._stats_tick = 5.0
        self._stats_deferred = None

        self._deferred = None
        self._state = NniPort.State.INITIAL

        # Local cache of NNI configuration

        self._enabled = None
        self._ianatype = '<type xmlns:ianaift="urn:ietf:params:xml:ns:yang:iana-if-type">ianaift:ethernetCsmacd</type>'

        # And optional parameters
        # TODO: Currently cannot update admin/oper status, so create this enabled and active
        # self._admin_state = kwargs.pop('admin_state', AdminState.UNKNOWN)
        # self._oper_status = kwargs.pop('oper_status', OperStatus.UNKNOWN)
        self._admin_state = AdminState.ENABLED
        self._oper_status = OperStatus.ACTIVE

        self._label = kwargs.pop('label', 'NNI port {}'.format(self._port_no))
        self._mac_address = kwargs.pop('mac_address', '00:00:00:00:00:00')
        # TODO: Get with JOT and find out how to pull out MAC Address via NETCONF
        # TODO: May need to refine capabilities into current, advertised, and peer

        self._ofp_capabilities = kwargs.pop('ofp_capabilities', OFPPF_100GB_FD | OFPPF_FIBER)
        self._ofp_state = kwargs.pop('ofp_state', OFPPS_LIVE)
        self._current_speed = kwargs.pop('current_speed', OFPPF_100GB_FD)
        self._max_speed = kwargs.pop('max_speed', OFPPF_100GB_FD)
        self._device_port_no = kwargs.pop('device_port_no', self._port_no)

        # Statistics
        self.rx_packets = 0
        self.rx_bytes = 0
        self.tx_packets = 0
        self.tx_bytes = 0
        self.rx_dropped = 0
        self.rx_errors = 0
        self.rx_bcast = 0
        self.rx_mcast = 0
        self.tx_dropped = 0
        self.tx_bcast = 0
        self.tx_mcast = 0
Example #26
    def __init__(self, stream, dispatcher, container):
        self._dispatcher = dispatcher
        self._stream = stream
        self._stream.read_until(FRAME_END, self._read_frame)
        self.container = container

        self.data = ''

        self.logger = structlog.get_logger(__name__)
Example #27
    def __init__(self, config, max_attempts=5, wait_time=0.5):
        super(IPMIToolProvider, self).__init__(config)

        # maximum number of times to try/validate a command
        self.max_attempts = max_attempts
        # wait time between attempts
        self.wait_time = wait_time

        self.log = structlog.get_logger()
Example #28
    def get(self, req):
        logger = get_logger(__name__)
        logger.info('request_data', data=req.GET)
        backup_list = self.list_backups()
        if req.GET.get('path'):
            self.restore_db(req.GET['path'])

        return render(req, 'home.html',
                      {'backup_list': backup_list})
Example #29
    def __init__(self, behaviour_config, exchange_interface, strategy_analyzer, notifier,
        db_handler):

        self.logger = structlog.get_logger()
        self.behaviour_config = behaviour_config
        self.exchange_interface = exchange_interface
        self.strategy_analyzer = strategy_analyzer
        self.notifier = notifier
        self.db_handler = db_handler
Example #30
 def __call__(self, *args, **kwargs):
     if app.config["TESTING"]:
         # When testing, celery tasks are called eagerly, from the same thread
         # so don't push an app context, the request's app context is already there
         return self.run(*args, **kwargs)
     else:
         with app.app_context():
             g.log = structlog.get_logger().new()
             return self.run(*args, **kwargs)
Example #31
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.http import HttpResponse
from django.core.paginator import Paginator, PageNotAnInteger, InvalidPage, EmptyPage
from django.db.models import Q

from ..views import staff_member_required
from ...table.models import Table
from ...decorators import user_trail
import json

from structlog import get_logger

logger = get_logger(__name__)


@staff_member_required
def list(request):
    try:
        options = Table.objects.all().order_by('-id')
        page = request.GET.get('page', 1)
        paginator = Paginator(options, 10)
        try:
            options = paginator.page(page)
        except PageNotAnInteger:
            options = paginator.page(1)
        except InvalidPage:
            options = paginator.page(1)
        except EmptyPage:
            options = paginator.page(paginator.num_pages)
        data = {
Example #32
"""p2 replication controller"""
import copy
from shutil import copyfileobj
from time import time

from structlog import get_logger

from p2.components.replication.constants import (TAG_BLOB_SOURCE_UUID,
                                                 TAG_REPLICATION_TARGET)
from p2.core.components.base import ComponentController
from p2.core.models import Blob, Volume

LOGGER = get_logger()


# pylint: disable=too-few-public-methods
class ReplicationController(ComponentController):
    """Replicate Blobs 1:1 between volumes"""

    template_name = 'components/replication/card.html'
    form_class = 'p2.components.replication.forms.ReplicationForm'

    @property
    def target_volume(self):
        """Get Target volume"""
        return Volume.objects.get(
            pk=self.instance.tags.get(TAG_REPLICATION_TARGET))

    def _get_target_blob(self, source_blob):
        target_volume = self.target_volume
        # Check if there's a blob that's our source UUID as attribute
Example #33
    def __init__(self):
        """Initializes Breakout class
        """

        self.logger = structlog.get_logger()
Example #34
from structlog import get_logger

import os
import json
from urllib.parse import urlparse

from onelogin.saml2.auth import OneLogin_Saml2_Auth
from onelogin.saml2.settings import OneLogin_Saml2_Settings
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from onelogin.saml2.idp_metadata_parser import OneLogin_Saml2_IdPMetadataParser

from . import NEED_TO_SIGN_IN_MSG
from .flash import flash

saml_routes = RouteTableDef()
logger = get_logger('fsdr-ui')


# Check if the user is logged in
async def is_logged_in(request):
    session = await get_session(request)
    return 'samlUserdata' in session


async def get_role_id(request):
    session = await get_session(request)
    if not 'samlUserdata' in session:
        redirect_to_login(request)
    roleids = session['samlUserdata']['roleID']
    # TODO extra checking here?
    return roleids[0]
Example #35
import random

import structlog

log = structlog.get_logger()


class Graph:
    def __init__(self, cost_matrix: list):
        self.matrix = cost_matrix
        self.rank = len(self.matrix)
        self.pheromone = [[1 / self.rank**2 for j in range(self.rank)]
                          for i in range(self.rank)]


class ACO:
    def __init__(
        self,
        ant_count: int,
        run_without_improvement: int,
        alpha: float,
        beta: float,
        rho: float,
        q: float,
        pheromone_strategy: str,
    ):
        """
        :param ant_count:
        :param run_without_improvement:
        :param alpha: relative importance of pheromone
        :param beta: relative importance of heuristic information (distance)
Example #36
from raiden.constants import NULL_ADDRESS
from raiden.exceptions import (
    AddressWrongContract,
    ContractVersionMismatch,
    TransactionThrew,
    UnknownAddress,
)
from raiden.network.rpc.client import check_address_has_code
from raiden.network.rpc.smartcontract_proxy import ContractProxy
from raiden.network.rpc.transactions import check_transaction_threw
from raiden.settings import EXPECTED_CONTRACTS_VERSION
from raiden.utils import compare_versions, pex, privatekey_to_address
from raiden_contracts.constants import CONTRACT_ENDPOINT_REGISTRY
from raiden_contracts.contract_manager import ContractManager

log = structlog.get_logger(__name__)  # pylint: disable=invalid-name


class Discovery:
    """On chain smart contract raiden node discovery: allows registering
    endpoints (host, port) for your ethereum-/raiden-address and looking up
    endpoints for other ethereum-/raiden-addressess.
    """

    def __init__(
            self,
            jsonrpc_client,
            discovery_address,
            contract_manager: ContractManager,
    ):
        contract = jsonrpc_client.new_contract(
Example #37
import structlog

from aiohttp import web
from urllib.parse import unquote

from autofin import settings, models
from autofin.contact import ContactMethod
from autofin.error import capture_error, capture_error_context
from autofin.billing import InvoiceRetriever
from autofin.contact import MessageFormatter

LOGGER = structlog.get_logger(__name__)


async def on_sms_received(request):
    """HTTP handler for the webhook that is called when
    a SMS is received."""

    try:
        context = dict(raw_body=await request.text())
        capture_error_context(**context)
        logger = LOGGER.bind(**context)

        data = await request.post()

        from_ = data.get("From")
        if not from_:
            logger.error("Received bad request, missing from")
            return web.Response(status=400, text="Missing 'From'")

        body = data.get("Body")
Example #38
from typing import Callable, Optional

import numpy as np
import structlog
import torch
from torch.optim import Optimizer
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.dataloader import default_collate
from tqdm import tqdm

from baal.utils.array_utils import stack_in_memory
from baal.utils.cuda_utils import to_cuda
from baal.utils.iterutils import map_on_tensor
from baal.utils.metrics import Loss

log = structlog.get_logger("ModelWrapper")


def _stack_preds(out):
    if isinstance(out[0], Sequence):
        out = [torch.stack(ts, dim=-1) for ts in zip(*out)]
    else:
        out = torch.stack(out, dim=-1)
    return out


class ModelWrapper:
    """
    Wrapper created to ease the training/testing/loading.

    Args:
Example #39
File: base.py Project: ypsilik/nvchecker
# MIT licensed
# Copyright (c) 2019-2020 lilydjwg <*****@*****.**>, et al.

import structlog
from typing import Optional, Dict, Mapping
import json as _json

from ..ctxvars import tries, proxy, user_agent

logger = structlog.get_logger(logger_name=__name__)

class Response:
  '''The response of an HTTP request.

  .. py:attribute:: body
     :type: bytes

  .. py:attribute:: headers
     :type: Mapping[str, str]
  '''
  def __init__(
    self,
    headers: Mapping[str, str],
    body: bytes,
  ) -> None:
    self.headers = headers
    self.body = body

  def json(self):
    '''Convert response content to JSON.'''
    return _json.loads(self.body.decode('utf-8'))
Example #40
File: worker.py Project: netsia/voltha_doc
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re

from structlog import get_logger
from twisted.internet import reactor
from twisted.internet.base import DelayedCall
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
from simplejson import dumps, loads

from common.utils.asleep import asleep

log = get_logger()


class Worker(object):
    """
    Worker side of the coordinator. An instance of this class runs in every
    voltha instance. It monitors what work is assigned to this instance by
    the leader. This is all done via consul.
    """

    ASSIGNMENT_EXTRACTOR = '^%s(?P<member_id>[^/]+)/(?P<work_id>[^/]+)$'

    # Public methods:

    def __init__(self, instance_id, coordinator):
Example #41
import traceback
from asyncio import Future
from collections import OrderedDict, namedtuple
from contextlib import suppress
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union, cast

from structlog import get_logger

from txstratum.constants import DEFAULT_EXPECTED_MINING_TIME

if TYPE_CHECKING:
    from asyncio.events import AbstractEventLoop

    from hathorlib import BaseTransaction

logger = get_logger()


class Periodic:
    """Create an asyncio task that calls an async function periodically.

    The function is called every `T` seconds, not taking into consideration how long the last call took.
    If the duration of a call is longer than `T` seconds, the function will be called immediately after it finishes.

    Adapted from:
    - https://stackoverflow.com/a/37514633/947511
    - https://stackoverflow.com/a/55505152/947511
    """

    def __init__(self,
                 afunc: Callable[..., Awaitable[None]],
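The listing is truncated above. Purely as an illustration of the scheduling the docstring describes (not the project's implementation), a minimal sketch:

import asyncio

async def periodic_sketch(afunc, interval: float) -> None:
    # gather() waits for both the call and the sleep, so the next call
    # starts after max(call duration, interval): every `interval` seconds,
    # or immediately after a call that ran longer than that.
    while True:
        await asyncio.gather(afunc(), asyncio.sleep(interval))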
Example #42
# -*- coding: utf-8 -*-
from typing import Optional
from reprlib import repr as _repr
from time import perf_counter as perf

import structlog

from ..typedefs import HTTPRequest
from ..typedefs import HTTPResponse
from ..errors import JsonRpcError

logger = structlog.get_logger('jussi')


async def initialize_jussi_request(
        request: HTTPRequest) -> Optional[HTTPResponse]:
    # parse jsonrpc
    try:
        request.jsonrpc
    except JsonRpcError as e:
        return e.to_sanic_response()
    except Exception as e:
        return JsonRpcError(http_request=request,
                            exception=e).to_sanic_response()


async def finalize_jussi_response(request: HTTPRequest,
                                  response: HTTPResponse) -> None:
    # pylint: disable=bare-except
    try:
        response.headers['x-jussi-request-id'] = request.jussi_request_id
Example #43
def create_app(cmdline=False, test_config=None):
    app = Flask(__name__)
    relengapi_logging.configure_logging(app)
    logger = structlog.get_logger()

    env_var = 'RELENGAPI_SETTINGS'
    if test_config:
        app.config.update(**test_config)
    else:
        if env_var in os.environ and os.environ[env_var]:
            app.config.from_envvar(env_var)
        else:
            logger.warning(
                "Using default settings; to configure relengapi, set "
                "%s to point to your settings file" % env_var)

    # reconfigure logging now that we have loaded configuration
    relengapi_logging.configure_logging(app)
    # and re-construct the logger to get updated configuration
    logger = structlog.get_logger()

    # add the necessary components to the app
    app.db = db.make_db(app)
    app.celery = celery.make_celery(app)
    layout.init_app(app)
    auth.init_app(app)
    api.init_app(app)
    aws.init_app(app)
    memcached.init_app(app)

    app.relengapi_blueprints = {}
    for bp in blueprints:
        if cmdline:
            logger.info("registering blueprint %s", bp.name)
        app.register_blueprint(bp, url_prefix='/%s' % bp.name)
        app.relengapi_blueprints[bp.name] = bp

    # set up a random session key if none is specified
    if not app.config.get('SECRET_KEY'):
        logger.warning(
            "setting per-process session key - sessions will be reset on "
            "process restart")
        app.secret_key = os.urandom(24)

    request_id_header = app.config.get('REQUEST_ID_HEADER')

    def get_req_id_uuid():
        return str(uuid.uuid4())

    def get_req_id_header():
        return request.headers.get(request_id_header) or get_req_id_uuid()

    get_req_id = get_req_id_header if request_id_header else get_req_id_uuid

    @app.before_request
    def setup_request():
        # set up `g`
        g.db = app.db
        g.request_id = get_req_id()

        # reset the logging context, deleting any info for the previous request
        # in this thread and binding new
        relengapi_logging.reset_context(request_id=g.request_id,
                                        user=str(current_user))

    @app.route('/')
    def root():
        # render all of the blueprints' templates first
        bp_widgets = []
        for bp in app.blueprints.itervalues():
            bp_widgets.extend(bp.root_widget_templates or [])
        bp_widgets.sort()
        bp_widgets = [
            tpl for (_, tpl, condition) in bp_widgets
            if not condition or condition()
        ]
        return render_template('root.html', bp_widgets=bp_widgets)

    @app.route('/versions')
    @api.apimethod(VersionInfo)
    def versions():
        dists = {}
        for dist in introspection.get_distributions().itervalues():
            dists[dist.key] = DistributionInfo(project_name=dist.project_name,
                                               version=dist.version,
                                               relengapi_metadata={})
        blueprints = {}
        relengapi_dist = pkg_resources.get_distribution('relengapi')
        for bp in app.relengapi_blueprints.itervalues():
            blueprints[bp.name] = BlueprintInfo(distribution='relengapi',
                                                version=relengapi_dist.version)
        return VersionInfo(distributions=dists, blueprints=blueprints)

    return app
Example #44
# -*- encoding: utf-8 -*-
"""Metrics for an evergreen version."""
from __future__ import absolute_import, division

from datetime import datetime
from typing import Callable, Dict, List, Optional, TYPE_CHECKING

from structlog import get_logger

if TYPE_CHECKING:
    from evergreen.build import Build
    from evergreen.version import Version
    from evergreen.metrics.buildmetrics import BuildMetrics

LOGGER = get_logger(__name__)


class VersionMetrics(object):
    """Metrics about an evergreen version."""

    def __init__(self, version: "Version") -> None:
        """
        Create an instance of version metrics.

        :param version: Version to analyze.
        """
        self.version = version

        self.total_processing_time = 0
        self.task_success_count = 0
        self.task_failure_count = 0
Example #45
import os
import datetime
import logging
import structlog
import sys
import time
import configparser
import re

from pkg_resources import resource_filename

from cpo_pipeline.drmaa import prepare_job, run_jobs
from cpo_pipeline.logging import now
from cpo_pipeline.resistance.parsers import result_parsers

logger = structlog.get_logger()


def main(args):
    """
    main entrypoint
    Args:
        args(argparse.Namespace): Parsed command-line arguments.
    Returns:
        (void)
    """

    config = configparser.ConfigParser()
    config.read(args.config_file)

    sample_id = args.sample_id
Example #46
 def __init__(self):
     """Initializes StrategyAnalyzer class """
     self.logger = structlog.get_logger()
Example #47
class BlockFetcher:
    """Fetches new blocks via a web3 interface and passes them on to a set of callbacks."""

    logger = structlog.get_logger("monitor.block_fetcher")

    def __init__(
        self, state, w3, db, max_reorg_depth=1000, initial_block_resolver=None
    ):
        self.w3 = w3
        self.db = db
        self.max_reorg_depth = max_reorg_depth

        self.head = state.head
        self.current_branch = state.current_branch

        self.report_callbacks = []
        self.initial_block_resolver = initial_block_resolver
        self.initial_blocknr = state.initial_blocknr

        self._start_sync_number = 0
        self.syncing = False

    @classmethod
    def from_fresh_state(cls, *args, **kwargs):
        return cls(cls.get_fresh_state(), *args, **kwargs)

    @classmethod
    def get_fresh_state(cls):
        return BlockFetcherStateV1(head=None, current_branch=[], initial_blocknr=0)

    @property
    def state(self):
        return BlockFetcherStateV1(
            head=self.head,
            current_branch=self.current_branch,
            initial_blocknr=self.initial_blocknr,
        )

    @property
    def _backwards_sync_in_progress(self) -> bool:
        return len(self.current_branch) > 0

    def register_report_callback(self, callback):
        self.report_callbacks.append(callback)

    def _run_callbacks(self, blocks):
        for block in blocks:
            for callback in self.report_callbacks:
                callback(block)

    def _insert_branch(self, blocks):
        if len(blocks) == 0:
            return

        if blocks[0].number not in (0, self.initial_blocknr) and not self.db.contains(
            blocks[0].parentHash
        ):
            raise FetchingForkWithUnkownBaseError(
                "Tried to insert branch from a fork with unknown parent block."
            )

        try:
            self.db.insert_branch(blocks)
            self.head = blocks[-1]
            self.current_branch.clear()
        except AlreadyExists:
            raise ValueError("Tried to insert already known block")

        self._run_callbacks(blocks)

    def _insert_first_block(self):
        resolver = self.initial_block_resolver or blocksel.ResolveGenesisBlock()
        block = resolver.resolve_block(self.w3)

        if not block:
            raise ValueError("Can't fetch initial block to sync from!")

        latest = self.w3.eth.getBlock("latest")
        safe_initial_blocknr = max(latest.number - self.max_reorg_depth, 0)
        if block.number > safe_initial_blocknr:
            unsafe_block = block
            block = self.w3.eth.getBlock(safe_initial_blocknr)
            self.logger.warn(
                f"choosing {format_block(block)} instead of {format_block(unsafe_block)}"
            )
        self.initial_blocknr = block.number

        self.logger.info(
            f"starting initial sync from {format_block(block)}, latest {format_block(latest)}"
        )
        self._insert_branch([block])

    def fetch_and_insert_new_blocks(
        self, *, max_number_of_blocks=5000, max_block_height: int = None
    ):
        """Fetches up to `max_number_of_blocks` blocks and only up to blocknumber `max_block_height` (inclusive)
        and updates the internal state
            If a full branch is fetched it also inserts the new blocks
            Returns the number of fetched blocks
        """
        if max_number_of_blocks < 1:
            return 0

        number_of_synced_blocks = 0

        if self.db.is_empty():
            self._insert_first_block()
            number_of_synced_blocks += 1

        self._save_sync_start()

        if not self._backwards_sync_in_progress:
            forward_sync_target = self.fetch_forward_sync_target()

            # sync forwards at most up until the forward sync target, but no more than
            # max_number_of_blocks
            max_forward_block_height = (
                forward_sync_target
                if max_block_height is None
                else min(forward_sync_target, max_block_height)
            )
            max_forward_sync_blocks = max(
                0, max_number_of_blocks - number_of_synced_blocks
            )

            number_of_synced_blocks += self._sync_forwards(
                max_number_of_blocks=max_forward_sync_blocks,
                max_block_height=max_forward_block_height,
            )

        # sync backwards until we have synced max_number_of_blocks in total or we are fully synced
        assert 0 <= number_of_synced_blocks <= max_number_of_blocks
        max_backward_sync_blocks = max_number_of_blocks - number_of_synced_blocks

        number_of_synced_blocks += self._sync_backwards(
            max_number_of_blocks=max_backward_sync_blocks,
            max_block_height=max_block_height,
        )

        return number_of_synced_blocks

    def fetch_forward_sync_target(self):
        return max(self.w3.eth.blockNumber - self.max_reorg_depth, 0)

    def _sync_forwards(
        self, *, max_number_of_blocks: int, max_block_height: int
    ) -> int:
        block_numbers_to_fetch = range(
            self.head.number + 1,
            min(self.head.number + 1 + max_number_of_blocks, max_block_height + 1),
        )

        blocks = list(
            itertools.takewhile(
                lambda block: block is not None,
                (
                    self.w3.eth.getBlock(block_number)
                    for block_number in block_numbers_to_fetch
                ),
            )
        )

        self._insert_branch(blocks)
        return len(blocks)

    def _sync_backwards(
        self, *, max_number_of_blocks: int, max_block_height: int = None
    ) -> int:
        branch_length_before = len(self.current_branch)
        complete = self._fetch_branch(
            max_number_of_blocks, head_block_id=max_block_height
        )
        number_of_fetched_blocks = len(self.current_branch) - branch_length_before

        if complete and len(self.current_branch) > 0:
            self._insert_branch(list(reversed(self.current_branch)))

        return number_of_fetched_blocks

    def _get_block(self, block_id):
        """call self.w3.eth.getBlock, but make sure we don't fetch a block
        before the initial block"""
        block = self.w3.eth.getBlock(block_id)
        assert block is not None, f"Could not fetch block {block_id}"

        if block.number < self.initial_blocknr:
            self.logger.error(
                f"Fetched block with number {block.number} < {self.initial_blocknr} (initial block number) on syncing backwards!"
            )
            raise FetchingForkWithUnkownBaseError(
                "Synchronized backwards on a fork with base before initial synchronized block!"
            )

        return block

    def _fetch_branch(self, max_blocks_to_fetch, head_block_id=None):
        """
        Starts or continues to fetch a branch
        :param max_blocks_to_fetch: Max number of blocks to fetch before return
        :param head_block_id: head block id of the branch to fetch, defaults to 'latest'
        :return: True if the full branch was fetched, False if it needs to continue on the next call
        """

        if max_blocks_to_fetch < 0:
            raise ValueError("Maximum number of blocks to fetch must not be negative")
        elif max_blocks_to_fetch == 0:
            return False

        number_of_fetched_blocks = 0
        if len(self.current_branch) == 0:
            if head_block_id is None:
                head_block_id = "latest"

            head = self._get_block(head_block_id)
            if self.db.contains(head.hash):
                self.logger.debug(
                    "no new blocks",
                    head_hash=self.head.hash,
                    head_number=self.head.number,
                )
                return True

            self.current_branch = [head]
            number_of_fetched_blocks += 1

        while (
            not number_of_fetched_blocks >= max_blocks_to_fetch
            and not self.db.contains(self.current_branch[-1].parentHash)
        ):
            parent = self._get_block(self.current_branch[-1].parentHash)
            self.current_branch.append(parent)
            number_of_fetched_blocks += 1

        complete = self.db.contains(self.current_branch[-1].parentHash)
        return complete

    def get_sync_status(self):
        last_block_number = self.w3.eth.blockNumber
        head_block_number = self.head_block_number
        if last_block_number <= self._start_sync_number:
            return 0
        # limit it to not go over 100 %
        branch_correction = min(
            len(self.current_branch), last_block_number - head_block_number
        )
        return (head_block_number - self._start_sync_number + branch_correction) / (
            last_block_number - self._start_sync_number
        )

    def _save_sync_start(self):
        # To show sync status, remember start sync block
        if not self.syncing and self.head.number < self.w3.eth.blockNumber - 5:
            self._start_sync_number = self.head.number
            self.syncing = True

        if self.syncing and self.head.number >= self.w3.eth.blockNumber - 1:
            self.syncing = False

    @property
    def head_block_number(self):
        if self.head is None:
            return 0
        return self.head.number
Example #48
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging.config
import threading
import uuid

import structlog

request_logger = structlog.get_logger("warehouse.request")

RENDERER = structlog.processors.JSONRenderer()


class StructlogFormatter(logging.Formatter):
    def format(self, record):
        # TODO: Figure out a better way of handling this besides just looking
        #       at the logger name, ideally this would have some way to
        #       really differentiate between log items which were logged by
        #       structlog and which were not.
        if not record.name.startswith("warehouse."):
            # TODO: Is there a better way to handle this? Maybe we can figure
            #       out a way to pass this through the structlog processors
            #       instead of manually duplicating the side effects here?
            event_dict = {
Example #49
File: offer_book.py Project: kli-fi/raidex
from __future__ import print_function
import random

from sortedcontainers import SortedDict
import structlog
from raidex.utils import pex
from raidex.utils.timestamp import to_str_repr
from raidex.raidex_node.order.offer import OfferType

from eth_utils import int_to_big_endian

log = structlog.get_logger('node.offer_book')


def generate_random_offer_id():
    # generate random offer-id in the 32byte int range
    return int(random.randint(0, 2**256 - 1))


class OfferDeprecated(object):
    """

    Represents an Offer from the Broadcast.
    the broadcasted offer_message stores absolute information (bid_token, ask_token, bid_amount, ask_amount)
    the Offer stores it's information relative to it's market (type,  price)


    Internally we work with relative values because:
        1) we want to easily compare prices (prices are the ultimate ordering criterion)
        2) we want to separate BUYs from SELLs
        3) traders are used to this!
Example #50
from requests.exceptions import RequestException

from readthedocs.api.v2.client import api
from readthedocs.builds import utils as build_utils
from readthedocs.builds.constants import (
    BUILD_STATUS_SUCCESS,
    SELECT_BUILD_STATUS,
)
from readthedocs.core.permissions import AdminPermission
from readthedocs.integrations.models import Integration

from ..constants import GITHUB
from ..models import RemoteOrganization, RemoteRepository
from .base import Service, SyncServiceError

log = structlog.get_logger(__name__)


class GitHubService(Service):
    """Provider service for GitHub."""

    adapter = GitHubOAuth2Adapter
    # TODO replace this with a less naive check
    url_pattern = re.compile(r'github\.com')
    vcs_provider_slug = GITHUB

    def sync_repositories(self):
        """Sync repositories from GitHub API."""
        remote_repositories = []

        try:
Example #51
from structlog import get_logger
from structlog.testing import capture_logs

with capture_logs() as cap_logs:
    get_logger().bind(x="y").info("hello")

cap_logs
[{"x": "y", "event": "hello", "log_level": "info"}]
Example #52
Methods for clustering point collections. Methods available
are designed to work with infrastructure elements, but can
be used with any other point collection.
"""
import structlog
from typing import List

from ...core.query import Query
from ...core.mixins import GeoDataMixin

from .versioned_infrastructure import VersionedInfrastructure

logger = structlog.get_logger("flowmachine.debug", submodule=__name__)


class LocationCluster(GeoDataMixin, Query):
    """
    Class for computing clusters of points using different
    algorithms. This class was designed to work with infrastructure
    elements (i.e. towers/sites), but can also be used with other
    point collection as long as that is a table in the 
    database. This class currently implements three methods:
    K-means, DBSCAN, and Area.

    K-means is a clustering algorithm that clusters together points
    based on the point's distance to a point representing the 
    centroid of the cluster. The algorithm has two steps: (a) point
    allocation and (b) centroid re-calculation. In (a) it allocates
Example #53
import kombu.exceptions
import redis.exceptions
from clickhouse_driver.errors import Error as ClickhouseError
from django.core.cache import cache
from django.db import DEFAULT_DB_ALIAS
from django.db import Error as DjangoDatabaseError
from django.db import connections
from django.db.migrations.executor import MigrationExecutor
from django.http import HttpRequest, HttpResponse, JsonResponse
from structlog import get_logger

from ee.clickhouse.client import sync_execute
from ee.kafka_client.client import can_connect as can_connect_to_kafka
from posthog.celery import app

logger = get_logger(__file__)

ServiceRole = Literal["events", "web", "worker"]

service_dependencies: Dict[ServiceRole, List[str]] = {
    "events": ["http", "kafka_connected"],
    "web": [
        "http",
        "postgres",
        "postgres_migrations_uptodate",
        "cache",
        # NOTE: we do not include clickhouse for web, as even without clickhouse we
        # want to be able to display something to the user.
        # "clickhouse"
        # NOTE: we do not include "celery_broker" as web could still do lot's of
        # useful things
Example #54
class LightClientMessageHandler:
    log = structlog.get_logger(__name__)  # pylint: disable=invalid-name

    @classmethod
    def store_light_client_protocol_messages(cls, messages: List[Message],
                                             wal: WriteAheadLog):
        protocol_messages = list(
            map(build_light_client_protocol_message, messages))
        assert len(messages) == len(
            protocol_messages), "Light client protocol message persist error"
        to_store = []
        for msg_dto in protocol_messages:
            to_store.append(DbLightClientProtocolMessage(msg_dto))
        return wal.storage.write_light_client_protocol_messages(to_store)

    @classmethod
    def store_light_client_protocol_message(cls, identifier: int,
                                            message: Message, signed: bool,
                                            payment_id: int, order: int,
                                            wal: WriteAheadLog):
        return wal.storage.write_light_client_protocol_message(
            message,
            build_light_client_protocol_message(identifier, message, signed,
                                                payment_id, order))

    @classmethod
    def store_received_locked_transfer(cls, identifier: int, message: Message,
                                       signed: bool, payment_id: int,
                                       order: int,
                                       storage: SerializedSQLiteStorage):
        return storage.write_light_client_protocol_message(
            message,
            build_light_client_protocol_message(identifier, message, signed,
                                                payment_id, order))

    @classmethod
    def update_stored_msg_set_signed_data(cls, message: Message,
                                          payment_id: int, order: int,
                                          wal: WriteAheadLog):
        return wal.storage.update_light_client_protocol_message_set_signed_data(
            payment_id, order, message)

    @classmethod
    def store_light_client_payment(cls, payment: LightClientPayment,
                                   storage: SerializedSQLiteStorage):
        return storage.write_light_client_payment(payment)

    @classmethod
    def is_light_client_protocol_message_already_stored(
            cls, payment_id: int, order: int, wal: WriteAheadLog):
        existing_message = wal.storage.is_light_client_protocol_message_already_stored(
            payment_id, order)
        if existing_message:
            return LightClientProtocolMessage(existing_message[4] is not None,
                                              existing_message[3],
                                              existing_message[2],
                                              existing_message[1],
                                              existing_message[4],
                                              existing_message[5])
        return existing_message

    @classmethod
    def is_light_client_protocol_message_already_stored_message_id(
            cls, message_id: int, payment_id: int, order: int,
            wal: WriteAheadLog):
        return wal.storage.is_light_client_protocol_message_already_stored_with_message_id(
            message_id, payment_id, order)

    @classmethod
    def get_light_client_protocol_message_by_identifier(
            cls, message_identifier: int, wal: WriteAheadLog):
        message = wal.storage.get_light_client_protocol_message_by_identifier(
            message_identifier)
        return LightClientProtocolMessage(message[3] is not None, message[1],
                                          message[4], message[0], message[2],
                                          message[3])

    @classmethod
    def get_light_client_payment_locked_transfer(cls, payment_identifier: int,
                                                 wal: WriteAheadLog):
        message = wal.storage.get_light_client_payment_locked_transfer(
            payment_identifier)
        return LightClientProtocolMessage(message[3] is not None, message[1],
                                          message[4], message[0], message[2],
                                          message[3])

    @staticmethod
    def get_order_for_ack(ack_parent_type: str,
                          ack_type: str,
                          is_delivered_from_initiator: bool = False):
        switcher_processed = {
            LockedTransfer.__name__: 3,
            Secret.__name__: 13,
        }
        switcher_delivered = {
            LockedTransfer.__name__: 4 if is_delivered_from_initiator else 2,
            RevealSecret.__name__: 10 if is_delivered_from_initiator else 8,
            SecretRequest.__name__: 6,
            Secret.__name__: 14 if is_delivered_from_initiator else 12,
        }
        if ack_type.lower() == "processed":
            return switcher_processed.get(ack_parent_type, -1)
        else:
            return switcher_delivered.get(ack_parent_type, -1)

    @classmethod
    def exists_payment(cls, payment_id: int, wal: WriteAheadLog):
        return wal.storage.exists_payment(payment_id)

    @classmethod
    def store_lc_processed(cls, message: Processed, wal: WriteAheadLog):
        # If exists for that payment, the same message by the order, then discard it.
        message_identifier = message.message_identifier
        # get first principal message by message identifier
        protocol_message = LightClientMessageHandler.get_light_client_protocol_message_by_identifier(
            message_identifier, wal)
        if protocol_message.signed_message is None:
            json_message = protocol_message.unsigned_message
        else:
            json_message = protocol_message.signed_message
        json_message = json.loads(json_message)

        order = LightClientMessageHandler.get_order_for_ack(
            json_message["type"], message.__class__.__name__.lower())
        if order == -1:
            cls.log.error(
                "Unable to find principal message for {} {}".format(
                    message.__class__.__name__, message_identifier))
        else:
            exists = LightClientMessageHandler.is_light_client_protocol_message_already_stored_message_id(
                message_identifier, protocol_message.light_client_payment_id,
                order, wal)
            if not exists:
                LightClientMessageHandler.store_light_client_protocol_message(
                    message_identifier, message, True,
                    protocol_message.light_client_payment_id, order, wal)
            else:
                cls.log.info(
                    "Message for lc already received, ignoring db storage")

    @classmethod
    def store_lc_delivered(cls, message: Delivered, wal: WriteAheadLog):
        # If exists for that payment, the same message by the order, then discard it.
        message_identifier = message.delivered_message_identifier
        # get first by message identifier
        protocol_message = LightClientMessageHandler.get_light_client_protocol_message_by_identifier(
            message_identifier, wal)
        if protocol_message.signed_message is None:
            json_message = protocol_message.unsigned_message
        else:
            json_message = protocol_message.signed_message

        json_message = json.loads(json_message)

        first_message_is_lt = protocol_message.message_order == 1
        is_delivered_from_initiator = True
        delivered_sender = message.sender
        if not first_message_is_lt:
            # fetch the locked transfer to learn the payment initiator
            locked_transfer = LightClientMessageHandler.get_light_client_payment_locked_transfer(
                protocol_message.light_client_payment_id, wal)
            signed_locked_transfer_message = json.loads(
                locked_transfer.signed_message)
            payment_initiator = signed_locked_transfer_message["initiator"]
            if to_checksum_address(delivered_sender) != to_checksum_address(
                    payment_initiator):
                is_delivered_from_initiator = False
        else:
            # the stored message itself is the locked transfer
            payment_initiator = json_message["initiator"]
            if to_checksum_address(delivered_sender) != to_checksum_address(
                    payment_initiator):
                is_delivered_from_initiator = False

        order = LightClientMessageHandler.get_order_for_ack(
            json_message["type"], message.__class__.__name__.lower(),
            is_delivered_from_initiator)
        if order == -1:
            cls.log.error(
                "Unable to find principal message for {} {}".format(
                    message.__class__.__name__, message_identifier))
        else:
            exists = LightClientMessageHandler.is_light_client_protocol_message_already_stored_message_id(
                message_identifier, protocol_message.light_client_payment_id,
                order, wal)
            if not exists:
                LightClientMessageHandler.store_light_client_protocol_message(
                    message_identifier, message, True,
                    protocol_message.light_client_payment_id, order, wal)
            else:
                cls.log.info(
                    "Message for lc already received, ignoring db storage")

    @classmethod
    def store_update_non_closing_balance_proof(
            cls,
            non_closing_balance_proof_data: LightClientNonClosingBalanceProof,
            storage: SerializedSQLiteStorage):
        return storage.write_light_client_non_closing_balance_proof(
            non_closing_balance_proof_data)

    @classmethod
    def get_latest_light_client_non_closing_balance_proof(
            cls, channel_id: int, storage: SerializedSQLiteStorage):
        latest_update_balance_proof_data = storage.get_latest_light_client_non_closing_balance_proof(
            channel_id)
        if latest_update_balance_proof_data:
            balance_proof = Unlock.from_dict(
                json.loads(latest_update_balance_proof_data[7]))
            return LightClientNonClosingBalanceProof(
                latest_update_balance_proof_data[1],
                latest_update_balance_proof_data[2],
                latest_update_balance_proof_data[3],
                latest_update_balance_proof_data[4],
                latest_update_balance_proof_data[5],
                latest_update_balance_proof_data[6], balance_proof,
                latest_update_balance_proof_data[8],
                latest_update_balance_proof_data[0])
        return None
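To make the ack-ordering tables above concrete, here is a minimal usage sketch; the message type names and expected positions come straight from the switcher dictionaries, and nothing beyond them is assumed.

# Sketch: resolve the protocol order for a Delivered ack of a LockedTransfer.
# A Delivered sent by the payment initiator lands at order 4, one sent by
# the partner at order 2, per switcher_delivered above.
assert LightClientMessageHandler.get_order_for_ack(
    "LockedTransfer", "delivered", is_delivered_from_initiator=True) == 4
assert LightClientMessageHandler.get_order_for_ack(
    "LockedTransfer", "delivered", is_delivered_from_initiator=False) == 2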
Example #55
0
"""
teatro - get_ranking
"""
import structlog
from telegram import ChatAction
from telegram.ext.dispatcher import run_async

from api import parse_alternativa

logger = structlog.get_logger(filename=__name__)


@run_async
def get_ranking(update, context, *args, **kwargs):
    context.bot.send_chat_action(chat_id=update.message.chat_id,
                                 action=ChatAction.TYPING)
    logger.info(f"Teatro... by {update.message.from_user.name}")

    data = parse_alternativa()
    if not data:
        return

    if not context.args:
        text = "\n".join(data[:5])
        text = (
            f"Ranking de lo más buscado en teatro:\n{text}\nBy http://www.alternativateatral.com/"
        )
        context.bot.send_message(chat_id=update.message.chat_id, text=text)
        return

Example #56
0
    def __init__(self, technology, extra_args, device_id, backend, host, port):
        """
        Create PONResourceManager object.

        :param technology: PON technology
        :param extra_args: This string contains extra arguments passed during
        pre-provisioning of OLT and specifies the OLT Vendor type
        :param device_id: OLT device id
        :param backend: backend store
        :param host: ip of backend store
        :param port: port on which backend store listens
        :raises Exception: when an invalid backend store is passed as an argument
        """
        # logger
        self._log = structlog.get_logger()

        try:
            self.technology = technology
            self.extra_args = extra_args
            self.device_id = device_id
            self.backend = backend
            self.host = host
            self.port = port
            self.olt_model = None

            self._kv_store = ResourceKvStore(technology, device_id, backend,
                                             host, port)
            self.tech_profile = TechProfile(self)

            # Below attribute, pon_resource_ranges, should be initialized
            # by reading from KV store.
            self.pon_resource_ranges = dict()
            self.pon_resource_ranges[
                PONResourceManager.ONU_ID_SHARED_IDX] = None
            self.pon_resource_ranges[
                PONResourceManager.ALLOC_ID_SHARED_IDX] = None
            self.pon_resource_ranges[
                PONResourceManager.GEMPORT_ID_SHARED_IDX] = None
            self.pon_resource_ranges[
                PONResourceManager.FLOW_ID_SHARED_IDX] = None

            self.shared_resource_mgrs = dict()
            self.shared_resource_mgrs[
                PONResourceManager.ONU_ID_SHARED_IDX] = None
            self.shared_resource_mgrs[
                PONResourceManager.ALLOC_ID_SHARED_IDX] = None
            self.shared_resource_mgrs[
                PONResourceManager.GEMPORT_ID_SHARED_IDX] = None
            self.shared_resource_mgrs[
                PONResourceManager.FLOW_ID_SHARED_IDX] = None

            self.shared_idx_by_type = dict()
            self.shared_idx_by_type[
                PONResourceManager.
                ONU_ID] = PONResourceManager.ONU_ID_SHARED_IDX
            self.shared_idx_by_type[
                PONResourceManager.
                ALLOC_ID] = PONResourceManager.ALLOC_ID_SHARED_IDX
            self.shared_idx_by_type[
                PONResourceManager.
                GEMPORT_ID] = PONResourceManager.GEMPORT_ID_SHARED_IDX
            self.shared_idx_by_type[
                PONResourceManager.
                FLOW_ID] = PONResourceManager.FLOW_ID_SHARED_IDX

            self.intf_ids = None

        except Exception:
            self._log.exception("exception-in-init")
            raise  # re-raise with the original traceback intact
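A minimal instantiation sketch for the constructor above; the technology string, device id, and etcd endpoint are placeholder values chosen for illustration, not values mandated by the class.

# Hypothetical usage: back the resource manager with an etcd KV store.
pon_mgr = PONResourceManager(
    technology="xgspon",   # placeholder PON technology
    extra_args="",         # vendor-specific pre-provisioning args, if any
    device_id="olt-0001",  # placeholder OLT device id
    backend="etcd",        # backend store type
    host="127.0.0.1",      # placeholder KV store address
    port=2379,             # default etcd port
)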
Example #57
0
    def __init__(self):
        """Initializes IchimokuCloud class
        """

        self.logger = structlog.get_logger()
Example #58
0
File: tasks.py Project: kli-fi/raidex
import gevent
import structlog
from raidex import messages
from raidex.utils import pex

from raidex.commitment_service.swap import SwapFactory
from raidex.raidex_node.listener_tasks import ListenerTask
from raidex.trader_mock.trader import TransferReceipt
from raidex.trader_mock.trader import TransferReceivedListener
from raidex.message_broker.listeners import (
    TakerCommitmentListener,
    CommitmentListener,
    SwapExecutionListener,
    CancellationListener,
)

log = structlog.get_logger('commitment_service')
log_swaps = structlog.get_logger('commitment_service.asset_swaps')
log_messaging = structlog.get_logger('commitment_service.messaging')
log_refunds = structlog.get_logger('commitment_service.refunds')
log_trader = structlog.get_logger('commitment_service.trader')


class QueueListenerTask(gevent.Greenlet):
    def __init__(self, queue):
        self.queue = queue
        gevent.Greenlet.__init__(self)

    def _run(self):
        # Consume items forever; concrete subclasses implement process().
        while True:
            data = self.queue.get()
            self.process(data)
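Because _run dispatches to a process() method the base class never defines, each concrete listener is expected to supply one. A minimal subclass sketch, purely illustrative:

from gevent.queue import Queue

class LoggingListenerTask(QueueListenerTask):
    # Illustrative subclass: log every item pulled off the queue.
    def process(self, data):
        log.info('queue-item', data=data)

# Usage sketch: items put on the queue are handled by the greenlet.
queue = Queue()
LoggingListenerTask(queue).start()
queue.put({'event': 'ping'})
gevent.sleep(0)  # yield so the greenlet can run and process the item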
Example #59
0
import structlog

from raiden.transfer.state_change import ActionChangeNodeNetworkState
from raiden.utils import pex
from raiden.utils.notifying_queue import NotifyingQueue
from raiden.utils.runnable import Runnable
from raiden.utils.typing import (
    MYPY_ANNOTATION,
    Address,
    Dict,
    List,
    MessageID,
    Nonce,
    Tuple,
    UDPMessageID,
)

log = structlog.get_logger(__name__)  # pylint: disable=invalid-name
log_healthcheck = structlog.get_logger(__name__ + ".healthcheck")  # pylint: disable=invalid-name

QueueItem_T = Tuple[bytes, MessageID]
Queue_T = List[QueueItem_T]

# GOALS:
# - Each netting channel must have the messages processed in-order, the
# transport must detect unacknowledged messages and retry them.
# - A queue must not stall because of synchronization problems in other queues.
# - Assuming a queue can stall, the unhealthiness of a node must not be
# inferred from the lack of acknowledgement from a single queue, but healthiness
# may be safely inferred from it.
# - The state of the node must be synchronized among all tasks that are
# handling messages.
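A stripped-down sketch of the per-queue retry discipline these goals imply; the send and acknowledgement callables and the retry interval are placeholders, not Raiden's actual transport code.

import time

def run_queue(queue: Queue_T, send, is_acknowledged, retry_interval=1.0):
    # Process one queue strictly in order: the head message is resent until
    # acknowledged, and an unacknowledged head blocks only this queue.
    while queue:
        messagedata, message_id = queue[0]
        send(messagedata)
        if is_acknowledged(message_id):
            queue.pop(0)
        else:
            time.sleep(retry_interval)  # retry the same head later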
Example #60
0
import structlog
import time

from csv import DictReader
from adjuntos.models import Attachment
from elecciones.models import Categoria, Circuito, MesaCategoria
from elecciones.management.commands.basic_command import BaseCommand

from scheduling.scheduler import scheduler
from scheduling.models import ColaCargasPendientes

logger = structlog.get_logger('scheduler')


class Command(BaseCommand):
    help = "Toma una lista de circuitos y la prioriza."

    def priorizar_circuito(self, nuevas, lugar_en_cola, linea, slug_cat, nro_distrito, nro_seccion, nro_circuito, cant_mesas_necesarias):
        try:
            categoria = Categoria.objects.get(slug=slug_cat)
        except Categoria.DoesNotExist:
            logger.error(f"No existe la categoría con slug {slug_cat} (línea {linea}).")
            return lugar_en_cola

        try:
            circuito = Circuito.objects.get(
                numero=nro_circuito, seccion__numero=nro_seccion,
                seccion__distrito__numero=nro_distrito
            )
        except Circuito.DoesNotExist:
            logger.error(f"No existe el circuito nro {nro_circuito} en la sección nro {nro_seccion} y distrito {nro_distrito}  (línea {linea}).")