Example #1
def do_rotate_logs(basefile, type):
    global observers

    if os.path.exists(basefile):
        if os.path.getsize(basefile) > 1000:
            for c in range(19, 0, -1):
                filename_cur = "%s.1" % basefile
                if c == 1 and os.path.exists(filename_cur):
                    # Read bytes so they can be written to the binary gzip stream.
                    with open(filename_cur, 'rb') as src, gzip.open(
                            '%s.gz' % filename_cur, 'wb') as dst:
                        dst.writelines(src)
                    os.remove(filename_cur)

                filename_cur = "%s.%s.gz" % (basefile, c)
                filename_next = "%s.%s.gz" % (basefile, c + 1)
                if os.path.exists(filename_cur):
                    os.rename(filename_cur, filename_next)
            os.rename(basefile, "%s.1" % basefile)

        if type == 'json':
            globalLogPublisher.removeObserver(observers['json'])
            observers['json'] = jsonFileLogObserver(
                io.open("usr/log/yombo.json", "a"))
            globalLogPublisher.addObserver(observers['json'])

            globalLogPublisher.addObserver(
                jsonFileLogObserver(io.open(basefile, "a")))
        elif type == 'text':
            observers['text'] = textFileLogObserver(
                io.open("usr/log/yombo.text", "a"))
            globalLogPublisher.addObserver(observers['text'])

            # NOTE: this constructs a new observer and removes it, which is a
            # no-op; the previously attached text observer is not removed.
            globalLogPublisher.removeObserver(textFileLogObserver())
            globalLogPublisher.addObserver(
                textFileLogObserver(io.open(basefile, "a")))
Example #2
def do_rotate_logs(basefile, type):
    global observers

    if os.path.exists(basefile):
        if os.path.getsize(basefile) > 1000:
            for c in range(19, 0, -1):
                filename_cur = f"{basefile}.1"
                if c == 1 and os.path.exists(filename_cur):
                    # Read bytes so they can be written to the binary gzip stream.
                    with open(filename_cur, "rb") as src, gzip.open(
                            f"{filename_cur}.gz", "wb") as dst:
                        dst.writelines(src)
                    os.remove(filename_cur)

                filename_cur = f"{basefile}.{c}.gz"
                filename_next = f"{basefile}.{c+1}.gz"
                if os.path.exists(filename_cur):
                    os.rename(filename_cur, filename_next)
            os.rename(basefile, f"{basefile}.1")

        if type == "json":
            globalLogPublisher.removeObserver(observers["json"])
            observers["json"] = jsonFileLogObserver(
                io.open("usr/log/yombo.json", "a"))
            globalLogPublisher.addObserver(observers["json"])

            globalLogPublisher.addObserver(
                jsonFileLogObserver(io.open(basefile, "a")))
        elif type == "text":
            observers["text"] = textFileLogObserver(
                io.open("usr/log/yombo.text", "a"))
            globalLogPublisher.addObserver(observers["text"])

            # NOTE: this constructs a new observer and removes it, which is a
            # no-op; the previously attached text observer is not removed.
            globalLogPublisher.removeObserver(textFileLogObserver())
            globalLogPublisher.addObserver(
                textFileLogObserver(io.open(basefile, "a")))
Example #3
def get_json_file_observer(
        name="nucypher.log.json",
        path=USER_LOG_DIR):  # TODO: More configurable naming here?
    _ensure_dir_exists(path)
    logfile = DailyLogFile(name, path)
    observer = jsonFileLogObserver(outFile=logfile)
    return observer
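
A minimal usage sketch (not part of the original example): the returned
observer plugs into Twisted's global log publisher, assuming USER_LOG_DIR
points at a writable directory.

from twisted.logger import Logger, globalLogPublisher

observer = get_json_file_observer()
globalLogPublisher.addObserver(observer)
Logger(namespace="demo").info("structured hello")
globalLogPublisher.removeObserver(observer)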
Example #4
def get_json_log_observer():
    f = logfile.LogFile("carbon_forwarder.log", log_dir,
                        rotateLength=log_rotate_length,
                        maxRotatedFiles=max_rotated_log_files)
    observer = jsonFileLogObserver(f)
    filterer = FilteringLogObserver(observer,
        [LogLevelFilterPredicate(
            LogLevel.levelWithName(log_level))])
    return filterer
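
Since the returned filterer is itself an ILogObserver, it can be handed
straight to the log beginner. A hedged sketch, assuming the module-level
settings referenced above (log_dir, log_rotate_length, max_rotated_log_files,
log_level) are defined:

from twisted.logger import globalLogBeginner

globalLogBeginner.beginLoggingTo([get_json_log_observer()])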
Example #5
    def _create_query_logger(response_log):
        """Create a logger that will persist incoming query results."""

        # Ensures different log files for different
        # processes in multi worker mode
        if response_log:
            # We need to generate a unique file name,
            # even in multiprocess environments
            timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
            log_file_name = "rasa_nlu_log-{}-{}.log".format(
                timestamp, os.getpid())
            response_logfile = os.path.join(response_log, log_file_name)
            # Instantiate a standard python logger,
            # which we are going to use to log requests
            utils.create_dir_for_file(response_logfile)
            query_logger = Logger(observer=jsonFileLogObserver(
                io.open(response_logfile, 'a', encoding='utf8')),
                                  namespace='query-logger')
            # Prevents queries getting logged with parent logger
            # --> might log them to stdout
            logger.info("Logging requests to '{}'.".format(response_logfile))
            return query_logger
        else:
            # If the user didn't provide a logging directory, we won't log!
            logger.info("Logging of requests is disabled. "
                        "(No 'request_log' directory configured)")
            return None
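
A hypothetical call site for the helper above (assuming it is exposed as a
staticmethod on its component class); keyword arguments become structured
fields in the JSON records:

query_logger = _create_query_logger("/tmp/rasa_logs")
if query_logger:
    query_logger.info("query received", text="hello", confidence=0.97)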
Example #6
def main(reactor, *argv):
    import argparse

    a = argparse.ArgumentParser()

    a.add_argument('number', type=int)
    a.add_argument('subprocess', nargs='+')

    args = a.parse_args(argv)

    globalLogBeginner.beginLoggingTo([jsonFileLogObserver(sys.stdout)])

    executablePath = distutils.spawn.find_executable(args.subprocess[0])
    args.subprocess[0] = executablePath

    collection = ProcessCollection()
    reactor.addSystemEventTrigger("before", "shutdown", collection.stop)

    processes = [RespawningProcess(reactor,
                                   executablePath, args.subprocess,
                                   usePTY=True)
                 for _ in range(args.number)]
    collection.addProcesses(processes)
    collection.start()

    terminationDeferred = defer.Deferred()
    stdio.StandardIO(FireOnInput(terminationDeferred))

    return terminationDeferred
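
A sketch of how such a main() is typically launched: task.react runs the
reactor, passes it as the first argument, and shuts down when the returned
Deferred fires.

import sys

from twisted.internet import task

task.react(main, sys.argv[1:])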
Example #7
    def _create_query_logger(response_log):
        """Create a logger that will persist incoming query results."""

        # Ensures different log files for different
        # processes in multi worker mode
        if response_log:
            # We need to generate a unique file name,
            # even in multiprocess environments
            timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
            log_file_name = "rasa_nlu_log-{}-{}.log".format(timestamp,
                                                            os.getpid())
            response_logfile = os.path.join(response_log, log_file_name)
            # Instantiate a standard python logger,
            # which we are going to use to log requests
            utils.create_dir_for_file(response_logfile)
            out_file = io.open(response_logfile, 'a', encoding='utf8')
            query_logger = Logger(
                    observer=jsonFileLogObserver(out_file, recordSeparator=''),
                    namespace='query-logger')
            # Prevents queries getting logged with parent logger
            # --> might log them to stdout
            logger.info("Logging requests to '{}'.".format(response_logfile))
            return query_logger
        else:
            # If the user didn't provide a logging directory, we won't log!
            logger.info("Logging of requests is disabled. "
                        "(No 'request_log' directory configured)")
            return None
Example #8
    def bootstrap(self):
        path = os.path.join(paths.log_dir, 'pps.log.json')
        obs = jsonFileLogObserver(io.open(path, 'w'))
        logger = Logger(observer=obs)

        if self._has_endpoints:
            self.start()
Example #9
def getJsonFileObserver(
        name="nucypher.log.json",
        path=USER_LOG_DIR):  # TODO: More configurable naming here?
    _get_or_create_user_log_dir()
    logfile = DailyLogFile(name, path)
    observer = jsonFileLogObserver(outFile=logfile)
    return observer
Example #10
def get_json_file_observer(name=DEFAULT_JSON_LOG_FILENAME, path=USER_LOG_DIR):
    _ensure_dir_exists(path)
    logfile = LogFile(name=name,
                      directory=path,
                      rotateLength=MAXIMUM_LOG_SIZE,
                      maxRotatedFiles=MAX_LOG_FILES)
    observer = jsonFileLogObserver(outFile=logfile)
    return observer
Example #11
    def start_collection(self) -> None:
        print("Starting Data Collection...")

        json_filepath = os.path.join(self.OUTPUT_DIR, AnalyzeGas.LOG_FILENAME)
        json_io = io.open(json_filepath, "w")
        json_observer = jsonFileLogObserver(json_io)
        globalLogPublisher.addObserver(json_observer)
        globalLogPublisher.addObserver(self)
Example #12
def hendrixObserver(path=DEFAULT_LOG_FILE, log_level=LogLevel.warn):
    json_observer = jsonFileLogObserver(
        io.open(path, 'a')
    )
    return FilteringLogObserver(
        json_observer,
        [LogLevelFilterPredicate(log_level), ]
    )
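
Minimal wiring sketch for the observer above: because of the
LogLevelFilterPredicate, only events at log_level warn or above reach the
JSON file.

from twisted.logger import globalLogPublisher

globalLogPublisher.addObserver(hendrixObserver())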
Example #13
def main(reactor, *argv):
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('solr_url')
    argument_parser.add_argument('json_docs')

    args = argument_parser.parse_args(argv)

    globalLogBeginner.beginLoggingTo([jsonFileLogObserver(sys.stdout)])

    return replay(args.json_docs, args.solr_url)
Example #14
def main():

    if Config.get('logging')['verbose'] is True:
        globalLogPublisher.addObserver(
            FileLogObserver(sys.stdout, lambda e: eventAsText(e) + "\n"))

    if Config.get('logging')['log_to_file'] is True:
        logfile = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                               "logs/log.json")
        globalLogPublisher.addObserver(
            jsonFileLogObserver(io.open(logfile, 'w+'), ''))

    server = Server(Config.get('server')['port'])
    server.run()
Example #15
def wrapp_observer(output):
    json = jsonFileLogObserver(output, recordSeparator='')

    @provider(ILogObserver)
    def wrapped(event):
        try:
            # If there is a failure, use the type and message to create the main msg.
            if 'failure' in event:
                f = event['failure']
                parts = [f.type.__name__, f.getErrorMessage()]
                msg = ' '.join(filter(None, parts))

            # Otherwise use normal event formatting to create the main msg.
            else:
                msg = formatEvent(event)

            new = OrderedDict([
                ('level', level_name[event.pop('log_level')]),
                ('msg', msg),
            ])

            if 'log_namespace' in event:
                new['namespace'] = event.pop('log_namespace')

            if 'log_system' in event:
                new['system'] = event.pop('log_system')

            # Keep all keys except the noise.
            for k, v in sorted(event.items()):
                if k not in noisey_keys:
                    new[k] = v

        except Exception as e:
            # Fallback to normal event processing
            new = event
            new['log_failure'] = str(e)

        output.write(new['level'].upper() + ' ')
        json(new)

    return wrapped
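
A hedged usage sketch; level_name, noisey_keys and the other names used by
wrapp_observer are assumed to be defined in the surrounding module:

import sys

from twisted.logger import globalLogBeginner

globalLogBeginner.beginLoggingTo([wrapp_observer(sys.stdout)])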
Example #16
def wrapp_observer(output, service=None):
    json = jsonFileLogObserver(output, recordSeparator='')
    service = service or os.environ.get('SERVICE_NAME')

    @provider(ILogObserver)
    def wrapped(event):
        try:
            # If there is a failure, use the type and message to create the main msg.
            if 'failure' in event:
                f = event['failure']
                parts = [f.type.__name__, f.getErrorMessage()]
                msg = ' '.join(filter(None, parts))

            # Otherwise use normal event formatting to create the main msg.
            else:
                msg = formatEvent(event)

            new = OrderedDict([('level', level_name[event.pop('log_level')]),
                               ('msg', msg), ('service', service),
                               ('timestamp', _timestamp())])

            if 'log_namespace' in event:
                new['namespace'] = event.pop('log_namespace')

            if 'log_system' in event:
                new['system'] = event.pop('log_system')

            # Keep all keys except the noise.
            for k, v in sorted(event.items()):
                if k not in noisey_keys:
                    new[k] = v

        except Exception as e:
            # Fallback to normal event processing
            new = event
            new['log_failure'] = str(e)

        json(new)

    return wrapped
Example #17
"""
Example of an InfluxDB query using the TwistedInfluxDB driver.
"""


import io

from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.logger import jsonFileLogObserver, Logger

from twistedInfluxDB.Client import InfluxClient
from twistedInfluxDB.QueryObject import InfluxQueryObject


# Logger to bind to the API.
log = Logger(observer=jsonFileLogObserver(io.open("log.json", "a")))
# Database client setup configurations.
db = InfluxClient('http://localhost', 8086, 'example', log)
# Query Parameter (Influx Query Object)
query = InfluxQueryObject('cpu_load_short', fields=None, condition="region='us-west'")


# Success Callback
def inserted(response):
    print(response)
    return response


# Failed Callback
def failed(data):
    print("Failed")
Example #18
class FusionsCO2Factory(LaserFactory):
    _name = 'FusionsCO2'


class FusionsDiodeFactory(LaserFactory):
    _name = 'FusionsDiode'


class FusionsUVFactory(LaserFactory):
    _name = 'FusionsUV'

from pychron.paths import paths
path = os.path.join(paths.log_dir, 'pps.log.json')

logger = Logger(observer=jsonFileLogObserver(io.open(path, 'w')))


class BaseFactory(Factory):
    protocol_klass = None

    def __init__(self, application=None):
        self._app = application
Example #19
import io
from twisted.logger import jsonFileLogObserver, globalLogPublisher
from ad_hoc import AdHoc

globalLogPublisher.addObserver(jsonFileLogObserver(io.open("log.json", "a")))

AdHoc(3, 4).logMessage()
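
The observer writes one JSON record per event, so the file can be read back
as structured events (a sketch, not part of the original example):

import io

from twisted.logger import eventsFromJSONLogFile

for event in eventsFromJSONLogFile(io.open("log.json")):
    print(event)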
Example #20
    failure.trap(ServiceNameError)
    return failure


def response_err(failure):
    failure.trap(ResponseError)
    return failure


def nargs_err(failure):
    failure.trap(ValueError)
    return InvalidArgumentsErrorCode("Foo", str(failure.value))


path = os.path.join(paths.log_dir, "pps.log.json")
obs = jsonFileLogObserver(io.open(path, "w"))
logger = Logger(observer=obs)
# logger = Logger()

regex = re.compile(r"^(?P<command>\w+) {0,1}(?P<args>.*)")


class ServiceProtocol(Protocol):
    def __init__(self, *args, **kw):
        # super(ServiceProtocol, self).__init__(*args, **kw)
        self._services = {}
        self._cmd_delim = " "
        self._arg_delim = ","
        self.debug = logger.debug
        self.warning = logger.warn
        self.info = logger.info
Example #21
# -*- coding: utf-8 -*-
# @Time    : 2019/6/3 10:14 PM
# @Author  : xuhaihu
# @Email   : [email protected]
# @File    : http_server_multithreading.py
# @Software: PyCharm

from twisted.logger import jsonFileLogObserver, Logger
from twisted.web import server, resource
from twisted.internet import reactor, endpoints, protocol
from aes import MyAES
import json, os
from read_ini import ReadINI


log = Logger(observer=jsonFileLogObserver(open("myLog/log.json", "a")),
                     namespace="http_server_multithreading")


class MyPP(protocol.ProcessProtocol):
    def __init__(self):
        self.data = ""

    def connectionMade(self):
        print("connectionMade!")
        # print("xuhaihu\n".encode())
        # self.transport.write("xuhaihu\n".encode())
        # self.transport.closeStdin()

    def outReceived(self, data):
        print("outReceived! with %d bytes!" % len(data))
Example #22
from twisted.logger import (
    Logger,
    jsonFileLogObserver,
)
from core.predef import (
    pycard_protocol as pp,
    table_type as tt,
)
from player.player_class import LobbyPerson
from sample_games.retard_game import retard_game
from core.game import GameOver

__author__ = 'Anton Korobkov'

log = Logger(
    observer=jsonFileLogObserver(io.open("server.json", "a")),
    namespace="server"
)


class MultiEcho(protocol.Protocol):

    def __init__(self, *args):
        self.factory = args[0]
        self.playernum = args[1]
        self.anncounter = args[2]

    # Twisted methods

    def connectionMade(self):
        log.info('Incoming connection on {host}', host=self)
Example #23
class FusionsCO2Factory(LaserFactory):
    _name = 'FusionsCO2'


class FusionsDiodeFactory(LaserFactory):
    _name = 'FusionsDiode'


class FusionsUVFactory(LaserFactory):
    _name = 'FusionsUV'


from pychron.paths import paths
path = os.path.join(paths.log_dir, 'pps.log.json')

logger = Logger(observer=jsonFileLogObserver(io.open(path, 'w')))


class BaseFactory(Factory):
    protocol_klass = None

    def __init__(self, application=None):
        self._app = application

    def buildProtocol(self, addr):
Example #24
def setup_structured_logging(
    hs,
    config,
    log_config: dict,
    logBeginner: LogBeginner,
    redirect_stdlib_logging: bool = True,
) -> LogPublisher:
    """
    Set up Twisted's structured logging system.

    Args:
        hs: The homeserver to use.
        config (HomeserverConfig): The configuration of the Synapse homeserver.
        log_config (dict): The log configuration to use.
    """
    if config.no_redirect_stdio:
        raise ConfigError(
            "no_redirect_stdio cannot be defined using structured logging.")

    logger = Logger()

    if "drains" not in log_config:
        raise ConfigError(
            "The logging configuration requires a list of drains.")

    observers = []  # type: List[ILogObserver]

    for observer in parse_drain_configs(log_config["drains"]):
        # Pipe drains
        if observer.type == DrainType.CONSOLE:
            logger.debug("Starting up the {name} console logger drain",
                         name=observer.name)
            observers.append(SynapseFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON:
            logger.debug("Starting up the {name} JSON console logger drain",
                         name=observer.name)
            observers.append(jsonFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON_TERSE:
            logger.debug(
                "Starting up the {name} terse JSON console logger drain",
                name=observer.name,
            )
            observers.append(
                TerseJSONToConsoleLogObserver(observer.location, metadata={}))

        # File drains
        elif observer.type == DrainType.FILE:
            logger.debug("Starting up the {name} file logger drain",
                         name=observer.name)
            log_file = open(observer.location,
                            "at",
                            buffering=1,
                            encoding="utf8")
            observers.append(SynapseFileLogObserver(log_file))
        elif observer.type == DrainType.FILE_JSON:
            logger.debug("Starting up the {name} JSON file logger drain",
                         name=observer.name)
            log_file = open(observer.location,
                            "at",
                            buffering=1,
                            encoding="utf8")
            observers.append(jsonFileLogObserver(log_file))

        elif observer.type == DrainType.NETWORK_JSON_TERSE:
            metadata = {"server_name": hs.config.server_name}
            log_observer = TerseJSONToTCPLogObserver(
                hs=hs,
                host=observer.location[0],
                port=observer.location[1],
                metadata=metadata,
                maximum_buffer=observer.options.maximum_buffer,
            )
            log_observer.start()
            observers.append(log_observer)
        else:
            # We should never get here, but, just in case, throw an error.
            raise ConfigError("%s drain type cannot be configured" %
                              (observer.type, ))

    publisher = StoppableLogPublisher(*observers)
    log_filter = LogLevelFilterPredicate()

    for namespace, namespace_config in log_config.get("loggers",
                                                      DEFAULT_LOGGERS).items():
        # Set the log level for twisted.logger.Logger namespaces
        log_filter.setLogLevelForNamespace(
            namespace,
            stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")),
        )

        # Also set the log levels for the stdlib logger namespaces, to prevent
        # them getting to PythonStdlibToTwistedLogger and having to be formatted
        if "level" in namespace_config:
            logging.getLogger(namespace).setLevel(
                namespace_config.get("level"))

    f = FilteringLogObserver(publisher, [log_filter])
    lco = LogContextObserver(f)

    if redirect_stdlib_logging:
        stuff_into_twisted = PythonStdlibToTwistedLogger(lco)
        stdliblogger = logging.getLogger()
        stdliblogger.addHandler(stuff_into_twisted)

    # Always redirect standard I/O, otherwise other logging outputs might miss
    # it.
    logBeginner.beginLoggingTo([lco], redirectStandardIO=True)

    return publisher
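
A hedged sketch of a minimal log_config for the function above; the exact
drain schema is defined by Synapse's parse_drain_configs, so treat the keys
as illustrative only:

log_config = {
    "drains": {
        "console": {"type": "console", "location": "stdout"},
        "file": {"type": "file_json", "location": "/var/log/synapse.json"},
    }
}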
Example #25
import io

from twisted.logger import Logger, jsonFileLogObserver

log = Logger(observer=jsonFileLogObserver(open("log.json", "a")), namespace="saver")


def loggit(values):
    log.info("Some values: {values!r}", values=values)


loggit([1234, 5678])
loggit([9876, 5432])
Example #26
class AppSession(ApplicationSession):

    fdb = FlyCircuitDB()
    log = Logger()
    txaio.start_logging(level='debug')
    feedback_log = Logger(observer=jsonFileLogObserver(
        io.open(
            os.path.join(os.path.dirname(__file__), 'logs',
                         'feedback_log.json'), 'a+b')),
                          namespace='feedback')
    #log = Logger(observer=jsonFileLogObserver(io.open("processor_log.json", "a")), namespace="saver")

    @inlineCallbacks
    def onJoin(self, details):

        # Processor Data
        directory = {
            'processor': {
                'autobahn': autobahn.__version__,
                'version': __version__
            },
            'nlp': {},
            'na': {},
            'nk': {}
        }

        # Email notification
        # email_dict = {
        #    "sender": "*****@*****.**",
        #    "recipients": {"John Doe": "*****@*****.**",
        #                   "Jane Who": "*****@*****.**"
        #    }
        # }
        email_dict = None
        try:
            with open('email_dict.json') as data_file:
                email_dict = json.load(data_file)
                self.log.info("Loading Email Dictionary with keys, {keys}",
                              keys=list(email_dict.keys()))
        except:
            self.log.warn(
                "Loading Email Dictionary failed, no email notification on session leave."
            )

        # Memory Management
        @inlineCallbacks
        def trigger_memory_management():
            yield self.publish(six.u('ffbo.processor.memory_manager'))
            self.log.info(
                'Memory Management ping: ffbo.processor.memory_manager')

        lc = task.LoopingCall(trigger_memory_management)
        interval = 60 * self.config.extra['clean_interval']  # in mins
        lc.start(interval)

        def get_process_info():
            # hacky way to determine whether process is inside docker
            try:
                cgroup = {}
                with open('/proc/1/cgroup') as f:
                    for line in f:
                        field = line.split(':')
                        if len(field) == 3:
                            cgroup[field[1]] = field[2]
                docker_cid = 'not inside docker'
                if 'cpu' in cgroup:
                    field = cgroup['cpu'].strip().split('/')
                    if len(field) > 2 and field[1] == 'docker':
                        docker_cid = field[2]
            except:
                docker_cid = 'cannot determine whether inside docker or not'

            # get process pid
            try:
                pid = os.getpid()
            except:
                pid = None

            # get host IP, messy code for cross platform support
            try:
                hostip = [
                    ip
                    for ip in socket.gethostbyname_ex(socket.gethostname())[2]
                    if not ip.startswith("127.")
                ][:1]
                hostip = hostip or [[
                    (s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close())
                    for s in
                    [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]
                ][0][1]]
                hostip = hostip[0]
            except:
                hostip = None

            process_info = "\n\n" \
                           "processor_component pid: {pid}\n" \
                           "docker container id: {cid}\n" \
                           "host ip address: {hip}\n".format(pid=str(pid),cid=str(docker_cid),hip=str(hostip))
            return process_info

        def send_email(text, subject=None, verbose=True):
            try:
                sender = email_dict['sender']
                recipients = [
                    '%s <%s>' % (k, v)
                    for k, v in email_dict['recipients'].items()
                ]
            except:
                return "Incorrect email dictionary format"

            if verbose:
                text += get_process_info()

            msg = MIMEText(text)

            msg['Subject'] = '[FFBO Server] ' + subject
            msg['From'] = sender
            msg['To'] = ','.join(recipients)

            gmail_user = ""
            gmail_pwd = ""

            try:
                s = smtplib.SMTP_SSL("smtp.gmail.com", 465)
                s.login(gmail_user, gmail_pwd)
            except Exception as e:
                print(e)
                return "Failed to connect to SMTP server on gmail"
            try:
                s.sendmail(sender, recipients, msg.as_string())
            except:
                return "Failed to send out email"
            s.quit()
            return "successfully sent out email"

        def feedback_error(request, message, exception=None):
            info = {
                'error': {
                    'request': str(request),
                    'message': str(message),
                    'exception': str(exception)
                }
            }
            return info

        def feedback_success(request, result):
            info = {
                'success': {
                    'request': str(request),
                    'result': result,
                }
            }
            return info

        @inlineCallbacks  # Progressive calls
        def process_nlp_query(request, details=None):
            """
                Process a nlp request, this request should have
                user:       session_id
                servers: {  nlp: session_id,
                            na : session_id,
                            vis: session_id
                         }
                nlp_query:  string
            """
            request['user'] = details.caller
            #user_details = yield self.call('ffbo.auth_server.get_user',details.caller)
            user_details = yield self.call(six.u('ffbo.auth_server.get_user'),
                                           details.caller)
            if user_details: request['username'] = user_details['username']
            feedback = []
            self.log.info(
                "process_nlp_query() accessed with request: {request}",
                request=request)
            # Validate 3 each server is defined
            try:
                #build up server calls
                rpc_calls = {}
                for stype in ['nlp', 'na']:
                    rpc_calls[stype] = six.u("ffbo.%(s_type)s.query.%(s_id)s" % \
                    {'s_id':    request['servers'][stype],
                     's_type':  stype})
                rpc_calls['user_msg'] = six.u("ffbo.ui.receive_msg.%(s_id)s" % \
                                        {'s_id': request['user']})
            except Exception as e:
                self.log.warn(
                    "process_nlp_query() failed due to incomplete server list in {servers}",
                    servers=str(request['servers']))
                feedback = feedback_error(request,
                                          "Server list not fully defined", e)
                returnValue(feedback)

            # Natural Language Processing
            try:
                self.log.info(
                    "process_nlp_query() accessed on NLP server {server_id} with query: {query}",
                    server_id=rpc_calls['nlp'],
                    query=request['nlp_query'])

                language = "en"
                if "language" in request:
                    language = request["language"]

                nlp_res = yield self.call(rpc_calls['nlp'],
                                          request['nlp_query'], language)
                self.log.info(
                    "process_nlp_query() accessed on NLP server {server_id} with result: {result}",
                    server_id=rpc_calls['nlp'],
                    result=nlp_res)

            except ApplicationError as e:
                self.log.warn(
                    "Processor failed to access NLP server {server_id}, with error {e}",
                    server_id=rpc_calls['nlp'],
                    e=e)

                feedback = feedback_error(request, "Unable to contact server",
                                          e)
                returnValue(feedback)

            try:
                if nlp_res == {}:
                    yield self.call(rpc_calls['user_msg'], {
                        'info': {
                            'error': 'NLP module could not parse your input'
                        }
                    })
                    self.log.warn(
                        "{server_id} failed to parse query: {query}}",
                        server_id=rpc_calls['nlp'],
                        query=request['nlp_query'])
                    returnValue(None)
                yield self.call(
                    rpc_calls['user_msg'], {
                        'info': {
                            'success':
                            'NLP module successfully parsed your input'
                        }
                    })
                nlp_res['user_msg'] = rpc_calls['user_msg']
                for key in request:
                    if key not in nlp_res: nlp_res[key] = request[key]

                nlp_res['user'] = request['user']
                # HARD CODE morphology if not specified
                nlp_res['format'] = 'morphology' if not 'format' in request \
                                    else request['format']

                if 'verb' not in nlp_res or nlp_res['verb'] == 'show':
                    yield self.call(rpc_calls['user_msg'],
                                    {'commands': {
                                        'reset': ''
                                    }})

                na_res = yield self.call(rpc_calls['na'], nlp_res)
                if not na_res:
                    yield self.call(rpc_calls['user_msg'], {
                        'info': {
                            'error': 'Failed to execute query on Neuroarch'
                        }
                    })
                    returnValue(None)
                else:
                    if not 'format' in request:
                        request['format'] = 'morphology'
                    msg = {
                        'info': {
                            'success':
                            'Finished fetching all results from NeuroArch'
                        }
                    }
                    self.call(rpc_calls['user_msg'], msg)
                    returnValue(na_res)
            except ApplicationError as e:
                self.log.warn(
                    "Processor failed to access NA server {server_id}, with error {e}",
                    server_id=rpc_calls['na'],
                    e=e)
                traceback.print_exc()
                yield self.call(
                    rpc_calls['user_msg'],
                    {'info': {
                        'error': 'Unable to contact NeuroArch server'
                    }})
                returnValue(None)

        yield self.register(process_nlp_query,
                            six.u('ffbo.processor.nlp_to_visualise'),
                            RegisterOptions(details_arg='details'))
        self.log.debug("procedure process_nlp_query registered")

        @inlineCallbacks  # Progressive calls
        def process_nk_request(request, details=None):
            """

            """
            request['user'] = details.caller
            user_details = yield self.call(six.u('ffbo.auth_server.get_user'),
                                           details.caller)
            if user_details: request['username'] = user_details['username']
            feedback = []
            self.log.debug(
                "process_nk_request() accessed with request: {request}",
                request=request)
            # Validate 3 each server is defined
            try:
                #build up server calls
                rpc_calls = {}
                rpc_calls['na'] = u'ffbo.na.query.{}'.format(
                    request['servers']['na'])
                rpc_calls['nk'] = u'ffbo.nk.launch.{}'.format(
                    request['servers']['nk'])

            except Exception as e:
                self.log.warn(
                    "process_nk_request() failed due to incomplete server list in {servers}",
                    servers=str(request['servers']))
                feedback = feedback_error(request,
                                          "Server list not fully defined", e)
                returnValue(feedback)

            # Natural Language Processing
            na_res = request
            try:
                na_task = {
                    'user': request['user'],
                    'command': {
                        "retrieve": {
                            "state": 0
                        }
                    },
                    'format': "nk"
                }
                self.log.info(
                    "process_nk_request() accessed on NA server {server_id} with query: {query}",
                    server_id=rpc_calls['na'],
                    query=na_task)

                #na_res =  yield self.call(rpc_calls['na'], na_task)
                na_res_update = yield self.call(rpc_calls['na'], na_task)
                na_res.update(na_res_update)
                self.log.info(
                    "process_nk_request() accessed on NA server {server_id} with result: {result}",
                    server_id=rpc_calls['na'],
                    result=na_res)

            except ApplicationError as e:
                self.log.warn(
                    "Processor failed to access NLP server {server_id}, with error {e}",
                    server_id=rpc_calls['nlp'],
                    e=e)

                feedback = feedback_error(request, "Unable to contact server",
                                          e)
                returnValue(feedback)

            details.progress("Circuit data retrieved from NeuroArch")

            try:
                na_res['user'] = request['user']
                if 'neuron_list' in request:
                    na_res['neuron_list'] = request['neuron_list']

                progressive_result = {}

                def on_progress(p):
                    progressive_result.update(p)
                    details.progress("Receive partial result from execution.")

                details.progress("Start execution in Neurokernel")
                # Forward the result to the front end
                na_res["forward"] = six.u("ffbo.gfx.receive_partial." +
                                          str(na_res['user']))
                nk_res = yield self.call(rpc_calls['nk'],
                                         na_res,
                                         options=CallOptions(
                                             on_progress=on_progress,
                                             timeout=30000000000))

                # Did we use a progressive result
                if nk_res is None:
                    nk_res = progressive_result

                if nk_res == {}:
                    feedback = feedback_error(
                        request, "Neurokernel returned zero results")
                    returnValue(feedback)
                #details.progress("Circuit execution completed")

            except ValueError as e:
                self.log.warn(
                    "{server_id} failed to start simulation: {query}}",
                    server_id=rpc_calls['nk'],
                    query=na_res)
                feedback = feedback_error(request, "Execution Failure", e)
                returnValue(feedback)

            except ApplicationError as e:
                self.log.warn(
                    "Processor failed to access NK server {server_id}, with error {e}",
                    server_id=rpc_calls['nk'],
                    e=e)

                feedback = feedback_error(request, "Execution Failure", e)
                returnValue(feedback)

            returnValue(nk_res)
            # NOTE: returnValue() raises, so the visualisation block below is
            # unreachable dead code.
            #details.progress("Neurokernel Started Execution")

            try:
                vis_res = {
                    'success': {
                        'result': {
                            'user': request['user'],
                            'type': 'json'
                        },
                        'user': request['user'],
                        'reset': False
                    }
                }

                if 'error' in vis_res:
                    raise RuntimeError(
                        'Visualisation component was unable to complete the request '
                    )

            except Exception as e:
                self.log.warn(
                    "Processor failed to access complete visualisation")
                feedback = feedback_error(request,
                                          "Unable to create on visualiser", e)
                returnValue(feedback)

            #details.progress("Visualisation: Parsed result")

            self.log.debug(
                "Process_NK_Request complete with request: {request} and result: {result}",
                request=request,
                result=vis_res)

            returnValue(vis_res)

        yield self.register(process_nk_request,
                            six.u('ffbo.processor.nk_execute'),
                            RegisterOptions(details_arg='details'))
        self.log.debug("procedure process_nk_request registered")

        # Receive feedback from the user
        @inlineCallbacks
        def log_feedback(user, feedback_message, details):
            try:
                user_details = yield self.call(
                    six.u('ffbo.auth_server.get_user'), details.caller)
                template = "-" * 70 + "Feedback recieved\n username: {user} \n feedback_message: {feedback}\n"
                yield self.feedback_log.info(
                    template.format(user=user_details['username'],
                                    feedback=feedback_message))
            except Exception as e:
                print(e)
                self.log.warn("Failed to log user feedback: {user} {feedback}",
                              user=user,
                              feedback=feedback_message)

                returnValue(False)
            returnValue(True)

        yield self.register(log_feedback, six.u('ffbo.server.log_feedback'),
                            RegisterOptions(details_arg='details'))
        self.log.debug("registered ffbo.server.log_feedback")

        # SUBSCRIBE to session leaving events to check if a server needs removing
        @inlineCallbacks
        def on_session_leave(session_id):
            self.log.info("event for 'on_session_leave' received for session: " \
                    "{session}", session=session_id)
            for stype in directory:
                if str(session_id) in directory[stype]:
                    self.log.info("Server disconnected. removing session" \
                            "{session} of type {stype}", session=session_id, stype=stype)
                    if email_dict is not None:
                        email_res = None
                        title = "Server down"
                        text = "disconnected session {session} of type {stype}".format(
                            session=session_id, stype=stype)
                        self.log.info("Sending email about disconnected session" \
                            "{session} of type {stype}",
                            session=session_id,
                            stype=stype)
                        try:
                            email_res = send_email(text, title)
                        except Exception as e:
                            self.log.info(
                                "Sending email failed with error {error}",
                                error=str(e))
                        if email_res is not None:
                            self.log.info("Tried to send out email... {msg}",
                                          msg=email_res)
                    del directory[stype][str(session_id)]
                    yield self.publish(six.u('ffbo.server.update'), directory)

        yield self.subscribe(on_session_leave, six.u('wamp.session.on_leave'))
        self.log.debug("subscribed to topic 'wamp.session.on_leave'")

        # REGISTER a procedure for registering a new server
        @inlineCallbacks
        def register_new_server(server_id, server_type, server_config):
            if server_type not in directory:
                print('not in directory')
                print(server_type)
                print(directory)
                returnValue({})

            if server_type != 'nlp':
                if not server_config.get('autobahn', '0.0.0').split(
                        '.')[0] == autobahn.__version__.split('.')[0]:
                    self.log.info(
                        'autobahn version mismatch {server_type}, component autobahn version is {c_version}, and processor autobahn version is {p_version}'
                        .format(server_type=server_type,
                                c_version=server_config.get(
                                    'autobahn', '0.0.0'),
                                p_version=autobahn.__version__))
                    returnValue(
                        json.dumps(
                            'Autobahn version mismatch. Processor has version {}, and you have version {}'
                            .format(autobahn.__version__,
                                    server_config.get('autobahn', 'unknown'))))
            self.log.info("{server_type} server registered with name " \
                            "{server_name} and id {server_id} {dataset}",
                            server_name=server_config.get('name'),
                            server_id=server_id, \
                            server_type=server_type,
                            dataset = 'for dataset {}'.format(server_config.get('dataset')) \
                                      if 'dataset' in server_config else '')

            directory[server_type][str(server_id)] = server_config

            # PUBLISH updated server list after a new server registration
            yield self.publish(six.u('ffbo.server.update'), directory)
            returnValue(json.dumps({'server': directory}))

        yield self.register(register_new_server, six.u('ffbo.server.register'))
        self.log.debug("procedure register_new_server registered")

        @inlineCallbacks
        def nlp_query(server_id, query, user=None):
            """
            Call the nlp module for a nlp translation
            Input

                rpc_call: the WAMP registered RPC call
                query   : the natural language to translate
                user    : used to record state, can be None for stateless

            Return:
                A success or error dictionary
            """

            try:
                self.log.info(
                    "nlp_query() accessed on NLP server {server_id} with query: {query}",
                    server_id=server_id,
                    query=query)
                nlp_res = yield self.call("ffbo.nlp.query." + str(server_id),
                                          query)
                self.log.info(
                    "nlp_query() accessed on NLP server {server_id} with result: {result}",
                    server_id=server_id,
                    result=nlp_res)

                # First Exit Point: NLP parsing has failed
                if nlp_res == {}:
                    raise ValueError('NLP parsing could not parse string')

                returnValue(feedback_success(query, nlp_res))

            except ApplicationError as e:
                # ignore errors due to the frontend not yet having
                # registered the procedure we would like to call
                raise e
                # NOTE: `raise e` exits the handler; the code below is unreachable.
                self.log.warn(
                    "nlp_query() failed to access NLP server {server_id}, with error {e}",
                    e=e)

                feedback = feedback_error(rpc_call, "Unable to contact server",
                                          e)
                returnValue(feedback)
            except ValueError as e:
                self.log.warn(
                    "nlp_query() accessed on {server_id} failed to parse query: {query}}",
                    query=query)
                feedback = feedback_error(
                    query, "NLP parsing could not parse string", e)
                returnValue(feedback)

        yield self.register(nlp_query, six.u('ffbo.processor.nlp_query'))
        self.log.debug("procedure ffbo.processor.nlp_query registered")

        @inlineCallbacks
        def neuroarch_query(request, details=None):
            """
            Call the neuroarch_query module with a neuroarch-json object
            The request should have
                user:       session_id for client
                server:     session_id for na server
                query:  neuroarch json query object
            """
            try:
                request['user'] = details.caller
                user_details = yield self.call(
                    six.u('ffbo.auth_server.get_user'), details.caller)
                if user_details: request['username'] = user_details['username']
                self.log.info(
                    "neuroarch_query() accessed with request: {request}",
                    request=request)

                progressive_result = {}

                def on_progress(p):
                    progressive_result.update(p)

                #print request
                result = yield self.call(
                    six.u('ffbo.na.query.' + str(request['server'])),
                    request,
                    options=CallOptions(on_progress=on_progress))
                self.log.info("na_query returned with result")

                if progressive_result:
                    result = progressive_result

                # Catch no results and return
                if result == "{}":
                    raise ValueError('Neuroarch returned zero results')
                returnValue(feedback_success(request, (result)))

            except ValueError as e:
                feedback = feedback_error(request,
                                          "Neuroarch returned zero results", e)
                returnValue(feedback)

            except ApplicationError as e:
                raise e
                feedback = feedback_error(server_id,
                                          "Unable to contact server", e)
                returnValue(feedback)

        yield self.register(neuroarch_query,
                            six.u('ffbo.processor.neuroarch_query'),
                            RegisterOptions(details_arg='details'))
        self.log.info("procedure ffbo.processor.neuroarch_query registered")

        @inlineCallbacks
        def flycircuit_neuron_query(neuron, details=None):
            self.log.info("Fetch the flycircuit database for neuron: {neuron}",
                          neuron=neuron)
            try:
                res = self.fdb.parse_neuron(neuron)
            except Exception as e:
                print(e)
                res = feedback_error(neuron,
                                     "Unable to fetch flycircuit database", e)
                yield res
            returnValue(res)

        yield self.register(
            flycircuit_neuron_query, six.u("ffbo.processor.fetch_flycircuit"),
            RegisterOptions(details_arg='details', concurrency=4))
        self.log.info("procedure ffbo.processor.fetch_flycircuit registered")

        @inlineCallbacks
        def neurokernel_query(request):
            """
            Call the neuroarch_query module with a neuroarch-json object
            The request should have
                user:       session_id for client
                server:     session_id for na server
                query:  neuroarch json query object
            """
            try:
                progressive_result = {}

                def on_progress(p):
                    progressive_result.update(p)

                result = yield self.call(
                    six.u('ffbo.nk.launch.' + str(request['server'])), request)
                self.log.info("nk_query returned with result")

                if result is None:
                    result = progressive_result

                # Catch no results and return
                if result == "{}":
                    raise ValueError('Neurokernel returned zero results')
                returnValue(feedback_success(request, (result)))

            except ValueError as e:
                feedback = feedback_error(request,
                                          "Neurokernel returned zero results",
                                          e)
                returnValue(feedback)

            except ApplicationError as e:
                raise e
                feedback = feedback_error(server_id,
                                          "Unable to contact server", e)
                returnValue(feedback)

        # REGISTER a procedure for relaying current servers to single ui
        def relay_server_information():
            self.log.debug("relay_server_information rpc called")
            return directory

        yield self.register(relay_server_information,
                            six.u('ffbo.processor.server_information'))
        self.log.debug("ffbo.processor.server_information registered")
Example #27
    failure.trap(ServiceNameError)
    return failure


def response_err(failure):
    failure.trap(ResponseError)
    return failure


def nargs_err(failure):
    failure.trap(ValueError)
    return InvalidArgumentsErrorCode('Foo', str(failure.value))


path = os.path.join(paths.log_dir, 'pps.log.json')
obs = jsonFileLogObserver(io.open(path, 'w'))
logger = Logger(observer=obs)
# logger = Logger()

regex = re.compile(r'^(?P<command>\w+) {0,1}(?P<args>.*)')


class ServiceProtocol(Protocol):
    def __init__(self, *args, **kw):
        # super(ServiceProtocol, self).__init__(*args, **kw)
        self._services = {}
        self._cmd_delim = ' '
        self._arg_delim = ','
        self.debug = logger.debug
        self.warning = logger.warn
        self.info = logger.info
Example #28
def getJsonFileObserver():
    _get_or_create_user_log_dir()
    logfile = DailyLogFile("ursula.log.json", USER_LOG_DIR)
    observer = jsonFileLogObserver(outFile=logfile)
    return observer
Example #29
class ConsultingReplyApi(SQLite3JsonBase):
    log = Logger(observer=jsonFileLogObserver(io.open("log.json", "a")),
                 namespace="ConsultingReply")

    table_fields = ' '.join(get_sqlite3_column_name('ConsultingReply'))
    table_fields_getter = attrgetter(*table_fields.split())
    row_named_record = collections.namedtuple(
        'ConsultingReply',
        ' '.join([SQLite3JsonBase.table_common_fields, table_fields]))

    def __init__(self):
        self.table = Table('ConsultingReply')

    @refine_twisted_web_request
    @defer.inlineCallbacks
    def consulting_reply_get_all(self, request):
        resp = yield self.search_service_execute(
            ModelRequest(*get_model_request_args(request.args),
                         item=ConsultingReply(uid=request.args.get(
                             'uid', [''])[0],
                                              consulting_uid=request.args.get(
                                                  'consulting_uid', [''])[0])))
        return jsonpickle.encode(resp, unpicklable=False)

    @refine_twisted_web_request
    @defer.inlineCallbacks
    def consulting_reply_get(self, request, uid):
        resp = yield self.search_service_execute(
            ModelRequest(*get_model_request_args(request.args),
                         item=ConsultingReply(uid=uid)))
        return jsonpickle.encode(resp, unpicklable=False)

    @refine_twisted_web_request
    @defer.inlineCallbacks
    def consulting_reply_create(self, request):
        content = json.load(request.content)
        resp = yield self.update_service_execute(
            ModelRequest(*get_model_request_args(request.args),
                         item=ConsultingReply(**content)))
        return jsonpickle.encode(resp, unpicklable=False)

    @refine_twisted_web_request
    @defer.inlineCallbacks
    def consulting_reply_delete(self, _, uid):
        resp = yield self.delete_service_execute(ModelDeleteRequest(uid))
        return jsonpickle.encode(resp, unpicklable=False)

    @refine_twisted_web_request
    @defer.inlineCallbacks
    def consulting_reply_update(self, request, uid):
        content = json.load(request.content)
        content['uid'] = uid
        resp = yield self.update_service_execute(
            ModelRequest(*get_model_request_args(request.args),
                         item=ConsultingReply(**content)))
        return jsonpickle.encode(resp, unpicklable=False)

    @staticmethod
    def create_response(**kwd):
        return ModelResponse(ConsultingReply)

    @staticmethod
    def set_sortby_statement(q, req):
        q = super(ConsultingReplyApi,
                  ConsultingReplyApi).set_sortby_statement(q, req)
        q = q.orderby('reg_date', order=enums.Order.desc)
        return q

    def set_keyword_statement(self, q, req):
        if req.item.consulting_uid:
            q = q.where(self.table.consulting_uid == req.item.consulting_uid)
        if req.item.author:
            q = q.where(self.table.author == req.item.author)
        return q
Example #30
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
from notifications.notifications import NotificationsManager
from client_ui.connection_screen import ConnectionScreen
from client_ui.lobby_screen import LobbyScreen
from client_ui.chat_widget import ChatWidget

import core.predef as predef
from core.predef import pycard_protocol as pp
from sample_games.retard_game import retard_game


log = Logger(
    observer=jsonFileLogObserver(io.open("client.json", "a")),
    namespace="client"
)

__author__ = 'Anton Korobkov'


class EchoClient(protocol.Protocol):

    def connectionMade(self):
        self.factory.app.on_connection(self.transport)

    def dataReceived(self, data):
        messages = data.split(pp.message_delimiter)
        for m in messages:
            if m:
Ejemplo n.º 31
0
    failure.trap(ServiceNameError)
    return failure


def response_err(failure):
    failure.trap(ResponseError)
    return failure


def nargs_err(failure):
    failure.trap(ValueError)
    return InvalidArgumentsErrorCode('Foo', str(failure.value))


path = os.path.join(paths.log_dir, 'pps.log.json')
obs = jsonFileLogObserver(io.open(path, 'w'))
logger = Logger(observer=obs)
# logger = Logger()

regex = re.compile(r'^(?P<command>\w+) {0,1}(?P<args>.*)')


class ServiceProtocol(Protocol):
    def __init__(self, *args, **kw):
        # super(ServiceProtocol, self).__init__(*args, **kw)
        self._services = {}
        self._cmd_delim = ' '
        self._arg_delim = ','
        self.debug = logger.debug
        self.warning = logger.warn
        self.info = logger.info
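
Because the observer above writes structured JSON records, the log can be replayed programmatically. A minimal sketch, reusing the path variable defined above:

import io

from twisted.logger import eventsFromJSONLogFile, formatEvent

# Re-read the events that jsonFileLogObserver serialized to pps.log.json.
with io.open(path, 'r') as f:
    for event in eventsFromJSONLogFile(f):
        print(formatEvent(event))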
Ejemplo n.º 32
0
class ConsultingApi(SQLite3JsonBase):
    log = Logger(observer=jsonFileLogObserver(io.open("log.json", "a")), namespace="Consulting")

    table_fields = ' '.join(get_sqlite3_column_name('Consulting'))
    table_fields_getter = attrgetter(*table_fields.split())
    row_named_record = collections.namedtuple('Consulting',
                                              ' '.join([SQLite3JsonBase.table_common_fields, table_fields]))

    def __init__(self):
        self.table = Table('Consulting')

    @refine_twisted_web_request
    @defer.inlineCallbacks
    def consulting_get_all(self, request):
        resp = yield self.search_service_execute(
                ModelRequest(*get_model_request_args(request.args),
                             item=Consulting(uid=request.args.get('uid', [''])[0],
                                             subject=request.args.get('subject', [''])[0],
                                             body=request.args.get('body', [''])[0],
                                             author=request.args.get('author', [''])[0])))
        yield self.set_reply_item(resp.items)
        return jsonpickle.encode(resp, unpicklable=False)

    @refine_twisted_web_request
    @defer.inlineCallbacks
    def consulting_get(self, request, uid):
        resp = yield self.search_service_execute(
                ModelRequest(*get_model_request_args(request.args), item=Consulting(uid=uid)))
        yield self.set_reply_item(resp.items)
        return jsonpickle.encode(resp, unpicklable=False)

    @refine_twisted_web_request
    @defer.inlineCallbacks
    def consulting_create(self, request):
        content = json.load(request.content)
        item = Consulting(**content)
        item.clicked = 0
        resp = yield self.update_service_execute(
                ModelRequest(*get_model_request_args(request.args), item=item))
        return jsonpickle.encode(resp, unpicklable=False)

    @refine_twisted_web_request
    @defer.inlineCallbacks
    def consulting_delete(self, _, uid):
        resp = yield self.delete_service_execute(ModelDeleteRequest(uid))
        return jsonpickle.encode(resp, unpicklable=False)

    @refine_twisted_web_request
    @defer.inlineCallbacks
    def consulting_update(self, request, uid):
        content = json.load(request.content)
        content['uid'] = uid
        resp = yield self.update_service_execute(
                ModelRequest(*get_model_request_args(request.args), item=Consulting(**content)),
                module='consulting_update')

        return jsonpickle.encode(resp, unpicklable=False)

    def set_keyword_statement(self, q, req):
        c = self.set_like_keyword_statement(None, req.item.subject, self.table.subject)
        c = self.set_like_keyword_statement(c, req.item.body, self.table.body)
        c = self.set_like_keyword_statement(c, req.item.author, self.table.author)
        if c is not None:
            q = q.where(c)
        return q

    @staticmethod
    def set_sortby_statement(q, req):
        q = super(ConsultingApi, ConsultingApi).set_sortby_statement(q, req)
        q = q.orderby('reg_date', order=enums.Order.desc)
        return q

    @staticmethod
    def create_response(**kwd):
        return ModelResponse(Consulting)

    @defer.inlineCallbacks
    def set_reply_item(self, items):
        api = ConsultingReplyApi()
        o = TwistedRequestWrapper()
        for item in items:
            item.reply = []
            o.args['consulting_uid'] = [item.uid]
            api_ret = yield api.consulting_reply_get_all(o)
            api_ret = json.loads(api_ret)
            item.reply.extend(api_ret['items'])
Ejemplo n.º 33
0
def get_observers(main_config, twistd_user, log_group):
    log_debug = main_config.get_bool("debug", False)
    log_to_file = main_config.get_bool("log_file", False)
    log_stdout = main_config.get_bool("log_stdout", False)
    log_syslog = main_config.get_bool("log_syslog", False)
    log_auth_events = main_config.get_bool("log_auth_events", False)
    log_sso_events = main_config.get_bool("log_sso_events", True)

    # Log to file if nothing else is turned on
    log_to_file = log_to_file or not (log_syslog or log_stdout)

    log_dir = main_config.get_str("log_dir", "log")
    log_max_size = main_config.get_int("log_max_size", 10 * (1 << 20))
    log_max_files = main_config.get_int("log_max_files", 6)
    if log_max_files == 0:
        # Pass None explicitly for "no limit"; 0 would mean no rotated
        # logfiles are kept at all.
        log_max_files = None

    observers = []
    if log_to_file:
        log_txt = create_log_file(
            "authproxy.log",
            log_dir,
            log_max_size,
            log_max_files,
            twistd_user,
            log_group,
        )
        text_observer = textFileLogObserver(log_txt)
        text_filter = FilteringLogObserver(text_observer, [only_default_log_predicate])
        observers.append(text_filter)

    if log_stdout:
        stdout_observer = textFileLogObserver(sys.stdout)
        filtered_stdout = FilteringLogObserver(
            stdout_observer, [only_default_log_predicate]
        )
        observers.append(filtered_stdout)

    if log_syslog:
        if syslog is None:
            raise config_error.ConfigError("syslog not supported on Windows")

        facility_dict = {
            "LOG_KERN": pySyslog.LOG_KERN,
            "LOG_USER": pySyslog.LOG_USER,
            "LOG_MAIL": pySyslog.LOG_MAIL,
            "LOG_DAEMON": pySyslog.LOG_DAEMON,
            "LOG_AUTH": pySyslog.LOG_AUTH,
            "LOG_LPR": pySyslog.LOG_LPR,
            "LOG_NEWS": pySyslog.LOG_NEWS,
            "LOG_UUCP": pySyslog.LOG_UUCP,
            "LOG_CRON": pySyslog.LOG_CRON,
            "LOG_SYSLOG": pySyslog.LOG_SYSLOG,
            "LOG_LOCAL0": pySyslog.LOG_LOCAL0,
            "LOG_LOCAL1": pySyslog.LOG_LOCAL1,
            "LOG_LOCAL2": pySyslog.LOG_LOCAL2,
            "LOG_LOCAL3": pySyslog.LOG_LOCAL3,
            "LOG_LOCAL4": pySyslog.LOG_LOCAL4,
            "LOG_LOCAL5": pySyslog.LOG_LOCAL5,
            "LOG_LOCAL6": pySyslog.LOG_LOCAL6,
            "LOG_LOCAL7": pySyslog.LOG_LOCAL7,
        }
        syslog_facilitystr = main_config.get_str("syslog_facility", "LOG_USER")
        syslog_facility = facility_dict.get(syslog_facilitystr, None)
        if syslog_facility is None:
            raise config_error.ConfigError(
                "Unknown syslog_facility: {0}".format(syslog_facilitystr)
            )

        syslog_observer = syslog.SyslogObserver("Authproxy", facility=syslog_facility)
        wrapped_syslog_observer = LegacyLogObserverWrapper(syslog_observer.emit)
        syslog_filtering_observer = FilteringLogObserver(
            wrapped_syslog_observer, [only_default_log_predicate],
        )
        observers.append(syslog_filtering_observer)

    if log_debug:
        debug_predicate = LogLevelFilterPredicate(LogLevel.debug)
        for i, observer in enumerate(observers):
            observers[i] = FilteringLogObserver(observer, [debug_predicate])

    if log_auth_events:
        auth_log_file = create_log_file(
            "authevents.log",
            log_dir,
            log_max_size,
            log_max_files,
            twistd_user,
            log_group,
        )
        auth_observer = jsonFileLogObserver(auth_log_file, "")
        observers.append(FilteringLogObserver(auth_observer, [auth_type_predicate]))

    if log_sso_events:
        sso_log_file = create_log_file(
            "ssoevents.log",
            log_dir,
            log_max_size,
            log_max_files,
            twistd_user,
            log_group,
        )
        sso_observer = jsonFileLogObserver(sso_log_file, "")
        observers.append(FilteringLogObserver(sso_observer, [sso_type_predicate]))

    return observers
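
The returned list is meant to be handed to Twisted's log machinery in one call. A minimal usage sketch, assuming the main_config object from the surrounding module (the None arguments are placeholders):

from twisted.logger import globalLogBeginner

# Route all subsequent log events through the configured observers.
globalLogBeginner.beginLoggingTo(
    get_observers(main_config, twistd_user=None, log_group=None))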
Ejemplo n.º 34
0
import io

from ad_hoc import AdHoc

from twisted.logger import globalLogPublisher, jsonFileLogObserver

globalLogPublisher.addObserver(
    jsonFileLogObserver(io.open("log.json", "a")))

AdHoc(3, 4).logMessage()
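
The ad_hoc module itself is not shown. A plausible minimal sketch of it, assuming AdHoc simply stores the two numbers and owns a class-level Logger (names and message format are illustrative):

# ad_hoc.py -- hypothetical companion module for the snippet above.
from twisted.logger import Logger


class AdHoc(object):
    log = Logger(namespace="ad_hoc")

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def logMessage(self):
        # Emits a structured event; the jsonFileLogObserver registered
        # above serializes it into log.json.
        self.log.info("a={a}, b={b}", a=self.a, b=self.b)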
Ejemplo n.º 35
0
from sys import argv

import base64
import io
import json
import os

from zope.interface import provider

from twisted.logger import (ILogObserver, Logger, formatEvent,
                            globalLogPublisher, jsonFileLogObserver)

__version__ = "0.1.2"


@provider(ILogObserver)
def simpleObserver(event):
    print(formatEvent(event))


log = Logger(observer=jsonFileLogObserver(io.open("log.json", "a")),
             namespace="ChatServer")
globalLogPublisher.addObserver(simpleObserver)


def parse_msg0(msg):
    # Messages look like "<4-char prefix><joining>;<key>=<color>":
    # drop the prefix from the first field, keep the value of the second.
    head, _color = msg.split(';')
    _joining = head[4:].strip()
    _color = _color.split('=')[1]
    return _joining, _color


def parse_msg1(msg, server, *args):
    if ';' in msg:
        cmd_chain = []
        for request in msg.split(';'):
Ejemplo n.º 36
0
import io

from twisted.logger import (FilteringLogObserver, LogLevel,
                            LogLevelFilterPredicate, jsonFileLogObserver)


def hendrixObserver(path=DEFAULT_LOG_FILE, log_level=LogLevel.warn):
    # DEFAULT_LOG_FILE is defined elsewhere in hendrix (not shown here).
    # Append JSON events to `path`, letting only log_level and above through.
    json_observer = jsonFileLogObserver(io.open(path, 'a'))
    return FilteringLogObserver(json_observer, [
        LogLevelFilterPredicate(log_level),
    ])
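
Wiring the observer in might look like the following sketch (the path argument is illustrative):

from twisted.logger import globalLogPublisher

# Only events at warn level or above reach the JSON log file.
globalLogPublisher.addObserver(hendrixObserver('hendrix.log.json'))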