Example #1
    def test_singleton_serializer(self):
        from kasaya.core.protocol import Serializer
        s1 = Serializer(silentinit=True)
        s2 = Serializer(silentinit=True)
        self.assertIs(
            s1, s2,
            "Singleton not working, different instances of Serializer class")
Example #2
    def __init__(self, ownid):
        self.is_running = True
        self.own_ip = None
        self.ID = ownid
        self._msgdb = {}
        self.port = settings.BROADCAST_PORT
        # UDP socket bound to the broadcast port, with broadcasting enabled
        self.SOCK = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.SOCK.bind(('', settings.BROADCAST_PORT))
        self.SOCK.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.serializer = Serializer()
Example #3
def send_and_receive(address, message, timeout=10):
    """
    address - full destination address (e.g. tcp://127.0.0.1:1234)
    message - message payload (will be serialized automatically)
    timeout - time in seconds after which TimeoutError will be raised
    """
    serializer = Serializer()  # <-- serializer is a singleton

    typ, addr, so1, so2 = decode_addr(address)
    SOCK = socket.socket(so1, so2)
    SOCK.connect(addr)

    # send...
    _serialize_and_send(SOCK, serializer, message, resreq=True)

    # receive response
    try:
        if timeout is None:
            res, resreq = _receive_and_deserialize(SOCK, serializer)
        else:
            # raise exception after timeout (socket is closed in "finally")
            try:
                # note: "ReponseTimeout" (sic) is the identifier defined in kasaya
                with gevent.Timeout(timeout, exceptions.ReponseTimeout):
                    res, resreq = _receive_and_deserialize(SOCK, serializer)
            except exceptions.ReponseTimeout:
                raise exceptions.ReponseTimeout("Response timeout")
    finally:
        SOCK.close()
    return res
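
A hypothetical call site for the function above; the address, the payload keys, and the timeout value are placeholders, not values from kasaya:

# hypothetical usage; address and payload keys are assumptions
try:
    reply = send_and_receive(
        "tcp://127.0.0.1:5000",      # placeholder address
        {"message": "ping"},         # payload structure is an assumption
        timeout=5,
    )
except exceptions.ReponseTimeout:    # "ReponseTimeout" (sic) as in kasaya
    reply = None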
Example #4
    def __init__(self, worker_id):
        # used in process of selecting jobs
        self.own_async_id = worker_id
        # database setup
        dbb = settings.ASYNC_DB_BACKEND
        if dbb == "sqlite":
            from db.sqlite import SQLiteDatabase
            self.DB = SQLiteDatabase(worker_id)
        else:
            raise Exception("Unknown database backend defined in configuration: %r" % dbb)
        # serializer / deserializer
        self.serializer = Serializer()
        # caller
        self.PROXY = RawProxy()
        self._processing = True

        self.SEMA = Semaphore()
Example #5
    def __init__(self, address, maxport=None, backlog=50):
        # session id is received from the connecting side for each connection;
        # by default it is unset and unused. When set, it will be sent in an
        # event after the connection is lost.
        self._msgdb = {}
        self.__listening_on = []

        # bind to socket
        self.socket_type, addr, so1, so2 = decode_addr(address)
        sock = socket.socket(so1, so2)
        if self.socket_type == "ipc":
            # remove a stale socket file left over from a previous run
            if os.path.exists(addr):
                os.unlink(addr)
        else:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setblocking(1)

        if self.socket_type == "ipc" or (maxport is None):
            # single socket binding
            sock.bind(addr)
        else:
            # binding to one from range of available ports
            while True:
                try:
                    sock.bind(addr)
                    break  # success
                except socket.error as e:
                    if e.errno == errno.EADDRINUSE:
                        ip, port = addr
                        if port < maxport:
                            addr = (ip, port + 1)
                            continue
                        else:
                            # whole port range is used
                            raise
                    else:
                        # other socket errors...
                        raise

        sock.listen(backlog)

        # stream server from gevent
        self.SERVER = StreamServer(sock, self.connection_handler)

        # current address
        if self.socket_type == "tcp":
            ip, self.port = self.SERVER.address
            self.ip = self.__ip_translate(ip)
            self.address = "tcp://%s:%i" % (self.ip, self.port)

        elif self.socket_type == "ipc":
            self.address = "ipc://%s" % addr

        # serialization
        self.serializer = Serializer()
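
A hypothetical construction of this server class. Its real name is not visible in the snippet, so MessageLoop below is an assumption; with maxport set, the constructor binds to the first free port in the given range:

# hypothetical usage; the class name MessageLoop is an assumption
server = MessageLoop("tcp://0.0.0.0:5000", maxport=5010)
print(server.address)            # actual bound address, e.g. tcp://192.168.1.10:5003
server.SERVER.serve_forever()    # run the gevent StreamServer accept loop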
Example #6
    def __init__(self, address, autoreconnect=False, sessionid=None):
        """
        address - destination address in format: tcp://ipnumber:port
        autoreconnect - if the connection fails or is unavailable, keep
            trying to connect in the background until it succeeds
        """
        self.__working = False
        self.__recon = None
        self._address = address
        self.autoreconnect = autoreconnect
        self.__sessionid = sessionid
        self.serializer = Serializer()
        # connect now, or start the background reconnecting process
        if self.autoreconnect:
            self.__start_auto_reconnect()
        else:
            self._connect()
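
For illustration, a minimal sketch of what a background reconnect loop such as __start_auto_reconnect might look like under gevent. This is a guess, not kasaya's actual implementation; it assumes _connect raises socket.error on failure and that this method sits in the same class body:

    def __start_auto_reconnect(self):
        # hypothetical sketch: retry _connect in a background greenlet
        def reconnect_loop():
            while not self.__working:
                try:
                    self._connect()
                    self.__working = True
                except socket.error:
                    gevent.sleep(1)   # back off before the next attempt
        self.__recon = gevent.spawn(reconnect_loop)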
Example #7
class UDPLoop(object):
    def __init__(self, ownid):
        self.is_running = True
        self.own_ip = None
        self.ID = ownid
        self._msgdb = {}
        self.port = settings.BROADCAST_PORT
        self.SOCK = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.SOCK.bind(('', settings.BROADCAST_PORT))
        self.SOCK.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.serializer = Serializer()

    def set_own_ip(self, ip):
        self.own_ip = ip

    def stop(self):
        """
        Request warm stop, exits loop after finishing current task
        """
        self.is_running = False

    def close(self):
        self.SOCK.close()

    def register_message(self, message, func):
        self._msgdb[message] = func

    def loop(self):
        while self.is_running:
            # receive data
            msgdata, addr = self.SOCK.recvfrom(4096)
            # skip own broadcast messages
            if addr[0] == self.own_ip:
                continue
            # deserialize
            try:
                msgdata, repreq = self.serializer.deserialize(msgdata)
            except NotOurMessage:
                continue
            except Exception:
                LOG.warning("Message from broadcast deserialisation error")
                LOG.debug(
                    "Broken message body dump in hex (only first 1024 bytes):\n%s"
                    % msgdata[:1024].encode("hex"))
                continue

            # skip own broadcast received via another interface
            try:
                if msgdata['__sid__'] == self.ID:
                    continue
            except KeyError:
                continue

            # message type
            try:
                msg = msgdata['message']
            except KeyError:
                LOG.debug("Decoded message is incomplete. Message dump: %s" %
                          repr(msgdata))
                continue
            # find handler
            try:
                handler = self._msgdb[msg]
            except KeyError:
                # unknown message - log it and skip
                LOG.warning("Unknown message received [%s]" % msg)
                LOG.debug("Message body dump:\n%s" % repr(msgdata))
                continue
            # run handler
            try:
                handler(msgdata)
            except Exception as e:
                # log exception details
                excname = e.__class__.__name__
                # traceback
                tback = traceback.format_exc()
                try:
                    tback = unicode(tback, "utf-8")
                except Exception:
                    tback = repr(tback)
                # error message
                errmsg = e.message
                try:
                    errmsg = unicode(errmsg, "utf-8")
                except Exception:
                    errmsg = repr(errmsg)
                # log & clean
                LOG.error(
                    "Exception [%s] when processing message [%s]. Message: %s."
                    % (excname, msg, errmsg))
                LOG.debug("Message dump: %s" % repr(msgdata))
                LOG.debug(tback)
                del excname, tback, errmsg

    def broadcast_message(self, msg):
        """
        Wysłanie komunikatu do wszystkich odbiorców w sieci
        """
        msg['__sid__'] = self.ID
        msg = self.serializer.serialize(msg, resreq=False)
        self.SOCK.sendto(msg, ('<broadcast>', self.port))
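
A hypothetical way to drive this loop: register a handler, run the receive loop in a background greenlet, and broadcast a message. The message name and handler are placeholders, and gevent's socket monkey patching is assumed so that recvfrom yields:

# hypothetical usage; message names and keys are placeholders
def on_ping(msgdata):
    print("ping received: %r" % msgdata)

udp = UDPLoop("node-1")
udp.register_message("ping", on_ping)
g = gevent.spawn(udp.loop)                   # receive loop in background
udp.broadcast_message({"message": "ping"})   # __sid__ is added internally
gevent.sleep(1)
udp.stop()
udp.close()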
Example #8
class AsyncWorker(object):

    def __init__(self, worker_id):
        # used in process of selecting jobs
        self.own_async_id = worker_id
        # database setup
        dbb = settings.ASYNC_DB_BACKEND
        if dbb == "sqlite":
            from db.sqlite import SQLiteDatabase
            self.DB = SQLiteDatabase(worker_id)
        else:
            raise Exception("Unknown database backend defined in configuration: %r" % dbb)
        # serializer / deserializer
        self.serializer = Serializer()
        # caller
        self.PROXY = RawProxy()
        self._processing = True

        self.SEMA = Semaphore()


    def get_database_id(self):
        return self.DB.CONF['databaseid']

    def close(self):
        self._processing = False
        self.DB.close()


    # task processing loop

    def task_eater(self):
        rectime = time.time()
        while self._processing:
            taskproc = self.process_next_job()
            if taskproc:
                gevent.sleep()
            else:
                gevent.sleep(2)
            if rectime < time.time():
                g = gevent.Greenlet(self.check_lost_tasks)
                g.start()
                rectime = time.time() + settings.ASYNC_RECOVERY_TIME
                gevent.sleep()


    def start_eat(self):
        g = gevent.Greenlet(self.task_eater)
        g.start()


    # task scheduling


    def task_add_new(self, task, context, args, kwargs, ign_result=False):
        """
        Register task in database.
            task - task name with worker
            context - context data
            args, kwargs - function arguments
            ign_result - ignore result flag (True / False)
        """
        args = (args, kwargs)
        taskid = self.DB.task_add(
            taskname=task,
            time=time.time(),
            args=self.serializer.data_2_bin(args),
            context=self.serializer.data_2_bin(context),
            ign_result=ign_result,
        )
        #self._check_next()
        return taskid


    def process_task(self, taskid):
        """
        Process job with given ID
        """
        LOG.debug("Processing task %i" % taskid)
        # get task from database
        data = self.DB.task_start_process(taskid)
        if data is None:
            return
        # unpack data
        data['args'], data['kwargs'] = self.serializer.bin_2_data(data['args'])
        data['context'] = self.serializer.bin_2_data(data['context'])
        # send task to realize
        try:
            result = self.PROXY.sync_call(
                data['task'],
                data['context'],
                data['args'],
                data['kwargs'],
            )

        except exceptions.SerializationError:
            # Exception raised when serialization or deserialization fails.
            # If this exception occurs here, we can't retry the task, because
            # the data stored in the async database is probably corrupted (for
            # example the async daemon died or was stopped, and the current
            # configuration uses different serialization methods). We mark the
            # task as permanently broken (it will never be retried).
            self.DB.task_fail_permanently(taskid)
            return

        except exceptions.ServiceNotFound:
            # Raised when the destination service is currently unavailable.
            # The task will be retried later.
            self.DB.task_error_and_delay(taskid, settings.ASYNC_ERROR_TASK_DELAY)
            return

        except exceptions.ServiceBusException:
            # any other internal exception bumps the error counter
            self.DB.task_error_and_delay(taskid, settings.ASYNC_ERROR_TASK_DELAY)
            return


        if result['message'] == messages.ERROR:
            # the task produced an error: not a kasaya exception but the
            # task's own error - it's not our fault ;-)

            # get the task context, or create it if it doesn't exist
            ctx = data['context']
            if ctx is None:
                ctx = {}

            # increase the error count
            errcnt = ctx.get("err_count", 0) + 1
            ctx['err_count'] = errcnt

            # if the error counter is limited, has the limit been reached?
            maxerr = ctx.get("err_max", None)
            if maxerr is not None:
                no_retry = errcnt >= maxerr
            else:
                no_retry = False

            data['context'] = ctx
            if no_retry:
                self.DB.task_fail_permanently(
                    taskid,
                    settings.ASYNC_ERROR_TASK_DELAY,
                    self.serializer.data_2_bin(result) )
                self.DB.task_store_context(
                    taskid,
                    self.serializer.data_2_bin(ctx) )
            else:
                self.DB.task_error_and_delay(taskid,
                    settings.ASYNC_ERROR_TASK_DELAY,
                    self.serializer.data_2_bin(result) )
                self.DB.task_store_context(
                    taskid,
                    self.serializer.data_2_bin(ctx) )
            return

        # task processed successfully
        self.DB.task_finished_ok(taskid, self.serializer.data_2_bin(result))



    def process_next_job(self):
        """
        Check if is any job waiting and start processing it.
        """
        self.SEMA.acquire()
        try:
            taskid = self.DB.task_choose_for_process()
        finally:
            self.SEMA.release()

        if taskid is None:
            # nothing is waiting
            return False
        else:
            self.process_task(taskid)
            return True


    def check_lost_tasks(self):
        """
        Find tasks assigned to unexisting async workers and reassign them to self.
        once - if true, then not register task to do it again in future
        also:
        Check database for dead tasks: task waiting long with status=1
        (selected to process, but unprocessed)
        also:
        Find tasks with status=2 - processing, but without asyncid (after asyncid died).
        """
        # get all tasks belonging to dead async workers
        lost_asyncd = 0
        for asyncid in self.DB.async_list():
            if control.worker.exists( asyncid ):
                # worker exists
                continue
            # found lost async daemon tasks,
            # reassign them to self
            rc = self.DB.unlock_lost_tasks(asyncid)
            lost_asyncd =+ 1

        # process all tasks with status 1 -> selected for processing, but unprocessed for a long time
        self.DB.recover_unprocessed_tasks(settings.ASYNC_DEAD_TASK_TIME_LIMIT)

        # process all tasks with status 2 -> processing started, but the async daemon died before receiving the result
        self.DB.recover_unfinished_tasks()
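
A hypothetical driver for the worker above: schedule a task and start the processing loop. The worker id, task name, and payload are placeholders; err_max is the context key the error-handling path above reads:

# hypothetical usage; task name and payload are placeholders
worker = AsyncWorker("async-1")
taskid = worker.task_add_new(
    task="mailer.send_welcome",
    context={"err_max": 3},     # fail permanently after three task errors
    args=("user@example.com",),
    kwargs={},
)
worker.start_eat()              # consume tasks in a background greenlet
gevent.sleep(5)
worker.close()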