Ejemplo n.º 1
0
    def handle(self):
        """Dispatch the incoming client socket to the least-loaded child process.

        Polls every child for its open-connection count, picks the first one
        under the configured limit, spawns a new child if allowed, and
        otherwise overloads the least-loaded child.  The client socket is then
        shipped to the chosen child in reduced, pickled form.
        """
        self.role.kill_mutex.acquire()
        try:
            best_proc = None
            for pid, proc in self.role.processes.items():
                ctrl = proc[0][1]
                # Ask the child how many connections it currently serves.
                nb_conn = ctrl.send('nb_conn')
                self.role.processes[pid][1] = nb_conn
                if nb_conn < Config.max_connection:
                    best_proc = pid
                    break
            if best_proc is None:
                if len(self.role.processes) < Config.max_process:
                    best_proc = self.role.create_process()
                else:
                    # Every child is full: overload the least-loaded one.
                    best_proc = min(self.role.processes,
                                    key=lambda pid: self.role.processes[pid][1])
                    Logger.warn(
                        "WebApps service has reached the open connections limit")

            ctrl = self.role.processes[best_proc][0][1]
            # reduce_socket()[1] is the picklable state needed to rebuild the
            # client socket inside the chosen child process.
            pickled_sock = pickle.dumps(reduce_socket(self.request)[1])
            ctrl.send(('socket', pickled_sock))
        finally:
            # Fix: the mutex was previously leaked if any step above raised
            # (e.g. a broken child control channel), deadlocking the service.
            self.role.kill_mutex.release()
Ejemplo n.º 2
0
def epoll_server(socket_, timeout=1, use_worker=False):
    '''Single process epoll() with non-blocking accept() and recv().'''
    peers = {}  # {fileno: socket}
    flag = (select.EPOLLIN |
            select.EPOLLET |
            select.EPOLLERR |
            select.EPOLLHUP)

    try:
        max_peers = 0

        if use_worker:
            queue = multiprocessing.Queue()
            worker = multiprocessing.Process(target=queued_handle_conn,
                                             args=(queue,))
            worker.start()

        epoll = select.epoll()
        epoll.register(socket_, select.EPOLLIN | select.EPOLLET)
        while True:
            max_peers = max(max_peers, len(peers))
            actionable = epoll.poll(timeout=timeout)

            for fd, event in actionable:
                if fd == socket_.fileno():
                    while True:
                        try:
                            conn, addr = socket_.accept()
                            conn.setblocking(0)

                            peers[conn.fileno()] = conn
                            epoll.register(conn, flag)
                        except:
                            break

                elif event & select.EPOLLIN:
                    epoll.unregister(fd)
                    conn, addr = peers[fd], peers[fd].getpeername()

                    if use_worker:
                        # Behind-the-scene: 'conn' is serialized and sent to
                        # worker process via socket (IPC).
                        rebuild_func, hints = reduce_socket(conn)
                        queue.put((rebuild_func, hints, addr))
                    else:
                        handle_conn(conn, addr)

                elif event & select.EPOLLERR or event & select.EPOLLHUP:
                    epoll.unregister(fd)
                    peers[fd].close()
    finally:
        if use_worker and worker.is_alive():
            worker.terminate()
        epoll.close()

        print 'Max. number of connections:', max_peers
Ejemplo n.º 3
0
def epoll_server(socket_, timeout=1, use_worker=False):
    '''Single process epoll() with non-blocking accept() and recv().

    Accepts connections on *socket_* (assumed listening and non-blocking)
    and dispatches each readable peer either inline via handle_conn() or to
    a single worker process through a multiprocessing.Queue when
    *use_worker* is true.  Runs until interrupted; prints the peak peer
    count on the way out.
    '''
    peers = {}  # {fileno: socket}
    # Edge-triggered registration; also watch for errors and hangups.
    flag = (select.EPOLLIN | select.EPOLLET | select.EPOLLERR
            | select.EPOLLHUP)

    try:
        max_peers = 0

        if use_worker:
            queue = multiprocessing.Queue()
            worker = multiprocessing.Process(target=queued_handle_conn,
                                             args=(queue, ))
            worker.start()

        epoll = select.epoll()
        epoll.register(socket_, select.EPOLLIN | select.EPOLLET)
        while True:
            max_peers = max(max_peers, len(peers))
            actionable = epoll.poll(timeout=timeout)

            for fd, event in actionable:
                if fd == socket_.fileno():
                    # Edge-triggered: drain the whole accept backlog now,
                    # otherwise pending connections would be missed.
                    while True:
                        try:
                            conn, addr = socket_.accept()
                            conn.setblocking(0)

                            peers[conn.fileno()] = conn
                            epoll.register(conn, flag)
                        except:
                            # NOTE(review): bare except — presumably meant to
                            # catch EAGAIN when the backlog is empty, but it
                            # also swallows KeyboardInterrupt here.
                            break

                elif event & select.EPOLLIN:
                    # Hand the connection off exactly once.
                    epoll.unregister(fd)
                    conn, addr = peers[fd], peers[fd].getpeername()

                    if use_worker:
                        # Behind-the-scene: 'conn' is serialized and sent to
                        # worker process via socket (IPC).
                        rebuild_func, hints = reduce_socket(conn)
                        queue.put((rebuild_func, hints, addr))
                    else:
                        handle_conn(conn, addr)

                elif event & select.EPOLLERR or event & select.EPOLLHUP:
                    epoll.unregister(fd)
                    peers[fd].close()
                    # NOTE(review): the closed socket stays in `peers`, so
                    # max_peers can over-count and fd reuse overwrites it.
    finally:
        # NOTE(review): `worker`/`epoll` may be unbound here if setup raised
        # before they were assigned (would turn into a NameError).
        if use_worker and worker.is_alive():
            worker.terminate()
        epoll.close()

        print 'Max. number of connections:', max_peers
Ejemplo n.º 4
0
    def __getstate__(self):
        """
        Here are properties should be cloned when passing me to another process

        Collects the connection's picklable attributes, duplicates the
        socket's file descriptor, and reduces the socket itself so the
        receiving process can rebuild the connection.

        Returns:
            dict: the combined state, with every ``None`` value replaced by
            the string ``'None'`` (presumably the receiving side cannot
            carry real None values — TODO confirm against the unpickling
            code), plus ``__fd__`` (dup'ed fd) and ``_sock`` (reduction
            handle).
        """
        # Server-reported facts captured at connect time.
        server_state = {
            "server_capabilities": self.server_capabilities,
            "server_charset": self.server_charset,
            "server_language": self.server_language,
            "server_status": self.server_status,
            "server_thread_id": self.server_thread_id,
            "server_version": self.server_version,
        }
        # Arguments needed to (re)establish the connection.
        con_args = {
            "ssl": self.ssl,
            "salt": self.salt,
            "host": self.host,
            "host_info": self.host_info,
            "password": self.password,
            "db": self.db,
            "port": self.port,
            "user": self.user,
            "connect_timeout": self.connect_timeout,
            "protocol_version": self.protocol_version,
            "charset": self.charset,
            "encoding": self.encoding,
        }
        # Client-side session configuration.
        state = {
            "sql_mode": self.sql_mode,
            "use_unicode": self.use_unicode,
            "decoders": self.decoders,
            # "encoders": self.encoders,  # todo: encoders can't be passed
            "client_flag": self.client_flag,
            "init_command": self.init_command,
            "autocommit_mode": self.autocommit_mode,
            "_result": self._result,
            "_write_timeout": self._write_timeout,
            "_read_timeout": self._read_timeout,
            "__id__": self.__id__,
        }
        state.update(server_state)
        state.update(con_args)
        # Duplicate the fd so the descriptor stays valid for the receiver
        # even if this process's socket object is closed meanwhile.
        state["__fd__"] = os.dup(self._sock.fileno())

        state["_sock"] = reduction.reduce_socket(self._sock)
        # Replace None values with the literal string 'None' before handing
        # the dict over (see docstring note above).
        packed = dict()
        for k, v in state.items():
            if v is None:
                v = 'None'
            packed[k] = v
        return packed
Ejemplo n.º 5
0
def select_server(socket_, timeout=1, use_worker=False):
    '''Single process select() with non-blocking accept() and recv().'''
    peers = []

    try:
        max_peers = 0

        if use_worker:
            queue = multiprocessing.Queue()
            worker = multiprocessing.Process(target=queued_handle_conn,
                                             args=(queue, ))
            worker.start()

        while True:
            max_peers = max(max_peers, len(peers))
            readable, w, e = select.select(peers + [socket_], [], [], timeout)

            for s in readable:
                if s is socket_:
                    while True:
                        try:
                            conn, addr = socket_.accept()
                            conn.setblocking(0)

                            peers.append(conn)
                        except:
                            break
                else:
                    peers.remove(s)
                    conn, addr = s, s.getpeername()

                    if use_worker:
                        # Behind-the-scene: 'conn' is serialized and sent to
                        # worker process via socket (IPC).
                        rebuild_func, hints = reduce_socket(conn)
                        queue.put((rebuild_func, hints, addr))
                    else:
                        handle_conn(conn, addr)
    finally:
        if use_worker and worker.is_alive():
            worker.terminate()

        print 'Max. number of connections:', max_peers
Ejemplo n.º 6
0
def select_server(socket_, timeout=1, use_worker=False):
    '''Single process select() with non-blocking accept() and recv().

    Accepts connections on *socket_* (assumed listening and non-blocking)
    and hands each readable peer either to handle_conn() inline or to a
    single worker process through a multiprocessing.Queue when *use_worker*
    is true.  Runs until interrupted; prints the peak peer count on exit.
    '''
    peers = []

    try:
        max_peers = 0

        if use_worker:
            queue = multiprocessing.Queue()
            worker = multiprocessing.Process(target=queued_handle_conn,
                                             args=(queue,))
            worker.start()

        while True:
            max_peers = max(max_peers, len(peers))
            # Wait (up to `timeout`) for the listener or any peer to become
            # readable; the write/error sets are unused.
            readable, w, e = select.select(peers + [socket_], [], [], timeout)

            for s in readable:
                if s is socket_:
                    # Drain the whole accept backlog in one go.
                    while True:
                        try:
                            conn, addr = socket_.accept()
                            conn.setblocking(0)

                            peers.append(conn)
                        except:
                            # NOTE(review): bare except — presumably meant
                            # for EAGAIN when the backlog is empty, but it
                            # also swallows KeyboardInterrupt here.
                            break
                else:
                    # Each peer is handled exactly once, then forgotten.
                    peers.remove(s)
                    conn, addr = s, s.getpeername()

                    if use_worker:
                        # Behind-the-scene: 'conn' is serialized and sent to
                        # worker process via socket (IPC).
                        rebuild_func, hints = reduce_socket(conn)
                        queue.put((rebuild_func, hints, addr))
                    else:
                        handle_conn(conn, addr)
    finally:
        # NOTE(review): `worker` may be unbound here if Queue()/Process()
        # raised before assignment (would turn into a NameError).
        if use_worker and worker.is_alive():
            worker.terminate()

        print 'Max. number of connections:', max_peers
Ejemplo n.º 7
0
	def handle(self):
		"""Dispatch the incoming client socket to the least-loaded child
		process, spawning a new one if the process limit allows."""
		self.role.kill_mutex.acquire()

		best_proc = None
		for pid, proc in self.role.processes.items():
			ctrl = proc[0][1]
			# Ask the child how many connections it currently serves.
			nb_conn = ctrl.send('nb_conn')
			self.role.processes[pid][1] = nb_conn
			if nb_conn < Config.max_connection:
				best_proc = pid
				break
		if best_proc is None:
			if len(self.role.processes) < Config.max_process:
				best_proc = self.role.create_process()
			else:
				# Every child is full: overload the least-loaded one.
				best_proc = min(self.role.processes, key=lambda pid:self.role.processes[pid][1])
				Logger.warn("Gateway service has reached the open connections limit")

		# reduce_socket()[1] is the picklable state needed to rebuild the
		# client socket inside the chosen child process.
		ctrl = self.role.processes[best_proc][0][1]
		pickled_sock = pickle.dumps(reduce_socket(self.request)[1])
		ctrl.send(('socket', pickled_sock))

		# NOTE(review): the release is skipped if anything above raises;
		# a try/finally around the body would be safer.
		self.role.kill_mutex.release()
Ejemplo n.º 8
0
 def handle(self):
     """Dispatch the incoming client socket to the least-loaded child
     process, spawning a new one if the process limit allows."""
     self.role.kill_mutex.acquire()

     best_proc = None  # points to the process chosen for this connection
     for pid, proc in self.role.processes.items():
         ctrl = proc[0][1]
         # Ask the child how many connections it currently serves.
         nb_conn = ctrl.send('nb_conn')
         self.role.processes[pid][1] = nb_conn
         if nb_conn < 100:  # NOTE(review): hard-coded per-process limit
             best_proc = pid
             break
     if best_proc is None:
         if len(self.role.processes) < 10:  # NOTE(review): hard-coded cap
             best_proc = self.role.create_process()
         else:
             # Every child is full: overload the least-loaded one.
             best_proc = min(self.role.processes, key=lambda pid:self.role.processes[pid][1])


     # reduce_socket()[1] is the picklable state needed to rebuild the
     # client socket inside the chosen child process.
     ctrl = self.role.processes[best_proc][0][1]
     pickled_sock = pickle.dumps(reduce_socket(self.request)[1])
     ctrl.send(('socket', pickled_sock))

     # NOTE(review): the release is skipped if anything above raises;
     # a try/finally around the body would be safer.
     self.role.kill_mutex.release()
     
Ejemplo n.º 9
0
 def reduce(self):
     """Replace the wrapped socket with its reduced (IPC-transferable)
     form; idempotent — repeated calls are no-ops.  Returns self."""
     if self._isReduced:
         return self
     self.socket = reduce_socket(self.socket)
     self._isReduced = True
     return self
Ejemplo n.º 10
0
        print(s.getsockname())
    except Exception as e:
        print(e)


if __name__ == "__main__":

    # Demo: reduce two connected TCP sockets and hand them to a child
    # process through a multiprocessing queue; the child rebuilds them.
    help(rebuild_socket)
    shared_list = multiprocessing.Manager().list()
    sock_queue = multiprocessing.Queue()
    conn_a = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn_b = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn_a.connect(("localhost", 4040))
    conn_b.connect(("localhost", 4040))
    print(conn_a.getsockname(), conn_b.getsockname())
    handle_a = reduce_socket(conn_a)
    handle_b = reduce_socket(conn_b)
    sock_queue.put(handle_a)
    sock_queue.put(handle_b)
    shared_list.append(1)
    print(os.getpid())
    print(shared_list)
    child = multiprocessing.Process(target=f, args=(shared_list, sock_queue))
    print(child.pid)  # None here: the process has not been started yet
    print(os.getpid())
    child.start()
    print(child.pid)
    child.join()
    print(shared_list)
Ejemplo n.º 11
0
 def serialise_socket(socket):
     """Reduce *socket* for inter-process transfer and return only the
     serialised state, discarding the rebuild callable."""
     _rebuild, serialised = reduction.reduce_socket(socket)
     return serialised
Ejemplo n.º 12
0
        print(s.getsockname())
    except Exception as e:
        print(e)


if __name__ == '__main__':

    # Demo: reduce two connected TCP sockets and hand them to a child
    # process through a multiprocessing queue; the child rebuilds them.
    help(rebuild_socket)
    a = multiprocessing.Manager().list()  # shared list, mutated by the child
    b = multiprocessing.Queue()           # carries the reduced socket handles
    s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s1.connect(('localhost', 4040))
    s2.connect(('localhost', 4040))
    print(s1.getsockname(), s2.getsockname())
    h1 = reduce_socket(s1)
    h2 = reduce_socket(s2)
    #h1 = reduce_handle(s1.fileno())
    #h2 = reduce_handle(s2.fileno())
    b.put(h1)
    b.put(h2)
    a.append(1)
    print(os.getpid())
    print(a)
    p = multiprocessing.Process(target=f, args=(a, b))
    print(p.pid)  # None here: the process has not been started yet
    print(os.getpid())
    p.start()
    print(p.pid)
    p.join()
    print(a)
Ejemplo n.º 13
0
	def reduce(self):
		"""Replace the wrapped socket with its reduced (IPC-transferable)
		form; idempotent — repeated calls are no-ops.  Returns self."""
		if not self._isReduced:
			self.socket = reduce_socket(self.socket)
			self._isReduced = True
		return self