Code Example #1
File: monitor.py Project: reesun/glusterfs
    def multiplex(self, wspx, suuid):
        argv = sys.argv[:]
        for o in ("-N", "--no-daemon", "--monitor"):
            while o in argv:
                argv.remove(o)
        argv.extend(("-N", "-p", "", "--slave-id", suuid))
        argv.insert(0, os.path.basename(sys.executable))

        cpids = set()
        agents = set()
        ta = []
        for wx in wspx:

            def wmon(w):
                cpid, _ = self.monitor(w, argv, cpids, agents)
                time.sleep(1)
                self.lock.acquire()
                for cpid in cpids:
                    os.kill(cpid, signal.SIGKILL)
                self.lock.release()
                finalize(exval=1)

            t = Thread(target=wmon, args=[wx])
            t.start()
            ta.append(t)
        for t in ta:
            t.join()
Code Example #2
File: master.py Project: ngtuna/glusterfs
 def start_checkpoint_thread(self):
     """prepare and start checkpoint service"""
     if self.checkpoint_thread or not getattr(gconf, "state_socket_unencoded", None):
         return
     chan = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
     state_socket = "/tmp/%s.socket" % md5(gconf.state_socket_unencoded).hexdigest()
     try:
         os.unlink(state_socket)
     except OSError:
         # the socket file may not exist yet; ignore only that case
         pass
     chan.bind(state_socket)
     chan.listen(1)
     checkpt_tgt = None
     if gconf.checkpoint:
         checkpt_tgt = self._checkpt_param(gconf.checkpoint, "target")
         if not checkpt_tgt:
             checkpt_tgt = self.xtime(".")
             if isinstance(checkpt_tgt, int):
                 raise GsyncdError("master root directory is inaccessible (%s)"
                                   % os.strerror(checkpt_tgt))
             self._set_checkpt_param(gconf.checkpoint, "target", checkpt_tgt)
         logging.debug(
             "checkpoint target %s has been determined for checkpoint %s" % (repr(checkpt_tgt), gconf.checkpoint)
         )
     t = Thread(target=self.checkpt_service, args=(chan, gconf.checkpoint, checkpt_tgt))
     t.start()
     self.checkpoint_thread = t
Code Example #3
File: master.py Project: vbellur/glusterfs
 def crawl_loop(self):
     timo = int(gconf.timeout or 0)
     if timo > 0:
         def keep_alive():
             while True:
                 gap = timo * 0.5
                 # first grab a reference as self.volinfo
                 # can be changed in main thread
                 vi = self.volinfo
                 if vi:
                     # then have a private copy which we can mod
                     vi = vi.copy()
                     vi['timeout'] = int(time.time()) + timo
                 else:
                     # send keep-alives more frequently to
                     # avoid a delay in announcing our volume info
                     # to slave if it becomes established in the
                     # meantime
                     gap = min(10, gap)
                 self.slave.server.keep_alive(vi)
                 time.sleep(gap)
         t = Thread(target=keep_alive)
         t.start()
     self.lastreport['time'] = time.time()
     while not self.terminate:
         self.crawl()
Code Example #4
File: master.py Project: montuviky/glusterfs
    def crawl_loop(self):
        """start the keep-alive thread and iterate .crawl"""
        timo = int(gconf.timeout or 0)
        if timo > 0:

            def keep_alive():
                while True:
                    gap = timo * 0.5
                    # first grab a reference as self.volinfo
                    # can be changed in main thread
                    vi = self.volinfo
                    if vi:
                        # then have a private copy which we can mod
                        vi = vi.copy()
                        vi['timeout'] = int(time.time()) + timo
                    else:
                        # send keep-alives more frequently to
                        # avoid a delay in announcing our volume info
                        # to slave if it becomes established in the
                        # meantime
                        gap = min(10, gap)
                    self.slave.server.keep_alive(vi)
                    time.sleep(gap)

            t = Thread(target=keep_alive)
            t.start()
        self.lastreport['time'] = time.time()
        while not self.terminate:
            self.crawl()
Code Example #5
 def start_checkpoint_thread(self):
     """prepare and start checkpoint service"""
     if self.checkpoint_thread or not (
             getattr(gconf, 'state_socket_unencoded', None)
             and getattr(gconf, 'socketdir', None)):
         return
     chan = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
     state_socket = os.path.join(
         gconf.socketdir,
         md5hex(gconf.state_socket_unencoded) + ".socket")
     try:
         os.unlink(state_socket)
     except OSError:
         # the socket file may not exist yet; ignore only that case
         pass
     chan.bind(state_socket)
     chan.listen(1)
     checkpt_tgt = None
     if gconf.checkpoint:
         checkpt_tgt = self._checkpt_param(gconf.checkpoint, 'target')
         if not checkpt_tgt:
             checkpt_tgt = self.xtime('.')
             if isinstance(checkpt_tgt, int):
                 raise GsyncdError(
                     "master root directory is inaccessible (%s)"
                     % os.strerror(checkpt_tgt))
             self._set_checkpt_param(gconf.checkpoint, 'target',
                                     checkpt_tgt)
         logging.debug("checkpoint target %s has been determined for checkpoint %s" % \
                       (repr(checkpt_tgt), gconf.checkpoint))
     t = Thread(target=self.checkpt_service,
                args=(chan, gconf.checkpoint, checkpt_tgt))
     t.start()
     self.checkpoint_thread = t
Code Example #6
File: monitor.py Project: Byreddys/glusterfs
    def multiplex(self, wspx, suuid, slave_vol, slave_host, master):
        argv = sys.argv[:]
        for o in ('-N', '--no-daemon', '--monitor'):
            while o in argv:
                argv.remove(o)
        argv.extend(('-N', '-p', '', '--slave-id', suuid))
        argv.insert(0, os.path.basename(sys.executable))

        cpids = set()
        agents = set()
        ta = []
        for wx in wspx:
            def wmon(w):
                cpid, _ = self.monitor(w, argv, cpids, agents, slave_vol,
                                       slave_host, master)
                time.sleep(1)
                self.lock.acquire()
                for cpid in cpids:
                    errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
                for apid in agents:
                    errno_wrap(os.kill, [apid, signal.SIGKILL], [ESRCH])
                self.lock.release()
                finalize(exval=1)
            t = Thread(target=wmon, args=[wx])
            t.start()
            ta.append(t)
        for t in ta:
            t.join()
Code Example #7
File: master.py Project: vbellur/glusterfs
 def __init__(self, slave):
     self.slave = slave
     self.lock = Lock()
     self.pb = PostBox()
     for i in range(int(gconf.sync_jobs)):
         t = Thread(target=self.syncjob)
         t.start()
Code Example #8
File: master.py Project: portante/glusterfs
 def __init__(self, slave):
     """spawn worker threads"""
     self.slave = slave
     self.lock = Lock()
     self.pb = PostBox()
     for i in range(int(gconf.sync_jobs)):
         t = Thread(target=self.syncjob)
         t.start()
Code Example #9
File: repce.py Project: Gaurav-Gangalwar/glusterfs
 def service_loop(self):
     for i in range(self.wnum):
         t = Thread(target=self.worker)
         t.start()
     try:
         while True:
             self.q.put(recv(self.inf))
     except EOFError:
         logging.info("terminating on reaching EOF.")
Code Example #10
File: master.py Project: csabahenk/glusterfs
 def __init__(self, slave):
     """spawn worker threads"""
     self.slave = slave
     self.lock = Lock()
     self.pb = PostBox()
     self.bytes_synced = 0
     for i in range(int(gconf.sync_jobs)):
         t = Thread(target=self.syncjob)
         t.start()
Code Example #11
File: repce.py Project: Kaushikbv/Gluster
 def service_loop(self):
     for i in range(self.wnum):
         t = Thread(target=self.worker)
         t.start()
     try:
         while True:
             self.q.put(recv(self.inf))
     except EOFError:
         logging.info("terminating on reaching EOF.")
Code Example #12
 def service_loop(self):
     """fire up worker threads, get messages and dispatch among them"""
     for i in range(self.wnum):
         t = Thread(target=self.worker)
         t.start()
     try:
         while True:
             self.q.put(recv(self.inf))
     except EOFError:
         logging.info("terminating on reaching EOF.")
Code Example #13
File: repce.py Project: raghavendrabhat/glusterfs
 def service_loop(self):
     """fire up worker threads, get messages and dispatch among them"""
     for i in range(self.wnum):
         t = Thread(target=self.worker)
         t.start()
     try:
         while True:
             self.q.put(recv(self.inf))
     except EOFError:
         logging.info("terminating on reaching EOF.")
Code Example #14
 def init_keep_alive(cls):
     """start the keep-alive thread """
     timo = int(gconf.timeout or 0)
     if timo > 0:
         def keep_alive():
             while True:
                 vi, gap = cls.keepalive_payload_hook(timo, timo * 0.5)
                 cls.slave.server.keep_alive(vi)
                 time.sleep(gap)
         t = Thread(target=keep_alive)
         t.start()
Code Example #15
    def init_keep_alive(cls):
        """start the keep-alive thread """
        timo = int(gconf.timeout or 0)
        if timo > 0:

            def keep_alive():
                while True:
                    vi, gap = cls.keepalive_payload_hook(timo, timo * 0.5)
                    cls.slave.server.keep_alive(vi)
                    time.sleep(gap)

            t = Thread(target=keep_alive)
            t.start()
Code Example #16
File: master.py Project: portante/glusterfs
 def crawl_loop(self):
     """start the keep-alive thread and iterate .crawl"""
     timo = int(gconf.timeout or 0)
     if timo > 0:
         def keep_alive():
             while True:
                 vi, gap = self.keepalive_payload_hook(timo, timo * 0.5)
                 self.slave.server.keep_alive(vi)
                 time.sleep(gap)
         t = Thread(target=keep_alive)
         t.start()
     self.lastreport['time'] = time.time()
     while not self.terminate:
         self.crawl()
Code Example #17
File: monitor.py Project: bopopescu/glusterfs-6
    def multiplex(self, wspx, suuid, slave_vol, slave_host, master, slavenodes):
        argv = [os.path.basename(sys.executable), sys.argv[0]]

        cpids = set()
        ta = []
        for wx in wspx:
            def wmon(w):
                cpid, _ = self.monitor(w, argv, cpids, slave_vol,
                                       slave_host, master, suuid, slavenodes)
                time.sleep(1)
                self.lock.acquire()
                for cpid in cpids:
                    errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
                self.lock.release()
                finalize(exval=1)
            t = Thread(target=wmon, args=[wx])
            t.start()
            ta.append(t)

        # Monitor status used to be updated in each monitor thread, but
        # doing so can deadlock a worker start: set_monitor_status uses
        # flock to synchronize multiple instances updating the file, and
        # since each monitor thread forks a worker, those processes can
        # hold a reference to the status file's fd. That deadlocks
        # workers which start later, because the flock is not released
        # until all references to the same fd are closed. It would also
        # leak fds.

        self.lock.acquire()
        set_monitor_status(gconf.get("state-file"), self.ST_STARTED)
        self.lock.release()
        for t in ta:
            t.join()
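
The comment in the example above describes a real flock subtlety: the lock is attached to the open file description, so a forked child that inherits the descriptor keeps the lock alive even after the parent closes its own copy. Below is a minimal sketch of the idea, using a hypothetical helper (not the actual glusterfs set_monitor_status):

    import fcntl
    import os

    def set_status_locked(path, status):
        # Hypothetical sketch: serialize concurrent writers to a status
        # file with an exclusive flock.
        fd = os.open(path, os.O_CREAT | os.O_WRONLY, 0o644)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX)  # blocks until the lock is free
            os.ftruncate(fd, 0)
            os.write(fd, status.encode())
        finally:
            # Closing the fd releases the flock only once *every*
            # descriptor referring to this open file description is
            # closed. A child forked while the lock is held inherits
            # such a descriptor, so the lock can outlive this function,
            # which is exactly the deadlock the comment above warns about.
            os.close(fd)

This is why the examples above call set_monitor_status once, under self.lock, instead of from inside each forking monitor thread.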
Code Example #18
    def multiplex(self, wspx, suuid, slave_vol, slave_host, master,
                  slavenodes):
        argv = [os.path.basename(sys.executable), sys.argv[0]]

        cpids = set()
        agents = set()
        ta = []
        for wx in wspx:

            def wmon(w):
                cpid, _ = self.monitor(w, argv, cpids, agents, slave_vol,
                                       slave_host, master, suuid, slavenodes)
                time.sleep(1)
                self.lock.acquire()
                for cpid in cpids:
                    errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
                for apid in agents:
                    errno_wrap(os.kill, [apid, signal.SIGKILL], [ESRCH])
                self.lock.release()
                finalize(exval=1)

            t = Thread(target=wmon, args=[wx])
            t.start()
            ta.append(t)
        for t in ta:
            t.join()
Code Example #19
File: monitor.py Project: vieyahn/glusterfs
    def multiplex(self, wspx, suuid, slave_vol, slave_host, master):
        argv = sys.argv[:]
        for o in ('-N', '--no-daemon', '--monitor'):
            while o in argv:
                argv.remove(o)
        argv.extend(('-N', '-p', '', '--slave-id', suuid))
        argv.insert(0, os.path.basename(sys.executable))

        cpids = set()
        agents = set()
        ta = []
        for wx in wspx:
            def wmon(w):
                cpid, _ = self.monitor(w, argv, cpids, agents, slave_vol,
                                       slave_host, master)
                time.sleep(1)
                self.lock.acquire()
                for cpid in cpids:
                    errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
                for apid in agents:
                    errno_wrap(os.kill, [apid, signal.SIGKILL], [ESRCH])
                self.lock.release()
                finalize(exval=1)
            t = Thread(target=wmon, args=[wx])
            t.start()
            ta.append(t)
        for t in ta:
            t.join()
Code Example #20
File: monitor.py Project: csabahenk/glusterfs
    def multiplex(self, wspx, suuid):
        def sigcont_handler(*a):
            """
            Re-init logging and send group kill signal
            """
            md = gconf.log_metadata
            logging.shutdown()
            lcls = logging.getLoggerClass()
            lcls.setup(label=md.get('saved_label'), **md)
            pid = os.getpid()
            os.kill(-pid, signal.SIGUSR1)
        signal.signal(signal.SIGUSR1, lambda *a: ())
        signal.signal(signal.SIGCONT, sigcont_handler)

        argv = sys.argv[:]
        for o in ('-N', '--no-daemon', '--monitor'):
            while o in argv:
                argv.remove(o)
        argv.extend(('-N', '-p', '', '--slave-id', suuid))
        argv.insert(0, os.path.basename(sys.executable))

        cpids = set()
        ta = []
        for wx in wspx:
            def wmon(w):
                cpid, _ = self.monitor(w, argv, cpids)
                terminate()
                time.sleep(1)
                self.lock.acquire()
                for cpid in cpids:
                    os.kill(cpid, signal.SIGKILL)
                self.lock.release()
                finalize(exval=1)
            t = Thread(target=wmon, args=[wx])
            t.start()
            ta.append(t)
        for t in ta:
            t.join()
Code Example #21
File: monitor.py Project: raghavendrabhat/glusterfs
    def multiplex(self, wspx, suuid, slave_vol, slave_host, master, slavenodes):
        argv = [os.path.basename(sys.executable), sys.argv[0]]

        cpids = set()
        agents = set()
        ta = []
        for wx in wspx:
            def wmon(w):
                cpid, _ = self.monitor(w, argv, cpids, agents, slave_vol,
                                       slave_host, master, suuid, slavenodes)
                time.sleep(1)
                self.lock.acquire()
                for cpid in cpids:
                    errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
                for apid in agents:
                    errno_wrap(os.kill, [apid, signal.SIGKILL], [ESRCH])
                self.lock.release()
                finalize(exval=1)
            t = Thread(target=wmon, args=[wx])
            t.start()
            ta.append(t)
        for t in ta:
            t.join()
Code Example #22
File: monitor.py Project: amarts/glusterfs
    def multiplex(self, wspx, suuid, slave_vol, slave_host, master, slavenodes):
        argv = [os.path.basename(sys.executable), sys.argv[0]]

        cpids = set()
        agents = set()
        ta = []
        for wx in wspx:
            def wmon(w):
                cpid, _ = self.monitor(w, argv, cpids, agents, slave_vol,
                                       slave_host, master, suuid, slavenodes)
                time.sleep(1)
                self.lock.acquire()
                for cpid in cpids:
                    errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
                for apid in agents:
                    errno_wrap(os.kill, [apid, signal.SIGKILL], [ESRCH])
                self.lock.release()
                finalize(exval=1)
            t = Thread(target=wmon, args=[wx])
            t.start()
            ta.append(t)

        # Monitor status used to be updated in each monitor thread, but
        # doing so can deadlock a worker start: set_monitor_status uses
        # flock to synchronize multiple instances updating the file, and
        # since each monitor thread forks a worker and an agent, those
        # processes can hold a reference to the status file's fd. That
        # deadlocks workers which start later, because the flock is not
        # released until all references to the same fd are closed. It
        # would also leak fds.

        self.lock.acquire()
        set_monitor_status(gconf.get("state-file"), self.ST_STARTED)
        self.lock.release()
        for t in ta:
            t.join()
Code Example #23
 def __init__(self, i, o):
     self.inf, self.out = ioparse(i, o)
     self.jtab = {}
     t = Thread(target=self.listen)
     t.start()
Code Example #24
File: repce.py Project: raghavendrabhat/glusterfs
 def __init__(self, i, o):
     self.inf, self.out = ioparse(i, o)
     self.jtab = {}
     t = Thread(target=self.listen)
     t.start()