コード例 #1
0
def run():
    """Register the NFS program numbers, then run mountd and nfsd to completion.

    Exits with status 0 on Ctrl-C.
    """
    try:
        # Advertise NFS (program 100003) and mount (100005) over TCP.
        register.register((100003, 3, IPPROTO_TCP, 2049))
        register.register((100005, 3, IPPROTO_TCP, 5555))

        daemons = [
            threadutil.start_daemon_thread(mountd),
            threadutil.start_daemon_thread(nfsd),
        ]

        # Block the main thread on both service threads.
        for daemon_th in daemons:
            daemon_th.join()

    except KeyboardInterrupt:
        sys.exit(0)
コード例 #2
0
    def test_brecv_message(self):
        """brecv_msg returns a queued message, None on timeout, and picks up
        a message that arrives while blocked."""

        client = redisutil.RedisChannel((self.ip, redis_port), '/foo', 'client')
        server = redisutil.RedisChannel((self.ip, redis_port), '/foo', 'server')

        client.send_msg('aa')
        self.assertEqual('aa', server.brecv_msg(timeout=1))
        # Channel is drained now; the blocking receive times out with None.
        self.assertEqual(None, server.brecv_msg(timeout=1))

        def _delayed_send():
            time.sleep(0.5)
            client.send_msg('bar')

        # A message arriving mid-wait must be returned before the timeout.
        threadutil.start_daemon_thread(target=_delayed_send)
        self.assertEqual('bar', server.brecv_msg(timeout=1))
コード例 #3
0
    def test_wait_for_data_timeout(self):
        """Cat.iterate() must raise NoData when no new data arrives within
        `timeout`, after yielding the lines that were already present."""

        expected = [
            'a' * 32,
            'b' * 32,
        ]
        rst = []

        append_lines(self.fn, expected)
        dd('appended 1')

        def _append():
            # Second batch lands only after iterate() has already timed out
            # (0.3s sleep vs 0.1s timeout), so rst sees one batch only.
            time.sleep(0.3)
            append_lines(self.fn, expected)
            dd('appended')

        th = threadutil.start_daemon_thread(_append)

        try:
            for l in fsutil.Cat(self.fn, strip=True).iterate(timeout=0.1):
                rst.append(l)

            # Fixed: unittest.TestCase has no `failed` method — calling it
            # raised AttributeError instead of a clear test failure.
            self.fail('expect NoData to raise')
        except fsutil.NoData:
            pass

        th.join()
        self.assertEqual(expected, rst)
コード例 #4
0
    def test_concurrent(self):
        """Concurrent get() across 3 producers: totals must add up and the
        per-producer `consumed` values must end up nearly equal."""
        pq = priorityqueue.PriorityQueue()
        ntimes = 10240
        nq = 3
        n_thread = 3

        def worker():
            for _ in range(ntimes):
                pq.get()

        for prod_id in range(1, nq + 1):
            pq.add_producer(prod_id, prod_id, yield_forever())

        ths = [threadutil.start_daemon_thread(worker)
               for _ in range(n_thread)]

        for th in ths:
            th.join()

        consumed = []
        got = 0
        for prod_id in range(1, nq + 1):
            q = pq.producer_by_id[prod_id]
            consumed.append(q.consumed)
            dd('get:', q.stat)
            got += q.stat['get']

        # Every get() must be accounted for somewhere.
        self.assertEqual(ntimes * n_thread, got)

        dd('consumed:', consumed)
        # Producers should converge to nearly equal `consumed` values.
        self.assertAlmostEqual(consumed[0], consumed[1])
        self.assertAlmostEqual(consumed[1], consumed[2])
コード例 #5
0
    def test_bench(self):
        """Rough benchmark: average cost of pq.get() must stay under 50us."""

        pq = priorityqueue.PriorityQueue()

        ntimes = 10240
        nq = 1024
        n_thread = 3

        def worker():
            for _ in range(ntimes):
                pq.get()

        for prod_id in range(1, nq + 1):
            pq.add_producer(prod_id, prod_id, yield_forever())

        with ututil.Timer() as t:
            ths = [threadutil.start_daemon_thread(worker)
                   for _ in range(n_thread)]

            for th in ths:
                th.join()

            # Total wall time divided over all get() calls, in microseconds.
            us_per_call = t.spent() / ntimes / n_thread * 1000 * 1000
            dd(us_per_call, 'us/call')

        self.assertLess(us_per_call, 50)
コード例 #6
0
    def test_set_thread_num_keep_order(self):
        """Resizing a worker pool on the fly must not lose or corrupt jobs
        when keep_order=True."""

        def _pass(args):
            return args

        rst = []

        jm = jobq.JobManager([_pass, rst.append], keep_order=True)

        setter = {'running': True}

        def _change_thread_nr():
            # Keep resizing the _pass worker pool while jobs flow through.
            while setter['running']:
                jm.set_thread_num(_pass, random.randint(1, 4))
                time.sleep(0.5)

        ths = [threadutil.start_daemon_thread(_change_thread_nr)
               for _ in range(3)]

        n = 10240
        for job in range(n):
            jm.put(job)

        jm.join()

        # All n jobs must have come through exactly once.
        rst.sort()
        for expected_val in range(n):
            self.assertEqual(expected_val, rst[expected_val])

        setter['running'] = False

        for th in ths:
            th.join()
コード例 #7
0
    def fire(self, curr_time, job_name, job_conf, job_status):
        """Start one run of `job_name` in a daemon thread.

        Refuses to start (appending to the job log and logging an error)
        when the job already has `concurrence_n` or more active threads.
        """
        thread_n = len(job_status['active_threads'])

        if thread_n >= job_conf['concurrence_n']:
            log_msg = 'at time: %s, already have %d threads for job: %s' % (
                curr_time, thread_n, job_name)
            self.append_log(job_status, job_conf, log_msg)

            # Lazy %-args: message is only rendered if the record is emitted.
            logger.error('too many threads for job: %s', job_name)
            return

        logger.info('at time: %s, start to run job: %s',
                    curr_time, job_name)

        threadutil.start_daemon_thread(
            self.run_job, args=(curr_time, job_name, job_conf, job_status))

        job_status['message'] = ''
コード例 #8
0
def run(**argkv):
    """Build the cgroup-arch context, start background workers, then run
    the (blocking) communicate server."""

    context = dict(
        get_cgroup_pid_file=argkv['get_cgroup_pid_file'],
        cgroup_dir=argkv.get('cgroup_dir', '/sys/fs/cgroup'),

        communicate_ip=argkv.get('communicate_ip', '0.0.0.0'),
        communicate_port=argkv.get('communicate_port', 43409),

        tasks_update_interval=argkv.get('tasks_update_interval', 30),

        redis_ip=argkv['redis_ip'],
        redis_port=argkv['redis_port'],
        redis_prefix=argkv.get('redis_prefix', 'cgroup_arch'),
        redis_client=None,
        redis_expire_time=argkv.get('redis_expire_time', 60 * 5),

        get_zk_host=argkv['get_zk_host'],
        zk_prefix=argkv['zk_prefix'],
        zk_auth_data=argkv['zk_auth_data'],
        zk_client=None,

        protected_cgroup=argkv.get('protected_cgroup'),

        arch_conf=None,
    )

    init_redis_client(context)
    init_arch_conf(context)

    global_value['context'] = context

    cgroup_manager.build_all_subsystem_cgroup_arch(context)
    cgroup_manager.set_cgroup(context)
    cgroup_manager.reset_statistics(context)

    # Background workers: accounting and the cgroup re-apply loop.
    threadutil.start_daemon_thread(account.run, args=(context,))
    threadutil.start_daemon_thread(cgroup_manager.loop_set_cgroup,
                                   args=(context,))

    # Blocks here serving the communicate API.
    communicate.run(context, ip=context['communicate_ip'],
                    port=context['communicate_port'])
コード例 #9
0
def run(**argkv):
    """Build the central-node context, start assigner and message-processor
    threads, then run the (blocking) communicate server."""
    context = {
        # websocket listen ip and port
        'ip': argkv.get('ip', '127.0.0.1'),
        'port': argkv.get('port', 32345),

        # every received message is queued here and handled by the
        # message processor thread
        'message_queue': Queue.Queue(1024 * 10),

        # info in distribution is used by the assign() function
        'distribution': {},

        # for monitoring
        'connections': {},
        'reported': {},
        'consumption_sum': {},
        'rejection_sum': {},
        'quota': {},

        # how many recent slots to keep in each per-slot dict
        'nr_slot': argkv.get('nr_slot', 60),

        # a global lock class usable in a 'with' statement; ensures only
        # one central node is actually working at any time
        'Lock': argkv['Lock'],

        # whoever holds the lock is the one running
        'running': False,

        # callback returning all limits set for a service
        'list_limits': argkv['list_limits'],
    }

    threadutil.start_daemon_thread(assigner.run, args=(context, ))
    threadutil.start_daemon_thread(message_processor.run, args=(context, ))

    # Blocks here handling websocket traffic.
    communicate.run(context)
コード例 #10
0
ファイル: assigner.py プロジェクト: bsc-s2/throttle_central
def start_assign(context, assign_threads, slot_number):
    """Start an assign thread for every known service that does not
    already have one, recording each thread in `assign_threads`."""
    for service_name, service_model in service.services.iteritems():
        # Already running for this service; leave it alone.
        if service_name in assign_threads:
            continue

        assign_threads[service_name] = threadutil.start_daemon_thread(
            service_model['module'].assign,
            args=(context, slot_number,))
コード例 #11
0
ファイル: collector.py プロジェクト: wenbobuaa/pykit
def run(**kwargs):
    """Start per-log scanners plus the cache flusher and sender threads,
    then loop forever reporting collector stats."""
    context = {
        'node_id': kwargs['node_id'],
        'node_ip': kwargs['node_ip'],
        'send_log': kwargs['send_log'],
        'conf': kwargs['conf'],
        'cache_lock': threading.RLock(),
        'cache': {},
        'stat': {},
        'queue': Queue.Queue(1024 * 10),
    }

    # strptime is not thread safe; warm it up before starting any thread.
    datetime.strptime("2011-04-05", "%Y-%m-%d")

    for log_name in context['conf'].keys():
        context['cache'][log_name] = {}
        context['stat'][log_name] = {}
        threadutil.start_daemon_thread(scanner.scan, args=(context, log_name))

    threadutil.start_daemon_thread(cache_flusher.run, args=(context, ))
    threadutil.start_daemon_thread(sender.run, args=(context, ))

    while True:
        # Not actually an error, but normally only error logs are reported
        # and we want this stat line reported regardless.
        logger.error('stat: %s' % context['stat'])

        time.sleep(100)
コード例 #12
0
def run(**argkv):
    """Build the cgroup-arch context, start accounting and cgroup-setting
    workers, then run the (blocking) communicate server."""
    context = dict(
        get_cgroup_pid_file=argkv['get_cgroup_pid_file'],
        cgroup_dir=argkv.get('cgroup_dir', '/sys/fs/cgroup'),
        communicate_ip=argkv.get('communicate_ip', '0.0.0.0'),
        communicate_port=argkv.get('communicate_port', 43409),
        tasks_update_interval=argkv.get('tasks_update_interval', 30),
        redis_ip=argkv['redis_ip'],
        redis_port=argkv['redis_port'],
        redis_prefix=argkv.get('redis_prefix', 'cgroup_arch'),
        redis_client=None,
        redis_expire_time=argkv.get('redis_expire_time', 60 * 5),
        get_zk_host=argkv['get_zk_host'],
        zk_prefix=argkv['zk_prefix'],
        zk_auth_data=argkv['zk_auth_data'],
        zk_client=None,
        protected_cgroup=argkv.get('protected_cgroup'),
        arch_conf=None,
    )

    init_redis_client(context)
    init_arch_conf(context)

    global_value['context'] = context

    cgroup_manager.build_all_subsystem_cgroup_arch(context)
    cgroup_manager.set_cgroup(context)
    cgroup_manager.reset_statistics(context)

    # Background workers: accounting and the cgroup re-apply loop.
    threadutil.start_daemon_thread(account.run, args=(context, ))
    threadutil.start_daemon_thread(cgroup_manager.loop_set_cgroup,
                                   args=(context, ))

    # Blocks here serving the communicate API.
    communicate.run(context,
                    ip=context['communicate_ip'],
                    port=context['communicate_port'])
コード例 #13
0
ファイル: test_redisutil.py プロジェクト: drmingdrmer/pykit
    def setUp(self):
        """Start a main and a proxy HTTP server in daemon threads, then
        build the RedisProxyClient pointed at the main server."""
        self.addr = ('127.0.0.1', 22038)
        self.proxy_addr = ('127.0.0.1', 22039)

        self.response['http-status'] = 200

        def _run_main_server():
            self.http_server = HTTPServer(self.addr, HttpHandle)
            self.http_server.serve_forever()

        def _run_proxy_server():
            self.proxy_http_server = HTTPServer(self.proxy_addr, HttpHandle)
            self.proxy_http_server.serve_forever()

        threadutil.start_daemon_thread(_run_main_server)
        threadutil.start_daemon_thread(_run_proxy_server)
        # Give both servers a moment to bind before the client connects.
        time.sleep(0.1)

        self.cli = redisutil.RedisProxyClient([self.addr])
        self.n = self.cli.n
        self.w = self.cli.w
        self.r = self.cli.r
コード例 #14
0
    def test_file_end_handler(self):
        """file_end_handler must fire each time Cat reaches end of file,
        for both cat() and iterate(), including repeated EOFs when more
        data arrives mid-iteration."""
        expected = [
            'a' * 32,
            'b' * 32,
        ]
        append_lines(self.fn, expected)

        rst = []

        def _end():
            # Sentinel appended whenever end-of-file is reached.
            rst.append('end')

        # file_end_handler in cat()
        c = fsutil.Cat(self.fn,
                       strip=True,
                       handler=rst.append,
                       file_end_handler=_end)
        c.cat(timeout=0)
        self.assertEqual(expected + ['end'], rst)

        # file_end_handler in iterate()
        rst = []
        force_remove(self.fn)
        append_lines(self.fn, expected)
        for line in fsutil.Cat(self.fn, strip=True,
                               file_end_handler=_end).iterate(timeout=0):
            rst.append(line)
        self.assertEqual(expected + ['end'], rst)

        # file_end_handler multi times
        rst = []
        force_remove(self.fn)
        append_lines(self.fn, expected)

        def _append():
            # Second batch lands while iterate() is still waiting
            # (0.1s sleep < 0.2s timeout), forcing a second EOF.
            time.sleep(0.1)
            append_lines(self.fn, expected)

        th = threadutil.start_daemon_thread(_append)

        try:
            for line in fsutil.Cat(self.fn, strip=True,
                                   file_end_handler=_end).iterate(timeout=0.2):
                rst.append(line)
        except fsutil.NoData:
            # Expected: iteration ends by timing out after the second batch.
            pass

        th.join()
        self.assertEqual((expected + ['end']) * 2, rst)
コード例 #15
0
ファイル: test_zklock.py プロジェクト: wenbobuaa/pykit
    def test_concurrent(self):
        """n_thread workers each acquire the lock n_repeat times; the
        shared total must equal n_repeat * n_thread."""

        self.running = True
        self.total = 0
        n_repeat = 40
        n_thread = 5

        ths = [
            threadutil.start_daemon_thread(self._loop_acquire, args=(n_repeat, ii,))
            for ii in range(n_thread)
        ]

        for th in ths:
            th.join()

        self.running = False
        # No increment may be lost if the lock serializes correctly.
        self.assertEqual(n_repeat * n_thread, self.total)
コード例 #16
0
ファイル: jobq.py プロジェクト: bsc-s2/ops
def start_thread(exec_func, *args):
    """Run `exec_func(*args)` in a new daemon thread; return the thread.

    NOTE(review): the original comment here mentioned `thread_index`,
    which nothing in this function computes or uses — it appears to have
    been copied from the caller's context; verify against worker_group.
    """
    return threadutil.start_daemon_thread(exec_func, args=args)
コード例 #17
0
    def test_write_file_atomically(self):
        """Non-atomic write_file exposes the second writer's content before
        the first writer finishes; atomic=True does not.

        os.fsync is monkey-patched to sleep 3s so the two writer threads
        overlap deterministically; it is restored at the end.
        """

        fn = '/tmp/pykit-ut-fsutil-write-atomic'

        dd('atomically write file')

        cont_thread1 = 'cont_thread1'
        cont_thread2 = 'cont_thread2'

        # Keep the real fsync so the slow wrapper can delegate to it and
        # so it can be restored after the test.
        os_fsync = os.fsync

        def _wait_fsync(fildes):
            time.sleep(3)

            os_fsync(fildes)

        os.fsync = _wait_fsync

        assert_ok = {'ok': True}

        def _write_wait(cont_write, cont_read, start_after, atomic):
            # After `start_after` seconds, write `cont_write`; then verify
            # the file reads back as `cont_read` (flag failure otherwise).

            time.sleep(start_after)

            fsutil.write_file(fn, cont_write, atomic=atomic)

            if cont_read != fsutil.read_file(fn):
                assert_ok['ok'] = False

        force_remove(fn)
        # atomic=False
        #  time     file    thread1     thread2
        #   0      cont_1   w_cont_1    sleep()
        #  1.5     cont_2   sleep()     w_cont_2
        #   3      cont_2   return      sleep()
        #  4.5     cont_2    None       return

        ths = []
        th = threadutil.start_daemon_thread(_write_wait,
                                            args=(cont_thread1, cont_thread2,
                                                  0, False))
        ths.append(th)

        th = threadutil.start_daemon_thread(_write_wait,
                                            args=(cont_thread2, cont_thread2,
                                                  1.5, False))
        ths.append(th)

        for th in ths:
            th.join()
        self.assertTrue(assert_ok['ok'])

        force_remove(fn)
        # atomic=True
        #  time     file    thread1     thread2
        #   0       None    w_cont_1    sleep()
        #  1.5      None    sleep()     w_cont_2
        #   3      cont_1   return      sleep()
        #  4.5     cont_2    None       return

        ths = []
        th = threadutil.start_daemon_thread(_write_wait,
                                            args=(cont_thread1, cont_thread1,
                                                  0, True))
        ths.append(th)

        th = threadutil.start_daemon_thread(_write_wait,
                                            args=(cont_thread2, cont_thread2,
                                                  1.5, True))
        ths.append(th)

        for th in ths:
            th.join()
        self.assertTrue(assert_ok['ok'])

        # Restore the real fsync and clean up the temp file.
        os.fsync = os_fsync
        force_remove(fn)
コード例 #18
0
ファイル: test_logcollector.py プロジェクト: hubiyong/pykit
    def test_basic(self):
        """End to end: emit logs for ~2.5s, run the collector against the
        log file, and expect exactly 3 'error' entries to be sent."""
        logger = logutil.make_logger(base_dir=this_base, log_name='test_log')

        def log():
            # Emit info/warn/error roughly every 10ms for about 2.5 seconds.
            start_time = time.time()

            while True:
                logger.info('info')
                logger.warn('warn')
                logger.error('error')

                if time.time() > start_time + 2.5:
                    break

                time.sleep(0.01)

        log_th = threadutil.start_daemon_thread(log)

        log_entries = []

        def get_level(log_str):
            # Map a raw log line to its level; 'unknown' if none matched.
            for k in ('INFO', 'WARNING', 'ERROR'):
                if k in log_str:
                    return k.lower()
            else:
                return 'unknown'

        def send_log(log_entry):
            # Capture everything the collector "sends" for later assertions.
            log_entries.append(log_entry)

        kwargs = {
            'node_id': '123abc',
            'node_ip': '1.2.3.4',
            'send_log': send_log,
            'conf': {
                'my_test_log': {
                    'file_path': os.path.join(this_base, 'test_log.out'),
                    'level': ['error'],
                    'get_level': get_level,
                    'is_first_line': is_first_line,
                    'parse': parse,
                },
            },
        }

        threadutil.start_daemon_thread(collector.run, kwargs=kwargs)

        log_th.join()

        # Give the collector time to flush everything before asserting.
        time.sleep(2)

        self.assertEqual(3, len(log_entries))
        dd(log_entries)

        dd(log_entries[0]['count'])
        dd(log_entries[1]['count'])
        dd(log_entries[2]['count'])
        # NOTE(review): ~100 presumably reflects one error per 10ms over the
        # collector's aggregation window — confirm against collector config.
        self.assertAlmostEqual(100, log_entries[1]['count'], delta=30)

        self.assertEqual('error', log_entries[0]['level'])
        self.assertEqual('my_test_log', log_entries[0]['log_name'])
        self.assertEqual('test_log.out', log_entries[0]['log_file'])
コード例 #19
0
def start_thread(exec_func, *args):
    """Run `exec_func(*args)` in a new daemon thread; return the thread.

    NOTE(review): the original comment here mentioned `thread_index`,
    which nothing in this function computes or uses — it appears to have
    been copied from the caller's context; verify against worker_group.
    """
    return threadutil.start_daemon_thread(exec_func, args=args)