예제 #1
0
File: ping.py Project: spookey/photon
    def __init__(self, m, six=False, net_if=None, num=5,
                 packetsize=None, max_pool_size=None):
        """Prepare the ping helper and pre-render the command-line flags.

        :param m: messaging/logging callable, validated through ``check_m``
        :param six: use the ``ping6`` binary instead of ``ping`` when True
        :param net_if: network interface to bind to (``-I`` flag)
        :param num: probe count for the ``-c`` flag (at least 1)
        :param packetsize: payload size for the ``-s`` flag, when > 1
        :param max_pool_size: worker pool size; defaults to the CPU count
            and is clamped to a minimum of 1
        """
        super().__init__()

        self.m = check_m(m)

        # Pick the IPv6 binary on request.
        self.__ping_cmd = 'ping6' if six else 'ping'
        self.__net_if = '-I %s' % (net_if) if net_if else ''

        # Always send at least one probe.
        self.__num = '-c %d' % (num or 1)

        # Values <= 1 (or falsy) leave the packet size unset.
        if not packetsize:
            packetsize = ''
        elif packetsize > 1:
            packetsize = '-s %s' % (packetsize)
        self.__packetsize = packetsize

        pool = max_pool_size or _cpu_count()
        self.__max_pool_size = max(1, pool)

        # Collected probe outcomes, keyed per target.
        self.__probe_results = dict()

        self.m(
            'ping tool startup done',
            more=dict(
                pingc=self.__ping_cmd,
                net_if=self.__net_if,
                num=self.__num
            ),
            verbose=False
        )
예제 #2
0
def _mixprocess(data, key, iv, fn, to_string, threads=None):
    """Drive one of the native mix primitives over *data*.

    Validates key/iv/size invariants, marshals the buffers through cffi,
    dispatches to the single-threaded or threaded C entry point, and
    returns the output buffer (or a bytes copy when *to_string* is True).
    """
    if threads is None:
        threads = _cpu_count()
    assert len(key) == 16, "key must be 16 bytes long"
    assert len(iv) == 16, "iv must be 16 bytes long"
    assert threads >= 1, "you must use at least one thread"
    assert len(data) % _lib.MACRO_SIZE == 0, \
        "plaintext size must be a multiple of %d" % _lib.MACRO_SIZE

    # Marshal Python buffers into C-compatible objects.
    buf_in = ffi.from_buffer("unsigned char[]", data)
    buf_out = ffi.new("unsigned char[]", len(data))
    size_c = ffi.cast("unsigned long", len(data))
    thr_c = ffi.cast("unsigned int", threads)
    key_c = ffi.new("unsigned char[]", key)
    iv_c = ffi.new("unsigned char[]", iv)

    # Single-threaded primitives take no thread-count argument.
    if fn in (_lib.mixencrypt, _lib.mixdecrypt):
        fn(buf_in, buf_out, size_c, key_c, iv_c)
    elif fn in (_lib.t_mixencrypt, _lib.t_mixdecrypt):
        fn(thr_c, buf_in, buf_out, size_c, key_c, iv_c)
    elif fn in (_lib.mixslice, _lib.unsliceunmix):
        fn(thr_c, buf_in, buf_out, size_c, key_c, iv_c)
    else:
        raise Exception("unknown mix function %r" % fn)

    result = ffi.buffer(buf_out, len(data))
    return result[:] if to_string else result
예제 #3
0
def system_load(pl, format='{avg:.1f}', threshold_good=1, threshold_bad=2,
                track_cpu_count=False, short=False):
	'''Return system load average.

	Highlights using ``system_load_good``, ``system_load_bad`` and
	``system_load_ugly`` highlighting groups, depending on the thresholds
	passed to the function.

	:param str format:
		format string, receives ``avg`` as an argument
	:param float threshold_good:
		threshold for gradient level 0: any normalized load average below this
		value will have this gradient level.
	:param float threshold_bad:
		threshold for gradient level 100: any normalized load average above this
		value will have this gradient level. Load averages between
		``threshold_good`` and ``threshold_bad`` receive gradient level that
		indicates relative position in this interval:
		(``100 * (cur-good) / (bad-good)``).
		Note: both parameters are checked against normalized load averages.
	:param bool track_cpu_count:
		if True powerline will continuously poll the system to detect changes
		in the number of CPUs.
	:param bool short:
		if True only the sys load over last 1 minute will be displayed.

	Divider highlight group used: ``background:divider``.

	Highlight groups used: ``system_load_gradient`` (gradient) or ``system_load``.
	'''
	global cpu_count
	try:
		# Cache the CPU count in the module global; re-poll when tracking.
		cpu_num = cpu_count = _cpu_count() if cpu_count is None or track_cpu_count else cpu_count
	except NotImplementedError:
		pl.warn('Unable to get CPU count: method is not implemented')
		return None
	ret = []
	for avg in os.getloadavg():
		# Normalize against the CPU count before applying thresholds.
		normalized = avg / cpu_num
		if normalized < threshold_good:
			gradient_level = 0
		elif normalized < threshold_bad:
			gradient_level = (normalized - threshold_good) * 100.0 / (threshold_bad - threshold_good)
		else:
			gradient_level = 100
		ret.append({
			'contents': format.format(avg=avg),
			'highlight_groups': ['system_load_gradient', 'system_load'],
			'divider_highlight_group': 'background:divider',
			'gradient_level': gradient_level,
		})

		# Short mode: stop after the 1-minute average (no trailing space).
		if short:
			return ret

	ret[0]['contents'] += ' '
	ret[1]['contents'] += ' '
	return ret
예제 #4
0
def system_load(pl,
                format='{avg:.1f}',
                threshold_good=1,
                threshold_bad=2,
                track_cpu_count=False):
    '''Return system load average.

    Highlights using ``system_load_good``, ``system_load_bad`` and
    ``system_load_ugly`` highlighting groups, depending on the thresholds
    passed to the function.

    :param str format:
        format string, receives ``avg`` as an argument
    :param float threshold_good:
        threshold for gradient level 0: normalized load below this value
        receives gradient level 0.
    :param float threshold_bad:
        threshold for gradient level 100; normalized loads in between get
        a proportional level ``100 * (cur-good) / (bad-good)``.
    :param bool track_cpu_count:
        if True powerline will continuously poll the system to detect
        changes in the number of CPUs.

    Divider highlight group used: ``background:divider``.

    Highlight groups used: ``system_load_gradient`` (gradient) or ``system_load``.
    '''
    global cpu_count
    try:
        # Refresh the cached CPU count on first use or when tracking.
        if cpu_count is None or track_cpu_count:
            cpu_count = _cpu_count()
    except NotImplementedError:
        pl.warn('Unable to get CPU count: method is not implemented')
        return None
    cpu_num = cpu_count

    segments = []
    for avg in os.getloadavg():
        norm = avg / cpu_num
        if norm < threshold_good:
            level = 0
        elif norm < threshold_bad:
            level = (norm - threshold_good) * 100.0 / (threshold_bad - threshold_good)
        else:
            level = 100
        segments.append({
            'contents': format.format(avg=avg),
            'highlight_group': ['system_load_gradient', 'system_load'],
            'divider_highlight_group': 'background:divider',
            'gradient_level': level,
        })
    segments[0]['contents'] += ' '
    segments[1]['contents'] += ' '
    return segments
예제 #5
0
 def __prepareParallel(self, parallel):
     """Resolve *parallel* into a concrete worker count.

     ``None`` or ``'max'`` selects every available CPU; a negative
     integer is taken relative to the CPU count (e.g. -1 = all but one).
     """
     cpu_total = _cpu_count()
     if parallel is None or parallel == 'max':
         workers = cpu_total
     else:
         workers = int(parallel)
         # Negative counts are offsets from the total core count.
         if workers < 0:
             workers = cpu_total + workers
     self.__parallel = workers
     self.info("Will use %d workers" % (self.__parallel))
예제 #6
0
File: sys.py Project: firebitsbr/powerline
def system_load(pl, format="{avg:.1f}", threshold_good=1, threshold_bad=2, track_cpu_count=False):
    """Return system load average.

    Highlights using ``system_load_good``, ``system_load_bad`` and
    ``system_load_ugly`` highlighting groups, depending on the thresholds
    passed to the function.

    :param str format:
        format string, receives ``avg`` as an argument
    :param float threshold_good:
        threshold for gradient level 0: normalized load below this value
        receives gradient level 0.
    :param float threshold_bad:
        threshold for gradient level 100; normalized loads between the two
        thresholds get ``100 * (cur-good) / (bad-good)``.
    :param bool track_cpu_count:
        if True powerline will continuously poll the system to detect
        changes in the number of CPUs.

    Divider highlight group used: ``background:divider``.

    Highlight groups used: ``system_load_gradient`` (gradient) or ``system_load``.
    """
    global cpu_count
    try:
        # Cache the CPU count; re-poll every call when tracking changes.
        if cpu_count is None or track_cpu_count:
            cpu_count = _cpu_count()
        cpu_num = cpu_count
    except NotImplementedError:
        pl.warn("Unable to get CPU count: method is not implemented")
        return None

    def _gradient(norm):
        # Piecewise: 0 below good, 100 above bad, linear in between.
        if norm < threshold_good:
            return 0
        if norm < threshold_bad:
            return (norm - threshold_good) * 100.0 / (threshold_bad - threshold_good)
        return 100

    segments = [
        {
            "contents": format.format(avg=avg),
            "highlight_group": ["system_load_gradient", "system_load"],
            "divider_highlight_group": "background:divider",
            "gradient_level": _gradient(avg / cpu_num),
        }
        for avg in os.getloadavg()
    ]
    segments[0]["contents"] += " "
    segments[1]["contents"] += " "
    return segments
예제 #7
0
    def __init__(self,
                 m,
                 six=False,
                 net_if=None,
                 num=5,
                 packetsize=None,
                 max_pool_size=None):
        """Initialise the ping helper and build its command-line flags.

        :param m: messaging/logging callable, validated through ``check_m``
        :param six: when True, use the ``ping6`` binary
        :param net_if: interface for the ``-I`` flag, if any
        :param num: probe count for the ``-c`` flag (minimum 1)
        :param packetsize: payload size for the ``-s`` flag when > 1
        :param max_pool_size: worker pool size, defaulting to the CPU
            count and never below 1
        """
        super().__init__()

        self.m = check_m(m)

        # Select the binary for the requested IP version.
        cmd = 'ping'
        if six:
            cmd = 'ping6'
        self.__ping_cmd = cmd

        if net_if:
            self.__net_if = '-I %s' % (net_if)
        else:
            self.__net_if = ''

        # At least one probe is always sent.
        count = num if num else 1
        self.__num = '-c %d' % (count)

        if not packetsize:
            packetsize = ''
        if packetsize and packetsize > 1:
            packetsize = '-s %s' % (packetsize)
        self.__packetsize = packetsize

        workers = max_pool_size if max_pool_size else _cpu_count()
        if workers < 1:
            workers = 1
        self.__max_pool_size = workers

        # Probe outcomes collected per target.
        self.__probe_results = dict()

        self.m('ping tool startup done',
               more=dict(pingc=self.__ping_cmd,
                         net_if=self.__net_if,
                         num=self.__num),
               verbose=False)
예제 #8
0
# Copyright 2018 Robert Haas
# For license information, see LICENSE.TXT in the package root directory

from multiprocessing import cpu_count as _cpu_count

# Number of logical CPU cores detected once at import time; used as the
# default degree of parallelism when the caller does not specify one.
_DETECTED_NUM_CORES = _cpu_count()


def dask(function, argument_list, num_cores=None):
    """Apply a multivariate function to a list of arguments in a parallel fashion.

    Uses Dask's delayed() function to build a task graph and compute() function to
    calculate results.

    Args:
        function: A callable object that accepts more than one argument
        argument_list: An iterable object of input argument collections
        num_cores (optional): Number of cores to use for calculation.

    Returns:
        List of output results

    Example:
        >>> def add(x, y, z):
        ...     return x+y+z
        ...
        >>> dask(add, [(1, 2, 3), (10, 20, 30)])
        [6, 60]

    References:
        - https://dask.pydata.org
예제 #9
0
def _get_num_workers(num_workers):
    if num_workers == -1:
        return _cpu_count() - 1
    return num_workers
예제 #10
0
 def __init__(self, cpu_count=None, buffer_size=10000):
     """Create the worker pipeline queues.

     :param cpu_count: number of worker processes; defaults to one less
         than the machine's CPU count, but never below 1
     :param buffer_size: maximum number of pending items in the input queue
     """
     if cpu_count is None:
         # Computed per call: the old default (``_cpu_count() - 1`` in the
         # signature) was frozen at class-definition time and could be 0
         # on a single-core machine.
         cpu_count = max(1, _cpu_count() - 1)
     self.input_queue = Queue(buffer_size)
     self.output_queue = Queue()
     self.cpu_count = cpu_count
예제 #11
0
    # TODO improve design
    global data
    data = tuple(seq_iter)

    # creates min(len(data), cpu_count) processes
    processes = [
        Process(target=reverse_and_print_task, args=(q, c, v))
        for _ in range(min(len(data), cpu_count))
    ]

    for p in processes:
        p.start()
    for i in range(len(data)):
        q.put(i)  # mark entries in queue
    for p in processes:
        q.put(None)
    for p in processes:
        p.join()


if __name__ == '__main__':
    # Lazily read each input sequence from standard input.
    seq_iter = read_sequences(stdin)
    # NOTE: ``cpu_count`` is read as a module-level global by the
    # worker-spawning code above, so it must be assigned at module scope.
    cpu_count = _cpu_count()

    if cpu_count == 1:
        # Single core: no benefit from spawning worker processes.
        main_mono_prc(seq_iter)
    else:
        # TODO function can fail silently; fix
        main_multi_prc(seq_iter, cpu_count)