Example #1
def find_process_files(root_dir):
    lock = Lock()

    try:
        num_proc = int(os.environ.get('SCIPY_NUM_CYTHONIZE_JOBS', cpu_count()))
        pool = Pool(processes=num_proc)
    except ImportError as e:
        # Allow building (single-threaded) on GNU/Hurd, which does not
        # support semaphores so Pool cannot initialize.
        pool = type('', (), {
            'imap_unordered':
            lambda self, func, iterable: map(func, iterable)
        })()
    except ValueError:
        pool = Pool()

    hash_db = load_hashes(HASH_FILE)
    # Keep changed pxi/pxd hashes in a separate dict until the end
    # because if we update hash_db and multiple files include the same
    # .pxi file the changes won't be detected.
    dep_hashes = {}

    # Run any _generate_pyx.py scripts
    jobs = []
    for cur_dir, dirs, files in os.walk(root_dir):
        generate_pyx = os.path.join(cur_dir, '_generate_pyx.py')
        if os.path.exists(generate_pyx):
            jobs.append(generate_pyx)

    for result in pool.imap_unordered(
            lambda fn: process_generate_pyx(fn, lock), jobs):
        pass

    # Process pyx files
    jobs = []
    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            in_file = os.path.join(cur_dir, filename + ".in")
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                continue
            for fromext, function in rules.items():
                if filename.endswith(fromext):
                    toext = ".c"
                    with open(os.path.join(cur_dir, filename), 'rb') as f:
                        data = f.read()
                        m = re.search(
                            br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$",
                            data, re.I | re.M)
                        if m:
                            toext = ".cxx"
                    fromfile = filename
                    tofile = filename[:-len(fromext)] + toext
                    jobs.append((cur_dir, fromfile, tofile, function, hash_db,
                                 dep_hashes, lock))

    for result in pool.imap_unordered(lambda args: process(*args), jobs):
        pass

    hash_db.update(dep_hashes)
    save_hashes(hash_db, HASH_FILE)
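The helpers this script leans on (load_hashes, save_hashes, HASH_FILE) are not shown above. A minimal sketch of what they could look like, assuming a simple JSON hash database (the file name and format here are assumptions, not the script's actual ones):

import json
import os

HASH_FILE = 'cythonize.json'  # assumed name of the hash database

def load_hashes(filename):
    # Return the stored {path: hash} mapping, or an empty dict on a first run.
    if os.path.isfile(filename):
        with open(filename) as f:
            return json.load(f)
    return {}

def save_hashes(hash_db, filename):
    # Persist the mapping so unchanged files can be skipped on the next build.
    with open(filename, 'w') as f:
        json.dump(hash_db, f, indent=2, sort_keys=True)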
Example #2
class Bank(object):
    tie_lock = Lock()

    @staticmethod
    def __get_hash(obj):
        return id(obj)  # hash_func(obj)

    @classmethod
    def transfer(cls, p_from, p_to, money):
        """p_from: the payer, p_to: the payee, money: the amount"""
        from_hash = Bank.__get_hash(p_from)
        to_hash = Bank.__get_hash(p_to)

        # Rule: always lock the larger hash first
        if from_hash > to_hash:
            with p_from.lock:
                p_from.money -= money
                sleep(1)  # simulate network latency
                with p_to.lock:
                    p_to.money += money
        elif from_hash < to_hash:
            with p_to.lock:
                p_to.money += money
                sleep(1)  # simulate network latency
                with p_from.lock:
                    p_from.money -= money
        # Handle a hash collision (very unlikely)
        else:
            # On a tie, both sides contend for a single intermediary lock;
            # whoever wins it transfers first
            with Bank.tie_lock:
                with p_from.lock:
                    p_from.money -= money
                    sleep(1)  # simulate network latency
                    with p_to.lock:
                        p_to.money += money
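A minimal way to exercise this class, assuming a hypothetical Account type whose per-instance lock guards its balance; because both threads acquire the two locks in the same hash order, the opposing transfers cannot deadlock:

from threading import Lock, Thread
from time import sleep

class Account(object):
    # Hypothetical account: .lock guards .money
    def __init__(self, money):
        self.money = money
        self.lock = Lock()

a, b = Account(100), Account(100)
t1 = Thread(target=Bank.transfer, args=(a, b, 30))
t2 = Thread(target=Bank.transfer, args=(b, a, 50))
t1.start(); t2.start()
t1.join(); t2.join()
print(a.money, b.money)  # 120 80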
Example #3
    def __init__(self, max_threads=1, max_browsers=0):

        super(TokenTops, self).__init__(max_threads, max_browsers)

        # should be 'firefox', 'chrome' or 'phantomjs'(headless)
        self.browser_name = 'firefox'

        # should be 'html5lib', 'lxml' or 'html.parser'
        self.html_parser = 'html5lib'

        self.mutex = Lock()

        self.NOT_FOUND_MSG = "From {}: could not find {}"

        # location of listings in website, may be more than one
        self.urls = [
            'https://www.tokentops.com/ico/?page=1',
            'https://www.tokentops.com/ico/?page=2',
            'https://www.tokentops.com/ico/?page=3',
            'https://www.tokentops.com/ico/?page=4',
            'https://www.tokentops.com/ico/?page=5',
            'https://www.tokentops.com/ico/?page=6',
            'https://www.tokentops.com/ico/?page=7',
            'https://www.tokentops.com/ico/?page=8',
            'https://www.tokentops.com/ico/?page=9',
            'https://www.tokentops.com/ico/?page=10',
            'https://www.tokentops.com/ico/?page=11',
            'https://www.tokentops.com/ico/?page=12',
            'https://www.tokentops.com/ico/?page=13'
        ]

        self.domain = 'https://www.tokentops.com/'
Example #4
    def __init__(self,
                 creator,
                 pool_size=5,
                 max_overflow=10,
                 timeout=30,
                 use_lifo=False,
                 **kw):
        """
        pool_size:
        Note that the pool begins with no connections; once this number of connections is requested, that number of connections will remain.
        pool_size can be set to 0 to indicate no size limit; to disable pooling, use a NullPool instead.

        max_overflow:
        When the number of checked-out connections reaches the size set in pool_size, additional connections will be returned up to this limit.
        When those additional connections are returned to the pool, they are disconnected and discarded.
        The total number of simultaneous connections the pool will allow is pool_size + max_overflow, and the total number of "sleeping" connections the pool will allow is pool_size.
        max_overflow can be set to -1 to indicate no overflow limit.

        kw:
        Other keyword arguments, including Pool.recycle, Pool.echo, Pool.reset_on_return and others, are passed to the Pool class constructor.
        """
        Pool.__init__(self, creator, **kw)
        self._pool = sqla_queue.Queue(pool_size, use_lifo=use_lifo)
        self._overflow = -pool_size  # note: starts negative; reaches 0 only once pool_size connections are out
        self._max_overflow = max_overflow
        self._timeout = timeout
        self._overflow_lock = Lock()
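The -pool_size initialization is the trick worth noting: the overflow counter starts negative so the first pool_size checkouts are "free", and overflow can only exceed max_overflow once pool_size + max_overflow connections are out. A standalone sketch of that arithmetic (not SQLAlchemy's actual code):

pool_size, max_overflow = 5, 10
overflow = -pool_size  # negative until the base pool is fully checked out

def checkout():
    global overflow
    if overflow >= max_overflow:
        raise RuntimeError('pool exhausted: %d connections out'
                           % (pool_size + overflow))
    overflow += 1  # total checked out == pool_size + overflow once overflow >= 0

for _ in range(pool_size + max_overflow):
    checkout()  # all 15 succeed; a 16th call would raise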
Example #5
def find_process_files(root_dir):
    lock = Lock()
    pool = Pool()

    hash_db = load_hashes(HASH_FILE)
    # Keep changed .pxi hashes in a separate dict until the end
    # because if we update hash_db and multiple files include the same
    # .pxi file the changes won't be detected.
    pxi_hashes = {}

    jobs = []

    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            in_file = os.path.join(cur_dir, filename + ".in")
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                continue
            for fromext, function in rules.items():
                if filename.endswith(fromext):
                    toext = ".c"
                    with open(os.path.join(cur_dir, filename), 'rb') as f:
                        data = f.read()
                        m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
                        if m:
                            toext = ".cxx"
                    fromfile = filename
                    tofile = filename[:-len(fromext)] + toext
                    jobs.append((cur_dir, fromfile, tofile, function, hash_db, pxi_hashes, lock))

    for result in pool.imap(lambda args: process(*args), jobs):
        pass

    hash_db.update(pxi_hashes)
    save_hashes(hash_db, HASH_FILE)
Example #6
 def __init__(self, processes, maxiter, lb, ub, mp_pool, minstep, minfunc,
              debug, quiet, initial_best_guess=None):
     from multiprocessing.dummy import Lock
     self.xlog = {}
     self.fxlog = {}
     for i in range(processes):
         self.xlog[str(i)] = []
         self.fxlog[str(i)] = []
     if initial_best_guess is None:
         self.g = np.random.rand(len(lb))
         self.g = lb + self.g * (ub - lb)  # position of global best - random at start
         self.fg = np.inf  # cost of global best
     else:
         self.g = np.array(initial_best_guess[0])
         self.fg = initial_best_guess[1]
     self.last_g = np.array(self.g)
     self.last_fg = self.fg
     self.lock = Lock()
     self.count = 0
     self.processes = processes
     self.maxiter = maxiter
     self.minstep = minstep
     self.minfunc = minfunc
     self.debug = debug
     self.maxcount = maxiter * processes
     self.end = False
     self.mp_pool = mp_pool
     self.quiet = quiet
     self.no_improvement_count = 0
Example #7
 def __init__(self, target, threads):
     self.target = target
     self.ips = []
     self.time = time.time()
     self.threads = threads
     self.lock = Lock()
     self.get_ip_addr()
Example #8
    def __init__(self, db_file_path='http_lib_cache.db'):
        self.compress_type = strRAW_PAGE
        self.db = shelve.open(str(db_file_path), writeback=True)

        self.db_lock = Lock()
        self.rollback()
        self.page_root_node = self.db
Example #9
def main():
    lock = Lock()
    p_list = [threading.Thread(target=test, args=(i, lock)) for i in range(5)]
    for i in p_list:
        i.start()
    for i in p_list:
        i.join()
    print(num)
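The test worker and the num global are defined elsewhere; a sketch that makes the example runnable, assuming test just increments a shared counter under the lock:

import threading
from threading import Lock

num = 0

def test(i, lock):
    # Without the lock, concurrent `num += 1` updates can interleave and lose increments.
    global num
    for _ in range(100000):
        with lock:
            num += 1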
Example #10
def main():
    print(f"[还钱前]小明{xiaoming},小张{xiaozhang},小周{xiaozhou}")
    lock = Lock()
    p = ThreadPool()
    p.apply_async(test, args=(lock, ))
    p.close()
    p.join()
    print(f"[还钱后]小明{xiaoming},小张{xiaozhang},小周{xiaozhou}")
Example #11
 def __init__(self):
     self.client_list = deque()
     self.db = DB()
     self.stats = {}
     self.pool = None
     self.lock = Lock()
     signal.signal(signal.SIGTERM, self.term_test)
     signal.signal(signal.SIGINT, self.term_test)
Example #12
def singleton(cls):
    _instance = {}
    lock = Lock()
    def _singleton(*args, **kwargs):
        with lock:  # thread-safe singleton construction
            if cls not in _instance:
                _instance[cls] = cls(*args, **kwargs)
        return _instance[cls]
    return _singleton
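Usage sketch: once decorated, every construction returns the first instance, even when several threads race to create it (Config and its argument are just for illustration):

@singleton
class Config(object):
    def __init__(self, path='app.ini'):
        self.path = path

assert Config() is Config('other.ini')  # later calls return the cached instance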
Example #13
 def file_info(self, filename, writable=False):
     with self.open_lock:
         if filename in self.open_files:
             return self.open_files[filename]
         mode = {True: 'wb', False: 'rb'}[writable]
         file_handle = open(filename, mode)
         write_lock = Lock()
         self.open_files[filename] = dict(file_handle=file_handle,
                                          write_lock=write_lock)
     return self.file_info(filename, writable)
Example #14
 def __init__(self,
              input_files,
              ncpus=2,
              binary_name='fast',
              stream=sys.stdout):
     self.input_files = input_files
     self.ncpus = ncpus
     self.binary_name = binary_name
     self.stream = stream
     self.lock = Lock()
Example #15
def main():
    resource = 5  # 5 chopsticks, 5 philosophers
    locks = [Lock() for i in range(resource)]  # one lock per resource

    p = ThreadPool(resource)  # give the pool 5 threads (the default is the CPU core count)
    for i in range(resource):
        # Grab the left chopstick (locks[i]) and the right one (locks[(i + 1) % resource]);
        # e.g. i=0 ==> 0,1; i=4 ==> 4,0
        p.apply_async(eat, args=(locks[i], locks[(i + 1) % resource]))
    p.close()
    p.join()
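eat is not shown; a minimal sketch in which each philosopher takes the left chopstick and then the right. Note that this naive ordering can deadlock if all five grab their left chopstick at once; ordering the two locks (as in Example #2's hash rule) avoids that:

from time import sleep

def eat(left, right):
    with left:
        sleep(0.1)  # widen the window that triggers the classic deadlock
        with right:
            print('eating')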
Example #16
    def __init__(self, target, startPort, endPort):
        self.target = target
        self.startPort = startPort
        self.endPort = endPort

        self.dnsRecords = []
        self.mutex = Lock()

        self.ports = []
        self.getPorts()
        self.time = time()
Example #17
def liveTotal():
    cacheLock = Lock()
    liveQuoteAtomic = partial(liveQuote,lock=cacheLock)
    threadPool = Pool(5)
    dailyTotals = threadPool.map(liveQuoteAtomic,STOCKS.keys())
    threadPool.close()
    threadPool.join()
    balance = int(sum([dt[0] for dt in dailyTotals]))
    totalDayChange = int(sum([dt[1] for dt in dailyTotals]))
    Result = collections.namedtuple("Result",["balance","dayChange"])
    return Result(balance,totalDayChange)
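liveQuote and STOCKS live elsewhere; a hypothetical stub matching the call shape, where each call returns a (value, dayChange) pair and serializes access to a shared cache with the lock:

STOCKS = {'AAPL': 10, 'MSFT': 5}  # assumed: ticker -> share count

def liveQuote(symbol, lock):
    with lock:                       # guard whatever cache the real code shares
        price, change = 100.0, 1.5   # stand-in for a real quote lookup
    shares = STOCKS[symbol]
    return (price * shares, change * shares)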
Example #18
def singleton_pool(cls):
    _instance_pool = []
    lock = Lock()
    def _singleton(*args, **kwargs):
        with lock:  # thread-safe singleton pool
            for _args, _kwargs, _instance in _instance_pool:
                if (_args, _kwargs) == (args, kwargs):
                    return _instance
            _instance = cls(*args, **kwargs)
            _instance_pool.append((args, kwargs, _instance))
            return _instance
    return _singleton
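Usage sketch: the pool hands back one instance per distinct argument tuple, and the same instance for repeated arguments (Connection is illustrative only):

@singleton_pool
class Connection(object):
    def __init__(self, host, port=5432):
        self.host, self.port = host, port

a = Connection('db1')
b = Connection('db1')
c = Connection('db2')
assert a is b and a is not c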
Example #19
def parallel_scan(app_config, user_id, media_files):
    """
    Once the app is launched for the first scan (when there is no database), or in order to re-scan,
    media files are analyzed on demand (as an authorized user, navigate to /settings and
    click the 'Scan Media Files' button).
    To speed up the scan, up to 32 threads are used for this task.
    During the scan, metadata is extracted from each file and registered in the DB.

    :param app_config: a dictionary containing the application configuration settings (=app.config).
    :param user_id: an integer number of user id which will be considered as owner (0 for public).
    :param media_files: a list of strings - absolute paths of media files to be processed.
    :return: True.
    """
    passed, lock_passed = Value('i', 0), Lock()
    failed, lock_failed = Value('i', 0), Lock()
    args = [(app_config, user_id, path, passed, lock_passed, failed, lock_failed)
            for path in media_files]
    pool = ThreadPool(2)
    pool.starmap(single_scan, args)
    pool.close()
    pool.join()
    return True
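single_scan is not shown; a sketch of the expected shape, bumping the shared Value counters under their locks (the actual metadata extraction is elided):

def single_scan(app_config, user_id, path, passed, lock_passed, failed, lock_failed):
    try:
        # ... read metadata from `path` and register it in the DB ...
        with lock_passed:
            passed.value += 1
    except Exception:
        with lock_failed:
            failed.value += 1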
Example #20
 def __init__(self, target, start, end):
     self.target = target
     self.start = start
     self.end = end
     self.W = '\033[0m'
     self.G = '\033[1;32m'
     self.O = '\033[1;33m'
     self.R = '\033[1;31m'
     self.time = time()
     self.ports = []
     self.result = []
     self.mutex = Lock()
     self.get_ports()
Example #21
def do_pool_commands(cmd, lock=Lock(), shell_var=True):
    '''run pool commands'''
    try:
        output = subprocess.Popen(cmd, shell=shell_var,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        output_stdout, output_stderr = output.communicate()
        with lock:
            print('running: {}'.format(cmd))
            print(output_stdout)
            print(output_stderr)
        return output.wait()
    except BaseException:
        print('command failed {}'.format(cmd))
        return 1
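A plausible call site, fanning the runner out over a thread pool with one shared lock so the printed output from different commands does not interleave:

from functools import partial
from multiprocessing.dummy import Lock, Pool

cmds = ['echo one', 'echo two', 'echo three']
lock = Lock()
pool = Pool(3)
exit_codes = pool.map(partial(do_pool_commands, lock=lock), cmds)
pool.close()
pool.join()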
Example #22
 def __init__(self, target, threads, custom_ports):
     self.W            = '\033[0m'
     self.G            = '\033[1;32m'
     self.O            = '\033[1;33m'
     self.R            = '\033[1;31m'
     self.custom_ports = custom_ports
     self.server       = target
     self.result       = []
     self.ips          = []
     self.time         = time.time()
     self.threads      = threads
     self.lock         = Lock()
     self.target       = self.handle_target()
     self.get_ip_addr()
Example #23
 def __init__(self, host, user, protocol, threads, ssl):
     self.W = '\033[0m'
     self.G = '\033[1;32m'
     self.O = '\033[1;33m'
     self.R = '\033[1;31m'
     self.time = time.time()
     self.host = host
     self.user = user
     self.proto = protocol
     self.uname = ''
     self.pwd = []
     self.result = []
     self.ssl = ssl
     self.thread = threads
     self.lock = Lock()
Example #24
 def __init__(self, transfermanager, session_count, files_dir='', coo=None):
     """
     Author: David
     Desc:
              maxs defines how many sessions run concurrently.
              curs defines how many sessions are running now.
              wq stores jobs that are delayed because of the maxs limit.
     """
     self._tm = transfermanager
     self._maxs = session_count
     self._curs = 0
     self._files_dir = files_dir
     self._wq = []
     self._pl = Pool()
     self._lock = Lock()
     self._coo = coo
Example #25
    def __init__(self, max_threads=1, max_browsers=0):

        super(IcoMarks, self).__init__(max_threads, max_browsers)

        # should be 'firefox', 'chrome' or 'phantomjs'(headless)
        self.browser_name = 'firefox'

        # should be 'html5lib', 'lxml' or 'html.parser'
        self.html_parser = 'lxml'

        self.mutex = Lock()

        self.NOT_FOUND_MSG = "From {}: could not find {}"
        self.max_pagination = 50

        # location of listings in website, may be more than one
        self.urls = ['https://www.icomarks.com/icos?sort=rating-desc']
        self.domain = 'https://www.icomarks.com/'
Example #26
def parallelize_with_param(method, param_list):
    lock = Lock()
    num_done_wrapper = [0]  # TODO: switch to python 3 and use non-local
    total = len(param_list)

    # closure to track of how many network calls we've completed
    def _method(param):
        result = method(param)
        with lock:
            num_done_wrapper[0] = num_done_wrapper[0] + 1
            num_done = num_done_wrapper[0]
            if num_done % 20 == 0:
                print "Finished {}/{}".format(num_done, total)
        return result

    pool = ThreadPool(100)
    results = pool.map(_method, param_list)
    pool.close()
    pool.join()
    return results
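Usage sketch (fetch and the URL list are illustrative): progress is printed every 20 completions, guarded by the lock so the lines do not interleave:

def fetch(url):
    return len(url)  # stand-in for a real network call

urls = ['https://example.com/{}'.format(i) for i in range(250)]
results = parallelize_with_param(fetch, urls)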
Example #27
def do_pool_commands(cmd, logger, lock=Lock()):
    try:
        output = subprocess.Popen(cmd,
                                  shell=True,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        output_stdout, output_stderr = output.communicate()
        with lock:
            logger.info('running: %s', cmd)
            logger.info(output_stdout)
            dtime = parse_time(output_stderr)
            logger.info('system_time: %s', dtime['system_time'])
            logger.info('user_time: %s', dtime['user_time'])
            logger.info('wall_clock: %s', dtime['wall_clock'])
            logger.info('percent_of_cpu: %s', dtime['percent_of_cpu'])
            logger.info('maximum_resident_set_size: %s',
                        dtime['maximum_resident_set_size'])
            logger.info('exitcode: %s', output.wait())
    except BaseException as e:
        logger.error('Failed: %s', e)
Example #28
 def __init__(self, room_name, blind, buyin, room_id):
     self.room_name = room_name
     if isinstance(blind, int) and blind < 100:
         self.blind = blind
     else:
         raise Exception('blind must be an int smaller than 100')
     if isinstance(buyin, int):
         self.buyin = buyin
     else:
         raise Exception('buyin must be an int')
     self.lock = Lock()
     self.room_id = room_id
     self.stage = 1  # not started / flop / turn / river / showdown
     self.players = []
     self.change_banker = False
     self.banker = 1
     self.speak = 0  # all speak
     self.queue = Queue()
     self.poker_engine = PokerEngine()
     self.public = Deck()
     self.players_cache = {}
Example #29
 def __init__(self, config=None, config_file_path=None):
     self.file_count = 0
     self.status_counts = {}
     self.urls = dict()  # Dictionary of URLs that have been checked, with their broken status and status code
     self.domains = dict()  # Dictionary of URL domains and when they were last requested from (429 code)
     self.unchecked_urls = set()  # Initial set of urls to be checked
     self.delayed_urls = []  # List of urls to be checked later (429 code)
     self.directory = '.'
     self.pool = ThreadPool(THREADS)
     self.lock = Lock()
     if not config and config_file_path:
         logging.info(
             'Using Linkie configuration file {}'.format(config_file_path))
         config = self.read_config(config_file_path)
     elif config:
         logging.info('Using custom Linkie settings via Python constructor')
     elif not config and not config_file_path:
         logging.info('Using default Linkie configuration')
     config = self.check_config(config)
     self.config = self.process_config(config)
Example #30
class Bank(object):
    tie_lock = Lock()

    @classmethod
    def __get_hash(cls, obj):
        return id(obj)  # hash_func(obj)

    @classmethod
    def transfer(cls, p_from, p_to, money):
        """p_from:谁转账,p_to:转给谁,money:转多少"""
        from_hash = cls.__get_hash(p_from)
        to_hash = cls.__get_hash(p_to)

        print(f"from:{p_from.name}to{p_to.name}=>{money}")
        # 规定:谁大先锁谁
        if from_hash > to_hash:
            print("from_hash > to_hash")
            with p_from.lock:
                p_from.money -= money
                sleep(1)  # 模拟网络延迟
                with p_to.lock:
                    p_to.money += money
        elif from_hash < to_hash:
            print("from_hash < to_hash")
            with p_to.lock:
                p_to.money += money
                sleep(1)  # 模拟网络延迟
                with p_from.lock:
                    p_from.money -= money
        # hash出现碰撞时处理:(可能性很低)
        else:
            print("from_hash < to_hash")
            # 平局的时候,大家一起抢一个中间锁,谁抢到谁先转账
            with cls.tie_lock:
                with p_from.lock:
                    p_from.money -= money
                    sleep(1)  # 模拟网络延迟
                    with p_to.lock:
                        p_to.money += money