Example #1
    def __init__(self, host, port, onreceive, threadpool_size):

        # udp socket binding
        self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        # tcp socket binding
        self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        self.host = host
        self.port = port

        self.onreceive = onreceive

        log.debug('using a threadpool of size: %d' % threadpool_size)

        self.threadpool = futures.ThreadPoolExecutor(
            max_workers=threadpool_size)
        self.pollers = futures.ThreadPoolExecutor(max_workers=1)

        self.active_threads = 0

        self.udp_socket.bind((host, port))
        self.tcp_socket.bind((host, port))

        log.info("Server started on %s:%s" % (self.host, self.port))
Example #2
def benchmark(conf):
    loaders = get_running_lcnodes(conf, role='loader')
    cass = get_running_lcnodes(conf, role='cass')
    cassips = [c.private_ips[-1] for c in cass]

    print 'Running with keys=1 to establish keyspace....'
    cmd = get_benchcmd(conf, loaders[0], cassips, 'keyspace')
    run_cmd(conf, loaders[0], cmd)
    print 'Sleeping for 20 seconds to allow keyspace propagation.'
    time.sleep(20)
    print 'Done!'
    print ''

    pt = PrettyTable(['name', 'status', 'detail'])

    with futures.ThreadPoolExecutor(max_workers=len(loaders)) as e:
        returns = {}
        for loader in loaders:
            cmd = get_benchcmd(conf, loader, cassips, 'benchmark')
            returns[loader.name] = e.submit(run_cmd, conf, loader, cmd)

        for key in returns.keys():
            try:
                n = returns[key].result()
                pt.add_row([key, 'OK', ''])
            except Exception as exc:
                traceback.print_exc(file=sys.stdout)
                pt.add_row([key, 'EXCEPTION', str(exc)])
    print pt
Example #3
def getresults(conf):
    loaders = get_running_lcnodes(conf, role='loader')
    pt = PrettyTable(['name', 'status', 'detail'])

    with futures.ThreadPoolExecutor(max_workers=len(loaders)) as e:
        returns = {}
        for loader in loaders:
            fname = '/opt/cassandra/' + loader.name + '.results2'
            cmd = [
                'ssh', '-o', 'StrictHostKeyChecking=no', '-o',
                'UserKnownHostsFile=/dev/null',
                os_login(conf.loader.image) + '@' + loader.public_ips[-1],
                'cat', fname
            ]

            returns[loader.name] = e.submit(run_cmd, conf, loader, cmd)

        for key in returns.keys():
            try:
                n = returns[key].result()
                pt.add_row([key, 'OK', ''])
                with open(key + '.results', 'w') as fp:
                    fp.write(n)
            except Exception as exc:
                traceback.print_exc(file=sys.stdout)
                pt.add_row([key, 'EXCEPTION', str(exc)])
    print pt
Example #4
def sshtest(conf):
    nodes = get_running_lcnodes(conf)

    pt = PrettyTable(['name', 'status', 'detail'])
    with futures.ThreadPoolExecutor(max_workers=len(nodes)) as e:
        returns = {}
        for node in nodes:
            cmd = [
                'ssh',
                '-o',
                'StrictHostKeyChecking=no',
                '-o',
                'UserKnownHostsFile=/dev/null',
                os_login(conf.loader.image) + '@' + node.public_ips[-1],
                'uptime',
            ]
            returns[node.name] = e.submit(run_cmd, conf, node, cmd)

        for key in returns.keys():
            try:
                n = returns[key].result()
                pt.add_row([key, 'OK', n.strip()])
            except Exception as exc:
                traceback.print_exc(file=sys.stdout)
                pt.add_row([key, 'EXCEPTION', str(exc)])
    print pt.get_string(sortby="status")
Example #5
def upload_multiple(file_list, max_workers=5):
    """Upload a bunch of CSV files to Sailthru. 

    Using Python's concurrent.futures library, asynchronously uploads
    the files to Sailthru, a few at a time.

    Arguments:
        file_list: List of filenames to be uploaded.
        max_workers: The number of asynchronous uploads to allow at once.

    """
    with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_file = {
            executor.submit(upload, filename): filename
            for filename in file_list
        }
        count = 0
        total = len(file_list)
        for future in futures.as_completed(future_to_file):
            file_name = future_to_file[future]
            if future.exception() is not None:
                print '%r generated an exception: %s' % (file_name,
                                                         future.exception())
            else:
                # Successful upload
                count += 1
                print('%s/%s files have been successfully uploaded' %
                      (count, total))
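
A hypothetical invocation, assuming upload() is the single-file uploader defined alongside this helper:

upload_multiple(['2024-01.csv', '2024-02.csv', '2024-03.csv'], max_workers=2)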
Example #6
    def __init__(self, settings, domain, args):
        self.dst = str(args['destination'])
        if not self.dst.endswith('.'):
            self.dst += '.'

        resolvers = args.get('resolvers', None)

        # how long we wait on upstream dns servers before puking
        self.timeout = settings.get('resolution_timeout', DEFAULT_TIMEOUT)
        log.debug('timeout: %d' % self.timeout)

        # Add way to obtain A record for dst
        def get_resolver(nameserver):
            res = dns.resolver.Resolver(filename=None, configure=False)
            res.nameservers = [nameserver]
            res.timeout = self.timeout
            return res

        self.resolvers = map(lambda x: get_resolver(x.strip()),
                             resolvers.split(','))
        log.debug('resolvers: %s' % self.resolvers)

        self.pool = futures.ThreadPoolExecutor(max_workers=len(self.resolvers))

        super(CNameRule, self).__init__(settings, domain, args)
Example #7
def main():
    user_txt = open(USER_FILE)
    for username in user_txt:
        task.put(username.rstrip("\n"))
    executor = futures.ThreadPoolExecutor(max_workers=THREAD_NUM)
    for i in range(THREAD_NUM):
        executor.submit(attack)
    executor.shutdown()
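
attack, task, USER_FILE and THREAD_NUM are defined elsewhere in the original module. A minimal sketch of what such a queue-draining worker might look like, with try_login purely hypothetical:

import Queue  # Python 2, to match the example; use `queue` on Python 3

task = Queue.Queue()

def attack():
    # Drain the shared queue until it is empty, one username per iteration.
    while True:
        try:
            username = task.get_nowait()
        except Queue.Empty:
            return
        try_login(username)  # hypothetical per-username probe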
Example #8
def solveImages(imagePaths,
                channel=None,
                maskingFn=maskStarfield,
                solveTimeout=60 * 5,
                parallel=True,
                debugOutputFolder=None,
                noAstrometryPlots=False,
                pixelError=10,
                oddsToSolve=None,
                sigma=None,
                maxWorkers=None,
                astrometryBinPath=None,
                useModifiedPath=False,
                verbose=False):
    """
    Solves multiple images in parallel. See :func:`solveImage` for parameter documentation.
    
    Note: This only works because the actual solving is done in an external
          process (astrometry.net); otherwise nothing would have been gained
          due to Python's GIL.
    """
    if maxWorkers is None:
        maxWorkers = multiprocessing.cpu_count()
    if parallel and len(imagePaths) > 1:
        workerCount = min(multiprocessing.cpu_count(), maxWorkers)

        with futures.ThreadPoolExecutor(max_workers=workerCount) as executor:
            for wcsHeader in executor.map(
                    lambda imagePath: solveImage(
                        imagePath,
                        channel=channel,
                        maskingFn=maskingFn,
                        solveTimeout=solveTimeout,
                        debugOutputFolder=debugOutputFolder,
                        noAstrometryPlots=noAstrometryPlots,
                        astrometryBinPath=astrometryBinPath,
                        useModifiedPath=useModifiedPath,
                        pixelError=pixelError,
                        oddsToSolve=oddsToSolve,
                        sigma=sigma,
                        verbose=verbose), imagePaths):
                yield wcsHeader
    else:
        for wcsHeader in map(
                lambda imagePath: solveImage(
                    imagePath,
                    channel=channel,
                    maskingFn=maskingFn,
                    solveTimeout=solveTimeout,
                    debugOutputFolder=debugOutputFolder,
                    noAstrometryPlots=noAstrometryPlots,
                    astrometryBinPath=astrometryBinPath,
                    useModifiedPath=useModifiedPath,
                    pixelError=pixelError,
                    oddsToSolve=oddsToSolve,
                    sigma=sigma,
                    verbose=verbose), imagePaths):
            yield wcsHeader
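
As the docstring notes, threads help here only because the heavy lifting happens in an external astrometry.net process. If the solver were pure Python, a process pool would be needed for real parallelism. A minimal sketch under that assumption, where solve_cpu_bound stands in for a hypothetical picklable module-level function (ProcessPoolExecutor cannot submit lambdas):

from concurrent import futures

def solveImagesCpuBound(imagePaths, maxWorkers=None):
    # Processes instead of threads: each worker has its own interpreter
    # and its own GIL, so pure-Python work runs in parallel.
    with futures.ProcessPoolExecutor(max_workers=maxWorkers) as executor:
        for wcsHeader in executor.map(solve_cpu_bound, imagePaths):
            yield wcsHeader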
Example #9
    def handler(*args, **kwargs):
        logging.info(
            'asyncmethod: Method "%s" invoked with args "%s" and kwargs "%s"',
            method.__name__, args, kwargs)
        with futures.ThreadPoolExecutor(max_workers=1) as executor:
            future_result = executor.submit(method, *args, **kwargs)
            future_result.add_done_callback(callback)

        return future_result
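
Note that leaving the with block calls shutdown(wait=True), so this handler actually blocks until method finishes; the callback still fires, but the call is not truly asynchronous. A minimal sketch of a variant that returns immediately, assuming a long-lived module-level pool is acceptable:

from concurrent import futures

_executor = futures.ThreadPoolExecutor(max_workers=1)  # hypothetical shared pool

def asyncmethod(method, callback):
    def handler(*args, **kwargs):
        # No with block: the pool outlives the call, so handler() returns
        # right away and the callback fires once the work completes.
        future_result = _executor.submit(method, *args, **kwargs)
        future_result.add_done_callback(callback)
        return future_result
    return handler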
Example #10
def main():
    # fs = {}
    with futures.ThreadPoolExecutor(max_workers=100) as executor:  # 10 threads by default
        time.clock()
        executor.submit(refreshCode)
        time.sleep(1)
        for i in xrange(130000, 134800):
            future = executor.submit(burpCode, str(i))
        print time.clock()
Example #11
def get_page_names(base_url, category):
    if isinstance(category, basestring):
        category = [category]

    with futures.ThreadPoolExecutor(50) as executor:
        all_page_names = executor.map(
            partial(get_page_names_for_category, base_url),
            get_all_categories(base_url, category))

        return reduce(set.union, all_page_names)
Example #12
    def make_one(self, pkg='dummy', index_path=None):
        from cheeseprism import index
        executor = futures.ThreadPoolExecutor(1)
        if index_path is None:
            index_path = self.new_path('test-index')
        idx = index.IndexManager(index_path, executor=executor)
        pkg = getattr(self, pkg)
        pkg.copy(idx.path)
        self.dummypath = idx.path / pkg.name
        return idx
Example #13
def Audit(services):
    if services.has_key('url') and services.has_key(
            'webserver') and services['webserver'] == 'Tomcat':
        url = services['url']
        host = None
        m = re.match('(http[s]?)://([^:^/]+):?([^/]*)/', url)
        if m:
            host = m.group(2)
        pwddicts = getPwds(host)
        # pprint(pwddicts)

        fs = {}
        time.clock()
        # using a ProcessPoolExecutor would be faster
        with futures.ThreadPoolExecutor(max_workers=20) as executor:  # 10 threads by default
            time.clock()
            for eachname in pwddicts.keys():
                for eachpwd in pwddicts[eachname]:
                    # print 'starting\t',eachname+':'+eachpwd
                    future = executor.submit(
                        tomcatcrack,
                        url,
                        eachname,
                        eachpwd,
                    )
                    fs[future] = eachname + ':' + eachpwd
                    # print eachname+':'+eachpwd +' '+str(f.result())
            # print time.clock()
            logger(time.clock())

            # How do we exit gracefully as soon as one credential is cracked?
            # print len(fs)
            # uncompleted_fs = fs
            # for future in futures.as_completed(fs):
            # 	url = fs[future]
            # 	if future.exception() is not None:
            # 		print('%r generated an exception: %s' % (url,future.exception()))
            # 		#Regardless of the value of wait, the entire Python program will not exit until all pending futures are done executing.
            # 		# executor.shutdown(wait=False)
            # 		break
            # 	else:
            # 		# print url,'\t',future.result()
            # 		uncompleted_fs.pop(future)
            # print len(fs)
            # print time.clock()
            # for future in uncompleted_fs:
            # 	url = uncompleted_fs[future]
            # 	fg = future.cancel()
            # 	# print 'canceling',url,'\t',fg
            # donefs,notdonefs = futures.wait(fs)
            # # print notdonefs
            # print time.clock()
        # Without stopping at the first hit, every worker task runs to completion before returning, which takes longer.
        # print time.clock()
        logger(time.clock())
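
The commented-out block above asks how to exit gracefully as soon as one credential is found. A minimal sketch, assuming tomcatcrack() returns a truthy value on a successful login; pending futures get cancelled, but tasks already running still finish, since ThreadPoolExecutor cannot interrupt a running thread:

def crack_until_first_hit(url, pwddicts, workers=20):
    with futures.ThreadPoolExecutor(max_workers=workers) as executor:
        fs = {}
        for eachname in pwddicts.keys():
            for eachpwd in pwddicts[eachname]:
                future = executor.submit(tomcatcrack, url, eachname, eachpwd)
                fs[future] = eachname + ':' + eachpwd
        for future in futures.as_completed(fs):
            if future.exception() is None and future.result():
                for pending in fs:
                    pending.cancel()  # only not-yet-started tasks can be cancelled
                return fs[future]
    return None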
Example #14
def main():
    test = httpscan(ADDRESS, TIMES)
    #start = time.time()
    try:
        with futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
            tasks = dict((executor.submit(test.scan, port), port)
                         for port in map(str, xrange(START, STOP)))
            # as_completed() returns a lazy iterator; left unconsumed it does
            # not block. The with-block's shutdown(wait=True) is what waits.
            futures.as_completed(tasks)
    except KeyboardInterrupt:
        print "Scan done\n"
    finally:
        print "Have a nice day"
Example #15
    def __init__(self, settings, domain, args):
        resolvers = args.get('resolvers', None)

        self.resolvers = map(lambda x: x.strip(), resolvers.split(','))
        log.debug('resolvers: %s' % self.resolvers)

        # how long we wait on upstream dns servers before puking
        self.timeout = settings.get('resolution_timeout', DEFAULT_TIMEOUT)
        log.debug('timeout: %d' % self.timeout)

        self.pool = futures.ThreadPoolExecutor(max_workers=len(self.resolvers))

        super(ResolveRule, self).__init__(settings, domain, args)
Example #16
def download_pages(page=1):
    with futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
        # We submit NUM_THREADS tasks at a time since we don't know how many
        # pages we will need to download in advance
        while True:
            batch = []
            for i in range(NUM_THREADS):
                f = executor.submit(download_and_save_page, page)
                batch.append(f)
                page += 1
            # Block on this batch's results; stop once any page fails to download
            if not all(f.result() for f in batch):
                break
Example #17
def get_plants(category, base_url):
    if category == [] or category is None:
        category = SEED_CATEGORIES
    if base_url is None:
        base_url = WIKIPEDIA_URL

    page_names = get_page_names(base_url, category)
    logging.info("Got %s page names", len(page_names))
    with futures.ThreadPoolExecutor(50) as executor:
        pages = executor.map(partial(get_page, base_url), page_names)
        plants = map(parse_plant_info, pages)

        return filter(None, plants)
Example #18
def search_results(request):
    query = request.params.get('q', '')

    with futures.ThreadPoolExecutor(max_workers=3) as executor:
        workers = [
            executor.submit(dataset.search, query, request),
            executor.submit(organization.search, query),
            executor.submit(wiki.search, query),
        ]

    results = dict(worker.result() for worker in futures.as_completed(workers))

    return templates.render_site('search.html', request, search_query=query, has_ckan=False, **results)
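
Note that dict(worker.result() ...) assumes each of the three search calls returns a (key, results) pair; as_completed simply fills the dict in whichever order the backends respond.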
Example #19
def Audit(services):
	global ret
	retinfo = {}
	output = ''
	#print'ok'
	if services.has_key('url'):
		#print'ok'
		output += 'plugin run' + os.linesep
		urls = generateUrls(services['url'])
		# pprint(urls)

		#  threads
		
		lock = threading.Lock()
		threads = []
		maxthreads = 20

		# for url in urls:
		# 	th = threading.Thread(target=httpcrack,args=(url,lock))
		# 	threads.append(th)
		# i = 0
		
		# while i<len(threads):
		# 	if i+maxthreads >len(threads):
		# 		numthreads = len(threads) - i
		# 	else:
		# 		numthreads = maxthreads
		# 	print 'threads:',i,' - ', i + numthreads

		# 	# start threads
		# 	for j in range(numthreads):
		# 		threads[i+j].start()

		# 	# wait for threads
		# 	for j in range(numthreads):
		# 		threads[i+j].join()

		# 	i += maxthreads

		# switched to the futures module instead
		with futures.ThreadPoolExecutor(max_workers=maxthreads) as executor:      # 10 threads by default
			future_to_url = dict((executor.submit(httpcrack, url, lock), url)
						 for url in urls)

	if ret != '':
		retinfo = {'level':'low','content':ret}
		security_warning(str(ret))
		# 
		ret = ''

	return (retinfo,output)
Example #20
    def multiScan(self):
        with futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
            future_to_url = dict((executor.submit(self.scan, target), target)
                                 for target in self.targets)
            for future in futures.as_completed(future_to_url):
                target = future_to_url[future]
                try:
                    ret = future.result()
                except Exception as exc:
                    print('%r generated an exception: %s' % (target, exc))
                    # logger('%r generated an exception: %s' % (url, exc))
                else:
                    print('%r returns: %s' % (target, str(ret)))
                    self.results.append((target[0], target[1], ret))
Example #21
def test_concurrent_dwt():
    # dwt on 1D data calls the Cython dwt_single
    # other cases call dwt_axis
    for dwt_func, x in zip([pywt.dwt, pywt.dwt2, pywt.dwtn],
                           [np.ones(8), np.eye(16), np.eye(16)]):
        transform = partial(dwt_func, wavelet='haar')
        for _ in range(10):
            arrs = [x.copy() for _ in range(100)]
            with futures.ThreadPoolExecutor(max_workers=max_workers) as ex:
                results = list(ex.map(transform, arrs))

        # validate result from one of the concurrent runs
        expected_result = transform(x)
        _assert_all_coeffs_equal([expected_result, ], [results[-1], ])
Example #22
def test_concurrent_cwt():
    time, sst = pywt.data.nino()
    dt = time[1]-time[0]
    transform = partial(pywt.cwt, scales=np.arange(1, 4), wavelet='cmor1.5-1',
                        sampling_period=dt)
    for _ in range(10):
        arrs = [sst.copy() for _ in range(50)]
        with futures.ThreadPoolExecutor(max_workers=max_workers) as ex:
            results = list(ex.map(transform, arrs))

    # validate result from one of the concurrent runs
    expected_result = transform(sst)
    for a1, a2 in zip(expected_result, results[-1]):
        assert_array_equal(a1, a2)
Example #23
    def process(self):
        LOGGER.warn('process, start ')
        start_time = time.time()
        with futures.ThreadPoolExecutor(len(self.collectors)) as tp_executor:
            results = {
                tp_executor.submit(collector.collect): collector
                for collector in self.collectors
            }
            # as_completed() is lazy; the with-block's shutdown is what waits.
            futures.as_completed(results)
        duration = time.time() - start_time
        self.next_wake_interval = self._wake_interval - duration
        if self.next_wake_interval < 1:
            LOGGER.warn('process, poll interval took greater than %is',
                        duration)
            self.next_wake_interval = int(self._wake_interval)
        LOGGER.warn(
            'process, end in %.2fs, next poll will begin at %is from now',
            duration, self.next_wake_interval)
Example #24
def test_concurrent_swt():
    # tests error-free concurrent operation (see gh-288)
    # swt on 1D data calls the Cython swt
    # other cases call swt_axes
    with warnings.catch_warnings():
        # can remove catch_warnings once the swt2 FutureWarning is removed
        warnings.simplefilter('ignore', FutureWarning)
        for swt_func, x in zip([pywt.swt, pywt.swt2, pywt.swtn],
                               [np.ones(8), np.eye(16), np.eye(16)]):
            transform = partial(swt_func, wavelet='haar', level=3)
            for _ in range(10):
                arrs = [x.copy() for _ in range(100)]
                with futures.ThreadPoolExecutor(max_workers=max_workers) as ex:
                    results = list(ex.map(transform, arrs))

        # validate result from one of the concurrent runs
        expected_result = transform(x)
        _assert_all_coeffs_equal(expected_result, results[-1])
Example #25
    def find_all_devices(self, device_cfg):
        devices_mp = dict()
        discovery_tp_size = self.config.get('discovery_tp_size', 5)
        devices = utils.get_device_list(device_cfg)
        with futures.ThreadPoolExecutor(discovery_tp_size) as tp_executor:
            results = {
                tp_executor.submit(self.find_device, device): device
                for device in devices
            }
            devices = [fut.result() for fut in futures.as_completed(results)]
            for device in devices:
                if device is not None and device.get(
                        'ip_address') not in devices_mp:
                    devices_mp[device.get('ip_address')] = device
        LOGGER.info('get_all_devices, device_count [%d]',
                    len(devices_mp.values()))
        self.active_devices = devices_mp.values()
        return self.active_devices
Example #26
    def __init__(self, server, workers, limit):
        log.info('starting benchmark')

        self.server, self.port = server.split(':')
        self.limit = limit

        log.info('using %d workers' % workers)

        # sanity checking the server name:
        try:
            dns.reversename.from_address(self.server)
        except dns.exception.SyntaxError:
            raise Exception(
                'server name must be an IP, not a DNS name; you gave us: %s' %
                self.server)

        log.info('benchmarking host: %s port: %s' % (self.server, self.port))

        # build threadpool
        self.executor = futures.ThreadPoolExecutor(max_workers=workers)
Example #27
def stop_workers(*ids):
    """stop_workers

    Description
    -----------

    Stops worker engines.

    Parameters
    ----------
    ids: int or list of worker descriptions, optional
        The ids of the worker engines to stop. If not provided, all
        worker engines in the cluster will be stopped.

    Returns
    -------
    list
        A list of dicts describing the engines.
    """
    if len(ids) == 0:
        ids = [worker["id"] for worker in list_workers()]
        if len(ids) == 0:
            return
        return stop_workers(*ids)
    else:
        ids_to_stop = [get_id(worker_or_id) for worker_or_id in ids]
        base_url = get_base_url()
        auth = get_auth()

        # The stop requests are done concurrently in a thread pool for
        # lower latency.
        def stop_worker(id):
            return requests.put(
                base_url + "/" + str(id) + "/stop",
                auth=(auth["user"], auth["password"])
            )

        pool = futures.ThreadPoolExecutor(THREAD_POOL_SIZE)

        # Submit the normalized ids, not the raw worker descriptions.
        responses = [pool.submit(stop_worker, id) for id in ids_to_stop]
        return [x.result() for x in futures.wait(responses)[0]]
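
One caveat: futures.wait() returns (done, not_done) sets, and sets are unordered, so the results above will not necessarily line up with ids_to_stop. A minimal sketch that keeps them aligned with submission order, under the same assumptions:

        # result() blocks per future, so iterating the submission list keeps
        # the responses in the same order as ids_to_stop.
        return [f.result() for f in responses]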
Example #28
    def start_reading_changes(self):
        # Close the input end, because we will read from the pipe here.
        self._changes_in.close()

        # The queue to store `persist_changes` tasks on.
        self._persist_queue = Queue.Queue(maxsize=self.task_queue_length)

        self._persist_executor = futures.ThreadPoolExecutor(
            max_workers=1
        )

        # Start the future for tracking the sequence.
        self._seq_tracking_future = self._persist_executor.submit(
            self._track_seq
        )

        # When tracker dies, purge the queue to prevent deadlocks.
        # TODO(kris): Sort it out properly.
        def purge_queue(_):
            while True:
                try:
                    self._persist_queue.get_nowait()
                    self._persist_queue.task_done()
                except Queue.Empty:
                    break

        # Purge the queue if nobody is reading from it anymore.
        self._seq_tracking_future.add_done_callback(
            purge_queue
        )

        # Now, read changes until you die.
        try:
            self.read_changes()
        except:
            logger.exception('Error reading changes! Terminating.')

        # Write a termination sequence to the queue, so that the sequence
        # tracker can exit.
        logger.debug('Writing termination sequence to persist queue...')
        self._persist_queue.put((None, None))
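
_track_seq is not shown; the (None, None) write above only works if the consumer treats it as a termination sentinel. A minimal sketch of such a consumer loop, with _persist_changes purely hypothetical:

    def _track_seq(self):
        while True:
            change, seq = self._persist_queue.get()
            try:
                if change is None:  # the (None, None) termination sentinel
                    return
                self._persist_changes(change, seq)  # hypothetical persist step
            finally:
                self._persist_queue.task_done()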
Example #29
def delete_nodes(conf):
    names = get_node_names(conf)
    todelete = []
    conn = get_conn(conf)
    for n in conn.list_nodes():
        if n.name in names:
            todelete.append(n)

    pt = PrettyTable(['delete-success', 'uuid', 'name'])
    with futures.ThreadPoolExecutor(max_workers=CONCURRENCY) as e:
        returns = []
        for node in todelete:
            returns.append(e.submit(delete_node, conf, node))
        for rv in returns:
            try:
                n = rv.result()
                pt.add_row([n[0], n[1].uuid, n[1].name])
            except Exception as exc:
                traceback.print_exc(file=sys.stdout)
                pt.add_row(['EXCEPTION', '', str(exc)])
    print pt
Example #30
def create_nodes(conf):
    osimg = os_flavor(conf.loader.image)
    if osimg == 'coreos':
        print 'etcd discovery url: %s' % (conf.getDiscoveryUrl())

    toboot = get_missing_nodes(conf)
    pt = PrettyTable(['state', 'id', 'name', 'public_ip', 'private_ip'])
    with futures.ThreadPoolExecutor(max_workers=CONCURRENCY) as e:
        returns = []
        for ni in toboot:
            returns.append(e.submit(create_node, conf, ni))
        for rv in returns:
            try:
                n = rv.result()
                pt.add_row(
                    [n.state, n.id, n.name, n.public_ips, n.private_ips])
            except Exception as exc:
                traceback.print_exc(file=sys.stdout)
                pt.add_row(['EXCEPTION', '', str(exc), '', ''])

    print pt