Example #1
def main():
    test=httpscan(ADDRESS,TIMES)
    #start = time.time()
    try:
        with futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
            tasks =dict((executor.submit(test.scan,port),port) for port in map(str,xrange(START,STOP)))
            futures.as_completed(tasks)
    except KeyboardInterrupt:
        print "Scan done\n"
    finally:
        print "Have a nice day"
Example #2
def main():
    test = httpscan(ADDRESS, TIMES)
    #start = time.time()
    try:
        with futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
            tasks = dict((executor.submit(test.scan, port), port)
                         for port in map(str, xrange(START, STOP)))
            futures.as_completed(tasks)
    except KeyboardInterrupt:
        print "Scan done\n"
    finally:
        print "Have a nice day"
Example #3
def Audit(services):
    if 'url' in services:
        url = services['url']
        #print "!!!!!"
        target_url=getname(url)
        test(target_url)    # run here to obtain DIR
        if url[-1]!="/":
            url += "/"
        print url
        bttest=btscan(url)
        
        with futures.ThreadPoolExecutor(max_workers=10) as executor:      # 10 threads by default
            tasks=dict((executor.submit(bttest.dirscan,dir),dir) for dir in DIR)
            futures.as_completed(tasks)
Example #4
def fetch_all(git_repo_url, search_projects, auth=None, stream=sys.stdout):
    storage, stash = _init_fetch(git_repo_url, auth)
    start_time = time.time()
    
    exclude = set(['etk'])
    repos = []
    for project in search_projects:
        repos += ['{}/{}'.format(project, slug) for slug in stash.iter_repos(project) if slug not in exclude]
    
    with futures.ThreadPoolExecutor(max_workers=8) as executor:
        
        # submit fetch jobs for each repo, creating a mapping future => (repo_name, sub_stream)
        future_to_repo = {}
        for repo_name in repos:
            sub_stream = StringIO()
            future = executor.submit(fetch, repo_name, storage, stash, sub_stream)
            future_to_repo[future] = (repo_name, sub_stream)
        
        # as fetches complete, print each status along with the sub-stream contents
        for index, future in enumerate(futures.as_completed(future_to_repo)):
            percent = int(((index + 1.0) / len(repos)) * 100.0)
            repo_name, sub_stream = future_to_repo[future]
            print >> stream, '=== Fetched %s (%d of %d: %d%%) ===' % (repo_name, index + 1, len(repos), percent)
            print >> stream, sub_stream.getvalue()
            yield 
    
    print >> stream
    total_seconds = time.time()-start_time
    print >> stream, 'Total Time:', total_seconds
    storage.set_last_fetch_all_status(datetime.datetime.today(), datetime.timedelta(seconds=total_seconds))
Example #5
 def process(self):
     LOGGER.warn('process, start ')
     start_time = time.time()
     with futures.ThreadPoolExecutor(len(self.collectors)) as tp_executor:
         results = {
             tp_executor.submit(
                 collector.collect): collector for collector in self.collectors}
         futures.as_completed(results)
     duration = time.time() - start_time
     self.next_wake_interval = self._wake_interval - duration
     if self.next_wake_interval < 1:
         LOGGER.warn('process, poll interval took greater than %is',
                     duration)
         self.next_wake_interval = int(self._wake_interval)
     LOGGER.warn('process, end in %.2fs, next poll will begin at %is from now',
                 duration, self.next_wake_interval)
Example #6
def cli():
    """Preprocess the samples for BDT optimization.
    """
    # Load the configuration module.
    config = load_config()
    # Create the output directory.
    safe_makedirs('sample')
    # Preprocess the samples in parallel. To guard against deadlock, the number
    # of workers is chosen to be the smaller of the number of available cores
    # or the number of samples to preprocess.
    samples = config.SIGNAL + config.BACKGROUND
    max_workers = min(multiprocessing.cpu_count(), len(samples))
    tasks = []
    with futures.ProcessPoolExecutor(max_workers) as executor:
        for sample in samples:
            # The configuration module cannot be pickled, so pass the options directly.
            tasks.append(
                executor.submit(worker, sample, config.DIRECTORY,
                                config.SELECTION, config.BRANCHES,
                                config.TARGET_LUMI))
        with click.progressbar(label='Preprocessing Samples',
                               length=len(samples),
                               show_pos=True,
                               show_percent=False) as bar:
            for task in futures.as_completed(tasks):
                bar.update(1)
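The comment in Example #6 sizes the pool as the smaller of the CPU count and the number of samples. That rule also works as a standalone pattern; a minimal sketch follows, assuming the same futures backport used throughout these examples (the square() task and items list are made up for illustration).

import multiprocessing

import futures  # backport of concurrent.futures for Python 2


def square(x):
    return x * x


if __name__ == '__main__':
    items = [1, 2, 3, 4]
    # Never start more worker processes than there are items to process.
    max_workers = min(multiprocessing.cpu_count(), len(items))
    with futures.ProcessPoolExecutor(max_workers) as executor:
        tasks = [executor.submit(square, x) for x in items]
        for task in futures.as_completed(tasks):
            print(task.result())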
Example #7
def upload_multiple(file_list, max_workers=5):
    """Upload a bunch of CSV files to Sailthru. 

    Using Python's concurrent.futures library, asynchronously uploads
    the files to Sailthru, a few at a time.

    Arguments:
        file_list: List of filenames to be uploaded.
        max_workers: The number of asynchronous uploads to allow at once.

    """
    with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_file = {executor.submit(upload, filename): filename
                             for filename in file_list}
        count = 0
        total = len(file_list)
        for future in futures.as_completed(future_to_file):
            file_name = future_to_file[future]
            if future.exception() is not None:
                print '%r generated an exception: %s' % (file_name,
                                                         future.exception())
            else:
                # Successful upload
                count += 1
                print ('%s/%s files have been successfully uploaded' % 
                    (count, total))
Example #8
def upload_multiple(file_list, max_workers=5):
    """Upload a bunch of CSV files to Sailthru. 

    Using Python's concurrent.futures library, asynchronously uploads
    the files to Sailthru, a few at a time.

    Arguments:
        file_list: List of filenames to be uploaded.
        max_workers: The number of asynchronous uploads to allow at once.

    """
    with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_file = {
            executor.submit(upload, filename): filename
            for filename in file_list
        }
        count = 0
        total = len(file_list)
        for future in futures.as_completed(future_to_file):
            file_name = future_to_file[future]
            if future.exception() is not None:
                print '%r generated an exception: %s' % (file_name,
                                                         future.exception())
            else:
                # Successful upload
                count += 1
                print('%s/%s files have been successfully uploaded' %
                      (count, total))
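Examples #7 and #8 check future.exception() instead of calling future.result() inside a try block; either style surfaces worker errors, and Example #11 below shows the try/except variant. A minimal sketch of the exception() style, with a hypothetical flaky() task, assuming the same futures backport:

import futures  # backport of concurrent.futures for Python 2


def flaky(n):
    if n % 2:
        raise ValueError('odd input %d' % n)
    return n


with futures.ThreadPoolExecutor(max_workers=2) as executor:
    fs = {executor.submit(flaky, n): n for n in range(4)}
    for f in futures.as_completed(fs):
        if f.exception() is not None:
            # exception() returns the raised exception without re-raising it
            print('%d failed: %s' % (fs[f], f.exception()))
        else:
            print('%d -> %d' % (fs[f], f.result()))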
Example #9
def go():
    print '-------------------------'

    with futures.ThreadPoolExecutor(max_workers=10) as executor:
        quote_futures = [executor.submit(pull_quote, q) for q in PULL]
        print [f.result() for f in futures.as_completed(quote_futures)]

    threading.Timer(PULL_INTERVAL, go).start()
Example #10
 def process(self):
     LOGGER.warn('process, start ')
     start_time = time.time()
     with futures.ThreadPoolExecutor(len(self.collectors)) as tp_executor:
         results = {
             tp_executor.submit(collector.collect): collector
             for collector in self.collectors
         }
         futures.as_completed(results)
     duration = time.time() - start_time
     self.next_wake_interval = self._wake_interval - duration
     if self.next_wake_interval < 1:
         LOGGER.warn('process, poll interval took greater than %is',
                     duration)
         self.next_wake_interval = int(self._wake_interval)
     LOGGER.warn(
         'process, end in %.2fs, next poll will begin at %is from now',
         duration, self.next_wake_interval)
Example #11
	def multiScan(self):
		with futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
			future_to_url = dict((executor.submit(self.scan, target), target) for target in self.targets)
			for future in futures.as_completed(future_to_url):
				target = future_to_url[future]
				try:
					ret = future.result()
				except Exception as exc:
					print('%r generated an exception: %s' % (target, exc))
					# logger('%r generated an exception: %s' % (url, exc))
				else:
					print('%r returns: %s' % (target, str(ret)))
					self.results.append((target[0],target[1],ret))
Example #12
def search_results(request):
    query = request.params.get('q', '')

    with futures.ThreadPoolExecutor(max_workers=3) as executor:
        workers = [
            executor.submit(dataset.search, query, request),
            executor.submit(organization.search, query),
            executor.submit(wiki.search, query),
        ]

    results = dict(worker.result() for worker in futures.as_completed(workers))

    return templates.render_site('search.html', request, search_query=query, has_ckan=False, **results)
Example #13
 def multiScan(self):
     with futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
         future_to_url = dict((executor.submit(self.scan, target), target)
                              for target in self.targets)
         for future in futures.as_completed(future_to_url):
             target = future_to_url[future]
             try:
                 ret = future.result()
             except Exception as exc:
                 print('%r generated an exception: %s' % (target, exc))
                 # logger('%r generated an exception: %s' % (url, exc))
             else:
                 print('%r returns: %s' % (target, str(ret)))
                 self.results.append((target[0], target[1], ret))
Example #14
def json2agreementmatrix(jsonflist,start=2,maxlen=0,task_type='all'):
    """ Multi process function to convert 2 json file annotation combination to
    agreement values (alpha,kappa,Avg Observed agreement)

        Args:
           jsonflist (list):  list of json filenames.
           start (int): combination group size to begin with.
           maxlen(int): maximum count starting from :data:'start'


        Kwargs:
           state (bool): Current state to be in.

        Returns:
           A dict mapping each annotator combination to its agreement values,
            which is also dumped to YAML and CSV files.

        Raises:
           Future.Exception
        """
    future_list=[]
    detaildata={}

    flen=len(jsonflist)


    assert start+maxlen-2<=flen



    with futures.ProcessPoolExecutor() as executor:
        for cnt in range(start,start+maxlen+1):
            for tpl in list(itertools.combinations(jsonflist,cnt)):
                future_list.append(executor.submit(getagreement,tpl,os.path.dirname(jsonflist[0]),task_type))


        for future in futures.as_completed(future_list):
            if future.exception() is not None:
                print('%r generated an exception: %s' % (future,
                                                     future.exception()))
            else:

                detaildata.update( future.result())


    yaml.dump(detaildata,open(os.path.dirname(jsonflist[0])+'\\'+str(start)+'-'+str(start+maxlen)+'out.yaml','w'))
    csvdump(detaildata,open(os.path.dirname(jsonflist[0])+'\\'+str(start)+'-'+str(start+maxlen)+'out.csv','w'))
    print "Dumped output"
    return detaildata
Example #15
 def find_all_devices(self, device_cfg):
     devices_mp = dict()
     discovery_tp_size = self.config.get('discovery_tp_size', 5)
     devices = utils.get_device_list(device_cfg)
     with futures.ThreadPoolExecutor(discovery_tp_size) as tp_executor:
         results = {
             tp_executor.submit(self.find_device, device): device
             for device in devices
         }
         devices = [fut.result() for fut in futures.as_completed(results)]
         for device in devices:
             if device is not None and device.get(
                     'ip_address') not in devices_mp:
                 devices_mp[device.get('ip_address')] = device
     LOGGER.info('get_all_devices, device_count [%d]',
                 len(devices_mp.values()))
     self.active_devices = devices_mp.values()
     return self.active_devices
Example #16
 def find_all_devices(self, device_cfg):
     devices_mp = dict()
     discovery_tp_size = self.config.get('discovery_tp_size', 5)
     devices = utils.get_device_list(device_cfg)
     with futures.ThreadPoolExecutor(discovery_tp_size) as tp_executor:
         results = {
             tp_executor.submit(
                 self.find_device,
                 device): device for device in devices}
         devices = [fut.result() for fut in futures.as_completed(results)]
         for device in devices:
             if device is not None and device.get(
                     'ip_address') not in devices_mp:
                 devices_mp[device.get('ip_address')] = device
     LOGGER.info(
         'get_all_devices, device_count [%d]', len(
             devices_mp.values()))
     self.active_devices = devices_mp.values()
     return self.active_devices
Example #17
def main():
    url = 'https://s3.amazonaws.com/pydemo/test.zip'
    async_downloads = []
    session = FuturesSession(executor=ThreadPoolExecutor(max_workers=4))
    for cnt in range(4):
        print "getting %d..." % cnt

        def cb(sess, resp, n=cnt):
            fn = '/tmp/%d-test-futures.zip' % n
            print "writing %s" % fn
            write_part(fn, resp)
            return "done with %s" % fn
        s = session.get(url,
                        stream=True,
                        timeout=10,  #!
                        background_callback=cb)

        async_downloads.append(s)
    for f in futures.as_completed(async_downloads):
        exp = f.exception()
        if exp is not None:
            raise exp
        print f.result()
Example #18
def slurp_puppet_facts(source=None, source_url=None, auth=None, facts=None,
                       ssl_verify=None):
    """
    Spawn one thread per fact being fetched. Tell the N ForemanFactSlurp
    instances to fetch their data, process that data, and then insert the data
    into the ExternalData table.
    """
    future_facts = get_future_facts(
        source, source_url, auth, facts, ssl_verify
    )

    # Clear everything we saw last time
    ExternalData.objects.filter(source=source).delete()

    with futures.ThreadPoolExecutor(max_workers=len(future_facts)) as executor:
        facts = dict(
            (executor.submit(ff.slurp), ff) for ff in future_facts
        )

        for fact in futures.as_completed(facts):
            # This will raise any exception and roll back our delete()
            fact.result()

            facts[fact].process()
Example #19
def slurp_puppet_facts(source=None,
                       source_url=None,
                       auth=None,
                       facts=None,
                       ssl_verify=None):
    """
    Spawn one thread per fact being fetched. Tell the N ForemanFactSlurp
    instances to fetch their data, process that data, and then insert the data
    into the ExternalData table.
    """
    future_facts = get_future_facts(source, source_url, auth, facts,
                                    ssl_verify)

    # Clear everything we saw last time
    ExternalData.objects.filter(source=source).delete()

    with futures.ThreadPoolExecutor(max_workers=len(future_facts)) as executor:
        facts = dict((executor.submit(ff.slurp), ff) for ff in future_facts)

        for fact in futures.as_completed(facts):
            # This will raise any exception and roll back our delete()
            fact.result()

            facts[fact].process()
Example #20
def fetch_all(git_repo_url, search_projects, auth=None, stream=sys.stdout):
    storage, stash = _init_fetch(git_repo_url, auth)
    start_time = time.time()

    exclude = set(['etk'])
    repos = []
    for project in search_projects:
        repos += [
            '{}/{}'.format(project, slug) for slug in stash.iter_repos(project)
            if slug not in exclude
        ]

    with futures.ThreadPoolExecutor(max_workers=8) as executor:

        # submit fetch jobs for each repo, creating a mapping future => (repo_name, sub_stream)
        future_to_repo = {}
        for repo_name in repos:
            sub_stream = StringIO()
            future = executor.submit(fetch, repo_name, storage, stash,
                                     sub_stream)
            future_to_repo[future] = (repo_name, sub_stream)

        # as fetches complete, print each status along with the sub-stream contents
        for index, future in enumerate(futures.as_completed(future_to_repo)):
            percent = int(((index + 1.0) / len(repos)) * 100.0)
            repo_name, sub_stream = future_to_repo[future]
            print >> stream, '=== Fetched %s (%d of %d: %d%%) ===' % (
                repo_name, index + 1, len(repos), percent)
            print >> stream, sub_stream.getvalue()
            yield

    print >> stream
    total_seconds = time.time() - start_time
    print >> stream, 'Total Time:', total_seconds
    storage.set_last_fetch_all_status(
        datetime.datetime.today(), datetime.timedelta(seconds=total_seconds))
Example #21
def Audit(services):
	global ret, http404page
	retinfo = {}
	output = 'plugin run' + os.linesep
	
	# first get http404 code page
	http404page = get404Page(services['url']+'/404.html')
	# print 'http404page=',http404page

	urls = generateUrls(services['url'])
	# pprint(urls)

	#  threads
	lock = threading.Lock()
	threads = []
	maxthreads = 20

	# for url in urls:
	# 	th = threading.Thread(target=httpcrack,args=(url,lock))
	# 	threads.append(th)
	# i = 0
	# while i<len(threads):
	# 	if i+maxthreads >len(threads):
	# 		numthreads = len(threads) - i
	# 	else:
	# 		numthreads = maxthreads
	# 	print 'threads:',i,' - ', i + numthreads

	# 	# start threads
	# 	for j in range(numthreads):
	# 		threads[i+j].start()

	# 	# wait for threads
	# 	for j in range(numthreads):
	# 		threads[i+j].join()

	# 	i += maxthreads
	# switched to the futures module
	# with futures.ThreadPoolExecutor(max_workers=maxthreads) as executor:      # 10 threads by default
	# 	future_to_url = dict((executor.submit(httpcrack, url), url)
	# 				 for url in urls)

	with futures.ThreadPoolExecutor(max_workers=maxthreads) as executor:
		# Start the load operations and mark each future with its URL
		future_to_url = dict((executor.submit(httpcrack, url), url) for url in urls)
		# try:
		for future in futures.as_completed(future_to_url):
			url = future_to_url[future]
			try:
				ret = future.result()
			except Exception as exc:
				# print('%r generated an exception: %s' % (url, exc))
				logger('%r generated an exception: %s' % (url, exc))
			else:
				# print('%r returns: %s' % (url, str(ret)))
				logger('%r returns: %s' % (url, str(ret)))
		
		# except (KeyboardInterrupt, SystemExit):
		# 	print "Exiting..."
		# 	return (retinfo,output)

	# ctrl + c KeyboardInterrupt
	# try:
	# 	executor = futures.ThreadPoolExecutor(max_workers=maxthreads)
	# 	future_to_url = dict((executor.submit(httpcrack, url, lock), url) for url in urls)
	# 	for future in futures.as_completed(future_to_url):
	# 		url = future_to_url[future]
	# 		data = future.result()
	# except IndexError,e:
	# 	pass
	# except KeyboardInterrupt,e:
	# 	print 'KeyboardInterrupt found'
	# 	executor.shutdown(wait=False)

	if ret != '':
		retinfo = {'level':'low','content':ret}
		# security_warning(str(ret))
		
	return (retinfo,output)
Example #22
    def cosn_expval2(self, nmax=None, rbins=500, rmax=None,
                     truncate=5.0, nthreads=None,
                     epsabs=1.0e-7, epsrel=1.0e-7):
        '''Calculates the expectation values of cos^n(theta) for the fit as a
        function of r up to rmax and for n from 0 to nmax.

        rbins specifies the number of data points to calculate.

        rmax specifies the maximum radius to consider and is specified
        in dimensions of the original image that was fitted.

        nthreads specifies the number of threads to be used. If None,
        then the number of CPU cores is used as the number of threads.

        truncate specifies the number of basis function sigmas we
        consider either side of each point when calculating the
        intensity at each point. For example if truncate is 5.0, at
        each point we'll consider all basis functions whose centre
        lies within 5.0 * sigma of that point. 5.0 is the default.

        epsabs and epsrel specify the absolute and relative error
        desired when performing the numerical integration over theta
        when calculating the expectation values. The default values
        should suffice.

        '''
        if self.coef is None:
            logger.error('no fit done')
            raise AttributeError

        if rmax is None:
            rmax = self.rmax
        elif rmax > self.rmax:
            logger.error('rmax exceeds that of original data')
            raise ValueError

        if nmax is None:
            nmax = self.lmax
        elif nmax > self.lmax:
            logger.error('nmax exceeds lmax of the fit')
            raise ValueError

        if self.oddl:
            oddl = 1
        else:
            oddl = 0

        # Calculate r values. Set endpoint=False here, since the r
        # values are the lowest value of r in each bin.
        r = numpy.linspace(0.0, rmax, rbins, endpoint=False)

        expval = numpy.zeros((nmax + 1, rbins))

        def __worker(rbin):
            rr = r[rbin]

            expval[:, rbin] = cosn_expval_point(
                rr, nmax, self.coef, self.kmax, self.rkstep,
                self.sigma, self.lmax, oddl, truncate,
                epsabs, epsrel)
            # import time
            # time.sleep(1)

        if nthreads is None:
            nthreads = multiprocessing.cpu_count()

        with futures.ThreadPoolExecutor(max_workers=nthreads) as executor:
            jobs = dict((executor.submit(__worker, rbin), rbin)
                        for rbin in xrange(rbins))

            jobs_done = futures.as_completed(jobs)

            while True:
                try:
                    job = next(jobs_done)

                    if job.exception() is not None:
                        logger.error(job.exception())
                        for j in jobs:
                            if not j.done():  # and not j.running():
                                j.cancel()
                        raise job.exception()
                except StopIteration:
                    break
                except KeyboardInterrupt:
                    logger.info('Ctrl-c received, exiting.')
                    for j in jobs:
                        if not j.done():  # and not j.running():
                            j.cancel()
                    raise

        return r, expval
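The loop above drains as_completed() with next() so that a failure or Ctrl-C can cancel whatever has not started yet. A stripped-down sketch of that cancel-on-interrupt idea, assuming the same futures backport and a hypothetical slow_task():

import time

import futures  # backport of concurrent.futures for Python 2


def slow_task(n):
    time.sleep(n)
    return n


with futures.ThreadPoolExecutor(max_workers=4) as executor:
    jobs = [executor.submit(slow_task, n) for n in range(10)]
    try:
        for job in futures.as_completed(jobs):
            print(job.result())
    except KeyboardInterrupt:
        # cancel() only affects jobs that have not started running yet
        for j in jobs:
            if not j.done():
                j.cancel()
        raise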
Example #23
def Audit(services):
    global ret, http404page
    retinfo = {}
    output = 'plugin run' + os.linesep

    # first get http404 code page
    http404page = get404Page(services['url'] + '/404.html')

    urls = generateUrls(services['url'])
    # pprint(urls)

    #  threads
    lock = threading.Lock()
    threads = []
    maxthreads = 20

    # for url in urls:
    # 	th = threading.Thread(target=httpcrack,args=(url,lock))
    # 	threads.append(th)
    # i = 0
    # while i<len(threads):
    # 	if i+maxthreads >len(threads):
    # 		numthreads = len(threads) - i
    # 	else:
    # 		numthreads = maxthreads
    # 	print 'threads:',i,' - ', i + numthreads

    # 	# start threads
    # 	for j in range(numthreads):
    # 		threads[i+j].start()

    # 	# wait for threads
    # 	for j in range(numthreads):
    # 		threads[i+j].join()

    # 	i += maxthreads

    # switched to the futures module
    # with futures.ThreadPoolExecutor(max_workers=maxthreads) as executor:     # 10 threads by default
    # 	future_to_url = dict((executor.submit(httpcrack, url), url) for url in urls)

    with futures.ThreadPoolExecutor(max_workers=maxthreads) as executor:
        # Start the load operations and mark each future with its URL
        future_to_url = dict(
            (executor.submit(httpcrack, url), url) for url in urls)
        # try:
        for future in futures.as_completed(future_to_url):
            url = future_to_url[future]
            try:
                ret = future.result()
            except Exception as exc:
                # print('%r generated an exception: %s' % (url, exc))
                logger('%r generated an exception: %s' % (url, exc))
            else:
                # print('%r returns: %s' % (url, str(ret)))
                logger('%r returns: %s' % (url, str(ret)))

        # except (KeyboardInterrupt, SystemExit):
        # 	print "Exiting..."
        # 	return (retinfo,output)
    if ret != '':
        retinfo = {'level': 'medium', 'content': ret}

    return (retinfo, output)
Example #24
class portscan():
    def __init__(self, IP):
        self.ip = IP
        #self.timeout=TIMEOUT
    def scan(self, port):

        s = socket.socket()
        s.settimeout(0.1)  # timeout, adjust manually
        try:
            s.connect((self.ip, port))
        except Exception as e:
            #print e
            pass
        else:
            print '%s open' % port
        s.close()


if __name__ == "__main__":
    start = time.time()
    IP = url2ip(ADDRESS)
    test = portscan(IP)
    #test.scan(port)
    print "ip ------------>" + str(IP)

    with futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
        tasks = dict((executor.submit(test.scan, port), port)
                     for port in range(START, STOP))
        futures.as_completed(tasks)
Example #25
    def overlap_factor(self, rbins=500, rmax=None,
                       truncate=5.0, nthreads=None):
        if self.coef is None:
            logger.error('no fit done')
            raise AttributeError

        if rmax is None:
            rmax = self.rmax
        elif rmax > self.rmax:
            logger.error('rmax exceeds that of original data')
            raise ValueError

        if self.oddl:
            oddl = 1
        else:
            oddl = 0

        df = self.detectionfn
        if df.oddl:
            df_oddl = 1
        else:
            df_oddl = 0

        # Calculate r values. Set endpoint=False here, since the r
        # values are the lowest value of r in each bin.
        r = numpy.linspace(0.0, rmax, rbins, endpoint=False)

        lmax = min(self.lmax, df.lmax)

        wt = numpy.fromfunction(lambda l: 1 / (2 * l + 1),
                               (lmax + 1,))

        # Construct array holding the legendre polynomials evaluated at self.beta
        plbeta = legendre.legval(math.cos(self.beta), numpy.diag(numpy.ones(lmax + 1)))

        overlap = numpy.zeros(rbins)

        def __worker(rbin):
            rr = r[rbin]

            # Axis distribution beta parameters
            beta_axis = beta_coeffs_point(
                rr, self.coef, self.kmax, self.rkstep,
                self.sigma, self.lmax, oddl, truncate)

            # Detection function beta parameters in the detection frame
            beta_df = beta_coeffs_point(
                rr, df.coef, df.kmax, df.rkstep,
                df.sigma, df.lmax, df_oddl, truncate)

            # Detection function beta parameters in the lab frame
            beta_df = beta_df[0:lmax + 1] * plbeta

            # The following is too slow
            # res = scipy.optimize.basinhopping(
            #     lambda x: -legendre.legval(x, beta_df),
            #     [0.0], stepsize = 1.0/lmax, T=1.0,
            #     minimizer_kwargs=dict(method='TNC', bounds=((-1.0,1.0),))
            #     )

            # Find approximate position of maximum. Note that there
            # are lmax maxima/minima between -1..1, so this value of
            # Ns should suffice.
            res = scipy.optimize.brute(
                lambda x, *args: -legendre.legval(x, beta_df),
                ((-1.0, 1.0),), Ns=5.0 * lmax,
            )

            # Refine position of maximum - note that the 'finish'
            # option of optimize.brute doesn't work with fmin_tnc etc
            respolish = scipy.optimize.minimize(lambda x: -legendre.legval(x, beta_df),
                                                [res[0]], bounds=((-1.0,1.0),), method='TNC')

            maxval = -respolish.fun

            beta = beta_axis[0:lmax + 1] * beta_df[0:lmax + 1] * wt

            overlap[rbin] = beta.sum() / maxval

        if nthreads is None:
            nthreads = multiprocessing.cpu_count()

        with futures.ThreadPoolExecutor(max_workers=nthreads) as executor:
            jobs = dict((executor.submit(__worker, rbin), rbin)
                        for rbin in xrange(rbins))

            jobs_done = futures.as_completed(jobs)

            while True:
                try:
                    job = next(jobs_done)

                    if job.exception() is not None:
                        logger.error(job.exception())
                        for j in jobs:
                            if not j.done():  # and not j.running():
                                j.cancel()
                        raise job.exception()
                except StopIteration:
                    break
                except KeyboardInterrupt:
                    logger.info('Ctrl-c received, exiting.')
                    for j in jobs:
                        if not j.done():  # and not j.running():
                            j.cancel()
                    raise

        return r, overlap
Example #26
    values = OrderedDict(result)
    r = json.dumps(values, sort_keys=False, indent=2, separators=(',', ': '))

    now1 = datetime.datetime.now() - now0

    #print "runtime in seconds: "
    #print now1.seconds
    #print r
    fout.write('runtime in seconds:  ' + str(now1.seconds) + '\n' + r + '\n')

    fout.close()
    print str(fname) + " complete****************************\n"
    return r


f = open("uniqueURI", 'r')
theurls = f.readlines()
count = 1

q = Queue.Queue()

with futures.ThreadPoolExecutor(max_workers=NUMTHREADS) as executor:
    # map each future to its URI so failures can be attributed to the right one
    urifutures = {}
    for u in theurls:
        urifutures[executor.submit(cd, u, count)] = u
        count = count + 1

    for future in futures.as_completed(urifutures):
        try:
            data = future.result()
        except Exception as exc:
            print "{} generated an exception: {}".format(urifutures[future], exc)
Example #27
def url2ip(ADDRESS):
    try:
        ip=socket.gethostbyname(ADDRESS)
        return ip
    except:
        print "sorry, something went wrong"
class portscan():
    def __init__(self,IP):
        self.ip=IP
        #self.timeout=TIMEOUT
    def scan(self,port):

        s=socket.socket()
        s.settimeout(0.1) # timeout, adjust manually
        try:
            s.connect((self.ip,port))
        except Exception as e:
            #print e
            pass
        else:
            print '%s open'% port
        s.close()
if __name__=="__main__":
    start = time.time()
    IP=url2ip(ADDRESS)
    test=portscan(IP)
    #test.scan(port)
    print "ip ------------>"+str(IP)

    with futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
        tasks=dict((executor.submit(test.scan,port),port) for port in range(START,STOP))
        futures.as_completed(tasks)
Example #28
    def calc_matrix_threaded(self, Rbins, Thetabins, kmax, lmax,
                             detectionfn, alpha=0.0, beta=0.0,
                             sigma=None, oddl=True, method='cquad',
                             epsabs=0.0, epsrel=1.0e-7,
                             wkspsize=100000, nthreads=None):
        """Calculates an inversion matrix using multiple threads.

        kmax determines the number of radial basis functions (from
        k=0..kmax).

        lmax determines the maximum value of l for the legendre
        polynomials (l=0..lmax).

        Rbins specifies the number of radial bins in the image to be
        inverted.

        Thetabins specifies the number of angular bins in the image to
        be inverted.

        sigma specifies the width of the Gaussian radial basis
        functions. This is defined according to the normal convention
        for Gaussian functions i.e. FWHM=2*sigma*sqrt(2*ln2), and NOT
        as defined in the Garcia, Lahon, Powis paper. If sigma is not
        specified it is set automatically such that the half-maximum
        of the Gaussian occurs midway between each radial
        function. Note that sigma is specified in dimensions of bins of
        the image, NOT in the dimensions of the detection function.

        method specifies the integration method to be used. Currently
        supported values are 'qaws', 'qags' and 'cquad' corresponding
        to the different GSL integration functions of the same name.

        epsabs and epsrel specify the desired integration tolerance
        when calculating the basis functions. The defaults should
        suffice.

        wkspsize specifies the maximum number of subintervals used for
        the numerical integration of the basis functions.

        detectionfn specifies a detection function. At present this
        can be None, or an instance of PbasexFit from a previous fit.

        alpha specifies the azimuthal angle between the frame that the
        detection function is specified in and the lab frame. This is
        in radians. If None, we assume 0.0 for this angle.

        beta specifies the polar angle between the frame that the
        detection function is specified in and the lab frame. This is
        in radians. If None, we assume 0.0 for this angle.

        nthreads specifies the number of threads to use. If this has
        the default value of None, the number of threads used will be
        equal to the number of CPU cores.

        """
        if not isinstance(detectionfn, pbasex.PbasexFit):
            raise TypeError('detectionfn is not an instance of PbasexFit')

        # Spacing of radial basis function centres. The most obvious
        # choice here is rkspacing = Rbins / (kmax + 1.0), but we'd
        # actually like to have the last basis function centered on
        # the largest value of R, so instead we choose:
        rkspacing = Rbins / float(kmax)

        if sigma is None:
            # If sigma is not specified, we calculate a reasonable
            # value based on rkspacing. In the original Powis et al
            # paper they had rkspacing=2, and set their sigma (=2
            # sigma^2) = 2 pixels. We do similar here, and then make
            # it 20% bigger to avoid oscillations.
            sigma = m.sqrt(rkspacing / 2.0) * 1.2

        # We need to rescale the detection function parameters to
        # express them in terms of bins for the actual matrix
        # calculation. The detection function rmax specifies the
        # maximum radial value we can sensibly consider. The stored
        # values of rkstep and sigma in the detection function are
        # currently scaled according to that rmax (i.e. are in
        # dimensions of the image used to generate the detection
        # function fit). So, since our matrix calculation is actually
        # done in terms of bins, rather than absolute scale, we need
        # to rescale the detection function parameters accordingly.
        df_rscale = detectionfn.rmax / Rbins
        df_rkstep = detectionfn.rkstep / df_rscale
        df_sigma = detectionfn.sigma / df_rscale

        # It seems like a good idea to normalize the detection
        # coefficients to a maximum value of one, however, this seems
        # to lead to instability, so we don't do it. But we leave this
        # here as a reminder not to do it in the future.
        #detectionfn.coef /= detectionfn.coef.max()

        if detectionfn.oddl is True:
            df_oddl = 1
        else:
            df_oddl = 0

        if oddl is False:
            linc = 2
            mtx = numpy.empty([kmax + 1, lmax / 2 + 1, Rbins, Thetabins])
        else:
            linc = 1
            mtx = numpy.empty([kmax + 1, lmax + 1, Rbins, Thetabins])

        def __worker(k, l):
            rk = rkspacing * k

            logger.info(
                'Calculating basis function for k={0}, l={1}'.format(k, l))

            bf = basisfn_detfn1(
                k, l, Rbins, Thetabins, sigma, rk,
                epsabs, epsrel, wkspsize,
                detectionfn.coef, detectionfn.kmax, df_sigma,
                df_rkstep, detectionfn.lmax, df_oddl,
                alpha, beta, method)

            if oddl is True:
                mtx[k, l] = bf
            else:
                mtx[k, l / 2] = bf

            logger.info(
                'Finished calculating basis function for k={0}, l={1}'.format(
                    k, l)
            )

        if nthreads is None:
            nthreads = multiprocessing.cpu_count()

        with futures.ThreadPoolExecutor(max_workers=nthreads) as executor:
            jobs = []

            for k in numpy.arange(kmax + 1):
                for l in numpy.arange(0, lmax + 1, linc):
                    j = executor.submit(__worker, k, l)
                    jobs.append(j)

            jobs_done = futures.as_completed(jobs)

            while True:
                try:
                    job = next(jobs_done)

                    if job.exception() is not None:
                        logger.error(job.exception())
                        for j in jobs:
                            if not j.done():  # and not j.running():
                                j.cancel()
                        raise job.exception()
                except StopIteration:
                    break
                except KeyboardInterrupt:
                    logger.info('Ctrl-c received, exiting.')
                    for j in jobs:
                        if not j.done():  # and not j.running():
                            j.cancel()
                    raise

        self.matrix = mtx
        self.kmax = kmax
        self.sigma = sigma
        self.lmax = lmax
        self.oddl = oddl
        self.Rbins = Rbins
        self.Thetabins = Thetabins
        self.epsabs = epsabs
        self.epsrel = epsrel
        self.method = method
        # It's important we save this as part of the matrix
        # object, as subsequent fits with this matrix are only
        # valid if they have the same binning and scaling. Note
        # that we can always calculate the relevant rscale from
        # (self.Rbins / self.rmax) if needs be, so we don't save
        # that.
        self.rmax = detectionfn.rmax

        # Also save the detectionfn details for future reference
        self.detectionfn = detectionfn
        self.alpha = alpha
        self.beta = beta
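As in Examples #22 and #25, the __worker closure above writes its result straight into a preallocated numpy array; each job fills a distinct slice, so no lock is needed, and as_completed() is used only to drain results and re-raise errors. A minimal standalone sketch of that pattern (fill_rows() is hypothetical), assuming the same futures backport:

import numpy

import futures  # backport of concurrent.futures for Python 2


def fill_rows(nrows, ncols):
    out = numpy.zeros((nrows, ncols))

    def __worker(row):
        # each job writes only its own row of the shared array
        out[row, :] = numpy.arange(ncols) * row

    with futures.ThreadPoolExecutor(max_workers=4) as executor:
        jobs = [executor.submit(__worker, row) for row in range(nrows)]
        for job in futures.as_completed(jobs):
            job.result()  # re-raise any exception from the worker

    return out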
Example #29
File: ssh.py Project: baua/simbol
    def writeMeABASHScript(self, var, username, password, cmd):
        mutex = Lock()

        sys.stdout.write("#!/bin/bash\n")
        sys.stdout.write("#. threads: %d\n" % self._threads)
        sys.stdout.write("#. timeout: %0.1f\n" % self._timeout)
        sys.stdout.write("\n")
        with futures.ThreadPoolExecutor(max_workers=self._threads) as executor:
            #. See if stdin from the shell has any data to offer...
            #stdindata = []
            #if select([sys.stdin,],[],[],0.0)[0]:
            #    stdindata = [_.strip('\n') for _ in sys.stdin.readlines()]

            stdindata = []
            if password:
                cmd = """sudo -S %s""" % cmd
                stdindata.append(password)

            queue = {
                executor.submit(self.connect, username, qdn, cmd, stdindata):
                    qdn for qdn in self._clients.keys()
            }

            sys.stdout.write("local -A %s\n"   % var) #. stdout
            sys.stdout.write("local -A %s_o\n" % var) #. stdout
            sys.stdout.write("local -A %s_e\n" % var) #. stderr
            sys.stdout.write("local -A %s_w\n" % var) #. warnings

            i = 0
            total = len(self._clients.keys())

            #for future in futures.as_completed(queue, timeout=self._timeout):
            for future in futures.as_completed(queue):
                i += 1

                qdn = queue[future]
                data = None
                try:
                    #data = future.result(timeout=self._timeout)
                    data = future.result()
                except futures.TimeoutError:
                    sys.stdout.write("#. %s is taking too long. Oh well.\n" % qdn)
                else:
                    mutex.acquire()
                    try:
                        sys.stdout.write("#. %s -={\n" % qdn)

                        _qdn, stdout, stderr, e = data
                        qdn_hash = sha(qdn).hexdigest()
                        sys.stdout.write(
                            "#. Query with host %s (%d of %d) : %s\n" % (
                                qdn, i, total, future._state
                            )
                        )

                        sys.stdout.write("%s[%s]=%d\n" % (var, qdn_hash, e))

                        if len(stdout) > 0:
                            sys.stdout.write("read -r -d '' %s_o[%s] <<-!\n" % (var, qdn_hash))
                            sys.stdout.write(''.join(stdout).strip())
                            sys.stdout.write("\n!\n")

                        if len(stderr) > 0:
                            sys.stdout.write("read -r -d '' %s_e[%s] <<-!\n" % (var, qdn_hash))
                            sys.stdout.write(''.join(stderr).strip())
                            sys.stdout.write("\n!\n")

                        sys.stdout.write("#. }=- %s\n\n" % qdn)
                    finally:
                        mutex.release()

            sys.stdout.write("#. All done.\n")
Example #30
    def writeMeABASHScript(self, var, username, password, cmd):
        mutex = Lock()

        sys.stdout.write("#!/bin/bash\n")
        sys.stdout.write("#. threads: %d\n" % self._threads)
        sys.stdout.write("#. timeout: %0.1f\n" % self._timeout)
        sys.stdout.write("\n")
        with futures.ThreadPoolExecutor(max_workers=self._threads) as executor:
            #. See if stdin from the shell has any data to offer...
            #stdindata = []
            #if select([sys.stdin,],[],[],0.0)[0]:
            #    stdindata = [_.strip('\n') for _ in sys.stdin.readlines()]

            stdindata = []
            if password:
                cmd = """sudo -S %s""" % cmd
                stdindata.append(password)

            queue = {
                executor.submit(self.connect, username, qdn, cmd, stdindata):
                qdn
                for qdn in self._clients.keys()
            }

            sys.stdout.write("local -A %s\n" % var)  #. stdout
            sys.stdout.write("local -A %s_o\n" % var)  #. stdout
            sys.stdout.write("local -A %s_e\n" % var)  #. stderr
            sys.stdout.write("local -A %s_w\n" % var)  #. warnings

            i = 0
            total = len(self._clients.keys())

            #for future in futures.as_completed(queue, timeout=self._timeout):
            for future in futures.as_completed(queue):
                i += 1

                qdn = queue[future]
                data = None
                try:
                    #data = future.result(timeout=self._timeout)
                    data = future.result()
                except futures.TimeoutError:
                    sys.stdout.write("#. %s is taking too long. Oh well.\n" %
                                     qdn)
                else:
                    mutex.acquire()
                    try:
                        sys.stdout.write("#. %s -={\n" % qdn)

                        _qdn, stdout, stderr, e = data
                        qdn_hash = sha(qdn).hexdigest()
                        sys.stdout.write(
                            "#. Query with host %s (%d of %d) : %s\n" %
                            (qdn, i, total, future._state))

                        sys.stdout.write("%s[%s]=%d\n" % (var, qdn_hash, e))

                        if len(stdout) > 0:
                            sys.stdout.write("read -r -d '' %s_o[%s] <<-!\n" %
                                             (var, qdn_hash))
                            sys.stdout.write(''.join(stdout).strip())
                            sys.stdout.write("\n!\n")

                        if len(stderr) > 0:
                            sys.stdout.write("read -r -d '' %s_e[%s] <<-!\n" %
                                             (var, qdn_hash))
                            sys.stdout.write(''.join(stderr).strip())
                            sys.stdout.write("\n!\n")

                        sys.stdout.write("#. }=- %s\n\n" % qdn)
                    finally:
                        mutex.release()

            sys.stdout.write("#. All done.\n")
Example #31
	if uri_json[ECD]:
		print 'Found creation date: {}'.format(uri_json[ECD])
		return (uri, uri_json[ECD])
	else:
		print 'Found no ECD'
		return None

if __name__ == '__main__':
	# Remove already completed links
	with open('site_mementos') as infile:
		input_uris = [line.split(' ')[0] for line in infile if line.rstrip('\n').split(' ')[1] != '0']
	with open('site_ecd_all') as prevfile:
		prev = [line.split(' ')[0] for line in prevfile]	
	uris = [uri for uri in input_uris if uri not in prev]
	print 'Starting on uri #{}'.format(len(uris))

	# Work on the rest
	with open('site_ecd_all','a') as outfile:
		with futures.ThreadPoolExecutor(max_workers=8) as executor:
			urifutures = {executor.submit(getdate, uri): uri for uri in uris}
			for future in futures.as_completed(urifutures):
				try:
					data = future.result()
				except Exception as exc:
					print '{} generated an exception: {}'.format(urifutures[future], exc)
					continue
				if data is not None and len(data) == 2:
					print 'Writing data: {}'.format(data)
					outfile.write('{} {}\n'.format(data[0], data[1]))
				else:
					print 'Found no data'
Example #32
def Audit(services):
	global ret, http404page
	retinfo = {}
	output = 'plugin run' + os.linesep
	# first check no exists file
	noexistscode = getNoExists(services['url']+'/noexists_1231232.html')
	if not noexistscode:
		logger('no exists file return code 200, stop plugin')
		return 
	# first get http404 code page
	http404page = get404Page(services['url']+'/404.html')
	
	urls = generateUrls(services['url'])
	# pprint(urls)

	#  threads
	lock = threading.Lock()
	threads = []
	maxthreads = 20

	# for url in urls:
	# 	th = threading.Thread(target=httpcrack,args=(url,lock))
	# 	threads.append(th)
	# i = 0
	# while i<len(threads):
	# 	if i+maxthreads >len(threads):
	# 		numthreads = len(threads) - i
	# 	else:
	# 		numthreads = maxthreads
	# 	print 'threads:',i,' - ', i + numthreads

	# 	# start threads
	# 	for j in range(numthreads):
	# 		threads[i+j].start()

	# 	# wait for threads
	# 	for j in range(numthreads):
	# 		threads[i+j].join()

	# 	i += maxthreads

	# switched to the futures module
	# with futures.ThreadPoolExecutor(max_workers=maxthreads) as executor:     # 10 threads by default
	# 	future_to_url = dict((executor.submit(httpcrack, url), url) for url in urls)

	with futures.ThreadPoolExecutor(max_workers=maxthreads) as executor:
		# Start the load operations and mark each future with its URL
		future_to_url = dict((executor.submit(httpcrack, url), url) for url in urls)
		# try:
		for future in futures.as_completed(future_to_url):
			url = future_to_url[future]
			try:
				ret = future.result()
			except Exception as exc:
				# print('%r generated an exception: %s' % (url, exc))
				logger('%r generated an exception: %s' % (url, exc))
			else:
				# print('%r returns: %s' % (url, str(ret)))
				logger('%r returns: %s' % (url, str(ret)))
		
		# except (KeyboardInterrupt, SystemExit):
		# 	print "Exiting..."
		# 	return (retinfo,output)
	if ret != '':
		retinfo = {'level':'medium','content':ret}

	return (retinfo,output)
#!/usr/bin/env python
#### https://pypi.python.org/pypi/futures/#downloads
#### Backport of the concurrent.futures package from Python 3.2
import futures
import urllib2

URLS = ['http://www.foxnews.com/',
        'http://www.cnn.com/',
        'http://europe.wsj.com/',
        'http://www.bbc.co.uk/',
        'http://google.com/',
        'http://yahoo.com/']

def load_url(url, timeout):
    return urllib2.urlopen(url, timeout=timeout).read()

with futures.ThreadPoolExecutor(max_workers=5) as executor:
    future_to_url = dict((executor.submit(load_url, url, 60), url)
                         for url in URLS)

    for future in futures.as_completed(future_to_url):
        url = future_to_url[future]
        if future.exception() is not None:
            print '%r generated an exception: %s' % (url,
                                                     future.exception())
        else:
            print '%r page is %d bytes' % (url, len(future.result()))
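The snippet above targets Python 2 with the futures backport and urllib2. For comparison, a minimal sketch of the same pattern on Python 3 using only the standard library (same URLS list assumed):

from concurrent import futures
from urllib.request import urlopen


def load_url(url, timeout):
    return urlopen(url, timeout=timeout).read()


with futures.ThreadPoolExecutor(max_workers=5) as executor:
    future_to_url = {executor.submit(load_url, url, 60): url for url in URLS}
    for future in futures.as_completed(future_to_url):
        url = future_to_url[future]
        if future.exception() is not None:
            print('%r generated an exception: %s' % (url, future.exception()))
        else:
            print('%r page is %d bytes' % (url, len(future.result())))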