Example #1
def demo(args):
    """ Demonstrates the Python logging facility. """

    cli = argparse.ArgumentParser()
    cli.add_argument("--verbose", "-v", action='count', default=ENV_VERBOSITY)
    cli.add_argument("--quiet", "-q", action='count', default=0)
    args = cli.parse_args(args)

    level = verbosity_to_level(args.verbose - args.quiet)

    info("new log level: " + str(level))
    old_level = set_loglevel(level)
    info("old level was: " + str(old_level))

    info("printing some messages with different log levels")

    spam("rofl")
    dbg("wtf?")
    info("foo")
    warn("WARNING!!!!")
    err("that didn't go so well")
    crit("pretty critical, huh?")

    info("restoring old loglevel")

    set_loglevel(old_level)

    info("old loglevel restored")
    info("running some threaded stuff")

    pool = ThreadPool()
    for i in range(8):
        pool.apply_async(info, ("async message #" + str(i),))
    pool.close()
    pool.join()
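
All of the snippets on this page follow the same basic workflow: create a ThreadPool, submit work with apply_async(), then close() and join() the pool to wait for completion. A minimal, self-contained sketch of that pattern (the work function and pool size here are illustrative only):

from multiprocessing.pool import ThreadPool

def work(n):
    return n * n

pool = ThreadPool(processes=4)
results = [pool.apply_async(work, (i,)) for i in range(8)]
pool.close()   # no more tasks will be submitted
pool.join()    # block until all workers have finished
print([r.get() for r in results])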
Example #2
    def test_turn_lights_on_same_time(self):
        """ Test to turn on all lights on at the same time, and then off at the same time """

        print "\n"
        print "****************************************************"
        print "Testing turning on all lights at the same time"
        print "****************************************************"

        # Find bridges to associate with
        lights = {}
        lights.update(dr_hue.get_all_lights(self.url, USERNAME))

        print "There are %s lights, turning them all on now" % len(lights)
        pool = ThreadPool(len(lights) or THREAD_MAX)

        for light in lights.keys():
            args = (self.url, light, USERNAME)
            pool.apply_async(dr_hue.turn_light_on, args=args)

        # Wait for all the threads to complete then make sure everything is Kosher
        pool.close()
        pool.join()

        print "Now trying to turn them all off at the same time in 5 seconds"
        pool = ThreadPool(len(lights) or THREAD_MAX)
        sleep(5)

        for light in lights.keys():
            args = (self.url, light, USERNAME)
            pool.apply_async(dr_hue.turn_light_off, args=args)

        # Wait for all the threads to complete then make sure everything is Kosher
        pool.close()
        pool.join()
Example #3
def process_processed(path, devices, dates, dry):
	pool = multiprocessing.Pool(8)
	thread_pool = ThreadPool(8)

	manager = multiprocessing.Manager()
	queue = manager.Queue()

	total_size = [0]

	file_size_dict = {}
	failed = {}
	def update_file_size_dict(device, remote_path, local_path, size):
		file_size_dict[remote_path] = {
			'size': size,
			'remote': remote_path,
			'local': local_path
		}
		total_size[0] += size
		logger.debug("total_size = %d" % (total_size[0]))

	ym = set([(x.year, x.month) for x in dates])
	for d in devices:
		for year, month in ym:
			fpath = 'time/%04d/%02d' % (year, month)
			srcpath = os.path.join(BACKEND_PROCESSED_BASE_PATH, d, fpath)
			remote_path = '%s:%s' % (BACKEND, srcpath)

			outpath = os.path.join(path, d, fpath)
			# Make the outpath (if needed)
			if not os.path.exists(outpath):
				os.makedirs(outpath)

			thread_pool.apply_async(get_remote_files, args=(d, remote_path, outpath, dates, update_file_size_dict))
#			get_remote_files(d, remote_path, outpath, dates, update_file_size_dict)

	thread_pool.close()
	thread_pool.join()

	total_size = total_size[0]
	finished = 0

	logger.info("# files: %d" % (len(file_size_dict.keys())))
	for k, v in file_size_dict.iteritems():
		pool.apply_async(rsync_worker, args=(k, file_size_dict[k]['local'], '-avzupr', dry, queue))
		#rsync_worker(k, file_size_dict[k]['local'], '-avzpr', dry, queue)
	pool.close()
	try:
		i = 0
		while i < len(file_size_dict.keys()):
			path, ret, out, stderr = queue.get()
			size = file_size_dict[path]['size']
			finished += size
			i += 1
#			pycommons.print_progress(finished, total_size)
#			logger.info("Finished: %d/%d" % (i, len(file_size_dict.keys())))

		pool.join()
	except KeyboardInterrupt:
		logger.warning("Terminating ...")
		return
Example #4
class TaskManager(object):
	def __init__(self, processes):
		self.pool = ThreadPool(processes=processes)
		self.workers = threading.Semaphore(processes)
		self.counter = 0
		self.sizes = 0
		self.total = 0
		self.progress_line = 0
		self.progress_bar = ''

	def new(self,task, arg):
		self.workers.acquire()
		self.sizes += 1
		self.pool.apply_async(task, args=(arg, ), callback=self.done)

	def done(self, args):
		self.workers.release()
		self.sizes -= 1
		self.setCount(1)

	def setTotal(self,total):
		self.total = total
		return self
	
	def getTotal(self):
		return self.total
	
	def setCount(self,num):
		self.counter += num
		return self
		
	def getCount(self):
		return self.counter
	#
	# progress() is outdated and abandoned; use ProgressBar to display a progress bar instead.
	# Example:
	#	progressBar = ProgressBar(100, "#")
	#	for i in range(0, 99):
	#		progressBar.progress(i)
	def progress(self):
		if self.getCount() != 0:
			self.percent = int((float(self.getCount())/(self.getTotal()-1))*100)
			blockcount = int(self.percent/2)

			if blockcount > self.progress_line:
				self.progress_bar += '#'
			self.progress_line = blockcount
			
		log = str((self.getTotal()))+'||'+str(self.getCount())
		log += '||'+self.progress_bar+'->||'+str(self.progress_line)+"%\r"
		stdout.write(log)
		stdout.flush()
		return

	def size(self):
		return self.sizes

	def __len__(self):
		return self.sizes
Example #5
def play(track_id):
    from rhapsody.models.common import Image
    from multiprocessing.pool import ThreadPool

    album_id = plugin.request.args.get('album_id', [False])[0]
    duration = plugin.request.args.get('duration', [False])[0]
    thumbnail_missing = plugin.request.args.get('thumbnail_missing', [False])[0]

    item = dict()
    pool = ThreadPool(processes=2)

    stream_result = pool.apply_async(lambda: rhapsody.streams.detail(track_id))

    if thumbnail_missing:
        album_result = pool.apply_async(lambda: rhapsody.albums.detail(album_id))
        album = album_result.get()
        item['thumbnail'] = album.images[0].get_url(size=Image.SIZE_ORIGINAL)

    stream = stream_result.get()
    item['path'] = stream.url
    plugin.set_resolved_url(item)

    started = rhapsody.events.log_playstart(track_id, stream)
    rhapsody.events.log_playstop(track_id, stream, started, duration)

    pool.close()
    pool.join()
Example #6
    def run(self):
        pool = ThreadPool(self.num_agents)
        for idx in range(self.num_agents):
            pool.apply_async(self.run_experiement, args=(self.experiment, idx))

        pool.close()
        pool.join()
Example #7
    def _listArtifacts(self, urls, gavs):
        """
        Loads maven artifacts from list of GAVs and tries to locate the artifacts in one of the
        specified repositories.

        :param urls: repository URLs where the given GAVs can be located
        :param gavs: List of GAVs
        :returns: Dictionary where index is MavenArtifact object and value is it's repo root URL.
        """
        def findArtifact(gav, urls, artifacts):
            artifact = MavenArtifact.createFromGAV(gav)
            for url in urls:
                if maven_repo_util.gavExists(url, artifact):
                    #Critical section?
                    artifacts[artifact] = ArtifactSpec(url)
                    return

            logging.warning('Artifact %s not found in any url!', artifact)

        artifacts = {}
        pool = ThreadPool(maven_repo_util.MAX_THREADS)
        for gav in gavs:
            pool.apply_async(findArtifact, [gav, urls, artifacts])

        # Close the pool and wait for the workers to finish
        pool.close()
        pool.join()

        return artifacts
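
The "#Critical section?" comment above asks whether the shared artifacts dict needs protection. In CPython, assigning distinct keys from worker threads is generally safe because of the GIL, but an explicit lock makes the intent unambiguous. A minimal sketch under that assumption (the helper name below is illustrative, not part of the example):

import threading

artifacts_lock = threading.Lock()

def record_artifact(artifacts, artifact, spec):
    # Serialize writes to the shared dictionary.
    with artifacts_lock:
        artifacts[artifact] = spec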
Example #8
class NewClient(Client):
    """ new client """

    def __init__(self, context, poller):
        super(NewClient, self).__init__(context, poller)

        self._threadn = cv2.getNumberOfCPUs()
        self._pool = ThreadPool(processes = self._threadn)
        self._pending = deque()


    def run_img(self, wait=0.5):
        """ run img proc """
        img_it = self.fetch_img()

        for img in img_it:
            while len(self._pending) >0 and self._pending[0].ready():
                name, res = self._pending.popleft().get()

                cv2.imshow(name, res)

                ch = cv2.waitKey(1)
                gevent.sleep(wait)

            if len(self._pending) < self._threadn:
                tasks = [ self._pool.apply_async(detect_edge, ('detect_edge', img.copy())),
                          self._pool.apply_async(detect_line, ('detect_lineP', img.copy())),
                          self._pool.apply_async(detect_circle, ('detect_circle', img.copy())),
                          self._pool.apply_async(detect_face, ('detect_face', img.copy())) ]

                # Queue the newly submitted tasks; tasks is only defined in this branch.
                for task in tasks:
                    self._pending.append(task)
Example #9
	def downloadPDFs(self):
		### Download all the files extracted from the metadata
		startTime = time.strftime("%c")
		# Loop through the CSV
		f = open(self.csvpath)
		metadata = csv.reader(f, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)
		
		for row in metadata:
			pmcid = row[8]
			
			### Check the input is a PMC ID
			if 'PMC' in pmcid:
				print('Starting thread for: '+pmcid)
				
				pool = Pool(30)
				pool.apply_async(self.saveFile, (pmcid,))
				pool.close()
				pool.join()
			else:
				print('Something is wrong. '+pmcid+' is not a PMC id')
				sys.exit(0)
			
		f.close()
		
		print('Finished downloading all files: start {} end {}.'.format(startTime, time.strftime("%c")))
Example #10
    def run(self, suites):
        wrapper = self.config.plugins.prepareTest(suites)
        if wrapper is not None:
            suites = wrapper

        wrapped = self.config.plugins.setOutputStream(self.stream)
        if wrapped is not None:
            self.stream = wrapped

        result = self._makeResult()

        size = self.config.options.thread_pool
        if size < 0:
            size = cpu_count()

        pool = ThreadPool(size)

        with measure_time(result):

            for suite in suites:
                pool.apply_async(suite, args=(result,))

            pool.close()
            pool.join()

        self.config.plugins.finalize(result)
        return result
Example #11
    def parse_dir(dir): 
        ignores = Parser.load_ignores(dir)
        ignores.extend([".svn", ".hg", ".git"])

        def callback(res):
            dependencies.extend(res)

        def is_ignored(res, is_dir=False):
            if is_dir:
                res = res + "/"
            for i in ignores:
                if fnmatch.fnmatch(res, i) or res.startswith(i):
                    return True
            return False

        def find_ignored(reslist, is_dir=False):
            return [res for res in reslist if is_ignored(res, is_dir)]

        pool = ThreadPool(processes=Parser.concurrency)
        dependencies = []

        for root, dirs, files in scandir.walk(dir):
            for d in find_ignored(dirs, True):
                logging.debug("%s is blacklisted" % d)
                dirs.remove(d)
            for f in find_ignored(files):
                logging.debug("%s is blacklisted" % d)
                files.remove(f)
            for name in files:
                pool.apply_async(Parser.parse_file, args = (os.path.join(root, name),), callback = callback)

        pool.close()
        pool.join()
        return dependencies
Example #12
def thread(host, port, threads, num):
    pool = ThreadPool(threads)
    for _ in range(num):
        pool.apply_async(job, (host, port))
        time.sleep(0.001)
    pool.close()
    pool.join()
Example #13
    def getMessagesBySource(self, source, batch_mode=False):
        """
        Returns the messages for the given source, including messages
        from the configured builder (if available) and static checks
        Extra arguments are
        """
        self._setupEnvIfNeeded()

        if self._USE_THREADS:
            records = []
            pool = ThreadPool()

            static_check = pool.apply_async(
                getStaticMessages, args=(source.getSourceContent().split('\n'), ))

            if self._isBuilderCallable():
                builder_check = pool.apply_async(self._getBuilderMessages,
                                                 args=[source, batch_mode])
                records += builder_check.get()

            records += static_check.get()

            pool.terminate()
            pool.join()
        else:
            records = getStaticMessages(source.getSourceContent().split('\n'))
            if self._isBuilderCallable():
                records += self._getBuilderMessages(source, batch_mode)

        self._saveCache()
        return records
Example #14
    def get_data(self):
        amazon_service = AmazonService(self.title, self.country)
        bluray_service = BlurayService(self.title)
        imdb_service = ImdbService(self.title)
        tmdb_service = TmdbService(imdb_service.get_id())
        omdb_service = OmdbService(imdb_service.get_id())

        pool = ThreadPool(processes=self.__THREAD_COUNT)
        async_rt_rating = pool.apply_async(omdb_service.get_rt_rating)
        async_bluray_rating = pool.apply_async(bluray_service.get_bluray_rating)
        async_tech_specs = pool.apply_async(imdb_service.get_tech_spec)
        async_artwork = pool.apply_async(tmdb_service.get_artwork)
        async_price = pool.apply_async(amazon_service.get_price)
        pool.close()

        # try:
        rt_rating = async_rt_rating.get()
        bluray_rating = async_bluray_rating.get()
        tech_specs = async_tech_specs.get()
        price = async_price.get()
        artwork = async_artwork.get()
        pool.join()
        # except:
        #     raise ValueError("Oops, something went wrong")

        data = {'rt_rating': rt_rating,
                'bluray_rating': bluray_rating,
                'tech_specs': tech_specs,
                'price': price,
                'artwork': artwork}

        return data
Example #15
def update(args=None):
    projects = list_projects(False, args.dir)

    print("Update in progress...")

    if args.j:
        pool = Pool(args.j)

        def worker(p):
            if p.is_behind():
                p.update()
                print("{} updated".format(p.name))

        for p in projects:
            pool.apply_async(worker, (p,))

        pool.close()
        pool.join()
    else:
        for p in projects:
            if p.is_behind():
                p.update()
                print("{} updated".format(p.name))

    print("Update done")
Example #16
class CCSimpleHttpServer(HTTPServer):
    '''
    Simple http server to handle requests from the clients
    '''

    daemon_threads = False

    def __init__(self,
                 server_address,
                 RequestHandlerClass,
                 db_conn_string,
                 pckg_data,
                 suppress_handler,
                 db_version_info):

        LOG.debug('Initializing HTTP server')

        self.www_root = pckg_data['www_root']
        self.doc_root = pckg_data['doc_root']
        self.checker_md_docs = pckg_data['checker_md_docs']
        self.checker_md_docs_map = pckg_data['checker_md_docs_map']
        self.suppress_handler = suppress_handler
        self.db_version_info = db_version_info
        self.__engine = sqlalchemy.create_engine(db_conn_string,
                                                 client_encoding='utf8',
                                                 poolclass=sqlalchemy.pool.NullPool)

        Session = scoped_session(sessionmaker())
        Session.configure(bind=self.__engine)
        self.sc_session = Session

        self.__request_handlers = ThreadPool(processes=10)

        HTTPServer.__init__(self, server_address,
                            RequestHandlerClass,
                            bind_and_activate=True)

    def process_request_thread(self, request, client_address):
        try:
            # finish_request instantiates the request handler class
            self.finish_request(request, client_address)
            self.shutdown_request(request)
        except socket.error as serr:
            if serr[0] == errno.EPIPE:
                LOG.debug('Broken pipe')
                LOG.debug(serr)
                self.shutdown_request(request)

        except Exception as ex:
            LOG.debug(ex)
            self.handle_error(request, client_address)
            self.shutdown_request(request)

    def process_request(self, request, client_address):
        # sock_name = request.getsockname()
        # LOG.debug('PROCESSING request: '+str(sock_name)+' from: '
        #           +str(client_address))
        self.__request_handlers.apply_async(self.process_request_thread,
                                            (request, client_address))
Example #17
	def post(self):

		email = xhtml_escape(self.get_argument("email"))
		password = xhtml_escape(self.get_argument("password"))

		pool = ThreadPool(processes=1)
		pool.apply_async(self.__checkLogin, args=(email, password), callback=self.__onfinish)
		pool.close()
Example #18
 def wrapper(*args, **kwargs):
     pool = ThreadPool(processes=1)
     # "async" is a reserved word in Python 3.7+, so use a different name.
     async_result = pool.apply_async(callback, args, kwargs)
     try:
         return async_result.get(self.get_timeout_sec(route))
     except TimeoutError:
         pool.terminate()
         raise bottle.HTTPError(503, 'Service Unavailable, process timeout')
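
The example above enforces a per-request timeout by combining apply_async() with AsyncResult.get(timeout) and terminating the pool on failure. A self-contained sketch of the same pattern, with placeholder names instead of the bottle/route machinery above:

from multiprocessing import TimeoutError
from multiprocessing.pool import ThreadPool
import time

def slow_task(seconds):
    time.sleep(seconds)
    return "done"

pool = ThreadPool(processes=1)
async_result = pool.apply_async(slow_task, (5,))
try:
    print(async_result.get(timeout=1))  # raises TimeoutError after 1 second
except TimeoutError:
    pool.terminate()  # give up on the hung worker
    print("timed out")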
Example #19
class CCSimpleHttpServer(HTTPServer):
    """
    Simple http server to handle requests from the clients.
    """

    daemon_threads = False

    def __init__(self,
                 server_address,
                 RequestHandlerClass,
                 db_conn_string,
                 pckg_data,
                 suppress_handler,
                 db_version_info,
                 manager):

        LOG.debug('Initializing HTTP server')

        self.www_root = pckg_data['www_root']
        self.doc_root = pckg_data['doc_root']
        self.checker_md_docs = pckg_data['checker_md_docs']
        self.checker_md_docs_map = pckg_data['checker_md_docs_map']
        self.suppress_handler = suppress_handler
        self.db_version_info = db_version_info
        self.__engine = database_handler.SQLServer.create_engine(
            db_conn_string)

        Session = scoped_session(sessionmaker())
        Session.configure(bind=self.__engine)
        self.sc_session = Session
        self.manager = manager

        self.__request_handlers = ThreadPool(processes=10)

        HTTPServer.__init__(self, server_address,
                            RequestHandlerClass,
                            bind_and_activate=True)

    def process_request_thread(self, request, client_address):
        try:
            # finish_request instantiates the request handler class.
            self.finish_request(request, client_address)
            self.shutdown_request(request)
        except socket.error as serr:
            if serr[0] == errno.EPIPE:
                LOG.debug('Broken pipe')
                LOG.debug(serr)
                self.shutdown_request(request)

        except Exception as ex:
            LOG.debug(ex)
            self.handle_error(request, client_address)
            self.shutdown_request(request)

    def process_request(self, request, client_address):
        self.__request_handlers.apply_async(self.process_request_thread,
                                            (request, client_address))
Example #20
def parallel_process(obs_list,cmd_out,overwrite,loud):
  pool = ThreadPool()
  n = len(obs_list) // mp.cpu_count() + 1  # integer chunk size (works on Python 2 and 3)
  obs_chunks = [obs_list[i:i + n] for i in range(0, len(obs_list), n)]
  for obs_chunk in obs_chunks:
    pool.apply_async(execute_process, args = (obs_chunk,cmd_out,overwrite,loud))
  pool.close()
  pool.join()
  return
Example #21
    def multipart_upload(self, keyname, source_path, source_size, **kwargs):
        acl = kwargs.pop('acl', 'private')
        num_cb = kwargs.pop('num_cb', 10)

        debug = kwargs.pop('debug', True)
        headers = kwargs.pop('headers', {})

        parallel_processes = kwargs.pop('parallel_processes', 4)
        reduced_redundancy = kwargs.pop('reduced_redundancy', False)

        if kwargs.get('guess_mimetype', True):
            mtype = mimetypes.guess_type(keyname)[0] or 'application/octet-stream'
            headers.update({'Content-Type': mtype})

        bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)),
                              5242880)
        chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))

        metadata = kwargs.pop('metadata', {})
        metadata.update(self._md5_checksum_metadata(source_path))
        multipart_upload = self.bucket.initiate_multipart_upload(keyname, headers=headers, metadata=metadata,
                                                                 reduced_redundancy=reduced_redundancy)

        pool = ThreadPool(processes=parallel_processes)

        for index in range(chunk_amount):
            offset = index * bytes_per_chunk

            remaining_bytes = source_size - offset
            bytes_len = min([bytes_per_chunk, remaining_bytes])

            part_num = index + 1

            # task args
            args = (
                multipart_upload.id,
                part_num,
                source_path,
                offset,
                bytes_len,
                debug,
                self.cb,
                num_cb
            )
            pool.apply_async(self._upload_part, args)

        pool.close()
        pool.join()

        parts = multipart_upload.get_all_parts()

        if parts is not None and len(parts) == chunk_amount:
            multipart_upload.complete_upload()
            key = self.bucket.get_key(keyname)
            key.set_acl(acl)
        else:
            multipart_upload.cancel_upload()
Example #22
def course_search(dept, num):
    url = 'http://osoc.berkeley.edu/OSOC/osoc?p_term={}&x=0&p_classif=--+' \
          'Choose+a+Course+Classification+--&p_deptname=--+Choose+a+Depar' \
          'tment+Name+--&p_presuf=--+Choose+a+Course+Prefix%2fSuffix+--&y' \
          '=0&p_course={}&p_dept={}'.format(TERM, num, dept)
    contents = urllib2.urlopen(url).read()

    stats = {}
    def save(res):
        if res: stats[res[0]] = res[1]

    pool = Pool(20)
    ccns = re.findall(r'input type="hidden" name="_InField2" value="([0-9]*)"',
                      contents)
    semester = re.search(r'input type="hidden" name="_InField3" value="(.*)"',
                      contents).group(1)

    session = getSession()

    for ccn in ccns:
        pool.apply_async(scrape_enrollment,
                         args=(ccn, semester, url, session),
                         callback=save)
    pool.close()
    pool.join()

    data = []
    for line in contents.split('\n'):
        if ':&#160;' in line:
            raw = re.findall(r'>([^:<]+)', line)
            if len(raw) == 1:
                raw.append('')
            data.append(raw[1].strip())

    columns = '{0:<10}{1:<9}{2:<11}{3:<11}{4:<14}{5:<20}'
    print columns.format('Section', 'CCN', 'Enrolled', 'Waitlist', 'Time', 'Place')
    print '-------   -----    --------   --------   -----------   --------------'

    sections = zip(*[iter(data)] * 11)
    # sections contains a list per section:
    # [course, coursetitle, location, instructor, status, ccn, units,
    #  finalgroup, restrictions, note]

    for ccn_lookup, section in zip(ccns, sections):
        enrolled, waitlist = stats[ccn_lookup]
        name = ' '.join(section[0].split()[-2:])
        time_place = section[2].split(',')*2
        time = time_place[0].strip()
        place = time_place[1].strip()
        result = columns.format(name, section[5], enrolled, waitlist, time, place)

        # check if a section has space
        students, spaces = map(int, enrolled.split('/'))
        if students < spaces:
            result = result.replace(enrolled, colors.header(enrolled), 1)
        print result
Example #23
def download_images():

    # download images for each city in a different thread
    num_threads = 4
    pool = ThreadPool(num_threads)
    for city, (lat, lon) in cities.iteritems():
        pool.apply_async(download_images_for_city, (city, lat, lon))

    pool.close()
    pool.join()
Example #24
 def _stop_server(self):
     pool = ThreadPool()
     for session in self._slack_sessions.values():
         # Bind the current session as a default argument so each task keeps
         # its own session (a plain closure would see only the last session).
         def polite_stop(session=session):
             session.send("Sorry, the server is shutting down now.")
             session.kill()
         pool.apply_async(polite_stop)
     pool.close()
     pool.join()
     self._stop_requested = True
Example #25
def _write_files(app, static_url_loc, static_folder, files, bucket,
                 ex_keys=None):
    """ Writes all the files inside a static folder to S3. """

    if logger.level == logging.INFO:
        files_tqdm = tqdm(
            files,
            desc='Uploading from {0} to {1}'.format(
                static_url_loc,
                bucket.name))
    else:
        # Fall back to a plain iterator so files_tqdm is always defined.
        files_tqdm = iter(files)

    num = app.config.get('S3_PARALLEL_UPLOADS', None)
    if num is not None:
        pool = ThreadPool(num)
        tasks = []
        for file_path in files:
            tasks.append(pool.apply_async(_write_file, (
                static_url_loc, static_folder, bucket, ex_keys, file_path,
                app.config['S3_HEADERS'].items()
            )))
        retries = [0 for i in files]
        finished = False
        while not finished:
            sleep(.1)
            finished = True
            for n, task in enumerate(tasks):
                if task.ready():
                    if not task.successful():
                        logger.error("Error while uploading %s!", files[n])
                        if retries[n] < app.config.get("S3_PARALLEL_RETRIES", 0):
                            retries[n] += 1
                            logger.info("%s: would retry (%d)", files[n],
                                        retries[n])
                            finished = False
                            tasks.append(pool.apply_async(_write_file, (
                                static_url_loc, static_folder, bucket, ex_keys,
                                files[n], app.config['S3_HEADERS'].items()
                            )))
                        else:
                            logger.info("%s: would not retry")
                    files_tqdm.next()
                    tasks.remove(task)
                    logger.debug('%s uploaded', files[n])
                else:
                    finished = False
        try:
            files_tqdm.next()
        except StopIteration:
            pass
        pool.close()
        pool.join()
    else:
        for file_path in files_tqdm:
            _write_file(static_url_loc, static_folder, bucket, ex_keys, file_path,
                        app.config['S3_HEADERS'].items())
Example #26
	def post(self):

		username = xhtml_escape(self.get_argument("username"))
		email = xhtml_escape(self.get_argument("email"))
		password = xhtml_escape(self.get_argument("password"))
		conf_pass = xhtml_escape(self.get_argument("confirmPassword"))

		# Thread responsible for handling the write to the database
		pool = ThreadPool(processes=1)
		pool.apply_async(self.__checkDuplicates, args=(username, email, password), callback=self.__onfinish)
		pool.close()
Example #27
def title(word, word_eol, userdata):
    channel = xchat.get_info('channel').lower()
    if channel in allowed_chans:
        urls = re.findall("(?<!<)http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]"
                          "|[!*\(\),]|(?:%[0-9a-fA-F]"
                          "[0-9a-fA-F]))+", xchat.strip(word_eol[0]))
        if len(urls) > 0:
            context = xchat.get_context()
            pool = ThreadPool(processes=1)
            pool.apply_async(gettitle, (urls, pool, context),
                             callback=send_message)
Example #28
 def test_synchronize_with_same_param(self):
     demo = LockDemo()
     pool = ThreadPool(3)
     pool.imap(demo.foo2, (1, 1))
     pool.apply_async(demo.foo1)
     sleep(0.04)
     assert_that(demo.call_count, equal_to(1))
     sleep(0.05)
     assert_that(demo.call_count, equal_to(2))
     sleep(0.05)
     assert_that(demo.call_count, equal_to(3))
Example #29
    def post(self):
        data = self.get_json()

        name = data.get('name', '')
        download = data.get('download', [])

        if not download:
            download = None

        pool = ThreadPool(processes=1)
        pool.apply_async(update, (name, download,), callback=self.resp)
Example #30
 def proc(self, sock, trig, op, args, timeout):
     pool = ThreadPool(processes=1)
     # "async" is a reserved word in Python 3.7+, so use a different name.
     async_result = pool.apply_async(trig.proc, (op, args))
     try:
         res = async_result.get(timeout)
         stream_input(sock, res)
     except TimeoutError:
         log_err(self, 'failed to process (timeout)')
         pool.terminate()
     finally:
         sock.close()
Example #31
####################################################################################
''')

            # limit number of cells to preprocess at a time (based on hardware limitations)
            n_preprocess = n_parallel[0]
            if n_preprocess > len(cells):
                n_preprocess = len(cells)

            # create pool of workers and run through all cells
            preprocess_pool = ThreadPool(processes=n_preprocess)

            # align and index cells
            for c in cells:
                preprocess_pool.apply_async(
                    resources.SingleCell.align_and_index, args=(
                        c,
                        bt2_ref,
                    ))

            preprocess_pool.close()
            preprocess_pool.join()

            # optionally, call FLT3-ITDs using ITDseek
            if not skip_flt3:

                # limit number of cells for flt3 calling at a time (based on hardware limitations)
                n_flt3_calling = 500
                if n_flt3_calling > len(cells):
                    n_flt3_calling = len(cells)

                # verify flt3 folder is empty before continuing (remove and recreate folder)
Example #32
class ApiClient(object):
    """Generic API client for Swagger client library builds.

    Swagger generic API client. This client handles the client-
    server communication, and is invariant across implementations. Specifics of
    the methods and models for each application are generated from the Swagger
    templates.

    NOTE: This class is auto generated by the swagger code generator program.
    Ref: https://github.com/swagger-api/swagger-codegen
    Do not edit the class manually.

    :param configuration: .Configuration object for this client
    :param header_name: a header to pass when making calls to the API.
    :param header_value: a header value to pass when making calls to
        the API.
    :param cookie: a cookie to include in the header when making calls
        to the API
    """

    PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
    NATIVE_TYPES_MAPPING = {
        'int': int,
        'long': int if six.PY3 else long,  # noqa: F821
        'float': float,
        'str': str,
        'bool': bool,
        'date': datetime.date,
        'datetime': datetime.datetime,
        'object': object,
    }

    def __init__(self,
                 configuration=None,
                 header_name=None,
                 header_value=None,
                 cookie=None):
        if configuration is None:
            configuration = Configuration()
        self.configuration = configuration

        self.pool = ThreadPool()
        self.rest_client = rest.RESTClientObject(configuration)
        self.default_headers = {}
        if header_name is not None:
            self.default_headers[header_name] = header_value
        self.cookie = cookie
        # Set default User-Agent.
        self.user_agent = 'Swagger-Codegen/3.1.0/python'

    def __del__(self):
        self.pool.close()
        self.pool.join()

    @property
    def user_agent(self):
        """User agent for this API client"""
        return self.default_headers['User-Agent']

    @user_agent.setter
    def user_agent(self, value):
        self.default_headers['User-Agent'] = value

    def set_default_header(self, header_name, header_value):
        self.default_headers[header_name] = header_value

    def __call_api(self,
                   resource_path,
                   method,
                   path_params=None,
                   query_params=None,
                   header_params=None,
                   body=None,
                   post_params=None,
                   files=None,
                   response_type=None,
                   auth_settings=None,
                   _return_http_data_only=None,
                   collection_formats=None,
                   _preload_content=True,
                   _request_timeout=None):

        config = self.configuration

        # header parameters
        header_params = header_params or {}
        header_params.update(self.default_headers)
        if self.cookie:
            header_params['Cookie'] = self.cookie
        if header_params:
            header_params = self.sanitize_for_serialization(header_params)
            header_params = dict(
                self.parameters_to_tuples(header_params, collection_formats))

        # path parameters
        if path_params:
            path_params = self.sanitize_for_serialization(path_params)
            path_params = self.parameters_to_tuples(path_params,
                                                    collection_formats)
            for k, v in path_params:
                # specified safe chars, encode everything
                resource_path = resource_path.replace(
                    '{%s}' % k,
                    quote(str(v), safe=config.safe_chars_for_path_param))

        # query parameters
        if query_params:
            query_params = self.sanitize_for_serialization(query_params)
            query_params = self.parameters_to_tuples(query_params,
                                                     collection_formats)

        # post parameters
        if post_params or files:
            post_params = self.prepare_post_parameters(post_params, files)
            post_params = self.sanitize_for_serialization(post_params)
            post_params = self.parameters_to_tuples(post_params,
                                                    collection_formats)

        # auth setting
        self.update_params_for_auth(header_params, query_params, auth_settings)

        # body
        if body:
            body = self.sanitize_for_serialization(body)

        # request url
        url = self.configuration.host + resource_path

        # perform request and return response
        response_data = self.request(method,
                                     url,
                                     query_params=query_params,
                                     headers=header_params,
                                     post_params=post_params,
                                     body=body,
                                     _preload_content=_preload_content,
                                     _request_timeout=_request_timeout)

        self.last_response = response_data

        return_data = response_data
        if _preload_content:
            # deserialize response data
            if response_type:
                return_data = self.deserialize(response_data, response_type)
            else:
                return_data = None

        if _return_http_data_only:
            return (return_data)
        else:
            return (return_data, response_data.status,
                    response_data.getheaders())

    def sanitize_for_serialization(self, obj):
        """Builds a JSON POST object.

        If obj is None, return None.
        If obj is str, int, long, float, bool, return directly.
        If obj is datetime.datetime, datetime.date
            convert to string in iso8601 format.
        If obj is list, sanitize each element in the list.
        If obj is dict, return the dict.
        If obj is swagger model, return the properties dict.

        :param obj: The data to serialize.
        :return: The serialized form of data.
        """
        if obj is None:
            return None
        elif isinstance(obj, self.PRIMITIVE_TYPES):
            return obj
        elif isinstance(obj, list):
            return [
                self.sanitize_for_serialization(sub_obj) for sub_obj in obj
            ]
        elif isinstance(obj, tuple):
            return tuple(
                self.sanitize_for_serialization(sub_obj) for sub_obj in obj)
        elif isinstance(obj, (datetime.datetime, datetime.date)):
            return obj.isoformat()

        if isinstance(obj, dict):
            obj_dict = obj
        else:
            # Convert model obj to dict except
            # attributes `swagger_types`, `attribute_map`
            # and attributes which value is not None.
            # Convert attribute name to json key in
            # model definition for request.
            obj_dict = {
                obj.attribute_map[attr]: getattr(obj, attr)
                for attr, _ in six.iteritems(obj.swagger_types)
                if getattr(obj, attr) is not None
            }

        return {
            key: self.sanitize_for_serialization(val)
            for key, val in six.iteritems(obj_dict)
        }

    def deserialize(self, response, response_type):
        """Deserializes response into an object.

        :param response: RESTResponse object to be deserialized.
        :param response_type: class literal for
            deserialized object, or string of class name.

        :return: deserialized object.
        """
        # handle file downloading
        # save response body into a tmp file and return the instance
        if response_type == "file":
            return self.__deserialize_file(response)

        # fetch data from response object
        try:
            data = json.loads(response.data)
        except ValueError:
            data = response.data

        return self.__deserialize(data, response_type)

    def __deserialize(self, data, klass):
        """Deserializes dict, list, str into an object.

        :param data: dict, list or str.
        :param klass: class literal, or string of class name.

        :return: object.
        """
        if data is None:
            return None

        if type(klass) == str:
            if klass.startswith('list['):
                sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
                return [
                    self.__deserialize(sub_data, sub_kls) for sub_data in data
                ]

            if klass.startswith('dict('):
                sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
                return {
                    k: self.__deserialize(v, sub_kls)
                    for k, v in six.iteritems(data)
                }

            # convert str to class
            if klass in self.NATIVE_TYPES_MAPPING:
                klass = self.NATIVE_TYPES_MAPPING[klass]
            else:
                klass = getattr(intrinio_sdk.models, klass)

        if klass in self.PRIMITIVE_TYPES:
            return self.__deserialize_primitive(data, klass)
        elif klass == object:
            return self.__deserialize_object(data)
        elif klass == datetime.date:
            return self.__deserialize_date(data)
        elif klass == datetime.datetime:
            return self.__deserialize_datatime(data)
        else:
            return self.__deserialize_model(data, klass)

    def call_api(self,
                 resource_path,
                 method,
                 path_params=None,
                 query_params=None,
                 header_params=None,
                 body=None,
                 post_params=None,
                 files=None,
                 response_type=None,
                 auth_settings=None,
                 _async=None,
                 _return_http_data_only=None,
                 collection_formats=None,
                 _preload_content=True,
                 _request_timeout=None):
        """Makes the HTTP request (synchronous) and returns deserialized data.

        To make an async request, set the `_async` parameter.

        :param resource_path: Path to method endpoint.
        :param method: Method to call.
        :param path_params: Path parameters in the url.
        :param query_params: Query parameters in the url.
        :param header_params: Header parameters to be
            placed in the request header.
        :param body: Request body.
        :param post_params dict: Request post form parameters,
            for `application/x-www-form-urlencoded`, `multipart/form-data`.
        :param auth_settings list: Auth Settings names for the request.
        :param response: Response data type.
        :param files dict: key -> filename, value -> filepath,
            for `multipart/form-data`.
        :param _async bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param collection_formats: dict of collection formats for path, query,
            header, and post parameters.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return:
            If the `_async` parameter is True,
            the request will be called asynchronously.
            The method will return the request thread.
            If `_async` is False or missing,
            then the method will return the response directly.
        """
        if not _async:
            return self.__call_api(resource_path, method, path_params,
                                   query_params, header_params, body,
                                   post_params, files, response_type,
                                   auth_settings, _return_http_data_only,
                                   collection_formats, _preload_content,
                                   _request_timeout)
        else:
            thread = self.pool.apply_async(
                self.__call_api,
                (resource_path, method, path_params, query_params,
                 header_params, body, post_params, files, response_type,
                 auth_settings, _return_http_data_only, collection_formats,
                 _preload_content, _request_timeout))
        return thread

    def request(self,
                method,
                url,
                query_params=None,
                headers=None,
                post_params=None,
                body=None,
                _preload_content=True,
                _request_timeout=None):
        """Makes the HTTP request using RESTClient."""
        if method == "GET":
            return self.rest_client.GET(url,
                                        query_params=query_params,
                                        _preload_content=_preload_content,
                                        _request_timeout=_request_timeout,
                                        headers=headers)
        elif method == "HEAD":
            return self.rest_client.HEAD(url,
                                         query_params=query_params,
                                         _preload_content=_preload_content,
                                         _request_timeout=_request_timeout,
                                         headers=headers)
        elif method == "OPTIONS":
            return self.rest_client.OPTIONS(url,
                                            query_params=query_params,
                                            headers=headers,
                                            post_params=post_params,
                                            _preload_content=_preload_content,
                                            _request_timeout=_request_timeout,
                                            body=body)
        elif method == "POST":
            return self.rest_client.POST(url,
                                         query_params=query_params,
                                         headers=headers,
                                         post_params=post_params,
                                         _preload_content=_preload_content,
                                         _request_timeout=_request_timeout,
                                         body=body)
        elif method == "PUT":
            return self.rest_client.PUT(url,
                                        query_params=query_params,
                                        headers=headers,
                                        post_params=post_params,
                                        _preload_content=_preload_content,
                                        _request_timeout=_request_timeout,
                                        body=body)
        elif method == "PATCH":
            return self.rest_client.PATCH(url,
                                          query_params=query_params,
                                          headers=headers,
                                          post_params=post_params,
                                          _preload_content=_preload_content,
                                          _request_timeout=_request_timeout,
                                          body=body)
        elif method == "DELETE":
            return self.rest_client.DELETE(url,
                                           query_params=query_params,
                                           headers=headers,
                                           _preload_content=_preload_content,
                                           _request_timeout=_request_timeout,
                                           body=body)
        else:
            raise ValueError("http method must be `GET`, `HEAD`, `OPTIONS`,"
                             " `POST`, `PATCH`, `PUT` or `DELETE`.")

    def parameters_to_tuples(self, params, collection_formats):
        """Get parameters as list of tuples, formatting collections.

        :param params: Parameters as dict or list of two-tuples
        :param dict collection_formats: Parameter collection formats
        :return: Parameters as list of tuples, collections formatted
        """
        new_params = []
        if collection_formats is None:
            collection_formats = {}
        for k, v in six.iteritems(params) if isinstance(
                params, dict) else params:  # noqa: E501
            if k in collection_formats:
                collection_format = collection_formats[k]
                if collection_format == 'multi':
                    new_params.extend((k, value) for value in v)
                else:
                    if collection_format == 'ssv':
                        delimiter = ' '
                    elif collection_format == 'tsv':
                        delimiter = '\t'
                    elif collection_format == 'pipes':
                        delimiter = '|'
                    else:  # csv is the default
                        delimiter = ','
                    new_params.append(
                        (k, delimiter.join(str(value) for value in v)))
            else:
                new_params.append((k, v))
        return new_params

    def prepare_post_parameters(self, post_params=None, files=None):
        """Builds form parameters.

        :param post_params: Normal form parameters.
        :param files: File parameters.
        :return: Form parameters with files.
        """
        params = []

        if post_params:
            params = post_params

        if files:
            for k, v in six.iteritems(files):
                if not v:
                    continue
                file_names = v if type(v) is list else [v]
                for n in file_names:
                    with open(n, 'rb') as f:
                        filename = os.path.basename(f.name)
                        filedata = f.read()
                        mimetype = (mimetypes.guess_type(filename)[0]
                                    or 'application/octet-stream')
                        params.append(
                            tuple([k, tuple([filename, filedata, mimetype])]))

        return params

    def select_header_accept(self, accepts):
        """Returns `Accept` based on an array of accepts provided.

        :param accepts: List of headers.
        :return: Accept (e.g. application/json).
        """
        if not accepts:
            return

        accepts = [x.lower() for x in accepts]

        if 'application/json' in accepts:
            return 'application/json'
        else:
            return ', '.join(accepts)

    def select_header_content_type(self, content_types):
        """Returns `Content-Type` based on an array of content_types provided.

        :param content_types: List of content-types.
        :return: Content-Type (e.g. application/json).
        """
        if not content_types:
            return 'application/json'

        content_types = [x.lower() for x in content_types]

        if 'application/json' in content_types or '*/*' in content_types:
            return 'application/json'
        else:
            return content_types[0]

    def update_params_for_auth(self, headers, querys, auth_settings):
        """Updates header and query params based on authentication setting.

        :param headers: Header parameters dict to be updated.
        :param querys: Query parameters tuple list to be updated.
        :param auth_settings: Authentication setting identifiers list.
        """
        if not auth_settings:
            return

        for auth in auth_settings:
            auth_setting = self.configuration.auth_settings().get(auth)
            if auth_setting:
                if not auth_setting['value']:
                    continue
                elif auth_setting['in'] == 'header':
                    headers[auth_setting['key']] = auth_setting['value']
                elif auth_setting['in'] == 'query':
                    querys.append((auth_setting['key'], auth_setting['value']))
                else:
                    raise ValueError(
                        'Authentication token must be in `query` or `header`')

    def __deserialize_file(self, response):
        """Deserializes body to file

        Saves response body into a file in a temporary folder,
        using the filename from the `Content-Disposition` header if provided.

        :param response:  RESTResponse.
        :return: file path.
        """
        fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
        os.close(fd)
        os.remove(path)

        content_disposition = response.getheader("Content-Disposition")
        if content_disposition:
            filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
                                 content_disposition).group(1)
            path = os.path.join(os.path.dirname(path), filename)

        with open(path, "wb") as f:
            f.write(response.data)

        return path

    def __deserialize_primitive(self, data, klass):
        """Deserializes string to primitive type.

        :param data: str.
        :param klass: class literal.

        :return: int, long, float, str, bool.
        """
        try:
            return klass(data)
        except UnicodeEncodeError:
            return six.u(data)
        except TypeError:
            return data

    def __deserialize_object(self, value):
        """Return a original value.

        :return: object.
        """
        return value

    def __deserialize_date(self, string):
        """Deserializes string to date.

        :param string: str.
        :return: date.
        """
        try:
            from dateutil.parser import parse
            return parse(string).date()
        except ImportError:
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason="Failed to parse `{0}` as date object".format(string))

    def __deserialize_datatime(self, string):
        """Deserializes string to datetime.

        The string should be in iso8601 datetime format.

        :param string: str.
        :return: datetime.
        """
        try:
            from dateutil.parser import parse
            return parse(string)
        except ImportError:
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason=(
                    "Failed to parse `{0}` as datetime object".format(string)))

    def __deserialize_model(self, data, klass):
        """Deserializes list or dict to model.

        :param data: dict, list.
        :param klass: class literal.
        :return: model object.
        """

        if not klass.swagger_types and not hasattr(klass,
                                                   'get_real_child_model'):
            return data

        kwargs = {}
        if klass.swagger_types is not None:
            for attr, attr_type in six.iteritems(klass.swagger_types):
                if (data is not None and klass.attribute_map[attr] in data
                        and isinstance(data, (list, dict))):
                    value = data[klass.attribute_map[attr]]
                    kwargs[attr] = self.__deserialize(value, attr_type)

        instance = klass(**kwargs)

        if hasattr(instance, 'get_real_child_model'):
            klass_name = instance.get_real_child_model(data)
            if klass_name:
                instance = self.__deserialize(data, klass_name)
        return instance
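
For reference, call_api() with _async=True returns the AsyncResult produced by pool.apply_async(), so the caller retrieves the response with .get(). A hypothetical sketch (the endpoint, response type, and auth settings below are placeholders, not part of this client):

client = ApiClient()
thread = client.call_api('/example', 'GET',
                         response_type='object',
                         auth_settings=[],
                         _async=True)
result = thread.get()  # blocks until the worker thread finishes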
Example #33
def pd_read_file(path_glob="*.pkl",
                 ignore_index=True,
                 cols=None,
                 verbose=False,
                 nrows=-1,
                 concat_sort=True,
                 n_pool=1,
                 drop_duplicates=None,
                 col_filter=None,
                 col_filter_val=None,
                 dtype=None,
                 **kw):
    """  Read file in parallel from disk : very Fast
  :param path_glob: list of pattern, or sep by ";"
  :return:
  """
    import glob, gc, pandas as pd, os

    def log(*s, **kw):
        print(*s, flush=True, **kw)

    readers = {
        ".pkl": pd.read_pickle,
        ".parquet": pd.read_parquet,
        ".tsv": pd.read_csv,
        ".csv": pd.read_csv,
        ".txt": pd.read_csv,
        ".zip": pd.read_csv,
        ".gzip": pd.read_csv,
        ".gz": pd.read_csv,
    }
    from multiprocessing.pool import ThreadPool

    #### File
    if isinstance(path_glob, list): path_glob = ";".join(path_glob)
    path_glob = path_glob.split(";")
    file_list = []
    for pi in path_glob:
        file_list.extend(sorted(glob.glob(pi)))
    file_list = sorted(list(set(file_list)))
    n_file = len(file_list)
    if verbose: log(file_list)

    #### Pool count
    if n_pool < 1: n_pool = 1
    if n_file <= 0: m_job = 0
    elif n_file <= 2:
        m_job = n_file
        n_pool = 1
    else:
        m_job = 1 + n_file // n_pool if n_file >= 3 else 1
    if verbose: log(n_file, n_file // n_pool)

    pool = ThreadPool(processes=n_pool)
    dfall = pd.DataFrame()
    for j in range(0, m_job):
        if verbose: log("Pool", j, end=",")
        job_list = []
        for i in range(n_pool):
            if n_pool * j + i >= n_file: break
            filei = file_list[n_pool * j + i]
            ext = os.path.splitext(filei)[1]
            if not ext:
                continue

            pd_reader_obj = readers.get(ext)
            if pd_reader_obj is None:
                continue

            ### TODO : use with keyword arguments
            job_list.append(pool.apply_async(pd_reader_obj, (filei, )))
            if verbose: log(j, filei)

        for i in range(n_pool):
            if i >= len(job_list): break
            dfi = job_list[i].get()

            if dtype is not None:
                dfi = pd_dtype_reduce(dfi, int0='int32', float0='float32')
            if col_filter is not None:
                dfi = dfi[dfi[col_filter] == col_filter_val]
            if cols is not None: dfi = dfi[cols]
            if nrows > 0: dfi = dfi.iloc[:nrows, :]
            if drop_duplicates is not None:
                dfi = dfi.drop_duplicates(drop_duplicates)
            gc.collect()

            dfall = pd.concat((dfall, dfi),
                              ignore_index=ignore_index,
                              sort=concat_sort)
            #log("Len", n_pool*j + i, len(dfall))
            del dfi
            gc.collect()

    if m_job > 0 and verbose: log(n_file, j * n_file // n_pool)
    return dfall
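A minimal usage sketch for pd_read_file as defined above; the file patterns and column names are hypothetical. Patterns can be passed either as a list or as a single string separated by ";", and n_pool controls how many files the ThreadPool reads concurrently.

# Hypothetical call: read all parquet and csv exports under ./data with 4 reader
# threads, keep two columns, and drop duplicate ids before concatenation.
df = pd_read_file(path_glob=["./data/*.parquet", "./data/*.csv"],
                  cols=["id", "value"],
                  n_pool=4,
                  drop_duplicates=["id"],
                  verbose=True)
print(df.shape)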
eideticker.copy_dashboard_files(outputdir)

metadatadir = os.path.join(outputdir, 'metadata')
videodir = os.path.join(outputdir, 'videos')
profiledir = os.path.join(outputdir, 'profiles')

devices = requests.get(baseurl + 'devices.json')
save_file(os.path.join(outputdir, 'devices.json'), devices.content)

device_names = devices.json()['devices'].keys()

pool = ThreadPool()
for device_name in device_names:
    tests = requests.get(baseurl + '%s/tests.json' % device_name)
    devicedir = os.path.join(outputdir, device_name)
    create_dir(devicedir)
    save_file(os.path.join(devicedir, 'tests.json'), tests.content)
    testnames = tests.json()['tests'].keys()
    for testname in testnames:
        pool.apply_async(download_testdata,
                         [baseurl + '%s/%s.json' % (device_name, testname),
                          baseurl,
                          os.path.join(outputdir, device_name,
                                       '%s.json' % testname),
                          options,
                          metadatadir, videodir, profiledir])

pool.close()
pool.join()
Exemple #35
0
 def start_thread(self):
     pool = ThreadPool(processes=1)
     self.thread = pool.apply_async(
         self.process_batch, (self.file_path, self.charmap,
                              self.noise_types, self.frame_overlap_flag))
Exemple #36
0
class Scheduler(MooseObject):
    """
    Base class for handling jobs asynchronously. To use this class, call .schedule()
    and supply a list of testers to schedule. Each group of testers supplied will begin
    running immediately.

    Syntax:
       .schedule([list of tester objects])

    A list of testers will be added to a queue and begin calling their derived run method.
    You can continue to add more testers to the queue in this fashion.

    Once all jobs have been scheduled, call .waitFinish() to wait until all jobs have
    finished.
    """
    @staticmethod
    def validParams():
        params = MooseObject.validParams()
        params.addRequiredParam('average_load', 64.0, "Average load to allow")
        params.addRequiredParam('max_processes', None,
                                "Hard limit of maxium processes to use")
        params.addParam(
            'min_reported_time', 10,
            "The minimum time elapsed before a job is reported as taking to long to run."
        )

        return params

    # This is what will be checked for when we look for valid schedulers
    IS_SCHEDULER = True

    def __init__(self, harness, params):
        MooseObject.__init__(self, harness, params)

        ## The test harness to run callbacks on
        self.harness = harness

        # Retrieve and store the TestHarness options for use in this object
        self.options = harness.getOptions()

        # The Scheduler class can be initialized with no "max_processes" argument and it'll default
        # to a soft limit. If however a max_processes is passed we'll treat it as a hard limit.
        # The difference is whether or not we allow single jobs to exceed the number of slots.
        if params['max_processes'] is None:
            self.available_slots = 1
            self.soft_limit = True
        else:
            self.available_slots = params['max_processes']  # hard limit
            self.soft_limit = False

        self.average_load = params['average_load']

        self.min_report_time = params['min_reported_time']

        # Initialize run_pool based on available slots
        self.run_pool = ThreadPool(processes=self.available_slots)

        # Initialize status_pool to only use 1 process (to prevent status messages from getting clobbered)
        self.status_pool = ThreadPool(processes=1)

        # Slot lock when processing resource allocations and modifying slots_in_use
        self.slot_lock = threading.Lock()

        # Job lock when modifying a jobs status
        self.activity_lock = threading.Lock()

        # The total of processors + threads (-j/-n) currently in use by running jobs
        self.slots_in_use = 0

        # List of Lists containing all scheduled jobs
        self.__scheduled_jobs = []

        # Set containing jobs entering the run_pool
        self.__job_bank = set([])

        # Total running Job and Test failures encountered
        self.__failures = 0

        # Allow threads to set a global exception
        self.__error_state = False

        # Private set of jobs currently running
        self.__active_jobs = set([])

        # Jobs that are taking longer to finish than the allotted time are reported back early to inform
        # the user 'stuff' is still running. Jobs entering this set will not be reported again.
        self.jobs_reported = set([])

        # The last time the scheduler reported something
        self.last_reported_time = clock()

        # Sets of threading objects created by jobs entering and exiting the queues. When scheduler.waitFinish()
        # is called, and both thread pools are empty, the pools shut down, and the call to waitFinish() returns.
        self.__status_pool_lock = threading.Lock()
        self.__runner_pool_lock = threading.Lock()
        self.__status_pool_jobs = set([])
        self.__runner_pool_jobs = set([])

        # True when scheduler.waitFinish() is called. This alerts the scheduler that no more jobs are
        # to be scheduled. KeyboardInterrupts are then handled by the thread pools.
        self.__waiting = False

    def triggerErrorState(self):
        self.__error_state = True
        self.run_pool.close()
        self.status_pool.close()

    def killRemaining(self, keyboard=False):
        """ Method to kill running jobs """
        with self.activity_lock:
            for job in self.__active_jobs:
                job.killProcess()
        if keyboard:
            self.triggerErrorState()
            self.harness.keyboard_interrupt()
        else:
            self.triggerErrorState()

    def retrieveJobs(self):
        """ return all the jobs the scheduler was tasked to perform work for """
        return self.__scheduled_jobs

    def schedulerError(self):
        """ boolean if the scheduler prematurely exited """
        return self.__error_state and not self.maxFailures()

    def maxFailures(self):
        """ Boolean for hitting max failures """
        return ((self.options.valgrind_mode
                 and self.__failures >= self.options.valgrind_max_fails)
                or self.__failures >= self.options.max_fails)

    def run(self, job):
        """ Call derived run method """
        return

    def notifyFinishedSchedulers(self):
        """ Notify derived schedulers we are finished """
        return

    def augmentJobs(self, Jobs):
        """
        Allow derived schedulers to augment Jobs before they perform work.
        Note: This occurs before we perform a job count sanity check. So
        any additions or subtractions to the number of jobs will result in
        an exception.
        """
        return

    def waitFinish(self):
        """
        Inform the Scheduler there are no further jobs to schedule.
        Return once all jobs have completed.
        """
        self.__waiting = True
        try:
            # wait until there is an error, or all the queues are empty
            waiting_on_status_pool = True
            waiting_on_runner_pool = True

            while (waiting_on_status_pool
                   or waiting_on_runner_pool) and self.__job_bank:

                if self.__error_state:
                    break

                with self.__status_pool_lock:
                    waiting_on_status_pool = sum(
                        1 for x in self.__status_pool_jobs if not x.ready())
                with self.__runner_pool_lock:
                    waiting_on_runner_pool = sum(
                        1 for x in self.__runner_pool_jobs if not x.ready())

                sleep(0.1)

            # Completed all jobs sanity check
            if not self.__error_state and self.__job_bank:
                raise SchedulerError(
                    'Scheduler exiting with different amount of work than what was tasked!'
                )

            if not self.__error_state:
                self.run_pool.close()
                self.run_pool.join()
                self.status_pool.close()
                self.status_pool.join()

            # allow derived schedulers to perform any exit routines
            self.notifyFinishedSchedulers()

        except KeyboardInterrupt:
            self.killRemaining(keyboard=True)

    def schedule(self, testers):
        """
        Generate and submit a group of testers to a thread pool queue for execution.
        """
        # If we are not to schedule any more jobs for some reason, return now
        if self.__error_state:
            return

        # Instance our job DAG, create jobs, and a private lock for this group of jobs (testers)
        Jobs = JobDAG(self.options)
        j_dag = Jobs.createJobs(testers)
        j_lock = threading.Lock()

        # Allow derived schedulers access to the jobs before they launch
        self.augmentJobs(Jobs)

        # job-count to tester-count sanity check
        if j_dag.size() != len(testers):
            raise SchedulerError(
                'Scheduler was going to run a different amount of testers than what was received (something bad happened)!'
            )

        # Store all jobs in the global job bank. As jobs finish, they will be removed from
        # this set. This will function as our final sanity check on 100% job completion
        with j_lock:
            self.__job_bank.update(j_dag.topological_sort())

        # Store all scheduled jobs
        self.__scheduled_jobs.append(j_dag.topological_sort())

        # Launch these jobs to perform work
        self.queueJobs(Jobs, j_lock)

    def queueJobs(self, Jobs, j_lock):
        """
        Determine which queue jobs should enter. Finished jobs are placed in the status
        pool to be printed while all others are placed in the runner pool to perform work.

        A finished job will trigger a change to the Job DAG, which will allow additional
        jobs to become available and ready to enter the runner pool (dependency jobs).
        """
        with j_lock:
            concurrent_jobs = Jobs.getJobsAndAdvance()
            for job in concurrent_jobs:
                if job.isFinished():
                    if not self.status_pool._state:
                        with self.__status_pool_lock:
                            self.__status_pool_jobs.add(
                                self.status_pool.apply_async(
                                    self.jobStatus, (job, Jobs, j_lock)))

                elif job.isHold():
                    if not self.run_pool._state:
                        with self.__runner_pool_lock:
                            job.setStatus(job.queued)
                            self.__runner_pool_jobs.add(
                                self.run_pool.apply_async(
                                    self.runJob, (job, Jobs, j_lock)))

    def getLoad(self):
        """ Method to return current load average """
        loadAverage = 0.0
        try:
            loadAverage = os.getloadavg()[0]
        except AttributeError:
            pass  # getloadavg() not available in this implementation of os
        return loadAverage

    def satisfyLoad(self):
        """ Method for controlling load average """
        while self.slots_in_use > 1 and self.getLoad() >= self.average_load:
            sleep(1.0)

    def reserveSlots(self, job, j_lock):
        """
        Method which allocates resources to perform the job. Returns bool if job
        should be allowed to run based on available resources.
        """
        # comply with load average
        if self.options.load:
            self.satisfyLoad()

        with self.slot_lock:
            can_run = False
            if self.slots_in_use + job.getSlots() <= self.available_slots:
                can_run = True

            # Check for insufficient slots -soft limit
            elif job.getSlots() > self.available_slots and self.soft_limit:
                job.addCaveats('OVERSIZED')
                can_run = True

            # Check for insufficient slots -hard limit (skip this job)
            elif job.getSlots() > self.available_slots and not self.soft_limit:
                job.addCaveats('insufficient slots')
                with j_lock:
                    job.setStatus(job.skip)

            if can_run:
                self.slots_in_use += job.getSlots()
        return can_run

    def handleTimeoutJob(self, job, j_lock):
        """ Handle jobs that have timed out """
        with j_lock:
            if job.isRunning():
                job.setStatus(job.timeout, 'TIMEOUT')
                job.killProcess()

    def handleLongRunningJob(self, job, Jobs, j_lock):
        """ Handle jobs that have not reported in the alotted time """
        with self.__status_pool_lock:
            self.__status_pool_jobs.add(
                self.status_pool.apply_async(self.jobStatus,
                                             (job, Jobs, j_lock)))

    def jobStatus(self, job, Jobs, j_lock):
        """
        Instruct the TestHarness to print the status of job. This is a serial
        threaded operation, so as to prevent clobbering of text being printed
        to stdout.
        """
        # The pool is closing down due to a failure, or this job has previously been handled:
        #
        # A job which triggers the long_running timer has a chance to finish before this
        # slower, serialized status pool has a chance to process it. Meaning two of the same
        # jobs can now exist in this queue, with a finished status. This method can only work on
        # a finished job object once (a set removal operation occurs to signify scheduled job
        # completion as a sanity check).
        if self.status_pool._state or job not in self.__job_bank:
            return

        # Perform within a try, to allow keyboard ctrl-c
        try:
            with j_lock:
                if job.isRunning():
                    # already reported this job once before
                    if job in self.jobs_reported:
                        return

                    # this job will be reported as 'RUNNING'
                    elif clock(
                    ) - self.last_reported_time >= self.min_report_time:
                        job.addCaveats('FINISHED')

                        with self.activity_lock:
                            self.jobs_reported.add(job)

                    # TestHarness has not yet been inactive long enough to warrant a report
                    else:
                        # adjust the next report time based on delta of last report time
                        adjusted_interval = max(
                            1, self.min_report_time -
                            max(1,
                                clock() - self.last_reported_time))
                        job.report_timer = threading.Timer(
                            adjusted_interval, self.handleLongRunningJob, (
                                job,
                                Jobs,
                                j_lock,
                            ))
                        job.report_timer.start()
                        return

                # Inform the TestHarness of job status
                self.harness.handleJobStatus(job)

                # Reset activity clock
                if not job.isSilent():
                    self.last_reported_time = clock()

                if job.isFail():
                    self.__failures += 1

                if job.isFinished():
                    if job in self.__job_bank:
                        self.__job_bank.remove(job)
                    else:
                        raise SchedulerError(
                            'job accountability failure while working with: %s'
                            % (job.getTestName()))

            # Max failure threshold reached, begin shutdown
            if self.maxFailures():
                self.killRemaining()

        except Exception:
            print('statusWorker Exception: %s' % (traceback.format_exc()))
            self.killRemaining()

        except KeyboardInterrupt:
            self.killRemaining(keyboard=True)

    def runJob(self, job, Jobs, j_lock):
        """ Method the run_pool calls when an available thread becomes ready """
        # It's possible the queue is just trying to empty. Allow it to do so
        # without generating overhead
        if self.__error_state:
            return

        try:
            # see if we have enough slots to start this job
            if self.reserveSlots(job, j_lock):
                with j_lock:
                    job.setStatus(job.running)

                with self.activity_lock:
                    self.__active_jobs.add(job)

                timeout_timer = threading.Timer(float(job.getMaxTime()),
                                                self.handleTimeoutJob, (
                                                    job,
                                                    j_lock,
                                                ))

                job.report_timer = threading.Timer(self.min_report_time,
                                                   self.handleLongRunningJob, (
                                                       job,
                                                       Jobs,
                                                       j_lock,
                                                   ))

                job.report_timer.start()
                timeout_timer.start()
                self.run(job)  # Hand execution over to derived scheduler
                timeout_timer.cancel()

                # Recover worker count before attempting to queue more jobs
                with self.slot_lock:
                    self.slots_in_use = max(0,
                                            self.slots_in_use - job.getSlots())

                # Stop the long running timer
                job.report_timer.cancel()

                # All done
                with j_lock:
                    job.setStatus(job.finished)

                with self.activity_lock:
                    self.__active_jobs.remove(job)

            # Not enough slots to run the job...
            else:
                # ...so place it back on hold before it re-enters the queue
                if not job.isFinished():
                    with j_lock:
                        job.setStatus(job.hold)
                    sleep(.1)

            # Job is done (or needs to re-enter the queue)
            self.queueJobs(Jobs, j_lock)

        except Exception:
            print('runWorker Exception: %s' % (traceback.format_exc()))
            self.killRemaining()

        except KeyboardInterrupt:
            self.killRemaining(keyboard=True)
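A hedged sketch of how the Scheduler above might be used, following its class docstring. The derived class name, the job.run() call, and the harness/params objects are assumptions for illustration, not code from this document.

class RunParallel(Scheduler):
    """ Assumed derived scheduler; per the base class, only run() needs overriding. """
    def run(self, job):
        # Hypothetical entry point: hand the job to its tester for execution.
        job.run()

# Hypothetical driver code, mirroring the usage described in the docstring:
#   scheduler = RunParallel(harness, params)
#   scheduler.schedule(list_of_testers)   # may be called repeatedly with more testers
#   scheduler.waitFinish()                # returns once every scheduled job has completed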
Exemple #37
0
def run_workflow(client, partial_analysis_func, n_workers, run_time, runner_kwargs=None):
    """Run an analysis function against a ReadUntilClient

    Parameters
    ----------
    client : read_until.ReadUntilClient
        An instance of the ReadUntilClient object
    partial_analysis_func : partial function
        Analysis function to process reads, should
        exit when client.is_running == False
    n_workers : int
        Number of analysis worker functions to run
    run_time : int
        Time, in seconds, to run the analysis for
    runner_kwargs : dict
        Keyword arguments to pass to client.run()

    Returns
    -------
    list
        Results from the analysis function, one item per worker

    """
    if runner_kwargs is None:
        runner_kwargs = dict()

    logger = logging.getLogger("Manager")

    results = []
    pool = ThreadPool(n_workers)
    logger.info("Creating {} workers".format(n_workers))
    try:
        # start the client
        client.run(**runner_kwargs)
        # start a pool of workers
        for _ in range(n_workers):
            results.append(pool.apply_async(partial_analysis_func))
        pool.close()
        # wait a bit before closing down
        time.sleep(run_time)
        logger.info("Sending reset")
        client.reset()
        pool.join()
    except KeyboardInterrupt:
        logger.info("Caught ctrl-c, terminating workflow.")
        client.reset()
    except Exception:
        client.reset()
        raise

    # collect results (if any)
    collected = []
    for result in results:
        try:
            res = result.get(5)
        except TimeoutError:
            logger.warning("Worker function did not exit successfully.")
            # collected.append(None)
        except Exception as e:
            logger.exception("EXCEPT", exc_info=e)
            # logger.warning("Worker raise exception: {}".format(repr(e)))
        else:
            logger.info("Worker exited successfully.")
            collected.append(res)
    pool.terminate()
    return collected
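A minimal usage sketch for run_workflow above. The analysis_worker function and the client construction are illustrative assumptions rather than part of the read_until API shown here; functools.partial binds the extra arguments so each pool worker receives a zero-argument callable.

import time
import functools

# Hypothetical analysis function; per the docstring it should exit once
# client.is_running becomes False.
def analysis_worker(client, batch_size=50):
    n_batches = 0
    while client.is_running:
        # Pull and process a batch of reads here (omitted), then pause briefly.
        time.sleep(1)
        n_batches += 1
    return n_batches

# Assuming `client` is an already constructed read_until.ReadUntilClient:
# results = run_workflow(client,
#                        functools.partial(analysis_worker, client, batch_size=100),
#                        n_workers=4,
#                        run_time=60)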
Exemple #38
0
    def cleanup_batch(self,
                      job_ids,
                      threads=None,
                      chunk_size=None,
                      callback=None,
                      **kwargs):
        """
        Cleans up a batch of jobs given by *job_ids* via a thread pool of size *threads* which
        defaults to its instance attribute. When *chunk_size*, which defaults to
        :py:attr:`chunk_size_cleanup`, is not negative, *job_ids* are split into chunks of that size
        which are passed to :py:meth:`cleanup`. When *callback* is set, it is invoked after each
        successful job (or job chunk) cleaning with the index of the corresponding job id (starting
        at 0) and either *None* or an exception if any occurred. All other *kwargs* are passed to
        :py:meth:`cleanup`.

        Exceptions that occurred during job cleaning are stored in a list and returned. An empty list
        means that no exceptions occurred.
        """
        # default arguments
        threads = max(threads or self.threads or 1, 1)

        # is chunking allowed?
        if self.chunk_size_cleanup:
            chunk_size = max(chunk_size or self.chunk_size_cleanup, 0)
        else:
            chunk_size = 0
        chunking = chunk_size > 0

        # build chunks (either job ids one by one, or real chunks of job ids)
        job_ids = make_list(job_ids)
        chunks = list(iter_chunks(job_ids,
                                  chunk_size)) if chunking else job_ids

        # factory to call the passed callback for each job id even when chunking
        def cb_factory(i):
            if not callable(callback):
                return None
            elif chunking:

                def wrapper(err):
                    offset = sum(len(chunk) for chunk in chunks[:i])
                    for j in range(len(chunks[i])):
                        callback(offset + j, err)

                return wrapper
            else:

                def wrapper(err):
                    callback(i, err)

                return wrapper

        # threaded processing
        pool = ThreadPool(threads)
        results = [
            pool.apply_async(self.cleanup, (v, ),
                             kwargs,
                             callback=cb_factory(i))
            for i, v in enumerate(chunks)
        ]
        pool.close()
        pool.join()

        # store errors
        errors = list(filter(
            bool, flatten(get_async_result_silent(res) for res in results)))

        return errors
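A hedged usage sketch for cleanup_batch. The manager instance and job ids are hypothetical, but the callback signature follows the docstring above; note that even with chunking enabled, the cb_factory wrapper still invokes the callback once per individual job id.

# Hypothetical job ids and callback; `manager` is assumed to be an instance of the
# class defining cleanup_batch above, with a working cleanup() method.
job_ids = ["job-1", "job-2", "job-3", "job-4", "job-5"]

def on_cleaned(index, error):
    # `index` refers to the position in job_ids, even when jobs are cleaned in chunks.
    if error is None:
        print("cleaned {}".format(job_ids[index]))
    else:
        print("failed to clean {}: {}".format(job_ids[index], error))

# errors = manager.cleanup_batch(job_ids, threads=4, chunk_size=2, callback=on_cleaned)
# An empty `errors` list means every job (or job chunk) was cleaned successfully.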
Exemple #39
0
ranges = []
for i in range(num_threads):
    if i == 0:
        start_byte = i * increment_size
    else:
        start_byte = (i * increment_size) + 1

    end_byte = (i * increment_size) + increment_size
    if end_byte > size:
        end_byte = size
    ranges.append((start_byte, end_byte))

pool = ThreadPool(processes=num_threads)
threads = []

for i in range(len(ranges)):
    threads.append(
        pool.apply_async(Http.get_partial_file,
                         (file_url, ranges[i][0], ranges[i][1])))

f = open(file_name + '.parallel', 'w')
data = ""
for thread in threads:
    data += thread.get()

f.write(data)
end_time = time.time()
f.close()

print('Downloaded file with {} threads in {} seconds'.format(
    num_threads, end_time - start_time))
Exemple #40
0
    """
    array_size = 20
    port = 8000 + stage * 20 + index * 2
    ctx = zmq.Context()
    s = ctx.socket(zmq.PUB)
    s.bind("tcp://*:%d" % port)

    def sync(port):
        sync_with = "tcp://*:%d" % port
        ctx = zmq.Context.instance()
        s = ctx.socket(zmq.REP)
        s.bind(sync_with)
        s.recv()
        s.send(b'GO')

    sync(port + 1)
    s.send_pyobj(cfg)


if __name__ == "__main__":
    from multiprocessing.pool import ThreadPool
    pool = ThreadPool(processes=6)
    dicts = [{u'MEFTRRHX': 8}, {u'MEFTRRHX': 9}, {u'MEFTRRHX': 6}]
    [ pool.apply_async(publisher, (0, _, dicts[_])) for _ in range(3) ]
    res_sub1 = pool.apply_async(subscriber, (0, 0))
    res_sub2 = pool.apply_async(subscriber, (0, 1))
    res_sub3 = pool.apply_async(subscriber, (0, 2))
    print(res_sub1.get())
    print(res_sub2.get())
    print(res_sub3.get())
Exemple #41
0
        else:
            return False


# keep the US and IR sensors in scanning mode
try:
    print("Initialising module.....")  #print("AI engine LOADING.............")
    client.load_ai_engine()  #PORT 9009 RPC hit
    print("Module initialised")
    pool = ThreadPool(processes=1)
    while True:
        # if US, IR detect object >>
        if (obj_detected_IR() or obj_detected_US()):  #
            # 1. FIRE camera
            total_strt_time = time.time()
            async_result = pool.apply_async(client.load_ai_server)
            print("Picam streaming ON")
            t2 = Thread(target=picamclient.stream_to_ai_server)
            t2.start()
            t2.join()
            return_val = async_result.get()
            total_end_time = time.time()
            print("%s identified in %.2f seconds" %
                  (str(return_val), (total_end_time - total_strt_time)))
            while (is_obstacle_there(sensor=OBSTACLE_DETECTOR)
                   ):  #ensure items being loaded gets weighed
                print("bird is still sitting...")
                #dont accept negative values
            #try
            #query
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    relative_path = params.get("relative_path")
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")
    if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0):
        test.cancel(
            "The restriction on using a relative path or file name only was added in libvirt-3.0.0"
        )

    if "--transient-job" in options and not libvirt_version.version_compare(
            4, 5, 0):
        test.cancel(
            "--transient-job option is only supported since libvirt 4.5.0")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" %
                                  (target, vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equal to original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True,
        'ignore_status': True,
        'timeout': timeout
    }

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf)
    libvirtd_utl.restart()

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here.
        if dest_extension and dest_path.endswith(dest_extension):
            path_noext = dest_path[:-len(dest_extension)]
        else:
            path_noext = dest_path
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" %
                                      (dest_path, expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fails with a block-job-complete error or
        blockcopy hangs with a state change lock.
        This verifies a specific bug, so status_error is ignored here.
        """
        failure_msg = ""
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            failure_msg += "Virsh cmd error happened: %s\n" % err_msg
        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            failure_msg += "Libvirtd log error happened: %s\n" % err_pattern
        if failure_msg:
            if not libvirt_version.version_compare(1, 3, 2):
                bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
                failure_msg += "Hit on bug: %s " % bug_url_
            test.fail(failure_msg)

    def _make_snapshot():
        """
        Make external disk snapshot
        """
        snap_xml = snapshot_xml.SnapshotXML()
        snapshot_name = "blockcopy_snap"
        snap_xml.snap_name = snapshot_name
        snap_xml.description = "blockcopy snapshot"

        # Add all disks into xml file.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        # Remove non-storage disks such as 'cdrom'. Filter into a new list rather
        # than removing while iterating, which would skip elements.
        disks = [disk for disk in disks if disk.device == 'disk']
        new_disks = []
        src_disk_xml = disks[0]
        disk_xml = snap_xml.SnapDiskXML()
        disk_xml.xmltreefile = src_disk_xml.xmltreefile
        del disk_xml.device
        del disk_xml.address
        disk_xml.snapshot = "external"
        disk_xml.disk_name = disk_xml.target['dev']

        # Only qcow2 works as external snapshot file format, update it
        # here
        driver_attr = disk_xml.driver
        driver_attr.update({'type': 'qcow2'})
        disk_xml.driver = driver_attr

        new_attrs = disk_xml.source.attrs
        if 'file' in disk_xml.source.attrs:
            new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap")
            snapshot_external_disks.append(new_file)
            new_attrs.update({'file': new_file})
            hosts = None
        elif ('dev' in disk_xml.source.attrs or 'name' in disk_xml.source.attrs
              or 'pool' in disk_xml.source.attrs):
            if (disk_xml.type_name == 'block'
                    or disk_source_protocol == 'iscsi'):
                disk_xml.type_name = 'block'
                if 'name' in new_attrs:
                    del new_attrs['name']
                    del new_attrs['protocol']
                elif 'pool' in new_attrs:
                    del new_attrs['pool']
                    del new_attrs['volume']
                    del new_attrs['mode']
                back_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size="1G",
                                                       emulated_image=back_n)
                emulated_iscsi.append(back_n)
                cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                process.run(cmd, shell=True)
                new_attrs.update({'dev': back_path})
                hosts = None

        new_src_dict = {"attrs": new_attrs}
        if hosts:
            new_src_dict.update({"hosts": hosts})
        disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

        new_disks.append(disk_xml)

        snap_xml.set_disks(new_disks)
        snapshot_xml_path = snap_xml.xml
        logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

        options = "--disk-only --xmlfile %s " % snapshot_xml_path

        snapshot_result = virsh.snapshot_create(vm_name, options, debug=True)

        if snapshot_result.exit_status != 0:
            raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk show up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desire type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is a bad idea as it leaves lots of cleanup jobs
            # after the test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow:
            _make_snapshot()

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine("--nvram")
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command to create destination file
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target, dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail: %s" %
                                          cmd_result.stdout.strip() +
                                          cmd_result.stderr)
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

        if "--transient-job" in options:
            pool = ThreadPool(processes=1)
            async_result = pool.apply_async(
                blockcopy_thread, (vm_name, target, dest_path, options))
            kill_blockcopy_process()
            utl.check_blockjob(vm_name, target)
            return

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path, options,
                                     **extra_dict)

        # check BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not (bandwidth
                            in ['0B', '0M']) and not utl.check_blockjob(
                                vm_name, target, "bandwidth", bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for job finish when using --bytes option
                val += options.count('--bytes')
                if val == 0:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name,
                                                   snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name,
                                     save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run blockjob pivot in subprocess as it will hang
                    # for a while, run blockjob info again to check
                    # job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout.strip() +
                                          cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when
                # unexpectedly failing due to timeout from a fail (1)
                # to a success (0), so we need to look for a different
                # marker to indicate the copy aborted. Since "stdout: Now
                # in mirroring phase" may appear in stdout and fail the
                # check, also check the libvirtd log to confirm.
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout.strip())
                            or chk_libvirtd_log(libvirtd_log_path, log_pattern,
                                                "debug")):
                        logging.debug("Found success a timed out block copy")
                else:
                    raise exceptions.TestFail("Expect fail, but run "
                                              "successfully: %s" % bug_url)
    finally:
        # Recovering the VM may fail unexpectedly, so use try/except to make
        # sure the following cleanup steps still proceed
        try:
            # Abort exist blockjob to avoid any possible lock error
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shutdown the VM which has
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name, ignore_status=True).
                exit_status, 180)
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up libvirt pool, which may be created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False,
                                           emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a break before next loop
                time.sleep(5)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart virtlogd service to release VM log file lock
        try:
            path.find_command('virtlogd')
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd ')
        except path.CmdNotFoundError:
            pass
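The "--transient-job" branch above hands the copy off to a one-worker ThreadPool via blockcopy_thread, a helper that is referenced but not shown here. A minimal sketch of what such a helper might look like, assuming it simply wraps virsh.blockcopy the same way the synchronous calls in this test do:

def blockcopy_thread(vm_name, target, dest_path, options):
    """ Sketch of the worker run by pool.apply_async for the --transient-job case. """
    # ignore_status so that killing the copy mid-flight does not raise in the worker;
    # the main thread checks the block job state afterwards via utl.check_blockjob().
    virsh.blockcopy(vm_name, target, dest_path, options,
                    ignore_status=True, debug=True)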
        'bucket_name': bucket_name
    }
    reduce_d = json.dumps(reduce_d)
    reduce_data_list.append(reduce_d)

client = 0
results = []

if worker == 'mapper':
    print 'start ' + str(n) + ' mappers'
if worker == 'reducer':
    print 'start ' + str(n) + ' reducers'

for i in range(n):
    if worker == 'mapper':
        r = pool.apply_async(invoke_map_lambda,
                             args=(map_data_list[i], client))
        #print str(i)
    if worker == 'reducer':
        r = pool.apply_async(invoke_reduce_lambda,
                             args=(reduce_data_list[i], client))
        #print str(i)
    results.append(r)

output = [p.get() for p in results]

count = 0
for i in output:
    count += 1
    print count

print 'finished'
Exemple #44
0
def main():

    opt_parser = OptionParser()

    # Add Options. Required options should have default=None
    opt_parser.add_option("-d",
                          dest="root_dir",
                          type="string",
                          help="""Root directory that contains subdirectoires
                                  with output from getASEventReadCounts""",
                          default=None)
    opt_parser.add_option("-i",
                          dest="input_dir",
                          type="string",
                          help="""Directory containing original input files to
                                  getASEventReadCounts.py. This is used to
                                  obtain the chromosome information.""",
                          default=None)
    opt_parser.add_option("--jcn_seq_len",
                          dest="jcn_seq_len",
                          type="int",
                          help="""Value used in getASEventReadCounts""",
                          default=None)
    opt_parser.add_option("-s",
                          dest="samples",
                          type="string",
                          help="""Comma separated list of the samples that will
                                  be used or a file of sample names, one per
                                  line. The order which they are given is
                                  the order in the output of the file.""",
                          default=None)
    #   opt_parser.add_option("--lengthNorm",
    #                         dest="lengthNorm",
    #                         action="store_true",
    #                         help="""Flag to indicate length normalization was
    #                                 done on the counts. Used for splitting the IR
    #                                 counts back into left and right counts""",
    #                         default=False)
    opt_parser.add_option(
        "--num_processes",
        dest="num_processes",
        type="int",
        help="""Will run each chromosome in batches using this
                                  number of parallel processes. DEF=%d""" %
        DEF_NUM_PROCESSES,
        default=DEF_NUM_PROCESSES)
    opt_parser.add_option("--print_cmd",
                          dest="print_cmd",
                          action="store_true",
                          help="""Will not run any processes, but print the
                                  commands""",
                          default=False)
    opt_parser.add_option("--check",
                          dest="check",
                          action="store_true",
                          help="Will check which samples are not finished.",
                          default=False)
    opt_parser.add_option(
        "--force",
        dest="force",
        action="store_true",
        help="""By default, will only run jobs that need to be
                                  completed. This will force to run all
                                  jobs.""",
        default=False)
    opt_parser.add_option("--run_LSF",
                          dest="run_lsf",
                          action="store_true",
                          help="Will run everything through LSF",
                          default=False)

    (options, args) = opt_parser.parse_args()

    # validate the command line arguments
    opt_parser.check_required("-d")
    opt_parser.check_required("-i")
    opt_parser.check_required("-s")
    opt_parser.check_required("--jcn_seq_len")

    root_dir = formatDir(options.root_dir)
    input_dir = formatDir(options.input_dir)
    # Change to the root directory to make sure output files are put here
    os.chdir(root_dir)

    samples = options.samples

    jcn_seq_len = options.jcn_seq_len

    #    lengthNorm = options.lengthNorm

    print_cmd = options.print_cmd
    check = options.check
    force = options.force

    num_processes = options.num_processes
    run_lsf = options.run_lsf

    chr_list = getChr(input_dir)

    ctr = 0

    tp = ThreadPool(num_processes)
    for this_chr in chr_list:
        files_are_present = False
        expected_out_files = [
            "%s/tmp_createAS_CountTables_%s_AS_exclusion_inclusion_counts.txt"
            % (root_dir, this_chr),
            "%s/tmp_createAS_CountTables_%s_left_intron_counts.txt" %
            (root_dir, this_chr),
            "%s/tmp_createAS_CountTables_%s_right_intron_counts.txt" %
            (root_dir, this_chr),
            "%s/tmp_createAS_CountTables_%s_AS_exclusion_inclusion_counts_lenNorm.txt"
            % (root_dir, this_chr),
            "%s/tmp_createAS_CountTables_%s_left_intron_counts_lenNorm.txt" %
            (root_dir, this_chr),
            "%s/tmp_createAS_CountTables_%s_right_intron_counts_lenNorm.txt" %
            (root_dir, this_chr)
        ]
        try:
            for expect_file in expected_out_files:
                if os.path.getsize(expect_file) == 0:
                    files_are_present = False
                    if check:
                        print(("Cannot find files for: %s" % this_chr))
                    break
                else:
                    files_are_present = True
        except OSError:
            if check:
                print(("Cannot find files for: %s" % this_chr))

        if check:
            continue

        if not force:
            if files_are_present:
                continue

        ctr += 1

        cmd = "python %s " % SCRIPT
        cmd += "-d %s " % root_dir
        cmd += "-o %s/tmp_createAS_CountTables_%s " % (root_dir, this_chr)
        #       cmd += "--left_intron tmp_createAS_CountTables_%s_left_intron.out " % this_chr
        #       cmd += "--right_intron tmp_createAS_CountTables_%s_right_intron.out " % this_chr
        cmd += "-s %s " % samples
        cmd += "--jcn_seq_len %d " % jcn_seq_len

        #       if lengthNorm:
        #           cmd += "--lengthNorm "

        cmd += "--which_chr %s" % this_chr

        if print_cmd:
            print(cmd)
            continue

        if run_lsf:
            runLSF(cmd, "%s.createAS_CountTables.bsub.out" % this_chr,
                   samples.replace(",", "-") + "_" + this_chr, "hour")
            continue

        print(cmd)
        sys.stdout.flush()
        tp.apply_async(launchCMD, (cmd, ))

    tp.close()
    tp.join()
    sys.exit(0)
Exemple #45
0
class PNUClient(object):
    def __init__(self):
        self.threads = 32
        self.pool_size = 32  # your "parallelness"
        self.pool = Pool(self.pool_size)

    def worker(self, intpage, out_q):
        strain_dict = {}
        list_strains = []
        textname = ''
        url = 'http://www.straininfo.net/taxa/{0}'.format(intpage)
        print url

        try:
            testfile = urllib.URLopener()
            testfile.retrieve(url, "toparse{0}.html".format(intpage))

            with open("toparse{0}.html".format(intpage), 'r') as htmlpagespe:
                author = False
                title = False
                spename = False
                species = ''
                dates = []
                for line in htmlpagespe:
                    if spename and len(species) == 0:
                        matchObj = re.match(
                            '\s+<td class="value"><span class="speciesname"><em>([^<]+)<\/em><\/span><\/td>',
                            line)
                        if matchObj:
                            # print matchObj.group(1)
                            species = matchObj.group(1)
                            species = re.sub(' +', ' ', species)

                        else:
                            matchObj_sub = re.match(
                                '\s+<td class="value"><span class="speciesname"><em>([^<]+)</em>subsp.<em>([^<]+)<\/em><\/span><\/td>',
                                line)
                            if matchObj_sub:
                                species = matchObj_sub.group(
                                    1) + " subsp. " + matchObj_sub.group(2)
                                species = re.sub(' +', ' ', species)

                    elif not spename and len(species) == 0:
                        matchObj = re.match(
                            '\s+<tr><td class="option">species<\/td>', line)
                        matchObj_sub = re.match(
                            '\s+<tr><td class="option">subspecies<\/td>', line)
                        if matchObj or matchObj_sub:
                            spename = True

                    else:
                        matchObj = re.match(
                            "\s*<div class='popup'>([^<]+)<strong>type strain<\/strong> of:<br\/>",
                            line)
                        if matchObj:
                            strain = matchObj.group(1).replace(" is ", "")
                            list_strains.append(strain)
                            list_strains.append(strain.replace(" ", ""))

                    if 'class="authors"' in line:
                        author = True
                        continue
                    if author and 'class="publication_title"' in line:
                        title = True
                        continue
                    if (author or title) and '</tbody>' in line:
                        author = False
                        title = False
                    elif author and title:
                        print line
                        matchObj_sub = re.match('\s+<div>([^<]+)<\/div>', line)
                        if matchObj_sub:
                            dates.append(matchObj_sub.group(1))
                            author = False
                            title = False

            os.remove("toparse{0}.html".format(intpage))
            print dates
            out_q.put(
                (species, "=".join(set(list_strains)), '/'.join(set(dates))))

        except IOError:
            print 'url does not exist.'

        return True

    def run(self, outfile):
        outf = open(outfile, 'w')
        outf.write("straininfo_strains_number\n")
        full_dict = {}
        manager = multiprocessing.Manager()
        out_q = manager.Queue()
        workers = [
            self.pool.apply_async(
                # self.worker, (i, out_q)) for i in [376, 377, 378]]
                self.worker,
                (i, out_q)) for i in range(400000)
        ]
        # Wait until at least one result has been queued, then close the pool
        # and wait for every worker to finish before draining the queue.
        while out_q.empty():
            time.sleep(1)

        self.pool.close()
        self.pool.join()

        while not out_q.empty():
            info = out_q.get()
            if info[0] != '' and info[1] != '':
                outf.write("{0}\t{1}\t{2}\n".format(info[0], info[1], info[2]))
        outf.close()
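
The class above funnels worker output through a managed Queue and busy-waits until the first item shows up. A simpler variant (a sketch, not the original code) keeps the AsyncResult objects returned by apply_async and reads the results from them after join():

from multiprocessing.pool import ThreadPool

def fetch(page_id):
    # stand-in for PNUClient.worker(); returns a (species, strains, dates) tuple
    return ("species-%d" % page_id, "", "")

pool = ThreadPool(processes=32)
async_results = [pool.apply_async(fetch, (i,)) for i in range(100)]
pool.close()
pool.join()                      # every submitted task has finished after join()
rows = [r.get() for r in async_results]
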
Exemple #46
0
    def find_in_scaling_range(cls,
                              image,
                              similarity=DEFAULT_SIMILARITY,
                              lowerEnd=0.8,
                              upperEnd=1.2):
        """Finds the location of the image on the screen. First the image is searched at its default scale,
        and if it isn't found, it will be resized using values inside the range provided until a match that satisfy
        the similarity value is found. If the image isn't found even after it has been resized, the method returns None.

        Args:
            image (string): Name of the image.
            similarity (float, optional): Defaults to DEFAULT_SIMILARITY.
                Percentage in similarity that the image should at least match
            lowerEnd (float, optional): Defaults to 0.8.
                Lowest scaling factor used for resizing.
            upperEnd (float, optional): Defaults to 1.2.
                Highest scaling factor used for resizing.

        Returns:
            Region: Coordinates or where the image appears.
        """
        template = cv2.imread('assets/{}/{}.png'.format(cls.assets, image), 0)
        # first try with default size
        width, height = template.shape[::-1]
        match = cv2.matchTemplate(screen, template, cv2.TM_CCOEFF_NORMED)
        value, location = cv2.minMaxLoc(match)[1], cv2.minMaxLoc(match)[3]
        if (value >= similarity):
            return Region(location[0], location[1], width, height)

        # resize and match using threads

        # change the scaling factor if the boss icon being searched for is small
        # (some events have a shipgirl as the boss fleet, with a small boss icon at her bottom right)
        if cls.small_boss_icon and image == 'enemy/fleet_boss':
            lowerEnd = 0.4
            upperEnd = 0.6

        # preparing interpolation methods
        middle_range = (upperEnd + lowerEnd) / 2.0
        if lowerEnd < 1 and upperEnd > 1 and middle_range == 1:
            l_interpolation = cv2.INTER_AREA
            u_interpolation = cv2.INTER_CUBIC
        elif upperEnd < 1 and lowerEnd < upperEnd:
            l_interpolation = cv2.INTER_AREA
            u_interpolation = cv2.INTER_AREA
        elif lowerEnd > 1 and upperEnd > lowerEnd:
            l_interpolation = cv2.INTER_CUBIC
            u_interpolation = cv2.INTER_CUBIC
        else:
            l_interpolation = cv2.INTER_NEAREST
            u_interpolation = cv2.INTER_NEAREST

        results_list = []
        regions_detected = []
        count = 0
        loop_limiter = (middle_range - lowerEnd) * 100

        # creating and launching worker processes
        pool = ThreadPool(processes=4)

        while (upperEnd > lowerEnd) and (count < loop_limiter):
            l_result = pool.apply_async(
                cls.resize_and_match,
                (template, lowerEnd, similarity, l_interpolation))
            u_result = pool.apply_async(
                cls.resize_and_match,
                (template, upperEnd, similarity, u_interpolation))
            cls.script_sleep(0.01)
            lowerEnd += 0.02
            upperEnd -= 0.02
            count += 1
            results_list.append(l_result)
            results_list.append(u_result)

        # closing pool and waiting for results
        pool.close()
        pool.join()

        # extract regions from async_result
        for i in range(0, len(results_list)):
            if results_list[i].get() is not None:
                regions_detected.append(results_list[i].get())

        if (len(regions_detected) > 0):
            return regions_detected[0]
        else:
            return None
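
A stripped-down sketch of the same idea, dispatching several candidate scales to the pool and keeping the first non-None match; match_at_scale is a hypothetical stand-in for cls.resize_and_match:

from multiprocessing.pool import ThreadPool

def match_at_scale(scale):
    # pretend only scale 0.9 produces a match; return None otherwise
    return ("region", scale) if abs(scale - 0.9) < 1e-9 else None

pool = ThreadPool(processes=4)
scales = [round(0.8 + 0.02 * i, 2) for i in range(21)]   # 0.80 .. 1.20
results = [pool.apply_async(match_at_scale, (s,)) for s in scales]
pool.close()
pool.join()
hits = [r.get() for r in results if r.get() is not None]
first_hit = hits[0] if hits else None
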
Exemple #47
0
    def process_batch(self, file_path, charmap, noise_types,
                      frame_overlap_flag):
        if (self.b_id + self.batch) >= self.n_files:
            self.b_id = 0

        n_threads = 2
        pool = ThreadPool(processes=n_threads)
        # data per thread
        data_thread = int(np.floor(float(self.batch) / n_threads))
        # remaining data for last thread
        rem_data_lth = self.batch - (data_thread * (n_threads - 1))
        threads = []
        for p in range(n_threads):

            if p == n_threads - 1:
                if (self.b_id + rem_data_lth) >= self.n_files:
                    self.b_id = 0
            else:
                if (self.b_id + data_thread) >= self.n_files:
                    self.b_id = 0

            s_i = self.b_id
            if rem_data_lth != 0 and p == n_threads - 1:
                e_i = s_i + rem_data_lth
            else:
                e_i = s_i + data_thread
            # update the self.b_id
            self.b_id = e_i
            # """
            # Debug start---------------
            # """
            # print("s_i: %d, e_i: %d"%(s_i,e_i))
            # """
            # Debug end-----------------
            # """
            threads.append(
                pool.apply_async(self.get_train_targ_data,
                                 args=(file_path, charmap, noise_types,
                                       frame_overlap_flag, s_i, e_i, p,
                                       data_thread)))
        #gc.collect()
        #-----------------------------------------------------------
        # Variables for collapsing results from different processes
        #-----------------------------------------------------------
        frames = None
        seq_lens = []
        transcripts = []
        t_indices = []
        t_values = []
        t_shape = []
        # ----------------------------------------------------------
        for p in range(n_threads):
            data, _, nframes, \
            transcript, targ_indices, \
            targ_values, targ_shape = threads[p].get()
            # Erase the memory in threads[p]
            threads[p] = None

            frames = self.append_tensor(frames, data)
            seq_lens.append(nframes)
            t_shape.append([targ_shape])
            transcripts.append(transcript)
            t_indices.append(targ_indices)
            t_values.append(targ_values)
            del data, nframes, transcript, targ_indices, targ_values, targ_shape

        pool.close()
        pool.join()
        pool._join_exited_workers()
        t_indices = np.concatenate(t_indices)
        t_values = np.concatenate(t_values)
        t_shape = np.concatenate(t_shape)
        t_shape = [self.batch, np.max(t_shape[:, 1])]
        transcripts = [t for sublist in transcripts for t in sublist]
        seq_lens = np.concatenate(seq_lens)
        self.b_id += self.batch
        return frames, transcripts, t_indices, t_values, t_shape, seq_lens
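
The batch-splitting arithmetic above can be sketched on its own, assuming a hypothetical load_slice(start, end) in place of get_train_targ_data: each thread gets batch // n_threads items and the last thread absorbs the remainder.

from multiprocessing.pool import ThreadPool

def load_slice(start, end):
    return list(range(start, end))

batch, n_threads = 10, 3
per_thread = batch // n_threads                      # items handled by each thread
last_thread = batch - per_thread * (n_threads - 1)   # remainder goes to the last one

pool = ThreadPool(processes=n_threads)
jobs, s_i = [], 0
for p in range(n_threads):
    e_i = s_i + (last_thread if p == n_threads - 1 else per_thread)
    jobs.append(pool.apply_async(load_slice, (s_i, e_i)))
    s_i = e_i
pool.close()
pool.join()
merged = [x for job in jobs for x in job.get()]      # collapse per-thread slices
assert merged == list(range(batch))
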
Exemple #48
0
def evalvideo(net: Yolact, path: str):
    # If the path is a digit, parse it as a webcam index
    is_webcam = path.isdigit()

    if is_webcam:
        vid = cv2.VideoCapture(int(path))
    else:
        vid = cv2.VideoCapture(path)

    if not vid.isOpened():
        print('Could not open video "%s"' % path)
        exit(-1)

    net = CustomDataParallel(net).cuda()
    transform = torch.nn.DataParallel(FastBaseTransform()).cuda()
    frame_times = MovingAverage(100)
    fps = 0
    # The 0.8 is to account for the overhead of time.sleep
    frame_time_target = 1 / vid.get(cv2.CAP_PROP_FPS)
    running = True

    def cleanup_and_exit():
        print()
        pool.terminate()
        vid.release()
        cv2.destroyAllWindows()
        exit()

    def get_next_frame(vid):
        return [vid.read()[1] for _ in range(args.video_multiframe)]

    def transform_frame(frames):
        with torch.no_grad():
            frames = [
                torch.from_numpy(frame).cuda().float() for frame in frames
            ]
            return frames, transform(torch.stack(frames, 0))

    def eval_network(inp):
        with torch.no_grad():
            frames, imgs = inp
            return frames, net(imgs)

    def prep_frame(inp):
        with torch.no_grad():
            frame, preds = inp
            return prep_display(preds,
                                frame,
                                None,
                                None,
                                undo_transform=False,
                                class_color=True)

    frame_buffer = Queue()
    video_fps = 0

    # All this timing code is there to keep playback running at the target framerate
    def play_video():
        nonlocal frame_buffer, running, video_fps, is_webcam

        video_frame_times = MovingAverage(100)
        frame_time_stabilizer = frame_time_target
        last_time = None
        stabilizer_step = 0.0005

        while running:
            frame_time_start = time.time()

            if not frame_buffer.empty():
                next_time = time.time()
                if last_time is not None:
                    video_frame_times.add(next_time - last_time)
                    video_fps = 1 / video_frame_times.get_avg()
                cv2.imshow(path, frame_buffer.get())
                last_time = next_time

            if cv2.waitKey(1) == 27:  # Press Escape to close
                running = False

            buffer_size = frame_buffer.qsize()
            if buffer_size < args.video_multiframe:
                frame_time_stabilizer += stabilizer_step
            elif buffer_size > args.video_multiframe:
                frame_time_stabilizer -= stabilizer_step
                if frame_time_stabilizer < 0:
                    frame_time_stabilizer = 0

            new_target = frame_time_stabilizer if is_webcam else max(
                frame_time_stabilizer, frame_time_target)

            next_frame_target = max(
                2 * new_target - video_frame_times.get_avg(), 0)
            target_time = frame_time_start + next_frame_target - 0.001  # Let's just subtract a millisecond to be safe
            # This gives more accurate timing than if sleeping the whole amount at once
            while time.time() < target_time:
                time.sleep(0.001)

    extract_frame = lambda x, i: (x[0][i] if x[1][i] is None else x[0][i].to(x[
        1][i]['box'].device), [x[1][i]])

    # Prime the network on the first frame because I do some thread unsafe things otherwise
    print('Initializing model... ', end='')
    eval_network(transform_frame(get_next_frame(vid)))
    print('Done.')

    # The sequence of functions each frame needs to go through to be processed (in reversed order)
    sequence = [prep_frame, eval_network, transform_frame]
    pool = ThreadPool(processes=len(sequence) + args.video_multiframe + 2)
    pool.apply_async(play_video)

    active_frames = []

    print()
    while vid.isOpened() and running:
        start_time = time.time()

        # Start loading the next frames from the disk
        next_frames = pool.apply_async(get_next_frame, args=(vid, ))

        # For each frame in our active processing queue, dispatch a job
        # for that frame using the current function in the sequence
        for frame in active_frames:
            frame['value'] = pool.apply_async(sequence[frame['idx']],
                                              args=(frame['value'], ))

        # For each frame whose job was the last in the sequence (i.e. for all final outputs)
        for frame in active_frames:
            if frame['idx'] == 0:
                frame_buffer.put(frame['value'].get())

        # Remove the finished frames from the processing queue
        active_frames = [x for x in active_frames if x['idx'] > 0]

        # Finish evaluating every frame in the processing queue and advance their position in the sequence
        for frame in list(reversed(active_frames)):
            frame['value'] = frame['value'].get()
            frame['idx'] -= 1

            if frame['idx'] == 0:
                # Split this up into individual threads for prep_frame since it doesn't support batch size
                active_frames += [{
                    'value': extract_frame(frame['value'], i),
                    'idx': 0
                } for i in range(1, args.video_multiframe)]
                frame['value'] = extract_frame(frame['value'], 0)

        # Finish loading in the next frames and add them to the processing queue
        active_frames.append({
            'value': next_frames.get(),
            'idx': len(sequence) - 1
        })

        # Compute FPS
        frame_times.add(time.time() - start_time)
        fps = args.video_multiframe / frame_times.get_avg()

        print(
            '\rProcessing FPS: %.2f | Video Playback FPS: %.2f | Frames in Buffer: %d    '
            % (fps, video_fps, frame_buffer.qsize()),
            end='')

    cleanup_and_exit()
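
The loop above implements a small staged pipeline: sequence holds the stage functions in reversed order, every frame carries an index into it, and each pass dispatches the frame's current stage with apply_async before decrementing the index. A self-contained sketch of that mechanism, with toy stage functions standing in for transform_frame, eval_network and prep_frame:

from multiprocessing.pool import ThreadPool

def load(x):      return x
def infer(x):     return x * 2
def render(x):    return "frame-%d" % x

sequence = [render, infer, load]          # reversed order, index 0 is the last stage
pool = ThreadPool(processes=4)
active = [{'value': i, 'idx': len(sequence) - 1} for i in range(5)]
done = []

while active:
    for item in active:
        item['result'] = pool.apply_async(sequence[item['idx']], (item['value'],))
    for item in active:
        item['value'] = item['result'].get()
        item['idx'] -= 1
    done += [item['value'] for item in active if item['idx'] < 0]
    active = [item for item in active if item['idx'] >= 0]

pool.close()
pool.join()
# done == ['frame-0', 'frame-2', 'frame-4', 'frame-6', 'frame-8']
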
Exemple #49
0
class FrontalFaceDetector(object):
    DLIB_CNN_MODEL_FILE = expanduser("~/.dlib/mmod_cnn.dat")
    DLIB_CNN_MODEL_URL = "http://dlib.net/files/mmod_human_face_detector.dat.bz2"

    def __init__(self, recogniser):
        self.recogniser = recogniser
        self.cfg_lock = Lock()
        self.cfg = None
        self.bridge = CvBridge()

        self.dynamic_reconfigure_srv = Server(
            RosPeopleModelConfig, self.dynamic_reconfigure_callback)
        self.faces_pub = rospy.Publisher('vis_dlib_frontal',
                                         Features,
                                         queue_size=10)
        self.frontal_scale = rospy.get_param('~scale', 0.4)

        self.srv_pool = ThreadPool(processes=3)
        self.srv_eye_state = rospy.ServiceProxy('eye_state_recogniser',
                                                EyeState,
                                                persistent=True)
        self.srv_face_id = rospy.ServiceProxy('face_id_recogniser',
                                              FaceId,
                                              persistent=True)
        self.srv_emotion = rospy.ServiceProxy('emotion_recogniser',
                                              Emotion,
                                              persistent=True)
        self.srv_landmarks = rospy.ServiceProxy('face_landmarks_recogniser',
                                                FaceLandmarks,
                                                persistent=True)

        self.sub = rospy.Subscriber(
            rospy.get_param('~topic_name', '/vis_dlib_cnn'), Features,
            self.features_callback)

    def dynamic_reconfigure_callback(self, config, level):
        with self.cfg_lock:
            self.cfg = config
            rospy.logdebug(
                "Dynamic reconfigure callback result: {0}".format(config))
            return config

    def features_callback(self, msg):
        features = Features()
        features.features = []

        # go through the feature list and only keep crops that contain exactly one face
        for k, feature in enumerate(msg.features):
            image = self.bridge.imgmsg_to_cv2(feature.crop, "8UC3")
            faces = self.recogniser.detect_frontal_faces(
                image, scale=self.frontal_scale)

            if len(faces) == 1:
                face = faces[0]
                ftr = Feature()

                roi = RegionOfInterest()
                roi.x_offset = max(feature.roi.x_offset + face.left(), 0)
                roi.y_offset = max(feature.roi.y_offset + face.top(), 0)
                roi.height = max(face.bottom() - face.top(), 0)
                roi.width = max(face.right() - face.left(), 0)

                ftr.roi = roi
                image2 = np.array(image[face.top():face.bottom(),
                                        face.left():face.right(), :])
                ftr.crop = self.bridge.cv2_to_imgmsg(image2)

                with self.cfg_lock:
                    if self.cfg is not None:
                        if self.cfg.run_face_landmarks:
                            try:
                                ftr.shapes = self.srv_landmarks(
                                    ftr.crop).landmarks

                                if self.cfg.run_face_id:
                                    face_id_result = self.srv_pool.apply_async(
                                        self.srv_face_id,
                                        (ftr.crop, ftr.roi, ftr.shapes))

                                if self.cfg.run_face_emotions:
                                    emotion_result = self.srv_pool.apply_async(
                                        self.srv_emotion,
                                        (ftr.crop, ftr.shapes))

                                if self.cfg.run_eye_state:
                                    eye_state_result = self.srv_pool.apply_async(
                                        self.srv_eye_state,
                                        (ftr.crop, ftr.shapes))

                                if self.cfg.run_face_id:
                                    ftr.face_id = face_id_result.get().face_id

                                if self.cfg.run_face_emotions:
                                    ftr.emotions = emotion_result.get(
                                    ).emotions

                                if self.cfg.run_eye_state:
                                    ftr.eyes_closed = eye_state_result.get(
                                    ).eyes_closed
                            except Exception as e:
                                rospy.logerr(
                                    "Exception getting features: {0}".format(
                                        e))

                features.features.append(ftr)

        self.faces_pub.publish(features)
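
The callback above fans three independent service calls out to the pool and only blocks when it collects the answers. A minimal sketch of that fan-out/join pattern, with plain functions standing in for the ROS service proxies:

import time
from multiprocessing.pool import ThreadPool

def face_id(crop):   time.sleep(0.1); return "alice"
def emotion(crop):   time.sleep(0.1); return "happy"
def eye_state(crop): time.sleep(0.1); return False

pool = ThreadPool(processes=3)
crop = object()                       # placeholder for the image message
id_res      = pool.apply_async(face_id, (crop,))
emotion_res = pool.apply_async(emotion, (crop,))
eyes_res    = pool.apply_async(eye_state, (crop,))

# The three calls run concurrently; .get() blocks only until each one finishes.
result = (id_res.get(), emotion_res.get(), eyes_res.get())
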
Exemple #50
0
def draw(stdscr, switch_dict, ssh_list):
    from decimal import Decimal

    def fexp(number):
        (sign, digits, exponent) = Decimal(number).as_tuple()
        return len(digits) + exponent - 1

    def fman(number):
        return Decimal(number).scaleb(-fexp(number)).normalize()

    # Clear screen
    stdscr.clear()
    lines = curses.LINES
    cols = curses.COLS
    matrix = get_rates(switch_dict, ssh_list)
    # prev_matrix = matrix
    m_rows = len(matrix)
    m_rows = m_rows + (m_rows / 2)
    m_cols = len(matrix[0])
    # find max number size in matrix
    max_num = max(
        [x for x in [j for i in matrix for j in i] if isinstance(x, int)])
    colw = fexp(max_num) + 1
    if colw < 9: colw = 9
    blank_str = ' ' * colw
    # Initialise windows and colours
    curses.use_default_colors()
    curses.init_pair(1, curses.COLOR_WHITE, -1)
    curses.init_pair(2, curses.COLOR_BLACK, -1)
    curses.init_pair(3, curses.COLOR_YELLOW, -1)
    curses.init_pair(4, curses.COLOR_YELLOW, -1)
    curses.init_pair(5, curses.COLOR_GREEN, -1)
    curses.init_pair(6, curses.COLOR_GREEN, -1)
    curses.init_pair(7, curses.COLOR_RED, -1)
    curses.init_pair(8, curses.COLOR_RED, -1)
    col_title = curses.newpad(1, m_cols * colw)
    row_title = curses.newpad(m_rows, colw)
    disp_wind = curses.newpad(m_rows, m_cols * colw)
    top_cornr = curses.newpad(1, colw)
    top_cornr.addstr(0, 0, 'Rates', curses.A_BOLD | curses.A_UNDERLINE)
    # Data display block upper left-hand corner
    dminrow = 0
    dmincol = 0
    # Column title upper left-hand corner
    cminrow = 0
    cmincol = 0
    # Row title upper left-hand corner
    rminrow = 1
    rmincol = 0
    # Data display window
    dwminrow = 1
    dwmincol = colw + 1
    dwmaxrow = lines - 1
    dwmaxcol = cols - 1
    dwrows = dwmaxrow - dwminrow
    dwcols = dwmaxcol - dwmincol
    # Column title display window
    ctminrow = 0
    ctmincol = colw + 1
    ctmaxrow = 0
    ctmaxcol = cols - 1
    # Row title display window
    rtminrow = 1
    rtmincol = 0
    rtmaxrow = lines - 1
    rtmaxcol = colw
    stdscr.nodelay(1)
    try:
        data_rdy = True
        blink = True
        pool = ThreadPool(processes=1)
        while True:
            if data_rdy:
                data_rdy = False
                thread_obj = pool.apply_async(get_rates,
                                              args=(switch_dict, ssh_list))
                blankc = 0
                reverse = False
                for i, row in enumerate(matrix):
                    if i == 0:
                        for j, val in enumerate(row):
                            if j == 0:
                                pass
                                # col_title.addstr(i,j, 'Switch', curses.A_BOLD | curses.A_UNDERLINE)
                            else:
                                col_title.addstr(
                                    i, (j - 1) * colw,
                                    '{0:>{1}}'.format(val, colw),
                                    curses.A_BOLD | curses.A_UNDERLINE)
                    else:
                        for j, val in enumerate(row):
                            if j == 0:
                                if val == 0:
                                    val = 'N/C'
                                col_pair = 1
                                if reverse: col_pair += 1
                                row_title.addstr(
                                    i + blankc - 1, 0, val,
                                    curses.color_pair(col_pair)
                                    | curses.A_BOLD)
                                if (i - 1) % 2 == 1:
                                    row_title.addstr(i + blankc - 1 + 1, 0,
                                                     ' ')
                            else:
                                width = colw - 2
                                if not val:
                                    val = 0
                                man = fman(val)
                                exp = fexp(val)
                                if exp < 3:
                                    col_pair = 1
                                    if reverse: col_pair += 1
                                    rate = 'Bs'
                                    val = '{0:>{1}} {2}'.format(
                                        int(val), width - 1, rate)
                                elif exp < 6:
                                    col_pair = 1
                                    if reverse: col_pair += 1
                                    rate = 'KB'
                                    man *= 10**(exp - 3)
                                    man = man.normalize()
                                    if width - 8 < 0:
                                        val = '{0:>{1}} {2}'.format(
                                            int(man), width - 1, rate)
                                    else:
                                        val = '{0:{1}.1f} {2}'.format(
                                            man, width - 1, rate)
                                elif exp < 9:
                                    col_pair = 3
                                    if reverse: col_pair += 1
                                    rate = 'MB'
                                    man *= 10**(exp - 6)
                                    man = man.normalize()
                                    if width - 8 < 0:
                                        val = '{0:>{1}} {2}'.format(
                                            int(man), width - 1, rate)
                                    else:
                                        val = '{0:{1}.1f} {2}'.format(
                                            man, width - 1, rate)
                                elif exp < 12:
                                    if man > 4.8:
                                        col_pair = 7
                                        if reverse: col_pair += 1
                                        col_title.addstr(
                                            0, (j - 1) * colw,
                                            '{0:>{1}}'.format(
                                                matrix[0][j], colw),
                                            curses.color_pair(col_pair)
                                            | curses.A_BOLD
                                            | curses.A_UNDERLINE)
                                        row_title.addstr(
                                            i + blankc - 1, 0, matrix[i][0],
                                            curses.color_pair(col_pair)
                                            | curses.A_BOLD)
                                    else:
                                        col_pair = 5
                                        if reverse: col_pair += 1
                                    rate = 'GB'
                                    man *= 10**(exp - 9)
                                    man = man.normalize()
                                    val = '{0:{1}.1f} {2}'.format(
                                        man, width - 1, rate)
                                else:
                                    col_pair = 1
                                    rate = 'Bs'
                                    val = '{0:>{1}} {2}'.format(
                                        int(val), width - 1, rate)
                                disp_wind.addstr(i + blankc - 1,
                                                 (j - 1) * colw, val,
                                                 curses.color_pair(col_pair))
                                if (i - 1) % 2 == 1:
                                    disp_wind.addstr(i + blankc - 1 + 1,
                                                     (j - 1) * colw, ' ')
                        if (i - 1) % 2 == 1:
                            blankc += 1
                            reverse = False  # not(reverse)
                # prev_matrix = matrix
            else:
                char = stdscr.getch()
                if char == curses.ERR:
                    try:
                        if thread_obj.ready():
                            matrix = thread_obj.get()
                            data_rdy = True
                            if blink:
                                top_cornr.addstr(
                                    0, 0, 'Rates', curses.A_BOLD
                                    | curses.A_UNDERLINE | curses.A_REVERSE)
                            else:
                                top_cornr.addstr(
                                    0, 0, 'Rates',
                                    curses.A_BOLD | curses.A_UNDERLINE)
                            blink = not (blink)
                        else:
                            time.sleep(0.1)
                    except:
                        return False
                else:
                    redraw = True
                    if char == curses.KEY_LEFT:
                        if dmincol > colw:
                            dmincol -= colw
                        else:
                            dmincol = 0
                    elif char == curses.KEY_RIGHT:
                        if dmincol < (m_cols - 2) * colw - dwcols:
                            dmincol += colw
                        else:
                            dmincol = (m_cols - 1) * colw - dwcols
                    elif char == curses.KEY_UP:
                        if dminrow > 0:
                            dminrow -= 1
                        else:
                            dminrow = 0
                    elif char == curses.KEY_DOWN:
                        if dminrow < m_rows - dwrows - 2:
                            dminrow += 1
                        else:
                            dminrow = m_rows - dwrows - 2
            # Shift titles with text
            cmincol = dmincol
            rminrow = dminrow
            disp_wind.refresh(dminrow, dmincol, dwminrow, dwmincol, dwmaxrow,
                              dwmaxcol)
            col_title.refresh(cminrow, cmincol, ctminrow, ctmincol, ctmaxrow,
                              ctmaxcol)
            row_title.refresh(rminrow, rmincol, rtminrow, rtmincol, rtmaxrow,
                              rtmaxcol)
            top_cornr.refresh(0, 0, 0, 0, 1, colw - 1)
    except KeyboardInterrupt:
        return True
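
The curses loop above never blocks on get_rates(): it launches the fetch with apply_async and polls AsyncResult.ready() between keystrokes. A compact sketch of that non-blocking refresh pattern, with slow_fetch as a stand-in for get_rates:

import time
from multiprocessing.pool import ThreadPool

def slow_fetch():
    time.sleep(0.5)                   # stands in for get_rates()
    return {"rate": 42}

pool = ThreadPool(processes=1)
pending = pool.apply_async(slow_fetch)
data = None
while data is None:
    if pending.ready():               # returns immediately, never blocks
        data = pending.get()
    else:
        time.sleep(0.05)              # keep handling keyboard input / redraws here
pool.close()
pool.join()
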
Exemple #51
0
def evalvideo(net:Yolact, path:str, out_path:str=None):
    ###########################################
    print(path)
    #####################
    print("ehnii check: ",path)

    try:
        _checker = path.split('/')[1]
        #print("_checker:", _checker)
        _dot_checker = _checker.split('_')[0]
        print("check this one as ., if . it should be escaped", _dot_checker)
        if _dot_checker == '.':
            path = path.split('.')[0] + _checker.split('_')[1]
            print("changed path: ", path)
            ####################
    except:
        print("name is not funny")
    
    ###########################################

    # If the path is a digit, parse it as a webcam index
    is_webcam = path.isdigit()
    
    # If the input image size is constant, this makes things faster (hence why we can use it in a video setting).
    cudnn.benchmark = True
    
    if is_webcam:
        vid = cv2.VideoCapture(int(path))
    else:
        vid = cv2.VideoCapture(path)
    ###########################################
    print(path)
    ###########################################
    
    if not vid.isOpened():
        print('Could not open video "%s"' % path)
        exit(-1)

    target_fps   = round(vid.get(cv2.CAP_PROP_FPS))
    frame_width  = round(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = round(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    
    if is_webcam:
        num_frames = float('inf')
    else:
        num_frames = round(vid.get(cv2.CAP_PROP_FRAME_COUNT))

    net = CustomDataParallel(net).cuda()
    transform = torch.nn.DataParallel(FastBaseTransform()).cuda()
    frame_times = MovingAverage(100)
    fps = 0
    frame_time_target = 1 / target_fps
    running = True
    fps_str = ''
    vid_done = False
    frames_displayed = 0

    if out_path is not None:
        out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), target_fps, (frame_width, frame_height))

    def cleanup_and_exit():
        print()
        pool.terminate()
        vid.release()
        if out_path is not None:
            out.release()
        # cv2.destroyAllWindows()
        exit()

    def get_next_frame(vid):
        frames = []
        for idx in range(args.video_multiframe):
            frame = vid.read()[1]
            if frame is None:
                return frames
            frames.append(frame)
        return frames

    def transform_frame(frames):
        with torch.no_grad():
            frames = [torch.from_numpy(frame).cuda().float() for frame in frames]
            return frames, transform(torch.stack(frames, 0))

    def eval_network(inp):
        with torch.no_grad():
            frames, imgs = inp
            num_extra = 0
            while imgs.size(0) < args.video_multiframe:
                imgs = torch.cat([imgs, imgs[0].unsqueeze(0)], dim=0)
                num_extra += 1
            out = net(imgs)
            if num_extra > 0:
                out = out[:-num_extra]
            return frames, out

    def prep_frame(inp, fps_str):
        with torch.no_grad():
            frame, preds = inp
            return prep_display(preds, frame, None, None, undo_transform=False, class_color=True, fps_str=fps_str)

    frame_buffer = Queue()
    video_fps = 0

    # All this timing code is there to keep playback running at the target framerate
    def play_video():
        try:
            nonlocal frame_buffer, running, video_fps, is_webcam, num_frames, frames_displayed, vid_done

            video_frame_times = MovingAverage(100)
            frame_time_stabilizer = frame_time_target
            last_time = None
            stabilizer_step = 0.0005
            progress_bar = ProgressBar(30, num_frames)

            while running:
                frame_time_start = time.time()

                if not frame_buffer.empty():
                    next_time = time.time()
                    if last_time is not None:
                        video_frame_times.add(next_time - last_time)
                        video_fps = 1 / video_frame_times.get_avg()
                    if out_path is None:
                        print("Here was showing the frames")
                        # cv2.imshow(path, frame_buffer.get())
                    else:
                        out.write(frame_buffer.get())
                    frames_displayed += 1
                    last_time = next_time

                    if out_path is not None:
                        if video_frame_times.get_avg() == 0:
                            fps = 0
                        else:
                            fps = 1 / video_frame_times.get_avg()
                        progress = frames_displayed / num_frames * 100
                        progress_bar.set_val(frames_displayed)

                        print('\rProcessing Frames  %s %6d / %6d (%5.2f%%)    %5.2f fps        '
                            % (repr(progress_bar), frames_displayed, num_frames, progress, fps), end='')

                
                # This is split because you don't want savevideo to require cv2 display functionality (see #197)
                if out_path is None and cv2.waitKey(1) == 27:
                    # Press Escape to close
                    running = False
                if not (frames_displayed < num_frames):
                    running = False

                if not vid_done:
                    buffer_size = frame_buffer.qsize()
                    if buffer_size < args.video_multiframe:
                        frame_time_stabilizer += stabilizer_step
                    elif buffer_size > args.video_multiframe:
                        frame_time_stabilizer -= stabilizer_step
                        if frame_time_stabilizer < 0:
                            frame_time_stabilizer = 0

                    new_target = frame_time_stabilizer if is_webcam else max(frame_time_stabilizer, frame_time_target)
                else:
                    new_target = frame_time_target

                next_frame_target = max(2 * new_target - video_frame_times.get_avg(), 0)
                target_time = frame_time_start + next_frame_target - 0.001 # Let's just subtract a millisecond to be safe
                
                if out_path is None or args.emulate_playback:
                    # This gives more accurate timing than if sleeping the whole amount at once
                    while time.time() < target_time:
                        time.sleep(0.001)
                else:
                    # Let's not starve the main thread, now
                    time.sleep(0.001)
        except:
            # See issue #197 for why this is necessary
            import traceback
            traceback.print_exc()


    extract_frame = lambda x, i: (x[0][i] if x[1][i]['detection'] is None else x[0][i].to(x[1][i]['detection']['box'].device), [x[1][i]])

    # Prime the network on the first frame because I do some thread unsafe things otherwise
    print('Initializing model... ', end='')
    first_batch = eval_network(transform_frame(get_next_frame(vid)))
    print('Done.')

    # The sequence of functions each frame needs to go through to be processed (in reversed order)
    sequence = [prep_frame, eval_network, transform_frame]
    pool = ThreadPool(processes=len(sequence) + args.video_multiframe + 2)
    pool.apply_async(play_video)
    active_frames = [{'value': extract_frame(first_batch, i), 'idx': 0} for i in range(len(first_batch[0]))]

    print()
    if out_path is None: print('Press Escape to close.')
    try:
        while vid.isOpened() and running:
            # Hard limit on frames in buffer so we don't run out of memory >.>
            while frame_buffer.qsize() > 100:
                time.sleep(0.001)

            start_time = time.time()

            # Start loading the next frames from the disk
            if not vid_done:
                next_frames = pool.apply_async(get_next_frame, args=(vid,))
            else:
                next_frames = None
            
            if not (vid_done and len(active_frames) == 0):
                # For each frame in our active processing queue, dispatch a job
                # for that frame using the current function in the sequence
                for frame in active_frames:
                    _args =  [frame['value']]
                    if frame['idx'] == 0:
                        _args.append(fps_str)
                    frame['value'] = pool.apply_async(sequence[frame['idx']], args=_args)
                
                # For each frame whose job was the last in the sequence (i.e. for all final outputs)
                for frame in active_frames:
                    if frame['idx'] == 0:
                        frame_buffer.put(frame['value'].get())

                # Remove the finished frames from the processing queue
                active_frames = [x for x in active_frames if x['idx'] > 0]

                # Finish evaluating every frame in the processing queue and advance their position in the sequence
                for frame in list(reversed(active_frames)):
                    frame['value'] = frame['value'].get()
                    frame['idx'] -= 1

                    if frame['idx'] == 0:
                        # Split this up into individual threads for prep_frame since it doesn't support batch size
                        active_frames += [{'value': extract_frame(frame['value'], i), 'idx': 0} for i in range(1, len(frame['value'][0]))]
                        frame['value'] = extract_frame(frame['value'], 0)
                
                # Finish loading in the next frames and add them to the processing queue
                if next_frames is not None:
                    frames = next_frames.get()
                    if len(frames) == 0:
                        vid_done = True
                    else:
                        active_frames.append({'value': frames, 'idx': len(sequence)-1})

                # Compute FPS
                frame_times.add(time.time() - start_time)
                fps = args.video_multiframe / frame_times.get_avg()
            else:
                fps = 0
            
            fps_str = 'Processing FPS: %.2f | Video Playback FPS: %.2f | Frames in Buffer: %d' % (fps, video_fps, frame_buffer.qsize())
            if not args.display_fps:
                print('\r' + fps_str + '    ', end='')

    except KeyboardInterrupt:
        print('\nStopping...')
    
    cleanup_and_exit()
Exemple #52
0
class CCSimpleHttpServer(HTTPServer):
    """
    Simple http server to handle requests from the clients.
    """

    daemon_threads = False

    def __init__(self,
                 server_address,
                 RequestHandlerClass,
                 config_directory,
                 product_db_sql_server,
                 skip_db_cleanup,
                 pckg_data,
                 suppress_handler,
                 context,
                 check_env,
                 manager):

        LOG.debug("Initializing HTTP server...")

        self.config_directory = config_directory
        self.www_root = pckg_data['www_root']
        self.doc_root = pckg_data['doc_root']
        self.checker_md_docs = pckg_data['checker_md_docs']
        self.checker_md_docs_map = pckg_data['checker_md_docs_map']
        self.version = pckg_data['version']
        self.suppress_handler = suppress_handler
        self.context = context
        self.check_env = check_env
        self.manager = manager
        self.__products = {}

        # Create a database engine for the configuration database.
        LOG.debug("Creating database engine for CONFIG DATABASE...")
        self.__engine = product_db_sql_server.create_engine()
        self.config_session = sessionmaker(bind=self.__engine)
        self.manager.set_database_connection(self.config_session)

        # Load the initial list of products and set up the server.
        cfg_sess = self.config_session()
        permissions.initialise_defaults('SYSTEM', {
            'config_db_session': cfg_sess
        })
        products = cfg_sess.query(ORMProduct).all()
        for product in products:
            self.add_product(product)
            permissions.initialise_defaults('PRODUCT', {
                'config_db_session': cfg_sess,
                'productID': product.id
            })
        cfg_sess.commit()
        cfg_sess.close()

        if not skip_db_cleanup:
            for endpoint, product in self.__products.items():
                if not product.cleanup_run_db():
                    LOG.warning("Cleaning database for " +
                                endpoint + " Failed.")

        self.__request_handlers = ThreadPool(processes=10)
        try:
            HTTPServer.__init__(self, server_address,
                                RequestHandlerClass,
                                bind_and_activate=True)
            ssl_key_file = os.path.join(config_directory, "key.pem")
            ssl_cert_file = os.path.join(config_directory, "cert.pem")
            if os.path.isfile(ssl_key_file) and os.path.isfile(ssl_cert_file):
                LOG.info("Initiating SSL. Server listening on secure socket.")
                LOG.debug("Using cert file:"+ssl_cert_file)
                LOG.debug("Using key file:"+ssl_key_file)
                self.socket = ssl.wrap_socket(self.socket, server_side=True,
                                              keyfile=ssl_key_file,
                                              certfile=ssl_cert_file)

            else:
                LOG.info("Searching for SSL key at {0}, cert at {1}, "
                         "not found...".format(ssl_key_file, ssl_cert_file))
                LOG.info("Falling back to simple, insecure HTTP.")

        except Exception as e:
            LOG.error("Couldn't start the server: " + e.__str__())
            raise

    def process_request_thread(self, request, client_address):
        try:
            # Finish_request instantiates request handler class.
            self.finish_request(request, client_address)
            self.shutdown_request(request)
        except socket.error as serr:
            if serr[0] == errno.EPIPE:
                LOG.debug("Broken pipe")
                LOG.debug(serr)
                self.shutdown_request(request)

        except Exception as ex:
            LOG.debug(ex)
            self.handle_error(request, client_address)
            self.shutdown_request(request)

    def process_request(self, request, client_address):
        self.__request_handlers.apply_async(self.process_request_thread,
                                            (request, client_address))

    def add_product(self, orm_product, init_db=False):
        """
        Adds a product to the list of product databases connected to
        by the server.
        Checks the database connection for the product databases.
        """
        if orm_product.endpoint in self.__products:
            LOG.debug("This product is already configured!")
            return

        LOG.debug("Setting up product '{0}'".format(orm_product.endpoint))

        prod = Product(orm_product,
                       self.context,
                       self.check_env)

        # Update the product database status.
        prod.connect()
        if prod.db_status == DBStatus.SCHEMA_MISSING and init_db:
            LOG.debug("Schema was missing in the database. Initializing new")
            prod.connect(init_db=True)

        self.__products[prod.endpoint] = prod

    @property
    def num_products(self):
        """
        Returns the number of products currently mounted by the server.
        """
        return len(self.__products)

    def get_product(self, endpoint):
        """
        Get the product connection object for the given endpoint, or None.
        """
        if endpoint in self.__products:
            return self.__products.get(endpoint)

        LOG.debug("Product with the given endpoint '%s' does not exist in "
                  "the local cache. Try to get it from the database.",
                  endpoint)

        # If the product is not found in the cache, try to get it from the
        # database.
        try:
            cfg_sess = self.config_session()
            product = cfg_sess.query(ORMProduct) \
                .filter(ORMProduct.endpoint == endpoint) \
                .limit(1).one_or_none()

            if not product:
                return None

            self.add_product(product)
            permissions.initialise_defaults('PRODUCT', {
                'config_db_session': cfg_sess,
                'productID': product.id
            })

            return self.__products.get(endpoint, None)
        finally:
            if cfg_sess:
                # commit pending changes before releasing the session
                cfg_sess.commit()
                cfg_sess.close()

    def get_only_product(self):
        """
        Returns the Product object for the only product connected to by the
        server, or None, if there are 0 or >= 2 products managed.
        """
        return self.__products.items()[0][1] if self.num_products == 1 \
            else None

    def remove_product(self, endpoint):
        product = self.get_product(endpoint)
        if not product:
            raise ValueError("The product with the given endpoint '{0}' does "
                             "not exist!".format(endpoint))

        LOG.info("Disconnecting product '{0}'".format(endpoint))
        product.teardown()

        del self.__products[endpoint]

    def remove_products_except(self, endpoints_to_keep):
        """
        Removes EVERY product connection from the server except those
        endpoints specified in :endpoints_to_keep.
        """
        map(self.remove_product, [ep for ep in self.__products.keys()
                                  if ep not in endpoints_to_keep])
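
process_request above hands every incoming connection to a ThreadPool instead of handling it inline. A minimal sketch of the same override on a plain http.server (socketserver.ThreadingMixIn is the stdlib equivalent of this idea):

from http.server import HTTPServer, BaseHTTPRequestHandler
from multiprocessing.pool import ThreadPool

class PooledHTTPServer(HTTPServer):
    def __init__(self, *args, **kwargs):
        self._handlers = ThreadPool(processes=10)
        HTTPServer.__init__(self, *args, **kwargs)

    def process_request(self, request, client_address):
        # run the usual handler sequence on a pool thread
        self._handlers.apply_async(self.process_request_thread,
                                   (request, client_address))

    def process_request_thread(self, request, client_address):
        try:
            self.finish_request(request, client_address)
        finally:
            self.shutdown_request(request)

class Hello(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b"ok")

# PooledHTTPServer(("127.0.0.1", 8080), Hello).serve_forever()
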
Exemple #53
0
    threaded_mode = True

    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
    while True:
        while len(pending) > 0 and pending[0].ready():
            res, t0 = pending.popleft().get()
            latency.update(clock() - t0)
            draw_str(res, (20, 20), "threaded      :  " + str(threaded_mode))
            draw_str(res, (20, 40), "latency        :  %.1f ms" % (latency.value*1000))
            draw_str(res, (20, 60), "frame interval :  %.1f ms" % (frame_interval.value*1000))
            cv2.imshow('threaded video', res)
        if len(pending) < threadn:
            ret, frame = cap.read()
            t = clock()
            frame_interval.update(t - last_frame_time)
            last_frame_time = t
            if threaded_mode:
                task = pool.apply_async(process_frame, (frame.copy(), t))
            else:
                task = DummyTask(process_frame(frame, t))
            pending.append(task)
        ch = cv2.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break
cv2.destroyAllWindows()
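
The deque above keeps a bounded number of frames in flight: finished jobs are popped from the left as soon as AsyncResult.ready() reports them done, and new work is queued only while there is room. A toy sketch of that bounded pipeline, with process_frame as a hypothetical stand-in for the real per-frame work:

from collections import deque
from multiprocessing.pool import ThreadPool

def process_frame(frame, t0):
    return frame * 2, t0              # stand-in for the real per-frame work

threadn = 4
pool = ThreadPool(processes=threadn)
pending = deque()
for frame in range(20):               # stands in for frames read from the camera
    while pending and pending[0].ready():
        result, t0 = pending.popleft().get()
    if len(pending) < threadn:        # never queue more work than the pool can absorb
        pending.append(pool.apply_async(process_frame, (frame, 0.0)))
while pending:                        # drain whatever is left
    result, t0 = pending.popleft().get()
pool.close()
pool.join()
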
Exemple #54
0
        return len(self.data[0])

    def dataHeight(self):
        return len(self.data)

    def printData(self):
        for x in self.data:
            print(x[0])

    def __exit__(self, *args):
        '''
        close all connections
        '''
        #close sqllite connection
        self.sqllite_cur.close()
        self.sqllite_con.close()


if __name__ == '__main__':
    data = SQLite_downloader(
        r'F:\Python\ChernLearning\Lesson7_flask_win32\flask_curses_example-master\flask_curses_example-master\db\db.db'
    )

    from multiprocessing.pool import ThreadPool
    pool = ThreadPool(processes=1)

    async_result = pool.apply_async(data.start)

    val = async_result.get()
    print(val)
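
In its simplest form this is the whole pattern: a single-thread pool turns one blocking call into a background task, and .get() later blocks only until that call returns. A hedged sketch with slow_download standing in for data.start:

from multiprocessing.pool import ThreadPool

def slow_download():
    return "rows from the database"   # stands in for data.start above

pool = ThreadPool(processes=1)
async_result = pool.apply_async(slow_download)
# ... do other work here while the download runs ...
value = async_result.get()            # blocks until slow_download() has returned
print(value)
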
Exemple #55
0
class KBTest(unittest.TestCase):
    def setUp(self):
        self.pool = ThreadPool(processes=1)
        self.lastEndStep = 0

    def playXSteps(self, solver, plays):
        """
        Call the solver's solveOneStep for x times, and record the result game state

        Args:
             solver: solver of the game
             plays: list of lists; inner list consists of the number of steps (x) followed by the expected outcome
        """
        res = []
        for play in plays:
            x = play[0]
            while self.lastEndStep < x:
                solver.solveOneStep()
                self.lastEndStep += 1
            res.append(solver.gm.getGameState())
        return res

    def solve(self, solver):
        """
        Call the solver's solve function, which should solve the game.

        Args:
             solver: solver of the game
        """
        solver.solve()

    def runPlayXSteps(self, solver, plays, timeout=15):
        """
        Wrapper function; calls playXSteps(..) with a timeout

        Args:
             solver: solver of the game
             plays: list of lists; inner list consists of the number of steps (x) followed by the expected outcome
             timeout: timeout in seconds. Default 15 seconds.
        """
        try:
            results = self.pool.apply_async(self.playXSteps,
                                            [solver, plays]).get(timeout)
            for index, play in enumerate(plays):
                expected = play[1]
                self.assertEqual(results[index], expected)
        except TimeoutError:
            raise Exception("Timed out: %s" % inspect.stack()[1][3])

    def runSolve(self, solver, timeout=15):
        """
        Wrapper function; calls solve(..) with a timeout

        Args:
             solver: solver of the game
             timeout: timeout in seconds. Default 15 seconds.
        """
        try:
            self.pool.apply_async(self.solve, [
                solver,
            ]).get(timeout)
            self.assertTrue(solver.gm.isWon())
        except TimeoutError:
            raise Exception("Timed out: %s" % inspect.stack()[1][3])

    def test01_GM_Hanoi(self):
        th = TowerOfHanoiGame()
        th.read('hanoi_5_smallest_on_three_second_smallest_on_two.txt')
        required = [
            'fact: (movable disk1 peg3 peg1)',
            'fact: (movable disk1 peg3 peg2)',
        ]
        th.setWinningCondition(required, 'hanoi_all_forbidden.txt')
        self.assertFalse(th.isWon())

        movables = th.getMovables()
        self.assertEqual(th.getGameState(), ((1, 2, 3), (), ()))
        th.makeMove(movables[0])
        self.assertEqual(th.getGameState(), ((2, 3), (1, ), ()))
        th.reverseMove(movables[0])
        self.assertEqual(th.getGameState(), ((1, 2, 3), (), ()))

    def test02_DFS_Hanoi(self):
        th = TowerOfHanoiGame()
        th.read('hanoi_5_all_disks_on_peg_one.txt')
        required = [
            'fact: (movable disk1 peg3 peg1)',
            'fact: (movable disk1 peg3 peg2)',
        ]
        th.setWinningCondition(required, 'hanoi_all_forbidden.txt')
        self.assertFalse(th.isWon())

        solver = SolverDFS(th, ((), (), (1, 2, 3)))

        self.runPlayXSteps(
            solver,
            [
                # [step, expected game state]
                [3, ((3, ), (2, ), (1, ))],
                [13, ((1, ), (), (2, 3))],
                [22, ((), (), (1, 2, 3))],
            ])

    def test03_DFS_Hanoi(self):
        th = TowerOfHanoiGame()
        th.read('hanoi_5_two_smallest_on_peg_three.txt')
        required = [
            'fact: (movable disk1 peg3 peg1)',
            'fact: (movable disk1 peg3 peg2)',
        ]
        th.setWinningCondition(required, 'hanoi_all_forbidden.txt')
        self.assertFalse(th.isWon())

        solver = SolverDFS(th, ((), (), (1, 2, 3, 4, 5)))
        self.runSolve(solver)

    def test04_BFS_Hanoi(self):
        th = TowerOfHanoiGame()
        th.read('hanoi_5_all_disks_on_peg_one.txt')
        required = [
            'fact: (movable disk1 peg3 peg1)',
            'fact: (movable disk1 peg3 peg2)',
        ]
        th.setWinningCondition(required, 'hanoi_all_forbidden.txt')
        self.assertFalse(th.isWon())

        solver = SolverBFS(th, ((), (), (1, 2, 3)))

        self.runPlayXSteps(
            solver,
            [
                # [step, expected game state]
                [10, ((), (1, 2), (3, ))],
                [11, ((1, ), (3, ), (2, ))],
                [20, ((), (2, 3), (1, ))],
            ])

    def test05_BFS_Hanoi(self):
        th = TowerOfHanoiGame()
        th.read('hanoi_5_two_smallest_on_peg_three.txt')
        required = [
            'fact: (movable disk1 peg3 peg1)',
            'fact: (movable disk1 peg3 peg2)',
        ]
        th.setWinningCondition(required, 'hanoi_all_forbidden.txt')
        self.assertFalse(th.isWon())

        solver = SolverBFS(th, ((), (), (1, 2, 3, 4, 5)))
        self.runSolve(solver, )

    def test06_GM_8Puzzle(self):
        p8 = Puzzle8Game()
        p8.read('puzzle8_top_right_empty.txt')
        required = [
            'fact: (movable tile6 pos3 pos2 pos3 pos3)',
            'fact: (movable tile8 pos2 pos3 pos3 pos3)',
        ]
        p8.setWinningCondition(required, 'puzzle8_all_forbidden.txt')
        self.assertFalse(p8.isWon())

        movables = p8.getMovables()
        self.assertEqual(p8.getGameState(), ((5, 4, -1), (6, 1, 8), (7, 3, 2)))
        p8.makeMove(movables[0])
        self.assertEqual(p8.getGameState(), ((5, -1, 4), (6, 1, 8), (7, 3, 2)))
        p8.reverseMove(movables[0])
        self.assertEqual(p8.getGameState(), ((5, 4, -1), (6, 1, 8), (7, 3, 2)))

    def test07_DFS_8Puzzle(self):
        p8 = Puzzle8Game()
        p8.read('puzzle8_top_right_empty.txt')
        required = [
            'fact: (movable tile6 pos3 pos2 pos3 pos3)',
            'fact: (movable tile8 pos2 pos3 pos3 pos3)',
        ]
        p8.setWinningCondition(required, 'puzzle8_all_forbidden.txt')
        self.assertFalse(p8.isWon())

        solver = SolverDFS(p8, ((1, 2, 3), (4, 5, 6), (7, 8, -1)))

        self.runPlayXSteps(
            solver,
            [
                # [step, expected game state]
                [9, ((5, 4, 3), (6, 1, -1), (7, 2, 8))],
                [17, ((5, -1, 4), (2, 1, 3), (6, 7, 8))],
                [34, ((5, 4, -1), (3, 2, 1), (6, 7, 8))],
            ])

    def test08_BFS_8Puzzle(self):
        p8 = Puzzle8Game()
        p8.read('puzzle8_top_right_empty.txt')
        required = [
            'fact: (movable tile6 pos3 pos2 pos3 pos3)',
            'fact: (movable tile8 pos2 pos3 pos3 pos3)',
        ]
        p8.setWinningCondition(required, 'puzzle8_all_forbidden.txt')
        self.assertFalse(p8.isWon())

        solver = SolverBFS(p8, ((1, 2, 3), (4, 5, 6), (7, 8, -1)))

        self.runPlayXSteps(
            solver,
            [
                # [step, expected game state]
                [5, ((5, 4, 8), (6, -1, 1), (7, 3, 2))],
                [13, ((5, 4, 8), (-1, 6, 1), (7, 3, 2))],
                [21, ((6, 5, 4), (1, -1, 8), (7, 3, 2))],
            ])
Example #56
0
import zmq
from queue import Queue
from multiprocessing.pool import ThreadPool

import workers.parseworker  # project-specific worker module used below

TIMEOUT = 1000  # poll timeout in milliseconds (assumed value)

context = zmq.Context()

pool = ThreadPool(processes=1)
Messq = Queue()

publishsocket = context.socket(zmq.PUB)
publishsocket.bind("tcp://*:12345")

receivesocket = context.socket(zmq.PULL)
receivesocket.bind("tcp://*:23456")

SessionList = {}

while True:
    poller = zmq.Poller()
    poller.register(receivesocket, zmq.POLLIN)
    event = dict(poller.poll(TIMEOUT))

    if event:
        if event.get(receivesocket) == zmq.POLLIN:
            received_message = receivesocket.recv_string()  # blocking call
            #print("message received "+ received_message)
            # Pass the callable and its arguments to the pool instead of
            # calling getFlag inline (which would run it on this thread).
            result = pool.apply_async(
                workers.parseworker.getFlag, (received_message, Messq))
            React = Messq.get()
            print("THIS IS OUTSIDE THE THREAD " + React)
    else:
        print("Nothing yet")

    #publishsocket.send(worker.result)
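A hedged companion sketch, not part of the original snippet: the loop above only consumes what something pushes to port 23456, and it publishes nothing because the publish line is commented out. Run as a separate process, a minimal client could be wired up as follows; the payload text is a placeholder, since the real message format is whatever workers.parseworker.getFlag expects.

import zmq

client_context = zmq.Context()

# PUSH side of the PULL socket bound above on tcp://*:23456
push = client_context.socket(zmq.PUSH)
push.connect("tcp://localhost:23456")
push.send_string("example payload")  # placeholder message

# SUB side of the PUB socket bound above on tcp://*:12345
sub = client_context.socket(zmq.SUB)
sub.connect("tcp://localhost:12345")
sub.setsockopt_string(zmq.SUBSCRIBE, "")  # subscribe to every topic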
Example #57
0
class NotificationProtocol(object):
    """ The protocol which hand shakes with external devices about the\
        database and starting execution
    """

    def __init__(self, socket_addresses, wait_for_read_confirmation):
        self._socket_addresses = socket_addresses

        # Determines whether to wait for confirmation that the database
        # has been read before starting the simulation
        self._wait_for_read_confirmation = wait_for_read_confirmation
        self._wait_pool = ThreadPool(processes=1)
        self._data_base_message_connections = list()
        for socket_address in socket_addresses:
            self._data_base_message_connections.append(UDPEIEIOConnection(
                local_port=socket_address.listen_port,
                remote_host=socket_address.notify_host_name,
                remote_port=socket_address.notify_port_no))

    def wait_for_confirmation(self):
        """ if asked to wait for confirmation, waits for all external systems\
            to confirm that they are configured and have read the database

        :return:
        """
        logger.info("*** Awaiting for a response from an external source "
                    "to state its ready for the simulation to start ***")
        self._wait_pool.close()
        self._wait_pool.join()

    def send_start_notification(self):
        """ either waits till all sources have confirmed read the database and\
            are configured, and/or just sends the start notification\
            (when the system is executing)

        :return:
        """
        if self._wait_for_read_confirmation:
            self.wait_for_confirmation()
        eieio_command_message = DatabaseConfirmation()
        for connection in self._data_base_message_connections:
            connection.send_eieio_message(eieio_command_message)

    # noinspection PyPep8
    def send_read_notification(self, database_path):
        """
        Sends notifications to all devices which have expressed an interest in
        when the database has been written.
        :param database_path: the path to the database file
        :return:
        """
        if database_path is not None:
            self._wait_pool.apply_async(self._send_read_notification,
                                        args=[database_path])

    def _send_read_notification(self, database_path):
        """
        Sends notifications to a list of socket addresses that the database
        has been written. The message also includes the path to the database.

        :param database_path: the path to the database
        :return: None

        """
        # noinspection PyBroadException
        if database_path is not None:
            try:
                self._sent_visualisation_confirmation = True

                # add file path to database into command message.
                number_of_chars = len(database_path)
                if number_of_chars > constants.MAX_DATABASE_PATH_LENGTH:
                    raise exceptions.ConfigurationException(
                        "The file path to the database is too large to be "
                        "transmitted via the command packet, "
                        "please set the file path manually and "
                        "set the .cfg parameter [Database] send_file_path "
                        "to False")
                eieio_command_message = DatabaseConfirmation(database_path)

                # Send command and wait for response
                logger.info(
                    "*** Notifying external sources that the database is "
                    "ready for reading ***")

                # noinspection PyBroadException
                try:
                    for connection in self._data_base_message_connections:
                        connection.send_eieio_message(eieio_command_message)

                    # if the system needs to wait, try receiving a packet back
                    if self._wait_for_read_confirmation:
                        for connection in self._data_base_message_connections:
                            connection.receive_eieio_message()
                    logger.info("*** Confirmation received, continuing ***")
                except Exception:
                    logger.warning("*** Failed to notify external application"
                                   " about the database - continuing ***")

            except Exception:
                traceback.print_exc()

    def close(self):
        """ Closes the thread pool
        :return:
        """
        self._wait_pool.close()
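A rough usage sketch, not from the original code: SocketAddress below is a stand-in namedtuple for whatever object carries listen_port, notify_host_name and notify_port_no in the real project, and the ports, host and database path are illustrative values only.

from collections import namedtuple

SocketAddress = namedtuple(
    "SocketAddress", ["listen_port", "notify_host_name", "notify_port_no"])

addresses = [SocketAddress(19999, "192.168.0.2", 19998)]
protocol = NotificationProtocol(addresses, wait_for_read_confirmation=True)

# Announce the database asynchronously, then block in send_start_notification
# until the external device has confirmed that it read the database.
protocol.send_read_notification("/path/to/input_output_database.db")
protocol.send_start_notification()
protocol.close()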
Example #58
0
    def submit_batch(self,
                     job_files,
                     threads=None,
                     chunk_size=None,
                     callback=None,
                     **kwargs):
        """
        Submits a batch of jobs given by *job_files* via a thread pool of size *threads* which
        defaults to its instance attribute. When *chunk_size*, which defaults to
        :py:attr:`chunk_size_submit`, is not negative, *job_files* are split into chunks of that
        size which are passed to :py:meth:`submit`. When *callback* is set, it is invoked after each
        successful job submission with the index of the corresponding job file (starting at 0) and
        either the assigned job id or an exception if any occurred. All other *kwargs* are passed to
        :py:meth:`submit`.

        The return value is a list containing the return values of the particular :py:meth:`submit`
        calls, in an order that corresponds to *job_files*. When an exception was raised during a
        submission, this exception is added to the returned list.
        """
        # default arguments
        threads = max(threads or self.threads or 1, 1)

        # is chunking allowed?
        if self.chunk_size_submit:
            chunk_size = max(chunk_size or self.chunk_size_submit, 0)
        else:
            chunk_size = 0
        chunking = chunk_size > 0

        # build chunks (either job files one by one, or real chunks of job files)
        job_files = make_list(job_files)
        chunks = list(iter_chunks(job_files,
                                  chunk_size)) if chunking else job_files

        # factory to call the passed callback for each job file even when chunking
        def cb_factory(i):
            if not callable(callback):
                return None
            elif chunking:

                def wrapper(job_ids):
                    offset = sum(len(chunk) for chunk in chunks[:i])
                    for j in range(len(chunks[i])):
                        job_id = job_ids if isinstance(
                            job_ids, Exception) else job_ids[j]
                        callback(offset + j, job_id)

                return wrapper
            else:

                def wrapper(job_id):
                    callback(i, job_id)

                return wrapper

        # threaded processing
        pool = ThreadPool(threads)
        results = [
            pool.apply_async(self.submit, (v, ),
                             kwargs,
                             callback=cb_factory(i))
            for i, v in enumerate(chunks)
        ]
        pool.close()
        pool.join()

        # store return values or errors, same length as job files, independent of chunking
        if chunking:
            outputs = []
            for i, (chunk, res) in enumerate(six.moves.zip(chunks, results)):
                job_ids = get_async_result_silent(res)
                if isinstance(job_ids, Exception):
                    job_ids = len(chunk) * [job_ids]
                outputs.extend(job_ids)
        else:
            outputs = flatten(get_async_result_silent(res) for res in results)

        return outputs
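A hedged usage sketch of the batching behaviour documented above. MyJobManager, the job file names and the callback are hypothetical; the only assumption is a subclass that implements submit() and carries the threads and chunk_size_submit attributes read by submit_batch.

# Hypothetical job manager subclass; constructor arguments are assumptions.
manager = MyJobManager(threads=4, chunk_size_submit=10)

def on_submitted(index, job_id):
    # Called once per job file, with either the assigned job id or the
    # exception raised while submitting that file's chunk.
    print("job file {} -> {}".format(index, job_id))

job_files = ["job_{}.jdl".format(i) for i in range(25)]
job_ids = manager.submit_batch(job_files, callback=on_submitted)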
Example #59
0
class TestDNSVanillaAggressivePacketCapture(LocalPacketCaptureTestCase):

    '''Summary:

    Test whether DNS leaks during regular VPN connection.

    Details:

    This test makes lots of simultaneous DNS requests once the VPN is connected. It uses packet
    capture to check that none of those requests went outside of the VPN tunnel.

    Discussion:

    The test is very similar to TestDNSVanillaAggressive but checks for leaks via packet capture and
    not dig.

    Weaknesses:

    It is expected that DNS traffic goes through the VPN tunnel so this test does not look for
    traffic on the tunnel itself. This doesn't mean there aren't DNS leaks. It's entirely possible
    for traffic to leak through the tunnel by going to a public DNS server, e.g. 8.8.8.8. If it
    goes over the tunnel then it will be encrypted and have the user's IP hidden, but the request
    itself can end up on a logging DNS server.

    Scenarios:

    * Run on a system with DNS servers configured to be public IP addresses, e.g. 8.8.8.8.
    * Run on a system with DNS servers configured to be local IP addresses, e.g. 192.0.0.0/24. This
      is a common setup with home routers where the router acts as the DNS server.

    '''

    # TODO: Potentially make configurable
    HOSTNAMES = [
        'google.com', 'twitter.com', 'facebook.com', 'stackoverflow.com', 'yahoo.com', 'amazon.com',
    ]

    NUM_PROCESSES = 10

    def __init__(self, devices, parameters):
        super().__init__(devices, parameters)
        self.dns_servers_before_connect = []
        self.thread_pool = ThreadPool(processes=TestDNSVanillaAggressivePacketCapture.NUM_PROCESSES)
        self.results = []
        self.semaphore = Semaphore(value=0)

    def filter_packets(self, packets):
        packets = super().filter_packets(packets)
        # Note that this filter does a reverse match, i.e. we match against port 53 but keep just
        # those packets
        just_port_53_packets = self.traffic_filter.filter_traffic(packets, dst_port=53)[0]
        return just_port_53_packets

    def thread_should_exit(self):
        return self.semaphore.acquire(blocking=False)

    def do_dns_requests(self):
        while not self.thread_should_exit():
            try:
                hostname = random.choice(TestDNSVanillaAggressivePacketCapture.HOSTNAMES)
                self.localhost['dns_tool'].lookup(hostname)
            except XVEx as ex:
                L.debug("DNS lookup failed. Not considering this an error: {}".format(ex))

    def start_traffic_generation(self):
        L.info("Starting background DNS lookup threads")
        # TODO: Consider making this into a generic traffic generator.
        for _ in range(0, TestDNSVanillaAggressivePacketCapture.NUM_PROCESSES):
            self.results.append(self.thread_pool.apply_async(self.do_dns_requests))

    def stop_traffic_generation(self):
        L.info("Stopping background DNS lookup threads")
        for _ in range(0, TestDNSVanillaAggressivePacketCapture.NUM_PROCESSES):
            self.semaphore.release()

        # There is no result returned from do_dns_requests, but .get() will propagate any
        # exception thrown by do_dns_requests, which is what we want.
        for result in self.results:
            result.get()
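The stop mechanism above, a semaphore released once per worker and polled non-blockingly inside each worker loop, generalises beyond DNS lookups. A minimal standalone sketch of the same pattern, independent of the test harness:

from multiprocessing.pool import ThreadPool
from threading import Semaphore

NUM_WORKERS = 4
stop = Semaphore(value=0)

def worker():
    # Keep working until one of the releases below is consumed.
    while not stop.acquire(blocking=False):
        pass  # one unit of background work goes here

pool = ThreadPool(processes=NUM_WORKERS)
results = [pool.apply_async(worker) for _ in range(NUM_WORKERS)]

# ... run the main test body while the workers generate traffic ...

for _ in range(NUM_WORKERS):
    stop.release()  # each release lets exactly one worker exit its loop
for r in results:
    r.get()  # re-raises any exception raised inside a worker
pool.close()
pool.join()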
Example #60
0
    def map(self,
            func,
            iterdata,
            extra_env=None,
            extra_meta=None,
            invoke_pool_threads=64,
            data_all_as_one=True,
            use_cached_runtime=True,
            overwrite_invoke_args=None):
        """
        # FIXME work with an actual iterable instead of just a list

        data_all_as_one : upload the data as a single s3 object; fewer
        tcp transactions (good) but potentially higher latency for workers (bad)

        use_cached_runtime : if runtime has been cached, use that. When set
        to False, redownloads runtime.
        """

        host_job_meta = {}

        pool = ThreadPool(invoke_pool_threads)
        callset_id = s3util.create_callset_id()
        data = list(iterdata)

        ### pickle func and all data (to capture module dependencies)
        serializer = serialize.SerializeIndependent()
        func_and_data_ser, mod_paths = serializer([func] + data)

        func_str = func_and_data_ser[0]
        data_strs = func_and_data_ser[1:]
        data_size_bytes = sum(len(x) for x in data_strs)
        s3_agg_data_key = None
        host_job_meta['agg_data_in_s3'] = False
        host_job_meta['data_size_bytes'] = data_size_bytes

        if data_size_bytes < wrenconfig.MAX_AGG_DATA_SIZE and data_all_as_one:
            s3_agg_data_key = s3util.create_agg_data_key(
                self.s3_bucket, self.s3_prefix, callset_id)
            agg_data_bytes, agg_data_ranges = self.agg_data(data_strs)
            agg_upload_time = time.time()
            self.s3client.put_object(Bucket=s3_agg_data_key[0],
                                     Key=s3_agg_data_key[1],
                                     Body=agg_data_bytes)
            host_job_meta['agg_data_in_s3'] = True
            host_job_meta['data_upload_time'] = time.time() - agg_upload_time
            host_job_meta['data_upload_timestamp'] = time.time()
        else:
            # FIXME add warning that you wanted data all as one but
            # it exceeded max data size
            pass

        module_data = self.create_mod_data(mod_paths)
        func_str_encoded = wrenutil.bytes_to_b64str(func_str)
        #debug_foo = {'func' : func_str_encoded,
        #             'module_data' : module_data}

        #pickle.dump(debug_foo, open("/tmp/py35.debug.pickle", 'wb'))
        ### Create func and upload
        func_module_str = json.dumps({
            'func': func_str_encoded,
            'module_data': module_data
        })
        host_job_meta['func_module_str_len'] = len(func_module_str)

        func_upload_time = time.time()
        s3_func_key = s3util.create_func_key(self.s3_bucket, self.s3_prefix,
                                             callset_id)
        self.s3client.put_object(Bucket=s3_func_key[0],
                                 Key=s3_func_key[1],
                                 Body=func_module_str)
        host_job_meta['func_upload_time'] = time.time() - func_upload_time
        host_job_meta['func_upload_timestamp'] = time.time()

        def invoke(data_str,
                   callset_id,
                   call_id,
                   s3_func_key,
                   host_job_meta,
                   s3_agg_data_key=None,
                   data_byte_range=None):
            s3_data_key, s3_output_key, s3_status_key \
                = s3util.create_keys(self.s3_bucket,
                                     self.s3_prefix,
                                     callset_id, call_id)

            host_job_meta['job_invoke_timestamp'] = time.time()

            if s3_agg_data_key is None:
                data_upload_time = time.time()
                self.put_data(s3_data_key, data_str, callset_id, call_id)
                data_upload_time = time.time() - data_upload_time
                host_job_meta['data_upload_time'] = data_upload_time
                host_job_meta['data_upload_timestamp'] = time.time()

                data_key = s3_data_key
            else:
                data_key = s3_agg_data_key

            return self.invoke_with_keys(
                s3_func_key,
                data_key,
                s3_output_key,
                s3_status_key,
                callset_id,
                call_id,
                extra_env,
                extra_meta,
                data_byte_range,
                use_cached_runtime,
                host_job_meta.copy(),
                self.job_max_runtime,
                overwrite_invoke_args=overwrite_invoke_args)

        N = len(data)
        call_result_objs = []
        for i in range(N):
            call_id = "{:05d}".format(i)

            data_byte_range = None
            if s3_agg_data_key is not None:
                data_byte_range = agg_data_ranges[i]

            cb = pool.apply_async(
                invoke,
                (data_strs[i], callset_id, call_id, s3_func_key,
                 host_job_meta.copy(), s3_agg_data_key, data_byte_range))

            logger.info("map {} {} apply async".format(callset_id, call_id))

            call_result_objs.append(cb)

        res = [c.get() for c in call_result_objs]
        pool.close()
        pool.join()
        logger.info("map invoked {} {} pool join".format(callset_id, call_id))

        # FIXME take advantage of the callset to return a lot of these

        # note these are just the invocation futures

        return res
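A hedged usage sketch of how a caller might drive this map; executor stands for an instance of the class above, already wired up with its S3 bucket, prefix and invoker, none of which are shown in this excerpt.

def increment(x):
    return x + 1

# Keyword arguments mirror the signature above; values are illustrative.
invocation_results = executor.map(
    increment,
    range(10),
    invoke_pool_threads=16,   # size of the local invocation thread pool
    data_all_as_one=True)     # aggregate all inputs into one S3 object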