Example #1
    def submit(self, df: pd.DataFrame, job_opts: JobOpts, delay=0.02, progressbar=True):
        """Submit jobs to the cluster.

        You have to establish a connection first (explicit is better than implicit).

        Examples:
            >>> with js.connect():
            ...     js.submit([(0, 'echo "Hello world!"'), (1, 'echo "Goodbye world!"')])
        """
        assert 'system_command' in df
        assert not df.duplicated().any()

        job_opts.working_dir.joinpath(job_opts.job_id).mkdir(parents=True, exist_ok=True)

        if self.host_opts.scheme in ['local']:
            worker = functools.partial(self._local_worker, job_opts=job_opts)
        else:
            worker = functools.partial(self._remote_worker, job_opts=job_opts)

        # Submit multiple jobs in parallel
        futures = []
        pool = concurrent.futures.ThreadPoolExecutor()
        for row in self._itertuples(df, progressbar=progressbar):
            future = pool.submit(worker, row)
            futures.append(future)
            time.sleep(delay)
        pool.shutdown(wait=False)
        return futures
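
The snippet above staggers submissions and hands the futures back without blocking, via pool.shutdown(wait=False). Below is a minimal, self-contained sketch of that fire-and-return pattern; the command list and helper name are illustrative, not from the source.

import concurrent.futures
import subprocess
import time

def submit_commands(commands, delay=0.02):
    # Submit each command with a small stagger and return the futures
    # without waiting on them; the caller decides when (or whether) to wait.
    pool = concurrent.futures.ThreadPoolExecutor()
    futures = []
    for cmd in commands:
        futures.append(pool.submit(subprocess.run, cmd, shell=True,
                                   capture_output=True))
        time.sleep(delay)
    pool.shutdown(wait=False)  # don't block; workers finish in the background
    return futures

futures = submit_commands(['echo "Hello world!"', 'echo "Goodbye world!"'])
for future in concurrent.futures.as_completed(futures):
    print(future.result().stdout.decode().strip())
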
Example #2
def test_urls(urls, results):
    loop = asyncio.get_event_loop()
    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=50) as e:
        for url in urls:
            if url[:6].lower() == 'ftp://':
                future = loop.run_in_executor(e, test_ftp, url)
            else:
                future = loop.run_in_executor(e, partial(requests.get, headers={"user-agent":USER_AGENT},
                                              hooks={'response': get_a_byte}, verify=False,
                                              timeout=URL_TIMEOUT, stream=True), url)
            futures.append(future)
    for future in futures:
        try:
            res = yield from future
        except requests.exceptions.ProxyError:
            print('proxy error', urls[futures.index(future)])
            res = Exception()
        except (requests.exceptions.ReadTimeout, requests.packages.urllib3.exceptions.MaxRetryError,
                requests.exceptions.ConnectTimeout, requests.packages.urllib3.exceptions.ConnectTimeoutError,
                socket.timeout):
            print('timeout', urls[futures.index(future)])
            res = Exception()
        except (requests.exceptions.InvalidSchema, requests.exceptions.InvalidURL):
            print('invalidURL', urls[futures.index(future)])
            res = Response()
            res.status_code = 404
        except:
            import traceback
            traceback.print_exc()
            res = Exception()
        results.append(res)
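
The coroutine above pushes blocking requests calls onto a thread pool with loop.run_in_executor, using the pre-3.5 `yield from` style. Here is a minimal sketch of the same idea written with modern async/await; the URL checker is an illustrative stand-in for the requests/test_ftp setup.

import asyncio
import concurrent.futures
import urllib.request

def head_status(url):
    # Blocking check that runs inside the thread pool.
    with urllib.request.urlopen(url, timeout=10) as resp:
        return resp.status

async def test_urls(urls):
    loop = asyncio.get_running_loop()
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        futures = [loop.run_in_executor(executor, head_status, url) for url in urls]
        # return_exceptions=True stands in for the per-URL error handling above.
        return await asyncio.gather(*futures, return_exceptions=True)

print(asyncio.run(test_urls(['https://example.com', 'https://example.org'])))
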
Example #3
 def wrapper(*args, **kwargs):
     if list_of_dicts:
         for params in list_of_dicts:
             if type(params) is not dict:
                 raise TypeError('%r is not a dict.' % params)
         with concurrent.futures.ThreadPoolExecutor(max_workers) as executor:
             futures = []
             for params in list_of_dicts:
                 kwargs.update(params)
                 futures.append(executor.submit(_fn_with_traceback, method, *args, **kwargs))
             _handle_futures(futures)
         return
     if dict_of_lists:
         values_list = dict_of_lists.values()
         for values in values_list:
             if type(values) is not list:
                 raise TypeError('%r is not a list.' % values)
         values_groups = list(zip(*values_list))
         with concurrent.futures.ThreadPoolExecutor(max_workers) as executor:
             futures = []
             keys = dict_of_lists.keys()
             for values in values_groups:
                 params = dict(zip(keys, values))
                 kwargs.update(params)
                 futures.append(executor.submit(_fn_with_traceback, method, *args, **kwargs))
             _handle_futures(futures)
         return
Example #4
def submit_subdivide(thread_count, f, length, max_length):
    futures = []
    thread_pool = concurrent.futures.ThreadPoolExecutor(thread_count)
    # thread_pool = concurrent.futures.ProcessPoolExecutor(thread_count)
    for i1, i2 in list(subdivide(length, max_length=max_length)):
        futures.append(thread_pool.submit(f, i1, i2))
    return futures
Example #5
def file_as_training_vectors(filename, gram_size):
    worker_count = 8
    with ProcessPoolExecutor(max_workers=worker_count) as exe:
        itr = enumerate(file_as_posts(filename))
        futures = []
        for i in range(worker_count):
            try:
                idx, post = next(itr)
                futures.append(exe.submit(post_to_vector, post, gram_size))
            except StopIteration:
                break  # we've submitted all the tasks already!

        while True:
            done, not_done = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
            futures = []
            for i in done:
                yield from i.result()
                try:
                    idx, post = next(itr)
                    futures.append(exe.submit(post_to_vector, post, gram_size))
                except StopIteration:
                    # just wait it out
                    pass

            futures.extend(not_done)
            if len(futures) == 0:
                return
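
The generator above keeps at most worker_count tasks in flight, refilling with wait(FIRST_COMPLETED) as results come back. A minimal, generic sketch of that bounded-in-flight pattern, with an illustrative work function:

import concurrent.futures
import itertools

def bounded_map(func, iterable, worker_count=8):
    # Yield results while never having more than worker_count tasks submitted.
    itr = iter(iterable)
    with concurrent.futures.ThreadPoolExecutor(max_workers=worker_count) as exe:
        futures = [exe.submit(func, item)
                   for item in itertools.islice(itr, worker_count)]
        while futures:
            done, not_done = concurrent.futures.wait(
                futures, return_when=concurrent.futures.FIRST_COMPLETED)
            futures = list(not_done)
            for fut in done:
                yield fut.result()
                try:
                    futures.append(exe.submit(func, next(itr)))
                except StopIteration:
                    pass  # input exhausted; just drain what's left

print(list(bounded_map(lambda x: x * x, range(20), worker_count=4)))
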
Example #6
    def handle(self, *args, **options):
        if len(args) != 1:
            raise CommandError("Need to (only) specify project/branch")
        project = args[0]

        server_params = urlparse(options['server'])

        time_interval = options['time_interval']

        pc = PerfherderClient(protocol=server_params.scheme,
                              host=server_params.netloc)
        signatures = pc.get_performance_signatures(
            project,
            interval=time_interval)

        if options['filter_props']:
            for kv in options['filter_props']:
                if ':' not in kv or len(kv) < 3:
                    raise CommandError("Must specify --filter-props as "
                                       "'key:value'")
                k, v = kv.split(':')
                signatures = signatures.filter((k, v))

        with concurrent.futures.ProcessPoolExecutor(
                options['num_workers']) as executor:
            futures = []
            # add signatures without parents first, then those with parents
            with_parents = []
            for signature_hash in signatures.get_signature_hashes():
                if 'parent_signature' in signatures[signature_hash]:
                    with_parents.append(signature_hash)
                else:
                    futures.append(executor.submit(_add_series, pc,
                                                   project,
                                                   signature_hash,
                                                   signatures[signature_hash],
                                                   options['verbosity'],
                                                   time_interval=time_interval))
            for signature_hash in with_parents:
                parent_hash = signatures[signature_hash]['parent_signature']
                futures.append(executor.submit(_add_series, pc,
                                               project,
                                               signature_hash,
                                               signatures[signature_hash],
                                               options['verbosity'],
                                               time_interval=time_interval,
                                               parent_hash=parent_hash))

            for future in futures:
                try:
                    future.result()
                except Exception as e:
                    self.stderr.write("FAIL: {}".format(e))
                    # shutdown any pending tasks and exit (if something
                    # is in progress, we don't wait for it to stop)
                    executor.shutdown(wait=False)
                    for future in futures:
                        future.cancel()
                    raise CommandError(
                        "Failed to import performance data: {}".format(e))
Example #7
File: cum.py Project: CounterPillow/cum
def update():
    """Gather new chapters from followed series."""
    pool = concurrent.futures.ThreadPoolExecutor(config.get().download_threads)
    futures = []
    warnings = []
    aliases = {}
    query = db.session.query(db.Series).filter_by(following=True).all()
    output.series('Updating {} series'.format(len(query)))
    for follow in query:
        fut = pool.submit(series_by_url, follow.url)
        futures.append(fut)
        aliases[fut] = follow.alias
    with click.progressbar(length=len(futures), show_pos=True,
                           fill_char='>', empty_char=' ') as bar:
        for future in concurrent.futures.as_completed(futures):
            try:
                series = future.result()
            except requests.exceptions.ConnectionError as e:
                warnings.append('Unable to update {} (connection error)'
                                .format(aliases[future]))
            except exceptions.ScrapingError:
                warnings.append('Unable to update {} (scraping error)'
                                .format(aliases[future]))
            else:
                series.update()
            bar.update(1)
    for w in warnings:
        output.warning(w)
    list_new()
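
The update loop above keeps a future-to-alias dict so failures can be reported against the series that produced them. A minimal sketch of that future-to-metadata mapping, with an illustrative fetch function and URLs:

import concurrent.futures
import urllib.request

def fetch(url):
    with urllib.request.urlopen(url, timeout=10) as resp:
        return resp.status

urls = ['https://example.com', 'https://example.org']
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as pool:
    # Key the dict by the future itself so each result or error can be tied
    # back to the input that produced it.
    meta = {pool.submit(fetch, url): url for url in urls}
    for future in concurrent.futures.as_completed(meta):
        url = meta[future]
        try:
            print(url, future.result())
        except OSError as exc:
            print('Unable to fetch {} ({})'.format(url, exc))
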
Example #8
    def image_for_domain(self, target_domain, target_z):
        tiles = []

        def fetch_tile(tile):
            try:
                img, extent, origin = self.get_image(tile)
            except IOError:
                # Some services 404 for tiles that aren't supposed to be
                # there (e.g. out of range).
                raise
            img = np.array(img)
            x = np.linspace(extent[0], extent[1], img.shape[1])
            y = np.linspace(extent[2], extent[3], img.shape[0])
            return img, x, y, origin

        with concurrent.futures.ThreadPoolExecutor(
                max_workers=self._MAX_THREADS) as executor:
            futures = []
            for tile in self.find_images(target_domain, target_z):
                futures.append(executor.submit(fetch_tile, tile))
            for future in concurrent.futures.as_completed(futures):
                try:
                    img, x, y, origin = future.result()
                    tiles.append([img, x, y, origin])
                except IOError:
                    pass

        img, extent, origin = _merge_tiles(tiles)
        return img, extent, origin
Example #9
File: main.py Project: ussraf/unishark
 def _run_suites_concurrently(self, max_workers_on_suites):
     exit_code = 0
     suites = unishark.DefaultTestLoader(name_pattern=self.name_pattern).load_tests_from_dict(self.test_dict_conf)
     start_time = time.time()
     with concurrent.futures.ThreadPoolExecutor(max_workers_on_suites) as executor:
         futures = []
         for suite_name, suite_content in suites.items():
             package_name = suite_content['package']
             suite = suite_content['suite']
             max_workers = suite_content['max_workers']
             runner = unishark.BufferedTestRunner(reporters=self.reporters,
                                                  verbosity=self.verbosity,
                                                  descriptions=self.descriptions)
             future = executor.submit(runner.run, suite,
                                      name=suite_name,
                                      description='Package: ' + package_name,
                                      max_workers=max_workers)
             futures.append(future)
         for future in concurrent.futures.as_completed(futures):
             result = future.result()
             exit_code += 0 if result.wasSuccessful() else 1
     actual_duration = time.time() - start_time
     log.info('Actual total time taken: %.3fs' % actual_duration)
     for reporter in self.reporters:
         reporter.set_actual_duration(actual_duration)
         reporter.collect()
     return exit_code
Example #10
    def find(self, request):
        """Find the specified resource.

        :type request: FindRequest
        :rtype: FindResponse
        :raise: InvalidScheduler
        """
        if self._configured == ConfigStates.UNINITIALIZED:
            raise InvalidScheduler()

        # Host service only has a single scheduler
        request.scheduler_id = None

        futures = []
        for agent in self._get_hosts():
            future = self._threadpool.submit(
                self._find_worker, agent.address, agent.port,
                agent.id, request)
            futures.append(future)

        done, not_done = concurrent.futures.wait(futures, timeout=FIND_TIMEOUT)
        self._logger.info("Find responses received: %d, timed out: %d",
                          len(done), len(not_done))

        for future in done:
            response = future.result()
            if response.result == FindResultCode.OK:
                return response

        return FindResponse(FindResultCode.NOT_FOUND)
Example #11
def recalc_groups_pool(groups, newTable):
    with concurrent.futures.ProcessPoolExecutor() as executor:
        priorityLevel = (psutil.BELOW_NORMAL_PRIORITY_CLASS
                         if sys.platform == 'win32' else 10)
        parent = psutil.Process()
        parent.nice(priorityLevel)
        for child in parent.children():
            child.nice(priorityLevel)

        futures = []
        for groupkey in sorted(groups):
            entry = groups[groupkey]
            for idx, case in enumerate(entry['cases']):
                future = executor.submit(recalc_case, case, newTable)
                future._info = (entry, idx)
                futures.append(future)
        for future in concurrent.futures.as_completed(futures):
            entry, idx = future._info
            power_factor = future.result()
            entry['pf'][idx] = power_factor
            sys.stdout.write('%s %d %3.1f %3.1f %3.1f  \r' % (
                entry['key'], idx, power_factor,
                entry['entries'][idx]['power_factor'],
                power_factor - entry['entries'][idx]['power_factor']))
            sys.stdout.flush()
Example #12
def main2():
    #go database
    print '---'    
    annotationArray = defaultdict(list)
    with open('annotations.dump','rb') as f:
        ar = pickle.load(f)
    modelAnnotations = Counter()

    annotationSet = set()
    futures = []

    for idx, element in enumerate(ar):
        for index in element:
            for annotation in element[index]:
                    annotationSet.add(annotation)
    print len(annotationSet)
    with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
        for annotation in annotationSet:
            futures.append(executor.submit(resolveAnnotation, annotation))
        for future in concurrent.futures.as_completed(futures):
            resolvedAnnotation = future.result()
            if type(resolvedAnnotation) is list:
                annotationArray[resolvedAnnotation[0]].extend(resolvedAnnotation[1])
            else:
                annotationArray[resolvedAnnotation[0]].append(resolvedAnnotation[1])
    
        #annotationArray.append(modelAnnotations)
    with open('parsedAnnotations.dump','wb') as f:
        pickle.dump(annotationArray,f)
Example #13
File: mrzero.py Project: mgeisler/mrzero
    def cleanup(self, timeout=45):
        """Cleanup errors/results for job.

        Try to do this asynchronously.
        """
        def should_cleanup(name):
            """Determine whether the object should be deleted."""
            sw = lambda substr: name.startswith(substr)
            ew = lambda substr: name.endswith(substr)
            return any((sw('errors'), sw('results'), ew('.err'), ew('.pyc')))

        with concurrent.futures.ThreadPoolExecutor(CPU_COUNT*128) as cleanup_pool:
            futures = []
            for r in self.list_objects(self.jobtainer, select='name'):
                if should_cleanup(r):
                    futures.append(
                        cleanup_pool.submit(self.client.delete_object,
                                            self.jobtainer, r))
                    time.sleep()
            if futures:
                score = concurrent.futures.wait(futures, timeout=timeout)
                if score.done and not score.not_done:
                    print ("\nFinished cleaning up [%s object(s)] "
                           "from previous run." % len(score.done))
                else:
                    print ("Timed out after %ss: Cleaned up [%s object(s)] "
                           "and left behind some ( ~%s )"
                           % (timeout, len(score.done), len(score.not_done)))
            else:
                print "Nothing to clean up."
Example #14
    def fit(self, data, labels, n_jobs=None):
        """
        Train a random forest.

        :param data: the data
        :param labels: classes of the data
        :param n_jobs: number of parallel jobs
        """
        if n_jobs == 1 or multiprocessing.cpu_count() == 1:
            for tree in self._trees:
                tree.fit(data, labels)
        else:
            with concurrent.futures.ProcessPoolExecutor(n_jobs) as executor:
                futures = []
                for i, tree in enumerate(self._trees):
                    sample_indices = numpy.random.random_integers(0, data.shape[0]-1, data.shape[0])
                    tree_data = data[sample_indices]
                    tree_labels = labels[sample_indices]
                    futures.append((i, executor.submit(train_single_tree, tree, tree_data, tree_labels)))
                for i, future in futures:
                    self._trees[i] = future.result()

        self._label_names = self._trees[0].classes()
        for tree in self._trees[1:]:
            assert (self._label_names == tree.classes()).all()
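
fit() above pairs each future with the index of the tree it will replace so results land in the right slot. A minimal sketch of that (index, future) bookkeeping, with an illustrative task:

import concurrent.futures

def square(x):
    return x * x

if __name__ == '__main__':
    values = [3, 1, 4, 1, 5]
    results = [None] * len(values)
    with concurrent.futures.ProcessPoolExecutor() as executor:
        # Remember each task's slot so the result is written back in place.
        futures = [(i, executor.submit(square, v)) for i, v in enumerate(values)]
        for i, future in futures:
            results[i] = future.result()
    print(results)  # [9, 1, 16, 1, 25]
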
Example #15
  def AddDictionaries(self, dicts):
    tsts = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
      for fdict in dicts:
        futures = list()
        words = list()
        letter = ""
        for word in [word.rstrip('\n') for word in open(fdict)]:
          # this assumes the dictionary is in lower-case order
          if word[0].lower() == letter.lower():
            words.append(word)
          else:
            tst = tsts.get(letter, TST())
            tsts[letter] = tst
            futures.append(executor.submit(self.FillTST, tst, words))
            letter = word[0].lower()
            words = list(word)

        for future in concurrent.futures.as_completed(futures):
          try:
            tst = future.result()
          except Exception as e:
            print e

    for tst in tsts.values():
      # TODO Add posibility to merge two tsts if they are overlapping
      self._tst.Take(tst)

    print 'Added {} words'.format(self._tst.Size())
Example #16
    def download_data_files(data_files):
        def download_file(file):
            with open(file['filename'], 'wb') as handle:
                headers = {'Accept-Encoding': 'gzip, deflate'}
                response = requests.get(file['url'], headers=headers, stream=True)

                if not response.ok:
                    print('file: "{0}"; url: {1}\n {2}'.format(file['filename'], file['url'], response.reason))

                for block in response.iter_content(1024):
                    handle.write(block)

            return "file [{0}] has finished downloading".format(file['filename'])

        with concurrent.futures.ThreadPoolExecutor(max_workers=len(data_files)) as executor:
            futures = []
            for df in data_files:
                futures.append(executor.submit(download_file, df))

            for future in concurrent.futures.as_completed(futures):
                try:
                    print(future.result())
                except Exception as e:
                    template = 'exception of type {0} occurred. \narguments:\n{1} \nmessage: {2}'
                    message = template.format(type(e).__name__, e.args, e)
                    print(message)
Example #17
def threadpool_map(task, args, message, concurrency, batchsize=1):
    """
    Helper to map a function over a range of inputs, using a threadpool, with a progress meter
    """
    import concurrent.futures

    njobs = len(args)
    batches = grouper(batchsize, tupleise(args))
    batched_task = lambda batch: [task(*job) for job in batch]
    PROGRESS = message is not None
    if PROGRESS:
        message += ' (TP:{}w:{}b)'.format(concurrency, batchsize)
        pbar = setup_progressbar(message, njobs, simple_progress=True)
        pbar.start()
    with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
        futures = []
        completed_count = 0
        for batch in batches:
            futures.append(executor.submit(batched_task, batch))

        if PROGRESS:
            for i, fut in enumerate(concurrent.futures.as_completed(futures), start=1):
                completed_count += len(fut.result())
                pbar.update(completed_count)

        else:
            concurrent.futures.wait(futures)

    if PROGRESS:
        pbar.finish()

    return flatten_list([fut.result() for fut in futures])
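
threadpool_map above groups many small jobs into batches so each submitted task amortizes the per-future overhead. A minimal sketch of that batching idea; the grouper here is a simplified stand-in for the helper referenced in the source, and the progress meter is omitted:

import concurrent.futures

def grouper(batchsize, iterable):
    # Simplified stand-in: yield lists of up to batchsize items.
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == batchsize:
            yield batch
            batch = []
    if batch:
        yield batch

def threadpool_map_simple(task, args, concurrency=4, batchsize=10):
    run_batch = lambda batch: [task(arg) for arg in batch]
    with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
        futures = [executor.submit(run_batch, batch)
                   for batch in grouper(batchsize, args)]
        # Flatten the per-batch result lists in submission order.
        return [result for fut in futures for result in fut.result()]

print(threadpool_map_simple(lambda x: x + 1, range(25)))
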
Example #18
def main(path_or_paths, output_dir=None, verbose=1):
    if isinstance(path_or_paths, basestring):
        path_or_paths = [path_or_paths]

    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = []
        for p in path_or_paths:
            if os.path.isdir(p):
                for file_n, pdffile in enumerate(util.find_files(p, '*.pdf')):
                    try:
                        futures.append(executor.submit(
                            convert_and_write, pdffile, output_dir, True))
                    except (PDFException, PSException):
                        print("Skipping {0} due to PDF Exception".format(pdffile))
            else:
                pdffile = p
                futures.append(executor.submit(convert_and_write, pdffile, output_dir, True, True))

        if verbose == 1:
            pbar = ProgressBar(widgets=[Percentage(), Bar()],
                               maxval=len(futures)).start()

            for file_n, f in enumerate(concurrent.futures.as_completed(futures)):
                pbar.update(file_n)

            pbar.finish()
Example #19
def write_frame_to_buffer(np_array):
    # don't allow 255 in the byte sequence for the control header
    np_array = (np_array * BRIGHTNESS_MULTIPLIER).astype(np.uint8)
    np_array[np_array == 255] = 254

    # swap rgb bits to match LED byte order
    np_array = np_array[:, :, BRG_ORDER]
    _mutate_for_alternating_led_strips(np_array)

    if SHOULD_REVERSE_ROWS:
        np_array = np_array[::-1, :, :]

    if SHOULD_REVERSE_COLS:
        np_array = np_array[:, ::-1, :]

    sleep_to_maintain_framerate()

    futures = []
    for shard_index in xrange(NUM_SHARDS):
        futures.append(
            GLOBAL_THREAD_POOL.submit(
                _submit_frame_to_serial_interface,
                ordered_serial_interfaces[shard_index],
                _sharded_np_array(np_array, shard_index),
                # _output,
                # _sharded_np_array(np_array, shard_index),
            )
        )
    _block_for_futures(futures)
Example #20
 def upload(self):
     futures = deque([self.executor.submit(self.root_node.upload)])
     while futures:
         current = futures.popleft().result()
         for child in current.children():
             futures.append(self.executor.submit(child.upload))
     print("")
     self.sync_progress.report_and_wait_for_uploads()
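
upload() above walks a tree breadth-first: a deque holds the futures, and each finished node enqueues futures for its children. A minimal sketch of that traversal with an illustrative Node class:

import concurrent.futures
from collections import deque

class Node:
    def __init__(self, name, children=()):
        self.name = name
        self._children = list(children)

    def upload(self):
        return self  # stand-in for the real upload work

    def children(self):
        return self._children

root = Node('root', [Node('a', [Node('a1')]), Node('b')])
executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)
futures = deque([executor.submit(root.upload)])
while futures:
    current = futures.popleft().result()
    print('uploaded', current.name)
    for child in current.children():
        futures.append(executor.submit(child.upload))
executor.shutdown()
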
Example #21
def update_timeline():
    futures = []
    with concurrent.futures.ProcessPoolExecutor(max_workers=50) as executor:
        for user in usercredentials.find():
            # print(user)
            futures.append(executor.submit(contact_transmission, user=user, xTransmissionSessionId=""))
    for future in concurrent.futures.as_completed(futures):
        future.result()
Example #22
    def compute(self, *args, **kwargs):
        """ Compute dask collections on cluster

        Parameters
        ----------
        args: iterable of dask objects
            Collections like dask.array or dataframe or dask.value objects
        sync: bool (optional)
            Returns Futures if False (default) or concrete values if True

        Returns
        -------
        Tuple of Futures or concrete values

        Examples
        --------

        >>> from dask import do, value
        >>> from operator import add
        >>> x = dask.do(add)(1, 2)
        >>> y = dask.do(add)(x, x)
        >>> xx, yy = executor.compute(x, y)  # doctest: +SKIP
        >>> xx  # doctest: +SKIP
        <Future: status: finished, key: add-8f6e709446674bad78ea8aeecfee188e>
        >>> xx.result()  # doctest: +SKIP
        3
        >>> yy.result()  # doctest: +SKIP
        6
        """
        sync = kwargs.pop('sync', False)
        assert not kwargs
        if sync:
            return dask.compute(*args, get=self.get)

        variables = [a for a in args if isinstance(a, Base)]

        groups = groupby(lambda x: x._optimize, variables)
        dsk = merge([opt(merge([v.dask for v in val]),
                         [v._keys() for v in val])
                    for opt, val in groups.items()])
        names = ['finalize-%s' % tokenize(v) for v in variables]
        dsk2 = {name: (v._finalize, v, v._keys()) for name, v in zip(names, variables)}

        self.loop.add_callback(self.scheduler_queue.put_nowait,
                                {'op': 'update-graph',
                                'dsk': merge(dsk, dsk2),
                                'keys': names})

        i = 0
        futures = []
        for arg in args:
            if isinstance(arg, Base):
                futures.append(Future(names[i], self))
                i += 1
            else:
                futures.append(arg)

        return futures
Example #23
    def _doMoveAbs(self, pos, **kwargs):
        futures = []
        for child, move in self._moveToChildMove(pos).items():
            f = child.moveAbs(move, **kwargs)
            futures.append(f)

        # just wait for all futures to finish
        for f in futures:
            f.result()
Example #24
def distributedMerging(datafilenames, outputdir, workers):
    workers = mp.cpu_count() - 1
    progress = progressbar.ProgressBar(maxval=options['repetitions']).start()
    i = 0
    print 'running in {0} cores'.format(workers)
    futures = []
    with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as executor:
        workunit = min(len(datafilenames)/workers, 2)
        for i in range(0, len(datafilenames), workunit):
            futures.append(executor.submit(mergedataframe, datafilenames[i:i+workunit], outputdir, i))
Example #25
	def try_to_connect(self):
		"""
		Use the credentials and try to establish SSH connections to
		the host.
		"""
		LOG.debug("         Number of hosts to test: {num}".format(num=len(self.hosts)))
		LOG.debug("             Number of usernames: {num}".format(num=len(self.users)))
		LOG.debug("             Number of passwords: {num}".format(num=len(self.passwords)))
		LOG.debug("Total number of tests to conduct: {num}".format(num=len(self.hosts) * len(self.users) * len(self.passwords)))

		# determine the needed size of the thread pool but keep the upper
		# limit into consideration
		num_workers = 0
		if self.num_threads > 0:
			num_workers = min(self.num_threads, len(self.hosts) * len(self.users) * len(self.passwords))
		else:
			num_workers = len(self.hosts) * len(self.users) * len(self.passwords)

		LOG.debug("Initializing empty list for successful connections")
		self.successful_connections = {}
		LOG.debug("Length of list: {}".format(len(self.successful_connections)))

		LOG.debug("Initializing worker pool")
		self._create_worker_pool(num_workers)

		LOG.debug("Using pool with %d workers" % (num_workers))

		futures = []

		if self.pool is not None:
			for hostline in self.hosts:
				hostline = hostline.strip()
				try:
					host, port = hostline.split(":", 1)
				except:
					host = hostline
					port = 22

				try:
					port = int(port)
				except:
					port = 22

				for user in self.users:
					for passwd in self.passwords:
						# submit the job to the ThreadPoolExecutor
						a = self.pool.submit(self.ssh_connect, user, passwd, host, port)
						futures.append(a)
		else:
			LOG.error("Worker pool not initialized. Skipping ...")


		LOG.debug("Waiting for tasks to complete")
		results = concurrent.futures.wait(futures, timeout=120)
		LOG.debug("{} tasks completed".format(len(results.done)))

		LOG.debug("Successful connections: %d\n%s" % (len(self.successful_connections), self.successful_connections))
Example #26
 async def map(self, func, delay=0, *args, **kwargs):
     """Execute a blocking func with args/kwargs across all instances."""
     futures = []
     for x in self.instances:
         fut = self.execute(func, x, *args, **kwargs)
         futures.append(fut)
         if delay:
             await self.wait(delay)
     results = await gen.multi(futures)
     return results
Example #27
    def _doReference(self, axes):
        child_to_axes = self._axesToChildAxes(axes)
        futures = []
        for child, a in child_to_axes.items():
            f = child.reference(a)
            futures.append(f)

        # just wait for all futures to finish
        for f in futures:
            f.result()
Example #28
def run():
    # Parse command line
    args = parse_args()

    # import config file module
    import_config(args.config)

    # Configure logging
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                        level=config.verb_level)
    if args.debug:
        logging.getLogger().setLevel('DEBUG')

    # register signal handling
    signal.signal(signal.SIGUSR1, __usr1_handler)
    signal.signal(signal.SIGALRM, __alarm_handler)

    # register report signal interval
    if config.emails.report.every > 0:
        signal.setitimer(signal.ITIMER_REAL, config.emails.report.every,
                                             config.emails.report.every)

    # do the actual polling
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        if args.one:
            def runner(check):
                return check.run(immediate=True), check

            futures = []
            for check in config.checks:
                futures.append(executor.submit(runner, check))

            for future in concurrent.futures.as_completed(futures):
                success, check = future.result()
                if success:
                    print("Check %s successful!" % (str(check)))
                else:
                    print("Check %s failed:\n%s" %
                          (str(check), check.errmsg.strip()))
        else:
            # Since we never reclaim finished tasks, exceptions raised during
            # run are never seen. Using a runner we can at least display them.
            def runner(check):
                try:
                    return check.run()
                except Exception as e:
                    traceback.print_exc()
                    raise e

            # This will drift slowly as it takes (base_tick + epsilon) seconds
            while True:
                for check in config.checks:
                    executor.submit(runner, check)
                sleep(config.base_tick)
    mails.quit()
Example #29
def download_images(image_urls, dst_dir, file_prefix="img", concurrency=50):
    with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
        futures = list()
        count = 0
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        for image_url in image_urls:
            file_name = file_prefix + "_" + "%03d" % count
            futures.append(executor.submit(
                download_image, image_url, dst_dir, file_name))
            count += 1
Example #30
def get_containers():
    result = []
    hosts = Host.query.all()
    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        for host in hosts:
            futures.append(executor.submit(get_container_from_host, host))
    for f in concurrent.futures.as_completed(futures):
        result = result + f.result()
    result.sort(key=lambda x:x['name'])
    return jsonify(result=result)
Example #31
def summarize_logs(logs_dir,
                   keys,
                   target_key,
                   objective,
                   sort=None,
                   show_range=True,
                   as_df=False,
                   key_remap=None):
    assert objective in ['min', 'max']
    assert target_key in keys
    if key_remap is None:
        key_remap = {}

    futures = []
    with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
        for name in os.listdir(logs_dir):
            future = executor.submit(
                summarize_log,
                logs_dir=logs_dir,
                name=name,
                keys=keys,
                target_key=target_key,
                objective=objective,
                show_range=show_range,
            )
            futures.append(future)

    rows = []
    ignored = []
    for future in futures:
        row, log_dir_ignored = future.result()
        if log_dir_ignored:
            ignored.append(log_dir_ignored)
            continue
        rows.append(row)

    if as_df:
        df = pandas.DataFrame(data=rows, columns=keys)
        return df

    if sort is None:
        sort = [keys[0]]

    for k in sort:
        rows = sorted(rows, key=lambda x: x[keys.index(k)])

    headers = [key_remap.get(key, key) for key in keys]
    print(
        tabulate.tabulate(rows,
                          headers=headers,
                          floatfmt='.3f',
                          tablefmt='simple',
                          numalign='center',
                          stralign='center',
                          showindex=True,
                          disable_numparse=True))

    if not ignored:
        return

    print('Ignored logs:')
    for log_dir in ignored:
        print('  - %s' % log_dir)
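
summarize_logs above reads future.result() only after the with block has closed, relying on the executor's exit waiting for all submitted work. A minimal sketch of that collect-after-the-with pattern:

import concurrent.futures

def summarize(n):
    return n, n * n

if __name__ == '__main__':
    futures = []
    with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
        for n in range(5):
            futures.append(executor.submit(summarize, n))
    # Every future is finished here because leaving the with block waited.
    rows = [future.result() for future in futures]
    print(rows)
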
Example #32
    def find_unique_contents(self):

        items = [
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000314,sig:06,src:009250,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000280,sig:11,src:009516,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000271,sig:06,src:009713,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000433,sig:06,src:009990+009218,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000561,sig:06,src:009947,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000214,sig:06,src:009254,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000330,sig:11,src:009249,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000311,sig:06,src:009338+006239,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000325,sig:06,src:010434,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000541,sig:06,src:009768+009462,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000217,sig:11,src:000680+009338,op:splice,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000555,sig:06,src:009799,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000300,sig:11,src:009192+009338,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000199,sig:06,src:008142,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000356,sig:06,src:009618+009240,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000534,sig:06,src:009947,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000410,sig:11,src:010138,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000272,sig:11,src:010046+009338,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000379,sig:06,src:010371,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000505,sig:06,src:010369+009555,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000124,sig:11,src:007443,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000323,sig:06,src:009885,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000238,sig:06,src:009305,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000342,sig:06,src:009313,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000512,sig:06,src:009419,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000430,sig:06,src:009538+009797,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000574,sig:06,src:009842,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000310,sig:06,src:009245,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000279,sig:06,src:009564,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000497,sig:06,src:009737,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000208,sig:06,src:009438,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000316,sig:06,src:009722,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000446,sig:11,src:009757,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000371,sig:11,src:009438+009538,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000273,sig:06,src:009262,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000302,sig:11,src:009594,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000322,sig:06,src:009223,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000219,sig:06,sync:slave0,src:010088",
            "/home/happy/afl_chengang2/tmp/findings/slave5/crashes/id:000428,sig:06,src:006631,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000309,sig:06,src:009871,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000392,sig:06,src:009604,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000275,sig:06,src:010004+009609,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000323,sig:06,src:010258,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000593,sig:11,src:010279,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000660,sig:06,src:010072,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000508,sig:06,src:010110,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000329,sig:11,src:009937,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000419,sig:06,src:010019,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000579,sig:06,src:009909,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000511,sig:06,src:010216,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000486,sig:06,src:007106+010660,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000229,sig:06,src:009441,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000349,sig:11,src:009606+008739,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000433,sig:06,src:010520,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000289,sig:06,src:009869,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000425,sig:06,src:009969,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000288,sig:06,src:009373+009605,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000503,sig:11,src:009526,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000227,sig:11,src:009735,op:havoc,rep:8",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000645,sig:06,src:008887,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000501,sig:11,src:010071,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000344,sig:06,src:010224+008865,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000400,sig:06,src:009797+003140,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000580,sig:06,src:009996,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000290,sig:11,src:009415,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000616,sig:06,src:010181+006212,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000257,sig:06,src:009531,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000655,sig:06,src:009794,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000394,sig:06,src:008051+010053,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000104,sig:11,src:006838,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000261,sig:06,src:009912,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000339,sig:06,src:009927+009700,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000432,sig:06,src:009950,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000573,sig:06,src:010100,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000228,sig:06,sync:master,src:007172",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000271,sig:06,src:009773+003816,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000477,sig:11,src:006389+009350,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000369,sig:06,src:009772,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000458,sig:11,src:009637+009617,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000252,sig:06,src:009775,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000516,sig:06,src:010490,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000614,sig:06,src:010147,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000249,sig:11,src:005978+009622,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000591,sig:11,src:010172+009922,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000441,sig:06,src:009940,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000283,sig:11,src:000825+009428,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000336,sig:06,src:009464,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000305,sig:06,src:009549,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000237,sig:11,src:004887+009350,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000485,sig:11,src:009482,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000582,sig:06,src:010602,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000497,sig:06,src:009968,op:havoc,rep:8",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000581,sig:06,src:010533+009218,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000272,sig:11,src:009842,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000222,sig:11,sync:slave6,src:009923",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000315,sig:11,src:009363,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000638,sig:11,src:009827,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000398,sig:06,src:010051,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000298,sig:11,src:002103+009672,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000332,sig:11,src:010017+010029,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000046,sig:11,src:000096+003539,op:splice,rep:32",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000455,sig:06,src:010053,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000372,sig:06,src:008651,op:havoc,rep:64",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000280,sig:06,src:010166,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000314,sig:06,src:000156+009834,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000387,sig:06,src:010044,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000297,sig:06,src:009648,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000633,sig:06,src:009781,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000381,sig:06,src:009519+009350,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000482,sig:11,src:009422,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000293,sig:06,src:009654+004280,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000401,sig:06,src:010146,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000426,sig:06,src:010172,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000543,sig:06,src:009545+009910,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000570,sig:06,src:010106,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000232,sig:11,src:009839,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000547,sig:06,src:010493+010044,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave4/crashes/id:000279,sig:11,src:010165,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000336,sig:11,src:009967,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000434,sig:06,src:010511,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000375,sig:11,src:009537+010502,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000267,sig:06,src:009803,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000381,sig:11,src:010372,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000274,sig:06,src:009816,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000218,sig:11,sync:slave2,src:009579",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000473,sig:11,src:000064+009924,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000324,sig:11,src:009910,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000431,sig:11,src:009771+010082,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000258,sig:06,src:009851,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000314,sig:11,src:009694,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000403,sig:06,src:009839,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000476,sig:06,src:010089+010096,op:splice,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000276,sig:11,src:010019,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000561,sig:06,src:010305+010189,op:splice,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000082,sig:06,src:005658,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000548,sig:06,src:008598+008956,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000353,sig:11,src:008531+010008,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000400,sig:06,src:010033,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000281,sig:11,src:009794+006971,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000391,sig:06,src:009911,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave6/crashes/id:000321,sig:06,src:010050,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave0/crashes/id:000394,sig:11,src:009930,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave0/crashes/id:000326,sig:06,src:009519,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave0/crashes/id:000332,sig:06,src:009465,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave0/crashes/id:000525,sig:06,src:009974+009663,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave0/crashes/id:000287,sig:06,src:009473,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave0/crashes/id:000163,sig:06,src:006224+005467,op:splice,rep:128",
            "/home/happy/afl_chengang2/tmp/findings/slave0/crashes/id:000011,sig:11,src:000503,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/master/crashes/id:000154,sig:06,sync:slave5,src:010116",
            "/home/happy/afl_chengang2/tmp/findings/master/crashes/id:000185,sig:06,src:006837,op:arith8,pos:72,val:+11",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000310,sig:06,src:009903,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000495,sig:06,src:009719,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000048,sig:11,src:000241+001445,op:splice,rep:64",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000492,sig:06,src:009794,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000235,sig:06,src:009933+009085,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000370,sig:06,src:010015,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000325,sig:06,src:009884,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000242,sig:11,src:009118+009413,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000420,sig:11,src:009598,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000311,sig:06,src:010151,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000496,sig:06,src:010190,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000366,sig:06,src:009889,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000299,sig:06,src:009713,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000357,sig:11,src:009861,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000271,sig:06,src:009346+009796,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000487,sig:06,src:010214+010103,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000306,sig:06,src:009915,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000410,sig:06,src:009751,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000347,sig:06,src:009766,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000605,sig:06,src:010081+006488,op:splice,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000521,sig:11,src:010559,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000258,sig:11,src:004028+010017,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000578,sig:06,src:010739+010123,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000491,sig:06,src:009886,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000489,sig:11,src:009591,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000227,sig:06,src:009706,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000355,sig:06,src:008184+009917,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000387,sig:06,src:010163,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000475,sig:06,src:009946,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000575,sig:06,src:008462+009631,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000453,sig:06,src:010007+000283,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000412,sig:06,src:009898,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000211,sig:11,src:009316,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000344,sig:06,src:010165,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000323,sig:06,src:009731,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000406,sig:06,src:009489,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000411,sig:06,src:010187,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000599,sig:06,src:009937,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000249,sig:11,src:009393,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000537,sig:06,src:009841,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000348,sig:06,src:010148,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000309,sig:06,src:009725+000156,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000398,sig:06,src:010138,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000315,sig:06,src:009719,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000400,sig:06,src:009719,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000359,sig:06,src:010071,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000243,sig:11,src:009964,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000534,sig:06,src:009794,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000557,sig:06,src:009817,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000332,sig:11,src:010280,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000516,sig:06,src:009957+000436,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000508,sig:06,src:009942+010215,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000456,sig:06,src:009999,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000367,sig:11,src:010109+003087,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000423,sig:06,src:008295,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000566,sig:11,src:010163,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000316,sig:06,src:010109+010034,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000351,sig:11,src:009358,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000230,sig:06,src:009802,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000223,sig:11,src:000835+009477,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000213,sig:06,src:009370,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000236,sig:06,src:009946,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave2/crashes/id:000210,sig:11,src:009298,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000303,sig:06,src:010115,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000287,sig:06,src:010086,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000278,sig:06,src:010005,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000389,sig:06,src:010190,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000078,sig:11,src:004569+002250,op:splice,rep:32",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000353,sig:06,src:010086,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000320,sig:06,src:010080,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000599,sig:11,src:010657,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000406,sig:11,src:010215,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000343,sig:11,src:009413,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000352,sig:11,src:009776,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000334,sig:06,src:010278,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000625,sig:06,src:010639+009401,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000375,sig:11,src:009485,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000675,sig:06,src:010041+009607,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000387,sig:11,src:009790,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000333,sig:06,src:010023,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000272,sig:11,src:009587,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000481,sig:11,src:009738,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000402,sig:06,src:009975,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000269,sig:11,src:009529,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000518,sig:06,src:010544,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000643,sig:06,src:009813+006990,op:splice,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000477,sig:11,src:009375+009407,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000555,sig:11,src:009988+010110,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000507,sig:11,src:009631,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000456,sig:06,src:009860+010021,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000056,sig:11,src:000288+001596,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000432,sig:11,src:002790+010110,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000510,sig:06,src:009852,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000321,sig:06,src:009787,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000496,sig:06,src:010229,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000635,sig:06,src:010167,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000572,sig:11,src:008152+010083,op:splice,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000506,sig:11,src:008105+009918,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000541,sig:11,src:010384+009389,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000312,sig:11,src:009886,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000408,sig:11,src:007584+009873,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000405,sig:11,src:010174,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000462,sig:06,src:009890+009707,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000362,sig:11,src:010004,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000619,sig:06,src:009554,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000249,sig:11,src:009456,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000550,sig:11,src:009964,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000452,sig:06,src:010030,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000515,sig:06,src:009922,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000504,sig:06,src:009467+002993,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000517,sig:11,src:010614,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000598,sig:06,src:010003+010191,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000311,sig:06,src:009822,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000413,sig:06,src:010101,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000328,sig:06,src:009519,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000449,sig:06,src:009528,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000316,sig:11,src:009486,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000378,sig:11,src:009689,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000388,sig:06,src:010101,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000489,sig:06,src:009926,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000530,sig:06,src:010280,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000478,sig:11,src:009800,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000486,sig:06,src:008204,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000463,sig:06,src:009974,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000373,sig:06,src:009993,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000301,sig:06,src:009765,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000370,sig:11,src:009534,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000632,sig:06,src:009966+009132,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000310,sig:06,src:009513+010094,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000422,sig:06,src:010156,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000302,sig:06,src:010111,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000404,sig:06,src:010127,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000564,sig:11,src:001842+010161,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000365,sig:06,src:009992,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000495,sig:06,src:010202,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000376,sig:11,src:009504+009555,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000658,sig:06,src:009949+010050,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000630,sig:06,src:006581+009816,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000414,sig:11,src:009494,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000364,sig:11,src:002120+009777,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000546,sig:06,src:009866,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000323,sig:06,src:010108,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000377,sig:11,src:009534,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave3/crashes/id:000574,sig:06,src:010017,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000343,sig:11,src:009590+006769,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000309,sig:06,src:009313,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000323,sig:11,src:009386,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000333,sig:11,src:005564+009826,op:splice,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000412,sig:06,src:009269,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000195,sig:11,src:009271,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000239,sig:06,src:009410,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000196,sig:11,src:009367,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000279,sig:06,src:010063,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000179,sig:11,src:009274,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000215,sig:11,src:009612+008172,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000391,sig:06,src:009429,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000350,sig:11,src:009663,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000175,sig:11,src:001917+009276,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000265,sig:06,src:009372,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000365,sig:11,src:009420,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000295,sig:11,src:009552,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000223,sig:11,src:009253,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000415,sig:11,src:009355+006033,op:splice,rep:64",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000074,sig:06,src:003215,op:havoc,rep:8",
            "/home/happy/afl_chengang2/tmp/findings/slave7/crashes/id:000168,sig:11,src:001436+009442,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave1/crashes/id:000352,sig:11,src:009546,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave1/crashes/id:000435,sig:06,src:009888+009463,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave1/crashes/id:000164,sig:11,src:009475,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave1/crashes/id:000264,sig:11,src:000333+009954,op:splice,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave1/crashes/id:000327,sig:11,src:009482,op:havoc,rep:2",
            "/home/happy/afl_chengang2/tmp/findings/slave1/crashes/id:000082,sig:06,src:005388,op:havoc,rep:4",
            "/home/happy/afl_chengang2/tmp/findings/slave1/crashes/id:000293,sig:11,src:010258,op:havoc,rep:2",
        ]

        print( "total ", len(items), " files found.")

        event_loop = asyncio.get_event_loop()

        try:
            futures = []

            n = 0
            k = 0
            for fname in items:
                n += 1
                k += 1

                args = [self.exe_name, fname]
                futures.append(asyncio.ensure_future(self.run_command_async(n, *args)))

                if k >= self.job_limit:
                    event_loop.run_until_complete(asyncio.gather(*futures))
                    futures.clear()
                    k = 0

                # if (n > 2000):
                #     break

            if len(futures) > 0:
                event_loop.run_until_complete(asyncio.gather(*futures))
        finally:
            event_loop.close()

        print("count = ", len(self.timeouted), ", timeout")
        for fname in self.timeouted:
            print("    ", fname)
示例#33
0
def convert(absolute_path: str = None):
    start_time = time.time()
    converting_time = 0
    mp3_path = '..\\music'  # default path
    if absolute_path is not None:
        mp3_path = absolute_path  # not guaranteed to be a valid path
        logging.info(absolute_path)
        logging.info("Is absolute? " + str(os.path.isabs(absolute_path)))

        if os.path.isabs(absolute_path):
            # an absolute path was given, so it should be a usable destination
            mp3_path = absolute_path
    mp4_path = "..\\videos"
    music_files_paths = []
    for file in os.listdir(mp4_path):
        if re.search(r'\.mp4$', file):
            music_files_paths.append({
                'source_path':
                os.path.join(mp4_path, file),
                'destination_path':
                None
            })
            print(os.path.join(mp4_path, file))  #debugging purposes
    for paths_elem in music_files_paths:
        file_name = os.path.split(paths_elem['source_path'])[1]
        paths_elem['destination_path'] = os.path.join(
            mp3_path,
            os.path.splitext(file_name)[0] + '.mp3')
        print("destination path: " + paths_elem['destination_path'])
    #actually converting the elements
    convert_to_mp3 = input("Convert to mp3  y/n")
    manuall_tag_rename_bool = False
    if convert_to_mp3 == 'y':
        futures = []
        if len(music_files_paths) > 40:
            rest_of_futures = []
            end_index = 40

            # will do it using concurrent.futures.ProcessPoolExecutor
            with concurrent.futures.ProcessPoolExecutor(
                    max_workers=end_index) as executor:
                for i in range(end_index):
                    futures.append(
                        executor.submit(write_to_drive_mp3_file,
                                        music_files_paths[i]))
                    print("File to  convert:  " +
                          music_files_paths[i]['destination_path'])
            logging.info("First 40 files should be converted. ")
            with concurrent.futures.ProcessPoolExecutor(
                    max_workers=len(music_files_paths) -
                    end_index) as executor:
                for i in range(len(music_files_paths) - end_index):
                    rest_of_futures.append(
                        executor.submit(write_to_drive_mp3_file,
                                        music_files_paths[i + end_index]))
                    print("File to  convert:  " +
                          music_files_paths[i + end_index]['destination_path'])
        else:
            with concurrent.futures.ProcessPoolExecutor(
                    max_workers=len(music_files_paths)) as executor:
                for paths_elem in music_files_paths:
                    futures.append(
                        executor.submit(write_to_drive_mp3_file, paths_elem))
                    print("File to  convert:  " +
                          paths_elem['destination_path'])

        print("Converting should ended. ")
        converting_time = time.time() - start_time
        print("Converting took " + str(converting_time))
        start_time = time.time()  #now it will check the converting time
        #Renaming the tags
        manuall_tag_rename = input(
            "Do you want to rename tags by yourself? y/n (will set the default - not granted to be right) "
        )
        if manuall_tag_rename == 'y':
            manuall_tag_rename_bool = True
        music_files_paths_to_edit = []
        for paths_elem in music_files_paths:
            music_files_paths_to_edit.append(
                paths_elem['destination_path'])  #TODO clean the code
        #calling the change tags function
        for path in music_files_paths_to_edit:
            change_tags(path, manuall_tag_rename_bool)
        #the code below is basically the same
    elif convert_to_mp3 == 'n':
        logging.info("Converting function did basically nothing")
    print("Renaming  took " + str(time.time() - start_time))
    print("While converting took " + str(converting_time))
示例#34
0
def label_Sentences(sentence):
    #Create a LabeledSentence object:
    #1. For each unique sentences (first argument)-- tokenized sentence (split sentences by words)
    #2. The corresponding Reference labels (second argument)
    # .loc replaces the removed .ix indexer for label-based lookup
    tmp = dfTrain.loc[dfTrain[dfTrain['Sentence'] ==
                              sentence].index.tolist()].Reference.tolist()
    return tmp


executor = concurrent.futures.ProcessPoolExecutor(12)
futures = []
num = 0
for item in sentences2:
    num += 1
    print(str(round(num * 100 / len(sentences2), 3)) + '%', end='\r')
    futures.append(executor.submit(label_Sentences, item))
concurrent.futures.wait(futures)

tags = []
for i in futures:
    tags.append(i.result())

#Pickle/save list of tags for sentences
f = open('/home/lanna/Dropbox/Insight/tags', 'wb')
pickle.dump(tags, f)

LabeledSentences = []
for i in range(0, len(sentences2)):
    LabeledSentences.append(
        doc2vec.LabeledSentence(sentences2[i].split(), tags[i]))
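
# Since the tags are consumed in submission order anyway, executor.map is a slightly
# simpler equivalent of the submit/result loop above; this is a sketch reusing the
# `label_Sentences` and `sentences2` names from the snippet (chunksize is an optional
# tuning knob for many small tasks, not something the original code uses).
import concurrent.futures

with concurrent.futures.ProcessPoolExecutor(12) as executor:
    tags = list(executor.map(label_Sentences, sentences2, chunksize=64))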
示例#35
0
def produce_iperf_output(basevm, guest_cmd_builder, current_avail_cpu, runtime,
                         omit, load_factor, modes):
    """Produce iperf raw output from server-client connection."""
    # Check if we have enough CPUs to pin the servers on the host.
    # The available CPUs are the total minus vcpus, vmm and API threads.
    assert load_factor * basevm.vcpus_count < CpuMap.len() - \
        basevm.vcpus_count - 2

    host_uds_path = os.path.join(basevm.path, VSOCK_UDS_PATH)

    # Start the servers.
    for server_idx in range(load_factor * basevm.vcpus_count):
        assigned_cpu = CpuMap(current_avail_cpu)
        iperf_server = \
            CmdBuilder(f"taskset --cpu-list {assigned_cpu}") \
            .with_arg(test_cfg.IPERF3) \
            .with_arg("-sD") \
            .with_arg("--vsock") \
            .with_arg("-B", host_uds_path) \
            .with_arg("-p", f"{test_cfg.BASE_PORT + server_idx}") \
            .with_arg("-1") \
            .build()

        run_cmd(iperf_server)
        current_avail_cpu += 1

    # Wait for iperf3 servers to start.
    time.sleep(SERVER_STARTUP_TIME)

    # Start `vcpus` iperf3 clients. We can not use iperf3 parallel streams
    # due to non deterministic results and lack of scaling.
    def spawn_iperf_client(conn, client_idx, mode):
        # Add the port where the iperf3 client is going to send/receive.
        cmd = guest_cmd_builder.with_arg("-p", test_cfg.BASE_PORT +
                                         client_idx).with_arg(mode).build()

        # Bind the UDS in the jailer's root.
        basevm.create_jailed_resource(
            os.path.join(
                basevm.path,
                _make_host_port_path(VSOCK_UDS_PATH,
                                     test_cfg.BASE_PORT + client_idx)))

        pinned_cmd = f"taskset --cpu-list {client_idx % basevm.vcpus_count}" \
            f" {cmd}"
        rc, stdout, _ = conn.execute_command(pinned_cmd)

        assert rc == 0

        return stdout.read()

    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = list()
        cpu_load_future = executor.submit(get_cpu_percent,
                                          basevm.jailer_clone_pid,
                                          runtime - SERVER_STARTUP_TIME, omit)

        modes_len = len(modes)
        ssh_connection = net_tools.SSHConnection(basevm.ssh_config)
        for client_idx in range(load_factor * basevm.vcpus_count):
            futures.append(
                executor.submit(
                    spawn_iperf_client,
                    ssh_connection,
                    client_idx,
                    # Distribute the modes evenly.
                    modes[client_idx % modes_len]))

        cpu_load = cpu_load_future.result()
        for future in futures[:-1]:
            res = json.loads(future.result())
            res[test_cfg.IPERF3_END_RESULTS_TAG][
                test_cfg.IPERF3_CPU_UTILIZATION_PERCENT_OUT_TAG] = None
            yield res

        # Attach the real CPU utilization vmm/vcpus to
        # the last iperf3 server-client pair measurements.
        res = json.loads(futures[-1].result())

        # We expect a single emulation thread tagged with `firecracker` name.
        tag = "firecracker"
        assert tag in cpu_load and len(cpu_load[tag]) == 1
        thread_id = list(cpu_load[tag])[0]
        data = cpu_load[tag][thread_id]
        vmm_util = sum(data) / len(data)
        cpu_util_perc = res[test_cfg.IPERF3_END_RESULTS_TAG][
            test_cfg.IPERF3_CPU_UTILIZATION_PERCENT_OUT_TAG] = dict()
        cpu_util_perc[test_cfg.CPU_UTILIZATION_VMM] = vmm_util

        vcpus_util = 0
        for vcpu in range(basevm.vcpus_count):
            # We expect a single fc_vcpu thread tagged with
            # f`fc_vcpu {vcpu}`.
            tag = f"fc_vcpu {vcpu}"
            assert tag in cpu_load and len(cpu_load[tag]) == 1
            thread_id = list(cpu_load[tag])[0]
            data = cpu_load[tag][thread_id]
            vcpus_util += (sum(data) / len(data))

        cpu_util_perc[test_cfg.CPU_UTILIZATION_VCPUS_TOTAL] = vcpus_util

        yield res
示例#36
0
    def execute(self):
        """The main agent execution method"""
        # Now we are getting what's going to be checked
        futures = []

        # 1) Queueing the jobs that might be marked Stalled
        # This is the minimum time we wait for declaring a job Stalled, therefore it is safe
        checkTime = dateTime() - self.stalledTime * second
        checkedStatuses = [JobStatus.RUNNING, JobStatus.COMPLETING]
        # Only get jobs whose HeartBeat is older than the stalledTime
        result = self.jobDB.selectJobs({"Status": checkedStatuses},
                                       older=checkTime,
                                       timeStamp="HeartBeatTime")
        if not result["OK"]:
            self.log.error(
                "Issue selecting %s jobs" % " & ".join(checkedStatuses),
                result["Message"])
        if result["Value"]:
            jobs = sorted(result["Value"])
            self.log.info(
                "%s jobs will be checked for being stalled" %
                " & ".join(checkedStatuses),
                "(n=%d, heartbeat before %s)" % (len(jobs), str(checkTime)),
            )
            for job in jobs:
                future = self.threadPoolExecutor.submit(
                    self._execute, "%s:_markStalledJobs" % job)
                futures.append(future)

        # 2) fail Stalled Jobs
        result = self.jobDB.selectJobs({"Status": JobStatus.STALLED})
        if not result["OK"]:
            self.log.error("Issue selecting Stalled jobs", result["Message"])
        if result["Value"]:
            jobs = sorted(result["Value"])
            self.log.info("Jobs Stalled will be checked for failure",
                          "(n=%d)" % len(jobs))
            for job in jobs:
                future = self.threadPoolExecutor.submit(
                    self._execute, "%s:_failStalledJobs" % job)
                futures.append(future)

        # 3) Send accounting
        for minor in self.minorStalledStatuses:
            result = self.jobDB.selectJobs({
                "Status": JobStatus.FAILED,
                "MinorStatus": minor,
                "AccountedFlag": "False"
            })
            if not result["OK"]:
                self.log.error("Issue selecting jobs for accounting",
                               result["Message"])
            if result["Value"]:
                jobs = result["Value"]
                self.log.info("Stalled jobs will be Accounted",
                              "(n=%d)" % (len(jobs)))
                for job in jobs:
                    future = self.threadPoolExecutor.submit(
                        self._execute, "%s:_sendAccounting" % job)
                    futures.append(future)

        for future in concurrent.futures.as_completed(futures):
            try:
                future.result()
            except Exception as exc:
                self.log.error("_execute generated an exception: %s" % exc)

        # From here on we don't use the threads

        # 4) Fail submitting jobs
        result = self._failSubmittingJobs()
        if not result["OK"]:
            self.log.error("Failed to process jobs being submitted",
                           result["Message"])

        # 5) Kick stuck jobs
        result = self._kickStuckJobs()
        if not result["OK"]:
            self.log.error("Failed to kick stuck jobs", result["Message"])

        return S_OK()
示例#37
0
def _export(dataset_input, dataset_output, random_index_column, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):
    """
    :param DatasetLocal dataset: dataset to export
    :param str path: path for file
    :param list[str] column_names: list of column names to export or None for all columns
    :param str byteorder: = for native, < for little endian and > for big endian
    :param bool shuffle: export rows in random order
    :param bool selection: export selection or not
    :param progress: progress callback that gets a progress fraction as argument and should return True to continue
    :return:
    """

    if selection:
        if selection == True:  # easier to work with the name
            selection = "default"

    N = len(dataset_input) if not selection else dataset_input.selected_length(selection)
    if N == 0:
        raise ValueError("Cannot export empty table")

    if shuffle and sort:
        raise ValueError("Cannot shuffle and sort at the same time")

    if shuffle:
        shuffle_array = dataset_output.columns[random_index_column]

    partial_shuffle = shuffle and len(dataset_input) != N

    order_array = None
    order_array_inverse = None

    # for strings we also need the inverse order_array, keep track of that
    has_strings = any([dataset_input.dtype(k) == str_type for k in column_names])

    if partial_shuffle:
        # if we only export a portion, we need to create the full length random_index array, and
        shuffle_array_full = np.random.choice(len(dataset_input), len(dataset_input), replace=False)
        # then take a section of it
        shuffle_array[:] = shuffle_array_full[shuffle_array_full < N]
        del shuffle_array_full
        order_array = shuffle_array
    elif shuffle:
        # better to do this in memory
        shuffle_array_memory = np.random.choice(N, N, replace=False)
        shuffle_array[:] = shuffle_array_memory
        order_array = shuffle_array
    if order_array is not None:
        indices_r = np.zeros_like(order_array)
        indices_r[order_array] = np.arange(len(order_array))
        order_array_inverse = indices_r
        del indices_r

    if sort:
        if selection:
            raise ValueError("sorting selections not yet supported")
        # these indices sort the input array, but we evaluate the input in sequential order and write it out in sorted order
        # e.g., not b[:] = a[indices]
        # but b[indices_r] = a
        logger.info("sorting...")
        indices = np.argsort(dataset_input.evaluate(sort))
        indices_r = np.zeros_like(indices)
        indices_r[indices] = np.arange(len(indices))
        if has_strings:
            # in this case we already have the inverse ready
            order_array_inverse = indices if ascending else indices[::-1]
        else:
            del indices
        order_array = indices_r if ascending else indices_r[::-1]
        logger.info("sorting done")

    if progress == True:
        progress = vaex.utils.progressbar_callable(title="exporting")
    progress = progress or (lambda value: True)
    progress_total = len(column_names) * len(dataset_input)
    progress_status = ProgressStatus()
    progress_status.cancelled = False
    progress_status.value = 0
    if selection:
        full_mask = dataset_input.evaluate_selection_mask(selection)
    else:
        full_mask = None

    sparse_groups = collections.defaultdict(list)
    sparse_matrices = {}  # alternative to a set of matrices, since they are not hashable
    string_columns = []
    futures = []

    thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=1) 
    if True:
        for column_name in column_names:
            sparse_matrix = dataset_output._sparse_matrix(column_name)
            if sparse_matrix is not None:
                # sparse columns are written differently
                sparse_groups[id(sparse_matrix)].append(column_name)
                sparse_matrices[id(sparse_matrix)] = sparse_matrix
                continue
            logger.debug("  exporting column: %s " % column_name)
            future = thread_pool.submit(_export_column, dataset_input, dataset_output, column_name, full_mask,
                shuffle, sort, selection, N, order_array, order_array_inverse, progress_status)
            futures.append(future)

    done = False
    while not done:
        done = True
        for future in futures:
            try:
                future.result(0.1/4)
            except concurrent.futures.TimeoutError:
                done = False
                break
        if not done:
            if not progress(progress_status.value / float(progress_total)):
                progress_status.cancelled = True

    for sparse_matrix_id, column_names in sparse_groups.items():
        sparse_matrix = sparse_matrices[sparse_matrix_id]
        for column_name in column_names:
            assert not shuffle
            assert selection in [None, False]
            column = dataset_output.columns[column_name]
            column.matrix.data[:] = dataset_input.columns[column_name].matrix.data
            column.matrix.indptr[:] = dataset_input.columns[column_name].matrix.indptr
            column.matrix.indices[:] = dataset_input.columns[column_name].matrix.indices
    return column_names
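
# The export loop above polls futures with a short result() timeout so it can keep
# driving the progress callback; the same pattern in isolation looks roughly like the
# sketch below (wait_with_progress and poll_interval are illustrative names, not part
# of the original code).
import concurrent.futures

def wait_with_progress(futures, progress, poll_interval=0.025):
    if not futures:
        return
    while True:
        done, not_done = concurrent.futures.wait(futures, timeout=poll_interval)
        if not progress(len(done) / float(len(futures))):
            for f in not_done:
                f.cancel()  # best effort: only work that has not started can be cancelled
            break
        if not not_done:
            break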
示例#38
0
def diffexp_ttest(adaptor, maskA, maskB, top_n=8, diffexp_lfc_cutoff=0.01):

    matrix = adaptor.open_array("X")
    row_selector_A = np.where(maskA)[0]
    row_selector_B = np.where(maskB)[0]
    nA = len(row_selector_A)
    nB = len(row_selector_B)

    dtype = matrix.dtype
    cols = matrix.shape[1]
    tile_extent = [dim.tile for dim in matrix.schema.domain]

    is_sparse = matrix.schema.sparse

    if is_sparse:
        row_selector_A = pack_selector_from_indices(row_selector_A)
        row_selector_B = pack_selector_from_indices(row_selector_B)
    else:
        # The rows from both row_selector_A and row_selector_B are gathered at the
        # same time, then the mean and variance are computed by subsetting on that
        # combined submatrix.  Combining the gather reduces number of requests/bandwidth
        # to the data source.
        row_selector_AB = np.union1d(row_selector_A, row_selector_B)
        row_selector_A_in_AB = np.in1d(row_selector_AB, row_selector_A, assume_unique=True)
        row_selector_B_in_AB = np.in1d(row_selector_AB, row_selector_B, assume_unique=True)
        row_selector_AB = pack_selector_from_indices(row_selector_AB)

    # because all IO is done per-tile, and we are always col-major,
    # use the tile column size as the unit of partition.  Possibly access
    # more than one column tile at a time based on the target_workunit.
    # Revisit partitioning if we change the X layout, or start using a non-local execution environment
    # which may have other constraints.

    # TODO: If the number of row selections is large enough, then the cells_per_coltile will exceed
    # the target_workunit.  A potential improvement would be to partition by both columns and rows.
    # However partitioning the rows is slightly more complex due to the arbitrary distribution
    # of row selections that are passed into this algorithm.

    cells_per_coltile = (nA + nB) * tile_extent[1]
    cols_per_partition = max(1, int(target_workunit / cells_per_coltile)) * tile_extent[1]
    col_partitions = [(c, min(c + cols_per_partition, cols)) for c in range(0, cols, cols_per_partition)]

    meanA = np.zeros((cols,), dtype=np.float64)
    varA = np.zeros((cols,), dtype=np.float64)
    meanB = np.zeros((cols,), dtype=np.float64)
    varB = np.zeros((cols,), dtype=np.float64)

    executor = get_thread_executor()
    futures = []

    if is_sparse:
        for cols in col_partitions:
            futures.append(executor.submit(_mean_var_sparse_ab, matrix, row_selector_A, nA, row_selector_B, nB, cols))
    else:
        for cols in col_partitions:
            futures.append(
                executor.submit(_mean_var_ab, matrix, row_selector_AB, row_selector_A_in_AB, row_selector_B_in_AB, cols)
            )

    for future in futures:
        # returns tuple: (meanA, varA, meanB, varB, cols)
        try:
            result = future.result()
            part_meanA, part_varA, part_meanB, part_varB, cols = result
            meanA[cols[0] : cols[1]] += part_meanA
            varA[cols[0] : cols[1]] += part_varA
            meanB[cols[0] : cols[1]] += part_meanB
            varB[cols[0] : cols[1]] += part_varB
        except Exception as e:
            for future in futures:
                future.cancel()
            raise ComputeError(str(e))

    if is_sparse:
        if adaptor.has_array("X_col_shift"):
            X_col_shift = adaptor.open_array("X_col_shift")[:]
            meanA += X_col_shift
            meanB += X_col_shift

    r = diffexp_ttest_from_mean_var(
        meanA.astype(dtype),
        varA.astype(dtype),
        nA,
        meanB.astype(dtype),
        varB.astype(dtype),
        nB,
        top_n,
        diffexp_lfc_cutoff,
    )

    return r
示例#39
0
    def feed_event(self, event):
        futures = []
        with concurrent.futures.ThreadPoolExecutor() as executor:
            ###################################################################
            # This needs to be first, as other tasks will need to write in   #
            # the resulting folders.                                          #
            ###################################################################

            # Depends on folder: 'static/'
            if event & STATIC_FOLDER:
                create_site_structure(static_path=STATIC_PATH)
                print_progress(text='Create _site')

            ###################################################################
            # We then reload data in memory, before generating the site       #
            ###################################################################

            # Depends on folder: 'data/'
            if self.data_source is None or event & DATA_FOLDER:
                # class where all data can be accessed from
                self.data_source = DataSource()
                print_progress(text='Load data sources')
            data_source = self.data_source

            # Depends on: 'blog/'
            if self.blog_posts is None or event & BLOG_FOLDER:
                self.blog_posts = load_blog_posts()
                print_progress(text='Load blog posts')

            ###################################################################
            # Once site structure has been created and data is refreshed, we  #
            # can build all parts of the site in parallel, since there is no  #
            # dependencies between them.                                      #
            ###################################################################

            # Depends on: 'templates/', 'data/'
            if event & DATA_FOLDER or event & TEMPLATES_FOLDER:
                print_progress(text='Generate error pages')
                copy_custom_error_pages(data=data_source)

            # Depends on: 'data/', 'templates/'
            if event & DATA_FOLDER or event & TEMPLATES_FOLDER:
                # Home
                futures.append(executor.submit(build_home, data=data_source))

                # Trackers
                futures.append(
                    executor.submit(build_trackers_list, data=data_source))
                futures.append(
                    executor.submit(build_tracker_pages, data=data_source))

                # Websites
                futures.append(
                    executor.submit(build_website_list, data=data_source))
                futures.append(
                    executor.submit(build_website_pages, data=data_source))

                # Companies
                futures.append(
                    executor.submit(build_company_reach_chart_page,
                                    data=data_source))

            # Depends on: 'data/', 'blog/', 'templates/'
            if event & DATA_FOLDER or event & BLOG_FOLDER or event & TEMPLATES_FOLDER:
                futures.append(
                    executor.submit(build_blogpost_list,
                                    data=data_source,
                                    blog_posts=self.blog_posts))

                futures.append(
                    executor.submit(build_blogpost_pages,
                                    data=data_source,
                                    blog_posts=self.blog_posts))

            # Depends on: 'data/', 'blog/', 'templates/'
            if event & DATA_FOLDER or event & BLOG_FOLDER or event & TEMPLATES_FOLDER:
                futures.append(
                    executor.submit(generate_sitemap,
                                    data=data_source,
                                    blog_posts=self.blog_posts))

            # TODO: uncomment when company profiles are ready
            # if args['site'] or args['companies']:
            #     company_process = Process(target=build_company_pages, args=(data_source,))
            #     company_process.start()

            # Wait for all jobs to finish
            concurrent.futures.wait(futures)

            # Getting the `result` of each promise (although none is expected)
            # allows to re-raise exception happening in children processes. If
            # we don't do it, exceptions will be silently ignored.
            for future in futures:
                future.result()

            print('Done')
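
# A standalone illustration of why the final `future.result()` loop above matters
# (this is a sketch, not part of the site generator): concurrent.futures.wait()
# completes silently even when a worker raised, and the exception only resurfaces
# when result() is called.
import concurrent.futures

def boom():
    raise RuntimeError("failure inside a worker")

with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = [executor.submit(boom)]
    concurrent.futures.wait(futures)   # returns without raising
    for future in futures:
        future.result()                # re-raises the RuntimeError here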
示例#40
0
import concurrent.futures
import random


def my_function(num):
    return num**2


with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:

    futures = []
    for _ in range(99):
        futures.append(executor.submit(my_function, random.randint(1, 99)))

    results = []
    for future in concurrent.futures.as_completed(futures):
        results.append(future.result())

    print(results)
示例#41
0
def main():

    config = Config()

    bucket_name = config.source_bucket_name
    total_count = config.total_objects  # Number of objects to upload.
    # total_count must be a multiple of max_pool_connections.
    max_pool_connections = config.max_s3_connections
    # max_threads must be <= total_count, and total_count a multiple of max_threads.
    max_threads = config.max_threads_for_boto3
    object_size = config.object_size  # Bytes.

    assert total_count % max_pool_connections == 0, \
        "total_count must be a multiple of max_pool_connections"

    assert max_threads <= total_count and total_count % max_threads == 0, \
        "max_threads must be <= total_count and total_count a multiple of max_threads"

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    # Init Global.
    GlobalTestDataBlock.create(object_size)

    session = boto3.session.Session()

    client = session.client("s3",
                            use_ssl=False,
                            endpoint_url=config.endpoint,
                            aws_access_key_id=config.access_key,
                            aws_secret_access_key=config.secret_key,
                            config=botocore.client.Config(
                                max_pool_connections=max_pool_connections))

    # Create resources for each thread.
    work_items = []
    start_time = time.perf_counter()
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_threads) \
            as executor:
        futures = []
        for i in range(total_count):
            # Generate object name
            object_name = "test_object_" + str(i) + "_sz" + str(object_size)
            work_item = WorkItem(bucket_name, object_name, object_size, client)
            work_items.append(work_item)
            futures.append(
                executor.submit(upload_object,
                                logger=logger,
                                work_item=work_items[i]))
        # Wait for all threads to complete.
        for future in concurrent.futures.as_completed(futures):
            future.result()

    end_time = time.perf_counter()
    total_time_ms_threads_requests = int(round((end_time - start_time) * 1000))

    logger.info(
        "Total time to upload {} objects including thread creation = {} ms.".
        format(total_count, total_time_ms_threads_requests))
    logger.info("Avg time per upload = {} ms.".format(
        total_time_ms_threads_requests / total_count))
示例#42
0
import concurrent.futures
import logging
import requests


# import time
#
def request():
    logging.info("Hitting webserver")
    r = requests.get("http://localhost:8080", data={"foobar": "baz"})
    logging.info(r.status_code)
    logging.info(len(r.content))


if __name__ == "__main__":
    format = "%(asctime)s: %(message)s"
    logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")

    with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
        futures = []
        for index in range(300):
            futures.append(executor.submit(request))

        # Surface any exceptions raised inside request() once all requests
        # have been submitted.
        for future in futures:
            if future.exception():
                print(repr(future.exception()))
        "Would you like to remove the zip files after extraction?", "no")
    batchIndex = 0
    executor = concurrent.futures.ProcessPoolExecutor(batchSize)
    running = 1
    while running:
        # get commands for current batch
        if batchIndex + batchSize > len(zipList):
            print("Processing LAST BATCH...")
            running = 0
            currentZips = zipList[batchIndex:]  # till the end
        else:
            currentZips = zipList[batchIndex:batchIndex + batchSize]

        # execute the commands
        futures = []
        for i in range(len(currentZips)):
            filePath = currentZips[i]
            print("Unzipping ", filePath)
            futures.append(executor.submit(extractZip, filePath, deleteZips))
        concurrent.futures.wait(futures)

        # update the batchIndex
        batchIndex += batchSize

        print "One batch complete."
        print "---------------------------------"

    print "All done!"
else:
    print("Okay, then goodbye!")
示例#44
0
    def convert_smooth(self):
        # First, fill in keyframes for every frame

        futures = []
        with ThreadPoolExecutor(
                thread_name_prefix="prepare",
                max_workers=self.options.max_workers) as executor:
            for bone_name in self.options.motion.bones.keys():
                if bone_name in self.options.model.bones:
                    if self.options.interpolation == 0 and len(
                            self.options.motion.bones[bone_name].keys()) >= 2:
                        # Linear interpolation: fill every frame as-is
                        futures.append(
                            executor.submit(self.prepare_linear, bone_name))
                    elif self.options.interpolation == 1:
                        if len(self.options.motion.bones[bone_name].keys()
                               ) > 2:
                            # Circular interpolation: fill every frame along the circle
                            futures.append(
                                executor.submit(self.prepare_circle,
                                                bone_name))
                        else:
                            # Not enough keys for circular interpolation, fall back to linear
                            logger.warning(
                                "Circular interpolation was specified, but it cannot be computed with fewer than 3 keys. Bone name: %s",
                                bone_name)
                            futures.append(
                                executor.submit(self.prepare_linear,
                                                bone_name))
                    elif self.options.interpolation == 2:
                        if len(self.options.motion.bones[bone_name].keys()
                               ) > 2:
                            # Curve interpolation: fill every frame along a Catmull-Rom curve
                            futures.append(
                                executor.submit(self.prepare_curve, bone_name))
                        else:
                            # Not enough keys for curve interpolation, fall back to linear
                            logger.warning(
                                "Curve interpolation was specified, but it cannot be computed with fewer than 3 keys. Bone name: %s",
                                bone_name)
                            futures.append(
                                executor.submit(self.prepare_linear,
                                                bone_name))
        concurrent.futures.wait(futures,
                                timeout=None,
                                return_when=concurrent.futures.FIRST_EXCEPTION)

        for f in futures:
            if not f.result():
                return False

        # If the loop count is 3 or more, apply the filter
        if self.options.loop_cnt >= 3:
            futures = []
            with ThreadPoolExecutor(
                    thread_name_prefix="filter",
                    max_workers=self.options.max_workers) as executor:
                for bone_name in self.options.motion.bones.keys():
                    if bone_name in self.options.model.bones:
                        futures.append(
                            executor.submit(self.fitering, bone_name))
            concurrent.futures.wait(
                futures,
                timeout=None,
                return_when=concurrent.futures.FIRST_EXCEPTION)

            for f in futures:
                if not f.result():
                    return False

        # If the loop count is 2 or more, remove unnecessary keyframes
        if self.options.loop_cnt >= 2:
            futures = []
            with ThreadPoolExecutor(
                    thread_name_prefix="remove",
                    max_workers=self.options.max_workers) as executor:
                for bone_name in self.options.motion.bones.keys():
                    if bone_name in self.options.model.bones:
                        futures.append(
                            executor.submit(self.remove_unnecessary_bf,
                                            bone_name))
            concurrent.futures.wait(
                futures,
                timeout=None,
                return_when=concurrent.futures.FIRST_EXCEPTION)

            for f in futures:
                if not f.result():
                    return False

        return True
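
# concurrent.futures.wait(..., return_when=FIRST_EXCEPTION), as used above, returns
# as soon as any worker raises, while futures that already started keep running; a
# small standalone sketch of that behaviour (work() and its failure are illustrative,
# not part of the class above).
import concurrent.futures

def work(i):
    if i == 3:
        raise ValueError("bad input")
    return True

with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
    futures = [executor.submit(work, i) for i in range(8)]
    done, not_done = concurrent.futures.wait(
        futures, return_when=concurrent.futures.FIRST_EXCEPTION)
    # Finished futures carry their exception; calling result() would re-raise it.
    failed = [f for f in done if f.exception() is not None]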
示例#45
0
            def report_subcause():
                print_status('  with error {e}\n'.format(e=e), file=file)

            report_error(e.out, report_subcause=report_subcause)
        return (path, success, file.getvalue())

    sysmem = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
    testmem = 2e9
    executor = concurrent.futures.ThreadPoolExecutor(
        max_workers=((sysmem - 4e9) // testmem))
    futures = []
    for n, test in enumerate(test_to_run):
        path = test[0]
        type = test[1]
        exec_args = test[2] if len(test) >= 3 else []
        futures.append(executor.submit(run_test, path, type, exec_args))
    for future in futures:
        path, success, out = future.result()
        if not success:
            failed_tests.append(path)
        print(out)

    if not failed_tests:
        print('\nOK.')
    else:
        print('\n\nThe following test(s) have failed:')
        for test in failed_tests:
            print('  {}'.format(test))
        print('\nSummary: {} of the total {} tests failed'.format(
            len(failed_tests), len(test_to_run)))
        sys.exit(1)
示例#46
0
    def send_async_requests(self):
        """
		Sends all HTTP requests that have been requested by calls to add_async(). Returns when all requests
		have completed. Raises an exception if any of the calls fail, but waits until all are completed before doing so.
		:rtype: ``list[Any]`` A list of the requested items, transformed by their transform function.
		:return:
		"""
        if len(self._async_http_requests) <= 0:
            return ()

        if self._session is None:
            self.start_new_session()
        session = self._session

        responses = [None] * len(self._async_http_requests)
        ":type : list"

        futures = []
        for req, uri, host, auth, decode, ignored in self._async_http_requests:
            if host is None:
                host = self._host
            _log_http_request(req, uri, host, auth, self.log_full_request)
            f = self._async_executor.submit(session.send, req)
            # mini data-structure, Tuple[done_yet, future]
            futures.append((False, f, decode, ignored))
        self._async_http_requests = []

        # now wait for them to complete
        while len([x for x in futures if not x[0]]) > 0:
            next_futures = []
            for idx, f in enumerate(futures):
                done_now = f[0]
                if not done_now:
                    if f[1].done():
                        r = f[1].result()
                        _log_http_response(r, self.log_full_response)
                        responses[idx] = (r, f[2], f[3])
                        done_now = True
                next_futures.append((done_now, f[1], f[2], f[3]))
            futures = next_futures
            time.sleep(0.01)
        # they are now done

        # we need to re-raise any exceptions that occur
        bad_responses = []
        for idx, resp_items in enumerate(responses):
            resp, decode, ignored = resp_items
            if resp.status_code not in ignored:
                try:
                    resp.raise_for_status()
                except requests.HTTPError as e:
                    _log.exception("HTTPError in request #" + str(idx) + ": " +
                                   str(e))
                    bad_responses.append(idx)
        if len(bad_responses) > 0:
            self._async_transforms = []
            raise AsyncHTTPError(bad_responses)

        # finally, call the transform function on each one
        transformed = []
        for r_items, xform in zip(responses, self._async_transforms):
            r, decode, ignored = r_items
            data = None
            if r.content is not None:
                if decode == 'text':
                    data = r.text
                elif decode == 'json':
                    data = r.json(parse_float=decimal.Decimal)
                elif decode == 'binary':
                    data = r.content
                else:
                    raise ValueError("Bad response_payload encoding: " +
                                     decode)
                data = xform(data)
            transformed.append(data)
        self._async_transforms = []
        return transformed
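
# The hand-rolled polling loop above can be simplified while still filling the
# responses list in request order; this is a sketch with assumed names, not the
# library's actual code: map each future back to its slot and let as_completed
# do the waiting.
import concurrent.futures

def gather_in_order(executor, session, prepared_requests):
    futures = {executor.submit(session.send, req): idx
               for idx, req in enumerate(prepared_requests)}
    responses = [None] * len(prepared_requests)
    for future in concurrent.futures.as_completed(futures):
        responses[futures[future]] = future.result()
    return responses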
示例#47
0
def _upload_blob_chunks(blob_service,
                        container_name,
                        blob_name,
                        blob_size,
                        block_size,
                        stream,
                        max_connections,
                        progress_callback,
                        validate_content,
                        lease_id,
                        uploader_class,
                        maxsize_condition=None,
                        if_match=None,
                        timeout=None,
                        content_encryption_key=None,
                        initialization_vector=None,
                        resource_properties=None):
    encryptor, padder = _get_blob_encryptor_and_padder(
        content_encryption_key, initialization_vector, uploader_class
        is not _PageBlobChunkUploader)

    uploader = uploader_class(blob_service, container_name, blob_name,
                              blob_size, block_size, stream,
                              max_connections > 1, progress_callback,
                              validate_content, lease_id, timeout, encryptor,
                              padder)

    uploader.maxsize_condition = maxsize_condition

    # ETag matching does not work with parallelism as a ranged upload may start
    # before the previous finishes and provides an etag
    uploader.if_match = if_match if not max_connections > 1 else None

    if progress_callback is not None:
        progress_callback(0, blob_size)

    if max_connections > 1:
        import concurrent.futures
        from threading import BoundedSemaphore
        '''
        Ensures we bound the chunking so we only buffer and submit 'max_connections' amount of work items to the executor.
        This is necessary because the executor queue would otherwise keep accepting submitted work items, buffering all the blocks.
        Using max_connections + 1 ensures the next chunk is already buffered and ready for when a worker thread becomes available.
        '''
        chunk_throttler = BoundedSemaphore(max_connections + 1)

        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
        futures = []
        running_futures = []

        # Check for exceptions and fail fast.
        for chunk in uploader.get_chunk_streams():
            for f in running_futures:
                if f.done():
                    if f.exception():
                        raise f.exception()
                    else:
                        running_futures.remove(f)

            chunk_throttler.acquire()
            future = executor.submit(uploader.process_chunk, chunk)

            # Calls callback upon completion (even if the callback was added after the Future task is done).
            future.add_done_callback(lambda x: chunk_throttler.release())
            futures.append(future)
            running_futures.append(future)

        # result() will wait until completion and also raise any exceptions that may have been set.
        range_ids = [f.result() for f in futures]
    else:
        range_ids = [
            uploader.process_chunk(result)
            for result in uploader.get_chunk_streams()
        ]

    if resource_properties:
        resource_properties.last_modified = uploader.last_modified
        resource_properties.etag = uploader.etag

    return range_ids
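The throttling in the parallel branch above is a reusable pattern: a BoundedSemaphore sized at max_connections + 1 blocks the producer loop once that many chunks are buffered or in flight, and each future releases a slot from its done-callback, so memory stays bounded no matter how large the stream is. A self-contained sketch of the same idea, assuming made-up produce_chunks/process_chunk stand-ins rather than the Azure SDK uploader:

import concurrent.futures
import threading

def process_chunk(chunk):
    return sum(chunk)                        # stand-in for the real per-chunk upload

def produce_chunks(n, size=4):
    for i in range(n):
        yield list(range(i, i + size))

max_connections = 4
throttle = threading.BoundedSemaphore(max_connections + 1)
futures = []

with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
    for chunk in produce_chunks(20):
        throttle.acquire()                   # block while max_connections + 1 chunks are pending
        future = executor.submit(process_chunk, chunk)
        future.add_done_callback(lambda _: throttle.release())
        futures.append(future)

results = [f.result() for f in futures]      # re-raises any worker exception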
Example #48
    conn = psycopg2.connect(host="localhost",
                            user="******",
                            password="******")
    conn.autocommit = True

    # Add seed URLs to frontier
    if len(SEED_URLS) > 0:
        with conn.cursor() as cur:
            for seed_url in SEED_URLS:
                frontier_append(cur, seed_url, None)

    # Start the crawler
    # crawl(0, conn)
    with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS +
                                               1) as executor:
        # Start server thread
        future_listen = executor.submit(listen, sock)
        # Start crawler threads
        futures = []
        for i in range(NUM_THREADS):
            futures.append(executor.submit(crawl, i, conn))
        # Join crawler threads
        for future in futures:
            future.result()
        is_running = False
        # Stop server thread
        sock.close()

    # Cleanup
    conn.close()
Example #49
def main():
        # Load parameters from command-line arguments
        parameters = retrieve_parameters()
        S = parameters['S']
        p = parameters['p']
        q = parameters['q']
        M = parameters['M']
        gamma = parameters['gamma']
        output_directory = parameters['output_directory']
        contact_map_path = parameters['contact_map_path']
        contact_map_name = parameters['contact_map_name']
        N = parameters['N']
        verbose = parameters['verbose']
        num_threads = parameters['threads']
        time_start = time.time()

        # Display parameters
        if verbose:
                sys.stderr.write('Maximum TAD size : {}\n'.format(S))
                sys.stderr.write('Maximum TADs in each tree : {}\n'.format(M))
                sys.stderr.write('Boundary index parameter : {} , {}\n'.format(p, q))
                sys.stderr.write('Balance between boundary : {}\n'.format(gamma))
                sys.stderr.write('Output directory : {}\n'.format(output_directory))
                sys.stderr.write('Contact files : {}\n'.format(','.join(contact_map_path)))
                sys.stderr.write('Contact names : {}\n'.format(','.join(contact_map_name)))
                sys.stderr.write('Contact sizes : {}\n'.format(','.join(['{}'.format(n) for n in N])))
                sys.stderr.write('Number of threads: {}\n'.format(num_threads))

        # Load data
        if verbose: sys.stderr.write(time.strftime('[%H:%M:%S] Loading data        \n', time.localtime()))
        chrs = contact_map_name
        paths = contact_map_path
        height = S
        mats = {chrs[i] : np.loadtxt(paths[i]) for i in range(len(paths))}
        backbins = []
        for chr in chrs:
                for i in range(mats[chr].shape[0]-height):
                        backbins += [mats[chr][i,i:i+height]]
        backgrnd = np.mean(backbins,axis=0)

        tadscores = {}
        bakscores = {}
        chrdeltas = {}
        chrbetas = {}

        # Parameters precomputation
        for chr_index in range(len(chrs)):
                chr = chrs[chr_index]
                if verbose: 
                        if chr_index > 0:
                                sys.stderr.write('\033[F\033[K')
                        sys.stderr.write(time.strftime('[%H:%M:%S] Precomputing parameters for ', time.localtime()) + chr + '\n')
                n = mats[chr].shape[0]
                smat = np.zeros((n,n))
                gmat = np.zeros((n,n))
                bmat = np.zeros((n,n))
                x = np.ndarray(n ** 2 // 2, np.float64)
                y = np.ndarray(n ** 2 // 2, np.float64)
                __update_matrices(mats[chr], backgrnd, smat, gmat, bmat, n, height, x, y)

                tadscores.update({chr:smat})
                chrdeltas.update({chr:gmat})
                chrbetas.update({chr:bmat})
        if verbose: 
                sys.stderr.write(time.strftime('\033[F\033[K[%H:%M:%S] Precomputing parameters completed\n', time.localtime()))

        # Background calculation
        for chr_index in numba.prange(len(chrs)):
                chr = chrs[chr_index]
                if verbose: 
                        if chr_index > 0:
                                sys.stderr.write('\033[F\033[K')
                        sys.stderr.write(time.strftime('[%H:%M:%S] Precomputing background scores for ', time.localtime()) + chr + '\n')
                mat = mats[chr]
                n = mat.shape[0]
                smat = np.zeros((n,n))
                __update_smat(mat, backgrnd, smat, height)
                bakscores.update({chr:smat})
        
        if verbose: 
                sys.stderr.write(time.strftime('\033[F\033[K[%H:%M:%S] Precomputing background completed\n', time.localtime()))

        if sys.version_info >= (3, 2) and num_threads > 1:
                import concurrent.futures
                executor = concurrent.futures.ProcessPoolExecutor(max_workers=num_threads)
                futures = []
        else:
                executor = None

        for chr_index in range(len(chrs)):
                chr = chrs[chr_index]
                map_file = contact_map_path[chr_index]
                map_name = contact_map_name[chr_index]
                map_size = N[chr_index]
                min_size = 2
                t_lim = M
                T_lim = N[chr_index]

                mat = mats[chr]
                gmat = chrdeltas[chr]
                bmat = chrbetas[chr]
                bakmat = bakscores[chr]
                smat = tadscores[chr] - bakscores[chr]
                # Dynamic programming
                if executor:
                        # print('\n\nconcurrent ' + chr + '\n\n')
                        fs = executor.submit(__process_chromosome, chr_index, min_size, t_lim, T_lim, mat, backgrnd, gmat, bmat, bakmat, smat, parameters)
                        futures.append(fs)
                        # print(fs)
                else:
                        if verbose: sys.stderr.write(time.strftime('[%H:%M:%S] Running dynamic program for ', time.localtime()) + chr + '\n')
                        __process_chromosome(chr_index, min_size, t_lim, T_lim, mat, backgrnd, gmat, bmat, bakmat, smat, parameters)
        if executor: 
                concurrent.futures.wait(futures)

        # output summary
        time_end = time.time()
        with open(os.path.join(output_directory, 'jitadtree.info'),'w') as fo:
                fo.write('Filename : {}\n'.format(','.join(contact_map_path)))
                fo.write('Name : {}\n'.format(','.join(contact_map_name)))
                fo.write('Size : {}\n'.format(','.join(['{}'.format(x) for x in N])))
                fo.write('Maximum TAD size : {}\n'.format(S))
                fo.write('Maximum TADs in each tree : {}\n'.format(M))
                fo.write('Boundary index parameter : {} , {}\n'.format(p, q))
                fo.write('Balance between boundary : {}\n'.format(gamma))
                fo.write('Threads : {}\n'.format(num_threads))
                fo.write('Elapsed time : {:.1f} sec\n'.format(time_end - time_start))
Example #50
if __name__ == '__main__':
    __spec__ = None
    
    count = len(animations_data['animations'])
    errors_data = {'errors':[], 'accessor':np.array([[-1]*count for i in range(count)])}
    
    print('start multiprocessors')
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = []
        for i in range(count):
            for j in range(i, count):
                futures.append(
                    executor.submit(
                        _future_compute_errors_between_2_animations, 
                        animations_data['animations'][i],
                        animations_data['clouds'][i],
                        animations_data['animations'][j],
                        animations_data['clouds'][j], 
                        i,j))

        for future in concurrent.futures.as_completed(futures):
            i,j,error = future.result()
            errors_data['errors'].append(error)
            errors_data['accessor'][i][j] = len(errors_data['errors'])-1
            
            if i!=j:
                errors_data['errors'].append(error.T)
                errors_data['accessor'][j][i] = len(errors_data['errors'])-1
            
    print('end multiprocessors')
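The nested loop above submits only the upper triangle (j >= i) and mirrors each result into the symmetric slot as it completes, so every pair is computed exactly once. A simplified sketch of that submit-the-upper-triangle / mirror-on-completion pattern, with a made-up compare function standing in for the animation comparison:

import concurrent.futures
import itertools

def compare(i, j):
    return i, j, abs(i - j)                  # placeholder pairwise "error"

if __name__ == '__main__':
    count = 4
    errors = [[None] * count for _ in range(count)]
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = [executor.submit(compare, i, j)
                   for i, j in itertools.combinations_with_replacement(range(count), 2)]
        for future in concurrent.futures.as_completed(futures):
            i, j, err = future.result()
            errors[i][j] = err
            errors[j][i] = err               # mirror the upper triangle into the lower one
    print(errors)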
Example #51
            sys.stdout.flush()


if __name__ == '__main__':
    try:
        if len(sys.argv) < 3:
            print('Arg1 => BED file')
            print('Arg2 => MAF file')
        else:
            futures = []
            executor = ThreadPoolExecutor(1)
            maf_data = parse_maf(sys.argv[2])
            for bed_entry in open(sys.argv[1]):
                bed_entry = bed_entry.strip().split('\t')
                bed_chromosome, bed_start, bed_end = bed_entry[0], int(
                    bed_entry[1]), int(bed_entry[2])

                if bed_chromosome in maf_data:
                    #print(map_intervals(bed_chromosome, bed_start, bed_end, maf_data[bed_chromosome]))
                    futures.append(
                        executor.submit(map_intervals, bed_chromosome,
                                        bed_start, bed_end,
                                        maf_data[bed_chromosome]))
            for future in concurrent.futures.as_completed(futures):
                prettify(future)

    except KeyboardInterrupt:
        print()
    except KeyError as e:
        print(e)
Example #52
def single_job(i):
    geometry = [[points[i][0] - 1, points[i][1] - 1],
                [points[i][0] + 1, points[i][1] + 1],
                [points[i][0] - 1, points[i][1] + 1],
                [points[i][0] + 1, points[i][1] - 1]]
    name = 'day_' + str(i) + '_' + str(points[i][0]) + '_' + str(points[i][1])
    name = name.replace(".", "_")
    task = ee.batch.Export.image.toDrive(dataset,
                                         description=name,
                                         scale=250,
                                         region=geometry,
                                         folder='geoDataF',
                                         maxPixels=1e9)
    task.start()
    while task.active():
        time.sleep(5)
    print("Job", i, "completed")
    return "Done"


pool = ThreadPoolExecutor(10)

futures = []
for i in range(len(points)):
    future = pool.submit(single_job, i)
    futures.append(future)

concurrent.futures.wait(futures)
print("All jobs completed.")
Example #53
def main():
    args = parse_args()
    logging.basicConfig(level=getattr(logging, args.log_level.upper()))

    # Load list of profiles from file.
    #
    # If file does not exist or list is empty, the profile list is initialized
    # with the default profile directory path.
    df_profiles = load_profiles_info(args.profiles_path)
    drop_version_errors(df_profiles,
                        missing=False,
                        mismatch=True,
                        inplace=True)

    # Save most recent list of profiles to disk.
    with args.profiles_path.open('w') as output:
        profiles_str = yaml.dump(
            df_profiles[SAVED_COLUMNS].astype(str).to_dict('records'),
            default_flow_style=False)
        output.write(profiles_str)

    # Look up major version of each profile.
    df_profiles['major_version'] = df_profiles.path.map(profile_major_version)

    # Perform the following tasks in the background:
    #
    #  - Upgrade `microdrop-launcher` package
    #  - Cache latest `microdrop` package version
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:

        def _auto_upgrade():
            process = sp.Popen(
                [sys.executable, '-m', 'microdrop_launcher.auto_upgrade'])

            return process.communicate()

        def _cache_latest_microdrop_version():
            process = sp.Popen(
                [sys.executable, '-m', 'microdrop_launcher.microdrop_version'])

            return process.communicate()

        def _launch(args, df_profiles):
            if args.default or (not args.no_auto
                                and df_profiles.shape[0] == 1):
                # Launch MicroDrop with most recently used (or only available) profile.
                return_code = launch_profile_row(df_profiles.iloc[0])
                if return_code == 0:
                    df_profiles.used_timestamp[0] = str(dt.datetime.now())
            else:
                # Display dialog to manage profiles or launch a profile.
                launch_dialog = LaunchDialog(df_profiles)
                launch_dialog.run()
                return_code = launch_dialog.return_code
                df_profiles = launch_dialog.df_profiles

            # Save most recent list of profiles to disk (most recently used first).
            #
            # List can be changed using dialog by:
            #  - Creating a new profile.
            #  - Importing a profile.
            #  - Updating used timestamp by launching a profile.
            df_profiles = df_profiles.astype(str)
            df_profiles.loc[df_profiles.used_timestamp == 'nan',
                            'used_timestamp'] = ''
            df_profiles.sort_values('used_timestamp',
                                    ascending=False,
                                    inplace=True)

            with args.profiles_path.open('w') as output:
                profiles_str = yaml.dump(
                    df_profiles[SAVED_COLUMNS].to_dict('records'),
                    default_flow_style=False)
                output.write(profiles_str)
            return return_code

        futures = []
        if not args.no_upgrade:
            upgrade_future = executor.submit(_auto_upgrade)
            microdrop_version_future = \
                executor.submit(_cache_latest_microdrop_version)
            futures.extend([upgrade_future, microdrop_version_future])
        launch_future = executor.submit(_launch, args, df_profiles)
        futures.append(launch_future)
        concurrent.futures.wait(futures)
        return launch_future.result()
Example #54
                                               server: str):
                        data = zfr.read(zfinfo)
                        if zfinfo.filename.endswith(
                                ".apbp") or zfinfo.filename.endswith(".apm3"):
                            data = update_patch_data(data, server)
                        with ziplock:
                            zfw.writestr(zfinfo, data)
                        return zfinfo.filename

                    futures = []
                    with zipfile.ZipFile(rom, "r") as zfr:
                        updated_zip = os.path.splitext(rom)[0] + "_updated.zip"
                        with zipfile.ZipFile(updated_zip,
                                             "w",
                                             compression=zipfile.ZIP_DEFLATED,
                                             compresslevel=9) as zfw:
                            for zfname in zfr.namelist():
                                futures.append(
                                    pool.submit(_handle_zip_file_entry,
                                                zfr.getinfo(zfname), address))
                            for future in futures:
                                print(
                                    f"File {future.result()} added to {os.path.split(updated_zip)[1]}"
                                )

            except:
                import traceback

                traceback.print_exc()
                input("Press enter to close.")
Example #55
        # use CPU parallel to calculate
        result_list = []

        with concurrent.futures.ProcessPoolExecutor(
                max_workers=cores) as executor:
            futures = []
            for y_list in chunks_idx_y:
                # get the stack data
                img_wa_stack = img_wa[y_list, :, :]
                ref_wa_stack = ref_wa_pad[y_list[0]:y_list[-1] +
                                          2 * cal_half_window + 1, :, :]

                # start the jobs
                futures.append(
                    executor.submit(displace_wavelet, y_list, img_wa_stack,
                                    ref_wa_stack, pixel_sample,
                                    cal_half_window))

            for future in concurrent.futures.as_completed(futures):

                try:
                    result_list.append(future.result())
                    # display the status of the program
                    Total_iter = cores * n_group
                    Current_iter = len(result_list)
                    percent_iter = Current_iter / Total_iter * 100
                    str_bar = '>' * (int(np.ceil(
                        percent_iter / 2))) + ' ' * (int(
                            (100 - percent_iter) // 2))
                    prColor(
                        '\r' + str_bar + 'processing: [%3.1f%%] ' %
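The snippet above is cut off mid-statement, but the shape it is building is simply progress reporting over as_completed: append each finished result and redraw a text bar from the completed fraction. A minimal sketch of that pattern with a placeholder work function:

import concurrent.futures

def work(i):
    return i * i                             # placeholder for the real per-chunk computation

if __name__ == '__main__':
    total = 16
    results = []
    with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(work, i) for i in range(total)]
        for future in concurrent.futures.as_completed(futures):
            results.append(future.result())
            percent = len(results) / total * 100
            bar = '>' * int(percent // 2) + ' ' * int((100 - percent) // 2)
            print('\r' + bar + ' [%3.1f%%]' % percent, end='', flush=True)
    print()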
Example #56
    def upload(self, items: typing.Iterable[_SyncUploadItem]):
        progress = CumulativeTransferProgress('Uploaded')

        # flag set by an upload thread if an error occurs, signalling the submitting
        # thread to stop processing
        abort_event = threading.Event()

        # used to lock around shared state and to notify when dependencies are resolved
        # so that provenance dependent files can be uploaded
        dependency_condition = threading.Condition()

        pending_provenance = _PendingProvenance()
        finished_items = {}

        ordered_items = self._order_items([i for i in items])

        futures = []
        while ordered_items:
            skipped_items = []
            for item in ordered_items:
                if abort_event.is_set():
                    # if this flag is set, one of the upload threads failed and we should raise
                    # its error and cancel any remaining futures
                    self._abort(futures)

                with dependency_condition:
                    used, used_pending = self._convert_provenance(
                        item.used, finished_items)
                    executed, executed_pending = self._convert_provenance(
                        item.executed, finished_items)

                    if used_pending or executed_pending:
                        # we can't upload this item yet, it has provenance that hasn't yet been uploaded
                        skipped_items.append(item)
                        pending_provenance.update(
                            used_pending.union(executed_pending))

                        # skip uploading because dependent provenance hasn't finished uploading
                        continue

                # we did not continue above due to pending provenance, so all provenance
                # this item depends on has already been uploaded and we can go ahead
                # and upload this item

                # we acquire the semaphore to ensure that we aren't uploading more than
                # our configured maximum number of files here at once. once we reach the limit
                # we'll block here until one of the existing file uploads completes
                self._file_semaphore.acquire()
                future = self._executor.submit(
                    self._upload_item,
                    item,
                    used,
                    executed,
                    finished_items,
                    pending_provenance,
                    dependency_condition,
                    abort_event,
                    progress,
                )
                futures.append(future)

            with dependency_condition:
                if pending_provenance.has_pending():
                    # skipped_items contains all the items that we couldn't upload the previous time
                    # through the loop because they depended on another item for provenance. Wait until
                    # at least one of those items finishes before going through the loop again.
                    if not abort_event.is_set():
                        dependency_condition.wait_for(lambda: (
                            pending_provenance.has_finished_provenance(
                            ) or abort_event.is_set()))

                pending_provenance.reset_count()

            ordered_items = skipped_items

        # all items have been submitted for upload

        concurrent.futures.wait(futures,
                                return_when=concurrent.futures.FIRST_EXCEPTION)
        if abort_event.is_set():
            # at least one item failed to upload
            self._abort(futures)
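The coordination above combines a threading.Condition (wait until a dependency has been uploaded) with a threading.Event (stop waiting if any upload aborts). A minimal sketch of that wait-for-dependency-or-abort pattern, with made-up item names and a trivial upload function:

import concurrent.futures
import threading

abort_event = threading.Event()
dependency_condition = threading.Condition()
finished = set()

def upload(name, fail=False):
    try:
        if fail:
            raise RuntimeError('upload failed: ' + name)
        with dependency_condition:
            finished.add(name)                  # record the resolved dependency
            dependency_condition.notify_all()   # wake whoever is waiting on it
        return name
    except Exception:
        abort_event.set()                       # tell waiters to stop waiting
        with dependency_condition:
            dependency_condition.notify_all()
        raise

with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
    executor.submit(upload, 'parent-item')
    with dependency_condition:
        # Block until the dependency has uploaded, or an upload aborted.
        dependency_condition.wait_for(
            lambda: 'parent-item' in finished or abort_event.is_set())
    if not abort_event.is_set():
        print(executor.submit(upload, 'child-item').result())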
Example #57
    def handle(self, *args, **options):
        # We only want to invalidate the API response cache once data loading
        # completes. Disconnecting the api_change_receiver function from post_save
        # and post_delete signals prevents model changes during data loading from
        # repeatedly invalidating the cache.
        for model in apps.get_app_config('course_metadata').get_models():
            for signal in (post_save, post_delete):
                signal.disconnect(receiver=api_change_receiver, sender=model)

        # For each partner defined...
        partners = Partner.objects.all()

        # If a specific partner was indicated, filter down the set
        partner_code = options.get('partner_code')
        if partner_code:
            partners = partners.filter(short_code=partner_code)

        if not partners:
            raise CommandError('No partners available!')

        success = True
        for partner in partners:

            # The Linux kernel implements copy-on-write when fork() is called to create a new
            # process. Pages that the parent and child processes share, such as the database
            # connection, are marked read-only. If a write is performed on a read-only page
            # (e.g., closing the connection), it is then copied, since the memory is no longer
            # identical between the two processes. This leads to the following behavior:
            #
            # 1) Newly forked process
            #       parent
            #              -> connection (Django open, MySQL open)
            #       child
            #
            # 2) Child process closes the connection
            #       parent -> connection (*Django open, MySQL closed*)
            #       child  -> connection (Django closed, MySQL closed)
            #
            # Calling connection.close() from a child process causes the MySQL server to
            # close a connection which the parent process thinks is still usable. Since
            # the parent process thinks the connection is still open, Django won't attempt
            # to open a new one, and the parent ends up running a query on a closed connection.
            # This results in a 'MySQL server has gone away' error.
            #
            # To resolve this, we force Django to reconnect to the database before running any queries.
            connection.connect()

            # If no courses exist for this partner, this command is likely being run on a
            # new catalog installation. In that case, we don't want multiple threads racing
            # to create courses. If courses do exist, this command is likely being run
            # as an update, significantly lowering the probability of race conditions.
            courses_exist = Course.objects.filter(partner=partner).exists()
            is_threadsafe = courses_exist and waffle.switch_is_active(
                'threaded_metadata_write')
            max_workers = DataLoaderConfig.get_solo().max_workers

            logger.info(
                'Command is{negation} using threads to write data.'.format(
                    negation='' if is_threadsafe else ' not'))

            pipeline = (
                ((CoursesApiDataLoader, partner.courses_api_url,
                  max_workers), ),
                (
                    (EcommerceApiDataLoader, partner.ecommerce_api_url, 1),
                    (ProgramsApiDataLoader, partner.programs_api_url,
                     max_workers),
                ),
                ((AnalyticsAPIDataLoader, partner.analytics_url, 1), ),
            )

            if waffle.switch_is_active('parallel_refresh_pipeline'):
                futures = []
                for stage in pipeline:
                    with concurrent.futures.ProcessPoolExecutor() as executor:
                        for loader_class, api_url, max_workers in stage:
                            if api_url:
                                logger.info('Executing Loader [%s]', api_url)
                                futures.append(
                                    executor.submit(
                                        execute_parallel_loader,
                                        loader_class,
                                        partner,
                                        api_url,
                                        max_workers,
                                        is_threadsafe,
                                    ))

                success = success and all(f.result() for f in futures)
            else:
                # Flatten pipeline and run serially.
                for loader_class, api_url, max_workers in itertools.chain(
                        *(stage for stage in pipeline)):
                    if api_url:
                        logger.info('Executing Loader [%s]', api_url)
                        success = execute_loader(
                            loader_class,
                            partner,
                            api_url,
                            max_workers,
                            is_threadsafe,
                        ) and success

            # TODO Cleanup CourseRun overrides equivalent to the Course values.

        connection.connect()  # reconnect to django outside of loop (see connect comment above)

        # Clean up any media orphans that we might have created
        delete_orphans(Image)
        delete_orphans(Video)

        set_api_timestamp()

        if not success:
            raise CommandError('One or more of the data loaders above failed.')
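The long copy-on-write comment in the example above boils down to one rule: forked worker processes must not share the parent's database connection; the parent reconnects around the fork (connection.connect() above) and each child opens its own connection. A framework-free sketch of the per-process-connection rule using sqlite3 (not the Django code itself):

import concurrent.futures
import sqlite3

def ping(i):
    # Each worker process opens its own connection instead of inheriting one.
    with sqlite3.connect(':memory:') as conn:
        return conn.execute('SELECT ?', (i,)).fetchone()[0]

if __name__ == '__main__':
    with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
        print(list(executor.map(ping, range(4))))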
Example #58
    def convert_smooth(self):
        # First pass: key every frame

        futures = []
        with ThreadPoolExecutor(thread_name_prefix="prepare", max_workers=self.options.max_workers) as executor:
            for bone_name in self.options.motion.bones.keys():
                if bone_name in self.options.model.bones and bone_name in self.options.bone_list and bone_name not in ["両目"]:
                # if bone_name in self.options.model.bones and bone_name in self.options.bone_list:
                    if self.options.interpolation == 0 and len(self.options.motion.bones[bone_name].keys()) >= 2:
                        # For linear interpolation, simply key every frame
                        futures.append(executor.submit(self.prepare_linear, bone_name))
                    elif self.options.interpolation == 1:
                        if len(self.options.motion.bones[bone_name].keys()) > 2:
                            # For circular interpolation, key every frame along the circle
                            futures.append(executor.submit(self.prepare_circle, bone_name))
                        else:
                            # Not enough keys for circular interpolation, so fall back to linear interpolation
                            logger.warning("円形補間が指定されましたが、キー数が3つに満たないため、計算出来ません。ボーン名: %s", bone_name)
                            futures.append(executor.submit(self.prepare_linear, bone_name))
                    elif self.options.interpolation == 2:
                        if len(self.options.motion.bones[bone_name].keys()) > 2:
                            # For curve interpolation, key every frame with a Catmull-Rom curve
                            futures.append(executor.submit(self.prepare_curve, bone_name))
                        else:
                            # Not enough keys for curve interpolation, so fall back to linear interpolation
                            logger.warning("曲線補間が指定されましたが、キー数が3つに満たないため、計算出来ません。ボーン名: %s", bone_name)
                            futures.append(executor.submit(self.prepare_linear, bone_name))

            for morph_name in self.options.motion.morphs.keys():
                if morph_name in self.options.model.morphs and morph_name in self.options.bone_list:
                    if self.options.interpolation == 0 and len(self.options.motion.morphs[morph_name].keys()) >= 2:
                        # For linear interpolation, simply key every frame
                        futures.append(executor.submit(self.prepare_linear, morph_name, is_morph=True))
                    elif self.options.interpolation == 1:
                        if len(self.options.motion.morphs[morph_name].keys()) > 2:
                            # For circular interpolation, morphs are left as-is (no circular computation)
                            logger.warning("円形補間が指定されましたが、モーフは円形補間計算が出来ません。モーフ名: %s", morph_name)
                            futures.append(executor.submit(self.prepare_linear, morph_name, is_morph=True))
                        else:
                            # Not enough keys for circular interpolation, so fall back to linear interpolation
                            logger.warning("円形補間が指定されましたが、キー数が3つに満たないため、計算出来ません。モーフ名: %s", morph_name)
                            futures.append(executor.submit(self.prepare_linear, morph_name, is_morph=True))
                    elif self.options.interpolation == 2:
                        if len(self.options.motion.morphs[morph_name].keys()) > 2:
                            # For curve interpolation, key every frame with a Catmull-Rom curve
                            futures.append(executor.submit(self.prepare_curve_morph, morph_name))
                        else:
                            # Not enough keys for curve interpolation, so fall back to linear interpolation
                            logger.warning("曲線補間が指定されましたが、キー数が3つに満たないため、計算出来ません。モーフ名: %s", morph_name)
                            futures.append(executor.submit(self.prepare_linear, morph_name, is_morph=True))

        concurrent.futures.wait(futures, timeout=None, return_when=concurrent.futures.FIRST_EXCEPTION)

        for f in futures:
            if not f.result():
                return False
        
        # If two or more passes are configured, remove unnecessary keys
        if self.options.loop_cnt >= 2 and self.options.remove_unnecessary_flg:
            futures = []
            with ThreadPoolExecutor(thread_name_prefix="remove", max_workers=self.options.max_workers) as executor:
                for bone_name in self.options.motion.bones.keys():
                    if bone_name in self.options.model.bones and bone_name in self.options.bone_list and bone_name not in ["両目"]:
                    # if bone_name in self.options.model.bones and bone_name in self.options.bone_list:
                        futures.append(executor.submit(self.remove_filterd_bf, bone_name))
                for morph_name in self.options.motion.morphs.keys():
                    if morph_name in self.options.model.morphs and morph_name in self.options.bone_list:
                        futures.append(executor.submit(self.remove_filterd_mf, morph_name))
            concurrent.futures.wait(futures, timeout=None, return_when=concurrent.futures.FIRST_EXCEPTION)

            for f in futures:
                if not f.result():
                    return False

        return True
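Both passes above use the same submit-all / wait(return_when=FIRST_EXCEPTION) / check-results shape: wait returns as soon as any task raises, and the per-future check decides whether the whole operation succeeded. A minimal sketch of that fail-fast shape with a made-up step function:

import concurrent.futures

def step(i):
    if i == 3:
        raise ValueError('bad input: %d' % i)
    return True

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(step, i) for i in range(8)]
    # Returns as soon as any future raises (or once everything completes).
    done, not_done = concurrent.futures.wait(
        futures, return_when=concurrent.futures.FIRST_EXCEPTION)

for f in done:
    if f.exception() is not None:
        print('failed:', f.exception())
        break
else:
    print('all succeeded')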
Example #59
File: tasks.py  Project: fakegit/rippy
def handle_job(job_id):
    logger.info("Trying to handle job %s" % (job_id,))
    job = Job.objects.get(pk=job_id)
    if job.status != Job.PENDING:
        raise JobNotPendingException(
            "Job %i is not in correct status, it is in %s" % (job_id, job.status)
        )

    for extractor_cls in EXTRACTORS:
        if extractor_cls.matcher.match(job.url):
            break
    else:
        logger.warning("Failed to find any extractor for %s" % (job_id,))
        job.status = job.FAILED
        job.status_message = "Failed to find any extractor"
        job.save()
        return

    job.status = job.PARSING
    job.status_message = "Extracting video using %s" % (extractor_cls.name,)
    job.save()

    for i in range(3):
        try:
            chrome_url = get_chrome_url()
        except:
            logger.exception("Failed to get chrome URL")

            job.status_message = "Failed to get chrome URL"
            job.status = job.FAILED
            job.save()

            return

        try:
            result = asyncio.get_event_loop().run_until_complete(
                execute_job(extractor_cls, chrome_url, job)
            )
        except (errors.PyppeteerError, asyncio.TimeoutError):
            logger.exception("Pyppeteer failed, attempt %s of 3" % (i + 1))
            time.sleep(5)
        else:
            break
    else:
        job.status_message = "Failed to get puppeteer to get url"
        job.status = job.FAILED
        job.save()

        return

    media_path = str(job.pk)
    target_path = os.path.join(settings.MEDIA_ROOT, media_path)
    if not os.path.isdir(target_path):
        os.makedirs(target_path)

    logger.debug("Fetching m3u8")
    headers = {"User-Agent": USER_AGENT}
    headers.update(result["headers"])
    r = requests.get(result["url"], headers=headers)
    playlist = m3u8.loads(r.text)

    target_filename = "%s - %s.mp4" % (sanitize_title(result["title"]), result["id"])
    target_full_path = os.path.join(target_path, target_filename)

    job.name = target_filename
    job.status = job.DOWNLOADING
    job.status_message = f"Downloading {len(playlist.segments)} segments"
    job.save()

    logger.debug("Queuing up %s segments" % (len(playlist.segments),))
    with tempfile.TemporaryDirectory() as download_dir:
        segments = []
        with concurrent.futures.ThreadPoolExecutor(
            max_workers=settings.DOWNLOAD_CONCURRENCY
        ) as executor:
            futures = []
            for i, segment in enumerate(playlist.segments):
                target = os.path.join(download_dir, f"{i:05}.ts")
                segments.append(target)
                futures.append(
                    (
                        executor.submit(
                            download_file_segment, segment.uri, target, headers
                        )
                    )
                )

            for i, future in enumerate(futures, 1):
                try:
                    future.result()
                except Exception:
                    logger.exception("Failed to download")
                    job.status = job.FAILED
                    job.status_message = f"Failed while downloading segment {i}"
                    job.save()
                    return
                else:
                    job.send_progress_update(total=len(playlist.segments), progress=i)

        job.status_message = f"Finished downloading {len(playlist.segments)} segments, merging with ffmpeg"
        job.save()

        cmd = ["ffmpeg"]
        cmd += [
            "-y",
            "-i",
            f"concat:{'|'.join(segments)}",
            "-c",
            "copy",
            "-f",
            "mp4",
            target_full_path,
        ]

        logger.debug("Merging result with ffmpeg")

        p = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)

        duration, progress = None, None

        for line in p.stderr:
            line = line.decode("utf-8")
            if not duration:
                duration_result = re.findall(
                    r"Duration: (\d{2}:\d{2}:\d{2}\.\d{2})", line
                )
                if duration_result:
                    duration = duration_to_number(duration_result[0])

            progress_result = re.findall(r"time=(\d{2}:\d{2}:\d{2}\.\d{2})", line)
            if progress_result:
                progress = duration_to_number(progress_result[0])

            if duration is not None and progress is not None:
                job.send_progress_update(duration, progress)

        returncode = p.wait()

        if returncode:
            job.status_message = "Failed FFMpeg with returncode %s" % (returncode,)
            job.status = job.FAILED
        else:
            job.path = os.path.join(media_path, target_filename)
            job.status_message = "Finished"
            job.status = job.SUCCESS
        job.save()
Example #60
	# Measurement loop
	measurements = []
	for i in range( REPS ):
		measurement = measure_once( s, str( i ) )
		if ( i != 0 ): # Skip the first measurement, seems to be an outlier
			measurements.append( measurement )

	s.close()
	return measurements


# Create parallel clients
executor = ThreadPoolExecutor( max_workers=CLIENT_COUNT )
futures = []
for i in range( CLIENT_COUNT ):
	futures.append( executor.submit( measure ) )

print( "Waiting for clients to finish..." )
concurrent.futures.wait( futures )
print( "...done" )

combined = []
for future in futures:
	combined = combined + future.result()

for q in [50, 90, 95, 100]:
  print ("{}% percentile: {}".format (q, np.percentile(combined, q)))

mean = np.mean( combined )
std = np.std( combined )
median = np.median( combined )