import concurrent.futures


def wait_futures(futures):
    """Collect a result line for each completed search.

    ``futures`` maps each future to a ``(source, to_find)`` tuple.
    """
    results = []

    for future in concurrent.futures.as_completed(futures.keys()):
        if future.exception() is None:
            source, to_find = futures.get(future)
            if future.result():
                results.append(
                    f"The string {to_find} is present in the file {source}")
            else:
                results.append(
                    f"The string {to_find} is not present in the file {source}")

    return results
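The function above expects futures to map each future to a (source, to_find) tuple. A minimal sketch of how that mapping might be built, using hypothetical find_string_in_file and submit_searches helpers that are not part of the original example:

def find_string_in_file(source, to_find):
    # True if the string occurs anywhere in the file.
    with open(source, encoding='utf-8') as handle:
        return to_find in handle.read()


def submit_searches(pairs):
    # pairs is an iterable of (source, to_find) tuples.
    futures = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        for source, to_find in pairs:
            future = executor.submit(find_string_in_file, source, to_find)
            futures[future] = (source, to_find)
        return wait_futures(futures)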
Example #2
    def read(self):
        # Requires module-level imports of collectd, traceback, and
        # concurrent.futures.
        futures = {}
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
            # One temperature read per disk, keeping a future -> disk mapping.
            for disk in self.disks:
                futures[executor.submit(self.get_temperature, disk)] = disk

            for fut in concurrent.futures.as_completed(futures.keys()):
                disk = futures.get(fut)
                if not disk:
                    continue
                try:
                    temp = fut.result()
                    if temp is None:
                        continue
                    self.dispatch_value(disk, 'temperature', temp,
                                        data_type='temperature')
                except Exception:
                    collectd.info(traceback.format_exc())
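Each worker thread above calls self.get_temperature(disk). A hedged sketch of what that helper might look like, assuming the value is parsed from smartctl output; the real plugin may read the temperature differently:

    def get_temperature(self, disk):
        # Hypothetical helper: parse the SMART temperature attribute (ID 194)
        # from `smartctl -A <disk>`; returns None when it cannot be read.
        import subprocess
        try:
            output = subprocess.check_output(
                ['smartctl', '-A', disk], universal_newlines=True)
        except (OSError, subprocess.CalledProcessError):
            return None
        for line in output.splitlines():
            fields = line.split()
            if len(fields) > 9 and fields[0] == '194':
                return float(fields[9])
        return None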
Example #3
def extract_everything():
    """Extract everything in components.yml, respecting order requirements."""
    # Relies on module-level imports of os, time, and concurrent.futures, plus
    # the project's component module and an extract_comp(pool, comp) helper.
    def q_key(comp):
        """Decide extract priority by pointer-chase depth, filesize in ties."""
        after = {c.install_after: c.name for c in component.ALL.values()}
        name, seen = comp.name, []
        while name in after:
            seen.append(name)
            name = after.get(name)
            if name in seen:
                raise ValueError('Cyclic "install_after" config detected: ' +
                                 " -> ".join(seen + [name]))
        return len(seen), os.path.getsize(comp.path)

    queue = list(component.ALL.values()) + [
        component.ALL["Dwarf Fortress"]._replace(name=path, extract_to=path)
        for path in ("curr_baseline", "graphics/ASCII")
    ]
    queue.sort(key=q_key, reverse=True)
    with concurrent.futures.ProcessPoolExecutor(8) as pool:
        futures = {}
        while queue:
            while sum(f.running() for f in futures.values()) < 8:
                for idx, comp in enumerate(queue):
                    if comp.extract_to is False:
                        assert comp.filename.endswith(".ini")
                        queue.pop(idx)
                        continue  # for Therapist, handled in build.py
                    aft = futures.get(comp.install_after)
                    # Even if it's highest-priority, wait for parent job(s)
                    if aft is None or aft.done():
                        futures[comp.name] = extract_comp(pool, queue.pop(idx))
                        break  # reset index or we might pop the wrong item
                else:
                    break  # if there was nothing eligible to extract, sleep
            time.sleep(0.01)
    failed = [k for k, v in futures.items() if v.exception() is not None]
    for key in failed:
        comp = component.ALL.pop(key, None)
        for lst in (component.FILES, component.GRAPHICS, component.UTILITIES):
            if comp in lst:
                lst.remove(comp)
    if failed:
        print("ERROR:  Could not extract: " + ", ".join(failed))
Example #4
def dataflow_jobs_read(projectId, location, file, output, workers, pagefrom,
                       pageto, besteffort):
    # Relies on module-level imports of json, os, logging, and
    # concurrent.futures, plus a dataflow_job(projectId, location, job, pageId)
    # helper defined elsewhere.

    logging.info('worker pool set to %d', workers)

    pages = json.load(file)
    logging.debug('pages loaded %s', str(file))

    files = list()
    failed = list()

    for page in pages:
        pageId = page.get('page')

        if pagefrom and pageId < pagefrom:
            continue
        elif pageto and pageId > pageto:
            break

        jobs = page.get('jobs')

        logging.debug('processing page = %d', pageId)

        try:
            with concurrent.futures.ProcessPoolExecutor(
                    max_workers=workers) as executor:
                logging.debug(executor)

                futures = {
                    executor.submit(dataflow_job, projectId, location, job,
                                    pageId): job
                    for job in jobs
                }

                responses = list()
                responses_failed = list()

                for future in concurrent.futures.as_completed(futures):
                    if besteffort:
                        try:
                            responses.append(future.result())
                        except Exception as e:
                            job = futures.get(future)
                            responses_failed.append(job)
                            logging.error('failed %s %s', job.get('id'), e)
                            # best effort: record the failure and move on
                    else:
                        responses.append(future.result())

                if responses_failed:
                    # store failed jobs as the jobs in the page
                    # and append the page to the failed ones
                    page['jobs'] = responses_failed
                    failed.append(page)

                filepath = '{}{}{}_dataflow.{}.json'.format(
                    output, os.path.sep, projectId, str(pageId).zfill(3))

                logging.info('writing results into %s', filepath)
                with open(filepath, 'w') as results_file:
                    json.dump(responses, results_file)

                files.append(filepath)
        except Exception:
            logging.exception('fail to process page %d', pageId)
            failed.append(page)

    if failed:
        filepath = '{}{}{}_dataflow.failed.json'.format(
            output, os.path.sep, projectId)
        logging.info('writing failed pages into %s', filepath)
        with open(filepath, 'w') as failed_file:
            json.dump(failed, failed_file)
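A hedged usage sketch: the function expects an already-open file object whose JSON payload is a list of {"page": <number>, "jobs": [...]} entries; the project, location, and paths below are placeholders, not values from the original code:

logging.basicConfig(level=logging.INFO)
with open('dataflow_pages.json') as pages_file:
    dataflow_jobs_read(projectId='my-project', location='us-central1',
                       file=pages_file, output='out', workers=4,
                       pagefrom=None, pageto=None, besteffort=True)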