Example #1
	for line in range(0,len(str)):
		matrix2.append([])
		linenow = str[line].split(',')
		for elem in range(0,len(linenow)):
			matrix2[line].append(int(linenow[elem]))
	
	

validmatrix = matrix2           #determine the dimensions of the result matrix so that list bounds in the loops stay valid
if len(matrix1) > len(matrix2):
	validmatrix = matrix1


if __name__ == '__main__':      #runs only when this file is executed directly, not imported as a library; needed so processes can be spawned safely

	manager = Manager()     #manager for memory shared between processes

	Global = manager.Namespace()        #shared namespace
	Global.matrix_result = [0 for i in range(0,len(validmatrix)*len(validmatrix[0]))]       #allocate the flattened result matrix
	
	
	pool = Pool(len(matrix1))       #create the worker pool
	
	pool.starmap(multiply_matrix,[(matrix1,matrix2,linehere,Global) for linehere in range(0,len(matrix1))])     #use starmap because several arguments must be passed to each worker

	matrix_result = Global.matrix_result        #copy the result back into the main process

	#print result
	now = 0
	for line in range(0,len(validmatrix)):      #print the result to the console
		print('[',end='')
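
A note on the pattern above: the result matrix is shared through Manager().Namespace(), and in-place edits to a value fetched from a namespace proxy are not sent back to the manager, so each worker has to reassign the attribute (ideally under a lock, since the read-modify-write is not atomic). A minimal sketch of that idea, with a hypothetical fill_row worker standing in for multiply_matrix:

from multiprocessing import Manager, Pool

def fill_row(row, width, ns, lock):
    with lock:                      # make the read-modify-write atomic
        result = ns.result          # fetching the attribute returns a copy
        for col in range(width):
            result[row * width + col] = row * width + col
        ns.result = result          # reassign so the change reaches the manager

if __name__ == '__main__':
    rows, cols = 3, 4
    manager = Manager()
    ns = manager.Namespace()
    ns.result = [0] * (rows * cols)
    lock = manager.Lock()
    with Pool(rows) as pool:
        pool.starmap(fill_row, [(r, cols, ns, lock) for r in range(rows)])
    print(ns.result)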
Example #2
def main():
    """Entry point."""
    import os
    import sys
    import gc
    from multiprocessing import Process, Manager
    from .parser import parse_args
    from ..utils.bids import write_derivative_description

    parse_args()

    popylar = None
    if not config.execution.notrack:
        import popylar
        from ..__about__ import __ga_id__

        config.loggers.cli.info(
            "Your usage of dmriprep is being recorded using popylar (https://popylar.github.io/). "  # noqa
            "For details, see https://nipreps.github.io/dmriprep/usage.html. "
            "To opt out, call dmriprep with a `--notrack` flag"
        )
        popylar.track_event(__ga_id__, "run", "cli_run")

    # CRITICAL Save the config to a file. This is necessary because the execution graph
    # is built as a separate process to keep the memory footprint low. The most
    # straightforward way to communicate with the child process is via the filesystem.
    config_file = config.execution.work_dir / ".dmriprep.toml"
    config.to_filename(config_file)

    # CRITICAL Call build_workflow(config_file, retval) in a subprocess.
    # Because Python on Linux does not ever free virtual memory (VM), running the
    # workflow construction jailed within a process preempts excessive VM buildup.
    with Manager() as mgr:
        from .workflow import build_workflow

        retval = mgr.dict()
        p = Process(target=build_workflow, args=(str(config_file), retval))
        p.start()
        p.join()

        retcode = p.exitcode or retval.get("return_code", 0)
        dmriprep_wf = retval.get("workflow", None)

    # CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
    # function executed constrained in a process may change the config (and thus the global
    # state of dMRIPrep).
    config.load(config_file)

    if config.execution.reports_only:
        sys.exit(int(retcode > 0))

    if dmriprep_wf and config.execution.write_graph:
        dmriprep_wf.write_graph(graph2use="colored",
                                format="svg",
                                simple_form=True)

    retcode = retcode or (dmriprep_wf is None) * os.EX_SOFTWARE
    if retcode != 0:
        sys.exit(retcode)

    # Generate boilerplate
    with Manager() as mgr:
        from .workflow import build_boilerplate

        p = Process(target=build_boilerplate,
                    args=(str(config_file), dmriprep_wf))
        p.start()
        p.join()

    if config.execution.boilerplate_only:
        sys.exit(int(retcode > 0))

    # Clean up master process before running workflow, which may create forks
    gc.collect()

    if popylar is not None:
        popylar.track_event(__ga_id__, "run", "started")

    config.loggers.workflow.log(
        15,
        "\n".join(["dMRIPrep config:"] +
                  ["\t\t%s" % s for s in config.dumps().splitlines()]),
    )
    config.loggers.workflow.log(25, "dMRIPrep started!")
    errno = 1  # Default is error exit unless otherwise set
    try:
        dmriprep_wf.run(**config.nipype.get_plugin())
    except Exception as e:
        if not config.execution.notrack:
            popylar.track_event(__ga_id__, "run", "error")
        config.loggers.workflow.critical("dMRIPrep failed: %s", e)
        raise
    else:
        config.loggers.workflow.log(25, "dMRIPrep finished successfully!")

        # Bother users with the boilerplate only iff the workflow went okay.
        if (config.execution.output_dir / "dmriprep" / "logs" /
                "CITATION.md").exists():
            config.loggers.workflow.log(
                25, "Works derived from this dMRIPrep execution should "
                "include the following boilerplate: "
                f"{config.execution.output_dir / 'dmriprep' / 'logs' / 'CITATION.md'}."
            )

        if config.workflow.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any

            dseg_tsv = str(
                api.get("fsaverage", suffix="dseg", extension=[".tsv"]))
            _copy_any(
                dseg_tsv,
                str(config.execution.output_dir / "dmriprep" /
                    "desc-aseg_dseg.tsv"),
            )
            _copy_any(
                dseg_tsv,
                str(config.execution.output_dir / "dmriprep" /
                    "desc-aparcaseg_dseg.tsv"),
            )
        errno = 0
    finally:
        from niworkflows.reports import generate_reports
        from pkg_resources import resource_filename as pkgrf

        # Generate reports phase
        failed_reports = generate_reports(
            config.execution.participant_label,
            config.execution.output_dir,
            config.execution.run_uuid,
            config=pkgrf("dmriprep", "config/reports-spec.yml"),
            packagename="dmriprep",
        )
        write_derivative_description(config.execution.bids_dir,
                                     config.execution.output_dir / "dmriprep")

        if failed_reports and not config.execution.notrack:
            popylar.track_event(__ga_id__, "run", "reporting_error")
        sys.exit(int((errno + failed_reports) > 0))
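
The two CRITICAL comments describe the pattern this entry point relies on: the memory-hungry workflow construction runs in a child process, results come back through a Manager().dict(), and the config round-trips through a file. A stripped-down sketch of just the Manager-dict hand-off; build_workflow below is a stand-in, not dMRIPrep's real builder:

from multiprocessing import Manager, Process

def build_workflow(config_file, retval):
    # Stand-in builder: record its results in the shared dict.
    retval["return_code"] = 0
    retval["workflow"] = "workflow built from %s" % config_file

if __name__ == "__main__":
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_workflow, args=("dmriprep.toml", retval))
        p.start()
        p.join()
        retcode = p.exitcode or retval.get("return_code", 0)
        workflow = retval.get("workflow", None)   # copy values out before the manager exits
    print(retcode, workflow)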
Example #3
from pathlib import Path
import os
import signal
import logging
from contextlib import contextmanager
from typing import TypeVar
from multiprocessing import Manager

# TODO: Fix "type: ignore" -> why do I get "Module ... has no attribute ..."?
# from my_garlicsim_py3.garlicsim.general_misc import pickle_tools
from flapy.pickle_tools import dumps_skip, loads_skip  # type: ignore  # pylint: disable=ungrouped-imports
from flapy.copy_fallback import deepcopy  # type: ignore  # pylint: disable=ungrouped-imports
from deepdiff import DeepHash  # type: ignore  # pylint: disable=ungrouped-imports
from flapy.results_parser import FuncDescriptor  # type: ignore  # pylint: disable=ungrouped-imports

T = TypeVar("T")  # pylint: disable=invalid-name

logging.getLogger("deepdiff").setLevel(logging.ERROR)

# Initialize multiprocessing.Manager
MANAGER = Manager()


@contextmanager
def timeout(time):
    """
    Usage: `with timeout(5): foo` in order to execute foo with the given timeout
    """
    # Register a function to raise a TimeoutError on the signal.
    signal.signal(signal.SIGALRM, raise_timeout)
    # Schedule the signal to be sent after ``time``.
    signal.alarm(time)

    try:
        yield
    finally:
        # Unschedule the alarm so it cannot fire after the block exits.
        signal.alarm(0)
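
The handler registered above, raise_timeout, is defined elsewhere in the file; a plausible (assumed) version and a usage example are sketched below. Note that signal.alarm is Unix-only, and slow_call is a hypothetical long-running function:

def raise_timeout(signum, frame):
    # Assumed handler: turn the SIGALRM delivery into an exception.
    raise TimeoutError

try:
    with timeout(5):
        slow_call()           # aborted with TimeoutError after 5 seconds
except TimeoutError:
    print("timed out")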
Example #4
def main(argv=None):
    if argv is None:
        argv = sys.argv

    id_base = int(argv[1])

    manager = Manager()

    wait_q = PriorityQueue()

    out_file = open('latency.csv', 'a')

    load_thread = Thread(target=check_load)
    load_thread.start()

    logger.info("PID: %d" % os.getpid())
    logger.info("Running the gauntlet:")
    id = id_base
    num_clients = 0
    for i in range(2):
        wait_q.put((time.time(), False, id))
        id += 1
        num_clients += 1
    for i in range(2):
        wait_q.put((time.time(), True, id))
        id += 1
        num_clients += 1

    pool = Pool(processes=num_clients)

    while True:
        t, attacker, id = wait_q.get()
        delta = t - time.time()
        delay = max(delta, 0)
        if delay > 0:
            Timer(delay, pool.apply_async, (fetch, (attacker, id))).start()
        else:
            pool.apply_async(fetch, (attacker, id))
        wait_q.task_done()

        while not wait_q_q.empty():
            wait_q.put(wait_q_q.get())

        # we have to make sure there is at least one element in wait_q before the end of the loop
        if wait_q.empty():
            r = wait_q_q.get()
            wait_q.put(r)

        while not res_q.empty():
            id, n, t, error = res_q.get()
            logger.info("id: %d got a response for n = %d in %f error: %d" %
                        (id, n, t, error))
            out_file.write("%d, %d, %f, %d\n" % (id, n, t, error))
        #if time.time() - start_time > 120:
        #    break

        out_file.flush()

    while not res_q.empty():
        res = res_q.get()
        out_file.write("%d, %f, %d\n" % (res[0], res[1], res[2]))

    return 0
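
A plain multiprocessing.Queue cannot be passed as an argument to Pool workers (pickling it raises RuntimeError), which is presumably why this script pairs the Pool with Manager-backed queues such as res_q and wait_q_q (their creation is not shown above). A small sketch of that pattern, with a hypothetical fetch worker that reports into a Manager().Queue():

from multiprocessing import Manager, Pool
import time

def fetch(attacker, req_id, res_q):
    # Hypothetical worker: do some "work" and report (id, duration, error flag).
    start = time.time()
    time.sleep(0.1)
    res_q.put((req_id, time.time() - start, int(attacker)))

if __name__ == '__main__':
    manager = Manager()
    res_q = manager.Queue()           # queue proxies can be shipped to pool workers
    with Pool(processes=4) as pool:
        for req_id in range(4):
            pool.apply_async(fetch, (req_id % 2 == 0, req_id, res_q))
        pool.close()
        pool.join()
    while not res_q.empty():
        print(res_q.get())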
Example #5
    def evolve(self):
        """
        the main method, that runs the evolutionary algorithm
        """
        # generate the population if starting
        if self.started_index == 0:
            self.generate_initial_population()

        cores_num = cpu_count()
        loop_length = int(self.iterations / cores_num)

        i = self.started_index
        with click.progressbar(length=loop_length,
                               label='Evolving...') as progressbar:
            while i < loop_length:
                with Manager() as manager:
                    people = manager.list([])
                    workers = []

                    def get_baby(people):
                        """

                        :param people:
                        """
                        # let's make a baby together LOL
                        baby = self.make_love()
                        # let's mutate baby's genes, who knows, maybe we create a x-man or something
                        baby = self.mutate(baby)
                        people.append(baby)

                    for _ in range(cores_num):
                        w = Process(target=get_baby, args=[people])
                        w.start()
                        workers.append(w)

                    for w in workers:
                        w.join()

                    # update dashboard
                    click.clear()
                    progressbar.update(1)
                    print('\n')

                    table.key_value(
                        [[
                            'started at',
                            jh.get_arrow(self.start_time).humanize()
                        ],
                         [
                             'index/total', '{}/{}'.format(
                                 (i + 1) * cores_num, self.iterations)
                         ],
                         [
                             'population_size, solution_len', '{}, {}'.format(
                                 self.population_size, self.solution_len)
                         ],
                         [
                             'route', '{}, {}, {}, {}'.format(
                                 router.routes[0].exchange,
                                 router.routes[0].symbol,
                                 router.routes[0].timeframe,
                                 router.routes[0].strategy_name)
                         ]],
                        'info',
                        alignments=('left', 'right'))

                    print('\n')

                    # print fittest individuals
                    fittest_list = [
                        ['rank', 'DNA', 'fitness', 'training|testing logs'],
                    ]
                    if self.population_size > 50:
                        number_of_ind_to_show = 25
                    elif self.population_size > 20:
                        number_of_ind_to_show = 20
                    elif self.population_size > 9:
                        number_of_ind_to_show = 9
                    else:
                        raise ValueError(
                            'self.population_size cannot be less than 10')

                    for j in range(number_of_ind_to_show):
                        fittest_list.append([
                            j + 1, self.population[j]['dna'],
                            self.population[j]['fitness'],
                            self.population[j]['log']
                        ], )
                    table.multi_value(fittest_list,
                                      with_headers=True,
                                      alignments=('left', 'left', 'right',
                                                  'left'))

                    # one person has to die and be replaced with the newborn baby
                    for baby in people:
                        random_index = randint(0, len(self.population) - 1)
                        try:
                            self.population[random_index] = baby
                        except IndexError:
                            print('=============')
                            print('self.population_size: {}'.format(
                                self.population_size))
                            print('self.population length: {}'.format(
                                len(self.population)))
                            jh.terminate_app()

                        self.population = list(
                            sorted(self.population,
                                   key=lambda x: x['fitness'],
                                   reverse=True))

                        # reaching the fitness goal could also end the process
                        if baby['fitness'] >= self.fitness_goal:
                            progressbar.update(self.iterations - i)
                            print('\n')
                            print('fitness goal reached after iteration {}'.
                                  format(i))
                            return baby

                    # save progress after every n iterations
                    if i != 0 and int(i * cores_num) % 50 == 0:
                        self.save_progress(i)

                    # take a snapshot of the fittest individuals of the population
                    if i != 0 and i % int(100 / cores_num) == 0:
                        self.take_snapshot(i * cores_num)

                    i += 1

        print('\n\n')
        print('Finished {} iterations.'.format(self.iterations))

        return self.population
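
The heart of the loop above is a Manager().list() that the child processes append newborn individuals to and that the parent then folds back into the population. Reduced to its essentials, with make_love()/mutate() replaced by a stand-in:

from multiprocessing import Manager, Process, cpu_count
from random import randint

def get_baby(people):
    # Stand-in for make_love() + mutate(): produce one individual.
    people.append({'dna': 'abc', 'fitness': randint(0, 100)})

if __name__ == '__main__':
    with Manager() as manager:
        people = manager.list()
        workers = [Process(target=get_baby, args=(people,)) for _ in range(cpu_count())]
        for w in workers:
            w.start()
        for w in workers:
            w.join()
        babies = list(people)     # copy out before the manager shuts down
    print(len(babies), 'babies produced')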
Example #6
    def _calc_output(self,
                     place,
                     parallel=False,
                     no_check_set=None,
                     loss=None,
                     enable_inplace=None,
                     for_inplace_test=False):
        program = Program()
        block = program.global_block()
        op = self._append_ops(block)

        inputs = self._get_inputs(block)
        outputs = self._get_outputs(block)
        feed_map = self.feed_var(inputs, place)

        if for_inplace_test:
            # Some variables' tensors hold no buffer (the tensor's _holder is NULL), like XShape in the reshape2 op,
            # and the shapes of those variables contain 0 (e.g. XShape.shape = [0, 2, 5]).
            # Mark those variables persistable so the inplace grad test can fetch them from global_scope directly rather than feeding them,
            # since the feed op calls check_memory_size(), which fails when the tensor's holder_ is NULL.
            for name in op.output_arg_names:
                var = block.var(name)
                var.persistable = True
        original_program = program
        #if parallel:
        #    use_cuda = False
        #    if isinstance(place, fluid.CUDAPlace):
        #        use_cuda = True
        #    compiled_prog = fluid.CompiledProgram(program).with_data_parallel(
        #        loss_name=loss.name if loss else None, places=place)
        #    program = compiled_prog
        fetch_list = getattr(self, "fetch_list", [])
        # If the fetch_list is customized by the user, use it directly.
        # If not, fill the fetch_list with the outputs configured by the user in the test.
        if len(fetch_list) == 0:
            for var_name, var in six.iteritems(outputs):
                if no_check_set is not None and var_name in no_check_set:
                    continue
                if isinstance(var, list):
                    for v in var:
                        fetch_list.append(v.name)
                else:
                    fetch_list.append(var.name)
        # If the fetch_list is still empty, fill it with the operator outputs.
        if len(fetch_list) == 0:
            for out_name, out_dup in Operator.get_op_outputs(self.op_type):
                fetch_list.append(str(out_name))

        if enable_inplace is not None:
            build_strategy = fluid.BuildStrategy()
            build_strategy.enable_inplace = enable_inplace

            compiled_prog = fluid.CompiledProgram(program).with_data_parallel(
                build_strategy=build_strategy, places=place)
            program = compiled_prog
        # Manager() cannot store LoDTensor directly,
        # so use one additional element to store the output LoD.
        return_results = [Manager().list() for _ in range(len(fetch_list) + 1)]

        def closure(**kwargs):
            role = kwargs['role']

            pfl_mpc.init("aby3", role, "localhost", self.server,
                         int(self.port))

            #init_op = fluid.default_main_program().global_block().ops[0]

            #_insert_init_op(program, init_op)

            executor = Executor(place)

            executor.run()
            outs = executor.run(program,
                                feed=feed_map,
                                fetch_list=fetch_list,
                                return_numpy=False)
            lod = []
            for idx in range(len(fetch_list)):
                return_results[idx].append(np.array(outs[idx]))

                lod_i = outs[idx].lod()
                lod_concat = []
                for i in lod_i:
                    lod_concat.append(i)
                lod.append(lod_concat)
            return_results[len(fetch_list)].append(lod)

        ret = self.multi_party_run(target=closure)
        self.assertEqual(ret[0], True)

        outs = []
        lod = np.array(return_results[len(fetch_list)])
        for idx in range(len(fetch_list)):
            t = fluid.LoDTensor()
            reveal_data = aby3.reconstruct(np.array(return_results[idx]))
            t.set(reveal_data, place)
            lod_idx = lod[0][idx]

            try:
                t.set_lod(lod_idx)
            except Exception as e:
                pass

            outs.append(t)

        self.op = op
        self.program = original_program
        if for_inplace_test:
            return outs, fetch_list, feed_map, original_program, op.desc
        else:
            return outs, fetch_list
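
The comment above return_results is the key constraint: anything appended to a Manager().list() proxy has to be picklable, which is why the closure stores plain numpy arrays (plus a separate list for the LoD) rather than LoDTensor objects. A small, framework-free illustration of that constraint, using one shared list per output as the test does:

from multiprocessing import Manager, Process
import numpy as np

def producer(shared):
    # Convert to a plain ndarray before appending; list proxies pickle their items.
    shared.append(np.arange(6).reshape(2, 3))

if __name__ == '__main__':
    manager = Manager()
    results = [manager.list() for _ in range(2)]   # one shared list per fetched output
    procs = [Process(target=producer, args=(r,)) for r in results]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print([np.array(r[0]).shape for r in results])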
Example #7
    Ctr = 0
    values = ""
    BallsCnt = len(vB)
    while (Ctr < 5):
        rgen = randint(1, int(BallsCnt)) - 1
        rval = vB.pop(rgen)
        Res[Ctr] = rval
        values = values + " " + str(rval)
        BallsCnt = BallsCnt - 1
        Ctr = Ctr + 1
        time.sleep(1)


while (tickets <= args.tickets):

    with Manager() as manager:
        Balls = manager.list(range(1, 71))
        MBalls = manager.list(range(1, 26))
        Res = manager.list(range(1, 7))

        p1 = Process(target=shuffle_balls, args=(Balls, ))
        p1.start()
        p2 = Process(target=shuffle_balls, args=(MBalls, ))
        p2.start()
        time.sleep(2)
        p = Process(target=popb, args=(
            Balls,
            Res,
        ))
        p.start()
Example #8
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ECT for face alignment')

    parser.add_argument('--gpus', default=None, type=int, help='specify the gpu ID')
    parser.add_argument('--imgDir', default='../imgs/', type=str, help='path to test images')
    parser.add_argument('--outDir', default='../output/', type=str, help='path for saving prediction results')
    parser.add_argument('--prototxt', default='../caffe/models/300w/matlab.prototxt', type=str,
                        help='path to caffe model prototxt')
    parser.add_argument('--model', default='../model_data/300w_68pt.caffemodel', type=str,
                        help='path to the pre-trained caffe model')
    parser.add_argument('--verbose', default=True, help='show the landmark prediction results')
    parser.add_argument('--nIter', default=5, type=int, help='number of iterations for the turning step')
    parser.add_argument('--nComponent', default=30, type=int, help='number of PDM components to be used')
    
    q = Manager().Queue()  
    po = Pool()
    lock = Manager().Lock()
    
    args = parser.parse_args()
    dir_list = get_img_dir_list(args.imgDir)

    length = len(dir_list)
    if (0 == args.gpus):
        start = 0
        end = length // 2
    else:
        start = length // 2 - 1
        end = length

    for i in range(start, end):
Example #9
    def recluster(self, request, pk):
        # need to add error handling and resilience
        start = time.time()
        # print ('run',pk)
        clusters = self.request.query_params.get('clusters')
        # print ('clusters',clusters)
        if clusters is not None:
            if int(clusters)<=0:
                return Response({'error':'Not a valid number of clusters'},status=500)

            #files to be used by the GOUtil
            run_obj = Run.objects.get(id=pk)
            run_uuid = run_obj.name

            genefile_name = 'useruploads/inputgenes-' + run_uuid + '.txt'
            enrich_outputfile_name = 'useruploads/enrichment-' + run_uuid + '.txt'
            sim_outputfile_name = 'useruploads/funsim-' + run_uuid + '.txt'
            semsim_outputfile_name = 'useruploads/semsim-' + run_uuid + '.txt'
            clusters_outputfile_name = 'useruploads/clusters-' + run_uuid + '.txt'

            ###### Data Processing Pipeline ######
            try:
                open(clusters_outputfile_name, 'w').close()  # clear the file
            except IOError:
                print "Error cluster file doesn't exist"
                return Response({'error': 'You must run an enrichment before clustering.'},status=500)

            #spectral clustering
            spectral_start = time.time()
            subprocess.call(['python','/GOUtil/spectralClustering.py', semsim_outputfile_name, clusters_outputfile_name, clusters])
            print "Spectral run time %s" % (time.time()-spectral_start)

            try:
                clusters_outputfile = open(clusters_outputfile_name, 'r')
            except IOError:
                print "Error no clusters exist"
                return Response({'error': 'No clusters found for these Genes and Organism.'},status=500)


            #Multi-threaded loader to update the cluster data
            load_start = time.time()
            manager = Manager()
            lock = manager.Lock()
            taskworker = partial(loadClustersWorker, lock)
            pool = Pool(5)
            tokens = [(run_obj.id, line) for line in clusters_outputfile]

            pool.map(taskworker, tokens)
            pool.close()
            pool.join()
            print "Multi-threading Loading time: %s" % (time.time()-load_start)

            # single_load_start = time.time()
            # for (enrichmentrun, enrichmentinfo) in tokens:
            #     token = enrichmentinfo.split('\t')
            #     enrichment = Enrichment.objects.get(run__id=enrichmentrun,term__termid=token[0])
            #     enrichment.cluster = int(token[1])
            #     enrichment.medoid = 'True'==token[2].replace('\n','')
            #     enrichment.save()
            # print "Single Threaded Loading time: %s" % (time.time()-single_load_start)

            db.connection.close()


            serializer = RunIncludesSerializer(run_obj)

            #cleanup files
            clusters_outputfile.close()

            print "Overall Loading time: %s" % (time.time()-start)
            return Response(serializer.data)
        else:
            return Response({'error':'Not a valid number of clusters'},status=500)
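
The loader builds its worker with functools.partial so the Manager lock is bound as the first argument and pool.map can then feed one token at a time. A minimal sketch of that partial + Pool.map idiom; the worker body is an assumption, not the project's real loadClustersWorker, and the token format mirrors the commented single-threaded version above:

from functools import partial
from multiprocessing import Manager, Pool

def load_cluster_worker(lock, token):
    # Assumed worker: handle one "termid<TAB>cluster<TAB>medoid" line under the lock.
    run_id, line = token
    with lock:
        print(run_id, line.strip())

if __name__ == '__main__':
    manager = Manager()
    lock = manager.Lock()
    taskworker = partial(load_cluster_worker, lock)
    tokens = [(1, 'GO:0008150\t2\tTrue\n'), (1, 'GO:0003674\t1\tFalse\n')]
    with Pool(5) as pool:
        pool.map(taskworker, tokens)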
Example #10
    def generate_initial_population(self):
        """
        generates the initial population
        """
        cores_num = cpu_count()
        loop_length = int(self.population_size / cores_num)

        with click.progressbar(
                length=loop_length,
                label='Generating initial population...') as progressbar:
            for i in range(loop_length):
                people = []
                with Manager() as manager:
                    dna_bucket = manager.list([])
                    workers = []

                    def get_fitness(dna, dna_bucket):
                        try:
                            fitness_score, fitness_log = self.fitness(dna)
                            dna_bucket.append(
                                (dna, fitness_score, fitness_log))
                        except Exception as e:
                            proc = os.getpid()
                            logger.error('process failed - ID: {}'.format(
                                str(proc)))
                            logger.error("".join(
                                traceback.TracebackException.from_exception(
                                    e).format()))
                            raise e

                    try:
                        for _ in range(cores_num):
                            dna = ''.join(
                                choices(self.charset, k=self.solution_len))
                            w = Process(target=get_fitness,
                                        args=(dna, dna_bucket))
                            w.start()
                            workers.append(w)

                        # join workers
                        for w in workers:
                            w.join()
                            if w.exitcode > 0:
                                logger.error(
                                    'a process exited with exitcode: {}'.
                                    format(str(w.exitcode)))
                    except KeyboardInterrupt:
                        print(jh.color('Terminating session...', 'red'))

                        # terminate all workers
                        for w in workers:
                            w.terminate()

                        # shut down the manager process manually since garbage collection won't get to do it for us
                        manager.shutdown()

                        # now we can terminate the main session safely
                        jh.terminate_app()
                    except:
                        raise

                    for d in dna_bucket:
                        people.append({
                            'dna': d[0],
                            'fitness': d[1],
                            'log': d[2]
                        })

                # update dashboard
                click.clear()
                progressbar.update(1)
                print('\n')

                table_items = [
                    ['Started at',
                     jh.get_arrow(self.start_time).humanize()],
                    [
                        'Index', '{}/{}'.format(len(self.population),
                                                self.population_size)
                    ],
                    [
                        'errors/info', '{}/{}'.format(len(store.logs.errors),
                                                      len(store.logs.info))
                    ],
                    [
                        'Trading Route',
                        '{}, {}, {}, {}'.format(router.routes[0].exchange,
                                                router.routes[0].symbol,
                                                router.routes[0].timeframe,
                                                router.routes[0].strategy_name)
                    ],
                    # TODO: add generated DNAs?
                    # ['-'*10, '-'*10],
                    # ['DNA', people[0]['dna']],
                    # ['fitness', round(people[0]['fitness'], 6)],
                    # ['training|testing logs', people[0]['log']],
                ]
                if jh.is_debugging():
                    table_items.insert(
                        3, ['Population Size', self.population_size])
                    table_items.insert(3, ['Iterations', self.iterations])
                    table_items.insert(3,
                                       ['Solution Length', self.solution_len])
                    table_items.insert(3, ['-' * 10, '-' * 10])

                table.key_value(table_items,
                                'Optimize Mode',
                                alignments=('left', 'right'))

                # errors
                if jh.is_debugging() and len(report.errors()):
                    print('\n')
                    table.key_value(report.errors(), 'Error Logs')

                for p in people:
                    self.population.append(p)

        # sort the population
        self.population = list(
            sorted(self.population, key=lambda x: x['fitness'], reverse=True))
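
On Ctrl-C the code above terminates the workers and then calls manager.shutdown() explicitly; as its comment notes, the manager's server process will not be cleaned up for us at that point. A condensed sketch of that cleanup path (slow_worker is a stand-in for the fitness evaluation):

from multiprocessing import Manager, Process
import time

def slow_worker(bucket):
    time.sleep(60)              # pretend to compute a fitness score
    bucket.append('done')

if __name__ == '__main__':
    manager = Manager()
    bucket = manager.list()
    workers = [Process(target=slow_worker, args=(bucket,)) for _ in range(4)]
    for w in workers:
        w.start()
    try:
        for w in workers:
            w.join()
    except KeyboardInterrupt:
        for w in workers:       # stop the children first...
            w.terminate()
        manager.shutdown()      # ...then stop the manager's server process
        raise SystemExit(1)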
Example #11
def train(epochs, policy):
    dir = os.path.abspath(os.getcwd())
    print("Current Working Directory: " + dir)
    if not os.path.exists(dir + "/tmp"):
        os.makedirs(dir + "/tmp")
    manager = Manager()
    rewardDict = manager.dict()
    ns = manager.Namespace()
    ns.rewardCount = 0
    file = "PSR/train/setting/PacMan.json"
    if (policy == "fitted_Q"):
        Parameter.edit(file=file, param="algorithm", newval="fitted_Q")
    elif (policy == "DRL"):
        Parameter.edit(file=file, param="algorithm", newval="DRL")
    else:
        print(
            "Please check the policy input, fitted_Q or DRL. fitted_Q will be set as default."
        )
        Parameter.edit(file=file, param="algorithm", newval="fitted_Q")
    Parameter.readfile(file=file)
    print("Learning algorithm / Policy: " + Parameter.algorithm)
    RandomSamplingForPSR = True
    isbuiltPSR = True
    game = PacMan()
    game.calulateMaxTestID()
    Parameter.maxTestID = game.maxTestID
    trainData = TrainingData()
    iterNo = 0
    agent = Agent(PnumActions=game.getNumActions(),
                  epsilon=Parameter.epsilon,
                  inputDim=(Parameter.svdDim, ),
                  algorithm=Parameter.algorithm,
                  Parrallel=True)
    rdict = readMemoryfromdisk(file="PSR/rewardDict.txt")
    copyRewardDict(rewardDict=rewardDict, rewardDict1=rdict)
    psrModel = CompressedPSR(game.getGameName())
    psrPool = Pool(Parameter.threadPoolSize,
                   initializer=init,
                   initargs=(
                       Parameter.maxTestID,
                       file,
                       Lock(),
                   ))
    print("Finishing Preparation!")
    loadCheckPoint(trainData=trainData,
                   epoch=iterNo,
                   psrModel=psrModel,
                   rewardDict=rewardDict)
    trainData = trainData.MergeAllBatchData()
    trainSet = None
    while iterNo < epochs:
        print("Starting Iteration: " + str(iterNo + 1))
        if RandomSamplingForPSR:
            trainData.newDataBatch()
            game.SimulateTrainData(runs=Parameter.runsForCPSR,
                                   isRandom=True,
                                   psrModel=psrModel,
                                   trainData=trainData,
                                   epoch=iterNo - 1,
                                   pool=psrPool,
                                   RunOnVirtualEnvironment=False,
                                   name=game.getGameName(),
                                   rewardDict=rewardDict,
                                   ns=ns)
            psrModel.validActObset = trainData.validActOb
            WriteEvalUateDataForPacMan(
                EvalData=trainData.data[trainData.getBatch()], epoch=-1)
            trainData.WriteData(file=dir + "/RandomSampling" + str(iterNo) +
                                ".txt")
            RandomSamplingForPSR = False
        if isbuiltPSR:
            psrModel.build(data=trainData,
                           aos=trainData.validActOb,
                           pool=psrPool,
                           rewardDict=rewardDict)
        psrModel.saveModel(epoch=iterNo)
        from bin.Util import writeMemoryintodisk
        writeMemoryintodisk(file="PSR/rewardDict.txt", data=rewardDict.copy())
        print("Convert sampling data into training forms")
        if trainSet is None:
            trainSet = ConvertToTrainSet(data=trainData,
                                         RewardDict=rewardDict,
                                         pool=psrPool,
                                         epoch=iterNo,
                                         name=game.getGameName(),
                                         psrModel=psrModel)
        else:
            trainSet = trainSet + ConvertLastBatchToTrainSet(
                data=trainData,
                RewardDict=rewardDict,
                pool=psrPool,
                epoch=iterNo,
                name=game.getGameName(),
                psrModel=psrModel)
        print("Starting training")
        tick1 = time.time()
        print("Iteration: %d/%d" % (iterNo + 1, epochs))
        agent.Train_And_Update(data=trainSet, epoch=iterNo, pool=psrPool)
        tick2 = time.time()
        print("The time spent on training: " + str(tick2 - tick1) + "s")
        agent.SaveWeight(epoch=iterNo)
        print("Evaluating the agent")
        tick3 = time.time()
        EvalData = game.SimulateTestingRun(runs=Parameter.testingRuns,
                                           epoch=iterNo,
                                           pool=psrPool,
                                           psrModel=psrModel,
                                           name=game.getGameName(),
                                           rewardDict=rewardDict,
                                           ns=ns)
        tick4 = time.time()
        print("The time spent on evaluating agent: " + str(tick4 - tick3) +
              "s")
        trainData.newDataBatch()
        game.SimulateTrainData(
            runs=Parameter.runsForLearning,
            psrModel=psrModel,
            trainData=trainData,
            isRandom=False,
            epoch=iterNo,
            pool=psrPool,
            RunOnVirtualEnvironment=Parameter.trainingOnVirtualEnvironment,
            name=game.getGameName(),
            rewardDict=rewardDict,
            ns=ns)
        trainData.WriteData(file=dir + "/observations/epsilonGreedySampling" +
                            str(iterNo) + ".txt")
        WriteEvalUateDataForPacMan(EvalData=EvalData, epoch=iterNo)
        iterNo = iterNo + 1
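
Here the Pool is created with initializer/initargs so that every worker process receives the shared Lock (and the other globals) once at start-up instead of with every task. A minimal sketch of that idiom; init and the worker body below are stand-ins for the project's own:

from multiprocessing import Lock, Pool

_lock = None                    # set inside each worker by init()

def init(shared_lock):
    # Runs once per worker process; stash the lock in a module-level global.
    global _lock
    _lock = shared_lock

def work(item):
    with _lock:                 # serialise access to some shared resource
        print('processing', item)
    return item * item

if __name__ == '__main__':
    with Pool(4, initializer=init, initargs=(Lock(),)) as pool:
        print(pool.map(work, range(8)))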
Example #12
    def evolve(self):
        """
        the main method, that runs the evolutionary algorithm
        """
        # generate the population if starting
        if self.started_index == 0:
            self.generate_initial_population()
            if len(self.population) < 0.5 * self.population_size:
                raise ValueError(
                    'Too many errors: less than half of the planned population size could be generated.'
                )

        cores_num = cpu_count()
        loop_length = int(self.iterations / cores_num)

        i = self.started_index
        with click.progressbar(length=loop_length,
                               label='Evolving...') as progressbar:
            while i < loop_length:
                with Manager() as manager:
                    people = manager.list([])
                    workers = []

                    def get_baby(people):
                        try:
                            # let's make a baby together LOL
                            baby = self.make_love()
                            # let's mutate baby's genes, who knows, maybe we create a x-man or something
                            baby = self.mutate(baby)
                            people.append(baby)
                        except Exception as e:
                            proc = os.getpid()
                            logger.error('process failed - ID: {}'.format(
                                str(proc)))
                            logger.error("".join(
                                traceback.TracebackException.from_exception(
                                    e).format()))
                            raise e

                    try:
                        for _ in range(cores_num):
                            w = Process(target=get_baby, args=[people])
                            w.start()
                            workers.append(w)

                        for w in workers:
                            w.join()
                            if w.exitcode > 0:
                                logger.error(
                                    'a process exited with exitcode: {}'.
                                    format(str(w.exitcode)))
                    except KeyboardInterrupt:
                        print(jh.color('Terminating session...', 'red'))

                        # terminate all workers
                        for w in workers:
                            w.terminate()

                        # shut down the manager process manually since garbage collection won't get to do it for us
                        manager.shutdown()

                        # now we can terminate the main session safely
                        jh.terminate_app()
                    except:
                        raise

                    # update dashboard
                    click.clear()
                    progressbar.update(1)
                    print('\n')

                    table_items = [[
                        'Started At',
                        jh.get_arrow(self.start_time).humanize()
                    ],
                                   [
                                       'Index/Total',
                                       '{}/{}'.format((i + 1) * cores_num,
                                                      self.iterations)
                                   ],
                                   [
                                       'errors/info',
                                       '{}/{}'.format(len(store.logs.errors),
                                                      len(store.logs.info))
                                   ],
                                   [
                                       'Route', '{}, {}, {}, {}'.format(
                                           router.routes[0].exchange,
                                           router.routes[0].symbol,
                                           router.routes[0].timeframe,
                                           router.routes[0].strategy_name)
                                   ]]
                    if jh.is_debugging():
                        table_items.insert(3, [
                            'Population Size, Solution Length',
                            '{}, {}'.format(self.population_size,
                                            self.solution_len)
                        ])

                    table.key_value(table_items,
                                    'info',
                                    alignments=('left', 'right'))

                    # errors
                    if jh.is_debugging() and len(report.errors()):
                        print('\n')
                        table.key_value(report.errors(), 'Error Logs')

                    print('\n')
                    print('Best DNA candidates:')
                    print('\n')

                    # print fittest individuals
                    fittest_list = [
                        [
                            'Rank', 'DNA', 'Fitness',
                            'Training log || Testing log'
                        ],
                    ]
                    if self.population_size > 50:
                        number_of_ind_to_show = 15
                    elif self.population_size > 20:
                        number_of_ind_to_show = 10
                    elif self.population_size > 9:
                        number_of_ind_to_show = 9
                    else:
                        raise ValueError(
                            'self.population_size cannot be less than 10')

                    for j in range(number_of_ind_to_show):
                        fittest_list.append([
                            j + 1, self.population[j]['dna'],
                            self.population[j]['fitness'],
                            self.population[j]['log']
                        ], )
                    table.multi_value(fittest_list,
                                      with_headers=True,
                                      alignments=('left', 'left', 'right',
                                                  'left'))

                    # one person has to die and be replaced with the newborn baby
                    for baby in people:
                        random_index = randint(0, len(self.population) - 1)
                        try:
                            self.population[random_index] = baby
                        except IndexError:
                            print('=============')
                            print('self.population_size: {}'.format(
                                self.population_size))
                            print('self.population length: {}'.format(
                                len(self.population)))
                            jh.terminate_app()

                        self.population = list(
                            sorted(self.population,
                                   key=lambda x: x['fitness'],
                                   reverse=True))

                        # reaching the fitness goal could also end the process
                        if baby['fitness'] >= self.fitness_goal:
                            progressbar.update(self.iterations - i)
                            print('\n')
                            print('fitness goal reached after iteration {}'.
                                  format(i))
                            return baby

                    # save progress after every n iterations
                    if i != 0 and int(i * cores_num) % 50 == 0:
                        self.save_progress(i)

                    # take a snapshot of the fittest individuals of the population
                    if i != 0 and i % int(100 / cores_num) == 0:
                        self.take_snapshot(i * cores_num)

                    i += 1

        print('\n\n')
        print('Finished {} iterations.'.format(self.iterations))

        return self.population
Example #13
        board.getMessageFromPlayer(listOfPlayer)
        if board.playerLost() or len(deck_shared_memory) == 0:
            board.sendMessageToPlayers("everyone_looses", listOfPlayer)
            print(" C'est terminé ! ")
            sys.exit(0)


if __name__ == "__main__":

    key = 667

    lock = threading.Lock()
    mq = sysv_ipc.MessageQueue(key, sysv_ipc.IPC_CREAT)

    BUFFER_SIZE = 100
    deck_shared_memory = Manager().list()
    game_shared_memory = Manager().list()

    lock = Lock()

    mqTypeBoard = 1
    os.system("clear")
    player_nb = int(input("How many players? "))

    process_pere = Process(target=board,
                           args=(mq, mqTypeBoard, game_shared_memory,
                                 deck_shared_memory, lock,
                                 [i + 2 for i in range(player_nb)]))
    process_pere.start()

    mq.send("creation_jeu".encode(), type=1)
Example #14
    def fill_skeleton_with_model_threaded_new(
        self,
        nodes,
        partition=False,
        augment=False,
        bounds_input_file=None,
        bias=True,
        move_batch_size=1,
        max_moves=None,
        remask_interval=None,
        sparse=False,
        moves=None,
        num_workers=1,
        worker_prequeue=1,
        reject_early_termination=False,
        reject_non_seed_components=True,
        region_shape=CONFIG.model.input_fov_shape,
    ):
        """
        Floodfill small regions around a list of seed points.
        Necessary inputs:
        - model_file
        - seeds
        - volume
        - config

        returns Dict[seed: Mask]
        """

        self.bias = bias
        self.move_batch_size = move_batch_size
        self.max_moves = max_moves
        self.remask_interval = remask_interval
        self.reject_non_seed_components = reject_non_seed_components

        # Get Volume
        self.volume = self.volume.downsample(CONFIG.volume.resolution)

        # Seeds come in real coordinates
        node_ids, seeds = zip(*[(
            nid,
            np.array(
                self.volume.world_coord_to_local(
                    self.volume.real_coord_to_world(seed))),
        ) for nid, seed in nodes.items()])
        map_back = {tuple(seed): nid for seed, nid in zip(seeds, node_ids)}
        logger.warning(
            "CONFIG input_fov_shape: {} vs Sarbor fov_shape: {}".format(
                CONFIG.model.input_fov_shape,
                (
                    region_shape,
                    CONFIG.volume.resolution,
                    region_shape / CONFIG.volume.resolution,
                ),
            ))
        region_shape = region_shape // CONFIG.volume.resolution

        pbar = tqdm(desc="Seed queue",
                    total=len(seeds),
                    miniters=1,
                    smoothing=0.0)
        num_nodes = len(seeds)
        seed_generator = iter(seeds)

        manager = Manager()
        # Queue of seeds to be picked up by workers.
        seed_queue = manager.Queue()
        # Queue of results from workers.
        results_queue = manager.Queue()
        # Dequeue of seeds that were put in seed_queue but have not yet been
        # combined by the main process.
        dispatched_seeds = deque()
        # Seeds that were placed in seed_queue but subsequently covered by other
        # results before their results have been processed. This allows workers to
        # abort working on these seeds by checking this list.
        revoked_seeds = manager.list()
        # Results that have been received by the main process but have not yet
        # been combined because they were not received in the dispatch order.
        unordered_results = {}

        final_results = {}

        def queue_next_seed():
            total = 0
            for seed in seed_generator:
                if unordered_results.get(tuple(seed)) is not None:
                    # This seed has already been filled.
                    total += 1
                    continue
                dispatched_seeds.append(seed)
                seed_queue.put(seed)

                break

            return total

        for _ in range(min(num_nodes, num_workers * worker_prequeue)):
            processed_nodes = queue_next_seed()
            pbar.update(processed_nodes)

        if "CUDA_VISIBLE_DEVICES" in os.environ:
            set_devices = False
            num_workers = 1
            logger.warning("Environment variable CUDA_VISIBLE_DEVICES is set, " +
                        "so only one worker can be used.\n" +
                        "See https://github.com/aschampion/diluvian/issues/11")
        else:
            set_devices = True

        workers = []
        loading_lock = manager.Lock()
        for worker_id in range(num_workers):
            w = Process(
                target=self.worker,
                args=(
                    worker_id,
                    set_devices,
                    self.volume,
                    region_shape,
                    seed_queue,
                    results_queue,
                    loading_lock,
                    revoked_seeds,
                ),
            )
            w.start()
            workers.append(w)

        while dispatched_seeds:
            processed_seeds = 1
            expected_seed = dispatched_seeds.popleft()
            logger.debug("Expecting seed %s", expected_seed)

            if tuple(expected_seed) in unordered_results:
                logger.debug("Expected seed %s is in old results",
                             expected_seed)
                seed = expected_seed
                body = unordered_results[tuple(seed)]
                del unordered_results[tuple(seed)]  # drop the stashed copy now that it is being processed

            else:
                seed, body = results_queue.get(True)
                processed_seeds += queue_next_seed()

                while not np.array_equal(seed, expected_seed):
                    logger.debug("Node %s is early, stashing", seed)
                    unordered_results[tuple(seed)] = body
                    seed, body = results_queue.get(True)
                    processed_seeds += queue_next_seed()

            logger.debug("Processing node at %s", seed)
            pbar.update(processed_seeds)

            if final_results.get(tuple(seed)) is not None:
                # This seed has already been filled.
                logger.debug(
                    "Seed (%s) was filled but has been covered in the meantime.",
                    np.array_str(seed),
                )
                loading_lock.acquire()
                if tuple(seed) in revoked_seeds:
                    revoked_seeds.remove(tuple(seed))
                loading_lock.release()
                continue

            if body is None:
                logger.warning("Body of Seed ({}) is None".format(seed))
                # TODO: re-dispatch this seed instead of skipping it
                continue

            if not body.is_seed_in_mask():
                logger.warning("Seed ({}) is not in its body.".format(seed))
                # TODO: re-dispatch this seed instead of skipping it
                continue

            mask, bounds = body._get_bounded_mask(
                CONFIG.postprocessing.closing_shape)

            body_size = np.count_nonzero(mask)

            if body_size == 0:
                logger.warning("Body of seed {} is empty.".format(seed))
                # TODO: re-dispatch this seed instead of skipping it
                continue

            final_results[map_back[tuple(seed)]] = mask
            logger.warning("Filled seed ({})".format(seed))

        for _ in range(num_workers):
            seed_queue.put("DONE")
        for wid, worker in enumerate(workers):
            worker.join()
        manager.shutdown()

        pbar.close()

        return final_results
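
Workers in this method are told to stop by pushing one "DONE" sentinel per worker onto the Manager queue, while in-flight seeds can be cancelled through the shared revoked_seeds list. The sentinel half of that protocol, reduced to a sketch (worker below is a stand-in for self.worker):

from multiprocessing import Manager, Process

def worker(worker_id, seed_queue, results_queue):
    while True:
        seed = seed_queue.get()
        if seed == 'DONE':                  # sentinel: no more work for this worker
            break
        results_queue.put((worker_id, seed, seed * 2))   # stand-in "fill" result

if __name__ == '__main__':
    manager = Manager()
    seed_queue = manager.Queue()
    results_queue = manager.Queue()
    workers = [Process(target=worker, args=(i, seed_queue, results_queue))
               for i in range(2)]
    for w in workers:
        w.start()
    for seed in range(6):
        seed_queue.put(seed)
    for _ in workers:                       # one sentinel per worker
        seed_queue.put('DONE')
    for w in workers:
        w.join()
    while not results_queue.empty():
        print(results_queue.get())
    manager.shutdown()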
Example #15
def multi_mode(cli_parsed):
    dbm = db_manager.DB_Manager(cli_parsed.d + '/ew.db')
    dbm.open_connection()
    if not cli_parsed.resume:
        dbm.initialize_db()
    dbm.save_options(cli_parsed)
    m = Manager()
    targets = m.Queue()
    lock = m.Lock()
    multi_counter = m.Value('i', 0)
    display = None

    def exitsig(*args):
        dbm.close()
        if current_process().name == 'MainProcess':
            print('')
            print('Resume using ./EyeWitness.py --resume {0}'.format(cli_parsed.d + '/ew.db'))
        os._exit(1)

    signal.signal(signal.SIGINT, exitsig)
    if cli_parsed.resume:
        pass
    else:
        url_list = target_creator(cli_parsed)
        if cli_parsed.web:
            for url in url_list:
                dbm.create_http_object(url, cli_parsed)

    if cli_parsed.web:
        if not cli_parsed.show_selenium:
            display = Display(visible=0, size=(1920, 1080))
            display.start()

        multi_total = dbm.get_incomplete_http(targets)
        if multi_total > 0:
            if cli_parsed.resume:
                print('Resuming Web Scan ({0} Hosts Remaining)'.format(str(multi_total)))
            else:
                print('Starting Web Requests ({0} Hosts)'.format(str(multi_total)))

        if multi_total < cli_parsed.threads:
            num_threads = multi_total
        else:
            num_threads = cli_parsed.threads
        for i in range(num_threads):
            targets.put(None)
        try:
            workers = [Process(target=worker_thread, args=(
                cli_parsed, targets, lock, (multi_counter, multi_total))) for i in range(num_threads)]
            for w in workers:
                w.start()
            for w in workers:
                w.join()
        except Exception as e:
            print(str(e))

    if display is not None:
        display.stop()
    results = dbm.get_complete_http()
    dbm.close()
    m.shutdown()
    sort_data_and_write(cli_parsed, results)
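
multi_counter above is a Manager().Value('i', 0) that the worker processes increment under the Manager lock to report progress. A small sketch of that counter pattern; the worker body is an assumption, not EyeWitness's real worker_thread:

from multiprocessing import Manager, Process

def worker_thread(targets, lock, counter, total):
    while True:
        item = targets.get()
        if item is None:                    # poison pill: queue is drained
            break
        with lock:                          # Value proxies are not atomic; guard the update
            counter.value += 1
            print('[{0}/{1}] processed {2}'.format(counter.value, total, item))

if __name__ == '__main__':
    m = Manager()
    targets = m.Queue()
    lock = m.Lock()
    counter = m.Value('i', 0)
    urls = ['http://a', 'http://b', 'http://c']
    for u in urls:
        targets.put(u)
    num_workers = 2
    for _ in range(num_workers):            # one None per worker, as in the example
        targets.put(None)
    workers = [Process(target=worker_thread,
                       args=(targets, lock, counter, len(urls)))
               for _ in range(num_workers)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    m.shutdown()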
Example #16
    def invoke(self, request):
        # need to add error handling and resilience
        start = time.time()
        genes = request.data.get('genes')
        pvalue = request.data.get('pvalue')
        clusters = request.data.get('clusters')
        organism = request.data.get('organism')
        background = request.data.get('background')
        namespace = request.data.get('namespace')
        goontology = request.data.get('goontology')
        if genes is not None and pvalue is not None and clusters is not None and goontology is not None:
            if organism not in ['hsa','gga','bta','cfa','mmu','rno','cel','ath','dme','sce','eco','dre'] or int(clusters)<=0:
                return Response({'error': 'Organism type and/or clusters are not valid parameters'},status=500)

            genes = bleach.clean(genes).replace(',', '\n')
            background = bleach.clean(background)
            goontology = bleach.clean(goontology)

            try:
                ontology = Ontology.objects.get(pk=int(goontology))
            except Ontology.DoesNotExist:
                return Response({'error':'Ontology does not exist'},status=404)

            #temp files to be used by the GOUtil
            tmp_uuid = str(uuid.uuid4())
            genefile_name = 'useruploads/inputgenes-' + tmp_uuid + '.txt'
            enrich_outputfile_name = 'useruploads/enrichment-' + tmp_uuid + '.txt'
            sim_outputfile_name = 'useruploads/funsim-' + tmp_uuid + '.txt'
            semsim_outputfile_name = 'useruploads/semsim-' + tmp_uuid + '.txt'
            clusters_outputfile_name = 'useruploads/clusters-' + tmp_uuid + '.txt'
            genefile = open(genefile_name, 'w+')

            genefile.write(genes)
            genefile.close()

            new_run = Run(name=tmp_uuid,ip=get_ip(request))
            new_run.save()
            base_dir = '/GOUtildata/'+ontology.name+'/'
            annotation_file_name = base_dir+'ann.'+organism+'.'+namespace+'.txt'
            edgelist_file_name = base_dir+'edgeList.'+'bp.txt'

            if background:
                background_file_name = 'useruploads/background-' + tmp_uuid + '.txt'
                backgroundfile = open(background_file_name, 'w+')

                backgroundfile.write(background)
                backgroundfile.close()
            else:
                background_file_name = base_dir+'background.'+organism+'.'+namespace+'.txt'

            ###### Enrichment Pipeline ######
            #invoke enrichment util to compute enrichments
            e_start = time.time()


            subprocess.call(['/GOUtil/./enrich', '-a', annotation_file_name, '-e', edgelist_file_name, '-t', genefile_name, '-b', background_file_name, '-o', enrich_outputfile_name, '-p', pvalue])
            print "Enrich run time %s" % (time.time()-e_start)

            try:
                enrich_outputfile = open(enrich_outputfile_name, 'r')
            except IOError:
                print "Error no enrichment file."
                return Response({'error': 'No Enrichment found for these Genes and Organism.'},status=500)
            #Multi-threaded loader to load in all of the enrichment terms
            enrich_manager = Manager()
            enrich_lock = enrich_manager.Lock()
            enrich_taskworker = partial(loadEnrichmentsWorker, enrich_lock)
            enrich_pool = Pool(8)
            enrich_tokens = [(new_run.id, line, ontology.id) for line in enrich_outputfile]

            enrich_pool.map(enrich_taskworker, enrich_tokens)

            fun_sim_start = time.time()
            #invoke funSim util to compute semantic similarity
            subprocess.call(['/GOUtil/./funSim', '-a', annotation_file_name, '-e', edgelist_file_name, '-o', sim_outputfile_name, '-t',"AIC", '-f', enrich_outputfile_name])
            print "FunSim run time %s" % (time.time()-fun_sim_start)


            mds_sim_start = time.time()
            #compute x, y coordinates for enriched terms
            subprocess.call(['python','/GOUtil/mdsSemSim.py', sim_outputfile_name, semsim_outputfile_name])
            print "MDS run time %s" % (time.time()-mds_sim_start)

            # Multi-process loader to load all of the enrichment term coordinates
            try:
                semsim_outputfile = open(semsim_outputfile_name, 'r')
            except IOError:
                print "Error No MDS file"
                return Response({'error': 'Semantic Similarity Computation Failed for these Genes and Organism.'},status=500)

            # ensure enrichments are loaded before proceeding
            enrich_pool.close()
            enrich_pool.join()

            coord_manager = Manager()
            coord_lock = coord_manager.Lock()
            coord_taskworker = partial(loadCoordsWorker, coord_lock)
            coord_pool = Pool(5)
            coord_tokens = [(new_run.id, line) for line in semsim_outputfile]

            coord_pool.map(coord_taskworker, coord_tokens)
            coord_pool.close()
            coord_pool.join()

            #spectral clustering
            spectral_start = time.time()
            subprocess.call(['python','/GOUtil/spectralClustering.py', semsim_outputfile_name, clusters_outputfile_name, "-1"])
            print "Spectral run time %s" % (time.time()-spectral_start)

            try:
                clusters_outputfile = open(clusters_outputfile_name, 'r')
            except IOError:
                print "Error no clusters exist"
                return Response({'error': 'No clusters found for these Genes and Organism.'},status=500)

            # Multi-process loader to update the cluster data
            cluster_manager = Manager()
            cluster_lock = cluster_manager.Lock()
            cluster_taskworker = partial(loadClustersWorker, cluster_lock)
            cluster_pool = Pool(5)
            cluster_tokens = [(new_run.id, line) for line in clusters_outputfile]

            cluster_pool.map(cluster_taskworker, cluster_tokens)
            cluster_pool.close()
            cluster_pool.join()

            print "Loading time: %s" % (time.time()-start)
            db.connection.close()
            serializer = RunSerializer(new_run)
            #cleanup temp files
            enrich_outputfile.close()
            semsim_outputfile.close()
            clusters_outputfile.close()
            # os.remove(genefile_name)
            # os.remove(enrich_outputfile_name)
            return Response(serializer.data)
            # return Response({'runid': new_run.id})
        else:
            return Response({'error': 'Invalid parameters'},status=500)
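The three loader blocks above all follow the same pattern: a Manager().Lock() is curried into the worker with functools.partial so that Pool.map only has to carry the per-line tokens, and writes guarded by the lock do not interleave. A minimal, self-contained sketch of that pattern (load_line and the tokens below are hypothetical stand-ins, not part of the example):

from functools import partial
from multiprocessing import Manager, Pool


def load_line(lock, token):
    run_id, line = token
    with lock:                       # e.g. serialise inserts into a shared database
        print("run %s loading %r" % (run_id, line.strip()))


if __name__ == "__main__":
    lock = Manager().Lock()          # a manager proxy survives pickling into Pool workers
    loader = partial(load_line, lock)
    tokens = [(1, "GO:0008150\t0.01\n"), (1, "GO:0003674\t0.02\n")]
    with Pool(4) as pool:
        pool.map(loader, tokens)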
Example #17
0
        # Send frame to global
        write_frame_list[worker_id] = frame_process

        # Expect next worker to write frame
        Global.write_num = next_id(Global.write_num, worker_num)


if __name__ == '__main__':
    Total_time = 0
    # Fix Bug on MacOS
    if platform.system() == 'Darwin':
        set_start_method('forkserver')

    # Global variables
    Global = Manager().Namespace()
    Global.buff_num = 1
    Global.read_num = 1
    Global.write_num = 1
    Global.frame_delay = 0
    Global.is_exit = False
    read_frame_list = Manager().dict()
    write_frame_list = Manager().dict()

    # Number of workers (subprocess use to process frames)
    if cpu_count() > 2:
        worker_num = cpu_count() - 1  # 1 for capturing frames
    else:
        worker_num = 2

    # Subprocess list
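The fragment above coordinates its workers through counters on a shared Manager Namespace: each worker waits until Global.write_num equals its own id, writes its frame, then advances the counter for the next worker. A minimal sketch of that turn-taking idea (next_id here is a stand-in for the helper the snippet assumes):

import time
from multiprocessing import Manager, Process


def next_id(current, worker_num):
    return current % worker_num + 1              # 1 -> 2 -> ... -> worker_num -> 1


def worker(worker_id, worker_num, Global):
    for frame in range(3):
        while Global.write_num != worker_id:     # busy-wait until it is our turn
            time.sleep(0.001)
        print("worker %d writes frame %d" % (worker_id, frame * worker_num + worker_id))
        Global.write_num = next_id(Global.write_num, worker_num)   # hand over to the next worker


if __name__ == "__main__":
    worker_num = 3
    Global = Manager().Namespace()
    Global.write_num = 1
    procs = [Process(target=worker, args=(i, worker_num, Global)) for i in range(1, worker_num + 1)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()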
Example #18
0
# Communication between processes in a process pool
from multiprocessing import Manager, Pool
import time
import random


def write(q):
    l = [1, 2, 3]
    for i in l:
        q.put(i)


def read(q):
    for i in range(q.qsize()):
        print(q.get())


if __name__ == "__main__":
    q = Manager().Queue()
    pool = Pool()
    pool.apply(write, (q, ))
    pool.apply(read, (q, ))
    pool.close()
    pool.join()
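The managed queue is the point of this example: a plain multiprocessing.Queue handed to Pool workers as an argument typically fails with a RuntimeError (queue objects may only be shared through inheritance), whereas a Manager().Queue() proxy pickles cleanly. A small self-contained variant using apply_async and waiting on the returned AsyncResult objects:

from multiprocessing import Manager, Pool


def write(q):
    for i in [1, 2, 3]:
        q.put(i)


def read(q):
    while not q.empty():
        print(q.get())


if __name__ == "__main__":
    q = Manager().Queue()                     # a plain multiprocessing.Queue would fail to pickle here
    with Pool(2) as pool:
        pool.apply_async(write, (q,)).get()   # wait for the writer to finish
        pool.apply_async(read, (q,)).get()    # then drain the queue: prints 1 2 3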
Example #19
0
    def _get_gradient(self,
                      input_to_check,
                      place,
                      output_names,
                      no_grad_set,
                      parallel=False):
        prog = Program()
        block = prog.global_block()
        self._append_ops(block)

        # Manager() can not store LoDTensor directly
        # So, use one additional element to store output lod
        fetch_list = []
        fetch_list_len = len(input_to_check)
        return_results = [Manager().list() for _ in range(fetch_list_len + 1)]

        def closure(**kwargs):
            role = kwargs['role']

            pfl_mpc.init("aby3", role, "localhost", self.server,
                         int(self.port))
            loss = append_loss_ops(block, output_names)
            param_grad_list = append_backward(loss=loss,
                                              parameter_list=input_to_check,
                                              no_grad_set=no_grad_set)

            inputs = self._get_inputs(block)
            feed_dict = self.feed_var(inputs, place)

            fetch_list = [g for p, g in param_grad_list]

            executor = Executor(place)

            executor.run()
            outs = executor.run(prog,
                                feed=feed_dict,
                                fetch_list=fetch_list,
                                return_numpy=False)
            # append lod information in last position
            lod = []
            for idx in range(fetch_list_len):
                return_results[idx].append(np.array(outs[idx]))
                lod_i = outs[idx].lod()
                lod_concat = []
                for i in lod_i:
                    lod_concat.append(i)
                lod.append(lod_concat)
            return_results[fetch_list_len].append(lod)

        ret = self.multi_party_run(target=closure)
        self.assertEqual(ret[0], True)

        outs = []

        lod = np.array(return_results[fetch_list_len])
        # from numpy array to LoDTensor
        for idx in range(fetch_list_len):
            t = fluid.LoDTensor()
            reveal_data = aby3.reconstruct(np.array(return_results[idx]))
            t.set(reveal_data, place)
            lod_idx = lod[0][idx]
            # TODO: fix the exception thrown because of an incorrect output lod in the gru op;
            # out.set_lod(out.lod()) will throw an exception
            try:
                t.set_lod(lod_idx)
            except Exception as e:
                pass

            outs.append(t)
        return outs
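The comment near the top of this helper is the key point: manager proxies can only hold picklable objects, so each process appends plain numpy arrays to Manager().list() buckets and the parent rebuilds the framework-specific tensors afterwards. A minimal sketch of just that collection step (the worker below is a stand-in, not part of the test):

import numpy as np
from multiprocessing import Manager, Process


def worker(results, seed):
    # only plain, picklable objects (here numpy arrays) can be appended to the proxy
    results.append(np.full((2, 2), float(seed)))


if __name__ == "__main__":
    results = Manager().list()
    procs = [Process(target=worker, args=(results, s)) for s in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    stacked = np.stack(list(results))   # copy the proxies' contents back into the parent
    print(stacked.shape)                # (3, 2, 2)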
Example #20
0
#-*-coding:utf-8-*-
# Author:Lu Wei
from multiprocessing import Process, Manager, Lock


def func(dic, l):
    with l:
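        # without the lock, this read-modify-write on the managed dict could interleave
        # across processes and lose some of the decrements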
        dic['count'] -= 1


if __name__ == '__main__':
    m = Manager()
    l = Lock()
    dic = m.dict({'count': 100})
    p_l = []
    for i in range(10):
        p = Process(target=func, args=(dic, l))
        p.start()
        p_l.append(p)
    for p in p_l:
        p.join()
    print(dic)
Example #21
0
    def run_wrap_production(self, test=None):
        """
        Wrapper function. It assumes the initialisation stage has already
        happend Writes XRSL file with the appropiate information and send a
        producrun number of jobs to the arc queue
        """
        from pyHepGrid.src.header import baseSeed, producRun, jobName, count, memory

        # runcard names (keys)
        # dCards, dictionary of { 'runcard' : 'name' }
        rncards, dCards = util.expandCard()
        self.runfolder = header.runcardDir
        job_type = "Production"

        header.logger.info("Runcards selected: {0}".format(" ".join(
            r for r in rncards)))
        for r in rncards:
            joblist = []
            # Check whether this run has something on the gridStorage
            self.check_for_existing_output(r, dCards[r])
            # use the same unique name for all seeds since
            # we cannot multiprocess the arc submission
            keyquit = None

            # Sanity check for test queue
            if test and producRun > 5:
                self._press_yes_to_continue(
                    "  \033[93m WARNING:\033[0m About to submit a large "
                    "number ({0}) of jobs to the test queue.".format(
                        producRun))

            # use iterator for memory reasons :)
            from multiprocessing import Manager
            # Use shared memory list in case of submission failure
            jobids = Manager().list()
            arg_sets = self.arg_iterator(r, dCards, jobName, baseSeed,
                                         producRun, test, jobids, count,
                                         memory)

            try:
                joblist = self._multirun(self.run_single_production,
                                         arg_sets,
                                         n_threads=min(
                                             header.arc_submit_threads,
                                             producRun))
            except (Exception, KeyboardInterrupt) as interrupt:
                print("\n")
                joblist = jobids
                header.logger.error(
                    "Submission error encountered. "
                    "Inserting all successful submissions to database")
                keyquit = interrupt

            # Create daily path
            pathfolder = util.generatePath(warmup=False)
            # Create database entry
            jobStr = ' '.join(joblist)
            dataDict = {
                'jobid': jobStr,
                'date': str(datetime.now()),
                'pathfolder': pathfolder,
                'runcard': r,
                'jobtype': job_type,
                'runfolder': dCards[r],
                'iseed': str(baseSeed),
                'no_runs': str(producRun),
                'status': "active",
            }
            if len(joblist) > 0:
                self.dbase.insert_data(self.table, dataDict)
                # Set jobs to failed status if no jobid returned
                dbid = self.get_active_dbids()[-1]
                statuses = [
                    self.cUNK if i != "None" else self.cMISS for i in joblist
                ]
                self._set_new_status(dbid, statuses)
            else:
                header.logger.critical(
                    "No jobids returned, no database entry inserted for "
                    "submission: {0} {1}".format(r, dCards[r]))
            if keyquit is not None:
                raise keyquit
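The shared jobids list above is the safety net: each successful submission is recorded in a Manager().list(), so if the batch is interrupted the parent can still insert the jobs that did make it. A minimal sketch of that idea with a hypothetical submit_one worker:

from multiprocessing import Manager, Pool


def submit_one(args):
    seed, jobids = args
    if seed == 3:
        raise RuntimeError("simulated submission failure")
    jobid = "job-%d" % seed
    jobids.append(jobid)        # recorded even if a later submission raises
    return jobid


if __name__ == "__main__":
    jobids = Manager().list()
    try:
        with Pool(2) as pool:
            joblist = pool.map(submit_one, [(s, jobids) for s in range(5)])
    except Exception:
        joblist = list(jobids)  # fall back to whatever actually made it through
    print(joblist)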
Example #22
0
def runModel(train,
             test,
             trainTarget,
             testTarget,
             targetVar=target,
             subMode=None,
             submission=0):
    print("started")
    from sklearn.neural_network import MLPClassifier
    if subMode is None:
        que = Manager().Queue()
        pool = Pool(processes=1)
        counter = 0

    dict1 = {}
    L1 = [i for i in range(12, 20)]
    L2 = [i for i in range(4, 12)]
    for i in L1:
        for j in L2:
            if j < i and subMode is None:
                if j != 0:
                    mlp = MLPClassifier(hidden_layer_sizes=(i, j),
                                        max_iter=250,
                                        alpha=1e-4,
                                        solver='sgd',
                                        verbose=False,
                                        tol=1e-4,
                                        random_state=1,
                                        learning_rate_init=0.1)
                else:
                    mlp = MLPClassifier(hidden_layer_sizes=(i),
                                        max_iter=250,
                                        alpha=1e-4,
                                        solver='sgd',
                                        verbose=False,
                                        tol=1e-4,
                                        random_state=1,
                                        learning_rate_init=0.1)
                pool.apply_async(jugad,
                                 args=(que, mlp, train, trainTarget, i, j))
                counter = counter + 1
            elif subMode is not None:
                mlp = MLPClassifier(hidden_layer_sizes=subMode,
                                    max_iter=250,
                                    alpha=1e-4,
                                    solver='sgd',
                                    verbose=False,
                                    tol=1e-4,
                                    random_state=1,
                                    learning_rate_init=0.1)
                mlp.fit(train, trainTarget[target])
                trainer = pd.DataFrame(mlp.predict_proba(train.values),
                                       columns=['good', target],
                                       index=train.index)[[target]]
                submision = pd.DataFrame(mlp.predict_proba(test.values),
                                         columns=['good', target],
                                         index=test.index)[[target]]
                print(trainTarget[[target]].mean(axis=0),
                      trainer[[target]].mean(axis=0))
                #lorenzCurve(trainTarget["TARGET"].values.flatten(),trainer[target].values.flatten())
                # rawTest['actual'] = testTarget[target]
                # rawTest['TARGET1']=submision[target]
                score_test = metrics.roc_auc_score(testTarget[target],
                                                   submision[[target]])
                score_train = metrics.roc_auc_score(trainTarget[target],
                                                    trainer[[target]])
                print(score_train, score_test)
                #error = logLoss(rawTest[varSelected+['actual', 'TARGET1']], 'actual', 'TARGET1').sort_values(['error'])
                #error.to_csv("/home/pooja/PycharmProjects/datanalysis/finalDatasets/error.csv")
                return pd.DataFrame(mlp.predict_proba(test.values),
                                    columns=['good', target],
                                    index=testTarget.index)[[target]]

    pool.close()
    pool.join()
    for element in range(counter):
        field = que.get()
        mlp = field[0]
        i = field[1]
        j = field[2]
        trainer = pd.DataFrame(mlp.predict_proba(train.values),
                               columns=['good', target],
                               index=train.index)[[target]]
        submision = pd.DataFrame(mlp.predict_proba(test.values),
                                 columns=['good', target],
                                 index=test.index)[[target]]
        # submision.index.name='SK_ID_CURR'
        # print(submision.shape)
        # submision.to_csv("submission.csv")
        score_test = metrics.roc_auc_score(testTarget[target],
                                           submision[[target]])
        score_train = metrics.roc_auc_score(trainTarget[target],
                                            trainer[[target]])

        try:
            dict1[str(i) + "_" +
                  str(j)] = str(score_train) + "_" + str(score_test)
        except:
            print(i, j)
            pass

    print("starting")
    for key in dict1.keys():
        print(key)
        print(dict1[key])
    return None
Example #23
0
    def hc(self,
           metric='AIC',
           max_iter=100,
           debug=False,
           restriction=None,
           whitelist=None):
        """
        Greedy Hill Climbing search proceeds by choosing the move
        which maximizes the increase in fitness of the
        network at the current step. It continues until
        it reaches a point where there does not exist any
        feasible single move that increases the network fitness.

        It is called "greedy" because it simply does what is
        best at the current iteration only, and thus does not
        look ahead to what may be better later on in the search.

        For computational saving, a Priority Queue (python's heapq)
        can be used to maintain the best operators and reduce the
        complexity of picking the best operator from O(n^2) to O(nlogn).
        This works by maintaining the heapq of operators sorted by their
        delta score, and each time a move is made, we only have to recompute
        the O(n) delta-scores which were affected by the move. The rest of
        the operator delta-scores are not affected.

        For additional computational efficiency, we can cache the
        sufficient statistics for various families of distributions -
        therefore, computing the mutual information for a given family
        only needs to happen once.

        The possible moves are the following:
            - add edge
            - delete edge
            - invert edge

        Arguments
        ---------
        *data* : a nested numpy array
            The data from which the Bayesian network
            structure will be learned.

        *metric* : a string
            Which score metric to use.
            Options:
                - AIC
                - BIC / MDL
                - LL (log-likelihood)

        *max_iter* : an integer
            The maximum number of iterations of the
            hill-climbing algorithm to run. Note that
            the algorithm will terminate on its own if no
            improvement is made in a given iteration.

        *debug* : boolean
            Whether to print the scores/moves of the
        algorithm as it's happening.

        *restriction* : a list of 2-tuples
            For MMHC algorithm, the list of allowable edge additions.

        Returns
        -------
        *bn* : a BayesNet object

        """

        # INITIALIZE NETWORK W/ NO EDGES
        # maintain children and parents dict for fast lookups
        self.c_dict = dict([(n, []) for n in self.nodes])
        self.p_dict = dict([(n, []) for n in self.nodes])

        self.restriction = restriction
        self.whitelist = whitelist

        if whitelist is None:
            whitelist = []
        for (u, v) in whitelist:
            self.c_dict[u].append(v)
            self.p_dict[v].append(u)
        print("Whitelist", whitelist)

        self.bn = BayesNet(self.c_dict)

        # COMPUTE INITIAL LIKELIHOOD SCORE
        #    value_dict = dict([(n, np.unique(np_data[:,i])) for i,n in enumerate(names)])
        print("Nodes:", list(self.bn.nodes()))

        score = model_score(self.data, self.bn) - model_complexity(
            self.bn, self.nrow, metric)
        print("Initial Score:", score)

        # CREATE EMPIRICAL DISTRIBUTION OBJECT FOR CACHING
        #ED = EmpiricalDistribution(data,names)

        _iter = 0
        improvement = True

        man = Manager()

        mut_inf_cache = man.dict()
        configs_cache = man.dict()

        while improvement:
            start_t = time.time()
            improvement = False
            max_delta = 0
            max_operation = None

            if debug:
                print('ITERATION: ', _iter)

            return_queue = Queue()
            p_add = Process(target=self.test_arc_additions,
                            args=(configs_cache, mut_inf_cache, return_queue))
            p_rem = Process(target=self.test_arc_deletions,
                            args=(configs_cache, mut_inf_cache, return_queue))
            p_rev = Process(target=self.test_arc_reversals,
                            args=(configs_cache, mut_inf_cache, return_queue))

            p_add.start()
            p_rem.start()
            p_rev.start()

            p_add.join()
            p_rem.join()
            p_rev.join()

            while not return_queue.empty():
                results = return_queue.get()
                if results[1] > max_delta:
                    max_arc = results[0]
                    max_delta = results[1]
                    max_operation = results[2]
                    max_qi = results[3]

            ### DETERMINE IF/WHERE IMPROVEMENT WAS MADE ###
            if max_operation:
                score += max_delta
                improvement = True
                u, v = max_arc
                str_arc = [e for e in max_arc]
                if max_operation == 'Addition':
                    if debug:
                        print("delta:", max_delta)
                        print('ADDING: ', str_arc, '\n')
                    self.p_dict[v].append(u)
                    self.bn.add_edge(u, v)
                    self.bn.F[v]["qi"] = max_qi
                elif max_operation == 'Deletion':
                    if debug:
                        print("delta:", max_delta)
                        print('DELETING: ', str_arc, '\n')
                    self.p_dict[v].remove(u)
                    self.bn.remove_edge(u, v)
                    self.bn.F[v]["qi"] = max_qi
                elif max_operation == 'Reversal':
                    if debug:
                        print("delta:", max_delta)
                        print('REVERSING: ', str_arc, '\n')
                    self.p_dict[v].remove(u)
                    self.bn.remove_edge(u, v)
                    self.bn.F[v]['qi'] = max_qi[1]
                    self.p_dict[u].append(v)
                    self.bn.add_edge(v, u)
                    self.bn.F[u]['qi'] = max_qi[0]
                print("Model score:", score
                      )  # TODO: improve so only changed elements get an update
            else:
                if debug:
                    print('No Improvement on Iter: ', _iter)
            print("Time for iteration:", time.time() - start_t)

            ### TEST FOR MAX ITERATION ###
            _iter += 1
        #    if _iter > max_iter:
        #        if debug:
        #            print('Max Iteration Reached')
        #        break

        bn = BayesNet(self.c_dict)
        print("Size of Cache", len(mut_inf_cache))
        print("SCORE =", score)

        return bn
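mut_inf_cache and configs_cache make the docstring's "cache the sufficient statistics" idea concrete: because they are Manager().dict() proxies, the addition/deletion/reversal scoring processes all read and write the same cache. A minimal sketch of a cross-process memo cache (expensive() below is a stand-in for the mutual-information computation):

import time
from multiprocessing import Manager, Process


def expensive(key):
    time.sleep(0.1)            # stand-in for an expensive statistic
    return len(key)


def scorer(cache, keys):
    for key in keys:
        if key not in cache:   # another process may already have filled this entry
            cache[key] = expensive(key)


if __name__ == "__main__":
    cache = Manager().dict()
    jobs = [Process(target=scorer, args=(cache, ["ab", "abc", "abcd"])) for _ in range(3)]
    for p in jobs:
        p.start()
    for p in jobs:
        p.join()
    print(dict(cache))         # e.g. {'ab': 2, 'abc': 3, 'abcd': 4}

The check-then-set is not atomic, so two processes may occasionally recompute the same key; for a memo cache that only costs a little duplicated work.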
Example #24
0
def run_tests_parallel(tests, prefix, options):
    # This queue will contain the results of the various tests run.
    # We could make this queue a global variable instead of using
    # a manager to share, but this will not work on Windows.
    queue_manager = Manager()
    async_test_result_queue = queue_manager.Queue()

    # This queue will be used by the result process to indicate
    # that it has received a result and we can start a new process
    # on our end. The advantage is that we don't have to sleep and
    # check for worker completion ourselves regularly.
    notify_queue = queue_manager.Queue()

    # This queue will contain the return value of the function
    # processing the test results.
    total_tests = len(tests) * options.repeat
    result_process_return_queue = queue_manager.Queue()
    result_process = Process(target=process_test_results_parallel,
                             args=(async_test_result_queue,
                                   result_process_return_queue, notify_queue,
                                   total_tests, options))
    result_process.start()

    # Ensure that a SIGTERM is handled the same way as SIGINT
    # to terminate all child processes.
    sigint_handler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGTERM, sigint_handler)

    worker_processes = []

    def remove_completed_workers(workers):
        new_workers = []
        for worker in workers:
            if worker.is_alive():
                new_workers.append(worker)
            else:
                worker.join()
        return new_workers

    try:
        testcnt = 0
        # Initially start as many jobs as allowed to run parallel
        # Always enqueue at least one to avoid a curious deadlock
        for i in range(max(1, min(options.max_jobs, total_tests))):
            notify_queue.put(True)

        # For every item in the notify queue, start one new worker.
        # Every completed worker adds a new item to this queue.
        while notify_queue.get():
            if testcnt < total_tests:
                # Start one new worker
                test = tests[testcnt % len(tests)]
                worker_process = Process(target=wrap_parallel_run_test,
                                         args=(test, prefix,
                                               async_test_result_queue,
                                               options))
                worker_processes.append(worker_process)
                worker_process.start()
                testcnt += 1

                # Collect completed workers
                worker_processes = remove_completed_workers(worker_processes)
            else:
                break

        # Wait for all processes to terminate
        while len(worker_processes) > 0:
            worker_processes = remove_completed_workers(worker_processes)

        # Signal completion to result processor, then wait for it to complete
        # on its own
        async_test_result_queue.put(None)
        result_process.join()

        # Return what the result process has returned to us
        return result_process_return_queue.get()
    except (Exception, KeyboardInterrupt) as e:
        # Print the exception if it's not an interrupt,
        # might point to a bug or other faulty condition
        if not isinstance(e, KeyboardInterrupt):
            traceback.print_exc()

        for worker in worker_processes:
            try:
                worker.terminate()
            except:
                pass

        result_process.terminate()

    return False
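The two managed queues above form a simple throttle: workers push results to one queue, the result process acknowledges each result on the notify queue, and the parent starts a new worker only when it reads an acknowledgement, so roughly max_jobs workers are alive at a time. A stripped-down sketch of that mechanism (worker and collector are stand-ins for the real test runner):

from multiprocessing import Manager, Process


def worker(n, results):
    results.put(n * n)                 # stand-in for running one test


def collector(results, notify, total):
    for _ in range(total):
        print("got", results.get())
        notify.put(True)               # acknowledge: the parent may start another worker


if __name__ == "__main__":
    mgr = Manager()
    results, notify = mgr.Queue(), mgr.Queue()
    total, max_jobs = 8, 3

    collector_p = Process(target=collector, args=(results, notify, total))
    collector_p.start()

    for _ in range(min(max_jobs, total)):
        notify.put(True)               # prime the pump with max_jobs permits

    workers, started = [], 0
    while started < total and notify.get():
        w = Process(target=worker, args=(started, results))
        w.start()
        workers.append(w)
        started += 1

    for w in workers:
        w.join()
    collector_p.join()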
Example #25
0
    def generate_initial_population(self):
        """
        generates the initial population
        """
        cores_num = cpu_count()
        loop_length = int(self.population_size / cores_num)

        with click.progressbar(
                length=loop_length,
                label='Generating initial population...') as progressbar:
            for i in range(loop_length):
                people = []
                with Manager() as manager:
                    dna_bucket = manager.list([])
                    workers = []

                    def get_fitness(dna, dna_bucket):
                        """

                        :param dna:
                        :param dna_bucket:
                        """
                        fitness_score, fitness_log = self.fitness(dna)
                        dna_bucket.append((dna, fitness_score, fitness_log))

                    for _ in range(cores_num):
                        dna = ''.join(
                            choices(self.charset, k=self.solution_len))
                        w = Process(target=get_fitness, args=(dna, dna_bucket))
                        w.start()
                        workers.append(w)

                    # join workers
                    for w in workers:
                        w.join()

                    for d in dna_bucket:
                        people.append({
                            'dna': d[0],
                            'fitness': d[1],
                            'log': d[2]
                        })

                # update dashboard
                click.clear()
                progressbar.update(1)
                print('\n')
                table.key_value([
                    ['started at',
                     jh.get_arrow(self.start_time).humanize()],
                    [
                        'index/total', '{}/{}'.format(len(self.population),
                                                      self.population_size)
                    ],
                    ['-', '-'],
                    ['population_size', self.population_size],
                    ['iterations', self.iterations],
                    ['solution_len', self.solution_len],
                    [
                        'route', '{}, {}, {}, {}'.format(
                            router.routes[0].exchange, router.routes[0].symbol,
                            router.routes[0].timeframe,
                            router.routes[0].strategy_name)
                    ],
                    ['-', '-'],
                    ['DNA', people[0]['dna']],
                    ['fitness', round(people[0]['fitness'], 6)],
                    ['training|testing logs', people[0]['log']],
                ],
                                'baby',
                                alignments=('left', 'right'))

                for p in people:
                    self.population.append(p)

        # sort the population
        self.population = list(
            sorted(self.population, key=lambda x: x['fitness'], reverse=True))
Example #26
0
  taskQueue.close()
  #taskQueue.join()


logger = multiprocessing.log_to_stderr()
logger.setLevel(logging.WARNING)

def error(msg, *args):
  return logger.error(msg, *args)


# start (in parallel)!
nCores = max(1, multiprocessing.cpu_count()-1)
#print(nCores)
with ProcessPoolExecutor(max_workers=nCores) as workers, Manager() as manager:
  taskQueue = JoinableQueue()
  result = workers.submit(run)

  while not result.done():
    taskArgs = taskQueue.get(block=True)
    if isinstance(taskArgs, Iterable):
      task, *args = taskArgs
      task(*args)
    else:
      taskArgs()

#run()
print('Done!')
sleep(2)
Example #27
0
    def run(self):  # run() is invoked when start() is called (run executes as a single process).
      while True:
          item = self.queue.get()
          self.horizontal_mirror_imgs(self.args['imgs_path'],self.args['xml_path'],item,self.args['save_path'])
          print("Consumer-->%s" % item, self.label)
          self.queue.task_done()


if __name__ == '__main__':
	imgs_path = '/home/lzc274500/WorkSpace/ZOUZHEN/Pytorch/crnn_chinese_characters_rec/data/vehicle/Image'
	xml_path = '/home/lzc274500/WorkSpace/ZOUZHEN/Pytorch/crnn_chinese_characters_rec/data_generator/data_set/PK5HD00000002.xml'
	save_path = '/home/lzc274500/WorkSpace/ZOUZHEN/Pytorch/crnn_chinese_characters_rec/data_generator/data_set/save_path'
	label_path = '/home/lzc274500/WorkSpace/ZOUZHEN/Pytorch/crnn_chinese_characters_rec/data_generator/data_set/train.txt'
	pathlist = os.listdir(imgs_path)
	# Count the number of CPU cores available on this machine
	if not os.path.exists(save_path):
	  os.makedirs(save_path)
	cores = multiprocessing.cpu_count()
	qMar = Manager()
	# Use part of the core count to size the data queue
	q1 = qMar.Queue(cores-5)
	p = Producer(q1, pathlist)
	processes = []
	processes.append(p)
	# print(int(cores/2))
	for i in range(cores-5):
		processes.append(Consumer(q1,i,label_path,imgs_path=imgs_path,xml_path=xml_path, save_path=save_path))
		
	[process.start() for process in processes]
	[process.join() for process in processes]
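The bounded Manager().Queue(cores-5) above gives the producer backpressure: put() blocks once the buffer is full, and because the proxy wraps queue.Queue it also supports task_done()/join(), which is what the consumer's self.queue.task_done() call relies on. A minimal sketch of that combination (produce/consume are stand-ins for the Producer/Consumer classes):

from multiprocessing import Manager, Process


def produce(q, items):
    for item in items:
        q.put(item)            # blocks while the queue already holds `maxsize` items


def consume(q, n):
    for _ in range(n):
        item = q.get()
        print("processed", item)
        q.task_done()          # lets q.join() in the parent return once all items are handled


if __name__ == "__main__":
    q = Manager().Queue(2)     # small buffer: at most two unprocessed items in flight
    items = list(range(6))
    p1 = Process(target=produce, args=(q, items))
    p2 = Process(target=consume, args=(q, len(items)))
    p1.start()
    p2.start()
    p1.join()                  # all items have been put
    q.join()                   # returns once every queued item has been task_done()'d
    p2.join()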
Example #28
0
async def schedule_formatting(
    sources: Set[Path],
    fast: bool,
    write_back: WriteBack,
    mode: Mode,
    report: "Report",
    loop: asyncio.AbstractEventLoop,
    executor: Executor,
) -> None:
    """Run formatting of `sources` in parallel using the provided `executor`.

    (Use ProcessPoolExecutors for actual parallelism.)

    `write_back`, `fast`, and `mode` options are passed to
    :func:`format_file_in_place`.
    """
    cache: Cache = {}
    if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
        cache = read_cache(mode)
        sources, cached = filter_cached(cache, sources)
        for src in sorted(cached):
            report.done(src, Changed.CACHED)
    if not sources:
        return

    cancelled = []
    sources_to_cache = []
    lock = None
    if write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
        # For diff output, we need locks to ensure we don't interleave output
        # from different processes.
        manager = Manager()
        lock = manager.Lock()
    tasks = {
        asyncio.ensure_future(
            loop.run_in_executor(
                executor, format_file_in_place, src, fast, mode, write_back, lock
            )
        ): src
        for src in sorted(sources)
    }
    pending = tasks.keys()
    try:
        loop.add_signal_handler(signal.SIGINT, cancel, pending)
        loop.add_signal_handler(signal.SIGTERM, cancel, pending)
    except NotImplementedError:
        # There are no good alternatives for these on Windows.
        pass
    while pending:
        done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            src = tasks.pop(task)
            if task.cancelled():
                cancelled.append(task)
            elif task.exception():
                report.failed(src, str(task.exception()))
            else:
                changed = Changed.YES if task.result() else Changed.NO
                # If the file was written back or was successfully checked as
                # well-formatted, store this information in the cache.
                if write_back is WriteBack.YES or (
                    write_back is WriteBack.CHECK and changed is Changed.NO
                ):
                    sources_to_cache.append(src)
                report.done(src, changed)
    if cancelled:
        await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
    if sources_to_cache:
        write_cache(cache, sources_to_cache, mode)
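The lock above exists only for diff output: a Manager().Lock() proxy can be pickled into ProcessPoolExecutor tasks (a plain multiprocessing.Lock cannot), and each worker holds it while writing so output from different processes does not interleave. A minimal sketch of that arrangement:

import sys
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Manager


def report(lock, name):
    with lock:                          # only one worker prints at a time
        for i in range(3):
            sys.stdout.write("%s line %d\n" % (name, i))
        sys.stdout.flush()


if __name__ == "__main__":
    lock = Manager().Lock()
    with ProcessPoolExecutor(max_workers=4) as pool:
        for name in ("a", "b", "c", "d"):
            pool.submit(report, lock, name)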
Example #29
0
def consumer_task(q, fibo_dict):
    while not q.empty():
        value = q.get(True, 0.05)
        a, b = 0, 1

        for item in range(value):
            a, b = b, a + b
            fibo_dict[value] = a
        logger.info("consumer [%s] getting value [%d] from queue..." %
                    (current_process().name, value))


if __name__ == "__main__":
    data_queue = Queue()
    number_of_cpus = cpu_count()
    manager = Manager()

    logger.info("cpu core count: " + str(number_of_cpus))

    # The processes insert their final results here.
    fibo_dict = manager.dict()

    producer = Process(target=producer_task, args=(data_queue, fibo_dict))
    producer.start()
    producer.join()

    consumer_list = []
    for i in range(number_of_cpus):
        consumer = Process(target=consumer_task, args=(data_queue, fibo_dict))
        consumer.start()
        consumer_list.append(consumer)
Example #30
0
    def _executors_repro(
        self, executors: dict, jobs: Optional[int] = 1
    ) -> Mapping[str, Mapping[str, str]]:
        """Run dvc repro for the specified BaseExecutors in parallel.

        Returns:
            dict mapping stash revs to the successfully executed experiments
            for each stash rev.
        """
        result: Dict[str, Dict[str, str]] = defaultdict(dict)

        manager = Manager()
        pid_q = manager.Queue()

        rel_cwd = relpath(os.getcwd(), self.repo.root_dir)
        with ProcessPoolExecutor(max_workers=jobs) as workers:
            futures = {}
            for rev, executor in executors.items():
                future = workers.submit(
                    executor.reproduce,
                    executor.dvc_dir,
                    rev,
                    queue=pid_q,
                    name=executor.name,
                    rel_cwd=rel_cwd,
                    log_level=logger.getEffectiveLevel(),
                )
                futures[future] = (rev, executor)

            try:
                wait(futures)
            except KeyboardInterrupt:
                # forward SIGINT to any running executor processes and
                # cancel any remaining futures
                pids = {}
                while not pid_q.empty():
                    rev, pid = pid_q.get()
                    pids[rev] = pid
                for future, (rev, _) in futures.items():
                    if future.running():
                        os.kill(pids[rev], signal.SIGINT)
                    elif not future.done():
                        future.cancel()

            for future, (rev, executor) in futures.items():
                rev, executor = futures[future]

                try:
                    exc = future.exception()
                    if exc is None:
                        exec_result = future.result()
                        result[rev].update(
                            self._collect_executor(executor, exec_result)
                        )
                    elif not isinstance(exc, CheckpointKilledError):
                        logger.error(
                            "Failed to reproduce experiment '%s'", rev[:7],
                        )
                except CancelledError:
                    logger.error(
                        "Cancelled before attempting to reproduce experiment "
                        "'%s'",
                        rev[:7],
                    )
                finally:
                    executor.cleanup()

        return result
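The pid_q handshake is what makes the KeyboardInterrupt branch work: ProcessPoolExecutor gives no direct handle on its worker processes, so each executor reports its own PID through the managed queue and the parent can signal the ones still running. A reduced sketch of just that mechanism (reproduce below is a stand-in for the real executor call):

import os
import signal
import time
from concurrent.futures import ProcessPoolExecutor, wait
from multiprocessing import Manager


def reproduce(rev, pid_q):
    pid_q.put((rev, os.getpid()))   # report which PID is handling this rev
    time.sleep(5)                   # stand-in for the real repro work
    return rev


if __name__ == "__main__":
    pid_q = Manager().Queue()
    with ProcessPoolExecutor(max_workers=2) as workers:
        futures = {workers.submit(reproduce, rev, pid_q): rev for rev in ("abc1234", "def5678")}
        try:
            wait(futures)
        except KeyboardInterrupt:
            pids = {}
            while not pid_q.empty():
                rev, pid = pid_q.get()
                pids[rev] = pid
            for future, rev in futures.items():
                if future.running() and rev in pids:
                    os.kill(pids[rev], signal.SIGINT)   # forward Ctrl-C to the worker
                elif not future.done():
                    future.cancel()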