Example No. 1
    def __init__(self, objective_function=None, dimensions=None, **kwargs):

        if dimensions is None:
            try:
                # Use the objective function's number of arguments as dimensions
                dimensions = objective_function.__code__.co_argcount
            except (TypeError, AttributeError):
                raise TypeError("Invalid function passed.")

        # Construct PopulationParameters object
        parameters = PopulationParameters(dimensions=dimensions, **kwargs)

        self.objective_function = objective_function
        self.num_dimensions = parameters.num_dimensions
        self.size = parameters.population_size
        self.elite_population_size = int(parameters.elite_fraction * self.size)
        self.mutation_probability = parameters.mutation_probability
        self.mutation_range = parameters.mutation_range
        self.boundaries = parameters.boundaries
        self.evaluated_fitness_ranks = False
        self.evaluated_diversity_ranks = False
        self.mean_fitness = 0
        self.mean_diversity = 0
        self.mean_coordinates = np.zeros((self.num_dimensions, 1))
        self.num_iterations = 1

        # Multiprocessing defaults
        self.multiprocessing = kwargs.get('multiprocessing', False)
        self.processes = kwargs.get('processes')

        # Create points as Point objects
        self.points = []
        for pointnumber in range(self.size):
            point = Point(associated_population=self,
                          dimensions=self.num_dimensions)
            self.points.append(point)
            self.points[pointnumber].index = pointnumber

        # If multiprocessing is enabled, create pool of processes.
        if self.multiprocessing:
            if self.processes is None:
                self.pool = mp.ProcessingPool()
            else:
                self.pool = mp.ProcessingPool(ncpus=self.processes)

            fitnesses = self.pool.map(
                lambda coordinates, func: func(*coordinates),
                [point.coordinates for point in self.points],
                [self.objective_function] * self.size)

            # Assign fitnesses to each point
            for index, point in enumerate(self.points):
                point.fitness = fitnesses[index]
        else:
            for point in self.points:
                point.evaluate_fitness(self.objective_function)

        # Evaluate fitness and diversity ranks
        self.__evaluate_fitness_ranks()
        self.__evaluate_diversity_ranks()
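Note that pathos' ProcessingPool.map accepts one iterable per argument of the mapped function, which is why the objective function is repeated once per point above. A minimal standalone sketch of that pattern (the objective and evaluate helpers below are hypothetical, not part of the example):

import pathos.multiprocessing as mp

def objective(x, y):
    # Toy objective over two coordinates
    return x + y

def evaluate(coordinates, func):
    # Unpack one point's coordinates into the objective, mirroring the lambda above
    return func(*coordinates)

if __name__ == '__main__':
    pool = mp.ProcessingPool(2)
    coords = [(1, 2), (3, 4), (5, 6)]
    funcs = [objective] * len(coords)  # the same function, repeated for every point
    print(pool.map(evaluate, coords, funcs))  # [3, 7, 11]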
Example No. 2
 def __init__(self,
              journal_paths_dict,
              feature_generation_instance,
              n_cores=-1):
     """
     Class constructor
     :param journal_paths_dict: dict mapping each journal to the system path of the folder with the files to be organized
     :param feature_generation_instance: feature-generation object used to process the files
     :param n_cores: number of worker processes; -1 uses all available cores
     """
     if isinstance(journal_paths_dict, dict):
         self._mypaths = journal_paths_dict.keys()
         self._myjournals = journal_paths_dict.values()
         self._mydict = journal_paths_dict
     else:
         raise ValueError('Input must be a dictionary, with the keys being the journals and the values '
                          'being the respective paths')
     self._feat_gen = feature_generation_instance
     if n_cores == 1:
         self._n_cores = 1
         self._pool = None
     elif n_cores == -1:
         self._n_cores = mp.cpu_count()
         self._pool = mp.ProcessingPool(self._n_cores)
     else:
         self._n_cores = n_cores
         self._pool = mp.ProcessingPool(self._n_cores)
Example No. 3
 def __init__(self, source1, source2):
     global newindex
     self.text_sub = rospy.Subscriber(source1, String, self.callback)
     self.text_sub2 = rospy.Subscriber(source2, String, self.callback2)      
    
     pool = mp.ProcessingPool(6)
     pool.map(self.multi_run_wrapper,
              [(0, newindex, newindex + 8),
               (1, newindex + 4, newindex + 15),
               (2, newindex + 11, newindex + 26),
               (3, newindex + 22, newindex + 36),
               (4, newindex + 32, newindex + 51),
               (5, newindex + 47, newindex + 72)])
Example No. 4
def runParallel(nb_processes,
                input_files,
                fcbo_path,
                disable_naming=False,
                min_support=[0.5],
                min_conf=[0.95],
                max_conf=[0.05],
                num_rules=[100],
                ncpus=[1],
                workflow='both',
                fca_algo='C'):
    #runs FCA in parallel
    params = constructParamsList(name_tuple='Params',
                                 inputFiles=input_files,
                                 minsupports=min_support,
                                 minconfs=min_conf,
                                 maxconfs=max_conf,
                                 numrules=num_rules,
                                 procs=ncpus,
                                 work_flow=[workflow],
                                 fcaAlgo=[fca_algo],
                                 disableNaming=[disable_naming],
                                 fcboPath=[fcbo_path])
    param_list = formatParamsList(params)
    pool = mp.ProcessingPool(nb_processes)
    results = pool.amap(FCAOneRun, [p for p in param_list])
Example No. 5
def DOMEP(N, k, m, L, lam):
    N = np.array(N)
    n = N.shape[1]
    Tl = randomly_partition(n=n, m=m)
    Tl = np.array(Tl)
    Sl = np.zeros(n, dtype=DTY_INT) - 1  # initial

    # concurrent selection
    pool = pp.ProcessingPool(nodes=m)
    sub_idx = pool.map(find_idx_in_sub, list(range(m)), [Tl] * m, [N] * m,
                       [k] * m, [L] * m, [lam] * m)
    del pool, Tl

    for i in range(m):
        Sl[sub_idx[i]] = i
    del sub_idx
    sub_all_in_N = np.where(Sl != -1)[0]
    sub_all_single = COMEP(N[:, (Sl != -1)].tolist(), k, L, lam)
    sub_all_single = np.where(sub_all_single)[0]

    final_S = np.zeros(n, dtype=DTY_BOL)
    final_S[sub_all_in_N[sub_all_single]] = 1
    del sub_all_in_N, sub_all_single

    tdas_temS = TDAS1(N[:, final_S].tolist(), L, lam)
    tdas_Sl = [TDAS1(N[:, (Sl == i)].tolist(), L, lam) for i in range(m)]
    if np.sum(np.array(tdas_Sl) > tdas_temS) >= 1:
        tem_argmax_l = tdas_Sl.index(np.max(tdas_Sl))
        final_S = (Sl == tem_argmax_l)
        del tem_argmax_l

    del tdas_temS, tdas_Sl, N, n, m, Sl
    final_S = final_S.tolist()
    gc.collect()
    return deepcopy(final_S)
Example No. 6
    def run(self):
        self.logger.error("NEEDS TESTING.. TALK TO FORREST IF BROKEN OR WORKS")
        stack = self.args['stack']
        zvals = np.array(self.render.run(renderapi.stack.get_z_values_for_stack,stack))
        minZ = self.args.get('minZ',np.min(zvals))
        maxZ = self.args.get('maxZ',np.max(zvals))

        figdir='%s-%s-%s'%(self.args['figdir'],self.args['matchcollection'],self.args['stack'])
        if not os.path.isdir(figdir):
            os.makedirs(figdir)
        pool = mp.ProcessingPool(20)

        groups=self.render.run(renderapi.pointmatch.get_match_groupIds,self.args['matchcollection'])
        groups=np.array(map(int,groups))
        groups.sort()
        zvalues = {}
        for group in groups:
            z = self.render.run(renderapi.stack.get_z_value_for_section,self.args['stack'],group)
            if z is not None:
                zvalues[group]=int(z)


        items = []
        for group,z in zvalues.items():
            if (z>=minZ)&(z<=maxZ):
                for k in range(self.args['dz']+1):
                    z2=z+k
                    group2 = [g for g in zvalues.keys() if zvalues[g]==z2]
                    if len(group2)>0:
                        group2=group2[0]
                        items.append((group,group2))

        mypartial= partial(make_plot,r,self.args['matchcollection'],zvalues,figdir)
        with renderapi.client.WithPool(self.args['pool_size']) as pool:
            res=pool.map(mypartial,items)
Example No. 7
def calc_geometry(parameters_1,
                  parameters_2,
                  samples=50,
                  processes=8,
                  progress_bar=None):
    def calc_point_1(t):
        point = calc_parametric_point(t, parameters_1)
        return np.array([point[0], point[1], t, 0])

    def calc_point_2(t_1):
        t_2 = calc_parametric_dual(t_1, parameters_1, parameters_2)
        point = calc_parametric_point(t_2, parameters_2)
        return np.array([point[0], point[1], t_1, 1])

    def calc_point(pair):
        polarity, t = pair

        if not polarity:
            return calc_point_1(t)

        return calc_point_2(t)

    with mp.ProcessingPool(processes) as pool:
        t_range = np.linspace(-np.pi, np.pi, samples)
        pair_range = [(polarity, t) for t in t_range
                      for polarity in [False, True]]
        points = []

        for point in pool.imap(calc_point, pair_range):
            if progress_bar is not None:
                progress_bar.update(1)
            points.append(point)

    return np.array(points)
Example No. 8
 def __init__(self, hostfile, oidfile):
     self.hosts = []
     self.oids = []
     self.recentData = {}
     self.setHosts(hostfile)
     self.setOids(oidfile)
     self.pool = mp.ProcessingPool(len(self.hosts))
Example No. 9
def eval_parallel(samples, function, nprocesses=0, function_kwargs={}):
    """Evaluate a function for many samples in parallel

    Parameters
    ----------
    samples: list
        Samples for which the function is to be computed.
    function: function(numpy.array)
        Function of interest.
    nprocesses: int
        The number of processes used for the evaluation. By default, this
        chooses the number of CPU cores available.
    function_kwargs: dict
        Additional keyword arguments passed on to ``function``.

    Returns
    -------
    val: list
        Function values at samples.
    """

    procs = _get_number_processes(nprocesses)
    local_func = lambda x: function(x, **function_kwargs)

    if procs > 1:
        with pathos_mp.ProcessingPool(procs) as pool:
            val = pool.map(local_func, samples)
    else:
        val = [local_func(x) for x in samples]

    return val
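A minimal usage sketch, assuming eval_parallel (and its _get_number_processes helper) is importable from its module; the scaled_norm function and the sample values below are hypothetical:

import numpy as np

def scaled_norm(x, scale=1.0):
    # Toy function of interest: scaled squared norm of one sample
    return scale * float(np.sum(np.asarray(x) ** 2))

if __name__ == '__main__':
    samples = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]
    values = eval_parallel(samples, scaled_norm, nprocesses=2,
                           function_kwargs={'scale': 0.5})
    print(values)  # [2.5, 12.5]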
Example No. 10
    def callback(self, data):
        global newone
        if newone != str(data.data):
            newone = str(data.data)
            newone2 = newone
            words = newone.split()
            for word in words:
                if re.search(r'\d+\.?\d*', word):
                    newone2 = newone2.replace(word, num2words(word))
                    newone2 = newone2.replace(" point zero", "")
                    newone2 = newone2.replace("-", " ")
            text_file = open(
                "/home/enas/catkin_ws/src/multiprocess_render/src/txtscript/script1.txt",
                "w")
            text_file.write(str(newone2))
            text_file.close()
            dist1 = "/home/enas/catkin_ws/src/multiprocess_render/src/txtscript/script1.txt"
            self.synthesize_text(newone)

            dist2 = "/home/enas/catkin_ws/src/multiprocess_render/src/audio/say.mp3"
            pool = mp.ProcessingPool(6)
            pool.map(self.multi_run_wrapper, [(dist1, dist2, 0),
                                              (dist1, dist2, 1),
                                              (dist1, dist2, 2),
                                              (dist1, dist2, 3),
                                              (dist1, dist2, 4),
                                              (dist1, dist2, 5)])

            self.text_pub.publish(str(newone))
Example No. 11
    def generate_data(self, dates, njobs=10):
        from longbeach.utils import pushd, mkdir_p
        mkdir_p(self.projpath)
        import pathos.multiprocessing as mp
        pool = mp.ProcessingPool(njobs)

        def process(fname, dbfile):
            import os.path
            with pushd(self.projpath):
                df = da.getSignalDataFrame(fname, 'signalgen.dat.desc')
                conn = sqlite3.connect(dbfile)
                df.to_sql('data', conn, if_exists='append')

        with pushd(self.projpath):
            da.getSignalOnDateList(dates, self.model, njobs=njobs)
            import glob
            a = [
                f for f in glob.iglob('signalgen.*.trigger.dat')
                if 'ov' not in f
            ]
            conn = sqlite3.connect(self.__ivar_file)
            c = conn.cursor()
            c.execute('drop table if exists data')
            conn.commit()

            pool.map(process, a, [self.__ivar_file] * len(a))

        return None
Example No. 12
    def evaluate_summarizer(self, parsers, **kwargs):
        """
        :param parsers: list
            List stores newssum.parser.StoryParser.
        :param kwargs:
            See below

        :Keyword Arguments:
            * *w_threshod* (``int``)
                word length threshold
            * *s_threshod* (``int``)
                sentence length threshold
        """
        print("Evaluating summarizer...")
        p = multiprocessing.ProcessingPool(multiprocessing.cpu_count() - 2)

        def get_rouges(parser):
            print("Get rouges...")
            print(parser)
            selected_sents = self.get_best_sents(parser, **kwargs)
            rouge = Rouge(selected_sents, parser.highlights).get_rouge()

            return rouge

        rouges = p.map(get_rouges, parsers)
        # p.close()
        # p.join()

        avg_rouge = Rouge.cal_avg_rouge(rouges)
        Rouge.print("InfoFilter", avg_rouge)
Example No. 13
def prepdistribute(iterator, preptrain, numworkers, unordered=True):
    assert mp is not None, "Pathos is required to have parallel preptrains. Install Pathos " \
                           "from https://github.com/uqfoundation/pathos"
    workerpool = mp.ProcessingPool(numworkers)
    imap = workerpool.uimap if unordered else workerpool.imap
    lliterator = imap(preptrain, iterator)
    return lliterator
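A minimal usage sketch, assuming prepdistribute is in scope and mp is bound to pathos.multiprocessing; the double_batch preptrain and the toy batches are hypothetical:

def double_batch(batch):
    # Toy preptrain: scale every element of an incoming batch
    return [2 * x for x in batch]

if __name__ == '__main__':
    batches = ([i, i + 1] for i in range(4))
    # unordered=True uses uimap, so results may arrive in any order
    for processed in prepdistribute(batches, double_batch, numworkers=2):
        print(processed)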
Example No. 14
 def __init__(self, process_count=1):
     self.pool = mp.ProcessingPool(process_count)
     self.blacklist = \
         [u'Lahenduste esitamiseks peate olema sisse loginud ja kursusele registreerunud.',
          u'Antud kursusel pole ühtegi ülesannet.',
          u'Sellele ülesandele ei saa hetkel lahendusi esitada.',
          u'You must be logged in and registered to the course in order to submit solutions.',
          u'There are no tasks for this course.',
          u'Solutions to this task cannot be submitted at the moment.'
          ]
Example No. 15
 def export_latest_backups(self, device_ids, dest_dir=None):
     latest_backups = [b['ID'] for b in self.latest_backups(device_ids)]
     pool = mp.ProcessingPool()
     func = functools.partial(self.export_backup, dest_dir=dest_dir)
     res = pool.amap(func, latest_backups)
     while not res.ready():
         logger.info('Remaining: {}/{}'.format(
             len(latest_backups) - res._number_left, len(latest_backups)))
         time.sleep(1)
     return res.get()
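The amap call above returns an asynchronous result that is polled with ready() and collected with get(). A minimal standalone sketch of that pattern (the slow_export stand-in is hypothetical):

import time
import pathos.multiprocessing as mp

def slow_export(backup_id):
    time.sleep(0.5)            # stand-in for a long-running export
    return 'exported-%s' % backup_id

if __name__ == '__main__':
    pool = mp.ProcessingPool(4)
    res = pool.amap(slow_export, range(8))  # returns immediately
    while not res.ready():                  # poll until all tasks have finished
        time.sleep(0.1)
    print(res.get())                        # collect the results in order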
Example No. 16
def BaggingEnsembleParallel(X_trn, y_trn, name_cls, nb_cls, cores):
    pool = pp.ProcessingPool(nodes=cores)
    wXy = pool.map(BaggingSelectTraining, [X_trn] * nb_cls, [y_trn] * nb_cls)
    wX, wy = zip(*wXy)  # list, [[..] nb_cls]
    clfs = pool.map(individual, [name_cls] * nb_cls, wX, wy)
    coef = [
        1. / nb_cls
    ] * nb_cls  # coef = np.array([1. / nb_cls] * nb_cls, dtype=DTY_FLT)
    del pool, wXy, wX, wy
    gc.collect()
    return deepcopy(coef), deepcopy(clfs)
Example No. 17
 def registerInitialState(self, gameState):
     CaptureAgent.registerInitialState(self, gameState)
     self.allies = self.getTeam( gameState )
     if self.allies[0] != self.index:
         self.allies = self.allies[::-1]
     self.enemies = self.getOpponents( gameState )
     print self.allies, self.enemies
     self.MCTS_ITERATION = 10000
     self.ROLLOUT_DEPTH = 10
     self.cores = mp.cpu_count() - 2
     print self.cores
     self.pool = mp.ProcessingPool( processes = self.cores )
     self.PreviousTree = None
Example No. 18
def make_pointmatch_summary_plot(render,
                                 stack,
                                 matchcollection,
                                 sections_per_row=200):
    matchcollections = [matchcollection]
    groups = get_merged_groups(render, matchcollections)
    with mp.ProcessingPool(5) as pool:
        mypartial = partial(process_group, render, matchcollections)
        answers = pool.map(mypartial, groups)

    zvalues = get_z_value_dict(render, stack, answers, groups)
    maxz = np.max(zvalues.values())
    match_matrix = assemble_match_matrix(answers, groups, zvalues)

    rows = int(np.ceil(maxz * 1.0 / sections_per_row))
    sections_per_row = int(np.ceil(maxz * 1.0 / rows))
    rows = int(np.ceil(maxz * 1.0 / sections_per_row))

    f, ax = plt.subplots(rows, 1, figsize=(8, 2 * rows))
    first_section_indices = np.concatenate(
        [np.array([0]),
         np.where(np.diff(groups % 1000) < 0)[0] + 1])
    first_sections = groups[first_section_indices]
    first_section_zs = [zvalues[section] for section in first_sections]

    maxval = np.max(np.log(match_matrix))

    for row in range(rows):
        startz = row * sections_per_row
        endz = row * sections_per_row + sections_per_row
        endz = min(endz, maxz)
        if rows > 1:
            theax = ax[row]
        else:
            theax = ax

        img = theax.imshow(np.log(match_matrix[startz:endz, :].T),
                           interpolation='nearest',
                           cmap=plt.cm.viridis,
                           extent=(startz - .5, endz - .5, maxdz, -maxdz),
                           vmax=maxval)

        theax.autoscale(tight=True)
        for z in first_section_zs:
            if (z >= startz) & (z <= endz):
                theax.plot([z - .5, z - .5], [-maxdz, maxdz],
                           c='w',
                           linewidth=2,
                           linestyle='--',
                           alpha=.5)
    return f
Example No. 19
def _take_global_optimisation_step(positions, objective_function,
                                   cma_processes, **kwargs):
    """
    Takes a global optimisation step either using one core, or multiprocessing
    """
    assert cma_processes > 0, "cma_processes must be bigger than 0"
    if cma_processes == 1:
        ret = [objective_function(x, grad=0, **kwargs) for x in positions]
    elif cma_processes > 1:
        # Using the pool as a context manager will make any exceptions raised kill the other processes.
        with pathos_mp.ProcessingPool(cma_processes) as pool:
            ret = pool.map(lambda x: objective_function(x, grad=0, **kwargs),
                           positions)
    return ret
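A minimal usage sketch, assuming _take_global_optimisation_step is importable and pathos_mp is the module's alias for pathos.multiprocessing; the quadratic objective below is hypothetical and only needs to accept the grad keyword that the helper passes through:

import numpy as np

def objective(x, grad=0, offset=0.0):
    # Toy objective: squared distance from a configurable offset; grad is unused here
    return float(np.sum((np.asarray(x) - offset) ** 2))

if __name__ == '__main__':
    positions = [np.array([0.0, 1.0]), np.array([2.0, 3.0])]
    values = _take_global_optimisation_step(positions, objective,
                                            cma_processes=2, offset=0.5)
    print(values)  # [0.5, 8.5]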
Example No. 20
 def __init__(self):
     self.debug = False
     self.lemmatizer = WordNetLemmatizer()
     self.sw = StopWord()
     self.co_occ = CoOccurrence()
     self.co_occurring_words = []
     self.acronyms = {}
     self.latin_letters = {}
     self.detect_lang = detect
     self.tokenize_sent = sent_tokenize
     self.tokenize_word = word_tokenize
     self.tag_words = pos_tag
     self.est_analyser = Text
     self.ud = unicodedata
     self.pool = mp.ProcessingPool(8)
Example No. 21
def herd(list_input):
    """ Wrapper to parallelise MrMoose on samples"""

    ndim = len(list_input)
    Parallel_cpu = mp.cpu_count()  # take the number of CPUs
    print '# files: ', ndim
    print '# cores: ', Parallel_cpu

    # create the pool of jobs
    pool = mp.ProcessingPool(Parallel_cpu)

    # run and display progress bar
    with tqdm(total=ndim):
        herd_outputs = pool.map(mm.SED_fit, list_input)
    return herd_outputs
Example No. 22
def pmap(f, xs, num_procs=4):
    '''
    Parallel Map.
    The pool spawns worker processes and never closes them.  We fixed a memory
    leak on our server by doing the following below.
    Note: not modifying the state as done below will cause
    pmap to fail when run more than twice in the same process.
    This has been reproduced reliably in the repl, and was fixed
    by mutating the state of the pool.  Icky.
    https://github.com/uqfoundation/pathos/issues/46
    '''
    pool = PM.ProcessingPool(num_procs)
    result = pool.map(f, xs)
    pool.close()
    PM.__STATE['pool'] = None
    return result
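A minimal usage sketch, assuming pmap and the module's PM alias for pathos.multiprocessing are in scope (and a pathos version where pathos.multiprocessing exposes the module-level __STATE cache used above); square is a hypothetical work item:

def square(x):
    # Toy work item
    return x * x

if __name__ == '__main__':
    print(pmap(square, range(10), num_procs=2))  # [0, 1, 4, ..., 81]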
Example No. 23
def run_cv_model(label,
                 train,
                 test,
                 data_key,
                 model_fn,
                 kf,
                 train_key=None,
                 targets=None,
                 eval_fn=None):
    mean_cv_scores = []
    actual_cv_scores = []

    if targets is None:
        targets = [
            'toxic', 'severe_toxic', 'obscene', 'threat', 'insult',
            'identity_hate'
        ]
    if eval_fn is None:
        eval_fn = roc_auc_score

    n_cpu = mp.cpu_count()
    n_nodes = min(n_cpu - 1, 6)
    print('Starting a jobs server with %d nodes' % n_nodes)
    pool = mp.ProcessingPool(n_nodes, maxtasksperchild=500)
    results = pool.map(
        lambda targ: run_with_target(label, targ, data_key, model_fn, kf,
                                     train_key, eval_fn), targets)
    for rr in results:
        print(rr['target'] + ' CV scores : ' + str(rr['cv']))
        mean_cv_score = np.mean(rr['cv'])
        actual_cv_score = eval_fn(train[rr['target']], rr['train'])
        print(rr['target'] + ' mean CV : ' + str(mean_cv_score) +
              ' overall: ' + str(actual_cv_score))
        mean_cv_scores.append(mean_cv_score)
        actual_cv_scores.append(actual_cv_score)
        train[rr['label'] + '_' + rr['target']] = rr['train']
        if test is not None:
            test[rr['label'] + '_' + rr['target']] = rr['test']
    pool.close()
    pool.join()
    pool.terminate()
    pool.restart()

    print(label + ' CV mean : ' + str(np.mean(mean_cv_scores)) +
          ', overall: ' + str(np.mean(actual_cv_scores)))
    return train, test
Example No. 24
    def __init__(self, conf_file=[]):
        '''
        Constructor
        '''
        super(AssocInfraPy_LANL,self).__init__(conf_file,'AssocLocParams')
        self.assocversion=0
        print('Assoc version:',self.assocversion)
        self.year=int(self.general_PARAM['year'])
        self.dayofyearini=int(self.general_PARAM['dayofyearini'])
        self.dayofyearend=int(self.general_PARAM['dayofyearend'])
        self.jdayini = int(self.year*1000 + self.dayofyearini)
        self.jdayend = int(self.year*1000 + self.dayofyearend)
        self.cpu=self.general_PARAM['cpucnt']
        self.pl = mp.ProcessingPool(cpu_count() - 1)


        self.net=self.task_PARAM['network']
        self.pfdid=self.task_PARAM['pfdetectid']
        self.pfkid=self.task_PARAM['pfkid']
        self.beamwidth=float(self.task_PARAM['beamwidth'])
        self.rangemax=float(self.task_PARAM['rangemax'])
        self.distmax=float(self.task_PARAM['distmax'])
        self.clusterthresh=float(self.task_PARAM['clusterthresh'])
        self.trimthresh=(self.task_PARAM['trimthresh'])
        self.trimthreshscalar=(self.task_PARAM['trimthreshscalar'])
        self.mindetpop=(self.task_PARAM['mindetpop'])
        self.minarraypop=(self.task_PARAM['minarraypop'])
        self.duration=float(self.task_PARAM['duration'])


        try:
            self.resultstable=self.task_PARAM['resultstable']
        except KeyError:
            print('No specific tables')
            self.resultstable=None

        listK=list(self.task_PARAM.keys())
        self.fdtables_names=[]
        for li in listK:
            if bool('fdtable_' in li):
                self.fdtables_names.append(self.task_PARAM[li])
        if len( self.fdtables_names)==0:
            print('NO tables with fd results were included, define one fd results tables, or include Fd_results (this is the table where results are written by default, but needs to be specified ) ')
            sys.exit()
        self.num_tables=len(self.fdtables_names)
Example No. 25
    def test_trees(self, new_trees, depth):
        """
        Run qpGraph on a list of trees
        """
        if self.nthreads > 1:
            # we need to buffer the results to use multi-threading
            pool = mp.ProcessingPool(self.nthreads)
            results = pool.map(
                self.run_qpgraph,
                itertools.izip(new_trees, itertools.repeat(depth)))
        else:
            # test the trees without multi-threading
            results = []
            for new_tree in new_trees:
                result = self.run_qpgraph((new_tree, depth))
                results.append(result)

        return results
Example No. 26
    def calculate_bayes_factors(self):
        """
        Use `admixturegraph` to calculate Bayes factors for all fitted graphs.

        See https://github.com/mailund/admixture_graph
        """
        self.log(
            "INFO: There are {:,} graphs to compute Bayes factors for.".format(
                len(self.graphs)))

        if self.nthreads > 1:
            # compute the model likelihoods
            pool = mp.ProcessingPool(self.nthreads)
            pool.map(self.model_likelihood, self.graphs)
        else:
            # compute likelihoods without multi-threading
            for graph in self.graphs:
                self.model_likelihood(graph)
Example No. 27
    def build_matrix(self):
        """
        Build a symmetrical distance matrix for all graphs.
        """

        # instantiate all the graph objects
        for graph_name in self.graph_names:
            dot_file = self.dot_path + '-{name}.dot'.format(name=graph_name)
            graph = load_graph(self.parse_dot_file(dot_file), fmt='dot')
            self.graphs.append(graph)

        # how many graphs are we comparing
        size = len(self.graph_names)

        # initialise the distance matrix
        dist_matrix = np.zeros([size, size])

        # get all the i,j pairs for one diagonal half
        idxs = [(i, j) for i in range(1, size) for j in range(i)]

        self.log(
            "INFO: Calculating distance matrix for {:,} graph pairs".format(
                len(idxs)))

        if self.nthreads > 1:
            # we need to buffer the results to use multi-threading
            pool = mp.ProcessingPool(self.nthreads)
            results = pool.map(self.calculate_distance, idxs)
        else:
            # compute distances without multi-threading
            results = []
            for i, j in idxs:
                result = self.calculate_distance((i, j))
                results.append(result)

        # populate the distance matrix
        for i, j, dist in results:
            dist_matrix[i, j] = dist_matrix[j, i] = dist

        # save the matrix
        np.save(self.mtx_file, dist_matrix)

        return dist_matrix
Example No. 28
def main():
    # pool
    gc.collect()
    pool = mp.ProcessingPool(nodes=8)

    # actual work
    workdict['toolbox'].register('map', pool.map)
    ghistory.update(workdict['population'])
    gc.collect()
    if tag == 'eaSimple':
        pop, logs = algorithms.eaSimple(**workdict)
    elif tag == 'eaMuPlusLambda':
        pop, logs = algorithms.eaMuPlusLambda(**workdict)
    pool.close()
    pool.join()

    plot_image: np.ndarray = plot_records(logbook=logs)
    plot_genealogy: np.ndarray = genealogy_plot(history=ghistory,
                                                toolbox=workdict['toolbox'])
    tosave = {
        'func': gtoolbox.compile(expr=workdict['halloffame'][0]),
        'plot': plot_image,
        'history': plot_genealogy
    }
    to_state = {'all_pops': pop, 'history': ghistory}
    print(str(workdict['halloffame'][0]))
    dill.dump(tosave, args.ofile, protocol=-1)
    args.ofile.close()
    with open(append_savefile(args.ofile.name), 'wb') as state_file:
        dill.dump(to_state, state_file, protocol=-1)
        state_file.close()
    print(f'file written as dict object to {args.ofile.name}')

    if args.test:
        with open(args.ofile.name, 'rb') as ifile:
            refunc: dict = pickle.load(ifile)
            ifile.close()
        if isinstance(refunc, dict) and isinstance(refunc['func'], LambdaType):
            print(f'File correctly reloaded, testing for output..')
            temp = refunc["func"](*np.random.randn(g_data.numfeatures))
            temp = f"OK, output: {temp:.2f} is a float" if isinstance(temp, float) \
                else f"Error!, Lambda produced output of type {str(type(temp))}"
            print(f'{temp}')
Example No. 29
    def run(self, listChecker, histograms):
        """
        Main function to be executed when starting the code.
        """
        # global configuration
        # parser = argparse.ArgumentParser( description = 'Analysis Tool using XMLs' )
        # parser.add_argument('-n', '--nWorkers',   default=4,                                 type=int,   help='number of workers' )
        # parser.add_argument('-p', '--parallel',   default=False,   action='store_const',     const=True, help='enables running in parallel')
        # parser.add_argument('-c', '--configfile', default="Configurations/Configuration.py", type=str,   help='files to be analysed')
        # parser.add_argument('-a', '--analysis',   default=""                               , type=str,   help='overrides the analysis specified in configuration file')
        # parser.add_argument('-s', '--samples',    default=""                               , type=str,   help='string with comma separated list of samples to analyse')
        # parser.add_argument('-o', '--output',     default=""                               , type=str,   help='name of the output directory')
        # args = parser.parse_args()

        # configModuleName = "CustomConfiguration"
        # configuration = importlib.import_module(configModuleName)

        # checkAnalysis(configuration, args.analysis)
        processingDict = CustomConfiguration.Processes
        print CustomConfiguration.Job["Fraction"]

        CustomConfiguration.Job["Batch"] = True
        self.jobs = [BuildJob(CustomConfiguration.Job, processName, fileLocation, listChecker, histograms, self.stopping)
                     for processName, fileLocation in processingDict.items()]
        self.jobs = SortJobsBySize(self.jobs)
        # start with n worker processes
        self.pool = mp.ProcessingPool(4)
        self.pool.map(RunJob, self.jobs)

        """jobThreads =[]
        for job in self.jobs:
            jobThread = JobThread(job)
            jobThreads.append(jobThread)
            jobThread.start()

        for jobThread in jobThreads:
            jobThread.join()
        """

        print "test2"
        print NewJob.stop
Example No. 30
    def __init__(self, qApp, progname, progversion):
        super().__init__()

        self.ipApp = qApp  # reference to the application

        pg.setConfigOption('background', 'w')
        pg.setConfigOption('foreground', 'k')
        pg.setConfigOptions(antialias=False)

        self.settings = QSettings('LANL', 'InfraView')

        self.progname = progname
        self.progversion = progversion

        # initialize the multiprocessor pool
        self.mp_pool = mp.ProcessingPool(cpu_count() - 1)

        self.buildUI()
        self.restoreWindowGeometrySettings()