Example #1
def validate(directory,
             classifierType,
             mtStep,
             mtWin,
             stStep,
             stWin,
             num_threads=mp.cpu_count()):

    birds = []
    for root, dirs, files in os.walk(os.path.join(directory, 'Training')):
        for bird in dirs:
            birds.append(bird)
        break
    current_dir = os.getcwd()
    model_dir = os.path.join(current_dir, 'random_forest_model')
    #directory = directory + 'Training'
    parameters = list(
        itertools.product(classifierType, mtStep, mtWin, stStep, stWin))
    #Gets rid of invalid sets of parameters
    parameters_temp = deepcopy(parameters)
    for p in parameters:
        if p[1] > p[2] or p[3] > p[4] or p[4] >= p[2]:
            parameters_temp.remove(p)
            continue  # already removed; skip the model-file check below to avoid a second remove()
        model = 'x'.join([p[0], str(p[1]), str(p[2]), str(p[3]), str(p[4])])
        if os.path.isfile(os.path.join(model_dir, model)):
            parameters_temp.remove(p)
    parameters = parameters_temp
    verifier = partial(train_and_verify, directory=directory, birds=birds)

    pros = Pool(num_threads)
    pros.map(verifier, parameters)
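
A minimal, self-contained sketch of the pattern Example #1 relies on: bind the fixed arguments with functools.partial so Pool.map only has to vary the parameter tuple. The train_and_verify stand-in and the sample values below are hypothetical.

import itertools
from functools import partial
from pathos.multiprocessing import Pool

def train_and_verify(params, directory, birds):
    # hypothetical worker: unpack one (classifier, mtStep, mtWin, stStep, stWin) tuple
    classifier, mt_step, mt_win, st_step, st_win = params
    return (classifier, mt_step, mt_win, st_step, st_win, directory, len(birds))

if __name__ == '__main__':
    grid = list(itertools.product(['svm', 'randomforest'], [1.0], [2.0], [0.05], [0.1]))
    worker = partial(train_and_verify, directory='data/Training', birds=['sparrow', 'robin'])
    pool = Pool(2)
    results = pool.map(worker, grid)
    pool.close()
    pool.join()
    print(results)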
Example #2
    def noise_removal_dir(self, rootdir):

        num_threads = self.num_threads

        if not os.path.exists(rootdir):
            raise Exception(rootdir + " not found!")

        for root, dirs, files in os.walk(rootdir):
            parent, folder_name = os.path.split(root)
            if folder_name == 'activity' or folder_name == 'noise' or '_clean' in folder_name:
                shutil.rmtree(root)
        num_samples_processed = 0
        wav_files = []
        for root, dirs, files in os.walk(rootdir):
            for file in files:
                if file.endswith('.wav'):
                    wav_files.append(os.path.join(root, file))
                    num_samples_processed += 1
                    if not num_threads:
                        self.noise_removal(os.path.join(root,file))

        print "Now beginning preprocessing for: ", num_samples_processed, " samples."
        
        if num_threads:
            pros = Pool(num_threads)
            pros.map(self.noise_removal, wav_files)

        print "Preprocessing complete!\n"
Example #3
def run_posterior_grid(tree_files, alpha, wishart_df):

    #true_trees= [tree_generation_laboratory.load_tree(tree_file) for tree_file in tree_files]
    summaries = [
        summary.s_posterior(),
        summary.s_variable('mhr', output='double_missing'),
        summary.s_no_admixes(),
        summary.s_average_branch_length(),
        summary.s_total_branch_length(),
        summary.s_basic_tree_statistics(
            Rtree_operations.get_number_of_ghost_populations,
            'ghost_pops',
            output='integer'),
        summary.s_basic_tree_statistics(
            Rtree_operations.get_max_distance_to_root, 'max_root'),
        summary.s_basic_tree_statistics(
            Rtree_operations.get_min_distance_to_root, 'min_root'),
        summary.s_basic_tree_statistics(
            Rtree_operations.get_average_distance_to_root, 'average_root'),
        summary.s_basic_tree_statistics(
            tree_statistics.unique_identifier_and_branch_lengths,
            'tree',
            output='string'),
        summary.s_variable('proposal_type', output='string'),
        summary.s_variable('sliding_regraft_adap_param',
                           output='double_missing'),
        summary.s_variable('rescale_adap_param', output='double_missing'),
        summary.s_likelihood(),
        summary.s_prior()
    ]

    def f(x):
        unsuffixed_filename = '.'.join(x.split('.')[:-1])
        true_tree = tree_generation_laboratory.identifier_to_tree_clean_wrapper(
            tree_generation_laboratory.load_tree(x))
        s_tree = Rtree_operations.create_trivial_tree(
            Rtree_operations.get_no_leaves(true_tree))
        simulation_sanity.test_posterior_model(
            true_tree,
            s_tree,
            100,
            summaries=summaries,
            thinning_coef=30,
            wishart_df=wishart_df,
            resimulate_regrafted_branch_length=alpha,
            filename=unsuffixed_filename + '-results.csv')

    from pathos.multiprocessing import Pool
    p = Pool(len(tree_files))
    p.map(f, tree_files)
Example #4
File: struc2vec.py Project: Yelrose/PGL
    def calc_distances_between_nodes(self):
        """
        Use the dtw algorithm to calculate the distance between nodes. 
        """
        from fastdtw import fastdtw
        from pathos.multiprocessing import Pool
        # decide which distance algorithm to use
        if self.opt1 == True:
            self.distance_calc_func = self.distance_opt1_func
        else:
            self.distance_calc_func = self.distance_func

        dtws = []
        if self.opt2:
            depth = 0
            for node in self.nodes:
                if node in self.degree_list:
                    if depth in self.degree_list[node]:
                        degree = self.degree_list[node][depth]
                        if args.opt1:
                            degree = degree[0][0]
                        else:
                            degree = degree[0]
                    if degree not in self.degree2nodes:
                        self.degree2nodes[degree] = []
                    if node not in self.node2degree:
                        self.node2degree[node] = degree
                    self.degree2nodes[degree].append(node)
            # select the log(n) node to select data 
            degree_keys = self.degree2nodes.keys()
            degree_keys = np.array(list(degree_keys), dtype='int')
            self.degrees_sorted = list(np.sort(degree_keys))
            selected_nbh_nums = 2 * math.log(self.graph.num_nodes - 1, 2)
            self.selected_nbh_nums = selected_nbh_nums

            pool = Pool(10)
            dtws = pool.map(self.calc_node_with_neighbor_dtw_opt2, self.nodes)
            pool.close()
            pool.join()
        else:
            src_indices = range(0, self.graph.num_nodes - 2)

            pool = Pool(10)
            dtws = pool.map(self.calc_node_with_neighbor_dtw, src_indices)
            pool.close()
            pool.join()
        print('calc the dtw done.')
        for dtw in dtws:
            self.distance.update(dtw)
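
For reference, the fastdtw call that the distance helpers above wrap can be reduced to the following sketch; the degree sequences and the absolute-difference cost are illustrative only (the real struc2vec code uses its own cost function).

from fastdtw import fastdtw
from pathos.multiprocessing import Pool

# illustrative ordered-degree sequences for two node pairs
pairs = [([1, 2, 2, 3], [1, 2, 3]), ([4, 4, 5], [4, 5, 5, 6])]

def dtw_distance(pair):
    a, b = pair
    distance, _path = fastdtw(a, b, radius=1, dist=lambda x, y: abs(x - y))
    return distance

pool = Pool(2)
print(pool.map(dtw_distance, pairs))
pool.close()
pool.join()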
Example #5
    def create_initial_population(self):
        """Create members of the first population randomly."""

        for _ in range(self.pop_size):
            individual = Particle(self.chromosome_size, self.fitness_function)
            if not self.pool:
                individual.calculate_fitness()
            self.add_individual_to_pop(individual)

        if self.pool:
            p = Pool(self.pool_size)
            manager = Manager()
            lock = manager.Lock()
            counter = manager.Value('i', 0)

            def pool_function(inside_lock, inside_counter, inside_member):
                inside_lock.acquire()
                inside_counter.value += 1
                inside_lock.release()

                fitness_value = inside_member.calculate_fitness(
                    gpu=inside_counter.value % 4)

                return fitness_value

            func = partial(pool_function, lock, counter)
            fitness_values = p.map(func, self.current_population[:])

            for value, member in zip(fitness_values,
                                     self.current_population[:]):
                member.fitness = value

            p.terminate()
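
The Manager/Lock idiom above, a shared counter used to hand each task a GPU id, can be isolated into the short sketch below; pool_function mirrors the example, while the item values and the modulo-4 device count are placeholders.

from functools import partial
from multiprocessing import Manager
from pathos.multiprocessing import Pool

def pool_function(lock, counter, item):
    # take the next ticket atomically, then derive a device id from it
    lock.acquire()
    counter.value += 1
    ticket = counter.value
    lock.release()
    return item, ticket % 4  # placeholder: which of 4 GPUs this task would use

if __name__ == '__main__':
    manager = Manager()
    lock = manager.Lock()
    counter = manager.Value('i', 0)
    func = partial(pool_function, lock, counter)
    pool = Pool(4)
    print(pool.map(func, range(8)))
    pool.terminate()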
Example #6
    def _filter_biology(self):

        # 3.3.3.1  todo: parallel
        # Helper method, used to execute in parallel
        def __process_input(memory):
            # BCF for every memory is stored in the tail of the cultural group
            bcf = memory.get_tail_knowledge()
            distance = abs(
                (bcf.get_biology() + self.internal_state.get_biology()) / 2.0 -
                self.desired_state.get_biology())
            return distance, memory

        # Init thread's pool, with the determined processor number
        pool = Pool(DetectSystem().cpu_count())
        # Parallel execution
        __temp_result = pool.map(__process_input, self.inputs)
        # Calculate the minimum distance
        __min_result = min(__temp_result, key=lambda t: t[0])
        # Extract the memory from the tuple
        best_biology = __min_result[1]

        #best_biology = self.inputs[0]
        #bcf = best_biology.get_tail_knowledge()
        #min_distance = abs((bcf.get_biology() + self.internal_state.get_biology())/2.0 - self.desired_state.get_biology())

        #for memory in self.inputs:
        # BCF for every memory is stored in the tail of the cultural group
        #    bcf = memory.get_tail_knowledge()
        #    distance = abs((bcf.get_biology() + self.internal_state.get_biology())/2.0 - self.desired_state.get_biology())
        #    if distance < min_distance:
        #        best_biology = memory
        #        min_distance = distance
        return best_biology
Example #7
    def _filter_feelings(self):
        # 3.3.3.3 todo: parallel

        # Helper method, used to execute in parallel
        def __process_input(memory):
            # BCF for every memory is stored in the tail of the cultural group
            bcf = memory.get_tail_knowledge()
            feeling = bcf.get_feelings()
            return feeling, memory

        # Init thread's pool, with the determined processor number
        pool = Pool(DetectSystem().cpu_count())
        # Parallel execution
        __temp_result = pool.map(__process_input, self.inputs)
        # Calculate the maximum feeling
        __max_result = max(__temp_result, key=lambda t: t[0])
        # Extract the memory from the tuple
        best_feelings = __max_result[1]

        # best_feelings = self.inputs[0]
        # bcf = best_feelings.get_tail_knowledge()
        # max = bcf.get_feelings()
        #
        # for memory in self.inputs:
        #     # BCF for every memory is stored in the tail of the cultural group
        #     bcf = memory.get_tail_knowledge()
        #     if bcf.get_feelings() > max:
        #         best_feelings = memory
        #         max = bcf.get_feelings()
        return best_feelings
Example #8
 def multiproc_map(self, func):
     from pathos.multiprocessing import Pool
     pool = Pool()
     result = List(pool.map(func, self))
     pool.close()
     pool.join()
     return result
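
This works because pathos serializes callables with dill rather than the standard pickle, so bound methods and even lambdas can be shipped to worker processes. A minimal sketch, assuming pathos is installed:

from pathos.multiprocessing import Pool

class List(list):
    def multiproc_map(self, func):
        pool = Pool()
        result = List(pool.map(func, self))
        pool.close()
        pool.join()
        return result

nums = List([1, 2, 3, 4])
print(nums.multiproc_map(lambda x: x * x))  # a lambda is fine here thanks to dill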
Example #9
    def learn(self, knowledge):
        # If there is no capacity in neuron list, double size
        if self._index_ready_to_learn == (len(self.neuron_list) - 1):
            new_list = []
            # Fill neuron list with new RelNeuron instances

            # 3.2.2.2 todo: parallel
            # Detect system and determine threads number to use
            detect_system = DetectSystem()
            # Init thread's pool, with the determined threads number
            pool = Pool(detect_system.cpu_count())

            new_list = pool.map(lambda index: RelNeuron(),
                                range(len(self.neuron_list)))
            #for index in range(len(self.neuron_list)):
            #    new_list.append(RelNeuron())
            self.neuron_list = self.neuron_list + new_list
        # Check for neurons that already have given knowledge ids
        for index in range(self._index_ready_to_learn):
            if self.neuron_list[index].has_ids(knowledge.get_h_id(),
                                               knowledge.get_s_id()):
                return False
        # If there are no neurons with given pair of ids, learn
        self.neuron_list[self._index_ready_to_learn].learn(knowledge)
        self._index_ready_to_learn += 1
        return True
Example #10
def apply_by_multiprocessing(df, func, **kwargs):
    workers = kwargs.pop('workers')
    pool = Pool(processes=workers)


    result = pool.map(_apply_df, [(d, func, kwargs)
                                  for d in np.array_split(df, workers)])
    pool.close()
    return pd.concat(list(result))
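
_apply_df is not shown in Example #10; a self-contained version of the same split-apply-concat pattern, with a stand-in _apply_df, might look like this:

import numpy as np
import pandas as pd
from pathos.multiprocessing import Pool

def _apply_df(args):
    # unpack one DataFrame chunk plus the function and its keyword arguments
    df, func, kwargs = args
    return df.apply(func, **kwargs)

def apply_by_multiprocessing(df, func, **kwargs):
    workers = kwargs.pop('workers')
    pool = Pool(processes=workers)
    result = pool.map(_apply_df,
                      [(chunk, func, kwargs) for chunk in np.array_split(df, workers)])
    pool.close()
    pool.join()
    return pd.concat(list(result))

if __name__ == '__main__':
    frame = pd.DataFrame({'a': range(10)})
    print(apply_by_multiprocessing(frame, lambda row: row['a'] * 2, axis=1, workers=2))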
Example #11
def searchn(x, pred, n):
    import multiprocessing
    from pathos.multiprocessing import Pool
    nworkers = int(multiprocessing.cpu_count() * 0.75)
    pool = Pool(processes=nworkers)

    def searcher(args):
        return search(*args)

    return pool.map(searcher, [(x, pred)] * n)
Example #12
    def retrieve_exact_memory(self, trigger):
        # Use bbcc protocol
        self.bum()

        # 3.2.7.1 TODO: parallel
        # Init thread's pool, with the determined processor number
        pool = Pool(DetectSystem().cpu_count())
        # Parallel execution
        __temp = pool.map(lambda index: self.bip(trigger[index]),
                          range(len(trigger) - 1))
        return self.group_list[self.check(trigger[len(trigger) - 1])]
Example #13
    def cluster(self):
        """
        Perform clustering on each image. 

        Returns
        -------
        A list of tuples (w, z), one for each image. w is the final cluster 
        centers, z are the fuzzy, weighted membership values for each pixel.
        """
        pool = Pool()
        return pool.map(self.__csFCM, self.__images)
Example #14
 def classify(self):
     directory = self.directory
     num_threads = self.num_threads
     wav_files = []
     for file in os.listdir(directory):
         if file.endswith('.wav') or file.endswith('.WAV'):
             file = os.path.join(directory, file)
             wav_files.append(file)
             if not num_threads:
                 self.classFile(file)
     
     if num_threads:
         try:
             pros = Pool(num_threads)
             pros.map(self.classFile, wav_files)
         except cPickle.PicklingError:
             for wfile in wav_files:
                 self.classFile(wfile)
     if os.path.exists(os.path.join(directory, "noise")):
         shutil.rmtree(os.path.join(directory, "noise"))
     if os.path.exists(os.path.join(directory, "activity")):
         shutil.rmtree(os.path.join(directory, "activity"))
Example #15
    def create_histograms(self):
        logger.debug('--->Systematics::create_histograms:')
        # collect ROOT objects
        self._root_objects_holder = RootObjects(self._output_file)

        if self._num_threads == 1:
            for systematic in self._systematics:
                logger.debug("---->Create ROOT objects for systematic %s.",
                             systematic.name)
                if logger.getEffectiveLevel() == 10: print '---->Systematics::create_histograms: systematic', systematic.process, systematic._process.estimation_method._friend_directories
                systematic.create_root_objects()
        else:
            logger.debug("Create ROOT objects for all systematics.")

            from pathos.multiprocessing import Pool
            pool = Pool(processes=self._num_threads)

            systematics_new = pool.map(systematic_create_root_objects,
                                       [s for s in self._systematics])
            pool.close()
            pool.join()
            del pool

            # Because the new objects have different addresses in memory,
            # the result objects have to be copied.
            for i_sys in range(len(systematics_new)):
                self._systematics[i_sys] = systematics_new[i_sys]
        logger.debug('-->Create root holders')
        for systematic in self._systematics:
            if self._find_unique_objects:
                self._root_objects_holder.add_unique(systematic.root_objects)
            else:
                self._root_objects_holder.add(systematic.root_objects)
        # self._root_objects_holder.check_duplicates() # TODO: Implement this if needed

        # produce ROOT objects (in parallel)
        logger.debug("Produce ROOT objects using the %s backend.",
                     self._backend)
        logger.debug('-->Produce root with ' + self._backend + ' backend')
        if self._backend == "classic":
            self._root_objects_holder.produce_classic(self._num_threads)
        elif self._backend == "tdf":
            self._root_objects_holder.produce_tdf(self._num_threads)
        else:
            logger.fatal("Backend %s is not implemented.", self._backend)
            raise Exception

        # set duplicates to the produced ROOT objects
        logger.debug('--># set duplicates to the produced ROOT objects')
        if self._find_unique_objects:
            self._root_objects_holder.set_duplicates()
Example #16
    def resize(self):
        new_list = []
        # Fill neuron list with memories
        # 3.2.5.2  todo: parallel

        # Init thread's pool, with the determined processor number
        pool = Pool(DetectSystem().cpu_count())
        # Parallel execution
        new_list = pool.map(lambda index: CulturalGroup(),
                            range(len(self.group_list)))

        # for index in range(len(self.group_list)):
        #     new_list.append(CulturalGroup())
        self.group_list = self.group_list + new_list
Example #17
    def __init__(self, group_count=1):
        self.group_list = []
        # 3.2.5.1 todo: parallel

        # Init thread's pool, with the determined processor number
        pool = Pool(DetectSystem().cpu_count())
        # Parallel execution
        self.group_list = pool.map(lambda index: CulturalGroup(),
                                   range(group_count))

        #for index in range(group_count):
        #     self.group_list.append(CulturalGroup())
        self._index_ready_to_learn = 0
        self._clack = False
        self._recognized_indexes = []
Example #18
def json_to_metadata_chunks(args,file_chunks = []):
    # Open the json file, and split the reading of the file among worker threads
    results = [] 

    if args.json_metadata == None:
        return None
    num_proc = len(file_chunks)
    print (file_chunks, file=sys.stderr)
    pool = Pool(processes = num_proc)
    

    with open(args.json_metadata) as json_metadata_file:
        results = [pool.map(process_json_line,json_metadata_file, chunk) for index, chunk in enumerate(file_chunks)]
    #objs = [p.get() for p in results] 
    return merge_exact_duplicates(results)
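
Note that pool.map(process_json_line, json_metadata_file, chunk) passes chunk in the position Pool.map reserves for its chunksize argument. A more conventional shape, splitting the file's lines into chunks and mapping a per-chunk parser over them, is sketched below with a hypothetical parse_chunk:

import json
from pathos.multiprocessing import Pool

def parse_chunk(lines):
    # hypothetical worker: parse each JSON line of one chunk into a dict
    return [json.loads(line) for line in lines if line.strip()]

def json_lines_to_metadata(path, num_proc=4):
    with open(path) as f:
        lines = f.readlines()
    size = max(1, len(lines) // num_proc)
    chunks = [lines[i:i + size] for i in range(0, len(lines), size)]
    pool = Pool(processes=num_proc)
    results = pool.map(parse_chunk, chunks)
    pool.close()
    pool.join()
    return [record for chunk in results for record in chunk]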
Example #20
def test_prior_model_several_chains(start_trees,
                                    sim_length=100000,
                                    summaries=None,
                                    thinning_coef=1):
    posterior = initialize_prior_as_posterior()
    if summaries is None:
        summaries = [
            s_variable('posterior'),
            s_variable('mhr'),
            s_no_admixes()
        ]
    proposal = basic_meta_proposal()
    sample_verbose_scheme = {summary.name: (1, 0) for summary in summaries}
    p = Pool(len(start_trees))

    def func(nstart_tree):
        n, start_tree = nstart_tree
        final_tree, final_posterior, results, _ = basic_chain(
            start_tree,
            summaries,
            posterior,
            proposal,
            post=None,
            N=sim_length,
            sample_verbose_scheme=sample_verbose_scheme,
            overall_thinning=int(thinning_coef + sim_length / 60000),
            i_start_from=0,
            temperature=1.0,
            proposal_update=None,
            check_trees=True)
        save_to_csv(results,
                    summaries,
                    filename='results_' + str(n + 1) + '.csv',
                    origin_layer=(n + 1, 1))

    p.map(func, enumerate(start_trees))
Example #21
    def get_sight_rels(self, s_id):
        # List of sight relations
        sight_rels = []
        # 3.2.2.3 todo: parallel
        # Detect system and create threads pool
        pool = Pool(DetectSystem().cpu_count())

        sight_rels = pool.map(
            lambda index: self.neuron_list[index].get_knowledge()
            if self.neuron_list[index].recognize_sight(s_id) else None,
            range(self._index_ready_to_learn))
        sight_rels = filter(None, sight_rels)
        #for index in range(self._index_ready_to_learn):
        #    if self.neuron_list[index].recognize_sight(s_id):
        #        sight_rels.append(self.neuron_list[index].get_knowledge())
        return sight_rels
Example #22
def solve_parallel(nSolns, T, Nc, v, nJobs=None, **kwargs):
    """
    Run solve() in parallel
    """
    if nJobs is None:
        nJobs = cpu_count()
    assert nJobs > 0

    def wrapped_solve(i):
        rng = np.random.RandomState()
        return solve(T, Nc, v, rng=rng, **kwargs)

    p = Pool(nJobs)
    Solutions = p.map(wrapped_solve, list(range(nSolns)))
    p.close()
    return Solutions
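
The fresh np.random.RandomState() created inside wrapped_solve gives every task its own generator, so forked workers do not all replay the parent's random stream. A stripped-down sketch with a dummy solve:

import numpy as np
from multiprocessing import cpu_count
from pathos.multiprocessing import Pool

def solve(T, Nc, v, rng=None):
    # dummy stand-in for the real solver: draw Nc values scaled by v
    rng = rng or np.random.RandomState()
    return rng.random_sample(Nc) * v

def solve_parallel(nSolns, T, Nc, v, nJobs=None):
    nJobs = nJobs or cpu_count()

    def wrapped_solve(i):
        # a new RandomState per task avoids identical streams across workers
        return solve(T, Nc, v, rng=np.random.RandomState())

    p = Pool(nJobs)
    solutions = p.map(wrapped_solve, range(nSolns))
    p.close()
    return solutions

if __name__ == '__main__':
    print(solve_parallel(3, T=1.0, Nc=2, v=10.0))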
Example #23
    def __init__(self):
        # Desired state
        self.desired_state = InternalState()
        self.desired_state.set_state([0.5,1,1])
        # Initial internal state
        self.internal_state = InternalState([0.5,0.5,0.5])

        # Decision by prediction network
        self.decision_prediction_block = DecisionByPredictionBlock()
        self.decision_prediction_block.set_desired_state(self.desired_state)
        self.decision_prediction_block.set_internal_state(self.internal_state)

        # DEFAULT TRAINING, IT CAN LATER BE OVERRIDEN
        # Create a random training set so that the net can learn the relation prediction = (ei + choice.bcf)/2
        # We require a minimum of 18 points
        training_set = []
        # 3.2.4.1 todo: parallelize

        # Helper method, used to execute in parallel
        def __generate_training(index):
            ei = [random.random(), random.random(), random.random()]
            choice_bcf = [random.random(), random.random(), random.random()]
            prediction = [ei_j / 2.0 + choice_bcf_j / 2.0 for ei_j, choice_bcf_j in zip(ei, choice_bcf)]
            return ei + choice_bcf, prediction

        # Init thread's pool, with the determined processor number
        pool = Pool(DetectSystem().cpu_count())
        # Parallel execution
        training_set = pool.map(__generate_training, range(20))


        # for index in range(20):
        #     ei = [random.random(), random.random(), random.random()]
        #     choice_bcf = [random.random(), random.random(), random.random()]
        #     prediction = [ei_j / 2.0 + choice_bcf_j / 2.0 for ei_j, choice_bcf_j in zip(ei, choice_bcf)]
        #     training_set.append((ei + choice_bcf, prediction))

        # Remodel predictive net
        self.decision_prediction_block.remodel_predictive_net(training_set)

        self._inputs = None
        self._new_inputs = False
        self.decision = None
        self._last_decision_type = None
        self._last_selected_input = None
        self._last_decision_internal_state = None
Example #24
File: preprocess.py Project: yakzan/ktext
def apply_parallel(data: List[Any], func: Callable) -> List[Any]:
    """
    Apply function to list of elements.

    Automatically determines the chunk size.
    """
    cpu_cores = cpu_count()

    chunk_size = ceil(len(data) / cpu_cores)
    pool = Pool(cpu_cores)
    try:
        transformed_data = pool.map(func,
                                    chunked(data, chunk_size),
                                    chunksize=1)
    finally:
        # always release the workers; the result is returned only on success
        pool.close()
        pool.join()
    return transformed_data
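
The important detail in Example #24 is that func receives a whole chunk rather than a single element. A self-contained sketch of the same chunk-and-map shape, assuming chunked is more_itertools.chunked (the original import is not shown):

from math import ceil
from multiprocessing import cpu_count
from more_itertools import chunked  # assumed source of `chunked`
from pathos.multiprocessing import Pool

def upper_chunk(chunk):
    # toy per-chunk worker: it gets a list of elements, not one element
    return [s.upper() for s in chunk]

if __name__ == '__main__':
    data = list('abcdefgh')
    cores = cpu_count()
    pool = Pool(cores)
    try:
        per_chunk = pool.map(upper_chunk, chunked(data, ceil(len(data) / cores)), chunksize=1)
    finally:
        pool.close()
        pool.join()
    print(per_chunk)  # one result list per chunk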
Example #25
    def __init__(self, neuron_count):
        # Create neuron list
        self.neuron_list = []
        # Fill neuron list with new RelNeuron instances

        # 3.2.2.1 todo: parallel
        # Detect system and determine threads number to use
        detect_system = DetectSystem()
        # Init thread's pool, with the determined threads number
        pool = Pool(detect_system.cpu_count())

        self.neuron_list = pool.map(lambda index: RelNeuron(),
                                    range(neuron_count))

        #for index in range(neuron_count):
        #    self.neuron_list.append(RelNeuron())
        # Index of ready to learn neuron
        self._index_ready_to_learn = 0
Example #26
def process_batch(df, directory, engine, num_cores = 12):
  
    p = Pool(num_cores)
  
    try:
      
        success = pd.Series(p.map(lambda i: write_indexes_to_table(df.loc[i, 'file_name'], df.loc[i, 'document'],\
                                df.loc[i, 'form_type'], directory, engine),\
                              range(df.shape[0])))
        p.close()
        
        num_success = success.sum()
        return(num_success)
      
    except Exception as e:
      
        print(e)
        p.close()
        return(None)
Example #27
File: struc2vec.py Project: Yelrose/PGL
    def random_walk_structual_sim(self):
        """
        According to struct distance to walk the path 
        """
        from pathos.multiprocessing import Pool
        print('start process struc2vec random walk.')
        walks_process_ids = [i for i in range(0, self.num_walks)]
        pool = Pool(10)
        walks = pool.map(self.executor_random_walk, walks_process_ids)
        pool.close()
        pool.join()

        #save the final walk result 
        file_result = open(args.tag + "_walk_path", "w")
        for walk in walks:
            for walk_node in walk:
                walk_node_str = " ".join([str(node) for node in walk_node])
                file_result.write(walk_node_str + "\n")
        file_result.close()
        print('process struc2vec random walk done.')
Example #28
    def get_output_memory(self):
        self.unconscious_block.set_internal_state(self.internal_state)
        self.unconscious_block.set_desired_state(self.desired_state)
        self.unconscious_block.set_inputs(self.input_memories)
        self.unconscious_output = self.unconscious_block.get_outputs()
        self.conscious_block.set_desired_state(self.desired_state)
        self.conscious_block.set_internal_state(self.internal_state)
        conscious_inputs = []

        # 3.2.6.1  todo: parallel
        # Init thread's pool, with the determined processor number
        pool = Pool(DetectSystem().cpu_count())
        # Parallel execution
        conscious_inputs = pool.map(lambda memory: memory.get_tail_knowledge(), self.unconscious_output)

        # for memory in self.unconscious_output:
        #     conscious_inputs.append(memory.get_tail_knowledge())
        self.conscious_block.set_inputs(conscious_inputs)
        conscious_output_index = self.conscious_block.get_decision()
        self.conscious_output = self.unconscious_output[conscious_output_index]
        return self.conscious_output
Example #29
File: process.py Project: pg42862/mewpy
class MultiProcessorEvaluator(Evaluator):

    def __init__(self, evaluator, mp_num_cpus):
        """A multiprocessing evaluator

        Args:
            evaluator(function): Evaluation function.
            mp_num_cpus(int): Number of CPUs
        """
        self.pool = Pool(mp_num_cpus)
        self.evaluator = evaluator
        self.__name__ = self.__class__.__name__

    def evaluate(self, candidates, args):
        """
        Values in args will be ignored and not passed to the evaluator to avoid unnecessary pickling in inspyred.
        """
        results = self.pool.map(self.evaluator, candidates)
        return results

    def __call__(self, candidates, args):
        return self.evaluate(candidates, args)
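
A usage sketch of the same evaluator pattern, with a toy fitness function standing in for the real one; the inspyred-style args dict is ignored, as in the class above:

from pathos.multiprocessing import Pool

def toy_evaluator(candidate):
    # hypothetical fitness: sum of the candidate's genes
    return sum(candidate)

class SimpleMPEvaluator:
    def __init__(self, evaluator, mp_num_cpus):
        self.pool = Pool(mp_num_cpus)
        self.evaluator = evaluator

    def evaluate(self, candidates, args):
        # `args` is ignored, mirroring MultiProcessorEvaluator above
        return self.pool.map(self.evaluator, candidates)

if __name__ == '__main__':
    ev = SimpleMPEvaluator(toy_evaluator, 2)
    print(ev.evaluate([[1, 2], [3, 4], [5, 6]], args={}))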
Example #30
    def run(self):
        outputProject=self.args['render']['project']
        outputOwner = self.args['render']['owner']
        statetablefile = self.args['statetableFile']
        rootdir = self.args['projectDirectory']
		
        df = pd.read_csv(statetablefile)
        ribbons = df.groupby('ribbon')
        k=0
        pool = Pool(self.args['pool_size'])
        for ribnum,ribbon in ribbons:
            mydf = ribbon.groupby('ch_name')
            for channum,chan in mydf:
                outputStack = self.args['outputStackPrefix'] + '_%s'%(channum)

                self.logger.info("creating tilespecs and cmds....")
                tilespecpaths,mipmap_args = make_tilespec_from_statetable(chan,rootdir,outputProject,outputOwner,outputStack,0,65000)
                self.logger.info("importing tilespecs into render....")
                self.logger.info("creating downsampled images ...")

                results=pool.map(create_mipmap_from_tuple,mipmap_args)

                #groups = [(subprocess.Popen(cmd,\
                # stdout=subprocess.PIPE) for cmd in cmds)] \
                # * self.args['pool_size'] # itertools' grouper recipe
                #for processes in izip_longest(*groups): # run len(processes) == limit at a time
                #   for p in filter(None, processes):
                #        p.wait()
                self.logger.info("uploading to render ...")
                if k==0:
                    #renderapi.stack.delete_stack(outputStack,owner=outputOwner,
                    #project=outputProject,render=self.render)
                    renderapi.stack.create_stack(outputStack,owner=outputOwner, cycleNumber=1, cycleStepNumber=1,
                    project=outputProject,verbose=False,render=self.render)
                    print k
                self.logger.info(tilespecpaths)
                renderapi.client.import_jsonfiles_parallel(outputStack,tilespecpaths,render=self.render)
                
            k+=1
Example #31
    def __call__(self, with_mp=False):
        cm   = self.scatm.cmodel
        scat = self.scatm.smodel

        cgeo = np.pi * np.power( self.dist.a * c.micron2cm(), 2 )

        # Test for graphite case
        if cm.cmtype == 'Graphite':
            if np.size(self.dist.a) > 1:
                for i in range( np.size(self.dist.a) ):
                    self.qsca_pe[:,i] = scat.Qsca( self.E, a=self.dist.a[i], cm=cmi.CmGraphite(size=cm.size, orient='perp') )
                    self.qsca_pa[:,i] = scat.Qsca( self.E, a=self.dist.a[i], cm=cmi.CmGraphite(size=cm.size, orient='para') )
            else:
                self.qsca_pe = scat.Qsca( self.E, a=self.dist.a, cm=cmi.CmGraphite(size=cm.size, orient='perp') )
                self.qsca_pa = scat.Qsca( self.E, a=self.dist.a, cm=cmi.CmGraphite(size=cm.size, orient='para') )
            
            self.qsca = ( self.qsca_pa + 2.0 * self.qsca_pe ) / 3.0

        else:
            if np.size(self.dist.a) > 1:
                if with_mp:
                    pool = Pool(processes=2)
                    self.qsca = np.array(pool.map(self._one_scatter,self.dist.a)).T
                else:
                    for i in range( np.size(self.dist.a) ):
                        self.qsca[:,i] = self._one_scatter(self.dist.a[i])
            else:
                self.qsca = scat.Qsca( self.E, a=self.dist.a, cm=cm )

        if np.size(self.dist.a) == 1:
            kappa = self.dist.nd * self.qsca * cgeo / self.dist.md
        else:
            kappa = np.array([])
            for j in range( np.size(self.E) ):
                kappa = np.append( kappa, \
                                   c.intz( self.dist.a, self.dist.nd * self.qsca[j,:] * cgeo ) / self.dist.md )

        self.kappa = kappa
Example #32
File: variation.py Project: julianoks/Tush
    def evaluation(self, batches, programs):
        def loss_fn(pred, target_idx):
            return -torch.log(torch.nn.functional.softmax(pred)[target_idx])

        def train_validate(program, loss_fn, batches):
            ind = tush.Tush(program)
            ind.stage_two(batches['train'], loss_fn)
            results = ind.stage_three(validation_batch=batches['validation'],
                                      loss_fn=loss_fn)

            return results

        for prog in programs:
            logging.debug(prog)
        a = lambda x: train_validate(x, loss_fn, batches)['accuracy']
        if self.parallel == 'True':

            p = Pool(len(programs))
            accuracy = p.map(a, programs)

        else:
            accuracy = map(a, programs)
        return accuracy
Example #33
    def calculate_centroids(self, p=None):
        """
        Perform integration to find centroid at all turns up to N. Multiprocessing pool used to calculate independent
        turn values.
        Will automatically use `integrate_first_order` or `integrate_second_order` if appropriate.
        Args:
            p: Specify number of processes for pool. If not given then `cpu_count` is used.

        Returns:
            array of floats
        """
        if p:
            pool_size = p
        else:
            pool_size = cpu_count()

        pool = Pool(pool_size)

        #  attempt to speed things up by spreading out difficult integration values at the end of range
        #  appeared to not work
        #     x = []
        #     for i in range(cpu_count()):
        #         x += range(N)[i::4]

        if len(self.mu) == 1:
            integration_function = self.integrate_first_order
        elif len(self.mu) == 2:
            integration_function = self.integrate_second_order
        else:
            integration_function = self.integrate_any_order

        x = range(self.N)
        results = pool.map(integration_function, x)
        pool.close()

        return results
Example #34
from collections import Counter
import glob
# NOTE: the original snippet uses `pool` below without creating it; a pathos Pool is assumed here
from pathos.multiprocessing import Pool
pool = Pool()


type = ['night','day','*']

for t in type:

    f = glob.glob('lhsgroup/*_gps.'+t)
    lf = len(f)
    print 'nfiles',lf

    def readme(x):
        return ['-'.join(set(i.strip().split('-')))  for i in tuple(open(x))]

    batch = pool.map(readme,f)

    l = []

    for i in batch:
        l.extend(i)

    print len(l)




    print 'save collections'
    with open('lhs_'+t+'.txt','w') as f:
                items = Counter(l).items()
                items = sorted(items,key=lambda x:x[1],reverse=True)
Example #35
# pool = Pool(NUM_WORKERS)
# args = [1 for i in range(NUM_SURR)]
# results = pool.map(_corrs_surrs, args)
# pool.close()
# pool.join()

# with open("NCEP-SAT-annual-phase-fluc-%dFTsurrs.bin" % NUM_SURR, "wb") as f:
#     cPickle.dump({'data': index_correlations, 'surrs' : results}, f, protocol = cPickle.HIGHEST_PROTOCOL)


def _corrs_surrs_ind(args):
    index_surr = DataField()
    index_surr.data = get_single_FT_surrogate(index_data.data)
    index_correlations_surrs = get_corrs(net, index_surr)

    return index_correlations_surrs



pool = Pool(NUM_WORKERS)
args = [1 for i in range(NUM_SURR)]
results = pool.map(_corrs_surrs_ind, args)
pool.close()
pool.join()

with open("ECAD-SAT-annual-phase-fluc-%dFTsurrs-from-indices.bin" % NUM_SURR, "wb") as f:
    cPickle.dump({'data': index_correlations, 'surrs' : results}, f, protocol = cPickle.HIGHEST_PROTOCOL)


Example #36
File: search.py Project: markm541374/GPc
def multiPESIS(ojf,lb,ub,ki,b,fnames):
    def f(fn):
        return PESIS(ojf,lb,ub,ki,b,fn)
    p = Pool(nproc)
    return p.map(f,fnames)
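
multiPESIS works only because pathos ships the closure f (which captures ojf, lb, ub, ki and b) to the workers via dill; nproc must be defined in the original module. A runnable stand-in with a toy objective, since PESIS itself is not shown here:

from pathos.multiprocessing import Pool

def multi_run(objective, bounds, fnames, nproc=2):
    def f(fn):
        # the closure captures `objective` and `bounds`; dill lets pathos send it to workers
        return objective(bounds, fn)
    p = Pool(nproc)
    results = p.map(f, fnames)
    p.close()
    p.join()
    return results

if __name__ == '__main__':
    toy = lambda bounds, fn: (fn, sum(bounds))
    print(multi_run(toy, [0.0, 1.0], ['run_a.csv', 'run_b.csv']))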
Example #37
add_me = adder(5)
pinner = pickle.dumps(add_me)
p_add_me = pickle.loads(pinner)
assert add_me(10) == p_add_me(10)

# pickle fails for lambda functions
squ = lambda x:x**2

# test the pickle-ability of inner function
psqu = pickle.dumps(squ)
p_squ = pickle.loads(psqu)
assert squ(10) == p_squ(10)

# if pickle works, then multiprocessing should too
print "Evaluate 10 items on 2 proc:"
pool.ncpus = 2
p_res = pool.map(add_me, range(10))
print pool
print '%s' % p_res
print ''

# if pickle works, then multiprocessing should too
print "Evaluate 10 items on 4 proc:"
pool.ncpus = 4
p2res = pool.map(squ, range(10))
print pool
print '%s' % p2res
print ''

# end of file
Example #38
to_do_periods = np.arange(2,15.5,0.5)
net = ScaleSpecificNetwork('/Users/nikola/work-ui/data/NCEP/air.mon.mean.levels.nc', 
                                    'air', date(1950,1,1), date(2014,1,1), None, None, 
                                    level = 0, dataset="NCEP", sampling='monthly', anom=False)

synchronization = {}
for period in to_do_periods:
    print("running for %.1f period..." % (period))
    _, nao_ph, sg_nao, a_nao = load_NAOindex_wavelet_phase(date(1950,1,1), date(2014,1,1), period, anom=False)
    _, nino_ph, sg_nino, a_nino = load_nino34_wavelet_phase(date(1950,1,1), date(2014,1,1), period, anom=False)
    _, sunspots_ph, sg_sunspots, a_sunspots = load_sunspot_number_phase(date(1950,1,1), date(2014,1,1), period, anom=False)
    _, pdo_ph, sg_pdo, a_pdo = load_pdo_phase(date(1950,1,1), date(2014,1,1), period, anom=False)
    pool = Pool(WORKERS)
    net.wavelet(period, period_unit='y', cut=2, pool=pool)
    args = [(net.phase[:, i, j], i, j, nao_ph, nino_ph, sunspots_ph, pdo_ph) for i in range(net.lats.shape[0]) for j in range(net.lons.shape[0])]
    result = pool.map(_compute_MI_synch, args)
    synchs = np.zeros((4, net.lats.shape[0], net.lons.shape[0]))
    synchs_surrs = np.zeros((NUM_SURRS, 4, net.lats.shape[0], net.lons.shape[0]))
    for i, j, naos, ninos, suns, pdos in result:
        synchs[0, i, j] = naos
        synchs[1, i, j] = ninos
        synchs[2, i, j] = suns
        synchs[3, i, j] = pdos
    for surr in range(NUM_SURRS):
        sg_nao.construct_fourier_surrogates(algorithm='FT')
        sg_nao.add_seasonality(a_nao[0], a_nao[1], a_nao[2])
        sg_nao.wavelet(period, period_unit="y", cut=2)
        sg_nino.construct_fourier_surrogates(algorithm='FT')
        sg_nino.add_seasonality(a_nino[0], a_nino[1], a_nino[2])
        sg_nino.wavelet(period, period_unit="y", cut=2)
        sg_sunspots.construct_fourier_surrogates(algorithm='FT')
Example #39
File: search.py Project: markm541374/GPc
def multiPESVS(ojf,lb,ub,ki,s,b,cfn,lsl,lsu,fnames):
    def f(fn):
        return PESVS(ojf,lb,ub,ki,s,b,cfn,lsl,lsu,fn)
    p = Pool(nproc)
    return p.map(f,fnames)