Example #1
    def _find_inconsistent_list(self, index, label):
        """Return the indices whose features are inconsistent with the
        feature at `index`: matches (label == 1) are probed against the
        non-match set and non-matches against the match set. Each worker
        collects hits into a private list and publishes it once through a
        shared dict to avoid contended per-element writes."""
        incons = []
        threads2incons = pymp.shared.dict()
        feature = self.features[index]
        if label == 1:
            num_nonmatch = len(self.nonmatch_indices)
            with pymp.Parallel(self.num_cores) as p:
                local_incons = []
                for i in p.range(num_nonmatch):
                    k = self.nonmatch_indices[i]
                    if self.compare_features(feature, self.features[k],
                                             self.min_con_dim):
                        local_incons.append(k)
                threads2incons[p.thread_num] = local_incons

            for tmp in threads2incons.values():
                incons.extend(tmp)

        else:
            num_match = len(self.match_indices)
            with pymp.Parallel(self.num_cores) as p:
                local_incons = []
                for i in p.range(num_match):
                    k = self.match_indices[i]
                    if self.compare_features(self.features[k], feature,
                                             self.min_con_dim):
                        local_incons.append(k)
                threads2incons[p.thread_num] = local_incons

            for tmp in threads2incons.values():
                incons.extend(tmp)

        return incons
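A minimal, self-contained sketch of the same gather pattern (per-thread local lists merged through a pymp.shared.dict); the function name, data, and threshold here are made up for illustration:

import pymp

def collect_matches(values, threshold, num_cores=4):
    # Each worker fills a private Python list, then publishes it once
    # through the shared dict, avoiding contended writes per element.
    threads2matches = pymp.shared.dict()
    with pymp.Parallel(num_cores) as p:
        local = []
        for i in p.range(len(values)):
            if values[i] > threshold:
                local.append(i)
        threads2matches[p.thread_num] = local

    matches = []
    for part in threads2matches.values():
        matches.extend(part)
    return matches

if __name__ == '__main__':
    print(collect_matches([5, 1, 9, 3, 7], threshold=4))  # e.g. [0, 2, 4]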
Example #2
 def test_print(self):  # pylint: disable=no-self-use
     """Test the print method."""
     import pymp
     pymp.config.thread_limit = 3
     pymp.config.nested = True
     with pymp.Parallel(2):
         with pymp.Parallel(2) as p:
             p.print("Hi from thread {0}.".format(p.thread_num))
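The same nested-printing idea as a standalone sketch; pymp.config.thread_limit caps the total number of worker processes, and pymp.config.nested must be set before nesting parallel regions:

import pymp

pymp.config.thread_limit = 3
pymp.config.nested = True

with pymp.Parallel(2):
    with pymp.Parallel(2) as p:
        # p.print is intended to serialize output so lines from
        # different workers do not interleave mid-line
        p.print("Hi from thread {0}.".format(p.thread_num))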
Example #3
def create_roiset(IMAGE, ROISIZE=1, extend=True):
    """
    Create roi set of the given image by creating an image containing the average value of pixels within the
    specified ROISIZE. The returned image will have twice the size in the third axis as the both halfs will be doubled
    for the peak detection.

    Arguments:
        IMAGE: Image containing multiple images in a 3D-stack
        ROISIZE: Size in pixels which are used to create the region of interest image

    Returns:
        numpy.array: Image with shape [x/ROISIZE, y/ROISIZE, 2*'number of measurements'] containing the average value
        of the given roi for each image in z-axis.
    """
    # Get image dimensions
    x = IMAGE.shape[0]
    y = IMAGE.shape[1]
    number_of_measurements = IMAGE.shape[2]
    nx = numpy.ceil(x / ROISIZE).astype('int')
    ny = numpy.ceil(y / ROISIZE).astype('int')

    if extend:
        roi_set = pymp.shared.array((nx * ny, 2 * number_of_measurements),
                                    dtype='float32')
    else:
        roi_set = pymp.shared.array((nx * ny, number_of_measurements),
                                    dtype='float32')

    # ROISIZE == 1 is exactly the same as the original image
    if ROISIZE > 1:
        with pymp.Parallel(CPU_COUNT) as p:
            for i in p.range(0, nx):
                for j in range(0, ny):
                    # Create average of selected ROI and append two halfs to the front and back
                    roi = IMAGE[ROISIZE * i:ROISIZE * i + ROISIZE,
                                ROISIZE * j:ROISIZE * j + ROISIZE, :]
                    average_per_dimension = numpy.average(numpy.average(
                        roi, axis=1),
                                                          axis=0).flatten()
                    if extend:
                        average_per_dimension = numpy.concatenate(
                            (average_per_dimension[-number_of_measurements //
                                                   2:], average_per_dimension,
                             average_per_dimension[:number_of_measurements //
                                                   2]))
                    roi_set[i * ny + j] = average_per_dimension
    else:
        with pymp.Parallel(CPU_COUNT) as p:
            for i in p.range(0, nx):
                for j in range(0, ny):
                    roi = IMAGE[i, j, :]
                    if extend:
                        roi = numpy.concatenate(
                            (roi[-number_of_measurements // 2:], roi,
                             roi[:number_of_measurements // 2]))
                    roi_set[i * ny + j] = roi

    return roi_set
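A hypothetical call to create_roiset; the module-level CPU_COUNT it relies on is assumed to come from multiprocessing:

import multiprocessing
import numpy
import pymp

CPU_COUNT = multiprocessing.cpu_count()  # assumed module-level constant

image = numpy.random.rand(120, 120, 24).astype('float32')  # made-up SLI stack
rois = create_roiset(image, ROISIZE=3)
print(rois.shape)  # (1600, 48): 40*40 profiles, 24 measurements doubled to 48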
Example #4
def eqs_gen(n, thread):
    # Nested parallel regions require pymp.config.nested = True
    eqs = pymp.shared.list()
    with pymp.Parallel(thread) as p1:
        with pymp.Parallel(thread) as p2:
            for i in p1.range(1, n + 1):
                for j in p2.range(1, n + 1):
                    eqs.extend(eqs_gen_ij(i, j, n))
    # Return after both regions have joined; returning from inside them
    # would make every worker process take the early exit
    return eqs
Example #5
def power_law(img, gamma):
    # img is expected to be a pymp.shared.array: pymp forks worker processes,
    # so writes to a regular ndarray would be lost when they exit.
    # The nested regions also require pymp.config.nested = True.
    x = img.shape[0]
    y = img.shape[1]
    with pymp.Parallel(2) as p1:
        with pymp.Parallel(2) as p2:
            for i in p1.range(0, x):
                for j in p2.range(0, y):
                    img[i][j] = 255 * (img[i][j] / 255)**gamma

    return img
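Because pymp forks worker processes, the in-place writes above only persist if img lives in shared memory; a sketch (variable names are illustrative) of preparing a regular NumPy image before calling power_law:

import numpy
import pymp

pymp.config.nested = True  # power_law nests two Parallel regions

raw = numpy.random.randint(0, 256, size=(64, 64)).astype('float64')
img = pymp.shared.array(raw.shape, dtype='float64')
img[:] = raw  # copy into shared memory so the workers' writes survive

out = power_law(img, gamma=0.5)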
Example #6
def binarize_array(numpy_array, threshold=200):
    # numpy_array should be a pymp.shared.array so the workers' writes are
    # visible in the parent; nested regions need pymp.config.nested = True
    with pymp.Parallel(2) as p1:
        with pymp.Parallel(2) as p2:
            for i in p1.range(len(numpy_array)):
                for j in p2.range(len(numpy_array[0])):
                    if numpy_array[i][j] > threshold:
                        numpy_array[i][j] = 0
                    else:
                        numpy_array[i][j] = 255
    return numpy_array
def sobel(img):
    # img should be a pymp.shared.array. Note the in-place update: the result
    # is written one pixel up-left of the window being read, so workers may
    # read pixels that were already filtered.
    x = img.shape[0]
    y = img.shape[1]
    with pymp.Parallel(2) as p1:
        with pymp.Parallel(2) as p2:
            for i in p1.range(1, x - 2):
                for j in p2.range(1, y - 2):
                    img[i - 1][j - 1] = math.sqrt(
                        sobel_util_horizontal(img, i, j)**2 +
                        sobel_util_vertical(img, i, j)**2)
    return img
Example #8
 def test_xrange(self):
     """Test the dynamic schedule."""
     import pymp
     pymp.config.thread_limit = 4
     pymp.config.nested = True
     tlist = pymp.shared.list()
     with pymp.Parallel(2):
         with pymp.Parallel(2) as p:
             for idx in p.xrange(5):
                 tlist.append(idx)
     self.assertEqual(len(tlist), 10)
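For contrast with p.range (static schedule: each worker receives a fixed chunk of indices up front), p.xrange hands out indices on demand, which balances iterations of uneven cost; a small illustrative sketch:

import time
import pymp

work = [0.001 * (i % 7) for i in range(40)]  # made-up uneven task durations

with pymp.Parallel(4) as p:
    for i in p.range(len(work)):   # static: contiguous chunk per worker
        time.sleep(work[i])

with pymp.Parallel(4) as p:
    for i in p.xrange(len(work)):  # dynamic: next index goes to the first free worker
        time.sleep(work[i])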
def otsu_th():
    print("Otsu's binarization process starts now.\n")
    # Histogram generation
    for y in range(0, y_size1):
        for x in range(0, x_size1):
            hist[image1[y][x]] += 1

    # Calculation of probability density
    for i in range(0, GRAYLEVEL):
        prob[i] = float(hist[i]) / (x_size1 * y_size1)
    for i in range(0, 256):
        print("Serial: " + str(prob[i]))

    # omega & myu generation
    omega[0] = prob[0]
    myu[0] = 0.0  # 0.0 times prob[0] equals zero
    for i in range(1, GRAYLEVEL):
        omega[i] = omega[i - 1] + prob[i]
        myu[i] = myu[i - 1] + i * prob[i]

    # Sigma maximization: sigma stands for inter-class variance and
    # determines the optimal threshold value
    threshold = 0
    max_sigma = 0.0
    for i in range(0, GRAYLEVEL - 1):
        if omega[i] != 0.0 and omega[i] != 1.0:
            sigma[i] = ((myu[GRAYLEVEL - 1] * omega[i] - myu[i])**2) / \
                       (omega[i] * (1.0 - omega[i]))
        else:
            sigma[i] = 0.0
        if sigma[i] > max_sigma:
            max_sigma = sigma[i]
            threshold = i

    print("\nthreshold value = " + str(threshold))

    # Binarization output into image2 (image2 must be a pymp.shared.array
    # for the workers' writes to persist; nested regions also require
    # pymp.config.nested = True)
    x_size2 = x_size1
    y_size2 = y_size1

    with pymp.Parallel(2) as p1:
        with pymp.Parallel(2) as p2:
            for y in p1.range(0, y_size2):
                for x in p2.range(0, x_size2):
                    if image1[y][x] > threshold:
                        image2[y][x] = MAX_BRIGHTNESS
                    else:
                        image2[y][x] = 0
Example #10
    def process_page(self, thread=4):
        # Collect parsed pages in a shared list: a plain `return` inside the
        # parallel region would make the worker processes exit mid-loop
        results = pymp.shared.list()
        for i in range(2300):
            try:
                search_url = self.create_url_for_search_page()
                urls = self.find_building_page_in_search_page(search_url)

                with pymp.Parallel(thread) as p:
                    for url in p.iterate(urls):
                        if (url not in self.cache_urls) and (
                                url not in self.addr_faulty_urls):
                            try:
                                page_metadata = self.parse_building_page(url)
                                p.print("New url: " + url)
                            except Exception as e:
                                print(e)
                                page_metadata = None
                        elif url in self.addr_faulty_urls:
                            try:
                                page_metadata = self.parse_building_page(url)
                                p.print("Update addr: " + url)
                            except Exception as e:
                                print(e)
                                page_metadata = None
                        else:
                            page_metadata = None
                            p.print("Already cached: " + url)

                        if page_metadata is not None:
                            results.append(page_metadata)
            except Exception as e:
                print(e)
        return list(results)
Example #11
def peakdistance_image(roiset,
                       low_prominence=TARGET_PROMINENCE,
                       high_prominence=numpy.inf,
                       cut_edges=True,
                       centroid_calculation=True):
    """
    Calculate the mean peak distance in degrees between two corresponding peaks for each line profile in an SLI image
    series.
    Note: Please do not use this method when evaluating many line profiles while generating most if not all of the
    parameter maps. In this case, it is faster to write a simple pipeline as seen in 'SLIXParameterGenerator'.

    Parameters
    ----------
    roiset: Full SLI measurement (series of images) which is prepared for the pipeline using the SLIX toolbox methods.
    low_prominence: Lower prominence bound for detecting a peak.
    high_prominence: Higher prominence bound for detecting a peak.
    cut_edges: If True, only consider peaks within the second third of all detected peaks.
    centroid_calculation: Use centroid calculation to better determine the peak position regardless of the number of
    measurements / illumination angles used.

    Returns
    -------
    NumPy array of floating point values containing the mean peak distance of the line profiles in degrees.
    """
    return_value = pymp.shared.array((roiset.shape[0], 1), dtype=numpy.float64)
    pbar = tqdm.tqdm(total=len(roiset), desc='Peak distance')
    number_of_finished_pixels = pymp.shared.array(CPU_COUNT, dtype=numpy.int64)
    last_sum_of_finished_pixels = 0
    active_cores = pymp.shared.array(CPU_COUNT, dtype=numpy.bool_)
    active_cores[:] = True

    with pymp.Parallel(CPU_COUNT) as p:
        number_of_finished_pixels[p.thread_num] = 0
        for i in p.range(0, len(roiset)):
            roi = roiset[i]
            peaks = all_peaks(roi, cut_edges)
            peaks = accurate_peak_positions(peaks, roi, low_prominence,
                                            high_prominence,
                                            centroid_calculation)
            return_value[i] = peakdistance(peaks, len(roi))

            number_of_finished_pixels[p.thread_num] += 1
            if p.thread_num == 0 and number_of_finished_pixels[
                    p.thread_num] % 1000 == 0:
                sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
                pbar.update(sum_of_finished_pixels -
                            last_sum_of_finished_pixels)
                last_sum_of_finished_pixels = sum_of_finished_pixels
        # When one core has finished, mark it. As long as not all threads are finished continue to update the
        # progress bar.
        active_cores[p.thread_num] = False
        if p.thread_num == 0:
            while numpy.any(active_cores):
                time.sleep(0.5)
                sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
                pbar.update(sum_of_finished_pixels -
                            last_sum_of_finished_pixels)
                last_sum_of_finished_pixels = sum_of_finished_pixels
            pbar.close()
    return return_value
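The progress reporting above boils down to two shared arrays, with worker 0 doubling as the reporter; a stripped-down sketch of the same pattern with stand-in work:

import time
import numpy
import pymp
import tqdm

N, CORES = 2000, 4
done = pymp.shared.array((CORES,), dtype=numpy.int64)
active = pymp.shared.array((CORES,), dtype=numpy.bool_)
active[:] = True
pbar = tqdm.tqdm(total=N)

with pymp.Parallel(CORES) as p:
    reported = 0  # only worker 0 (the parent process) keeps this up to date
    for i in p.range(N):
        time.sleep(0.0005)  # stand-in for real work
        done[p.thread_num] += 1
        if p.thread_num == 0 and done[0] % 100 == 0:
            total = int(numpy.sum(done))
            pbar.update(total - reported)
            reported = total
    active[p.thread_num] = False  # this worker is finished
    if p.thread_num == 0:  # keep polling until every worker has finished
        while numpy.any(active):
            time.sleep(0.1)
            total = int(numpy.sum(done))
            pbar.update(total - reported)
            reported = total
        pbar.close()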
Example #12
def top_25_recommended_movies(pred_rating_file, users, unrated_movies_per_user,
                              movies_mapping_names, movie_mapping_id, thr):
    #dictionary with numpy movie id as key and actual movie id as value
    reverse_movie_id_mapping = {}
    for key, val in movie_mapping_id.items():
        reverse_movie_id_mapping[val] = key
    #for each user, predict top 25 movies
    with pymp.Parallel(thr) as p:
        # for user in users:
        for u in p.range(0, len(users)):
            user = users[u]
            dict_pred_unrated_movies = {}
            unrated_movies = unrated_movies_per_user[int(user)]
            for unrated_movie in unrated_movies:
                dict_pred_unrated_movies[int(unrated_movie)] = \
                    pred_rating_file[int(user) - 1][int(unrated_movie) - 1]
            #recommend top k movies
            SortedMovies = sorted(dict_pred_unrated_movies.items(),
                                  key=operator.itemgetter(1),
                                  reverse=True)
            # print ("Top 25 movies recommendation for the user", user)
            for i in range(25):
                movie_id, rating = SortedMovies[i]
                actual_movie_id = reverse_movie_id_mapping[movie_id]
Example #13
def plot_pipeline(run_dir, label2id):
    true_label_filename = os.path.join(run_dir, 'labels.txt')
    tsne_feat_filename = os.path.join(run_dir, 'v_feat.txt')
    if False:
        logging.info('visualize distance matrix in {}'.format(run_dir))
        visualize_distance(os.path.join(run_dir, 'distance_matrix.txt'),
                           true_label_filename,
                           os.path.join(run_dir, 'distance.png'))
    logging.info('visualize tsne feat with true labels in {}'.format(run_dir))
    plot_tsne_feat(tsne_feat_filename, true_label_filename,
                   os.path.join(run_dir, 'tsne_feat.png'), label2id)

    all_dir = list(os.listdir(run_dir))
    all_dir = [
        os.path.join(run_dir, d) for d in all_dir
        if os.path.isdir(os.path.join(run_dir, d))
    ]
    import pymp
    with pymp.Parallel(len(all_dir)) as p:
        for i in p.range(len(all_dir)):
            cur_dir = all_dir[i]
            if False:
                logging.info('visualize distance matrix in {}'.format(cur_dir))
                visualize_distance(
                    os.path.join(cur_dir, 'distance_matrix.txt'),
                    os.path.join(cur_dir, 'labels.txt'),
                    os.path.join(cur_dir, 'distance.png'))
            logging.info(
                'visualize tsne feat with pred labels in {}'.format(cur_dir))
            plot_tsne_feat(tsne_feat_filename,
                           os.path.join(cur_dir, 'pred.txt'),
                           os.path.join(cur_dir, 'tsne_feat.png'))
Example #14
 def test_num_threads(self):
     """Test num threads property."""
     import pymp
     pymp.config.nested = False
     pymp.config.thread_limit = 4
     tlist = pymp.shared.list()
     with pymp.Parallel(2) as p:
         tlist.append(p.num_threads)
     self.assertEqual(list(tlist), [2, 2])
     pymp.config.nested = True
     tlist = pymp.shared.list()
     with pymp.Parallel(2) as p:
         with pymp.Parallel(2) as p2:
             tlist.append(p2.num_threads)
     self.assertEqual(list(tlist), [2, 2, 2, 2])
Example #15
    def _get_inconsistency_indices(self):

        # clustering
        self.cluster_indices_for_all_features()

        #self.create_mixes()
        self.create_sets()
        #self.create_bitarrays()

        if self.num_cores == 1:
            # probing
            for match_pair_pos, match_index in self.match_pos_to_index.items():
                #incons_nonmatch_indices = self.get_list_of_inconsistent_indices_mixes(match_pair_pos)
                incons_nonmatch_indices = self.get_list_of_inconsistent_indices_vset(
                    match_pair_pos)
                #incons_nonmatch_indices = self.get_list_of_inconsistent_indices_vbitarray(match_pair_pos)

                if len(incons_nonmatch_indices) == 0:
                    continue

                self.index2incons[match_index] = incons_nonmatch_indices
                for nonmatch_index in incons_nonmatch_indices:
                    if nonmatch_index in self.index2incons:
                        self.index2incons[nonmatch_index].append(match_index)
                    else:
                        self.index2incons[nonmatch_index] = [match_index]

        else:
            # probing
            threads2incons = pymp.shared.dict()
            tmp = list(self.match_pos_to_index.items())
            num = len(tmp)

            with pymp.Parallel(self.num_cores) as p:
                local_index2incons = {}

                #for match_pair_pos, match_index in p.iterate(self.match_pos_to_index.items()):
                for i in p.range(num):
                    match_pair_pos, match_index = tmp[i]
                    #incons_nonmatch_indices = self.get_list_of_inconsistent_indices_mixes(match_pair_pos)
                    incons_nonmatch_indices = self.get_list_of_inconsistent_indices_vset(
                        match_pair_pos)
                    #incons_nonmatch_indices = self.get_list_of_inconsistent_indices_vbitarray(match_pair_pos)

                    if len(incons_nonmatch_indices) == 0:
                        continue

                    local_index2incons[match_index] = incons_nonmatch_indices

                threads2incons[p.thread_num] = local_index2incons
                #print(p.num_threads, p.thread_num)

            for _, local_index2incons in threads2incons.items():
                for mi, ni_indices in local_index2incons.items():
                    self.index2incons[mi] = ni_indices
                    for ni in ni_indices:
                        if ni in self.index2incons:
                            self.index2incons[ni].append(mi)
                        else:
                            self.index2incons[ni] = [mi]
Example #16
def iteration_of_mutation(generation, number_of_iteration, number_of_nodes,
                          number_of_processes):
    start_mutation_time = time.time()
    # Shared list: appends made by the forked workers must survive the join
    generation_return = pymp.shared.list()
    tau = (np.sqrt(2 * np.sqrt(number_of_nodes + 1)))**(-1)
    vau = (np.sqrt(2 * number_of_nodes + 1))**(-1)
    with pymp.Parallel(number_of_processes) as p:
        for index in p.xrange(0, len(generation)):

            with p.lock:
                individual = generation[index]
            zeta = np.random.normal()

            for i in range(0, len(individual.parameters)):
                zeta_item = np.random.normal()
                epsilon_item = np.random.normal()
                individual.standard_deviation[i] = (
                    individual.standard_deviation[i] *
                    np.exp(tau * zeta + vau * zeta_item))

                if abs(individual.standard_deviation[i] * epsilon_item) > 0.8:
                    r2 = random.randint(0, len(individual.parameters) - 1)
                    individual.parameters[i], individual.parameters[r2] = \
                        individual.parameters[r2], individual.parameters[i]
            generation_return.append(individual)

    end_mutation_time = time.time()
    # print("Time of one iteration of mutation:", end_mutation_time - start_mutation_time)
    return list(generation_return)
def MapReduce(total_threads):
    start = time.time()
    file_list = getFilenames()
    loaded_files = []

    # Files are loaded and ready to be read
    for file in file_list:
        loaded_files.append(file.read())
    end = time.time()
    print("Loading files runtime:", end - start, "s")

    words = getWords()
    resultant_dict = pymp.shared.dict()
    # Initialize the shared dictionary once, before forking the workers;
    # doing it inside the parallel region would let each worker re-zero
    # counts that other workers have already accumulated.
    for curr_word in words:
        resultant_dict[curr_word] = 0

    start = time.time()
    with pymp.Parallel(total_threads) as p:
        for curr_file in p.iterate(loaded_files):
            for curr_word in words:
                # Match whole words only (raw strings avoid invalid escapes)
                rex = r'(?<![\w\d])' + curr_word + r'(?![\w\d])'
                occurrences = re.findall(rex, curr_file, re.IGNORECASE)
                # Shared-dict increments are read-modify-write: lock them
                with p.lock:
                    resultant_dict[curr_word] += len(occurrences)
    end = time.time()
    print("Counting words runtime:", end - start, "s")
    return resultant_dict
def weighted_average_parallel(real, weights, arr):
  # Shared list, pre-sized so each worker fills its own slots in order
  final = pymp.shared.list([""] * (len(real) - 4))

  # Calculating weights: rank them so the smallest weight gets 1,
  # the largest gets len(weights)
  copy = [0] * len(weights)
  order = sorted(range(len(weights)), key=lambda k: weights[k])
  for rank, idx in enumerate(order, start=1):
    copy[idx] = rank

  with pymp.Parallel(2) as p:
    # No lock needed: workers only read real/arr/copy and write disjoint slots
    for i in p.range(4, len(real)):
      before = real[i - 1]
      t, f, ind = 0, 0, 0
      for j in arr:
        s = sum(j[i - 4:i + 1]) / 5
        if s > before:
          t = t + (1 * copy[ind])
        else:
          f = f + (1 * copy[ind])
        ind = ind + 1
      final[i - 4] = "Buy" if t > f else "Don't Buy"
  return list(final)
def compute_local_pe(df_list, cnt_id, threshold=3.0):

    pe = pymp.shared.array((len(df_list), ), dtype='float64')

    with pymp.Parallel(multiprocessing.cpu_count()) as p:
        for t in p.range(0, len(df_list)):

            df = df_list[t]

            #locate cnt atom
            df_select = df.loc[df['id'] == cnt_id]
            mat_select = df_select.loc[:, ['x', 'y', 'z']].to_numpy()
            mat_other = df.loc[:, ['x', 'y', 'z']].to_numpy()
            pe_all = df.loc[:, ['c_poteng']].to_numpy()

            dist = compute_distance(mat_select, mat_other)

            idx = np.where(dist < threshold)

            pe_select = pe_all[idx[0]]

            pe[t] = np.sum(pe_select)

            #dist = compute_distance(mat_select, mat_other)

    T = np.arange(0, len(pe), 1, dtype=float)

    plt.plot(T, pe, 'k-')
    plt.show()

    return None
Example #20
def np2pymp(x, thr):
    result = pymp.shared.array((x.shape[0], x.shape[1]))
    with pymp.Parallel(thr) as p:
        for i in p.range(0, x.shape[0]):
            for j in range(x.shape[1]):
                result[i][j] = x[i][j]
    return result
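A quick round trip for np2pymp; since pymp.shared.array is itself a NumPy array backed by shared memory, numpy.array(...) detaches a private copy:

import numpy as np
import pymp

x = np.arange(12, dtype='float64').reshape(3, 4)
shared = np2pymp(x, thr=2)
back = np.array(shared)  # private copy, detached from shared memory
assert np.array_equal(back, x)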
def exponential_average_parallel(real, weights, arr):
  # Shared list, pre-sized so each worker fills its own slots in order
  final = pymp.shared.list([""] * (len(real) - 4))

  # Calculating weights: rank them so the smallest weight gets 1,
  # the largest gets len(weights)
  copy = [0] * len(weights)
  order = sorted(range(len(weights)), key=lambda k: weights[k])
  for rank, idx in enumerate(order, start=1):
    copy[idx] = rank

  exp = [0.1, 0.2, 0.3, 0.4, 0.5]  # weights over the last five samples, sum = 1.5

  with pymp.Parallel(2) as p:
    # No lock needed: workers only read real/arr/copy and write disjoint slots
    for i in p.range(4, len(real)):
      before = real[i - 1]
      t, f, ind = 0, 0, 0
      for j in arr:
        # Exponentially weighted average of the last five samples
        s = sum(k * w for k, w in zip(j[i - 4:i + 1], exp)) / 1.5
        if s > before:
          t = t + (1 * copy[ind])
        else:
          f = f + (1 * copy[ind])
        ind = ind + 1
      final[i - 4] = "Buy" if t > f else "Don't Buy"
  return list(final)
Example #22
def main():
    """
    main function for when running as a script
    """

    # two lists of numbers
    listOfNumsA = [num for num in range(0,10)]
    listOfNumsB = [num for num in range(0,10)]

    sums = [0 for i in range(len(listOfNumsA))]
    sharedSums = pymp.shared.list(sums) # sharedSums = [x for x in sums]

    # alternatively you could do this
    #
    # empty list
    # sharedSums = pymp.shared.list()
    # for i in range( len(listOfNumsA) ):
    #   sharedSums.append(0)

    
    with pymp.Parallel() as p:
        #split indices across threads
        for index in p.range( len(listOfNumsA) ):
            sharedSums[index] = listOfNumsA[index] + listOfNumsB[index]

        # uncomment to see the work the individual threads did
        # print(f'Summed list for thread {p.thread_num} {sharedSums}')
    print(f'Summed list {sharedSums}')
Example #23
 def test_if(self):
     """Test the if_ deactivation."""
     import pymp
     pymp.config.thread_limit = 3
     pymp.config.nested = True
     with pymp.Parallel(if_=False) as p:
         self.assertEqual(p.num_threads, 1)
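The if_ flag mirrors OpenMP's if clause: when it evaluates false, the region runs in the calling process alone. A sketch (function name and threshold are illustrative) of using it to skip fork overhead on small inputs:

import pymp

def scale(values, factor, min_parallel=10000):
    out = pymp.shared.array((len(values),), dtype='float64')
    # Fork workers only when the input is large enough to amortize the cost
    with pymp.Parallel(4, if_=len(values) >= min_parallel) as p:
        for i in p.range(len(values)):
            out[i] = values[i] * factor
    return out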
def get_neighbors(image_list, image_feature):

    total_image = len(image_list)
    batch_size = 1
    p_length = int(total_image / batch_size)
    easy_pool = pymp.shared.array((total_image, 100))
    hard_pool = pymp.shared.array((total_image, 100))

    with pymp.Parallel(40) as p:
        for index in p.range(0, p_length):
            diff = image_feature - image_feature[index]
            L2_dis = np.sqrt(np.sum(pow(diff, 2), 1) / 2048)
            # sort the l2 distance.
            hard_pool[index] = np.argsort(L2_dis)[1:101]
            easy_pool[index] = np.argsort(-L2_dis)[:100]
            print('finish worker', index)

    rand_pool = np.zeros((total_image, 100))
    for i in range(total_image):
        rand_idx = np.random.permutation(total_image)
        rand_pool[i] = rand_idx[:100]  # [val_image_list[rand_idx[i]] for i in range(100)]

    return hard_pool, easy_pool, rand_pool
Example #25
def MontyCarloMarkovChain(NewGraphStructure):
	numWalkers = 4
	unnaproved = pymp.shared.list()  # shared: appends from the worker processes must survive

	l = int(numpy.maximum(0, NewGraphStructure["noOfNodes"] - 20.0*NewGraphStructure["nodeArrivalSpeed"]))
	u = int(numpy.maximum(1, NewGraphStructure["noOfNodes"] - 10.0*NewGraphStructure["nodeArrivalSpeed"]))
	Nodes = NewGraphStructure["NodeList"][l:u]
	particles = numpy.random.choice(Nodes, min(numWalkers, len(Nodes)), replace=False)
	rng = len(particles)
	time_shared = pymp.shared.list()
	
	#st = time.time()
	with pymp.Parallel(4) as p :
		#sharedtraversalpath_1 = []
		start = time.time()
		for node in p.range(0,rng):
			onenode = RandomWeightedWalk(p,NewGraphStructure,particles[node])
			with p.lock :
				unnaproved.append(onenode)
		end = time.time()
		#print(end-start)
		with p.lock :
			time_shared.append(end-start)
		#l = random.choice(sharedtraversalpath,k=2)
	#et = time.time()
	#print(et-st)
	global globaltime
	#print(time_shared)
	globaltime.append(max(time_shared))
	return unnaproved
Example #26
def main(argv):
    from multiprocessing import Process, Manager, freeze_support
    import pymp
    procs = []
    jobs = []
    inputfile = ''
    cachestr = []
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["ifile=", "ofile="])
    except getopt.GetoptError:
        print("test.py -i " & inputfile)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print("test.py -i" & inputfile)
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
    inputfile = adjust(inputfile)
    pymp.config.thread_limit = 1
    pymp.config.nested = True
    listo = pymp.shared.list()
    with pymp.Parallel(4) as p:
        for x in p.iterate(args):  # assumption: the leftover CLI args; the original looped over the builtin `list`
            cache = (scanningmp(x, inputfile))
            if not (None in cache or '[]' in cache):
                listo.extend(cache)
    flat_list = str(listo)
    #matches = match.split()
    #flat_list = reduce(operator.concat, (flat_list))
    #matches = reduce(operator.concat, (matches))
    print(flat_list)
    return flat_list
def get_n_intersections(origin, direction, stl_mesh):

    n_intersections_array = pymp.shared.array((1, ), dtype='uint8')
    n_intersections_array[0] = 0

    # loop over all triangles in mesh
    n_triangles = len(stl_mesh.points)
    with pymp.Parallel(4) as par:
        for i in par.range(0, n_triangles):
            p = stl_mesh.points[i]
            # p contains the 9 entries [p1x p1y p1z p2x p2y p2z p3x p3y p3z] of the triangle with corner points (p1,p2,p3)

            p1 = np.array(p[0:3])
            p2 = np.array(p[3:6])
            p3 = np.array(p[6:9])

            center = (p1 + p2 + p3) / 3.
            if np.linalg.norm(origin - center) < 1e-13:
                continue

            # check if ray intersects triangle
            intersects = ray_triangle_intersection(origin, direction,
                                                   (p1, p2, p3))

            if intersects:
                with par.lock:
                    n_intersections_array[0] += 1

    return n_intersections_array[0]
Example #28
def multiplyParallel(mA, mB, numThreads=4, verbose=False):

    rowsA = mA.shape[0]
    rowsB = mB.shape[0]

    colsA = mA.shape[1]
    colsB = mB.shape[1]

    #for matrix multiplication colsA must equal rowsB
    if colsA != rowsB:
        raise Exception(
            "These two matrices can't be multiplied, check shapes!!!")

    #Create resulting array
    #result = [[0 for col in range(0,colsB)] for row in range(0,rowsA)]
    result = pymp.shared.array((rowsA, colsB), dtype="float64")
    #result = np.asarray(result)

    rowsList = []

    #populate
    with pymp.Parallel(numThreads) as p:
        for row in p.range(0, rowsA):
            for col in range(0, colsB):
                for i in range(0, rowsB):
                    result[row][col] += mA[row][i] * mB[i][col]
            rowsList.append(row)

        if verbose:
            print("Thread {} of {}, working on rows {} to {}".format(
                p.thread_num, p.num_threads, min(rowsList), max(rowsList)))

    return result
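A quick sanity check of multiplyParallel against NumPy's built-in matrix product:

import numpy as np

a = np.random.rand(32, 16)
b = np.random.rand(16, 8)
c = multiplyParallel(a, b, numThreads=4)
assert np.allclose(c, a @ b)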
def findIndex(node):
    # Report the result through a shared array: a plain local assigned inside
    # the parallel region is lost when the worker processes exit
    index = pymp.shared.array((1, ), dtype='int64')
    index[0] = -1
    with pymp.Parallel(2) as p:
        for i in p.range(1, len(bigList)):
            if bigList[i][0][0] == node:
                index[0] = i
    return int(index[0])
Example #30
def grid_search_class_factors(probs, labels, weights, num_grids=10):
  manager = Manager() 
  class_factors_dict = manager.dict()
  with pymp.Parallel(12) as p:
    for i in tqdm(p.range(num_attrs), ascii=True):
    #for i in p.range(num_attrs):
      #p.print(i, ATTRIBUTES[i])
      #p.print('init counts:', get_counts(probs[:, i]))
      index = np.argsort(-np.array(weights[i]))
      def is_ok(factor):
        return np.sum(np.argsort(-factor) == index) == 4
      best = 0
      for a in tqdm(range(1,1 + num_grids), ascii=True):
      #for a in(range(1,1 + num_grids)):
        for b in range(1,1 + num_grids):
          for c in range(1,1 + num_grids):
            for d in range(1,1 + num_grids):
              factor = np.array([a, b, c, d], dtype=float)
              factor2 = factor * weights[i]
              if not is_ok(factor2):
                continue
              preds = probs[:, i] * factor2 
              f1 = f1_score(labels[:, i] + 2, np.argmax(preds, 1), average='macro')
              if f1 > best:
                #p.print('\n', ATTRIBUTES[i], factor, factor2, f1)
                best = f1
                #class_factors[i] = factor
                class_factors_dict[i] = factor
                #p.print('counts:', get_counts(probs[:, i] * factor))
                #p.print('class_factors', i, class_factors_dict[i])

  class_factors = np.ones([num_attrs, num_classes])
  for i in range(num_attrs):
    class_factors[i] = class_factors_dict[i]
  return class_factors