Example #1
    def __init__(self, *args, **kwargs):
        """Initialization

        Args:
            num_threads (int):
                Number of threads; defaults to the number of cores
                available on the system.
        """
        super(AbstractLocalThreadPool, self).__init__(*args, **kwargs)
        
        # Store tasks
        self.tasks = Queue()
        # Threads put themselves here when they die
        self.dead_thread_queue = Queue()
        
        self.shutdown_event = Event()
        self.shutdown_event.clear()

        self.WorkerCheckInterval = kwargs.get('WorkerCheckInterval', 0.5)
        self._max_threads = kwargs.get('num_threads',
                                       multiprocessing.cpu_count())
        if self._max_threads is None:
            self._max_threads = multiprocessing.cpu_count()
        
        self.keep_alive_thread = None
        self.threads = []
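A minimal standalone sketch of the num_threads fallback described in the docstring above; the helper name is illustrative and not part of the original class.

import multiprocessing

def resolve_thread_count(**kwargs):
    # Use the explicit value when given; otherwise fall back to the core count.
    n = kwargs.get('num_threads')
    return n if n is not None else multiprocessing.cpu_count()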
Example #2
def run(args):
    logging.basicConfig(level=int(round(10*args.verbose_level)))

    assert args.n_core <= multiprocessing.cpu_count(), 'Requested n_core={} > cpu_count={}'.format(
            args.n_core, multiprocessing.cpu_count())

    def Start():
        LOG.info('Started a worker in {} from parent {}'.format(
            os.getpid(), os.getppid()))
    exe_pool = Pool(args.n_core, initializer=Start)
    if args.trim:
        get_consensus = get_consensus_with_trim
    else:
        get_consensus = get_consensus_without_trim

    K = 8
    config = args.min_cov, K, \
        args.max_n_read, args.min_idt, args.edge_tolerance, args.trim_size, args.min_cov_aln, args.max_cov_aln
    # TODO: pass config object, not tuple, so we can add fields
    inputs = []
    for datum in get_seq_data(config, args.min_n_read, args.min_len_aln):
        inputs.append((get_consensus, datum))
    try:
        LOG.info('running {!r}'.format(get_consensus))
        for res in exe_pool.imap(io.run_func, inputs):
            process_get_consensus_result(res, args)
        LOG.info('finished {!r}'.format(get_consensus))
    except:
        LOG.exception('failed gen_consensus')
        exe_pool.terminate()
        raise
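io.run_func is not shown in this snippet; the pattern it serves, unpacking a (function, datum) pair and calling the function, can be sketched as below. This is an assumption about its role, not the project's actual implementation.

def run_func(args):
    # Pool.imap passes a single picklable argument, so the callable and its
    # input are bundled into one (function, datum) tuple and unpacked here.
    func, datum = args
    return func(datum)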
Example #3
  def input_fn():
    def decode(elem):
      model_features = tf.parse_single_example(elem, features=feature_spec)
      model_labels = tf.stack([model_features.pop(label) for label in labels])
      return model_features, model_labels

    # For more information, check:
    # https://www.tensorflow.org/performance/datasets_performance
    files = tf.data.Dataset.list_files(file_pattern)
    dataset = files.apply(tf.contrib.data.parallel_interleave(
        tf.data.TFRecordDataset, cycle_length=mp.cpu_count()))
    dataset = dataset.map(decode, num_parallel_calls=mp.cpu_count())
    dataset = dataset.take(-1)
    if mode == tf.estimator.ModeKeys.TRAIN:
      if shuffle:
        dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(
            batch_size * 8))
      else:
        dataset = dataset.cache()
        dataset = dataset.repeat()
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(1)

    iterator = dataset.make_one_shot_iterator()
    batch_features, batch_labels = iterator.get_next()
    return batch_features, batch_labels
Example #4
def main(treeModelPath, dataInputPath, resultOutPath, debug):

    # read model
    treeModel = readModel(treeModelPath)

    # create output dir
    if not os.path.isdir(resultOutPath):
        os.mkdir(resultOutPath)

    if debug:
        pool = multiprocessing.Pool(processes=1)
    else:
        pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())

    print "Number of core: %d" % (multiprocessing.cpu_count())

    start_time = datetime.now()

    jobN = 0
    for filename in os.listdir(dataInputPath):
        if ".json" in filename:
            if debug:
                # debug mode: test just 1 file in 1 process
                # filterFiles(jobN,filename,treeModel,debug)
                pool.apply_async(filterFiles, (jobN, filename, dataInputPath, resultOutPath, treeModel, debug))
                break
            else:
                pool.apply_async(filterFiles, (jobN, filename, dataInputPath, resultOutPath, treeModel, debug))
            jobN += 1

    pool.close()
    pool.join()

    diff = datetime.now() - start_time
    print "Spend %d.%d seconds" % (diff.seconds, diff.microseconds)
Example #5
def get_needle_tips(images):
    """Get sample tips from images."""
    tips = []
    results = []

    # Do not make more processes than needed for the number of images.
    if len(images) > multiprocessing.cpu_count():
        proc_count = multiprocessing.cpu_count()
    else:
        proc_count = len(images)

    pool = Pool(processes=proc_count)

    for image in images:
        results.append(pool.apply_async(_get_ellipse_point,
                                        args=(image,)))

    for result in results:
        tip = result.get()
        if tip is not None:
            tips.append(tip)

    if len(tips) == 0:
        raise ValueError("No sample tip points found.")

    return tips
Example #6
def sample_paths_parallel(N,
    policy,
    baseline,
    env_mode='train',
    T=1e6, gamma=1,
    num_cpu=None,
    max_process_time=60,
    max_timeouts=5,
    normalized_env=False):
    
    if num_cpu is None or num_cpu == 'max':
        num_cpu = mp.cpu_count()
    elif num_cpu == 1:
        return sample_paths(N, policy, baseline, env_mode, T, gamma, normalized_env)
    else:
        num_cpu = min(mp.cpu_count(), num_cpu)

    paths_per_cpu = int(np.ceil(N/num_cpu))
    args_list = [paths_per_cpu, policy, baseline, env_mode, T, gamma, normalized_env]

    results = _try_multiprocess(args_list, num_cpu, max_process_time, max_timeouts)

    paths = []
    # each result is a list of paths; flatten them into a single list
    for result in results:
        for path in result:
            paths.append(path)  

    return paths
Example #7
def Nbeam(width, height, start_board, max_turn, playnum, parms):
    if mp.cpu_count() == 1:
        use_cpu_count = 1
    else:
        use_cpu_count = mp.cpu_count() - 1
    #use_cpu_count = 1
    #print "use cpu count: " + str(use_cpu_count)
    p = mp.Pool(use_cpu_count)
    func_args = []
    for i in range(width*height):
        func_args.append((search_node_array, width, height, max_turn, playnum, parms, i, start_board))
    node_array = p.map(wrap_search_node_array, func_args)
    node_array = list(itertools.chain.from_iterable(node_array))

    idx = 0
    best = 0
    for k,v in enumerate(node_array):
        if best < v.score:
            best = v.score
            idx = k

    #print "best score:" + str(node_array[idx].score)
    #print "best combo:" + str(node_array[idx].combo)

    return node_array[idx]
Example #8
def multiprocess(function, iterator, func_args=None, combine=True, n_pool=-1, include_process_num=True):
    """
    Processes data on n number of processors, use n_pool = -1 for all processors

    function is the function to apply to each object in iterator, must except the object to process and process number if include_process_num=True

    func_args is a dictionary of args to feed function
    
    iterator must be list of files or chunks of dataframes
    
    if combine is True will return the combined data
    
    n_pool can be greater than 1 and less than the cpu count or if negative is 1+cpu_count-|n_pool|
    """
    if func_args is None:
        func_args = {}

    # Initialize pool
    if n_pool < 0:
        pool = mp.Pool(processes=mp.cpu_count() + 1 + n_pool)
    elif n_pool > 1 and n_pool <= mp.cpu_count():
        pool = mp.Pool(processes=n_pool)
    else:
        raise ValueError("n_pool is out of range for cpu count!")

    # Apply async (each call returns an AsyncResult; results are collected below)
    if include_process_num:
        results = [pool.apply_async(function, args=(obj, process), kwds=func_args)
                   for process, obj in enumerate(iterator)]
    else:
        results = [pool.apply_async(function, args=(obj,), kwds=func_args)
                   for obj in iterator]

    # Get results and concat into a single dataframe
    if combine:
        result = pd.concat([p.get() for p in results])
    else:
        result = [p.get() for p in results]
    return result
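For reference, a usage sketch of the helper above; the worker function and the DataFrame are illustrative and not part of the original module.

import numpy as np
import pandas as pd

def summarize_chunk(chunk, process_num):
    # Hypothetical worker: tag each chunk with the process number that handled it.
    out = chunk.copy()
    out['process'] = process_num
    return out

if __name__ == '__main__':
    frame = pd.DataFrame({'x': range(100)})
    chunks = np.array_split(frame, 4)
    combined = multiprocess(summarize_chunk, chunks, combine=True, n_pool=-1)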
Example #9
def profile_locks(cmd):
    (c_samples, c, cc, c_dev, sections) = lttng_session( "profile_c"
                                                       , cmd(1, 'c')
                                                       , ['memcached:c_begin', 'memcached:c_end', 'memcached:inside_cc']
                                                       , measure_c)

    n = lttng_session( "profile_n"
                     , cmd(multiprocessing.cpu_count(),'c')
                     , ['memcached:contention']
                     , measure_n)[1]

    (nsamples, blk_samples) = lttng_session( "profile_block_costs"
                                           , cmd(1,'c')
                                           , ['memcached:block_id']
                                           , measure_blocks)


    blk_cnts = lttng_session( "profile_block_counts"
                            , cmd(1,'c')
                            , ['memcached:blk_cnts']
                            , count_blocks)
    
    blk_costs = {k: ((blk_samples[k] * c / nsamples), v) for k, v in blk_cnts.items() if k in blk_samples}

    nn = lttng_session( "profile_contention"
                      , cmd(multiprocessing.cpu_count(),'f')
                      , ['memcached:contention']
                      , measure_n)[1]
    return((c,c_dev),cc,n,nn,sections,blk_costs)
Example #10
def buildTFIDFDictionary(csvName):
    things = csv_object.getThings(csvName)
    global words
    global descriptions

    for thingy in things:
        description = thingy.description
        descriptions.append(description)
        wordInDoc = description
        words = words.union(wordInDoc)

    # dictionary = [["None" for x in range(len(things)+1)]"None " for x in range(len(words)+1)]] #define matrix of things and words
    # dictionary = {}

    # multiprocessing
    print cpu_count(), len(things)
    thingPool = Pool(cpu_count())
    results = thingPool.map(thingThreadHelper, things)

    # for i, thingy in enumerate(things):
    #     dictionary[i] = thing.title
    #     for j, word in enumerate(words):
    #         dictionary[i][0]= word
    #         dictionary[i][j] = tfidf(word, thingy.description, descriptions)))

    # print type(results)
    # print results

    # for r in results:
    #     print r
    return results
Example #11
def parse_arguments():
    """
    Encapsulate the use of argparse

    @param: None

    @return: The parsed arguments (an argparse.Namespace instance)
    """
    parser = argparse.ArgumentParser(description="Create some random charts")

    # Required
    # Nothing

    # Optional
    parser.add_argument("-e", "--executable",
                        help="The executable to use [default: None]",
                        type=str,
                        default=None)

    parser.add_argument("-n", "--number",
                        help="Number of charts to randomly create [default: %(default)s]",
                        type=check_positive,
                        default=1)

    parser.add_argument("-t", "--threads",
                        help="Number of threads to use [default: %(default)s]",
                        type=int,
                        default=multiprocessing.cpu_count()-1,
                        choices=range(1, multiprocessing.cpu_count()))

    return parser.parse_args()
Example #12
def count_free_cores(max=cpu_count()):
    """Count the number of CPU cores not currently used by this job.
    """
    if max is True:
        max = cpu_count()
    active = 1  # len(active_children())
    return max - (active + 1)
Example #13
    def GetParallelProcessCount(self):

        # limit based on free memory
        f = os.popen('vmstat', 'r')
        f.readline()
        f.readline()
        line = f.readline()
        f.close()
        freeRAM = line.split()[3]
        cache = line.split()[5]
        ppCount = int((float(freeRAM) + float(cache)) / 80000.0)

        if ppCount > multiprocessing.cpu_count(): # cap at the number of CPU cores
            ppCount = multiprocessing.cpu_count()
        if ppCount < 1: # need at least one process
            ppCount = 1

        # now limit based on CPU load
        f = open('/proc/loadavg', 'r')
        line = f.readline()
        f.close()
        load = float(line.split()[0])
        if load > (float(multiprocessing.cpu_count()) + 0.5) and ppCount > 3:
            ppCount = 3
        if load > (float(multiprocessing.cpu_count()) + 1.0) and ppCount > 2:
            ppCount = 2
        if load > (float(multiprocessing.cpu_count()) + 1.5) and ppCount > 1:
            ppCount = 1

        return ppCount
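The method above is Linux-specific (it shells out to vmstat and reads /proc/loadavg). A rough, portable sketch of the same idea, capping the worker count by both the core count and the current load, using only the standard library; the heuristic itself is illustrative.

import multiprocessing
import os

def suggested_process_count():
    cores = multiprocessing.cpu_count()
    try:
        load_1min = os.getloadavg()[0]  # not available on Windows
    except (AttributeError, OSError):
        return cores
    # Leave headroom when the machine is already busy.
    free = cores - int(load_1min)
    return min(cores, max(1, free))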
Example #14
def main(opts):
    """The main loop of the module, do the renaming in parallel etc."""
    log = logging.getLogger("exif2timestream")
    setup_logs(opts)
    # beginneth the actual main loop
    start_time = time()
    cameras = parse_camera_config_csv(opts["-c"])
    n_images = 0
    for camera in cameras:
        msg = "Processing experiment {}, location {}\n".format(
            camera[FIELDS["expt"]],
            camera[FIELDS["location"]],
        )
        msg += "Images are coming from {}, being put in {}".format(
            camera[FIELDS["source"]],
            camera[FIELDS["destination"]],
        )
        print(msg)
        log.info(msg)
        for ext, images in find_image_files(camera).iteritems():
            images = sorted(images)
            n_cam_images = len(images)
            print("{0} {1} images from this camera".format(n_cam_images, ext))
            log.info("Have {0} {1} images from this camera".format(
                n_cam_images, ext))
            n_images += n_cam_images
            last_date = None
            subsec = 0
            count = 0
            # TODO: sort out the whole subsecond clusterfuck
            if "-1" in opts and opts["-1"]:
                log.info("Using 1 process (What is this? F*****g 1990?)")
                for image in images:
                    count += 1
                    print("Processed {: 5d} Images".format(count), end='\r')
                    process_image((image, camera, ext))
            else:
                from multiprocessing import Pool, cpu_count
                if "-t" in opts and opts["-t"] is not None:
                    try:
                        threads = int(opts["-t"])
                    except ValueError:
                        threads = cpu_count() - 1
                else:
                    threads = cpu_count() - 1
                # Ensure that we're using at least one thread
                threads = max(threads, 1)
                log.info("Using {0:d} processes".format(threads))
                # set the function's camera-wide arguments
                args = zip(images, cycle([camera]), cycle([ext]))
                pool = Pool(threads)
                for _ in pool.imap(process_image, args):
                    count += 1
                    print("Processed {: 5d} Images".format(count), end='\r')
                pool.close()
                pool.join()
            print("Processed {: 5d} Images. Finished this cam!".format(count))
    secs_taken = time() - start_time
    print("\nProcessed a total of {0} images in {1:.2f} seconds".format(
          n_images, secs_taken))
Example #15
def ProcessStuff(spp_list):
	print 'cpu_count() = %d\n' % multiprocessing.cpu_count()
	NUMBER_OF_PROCESSES = multiprocessing.cpu_count()
	TASKS = [(CallMaxent, (spp_list[i],)) for i in range(len(spp_list))]
	#TASKS2 = [(plus, (i, 8)) for i in range(10)]

    # Create queues
	task_queue = Queue()
	done_queue = Queue()

	# Submit tasks
	for task in TASKS:
		task_queue.put(task)

    # Start worker processes
	for i in range(NUMBER_OF_PROCESSES):
		Process(target=worker, args=(task_queue, done_queue)).start()

    # Get and print results
	print 'Unordered results:'
	for i in range(len(TASKS)):
		print '\t', done_queue.get()

    # Tell child processes to stop
	for i in range(NUMBER_OF_PROCESSES):
		task_queue.put('STOP')
Example #16
def main(dataInputPath,resultOutPath,ptnOutputPath):

    model, table = projizz.readPrefixTreeModel("../prefix_tree_model/patternTree.json")

    if not os.path.isdir(resultOutPath):
        os.mkdir(resultOutPath)

    if not os.path.isdir(ptnOutputPath):
        os.mkdir(ptnOutputPath)

    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    print "Number of core: %d" % (multiprocessing.cpu_count())
    start_time = datetime.now()
    
    jobN = 0
    for filename in os.listdir(dataInputPath):
        if ".json" in filename:
            pool.apply_async(tryToFindRela, (jobN, filename, dataInputPath, resultOutPath,ptnOutputPath, model, table))
            jobN+=1

    pool.close()
    pool.join()

    diff = datetime.now() - start_time
    print "Spend %d.%d seconds" % (diff.seconds,diff.microseconds)

    projizz.combinedFileWriter(model,os.path.join(ptnOutputPath,"model"))
    projizz.combinedFileWriter(table,os.path.join(ptnOutputPath,"table"))
Example #17
 def __init__(self, tasks, num_cpus=0, profiling=False):
     self.log = LoggingManager.get_logger('kraken')
     try:
         self.num_processes = int(num_cpus)
         if self.num_processes < 1:
             raise ValueError()
         if self.num_processes > cpu_count():
             self.log.warning("Number of cores (%d) larger than available." % self.num_processes)
             raise ValueError()
     except (ValueError, TypeError):
         self.log.warning("Number of cores has not been specified or is incorrect. Using available cores.")
         self.num_processes = cpu_count()
     
     self.log.info("Kraken has %d tentacles (cpu cores)" % self.num_processes)
     
     self.tasks = tasks
     self.num_tasks = len(tasks)
     self.tentacles = []
     tentacle_tasks = [tasks[i::self.num_processes] for i in xrange(self.num_processes)]
     
     for i in range(self.num_processes):
         tentacle = Tentacle(tentacle_tasks[i], profiling)
         self.tentacles.append(tentacle)
     
     self.log.info("%d ships ready to be smashed" % self.num_tasks)
Example #18
def interface():
    '''Command-line interface'''
    usage = "usage: %prog [options]"

    p = optparse.OptionParser(usage)
    
    p.add_option('--output', dest = 'output', action='store', type='string', 
default = None, help='The path to the file where you want to store the '+
'barcodes', metavar='FILE')

    p.add_option('--tag-length', dest = 'tl', action='store', 
type='int', default = 6, help='The desired tag length')
    
    p.add_option('--edit-distance', dest = 'ed', action='store', 
type='int', default = 3, help='The desired edit distance')

    p.add_option('--multiprocessing', dest = 'multiprocessing', 
action='store_true', default=False, help='Use multiprocessing')

    p.add_option('--processors', dest = 'nprocs', action='store', 
type='int', default = None, help='The number of processing cores to use when' +
' using multiprocessing.  Default is # of cores - 2')

    p.add_option('--no-polybase', dest = 'polybase', action='store_true', default=False, 
help='Remove tags with > 2 identical nucleotides in a row')

    p.add_option('--gc', dest = 'gc', action='store_true', default=False, 
help='Remove tags with GC content (%) outside of 40-60')

    p.add_option('--comp', dest = 'comp', action='store_true', default=False, 
help='Remove tags that are perfect self-complements')

    p.add_option('--hamming', dest = 'hamming', action='store_true', default=False, 
help='Use Hamming distance in place of edit (Levenshtein) distance.')
    
    # c is on by default now
    #p.add_option('--use-c', dest = 'clev', action='store_true', default=False, 
#help='Use the C version of Levenshtein (faster)')

    p.add_option('--min-and-greater', dest = 'greater', action='store_true', default=False, 
help='Show tags at all integer values of edit distance > that specified')

    p.add_option('--rescan', dest = 'rescan', action='store', type='string', 
default = None, help='Rescan a file')

    p.add_option('--rescan-length', dest = 'rescan_length', action='store', type='int', 
default = 6, help='Rescan length')

        

    (options,arg) = p.parse_args()
    assert options.nprocs <= multiprocessing.cpu_count(), \
        "Processor count must not exceed the number of cores available"
    # set the number of processors by default
    if options.multiprocessing and not options.nprocs:
        options.nprocs = multiprocessing.cpu_count() - 2
    if not options.tl:
        p.print_help()
        sys.exit(2)
    return options, arg
Example #19
    def parse_opt(self):
        """ parses the command line options for different settings. """
        optparser = optparse.OptionParser()
        optparser.add_option('-c', '--config',
            action='store', dest='config', type='string', default='experiments.cfg', 
            help="your experiments config file")
        optparser.add_option('-n', '--numcores',
            action='store', dest='ncores', type='int', default=cpu_count(), 
            help="number of processes you want to use, default is %i"%cpu_count())  
        optparser.add_option('-d', '--del',
            action='store_true', dest='delete', default=False, 
            help="delete experiment folder if it exists")
        optparser.add_option('-e', '--experiment',
            action='append', dest='experiments', type='string',
            help="run only selected experiments, by default run all experiments in config file.")
        optparser.add_option('-b', '--browse',
            action='store_true', dest='browse', default=False, 
            help="browse existing experiments.")      
        optparser.add_option('-B', '--Browse',
            action='store_true', dest='browse_big', default=False, 
            help="browse existing experiments, more verbose than -b")      
        optparser.add_option('-p', '--progress',
            action='store_true', dest='progress', default=False, 
            help="like browse, but only shows name and progress bar")

        options, args = optparser.parse_args()
        self.options = options
        return options, args
Example #20
def run_calculation():
    """ Begin multi-process calculation, and save to file """

    print("Creating %d-process pool" % mp.cpu_count())

    pool = mp.Pool(mp.cpu_count())

    f = h5py.File('mandelbrot.hdf5','w')

    print("Creating output dataset with shape %s x %s" % (NX, NY))

    dset = f.create_dataset('mandelbrot', (NX,NY), 'i')
    dset.attrs['XSTART'] = XSTART
    dset.attrs['YSTART'] = YSTART
    dset.attrs['XEXTENT'] = XEXTENT
    dset.attrs['YEXTENT'] = YEXTENT
    
    result = pool.imap(compute_row, (x*xincr for x in xrange(NX)))

    for idx, arr in enumerate(result):
        if idx%25 == 0: print("Recording row %s" % idx)
        dset[idx] = arr

    print("Closing HDF5 file")

    f.close()

    print("Shutting down process pool")

    pool.close()
    pool.join()
Example #21
def set_performance(value, notify):
    """Set performance mode.

    This will set CPU frequency scaling as well as the
    Asus Super Hybrid Engine (FSB speed).

    Keyword arguments:
    value  -- "performance", "ondemand" or "powersave" (str)
    notify -- Show popup notification (bool)

    """

    modes = {"performance": '0', "ondemand": '1', "powersave": '2'}
    try:
        with open("/sys/devices/platform/eeepc-wmi/cpufv", 'w') as f:
            f.write(modes[value])
    except:
        pass
    for cpu in range(0, multiprocessing.cpu_count()):
        with open(
            "/sys/devices/system/cpu/cpu{0}/cpufreq/scaling_governor".format(
                cpu), 'w') as f:
            f.write(value)
    with open(
            "/sys/devices/system/cpu/cpu{0}/cpufreq/scaling_governor".format(
                multiprocessing.cpu_count() - 1)) as f:
        mode = f.read()

    print("Performance: {0}".format(mode.rstrip()))

    if notify:
        show_notification("Performance mode", "{0}".format(mode))
Example #22
def main():
	if len(sys.argv) != 2 and len(sys.argv) != 3 or sys.argv[1] == '-h':
		help()
		return

	queue = multiprocessing.Queue()
	value = multiprocessing.Value('i', 0)

	processes = []
	count = multiprocessing.cpu_count()

	szc_dir = sys.argv[2] if len(sys.argv) == 3 else None

	for i in range(count):
		converter = Converter(sys.argv[1], szc_dir, i, value, queue)
		p = multiprocessing.Process(target=converter.convert)
		processes.append(p)
		p.start()

	sys.stdout.write("\033[2J")

	while count:
		m = queue.get()
		if m[1] is None:
			count -= 1
		else:
			sys.stdout.write("\033[%d;0H" % (int(m[0])+1))
			print "%60.60s %s/%s f:%s %s/%s   " % m[1:]
	
	for p in processes:
		p.join()

	sys.stdout.write("\033[%d;0H" % (multiprocessing.cpu_count() + 1,))
	print("DONE!!!            ")
Example #23
def take_screenshots_async(source_folder, config_path, verbose=False):
	sizes = load_ss_config(config_path)
	slides = parse_slide_folders(source_folder)

	dests = list(map(lambda pair: pair[0], slides[0]))
	urls = list(map(lambda pair: os.path.join(pair[0], pair[1]), slides[0]))

	shots = list(gen_configs(urls, dests, sizes, local_slide_name))

	q = mp.JoinableQueue()
	procs = []

	for i in range(mp.cpu_count()*2):
		p = mp.Process(target=ss_q, args=(q,verbose))
		procs.append(p)
		p.start()

	for item in shots:
		q.put(tuple(item))

	q.join()

	for i in range(mp.cpu_count()*2):
		q.put(None)

	for proc in procs: proc.join()
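The worker ss_q is not defined in this snippet; for q.join() to return, each worker must call task_done() once per queued item and exit when it receives the None sentinel. A minimal sketch of such a worker, with the actual screenshot call left out, might look like this.

def ss_q(q, verbose=False):
    # Hypothetical worker: consume screenshot configs until a None sentinel arrives.
    while True:
        item = q.get()
        if item is None:
            q.task_done()
            break
        # The real implementation would render the screenshot for the item here.
        if verbose:
            print('shot:', item)
        q.task_done()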
Example #24
def parallel(df, func):
    if len(df) > 0:
        p = Pool(cpu_count())
        df = p.map(func, np.array_split(df, cpu_count()))
        df = pd.concat(df, axis=0, ignore_index=True).reset_index(drop=True)
        p.close(); p.join()
        return df
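For reference, a usage sketch for the split/map/concat helper above; the worker and the DataFrame are illustrative.

import pandas as pd

def add_double(chunk):
    # Hypothetical per-chunk worker; each chunk is processed in a separate process.
    chunk['double'] = chunk['x'] * 2
    return chunk

if __name__ == '__main__':
    frame = pd.DataFrame({'x': range(1000)})
    frame = parallel(frame, add_double)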
Example #25
def concurrent_test_jobs(platform):
    if platform == "windows":
        return str(multiprocessing.cpu_count() // 4)
    elif platform == "macos":
        return str(multiprocessing.cpu_count() // 2)
    else:
        return str(multiprocessing.cpu_count())
Example #26
def main():
    total_work = multiprocessing.cpu_count()
    burnin = 30000
    significance_samples = 100000
    per_process_samples = significance_samples / multiprocessing.cpu_count()
    alpha_count_slow = 0.001
    alpha_count_fast = find_optimal_decay(alpha_count_slow)
    alpha_mu_slow = 0.01
    alpha_mu_fast = 0.01
    buckets_slow = 50
    buckets_fast = 50
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    runs = pool.map(
            ergodic_chain,
            [[burnin, per_process_samples,
              alpha_count_slow, alpha_count_fast,
              alpha_mu_slow, alpha_mu_fast,
              buckets_slow, buckets_fast] for _ in range(total_work)])
    aggregator = [[] for _ in range(len(FUNC_LIST))]
    for run in runs:
        for i, data_list in enumerate(run):
            aggregator[i] += data_list
    colors = ['red', 'green', 'blue', 'purple']
    for label, data in zip(FUNC_LABELS, aggregator):
        #data.sort()
        _, _, patches = pylab.hist(
                data, 250, label=label,
                normed=True, histtype='stepfilled')
        pylab.setp(patches, 'alpha', 0.4)
    pylab.legend()
    pylab.show()
Example #27
def ProcessStuff(spp_list,epoch_list,model_list):
	print 'cpu_count() = %d\n' % multiprocessing.cpu_count()
	
	NUMBER_OF_PROCESSES = multiprocessing.cpu_count()
	task_queue = Queue()
	done_queue = Queue()
	
	for spp in spp_list:
		for model in model_list:
			for epoch in epoch_list:
				TASKS = [(CallMaxEnt,(spp.rstrip("\r\n"),epoch.rstrip("\r\n"),model.rstrip("\r\n"),str(i))) for i in range(10)]

				#print "Number of projections to be made = %d\n" % len(TASKS)
				#print TASKS
				print "   "+spp

				# Submit tasks
				for task in TASKS:
					#print task,"\n"
					task_queue.put(task)

				# Start worker processes
				for i in range(NUMBER_OF_PROCESSES):
					Process(target=worker, args=(task_queue, done_queue)).start()

				# Get and print results
				print 'Unordered results for '+spp.rstrip("\r\n")+':'
				for i in range(len(TASKS)):
					print '\t', done_queue.get()

				# Tell child processes to stop
				for i in range(NUMBER_OF_PROCESSES):
					task_queue.put('STOP')
Example #28
def retrieve_clusters(ne_lat, ne_lng, sw_lat, sw_lng, start_date, end_date, fatal, severe, light, inaccurate, zoom):
    marker_boxes = divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng)
    result_futures = []
    logging.info("number of cores: " + str(multiprocessing.cpu_count()))
    with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
        for marker_box in marker_boxes:
            markers_in_box = Marker.bounding_box_query(
                marker_box[0],
                marker_box[1],
                marker_box[2],
                marker_box[3],
                start_date,
                end_date,
                fatal,
                severe,
                light,
                inaccurate,
            ).all()
            result_futures.append(executor.submit(calculate_clusters, markers_in_box, zoom))

    completed_futures = concurrent.futures.wait(result_futures)
    result = []
    for future in completed_futures.done:
        result.extend(future.result())

    return result
Example #29
def check_num_cpus(n_cpus, table_size, min_table_size):

    messages = []
    if multiprocessing.current_process().daemon and n_cpus != 1:
        messages.append("WARNING: you choose n_cpus = %d but integrate already runs inside a "
                        "daemon process which is not allowed. therefore set n_cpus = 1" % n_cpus)
        n_cpus = 1

    if n_cpus < 0:
        n_cpus = multiprocessing.cpu_count() + n_cpus

    if n_cpus <= 0:
        messages.append("WARNING: you requested to use %d cores, "
                        "we use single core instead !" % n_cpus)
        n_cpus = 1

    if n_cpus > 1 and table_size < min_table_size:
        messages.append("INFO: as the table has les thann %d rows, we switch to one cpu mode"
                        % min_table_size)
        n_cpus = 1

    elif n_cpus > multiprocessing.cpu_count():
        messages.append("WARNING: more processes demanded than available cpu cores, this might be "
                        "inefficient")

    return messages, n_cpus
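For reference, a usage sketch of the check above: a negative n_cpus resolves to cpu_count() + n_cpus, and asking for more cores than exist only produces a warning. The values below are illustrative.

# Outside a daemon process, n_cpus=-1 resolves to cpu_count() - 1
# (floored at 1 on a single-core machine).
messages, n_cpus = check_num_cpus(-1, table_size=10000, min_table_size=100)
for message in messages:
    print(message)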
Example #30
def generate_targets(samples, class_counter):
    """
    Generates array of segmented images.

    samples: list
        list of Sample objects
    class_counter: ClassCounter object
        object used for generating class markings (class ordinal numbers)

    returns: np.array
        array of class ordinal numbers
    """
    y_shape = (len(samples), requested_shape[0], requested_shape[1])

    y = np.zeros(y_shape, dtype='int8')

    logger.info("Segmented images new shape %s", y.shape)

    pool = mp.Pool(mp.cpu_count())
    logger.info("Cpu count %d", mp.cpu_count())

    result_func = lambda result: save_result_segm(y, result)

    for i, sample in enumerate(samples):
        # result_func(mark_image(i, sample.marked_image,
        #                        class_counter, requested_shape))
        pool.apply_async(mark_image,
                         args=(i, sample.marked_image,
                               class_counter, requested_shape,),
                         callback=result_func)
    pool.close()
    pool.join()

    return y
Example #31
    # This:
    #    example "testName" : [["--param1", "--param2"] , ["--param3"]]
    # will run the test 3 times:
    #    testName
    #    testName --param1 --param2
    #    testname --param3
    "wallet_txn_doublespend.py": [["--mineblock"]],
    "wallet_txn_clone.py": [["--mineblock"]],
    "wallet_multiwallet.py": [["--usecli"]],
}

# Used to limit the number of tests, when list of tests is not provided on command line
# When --extended is specified, we run all tests, otherwise
# we only run a test if its execution time in seconds does not exceed EXTENDED_CUTOFF
DEFAULT_EXTENDED_CUTOFF = 40
DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1


class TestCase():
    """
    Data structure to hold and run information necessary to launch a test case.
    """
    def __init__(self, test_num, test_case, tests_dir, tmpdir, flags=None):
        self.tests_dir = tests_dir
        self.tmpdir = tmpdir
        self.test_case = test_case
        self.test_num = test_num
        self.flags = flags

    def run(self, portseed_offset):
        t = self.test_case
Example #32
def generate_file_pairs(
    location: Path,
    ignore_todo: bool,
    strict: bool,
    unsafe: bool,
    json_output: bool,
    save_test_output_tar: bool = True,
) -> None:
    filenames = list(location.rglob("*"))
    config_filenames = [
        filename
        for filename in filenames
        if filename.suffix in YML_EXTENSIONS
        and not filename.name.startswith(".")
        and not filename.parent.name.startswith(".")
    ]
    config_test_filenames = {
        config_filename: [
            inner_filename
            for inner_filename in filenames
            if inner_filename.with_suffix("") == config_filename.with_suffix("")
            and inner_filename.is_file()
            and inner_filename.suffix not in YML_EXTENSIONS
        ]
        for config_filename in config_filenames
    }
    config_with_tests, config_without_tests = partition(
        lambda c: c[1], config_test_filenames.items()
    )
    config_missing_tests_output = [str(c[0]) for c in config_without_tests]

    invoke_semgrep_fn = functools.partial(
        invoke_semgrep_multi,
        no_git_ignore=True,
        no_rewrite_rule_ids=True,
        strict=strict,
        dangerously_allow_arbitrary_code_execution_from_rules=unsafe,
        testing=True,
    )
    with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
        results = pool.starmap(invoke_semgrep_fn, config_with_tests)

    config_with_errors, config_without_errors = partition(lambda r: r[1], results)
    config_with_errors_output = [
        {"filename": str(filename), "error": str(error), "output": output}
        for filename, error, output in config_with_errors
    ]

    tested = {
        filename: score_output_json(
            output, config_test_filenames[filename], ignore_todo
        )
        for filename, _, output in config_without_errors
    }

    results_output: Mapping[str, Mapping[str, Any]] = {
        str(filename): {
            "todo": todo,
            "checks": {
                check_id: {
                    "tp": tp,
                    "tn": tn,
                    "fp": fp,
                    "fn": fn,
                    "passed": (fp == 0) and (fn == 0),
                    "matches": matches[check_id],
                }
                for check_id, (tp, tn, fp, fn) in output.items()
            },
        }
        for filename, (output, matches, todo) in tested.items()
    }

    output = {
        "config_missing_tests": config_missing_tests_output,
        "config_with_errors": config_with_errors_output,
        "results": results_output,
    }

    strict_error = bool(config_with_errors_output) and strict
    any_failures = any(
        not check_results["passed"]
        for file_results in results_output.values()
        for check_results in file_results["checks"].values()
    )
    exit_code = int(strict_error or any_failures)

    if json_output:
        print(json.dumps(output, indent=4, separators=(",", ": ")))
        sys.exit(exit_code)

    # save the results to json file and tar the file to upload as github artifact.
    if save_test_output_tar:
        list_to_output = []
        with open(SAVE_TEST_OUTPUT_JSON, "w") as f:
            for tup in results:
                true_result = tup[2]
                list_to_output.append(true_result)
            f.write(json.dumps(list_to_output, indent=4, separators=(",", ":")))

        with tarfile.open(SAVE_TEST_OUTPUT_TAR, "w:gz") as tar:
            tar.add(SAVE_TEST_OUTPUT_JSON)

    if config_missing_tests_output:
        print("The following config files are missing tests:")
        print("\t" + "\n\t".join(config_missing_tests_output))

    if config_with_errors_output:
        print("The following config files produced errors:")
        print(
            "\t"
            + "\n\t".join(
                f"{c['filename']}: {c['error']}" for c in config_with_errors_output
            )
        )

    # Place failed tests at the bottom for higher visibility
    passed_results_first = collections.OrderedDict(
        sorted(
            results_output.items(),
            key=lambda t: any(not c["passed"] for c in t[1]["checks"].values()),
        )
    )

    print(f"{len(tested)} yaml files tested")
    print("check id scoring:")
    print("=" * 80)

    totals: Dict[str, Any] = collections.defaultdict(int)

    for filename, rr in passed_results_first.items():
        print(f"(TODO: {rr['todo']}) {filename}")
        for check_id, check_results in rr["checks"].items():
            print(generate_check_output_line(check_id, check_results))
            if not check_results["passed"]:
                print(generate_matches_line(check_results))
            for confusion in ["tp", "tn", "fp", "fn"]:
                totals[confusion] += check_results[confusion]

    print("=" * 80)
    print(f"final confusion matrix: {generate_confusion_string(totals)}")
    print("=" * 80)

    sys.exit(exit_code)
Example #33
## If the running OS is linux; print loadavg.
if (platform == 'Linux'):
    print("Linux Load Avarages:" + str(os.getloadavg()))

# Assignment 3

print('\n')
print('Assignment 3')

## Take and print the “5 min loadavg” value and the CPU core count. If the loadavg value is
## close to the CPU core count (hint: nproc - 5min loadavg < 1) then exit the script.
load_avg1, load_avg5, load_avg15 = os.getloadavg()

# take CPU count
cpu_count = multiprocessing.cpu_count()

# If the loadavg value is near the CPU core count, exit the script.
if ((cpu_count - load_avg5) < 1):
    exit()

# Assignment 4

print('\n')
print('Assignment 4')

URLs = [
    'https://api.github.com', 'http://bilgisayar.mu.edu.tr/',
    'https://www.python.org/', 'http://akrepnalan.com/ceng2034',
    'https://github.com/caesarsalad/wow'
]
Example #34
    def get_render_cmd(self):
        self.renderSettings = {}
        self.mappings = {}
        render_cmd = ''

        if self.G_RENDER_OS == '0':
            if float(self.CG_VERSION) < 2016:
                version_name = "%s-x64" % (self.CG_VERSION)
            else:
                version_name = self.CG_VERSION
            self.renderSettings["render.exe"] = "/usr/autodesk/" \
                "maya%s/bin/Render" % (version_name)
            self.renderSettings["mayabatch.exe"] = "/usr/autodesk/" \
                "maya%s/bin/maya -batch" % (version_name)
        self.renderSettings["render.exe"] = "C:/Program Files/Autodesk/" \
            "maya%s/bin/render.exe" % (self.CG_VERSION)
        self.renderSettings["output"] = os.path.normpath(
            self.G_WORK_RENDER_TASK_OUTPUT).replace("\\", "/")

        # Multiple frames on one machine
        self.renderSettings[
            "g_one_machine_multiframe"] = self.g_one_machine_multiframe
        if self.g_one_machine_multiframe is True:
            self.renderSettings["output"] = os.path.join(
                os.path.normpath(self.G_WORK_RENDER_TASK_OUTPUT),
                "temp_out").replace("\\", "/")
        self.renderSettings["output_frame"] = os.path.normpath(
            self.G_WORK_RENDER_TASK_OUTPUT).replace("\\", "/")

        if not os.path.exists(self.renderSettings["output"]):
            os.makedirs(self.renderSettings["output"])

        self.renderSettings["tile_region"] = ""
        self.renderSettings["tiles"] = int(self.G_CG_TILE_COUNT)
        self.renderSettings["tile_index"] = int(self.G_CG_TILE)
        # -----------render tiles------------
        if self.renderSettings["tiles"] > 1:
            tile_region = self.get_region(
                int(self.G_CG_TILE_COUNT), int(self.G_CG_TILE),
                int(self.G_TASK_JSON_DICT['scene_info_render'][
                    self.G_CG_LAYER_NAME]['common']['width']),
                int(self.G_TASK_JSON_DICT['scene_info_render'][
                    self.G_CG_LAYER_NAME]['common']['height']))
            self.renderSettings["tile_region"] = " ".join(
                [str(i) for i in tile_region])

            self.renderSettings["output"] = "%s/%s/%s/" % \
                (os.path.normpath(self.G_WORK_RENDER_TASK_OUTPUT).replace("\\", "/"),self.G_CG_START_FRAME,
                 self.renderSettings["tile_index"])
            self.renderSettings["output"] = os.path.normpath(
                self.renderSettings["output"])
            if not os.path.exists(self.renderSettings["output"]):
                os.makedirs(self.renderSettings["output"])

        if self.G_INPUT_PROJECT_PATH:
            if os.path.exists(self.G_INPUT_PROJECT_PATH):
                os.chdir(self.G_INPUT_PROJECT_PATH)

        self.renderSettings["maya_file"] = os.path.normpath(
            self.G_INPUT_CG_FILE)
        self.renderSettings["start"] = self.G_CG_START_FRAME
        self.renderSettings["end"] = self.G_CG_END_FRAME
        self.renderSettings["by"] = self.G_CG_BY_FRAME

        self.renderSettings["renderableCamera"] = self.G_CG_OPTION
        self.renderSettings["renderableLayer"] = self.G_CG_LAYER_NAME
        self.renderSettings["projectPath"] = os.path.normpath(
            self.G_INPUT_PROJECT_PATH)
        self.renderSettings["renderType"] = "render.exe"
        if self.ENABLE_LAYERED == "1":
            self.renderSettings["width"] = int(
                self.G_TASK_JSON_DICT['scene_info_render'][
                    self.G_CG_LAYER_NAME]['common']['width'])
            self.renderSettings["height"] = int(
                self.G_TASK_JSON_DICT['scene_info_render'][
                    self.G_CG_LAYER_NAME]['common']['height'])
        else:
            self.renderSettings["width"] = int(
                self.G_TASK_JSON_DICT['scene_info_render']
                ['defaultRenderLayer']['common']['width'])
            self.renderSettings["height"] = int(
                self.G_TASK_JSON_DICT['scene_info_render']
                ['defaultRenderLayer']['common']['height'])

        #"-----------------------------cmd--------------------------------"
        cmd = "\"%(render.exe)s\" -s %(start)s -e %(end)s -b %(by)s " \
            "-proj \"%(projectPath)s\" -rd \"%(output)s\"" \
            % self.renderSettings
        if self.G_CG_OPTION:
            cmd += " -cam \"%(renderableCamera)s\"" % self.renderSettings
        if self.G_CG_LAYER_NAME:
            cmd += " -rl \"%(renderableLayer)s\"" % self.renderSettings

        pre_render_dict = {}
        pre_render_dict["enable_layered"] = self.ENABLE_LAYERED
        pre_render_dict["projectPath"] = os.path.normpath(
            self.G_INPUT_PROJECT_PATH)
        pre_render_dict["mapping"] = self.mappings
        pre_render_dict["renderableCamera"] = self.G_CG_OPTION
        pre_render_dict["renderableLayer"] = self.G_CG_LAYER_NAME
        pre_render_dict["task_json"] = self.G_TASK_JSON
        pre_render_dict["start"] = self.G_CG_START_FRAME
        pre_render_dict["c_prerender"] = self.G_RN_MAYA_CUSTOME_PRERENDER
        pre_render_dict["user_id"] = self.G_USER_ID
        pre_render_dict["task_id"] = self.G_TASK_ID
        pre_render_dict["plugins"] = self.G_CG_CONFIG_DICT["plugins"]

        # self.MAYA_BASE_RENDER_CMD = cmd

        # cmd += " -preRender \"python \\\"pre_render_dict=%s;execfile(\\\\\\\"%s\\\\\\\")\\\"\"" % (pre_render_dict,self.G_RN_MAYA_PRERENDER)

        #---------------render cmd-------------------------

        # ----------render tile------------
        if self.ENABLE_LAYERED == "1":
            self.renderer = self.G_TASK_JSON_DICT['scene_info_render'][
                self.G_CG_LAYER_NAME]['common']['renderer']
            if self.renderSettings["tile_region"]:
                if self.renderer in ["mentalRay", "arnold", "vray"]:
                    if self.renderer == "mentalRay" and float(
                            self.CG_VERSION) < 2017:
                        cmd += " -r mr -reg %(tile_region)s" % self.renderSettings
                    elif self.renderer == "mentalRay" and float(
                            self.CG_VERSION
                    ) > 2016.5 and "mentalray" in self.CG_PLUGINS_DICT:
                        cmd += " -r mr -reg %(tile_region)s" % self.renderSettings

                    elif self.renderer == "arnold" and "mtoa" in self.CG_PLUGINS_DICT:
                        cmd += " -r arnold -reg %(tile_region)s" % self.renderSettings

                    elif self.renderer == "vray" and "vrayformaya" in self.CG_PLUGINS_DICT:
                        cmd += " -r vray -reg %(tile_region)s" % self.renderSettings

                    else:
                        print("please confirm the renderer is correct!")
                        print(
                            "current render layer's renderer is %s, not in [mentalRay, arnold, vray]"
                            % (self.renderer))
                        sys.exit(555)
                if self.renderer in ["mayaSoftware"]:
                    cmd += " -r sw -reg %(tile_region)s" % self.renderSettings
            else:
                if self.renderer == "renderman" and "RenderMan_for_Maya" in self.CG_PLUGINS_DICT:
                    cmd += " -r rman"
                if self.renderer == "vray" and "vrayformaya" in self.CG_PLUGINS_DICT:
                    cmd += " -r vray"
                if self.renderer == "redshift" and "redshift_GPU" in self.CG_PLUGINS_DICT:
                    cmd += " -r redshift -logLevel 1"
                    gpu_n = "0,1"
                    cmd += " -gpu {%s}" % (gpu_n)
                else:
                    pass

        else:

            scene_info_render_dict = self.G_TASK_JSON_DICT['scene_info_render']
            renderer_list = CLASS_MAYA_UTIL.dict_get(scene_info_render_dict,
                                                     "renderer")
            self.G_DEBUG_LOG.info(renderer_list)

            if "redshift_GPU" in self.CG_PLUGINS_DICT and "redshift" in renderer_list:
                cmd += " -r redshift -logLevel 1"
                gpu_n = "0,1"
                cmd += " -gpu {%s}" % (gpu_n)

            if "RenderMan_for_Maya" in self.CG_PLUGINS_DICT and "renderman" in renderer_list:
                cmd += " -r rman"

        max_threads_number = int(multiprocessing.cpu_count())

        if " -r " not in cmd and float(self.CG_VERSION) < 2017:
            # cmd += " -mr:art -mr:aml"
            cmd += " -mr:rt %s -mr:aml" % max_threads_number

        # self.RENDERCMD = cmd
        options = {}
        options["output"] = self.renderSettings["output"]
        options["output_frame"] = self.renderSettings["output_frame"]
        options["g_one_machine_multiframe"] = self.g_one_machine_multiframe

        #-------------get custom render cmd-------------

        # -------------add post render cmd-------------
        if self.g_one_machine_multiframe is True:
            cmd += " -postFrame \"python \\\"options=%s;execfile(\\\\\\\"%s\\\\\\\")\\\";\"" % (
                options, self.G_RN_MAYA_POSTRENDER)

        if "-r rman" in cmd:
            cmd += " -setAttr Format:resolution \"%(width)s %(height)s\" \"%(maya_file)s\"" % self.renderSettings
        else:
            cmd += " -x %(width)s -y %(height)s \"%(maya_file)s\"" % self.renderSettings

        print("render cmd info:")
        sys.stdout.flush()
        return cmd
Example #35
    def __init__(self, params):

        self.param_br = 0.0    # Daily birth rate
        self.param_dr = 0.0     # Daily mortality rate except infected people
        self.param_vr = 0.0           # Daily vaccination rate (Ratio of susceptible
                                       # population getting vaccinated)
        self.param_vir = 0.0           # Ratio of the immunized after vaccination
        self.param_mir = 0.0           # Maternal immunization rate

        self.param_beta_exp = params[0]/100.0      # Susceptible to exposed transition constant
        self.param_qr  = params[1]/100.0            # Daily quarantine rate (Ratio of Exposed getting Quarantined)
        self.param_beta_inf = 0.0                   # Susceptible to infected transition constant
        self.param_sir = params[2]/100.0          # Daily isolation rate (Ratio of Infected getting Isolated)

        self.param_eps_exp = params[3]/100.0       # Disease transmission rate of exposed compared to the infected
        self.param_eps_qua = params[4]/100.0       # Disease transmission rate of quarantined compared to the infected
        self.param_eps_sev  = params[5]/100.0       # Disease transmission rate of Severe Infected compared to Infected

        self.param_hosp_capacity = params[6]   # Maximum amount patients that hospital can accommodate

        self.param_gamma_mor = 0.0    # Infected to Dead transition probability
        self.param_gamma_mor1 = params[7]/100.0 # Severe Infected to Dead transition probability
        self.param_gamma_mor2 = params[8]/100.0 # Severe Infected to Dead transition probability (Hospital Cap. Exceeded)
        self.param_gamma_im = params[9]/100.0      # Infected to Recovery Immunized transition probability

        self.param_dt = 1/12                # Sampling time in days (1/24 corresponds to one hour)
        self.param_sim_len = params[10]       # Length of simulation in days

        self.param_num_states = 0    # Number of states
        self.param_num_sim = int(self.param_sim_len / self.param_dt) + 1       # Number of simulation

        self.param_t_exp = params[11]           # Incubation period (The period from the start of
                                      # incubation to the end of the incubation state)
        self.param_t_inf = params[12]           # Infection period (The period from the start of
                                      # infection to the end of the infection state)
        self.param_t_vac = 3 # Vaccination immunization period (The time to
                                      # reach immunization after being vaccinated)

        self.param_n_exp = int(self.param_t_exp / self.param_dt)
        self.param_n_inf = int(self.param_t_inf / self.param_dt)
        self.param_n_vac = int(self.param_t_vac / self.param_dt)

        self.param_save_res = 1
        self.param_disp_progress = 1
        self.param_disp_interval = 100
        self.param_vis_on = 1                 # Visualize results after simulation

        np.random.seed(1)
        self.param_rand_seed = np.random.randint(low = 1, high = 100, size = 625)

        # Define the initial values for the states
        self.init_susceptible = params[13]
        self.init_exposed = params[14]
        self.init_quarantined = 0.0
        self.init_infected = 0.0
        self.init_isolated = 0.0
        self.init_vaccination_imm = 0.0
        self.init_maternally_imm = 0.0
        self.init_recovery_imm = 0.0
        self.init_severe_infected = 0

        # Define states
        self.states_x = [0, self.init_susceptible]
        self.states_dx = []
        self.states_name = ['Birth', 'Susceptible']
        self.states_type = ['Birth', 'Susceptible']

        # Define transitions
        self.source = ['Birth', 'Birth']
        self.dest = ['Susceptible', 'Maternally_Immunized' ]
        self.source_ind = []
        self.dest_ind = []

        self.num_cores = multiprocessing.cpu_count()
Example #36
from spams import omp, ompMask, lasso, lassoMask, somp, l1L2BCD, cd
import spams
import time
from ksvd import KSVD_Encode
from skimage import io
from sklearn.feature_extraction import image
from sklearn.linear_model import lasso_path, orthogonal_mp
from scipy.fftpack import dct
import matplotlib.pyplot as plt


from joblib import Parallel, delayed
import multiprocessing
from threading import Thread

num_cores = multiprocessing.cpu_count()
par = Parallel(n_jobs=num_cores)
    

def clip(img):
    img = np.minimum(np.ones(img.shape), img)
    img = np.maximum(np.zeros(img.shape), img)
    return img

def recover_same_kron(p, kronprod):
    y = p.T
    print(y.shape, y.dtype)
    phikron = kronprod.T
    print(phikron.shape)
    sx = spams.omp(np.asfortranarray(y), np.asfortranarray(phikron), eps=0.001, L=6)
    # sx = orthogonal_mp(phikron, y, tol=0.01)
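The Parallel(n_jobs=num_cores) object created at the top of this example is typically driven with delayed; a minimal, generic usage sketch (the workload is illustrative):

from joblib import Parallel, delayed
import multiprocessing

def square(x):
    return x * x

num_cores = multiprocessing.cpu_count()
# Dispatch one task per input across all available cores.
results = Parallel(n_jobs=num_cores)(delayed(square)(i) for i in range(16))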
Example #37
def DoBuildman(options, args, toolchains=None, make_func=None, boards=None,
               clean_dir=False):
    """The main control code for buildman

    Args:
        options: Command line options object
        args: Command line arguments (list of strings)
        toolchains: Toolchains to use - this should be a Toolchains()
                object. If None, then it will be created and scanned
        make_func: Make function to use for the builder. This is called
                to execute 'make'. If this is None, the normal function
                will be used, which calls the 'make' tool with suitable
                arguments. This setting is useful for tests.
        boards: Boards() object to use, containing a list of available
                boards. If this is None it will be created and scanned.
    """
    global builder

    if options.full_help:
        pager = os.getenv('PAGER')
        if not pager:
            pager = 'more'
        fname = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
                             'README')
        command.Run(pager, fname)
        return 0

    gitutil.Setup()
    col = terminal.Color()

    options.git_dir = os.path.join(options.git, '.git')

    no_toolchains = toolchains is None
    if no_toolchains:
        toolchains = toolchain.Toolchains(options.override_toolchain)

    if options.fetch_arch:
        if options.fetch_arch == 'list':
            sorted_list = toolchains.ListArchs()
            print(col.Color(col.BLUE, 'Available architectures: %s\n' %
                            ' '.join(sorted_list)))
            return 0
        else:
            fetch_arch = options.fetch_arch
            if fetch_arch == 'all':
                fetch_arch = ','.join(toolchains.ListArchs())
                print(col.Color(col.CYAN, '\nDownloading toolchains: %s' %
                                fetch_arch))
            for arch in fetch_arch.split(','):
                print()
                ret = toolchains.FetchAndInstall(arch)
                if ret:
                    return ret
            return 0

    if no_toolchains:
        toolchains.GetSettings()
        toolchains.Scan(options.list_tool_chains and options.verbose)
    if options.list_tool_chains:
        toolchains.List()
        print()
        return 0

    # Work out how many commits to build. We want to build everything on the
    # branch. We also build the upstream commit as a control so we can see
    # problems introduced by the first commit on the branch.
    count = options.count
    has_range = options.branch and '..' in options.branch
    if count == -1:
        if not options.branch:
            count = 1
        else:
            if has_range:
                count, msg = gitutil.CountCommitsInRange(options.git_dir,
                                                         options.branch)
            else:
                count, msg = gitutil.CountCommitsInBranch(options.git_dir,
                                                          options.branch)
            if count is None:
                sys.exit(col.Color(col.RED, msg))
            elif count == 0:
                sys.exit(col.Color(col.RED, "Range '%s' has no commits" %
                                   options.branch))
            if msg:
                print(col.Color(col.YELLOW, msg))
            count += 1   # Build upstream commit also

    if not count:
        str = ("No commits found to process in branch '%s': "
               "set branch's upstream or use -c flag" % options.branch)
        sys.exit(col.Color(col.RED, str))

    # Work out what subset of the boards we are building
    if not boards:
        board_file = os.path.join(options.output_dir, 'boards.cfg')
        genboardscfg = os.path.join(options.git, 'tools/genboardscfg.py')
        status = subprocess.call([genboardscfg, '-o', board_file])
        if status != 0:
            sys.exit("Failed to generate boards.cfg")

        boards = board.Boards()
        boards.ReadBoards(board_file)

    exclude = []
    if options.exclude:
        for arg in options.exclude:
            exclude += arg.split(',')


    if options.boards:
        requested_boards = []
        for b in options.boards:
            requested_boards += b.split(',')
    else:
        requested_boards = None
    why_selected, board_warnings = boards.SelectBoards(args, exclude,
                                                       requested_boards)
    selected = boards.GetSelected()
    if not len(selected):
        sys.exit(col.Color(col.RED, 'No matching boards found'))

    # Read the metadata from the commits. First look at the upstream commit,
    # then the ones in the branch. We would like to do something like
    # upstream/master~..branch but that isn't possible if upstream/master is
    # a merge commit (it will list all the commits that form part of the
    # merge)
    # Conflicting tags are not a problem for buildman, since it does not use
    # them. For example, Series-version is not useful for buildman. On the
    # other hand conflicting tags will cause an error. So allow later tags
    # to overwrite earlier ones by setting allow_overwrite=True
    if options.branch:
        if count == -1:
            if has_range:
                range_expr = options.branch
            else:
                range_expr = gitutil.GetRangeInBranch(options.git_dir,
                                                      options.branch)
            upstream_commit = gitutil.GetUpstream(options.git_dir,
                                                  options.branch)
            series = patchstream.GetMetaDataForList(upstream_commit,
                options.git_dir, 1, series=None, allow_overwrite=True)

            series = patchstream.GetMetaDataForList(range_expr,
                    options.git_dir, None, series, allow_overwrite=True)
        else:
            # Honour the count
            series = patchstream.GetMetaDataForList(options.branch,
                    options.git_dir, count, series=None, allow_overwrite=True)
    else:
        series = None
        if not options.dry_run:
            options.verbose = True
            if not options.summary:
                options.show_errors = True

    # By default we have one thread per CPU. But if there are not enough jobs
    # we can have fewer threads and use a high '-j' value for make.
    if not options.threads:
        options.threads = min(multiprocessing.cpu_count(), len(selected))
    if not options.jobs:
        options.jobs = max(1, (multiprocessing.cpu_count() +
                len(selected) - 1) // len(selected))
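    # Worked example with hypothetical numbers: 8 CPUs and 3 selected boards
    # give threads = min(8, 3) = 3 and jobs = max(1, (8 + 3 - 1) // 3) = 3,
    # so each builder thread can pass -j3 to make.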

    if not options.step:
        options.step = len(series.commits) - 1

    gnu_make = command.Output(os.path.join(options.git,
            'scripts/show-gnu-make'), raise_on_error=False).rstrip()
    if not gnu_make:
        sys.exit('GNU Make not found')

    # Create a new builder with the selected options.
    output_dir = options.output_dir
    if options.branch:
        dirname = options.branch.replace('/', '_')
        # As a special case allow the board directory to be placed in the
        # output directory itself rather than any subdirectory.
        if not options.no_subdirs:
            output_dir = os.path.join(options.output_dir, dirname)
        if clean_dir and os.path.exists(output_dir):
            shutil.rmtree(output_dir)
    CheckOutputDir(output_dir)
    builder = Builder(toolchains, output_dir, options.git_dir,
            options.threads, options.jobs, gnu_make=gnu_make, checkout=True,
            show_unknown=options.show_unknown, step=options.step,
            no_subdirs=options.no_subdirs, full_path=options.full_path,
            verbose_build=options.verbose_build,
            incremental=options.incremental,
            per_board_out_dir=options.per_board_out_dir,
            config_only=options.config_only,
            squash_config_y=not options.preserve_config_y,
            warnings_as_errors=options.warnings_as_errors)
    builder.force_config_on_failure = not options.quick
    if make_func:
        builder.do_make = make_func

    # For a dry run, just show our actions as a sanity check
    if options.dry_run:
        ShowActions(series, why_selected, selected, builder, options,
                    board_warnings)
    else:
        builder.force_build = options.force_build
        builder.force_build_failures = options.force_build_failures
        builder.force_reconfig = options.force_reconfig
        builder.in_tree = options.in_tree

        # Work out which boards to build
        board_selected = boards.GetSelectedDict()

        if series:
            commits = series.commits
            # Number the commits for test purposes
            for commit in range(len(commits)):
                commits[commit].sequence = commit
        else:
            commits = None

        Print(GetActionSummary(options.summary, commits, board_selected,
                                options))

        # We can't show function sizes without board details at present
        if options.show_bloat:
            options.show_detail = True
        builder.SetDisplayOptions(options.show_errors, options.show_sizes,
                                  options.show_detail, options.show_bloat,
                                  options.list_error_boards,
                                  options.show_config,
                                  options.show_environment)
        if options.summary:
            builder.ShowSummary(commits, board_selected)
        else:
            fail, warned = builder.BuildBoards(commits, board_selected,
                                options.keep_outputs, options.verbose)
            if fail:
                return 128
            elif warned:
                return 129
    return 0
Example #38
0
            del df_m

    #df_predicted = df_predicted.append(df_temp, ignore_index=True)
    print(df_temp.shape)

    print(" PRN : " + str(prn) + "  acc : " + str(avg_acc / 30))

    return (df_temp, avg_acc, prn)


if __name__ == '__main__':
    total_err = []
    prn_list = []
    a = datetime.datetime.now().replace(microsecond=0)
    input_prn = df_final['prn'].unique()
    pool = mp.Pool(processes=mp.cpu_count())
    for df_pred, err, prn in pool.map(predict_each_satellite, input_prn):
        df_predicted = df_predicted.append(df_pred, ignore_index=True)
        total_err.append(err)
        prn_list.append(prn)
    pool.close()
    pool.join()
    b = datetime.datetime.now().replace(microsecond=0)
    print(b - a)

    # These were bare expressions (notebook leftovers); print/assign so they
    # have an effect when run as a script.
    print(df_predicted.shape)
    print(df_predicted)
    df_predicted = df_predicted.reset_index()
Example #39
0
import multiprocessing
from multiprocessing import Pool
import os


def wrapper(f):
    os.system('python runner_personal.py -t 12 ' + mydir + f)


if __name__ == '__main__':
    os.chdir('/stor/work/Ochman/brian/ConSpeciFix/database/')
    mydir = '/stor/work/Ochman/brian/dengueMix/'
    print(multiprocessing.cpu_count())
    p = Pool(5)
    args = os.listdir(mydir)
    p.map(wrapper, args)
Example #40
0
import multiprocessing


def machine_cpu_count():
    """
    :return: Retrieves the machine's CPU count
    :rtype: int
    """
    return multiprocessing.cpu_count()
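A minimal usage sketch (an illustration added here, not from the original project); the _square worker and the range(8) inputs are assumptions:

from multiprocessing import Pool


def _square(x):
    return x * x


if __name__ == '__main__':
    # Size the process pool from the helper above.
    with Pool(processes=machine_cpu_count()) as pool:
        print(pool.map(_square, range(8)))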
Example #41
0
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
"""
Demonstration of embarrassingly parallel loops so I can understand / remember
how to apply this to the rucio script
"""

import numpy as np
import multiprocessing
from multiprocessing import Pool
import time

MAXTHREADS = multiprocessing.cpu_count()
print(MAXTHREADS)
USETHREADS = 5

def f(x):
    return x*np.random.randn(100000)


if __name__ == '__main__':
    s = time.time()
    p = Pool(processes=USETHREADS)
    result = p.map(f, [1, 2, 3])
    time_taken = time.time() - s
    print("Time taken in pool: {}".format(time_taken))

    s = time.time()
def main():

	# Start logging

	logger = logging.getLogger("lg")
	logger.setLevel(logging.INFO)
	formatter = logging.Formatter(fmt='%(asctime)s - %(module)s - %(levelname)s - %(message)s')

	handler = logging.FileHandler('%s.log' %  os.path.splitext(os.path.basename(__file__))[0])
	handler.setLevel(logging.INFO)
	handler.setFormatter(formatter)
	logger.addHandler(handler)

	streamhandler = logging.StreamHandler()
	streamhandler.setLevel(logging.INFO)
	streamhandler.setFormatter(formatter)
	logger.addHandler(streamhandler)

	logger.info('Starting %s' % __file__)

	# Parse CMD

	parser = argparse.ArgumentParser(prog="nucleosomeDynamics_wf",  description="Nucleoseom Dynamics workflow")
		
	parser.add_argument("--config",  required=True,  type=Mugparams.check_json, metavar="CONFIG_JSON",
				help="JSON file containing workflow parameters")
	parser.add_argument("--root_dir",  required=True,  type=Mugparams.readable_dir, metavar="ABS_PATH",
				help="Absolute path of the user data directory.")
	parser.add_argument("--metadata",  required=True,  type=Mugparams.check_json, metavar="METADATA_JSON",
				help="JSON file containing MuG metadata files")
	parser.add_argument("--out_metadata",  required=True,  type=Mugparams.writeable_file, metavar="RESULTS_JSON",
				help="JSON file containing results metadata")
	parser.add_argument("-v", "--verbose", required=False, action="store_true", 
				help="increase output verbosity")
	parser.add_argument('--version', action='version', version='%(prog)s 0.1')

	args = parser.parse_args()

	if args.verbose:
		logger.setLevel(logging.DEBUG)
		handler.setLevel(logging.DEBUG)
		streamhandler.setLevel(logging.DEBUG)
		logger.debug("Verbose mode on")

	# Parse config
	Mugparams.process_arguments(args)


	# Print host info
	
	num_cores = multiprocessing.cpu_count()
	host      = socket.gethostname()
	#mem      = psutil.virtual_memory()
	logger.debug('HOST=%s CPUs=%s MEM=x' %(host,num_cores)) 


	# Run pipeline

	outfiles = run_pipeline(args, num_cores)

	# Results 

	prepare_results(args)
Example #43
0
class _RobustMNINormalizationInputSpec(BaseInterfaceInputSpec):
    # Enable deprecation
    package_version = niworkflows_version

    # Moving image.
    moving_image = File(exists=True,
                        mandatory=True,
                        desc='image to apply transformation to')
    # Reference image (optional).
    reference_image = File(exists=True, desc='override the reference image')
    # Moving mask (optional).
    moving_mask = File(exists=True, desc='moving image mask')
    # Reference mask (optional).
    reference_mask = File(exists=True, desc='reference image mask')
    # Lesion mask (optional).
    lesion_mask = File(exists=True, desc='lesion mask image')
    # Number of threads to use for ANTs/ITK processes.
    num_threads = traits.Int(cpu_count(),
                             usedefault=True,
                             nohash=True,
                             desc="Number of ITK threads to use")
    # ANTs parameter set to use.
    flavor = traits.Enum('precise',
                         'testing',
                         'fast',
                         usedefault=True,
                         desc='registration settings parameter set')
    # Template orientation.
    orientation = traits.Enum(
        'RAS',
        'LAS',
        mandatory=True,
        usedefault=True,
        desc='modify template orientation (should match input image)')
    # Modality of the reference image.
    reference = traits.Enum('T1w',
                            'T2w',
                            'boldref',
                            'PDw',
                            mandatory=True,
                            usedefault=True,
                            desc='set the reference modality for registration')
    # T1 or EPI registration?
    moving = traits.Enum('T1w',
                         'boldref',
                         usedefault=True,
                         mandatory=True,
                         desc='registration type')
    # Template to use as the default reference image.
    template = traits.Str('MNI152NLin2009cAsym',
                          usedefault=True,
                          desc='define the template to be used')
    # Load other settings from file.
    settings = traits.List(File(exists=True),
                           desc='pass on the list of settings files')
    # Resolution of the default template.
    template_spec = traits.DictStrAny(desc='template specifications')
    template_resolution = traits.Enum(1,
                                      2,
                                      desc='(DEPRECATED) template resolution')
    # Use explicit masking?
    explicit_masking = traits.Bool(True,
                                   usedefault=True,
                                   desc="""\
Set voxels outside the masks to zero thus creating an artificial border
that can drive the registration. Requires reliable and accurate masks.
See https://sourceforge.net/p/advants/discussion/840261/thread/27216e69/#c7ba\
""")
    initial_moving_transform = File(exists=True,
                                    desc='transform for initialization')
    float = traits.Bool(False,
                        usedefault=True,
                        desc='use single precision calculations')
Example #44
0
def default_concurrency():
    try:
        return int(os.environ.get('THRIFT_CROSSTEST_CONCURRENCY'))
    except (TypeError, ValueError):
        # Since much time is spent sleeping, use many threads
        return int(multiprocessing.cpu_count() * 1.25) + 1
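# Illustration with a hypothetical machine: with 8 CPUs and the environment
# variable unset, the fallback evaluates to int(8 * 1.25) + 1 = 11 threads.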
    def __init__(self, host=None,
                 api_key=None, api_key_prefix=None,
                 username=None, password=None,
                 discard_unknown_keys=False,
                 disabled_client_side_validations="",
                 server_index=None, server_variables=None,
                 server_operation_index=None, server_operation_variables=None,
                 ):
        """Constructor
        """
        self._base_path = "http://localhost:41101" if host is None else host
        """Default Base url
        """
        self.server_index = 0 if server_index is None and host is None else server_index
        self.server_operation_index = server_operation_index or {}
        """Default server index
        """
        self.server_variables = server_variables or {}
        self.server_operation_variables = server_operation_variables or {}
        """Default server variables
        """
        self.temp_folder_path = None
        """Temp file folder for downloading files
        """
        # Authentication Settings
        self.api_key = {}
        if api_key:
            self.api_key = api_key
        """dict to store API key(s)
        """
        self.api_key_prefix = {}
        if api_key_prefix:
            self.api_key_prefix = api_key_prefix
        """dict to store API prefix (e.g. Bearer)
        """
        self.refresh_api_key_hook = None
        """function hook to refresh API key if expired
        """
        self.username = username
        """Username for HTTP basic authentication
        """
        self.password = password
        """Password for HTTP basic authentication
        """
        self.discard_unknown_keys = discard_unknown_keys
        self.disabled_client_side_validations = disabled_client_side_validations
        self.logger = {}
        """Logging Settings
        """
        self.logger["package_logger"] = logging.getLogger("regula.facesdk.webclient.gen")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        """Log format
        """
        self.logger_stream_handler = None
        """Log stream handler
        """
        self.logger_file_handler = None
        """Log file handler
        """
        self.logger_file = None
        """Debug file location
        """
        self.debug = False
        """Debug switch
        """

        self.verify_ssl = True
        """SSL/TLS verification
           Set this to false to skip verifying SSL certificate when calling API
           from https server.
        """
        self.ssl_ca_cert = None
        """Set this to customize the certificate file to verify the peer.
        """
        self.cert_file = None
        """client certificate file
        """
        self.key_file = None
        """client key file
        """
        self.assert_hostname = None
        """Set this to True/False to enable/disable SSL hostname verification.
        """

        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
        """urllib3 connection pool's maximum number of connections saved
           per pool. urllib3 uses 1 connection as default value, but this is
           not the best value when you are making a lot of possibly parallel
           requests to the same host, which is often the case here.
           cpu_count * 5 is used as default value to increase performance.
        """

        self.proxy = None
        """Proxy URL
        """
        self.proxy_headers = None
        """Proxy headers
        """
        self.safe_chars_for_path_param = ''
        """Safe chars for path_param
        """
        self.retries = None
        """Adding retries to override urllib3 default value 3
        """
        # Enable client side validation
        self.client_side_validation = True
         'objective': 'binary',
         'metric': 'auc',
         'learning_rate': 0.01,
         'max_depth': 6,
         'num_leaves': 63,
         'max_bin': 255,
         
         'min_child_weight': 10,
         'min_data_in_leaf': 150,
         'reg_lambda': 0.5,  # L2 regularization term on weights.
         'reg_alpha': 0.5,  # L1 regularization term on weights.
         
         'colsample_bytree': 0.9,
         'subsample': 0.9,
#         'nthread': 32,
         'nthread': cpu_count(),
         'bagging_freq': 1,
         'verbose':-1,
         'seed': SEED
         }


if ONLY_ME:
    use_files = ['train_f']
else:
    use_files = ['train_']

drop_ids = pd.read_csv('../data/drop_ids.csv')['SK_ID_CURR']

# =============================================================================
# reset load
Example #47
0
def train_rl(
    output_dir,
    train_batch_size,
    eval_batch_size,
    env_name='Acrobot-v1',
    max_timestep=None,
    clip_rewards=False,
    rendered_env=False,
    resize=False,
    resize_dims=(105, 80),
    trainer_class=rl_trainers.PPO,
    n_epochs=10000,
    trajectory_dump_dir=None,
    num_actions=None,
    light_rl=False,
    light_rl_trainer=light_trainers.RLTrainer,
):
  """Train the RL agent.

  Args:
    output_dir: Output directory.
    train_batch_size: Number of parallel environments to use for training.
    eval_batch_size: Number of parallel environments to use for evaluation.
    env_name: Name of the environment.
    max_timestep: Int or None, the maximum number of timesteps in a trajectory.
      The environment is wrapped in a TimeLimit wrapper.
    clip_rewards: Whether to clip and discretize the rewards.
    rendered_env: Whether the environment has visual input. If so, a
      RenderedEnvProblem will be used.
    resize: Whether to resize the visual observations.
    resize_dims: Pair (height, width), dimensions to resize the visual
      observations to.
    trainer_class: RLTrainer class to use.
    n_epochs: Number epochs to run the training for.
    trajectory_dump_dir: Directory to dump trajectories to.
    num_actions: None unless one wants to use the discretization wrapper. Then
      num_actions specifies the number of discrete actions.
    light_rl: whether to use the light RL setting (experimental).
    light_rl_trainer: which light RL trainer to use (experimental).
  """
  if light_rl:
    task = rl_task.RLTask()
    env_name = task.env_name


  if FLAGS.jax_debug_nans:
    config.update('jax_debug_nans', True)

  if FLAGS.use_tpu:
    config.update('jax_platform_name', 'tpu')
  else:
    config.update('jax_platform_name', '')


  if light_rl:
    trainer = light_rl_trainer(task=task, output_dir=output_dir)
    trainer.run(n_epochs, n_epochs_is_total_epochs=True)
    trainer.close()
    return

  # TODO(pkozakowski): Find a better way to determine this.
  train_env_kwargs = {}
  eval_env_kwargs = {}
  if 'OnlineTuneEnv' in env_name:
    envs_output_dir = FLAGS.envs_output_dir or os.path.join(output_dir, 'envs')
    train_env_output_dir = os.path.join(envs_output_dir, 'train')
    eval_env_output_dir = os.path.join(envs_output_dir, 'eval')
    train_env_kwargs = {'output_dir': train_env_output_dir}
    eval_env_kwargs = {'output_dir': eval_env_output_dir}

  parallelism = multiprocessing.cpu_count() if FLAGS.parallelize_envs else 1

  logging.info('Num discretized actions %s', num_actions)
  logging.info('Resize %d', resize)

  train_env = env_problem_utils.make_env(
      batch_size=train_batch_size,
      env_problem_name=env_name,
      rendered_env=rendered_env,
      resize=resize,
      resize_dims=resize_dims,
      max_timestep=max_timestep,
      clip_rewards=clip_rewards,
      parallelism=parallelism,
      use_tpu=FLAGS.use_tpu,
      num_actions=num_actions,
      **train_env_kwargs)
  assert train_env

  eval_env = env_problem_utils.make_env(
      batch_size=eval_batch_size,
      env_problem_name=env_name,
      rendered_env=rendered_env,
      resize=resize,
      resize_dims=resize_dims,
      max_timestep=max_timestep,
      clip_rewards=clip_rewards,
      parallelism=parallelism,
      use_tpu=FLAGS.use_tpu,
      num_actions=num_actions,
      **eval_env_kwargs)
  assert eval_env

  def run_training_loop():
    """Runs the training loop."""
    logging.info('Starting the training loop.')

    trainer = trainer_class(
        output_dir=output_dir,
        train_env=train_env,
        eval_env=eval_env,
        trajectory_dump_dir=trajectory_dump_dir,
        async_mode=FLAGS.async_mode,
    )
    trainer.training_loop(n_epochs=n_epochs)

  if FLAGS.jax_debug_nans or FLAGS.disable_jit:
    with jax.disable_jit():
      run_training_loop()
  else:
    run_training_loop()
Example #48
0
    log.info('Removing container: %s' % name)
    subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    cmd_stdout, cmd_stderr = subproc.communicate()
    if cmd_stdout:
        log.debug(cmd_stdout)
    if cmd_stderr and \
           cmd_stderr != 'Error response from daemon: ' \
           'No such container: {}\n'.format(name):
        log.debug(cmd_stderr)


process_count = int(
    os.environ.get('PROCESS_COUNT', multiprocessing.cpu_count()))
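# Note: PROCESS_COUNT in the environment overrides the default above of one
# worker process per CPU.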
log = get_logger()
log.info('Running docker-puppet')
config_file = os.environ.get('CONFIG',
                             '/var/lib/docker-puppet/docker-puppet.json')
log.debug('CONFIG: %s' % config_file)
with open(config_file) as f:
    json_data = json.load(f)

# To save time we support configuring 'shared' services at the same
# time. For example configuring all of the heat services
# in a single container pass makes sense and will save some time.
# To support this we merge shared settings together here.
#
# We key off of config_volume as this should be the same for a
# given group of services.  We are also now specifying the container
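The merge itself is truncated above; the sketch below shows one way grouping by config_volume could look. The puppet_tags and step_config keys are assumptions for illustration, not the project's actual schema.

# Hedged sketch: fold service entries that share a config_volume into one
# record so they can be configured in a single container pass.
configs = {}
for service in json_data:
    volume = service['config_volume']
    merged = configs.setdefault(volume, {'config_volume': volume,
                                         'puppet_tags': set(),
                                         'step_config': []})
    if service.get('puppet_tags'):
        merged['puppet_tags'].update(service['puppet_tags'].split(','))
    if service.get('step_config'):
        merged['step_config'].append(service['step_config'])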
Example #49
0
    from thirdparty.fcrypt.fcrypt import crypt

_multiprocessing = None
try:
    import multiprocessing

    # problems on FreeBSD (Reference: https://web.archive.org/web/20110710041353/http://www.eggheadcafe.com/microsoft/Python/35880259/multiprocessing-on-freebsd.aspx)
    _ = multiprocessing.Queue()

    # problems with ctypes (Reference: https://github.com/sqlmapproject/sqlmap/issues/2952)
    _ = multiprocessing.Value('i')
except (ImportError, OSError, AttributeError):
    pass
else:
    try:
        if multiprocessing.cpu_count() > 1:
            _multiprocessing = multiprocessing
    except NotImplementedError:
        pass

import base64
import binascii
import gc
import math
import os
import re
import tempfile
import time
import zipfile

from hashlib import md5
Example #50
0
import re
import logging
import multiprocessing
import tqdm
from .getter import (
    get_films_page,
    get_profile_page,
)
from .parser import (
    auth_check,
    extract_movie_ratings,
    get_pages_count,
    write_data,
)

PARALLEL_PROC = multiprocessing.cpu_count()

def main():
    args = docopt(__doc__)
    user = args['<username>']
    cookie = args['<cookie>']
    assert all([user, cookie]), 'Empty arguments provided'
    file_format = (args['--format'] or 'json').lower()
    assert file_format in ('all', 'csv', 'json'), 'Supported file formats: all, csv, JSON'
    if args['--debug']:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    pool = multiprocessing.Pool(processes=PARALLEL_PROC)
    try:
        logging.info('Checking args...')
    cv2.destroyAllWindows()

    print("Video saved by the name Output_Video,mp4:")



if __name__ == '__main__':
    # Some variables that store paths:
    # TO change the input video or output video path one can change the following variables.
    # I was originally planning on doing argpars, but since I am using Pycharm and run scripts from there directly
    # I haven't used argparse.

    input_path = './../Videos/Input_video.mp4'
    output_path = './../Videos/Output_video.mp4'
    frames = read_frames(input_path)
    frame_seq = np.array_split(frames, multiprocessing.cpu_count())

    print("Printing OriginalSizes:")
    print("Total: {}, Batch 1: {}, Batch 2: {}, Batch 3: {}, Batch 4: {}, Total Again:{}".format(len(frames),
                                                                                                 len(frame_seq[0]),
                                                                                                 len(frame_seq[1]),
                                                                                                 len(frame_seq[2]),
                                                                                                 len(frame_seq[3]),
                                                                                                (len(frame_seq[0]) +
                                                                                                 len(frame_seq[1]) +
                                                                                                 len(frame_seq[2]) +
                                                                                                 len(frame_seq[3])
                                                                                                )
                                                                                                )
          )
    print()
# see <http://www.gnu.org/licenses/>.
#

from pysix.cphd import CPHDReader, CPHDWriter, Wideband
import sys
import multiprocessing

if __name__ == '__main__':
    if len(sys.argv) >= 3:
        inputPathname = sys.argv[1]
        outputPathname = sys.argv[2]
    else:
        print("Usage: " + sys.argv[0] + " <Input CPHD> <Output CPHD>")
        sys.exit(0)

    reader = CPHDReader(inputPathname, multiprocessing.cpu_count())
    writer = CPHDWriter(reader.getMetadata())
    wideband = reader.getWideband()
    writer.writeCPHD(outputPathname, wideband.read(), reader.getVBM(), 0)

    roundTrippedReader = CPHDReader(outputPathname,
                                    multiprocessing.cpu_count())
    roundTrippedWideband = roundTrippedReader.getWideband()
    assert (roundTrippedWideband.read() == wideband.read()).all()
    assert (roundTrippedReader.getVBM().toBuffer(0) ==
            reader.getVBM().toBuffer(0)).all()
    assert (roundTrippedReader.getFileHeader().toString() ==
            reader.getFileHeader().toString())
    assert roundTrippedReader.getMetadata() == reader.getMetadata()
    sys.exit(0)
Example #53
0
import time
import secrets
import logging
import random
import logging.handlers
import os
import multiprocessing

CPU_COUNT = multiprocessing.cpu_count()


def log_forever():

    last_log_time = 0.0
    last_log_length = 1
    update_logger = logging.getLogger("update")
    while True:
        length = random.randint(2, 40)
        log_str = f"{secrets.token_urlsafe(length)},{last_log_length} {last_log_time}ms"

        if last_log_time > 10:
            update_log_str = f"CPU{CPU_COUNT} python write {last_log_length} byte {last_log_time} ms"
            if last_log_time > 50:
                update_logger.error(update_log_str)
            else:
                update_logger.info(update_log_str)

        before = time.time() * 1000
        if last_log_time > 50:  # more than 50 ms
            logging.error(log_str)
        elif last_log_time > 10:
Example #54
0
import argparse
from multiprocessing import cpu_count

parser = argparse.ArgumentParser()

# train
parser.add_argument('-lw',
                    '--load-weights',
                    type=str,
                    help='load model weights (and continue training)')
parser.add_argument('-lm',
                    '--load-model',
                    type=str,
                    help='load model (and continue training)')

#num_workers = cpu_count()
parser.add_argument('--num-workers',
                    type=int,
                    default=cpu_count(),
                    help='Workers for multi-thread generators')

#use_metadata = True
parser.add_argument('--no-metadata', action='store_true', help='Use metadata')

#batch_size_cnn = 48
#batch_size_multi = 64
#batch_size_eval = 128
parser.add_argument('-b',
                    '--batch-size',
                    type=int,
                    default=32,
                    help='Batch size')

#metadata_length = 21
ms_pad = ms + 5
n = int(fs*0.001*ms_pad)
fp = open(filename,"rb")
x = io.get_samples_complex(fp,n)

# resample to 3*10.230 MHz

fsr = 3*10230000.0/fs
nco.mix(x,-coffset/fs,0)
h = scipy.signal.firwin(161,12e6/(fs/2),window='hanning')
x = scipy.signal.filtfilt(h,[1],x)
xr = np.interp((1/fsr)*np.arange(ms_pad*3*10230),np.arange(len(x)),np.real(x))
xi = np.interp((1/fsr)*np.arange(ms_pad*3*10230),np.arange(len(x)),np.imag(x))
x = xr+(1j)*xi

# iterate (in parallel) over PRNs of interest

def worker(p):
  x,prn = p
  metric,code,doppler = search(x,prn,doppler_search,ms)
  return 'prn %2d doppler % 7.1f metric % 7.1f code_offset %6.1f' % (prn,doppler,metric,code)

import multiprocessing as mp

cpus = mp.cpu_count()
results = mp.Pool(cpus).map(worker, map(lambda prn: (x,prn),prns))
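# Note: each (x, prn) task is pickled and sent to a worker process, so the
# resampled sample buffer is copied once per PRN searched.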

for r in results:
  print(r)
        xp.Cf1001i.append(vvv)

        vvv = (xp.F1001R[j] - xm.F1001R[j]) / (2 * dpp)
        xp.Cf1010r.append(vvv)

        vvv = (xp.F1010I[j] - xm.F1010I[j]) / (2 * dpp)
        xp.Cf1010i.append(vvv)

    os.remove(path + "/twiss.dp+." + var)
    os.remove(path + "/twiss.dp-." + var)
    return var, xp


if __name__ == '__main__':
    timeStartGlobal = time.time()
    numberofCPUs = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=numberofCPUs)

    ##### optionparser
    parser = optparse.OptionParser()
    parser.add_option("-a",
                      "--accel",
                      help="Which accelerator: LHCB1 LHCB2 SPS RHIC SOLEIL",
                      default="LHCB1",
                      dest="accel")
    parser.add_option("-p",
                      "--path",
                      help="path to save",
                      default="./",
                      dest="path")
    parser.add_option(
Example #57
0
def add_setup_args(parser):
    parser.add_argument('--train_url',
                        type=str,
                        default='https://github.com/chrischute/squad/data/train-v2.0.json')
    parser.add_argument('--dev_url',
                        type=str,
                        default='https://github.com/chrischute/squad/data/dev-v2.0.json')
    parser.add_argument('--test_url',
                        type=str,
                        default='https://github.com/chrischute/squad/data/test-v2.0.json')
    parser.add_argument('--glove_url',
                        type=str,
                        default='http://nlp.stanford.edu/data/glove.840B.300d.zip')
    parser.add_argument('--dev_meta_file',
                        type=str,
                        default='./data/dev_meta.json')
    parser.add_argument('--test_meta_file',
                        type=str,
                        default='./data/test_meta.json')
    parser.add_argument('--para_limit',
                        type=int,
                        default=400,
                        help='Max number of words in a paragraph')
    parser.add_argument('--ques_limit',
                        type=int,
                        default=50,
                        help='Max number of words to keep from a question')
    parser.add_argument('--test_para_limit',
                        type=int,
                        default=1000,
                        help='Max number of words in a paragraph at test time')
    parser.add_argument('--test_ques_limit',
                        type=int,
                        default=100,
                        help='Max number of words in a question at test time')
    parser.add_argument('--char_dim',
                        type=int,
                        default=64,
                        help='Size of char vectors (char-level embeddings)')
    parser.add_argument('--glove_dim',
                        type=int,
                        default=300,
                        help='Size of GloVe word vectors to use')
    parser.add_argument('--glove_num_vecs',
                        type=int,
                        default=2196017,
                        help='Number of GloVe vectors')
    parser.add_argument('--ans_limit',
                        type=int,
                        default=30,
                        help='Max number of words in a training example answer')
    parser.add_argument('--char_limit',
                        type=int,
                        default=16,
                        help='Max number of chars to keep from a word')
    parser.add_argument('--wv_file', default='data/glove.840B.300d/glove.840B.300d.txt',
                        help='path to word vector file.')
    parser.add_argument('--wv_dim', type=int, default=300,
                        help='word vector dimension.')
    parser.add_argument('--wv_cased', type=str2bool, nargs='?',
                        const=True, default=True,
                        help='treat the words as cased or not.')
    parser.add_argument('--sort_all', action='store_true',
                        help='sort the vocabulary by frequencies of all words. '
                             'Otherwise consider question words first.')
    parser.add_argument('--sample_size', type=int, default=0,
                        help='size of sample data (for debugging).')
    parser.add_argument('--threads', type=int, default=min(multiprocessing.cpu_count(), 16),
                        help='number of threads for preprocessing.')
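    # Illustration: the default is capped at 16, so a 32-core machine still
    # gets 16 preprocessing threads while a 4-core machine gets 4.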
    parser.add_argument('--batch_size', type=int, default=64,
                        help='batch size for multiprocess tokenizing and tagging.')
    args = parser.parse_args()
    return args
Example #58
0
def main():
    """Main"""
    parser = OptionParser(usage='Usage: %prog [options] eval_dir',
                          version='Barnum Cluster ' + module_version)
    parser.add_option(
        '-c',
        '--csv',
        action='store',
        type='str',
        default=None,
        help='Save CSV of results to given filepath (default: no CSV)')
    parser.add_option(
        '-p',
        '--plot',
        action='store',
        type='str',
        default=None,
        help=
        'Save plot as a PNG image to the given filepath (default: no plotting)'
    )
    parser.add_option(
        '-w',
        '--workers',
        action='store',
        dest='workers',
        type='int',
        default=cpu_count(),
        help='Number of workers to use (default: number of cores)')
    parser.add_option('--max-classes',
                      action='store',
                      type='int',
                      default=256,
                      help='How many classes to use (default: 256)')
    parser.add_option(
        '--min-samples',
        action='store',
        type='int',
        default=4,
        help='Minimum samples to form a cluster in DBSCAN (default: 4)')
    parser.add_option('--eps',
                      action='store',
                      type='float',
                      default=0.03,
                      help='Epsilon parameter to DBSCAN (default: 0.03)')

    options, args = parser.parse_args()

    if len(args) != 1 or options.workers < 1:
        parser.print_help()
        sys.exit(ERROR_INVALID_ARG)

    logger.log_start(20)
    logger.log_info(module_name, 'Barnum Cluster %s' % module_version)

    idirpath = args[0]

    if not os.path.isdir(idirpath):
        logger.log_error(module_name,
                         'ERROR: %s is not a directory' % idirpath)
        logger.log_stop()
        sys.exit(ERROR_INVALID_ARG)

    files = [
        os.path.join(idirpath, f) for f in os.listdir(idirpath)
        if os.path.isfile(os.path.join(idirpath, f))
    ]
    # We only care about clustering malicious traces
    mal_files = [fp for fp in files if 'malicious' in os.path.basename(fp)]
    num_mal = len(mal_files)

    # Calculate clustering metrics
    logger.log_info(module_name, "Parsing " + idirpath)
    pool = Pool(options.workers)
    data = [
        sample for sample in pool.map(
            parse_file, zip(mal_files, [options.max_classes] * num_mal))
        if sample
    ]
    pool.close()
    xs = np.array([sample[0] for sample in data])
    ns = [sample[1] for sample in data]

    # Clustering
    logger.log_info(module_name, "Calculating clusters")
    db = DBSCAN(eps=options.eps, min_samples=options.min_samples).fit(xs)
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    labels = db.labels_

    # Number of clusters in labels, ignoring noise if present.
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    n_noise = list(labels).count(-1)
    logger.log_info(module_name, '      Number of points: %d' % len(ns))
    logger.log_info(module_name, '    Number of clusters: %d' % n_clusters)
    logger.log_info(module_name, 'Number of noise points: %d' % n_noise)

    # Saving results as CSV
    if not options.csv is None:
        logger.log_info(module_name, "Saving CSV to %s" % options.csv)
        try:
            with open(options.csv, 'w') as csv_file:
                csv_file.write("cluster,filename\n")
                for label, name in zip(labels, ns):
                    csv_file.write(','.join([str(label), name]) + "\n")
        except Exception as ex:
            logger.log_error(module_name, "Failed to save CSV: %s" % str(ex))

    # Saving results as plot image
    if not options.plot is None:
        logger.log_info(module_name, "Generating plot")
        theta = radar_factory(options.max_classes, frame='polygon')
        fig, axes = plt.subplots(subplot_kw=dict(projection='radar'))
        colors = ['b', 'r', 'g', 'm', 'y']
        axes.set_varlabels([""])  # no varlabels, they aren't that meaningful
        axes.set_rgrids([0.2, 0.4, 0.6, 0.8])
        legend_labels = list()
        for label_key in set(labels):
            if label_key == -1:
                continue  # noise
            legend_labels.append(label_key)
            label_color = colors[label_key % len(colors)]
            # Calculate per-cluster average
            label_mask = (labels == label_key)
            label_points = xs[label_mask & core_samples_mask]
            label_means = np.mean(label_points, axis=0)
            axes.plot(theta, label_means, color=label_color)
            axes.fill(theta, label_means, facecolor=label_color, alpha=0.25)
        # Legend
        legend = axes.legend(legend_labels,
                             loc=(0.9, .95),
                             labelspacing=0.1,
                             fontsize='small')

        try:
            plt.savefig(options.plot)
        except:
            logger.log_error(module_name, "Failed to save plot")

    logger.log_stop()
#!/usr/bin/python
import os
import multiprocessing

# Enable multithreading for ccx
os.environ['OMP_NUM_THREADS'] = str(multiprocessing.cpu_count())
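# (The ccx solver reads OMP_NUM_THREADS from its environment, so this must be
# set before the 'ccx solve' call below.)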

os.system("cgx -b pre.fbl")
os.system("ccx solve")
os.system("monitor.py solve")
os.system("cgx -b anim.fbl")
os.system("cgx -b post.fbl")
Example #60
0
File: utils.py Project: emeti/WiPP
import argparse
import multiprocessing

import pandas as pd
import numpy as np


parser = argparse.ArgumentParser(description='Check configs for completeness.')
parser.add_argument(
    '-c', '--config', help='Path to config file'
)
parser.add_argument(
    '-t', '--type', help='Config type. Valid options are "tr" and "pp"'
)

CONFIG_DEFAULTS = {
    'static_data': {
        'high_resolution': False, 
        'cores': multiprocessing.cpu_count(),
        'peak_min_width': 0.5,
        'peak_min_mz': 3
    },
    'algorithms': {'XCMS-CW': True, 'XCMS-MF': True},
    'default_configs': {
        'directory': '../../pp_configs',
        'XCMS-CW': 'XCMS-CW_default.INI',
        'XCMS-MF': 'XCMS-MF_default.INI'
    },
    'XCMS_params': {'groupCorr': False},
    'merging': {'RT_tol': 0.2}
}
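
# A minimal sketch (an assumption added here, not part of the original file)
# of how these defaults might be folded into a user-supplied config dict:
# keys the user already set are kept, missing ones fall back to the defaults.
def apply_config_defaults(user_config, defaults=CONFIG_DEFAULTS):
    merged = {}
    for section, values in defaults.items():
        merged[section] = dict(values)
        merged[section].update(user_config.get(section, {}))
    return merged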


class bcolors: