def _read_multi(self, graphs, n_jobs, batch_size):
        """
        like read_single but with multiple processes
        """

        if n_jobs > 1:
            pool = Pool(processes=n_jobs)
        else:
            pool = Pool()

        # extract_c_and_i = lambda batch,args: [ extract_cores_and_interfaces(  [y]+args ) for y in batch ]

        results = pool.imap_unordered(extract_cips,
                                      self._multi_process_argbuilder(graphs, batch_size=batch_size))

        # the resulting cips can now be put into the grammar
        jobs_done = 0
        for batch in results:
            for exci in batch:
                if exci:  # exci might be None because the grouper fills up with empty problems
                    for exci_result_per_node in exci:
                        for cip in exci_result_per_node:
                            self._add_core_interface_data(cip)
                jobs_done += 1
                if jobs_done == self.multiprocess_jobcount and self.mp_prepared:
                    pool.terminate()
        pool.close()
        pool.join()
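
The example above streams batched results back with imap_unordered and calls terminate() once enough jobs are done. Below is a minimal, self-contained sketch of that pattern; the square worker and the early-exit condition are placeholders, not part of the original code.

from multiprocessing import Pool

def square(x):
    # the worker must be a top-level function so the pool can pickle it
    return x * x

if __name__ == '__main__':
    pool = Pool(processes=4)
    try:
        # imap_unordered yields results as soon as they finish, in any order
        for result in pool.imap_unordered(square, range(100)):
            if result > 2500:        # stop early once some condition is met
                pool.terminate()     # kill the remaining workers immediately
                break
    finally:
        pool.close()                 # no-op after terminate(), harmless otherwise
        pool.join()                  # wait for the worker processes to exit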
Example #2
def main():
    parser = argparse.ArgumentParser(description='Analyze a bandersnatch mirror.')
    parser.add_argument('--json',
                       help='save raw data to a json file',
                       default=None)
    args = parser.parse_args()
    concurrency = 8
    root = "/var/spool/pypi/web/packages/source/"
    p = Pool()
    results = {}
    try:
        try:
            for path, result in \
                p.imap_unordered(analyse_sdist, yield_packages(root)):
                results[path] = result
            p.close()
        except:
            p.terminate()
            raise
    finally:
        p.join()
    if args.json:
        with open(args.json, 'wb') as f:
            f.write(json.dumps(results))
    pprint.pprint(results)
Example #3
def main():

    parser = ArgumentParser(description="Speed up your SHA. A different hash style.")
    parser.add_argument('-1', '--sha1', action='store_true')
    parser.add_argument('-2', '--sha224', action='store_true')
    parser.add_argument('-3', '--sha256', action='store_true')
    parser.add_argument('-4', '--sha384', action='store_true')
    parser.add_argument('-5', '--sha512', action='store_true')
    parser.add_argument('-f', '--file', type=str, help="The path to the file")

    if len(sys.argv) == 1:
        parser.print_help()
        return

    global args
    args = parser.parse_args()

    hashtree = ''

    big_file = open(args.file, 'rb')
    pool = Pool(multiprocessing.cpu_count())

    for chunk_hash in pool.imap(hashing, chunks(big_file)):
        hashtree += chunk_hash + ":hash"

    pool.terminate()

    print(str(hashing(hashtree.encode('ascii'))))
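
A condensed sketch of the chunked-hashing approach above; hash_chunk and read_chunks stand in for the hashing and chunks helpers that the example assumes, and sha256 with a 1 MiB chunk size is an illustrative choice.

import hashlib
from multiprocessing import Pool, cpu_count

def hash_chunk(chunk):
    # stand-in for hashing(): digest one chunk of the file
    return hashlib.sha256(chunk).hexdigest()

def read_chunks(path, size=1024 * 1024):
    # stand-in for chunks(): yield fixed-size pieces of the file
    with open(path, 'rb') as f:
        while True:
            piece = f.read(size)
            if not piece:
                break
            yield piece

if __name__ == '__main__':
    pool = Pool(cpu_count())
    hashtree = ''
    # imap (unlike imap_unordered) preserves input order, so the result is deterministic
    for chunk_hash in pool.imap(hash_chunk, read_chunks('big.bin')):
        hashtree += chunk_hash
    pool.terminate()
    print(hashlib.sha256(hashtree.encode('ascii')).hexdigest())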
Example #4
def run_repeated(name, num_trials, num_examples, incorrect):
    manager = Manager()
    q = manager.Queue()
    pool = Pool(NUM_WORKERS + 1) # Add one process for the listener

    # Start a listener that writes to the output file.
    filename = '{}-{}-{}.results'.format(name, num_examples, num_trials)
    pool.apply_async(listener, (filename, q))

    # Start worker jobs that run trials.
    jobs = []
    for i in range(num_trials):
        job = pool.apply_async(worker, (name, num_examples, q))
        jobs.append(job)
    
    results = []
    for job in jobs:
        results.append(job.get())

        # Check results for early exit
        num_incorrect = 0
        for result in results:
            if is_incorrect(result, incorrect):
                num_incorrect += 1
        if num_incorrect > NUM_TRIALS - MIN_CORRECT:
            pool.terminate() # Kill the rest of the jobs in the pool.
            break

    q.put('kill') # Tell the listener to stop running.
    pool.close()  
    
    print()
    return results
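
run_repeated above depends on external listener and worker functions plus a managed queue; the sketch below fills those in with simplified stand-ins (the filename and messages are placeholders) to show how a Manager queue lets pool tasks talk to a dedicated listener process.

from multiprocessing import Manager, Pool

def listener(filename, q):
    # stand-in listener: drain messages into a file until told to stop
    with open(filename, 'w') as f:
        while True:
            msg = q.get()
            if msg == 'kill':
                break
            f.write(str(msg) + '\n')
            f.flush()

def worker(name, q):
    # stand-in worker: push a result through the shared queue and return it
    result = 'trial result for {}'.format(name)
    q.put(result)
    return result

if __name__ == '__main__':
    manager = Manager()
    q = manager.Queue()    # a managed queue proxy can be passed to pool tasks
    pool = Pool(3)         # one extra slot so the long-running listener gets a process
    pool.apply_async(listener, ('demo.results', q))
    jobs = [pool.apply_async(worker, (name, q)) for name in ('a', 'b')]
    results = [job.get() for job in jobs]
    q.put('kill')          # tell the listener to stop running
    pool.close()
    pool.join()
    print(results)
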
def _doFastPoW(target, initialHash):
    import shared
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(pool.apply_async(_pool_worker, args = (i, initialHash, target, pool_size)))
    while True:
        if shared.shutdown:
            pool.terminate()
            while True:
                time.sleep(10) # Don't let this thread return here; it will return nothing and cause an exception in bitmessagemain.py
            return
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join() #Wait for the workers to exit...
                return result[0], result[1]
        time.sleep(0.2)
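
_doFastPoW above fans work out with apply_async and then polls AsyncResult.ready() so it can terminate every worker as soon as one succeeds. A minimal sketch of that polling loop, with a dummy search function in place of _pool_worker:

import time
from multiprocessing import Pool, cpu_count

def search(worker_id):
    # dummy stand-in for _pool_worker: each worker searches its own slice
    time.sleep(1 + worker_id)
    return worker_id, 'nonce-%d' % worker_id

def first_result():
    pool_size = cpu_count()
    pool = Pool(processes=pool_size)
    pending = [pool.apply_async(search, (i,)) for i in range(pool_size)]
    while True:
        for res in pending:
            if res.ready():          # poll instead of blocking on a single get()
                winner = res.get()
                pool.terminate()     # the first finished worker wins; drop the rest
                pool.join()          # wait for the workers to exit
                return winner
        time.sleep(0.2)

if __name__ == '__main__':
    print(first_result())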
Example #6
class JobPool(object):

    """
    Pool container.
    """
    pool = None
    message_queue = None

    def __init__(self, max_instances=4):
        self.message_queue = Queue()
        self.pool = Pool(max_instances, execute_task, (self.message_queue,))
        atexit.register(self.clear)

    def add_analysis(self, analysis):
        """
        Add analysis to the pool.
        """
        analysis.set_started()
        self.message_queue.put(analysis)

    def clear(self):
        """
        Pool cleanup.
        """
        self.pool.terminate()
        self.pool.join()
Example #7
class YaraJobPool(object):

    """
    Yara pool container.
    """
    pool = None
    message_queue = None

    def __init__(self, max_instances=3):
        self.message_queue = Queue()
        self.pool = Pool(max_instances, execute_yara_task,
                         (self.message_queue,))
        atexit.register(self.clear)

    def add_yara_task(self, yara_task):
        """
        Adds the yara task.
        """
        self.message_queue.put(yara_task)

    def clear(self):
        """
        Pool cleanup.
        """
        self.pool.terminate()
        self.pool.join()
def applyGrid(_geo_crimes, _n,_grid, _column):
    if _n > 128:
        _n = 128
        print("n was too big. Set to 128.")
    print("splitting crimes in to smaller frames to leverage paralelization")
    _l = len(_geo_crimes.index)
    _crimes_args = []
    _covered = 0
    for _i in range(_n-1):
        _a, _b = int(round(_i*(_l/_n))), int(round((_i+1)*(_l/_n)))
        _crimes_args.append(_geo_crimes[_a:_b])
        _covered = _covered + (_b - _a)
    _crimes_args.append(_geo_crimes[_covered:len(_geo_crimes.index)])
    print("{} data-chunks created.".format(len(_crimes_args)))
    
    
    print("Trying to start {} parallel processes.".format(_n))
    _pool = Pool(processes=_n)
    print("{} parallel process started.".format(_n))
    _result = _pool.starmap(_para_crimes_in_cell, zip(_crimes_args, 
                                          repeat(_grid), repeat(_column)))
    _pool.terminate()
    print("Process terminated.")
    _df = _result.pop(0)
    for _frame in _result:
        _df = _df.append(_frame)
    print("{} crimes where spatialised to their cell.".format(len(_df.index)))
    return _df
Example #9
class MultiProcessScheduler(LocalScheduler):
    def __init__(self, threads):
        LocalScheduler.__init__(self)
        self.threads = threads
        self.tasks = {}
        from multiprocessing import Pool
        self.pool = Pool(self.threads or 2)

    def start(self):
        pass

    def submitTasks(self, tasks):
        def callback(args):
            logger.debug("got answer: %s", args)
            tid, reason, result, update = args
            task = self.tasks.pop(tid)
            self.taskEnded(task, reason, result, update)

        for task in tasks:
            logger.debug("put task async: %s", task)
            self.tasks[task.id] = task
            self.pool.apply_async(run_task_in_process,
                [task, self.nextAttempId(), env.environ],
                callback=callback)

    def stop(self):
        self.pool.terminate()
        self.pool.join()
        logger.debug("process pool stopped")
Example #10
def main():
	if not os.path.isfile(task_filename):
		print 'Tasks not exist.'
		return None
	tasks = [task.strip('\r').strip('\n').strip() for task in open(task_filename, 'r').readlines()]

	dones = []
	if os.path.isfile(done_filename):
		dones = [task.strip('\r').strip('\n').strip() for task in open(done_filename, 'r').readlines()]

	for task in tasks:
		if not task in dones:
			task_queue.put(task)

	time.sleep(1)

	if not task_queue.empty():
		pool = Pool(thread + 1)
		try:
			pool.map(process, range(thread + 1))
			pool.close()
			pool.join()
			print '\n*************************************'
			print '*           Tasks all done          *'
			print '*************************************'
		except:
			print '\n*************************************'
			print '*        Tasks be terminated        *'
			print '*************************************'
		finally:
			task_queue.cancel_join_thread()
			done_queue.cancel_join_thread()
			result_queue.cancel_join_thread()
			pool.terminate()
def main():
    if len(sys.argv) != 3:
        print 'Usage: {} <file> <save_dir>'.format(sys.argv[0])
        sys.exit(1)

    csv_file, save_dir = sys.argv[1], sys.argv[2]
    frame = pandas.read_csv(csv_file, sep='\t', header=None)
    counters = map(Counter, [frame[i] for i in range(len(frame.columns))])

    pool = Pool()

    for i in range(len(counters)):
        for j in range(len(counters)):
            nr_keys = len(counters[i]) * len(counters[j])
            if len(counters[i]) > 200 or len(counters[j]) > 200 or nr_keys > 200 * 200:
                print 'too many keys columns `{},{}\': {}'.format(
                    i, j, nr_keys)
            else:
                print 'columns `{},{}\' passed'.format(i, j)
                pool.apply_async(do_main, [
                    csv_file, frame, i, j, os.path.join(
                        save_dir, '{}-{}.png'.format(i, j))])

    pool.close()
    pool.join()
    pool.terminate()
Example #12
def run(args):
    logfilepaths = find_file_paths(args.searchpath, args.patterns)
    print >> sys.stderr, "processing input from {0} files...".format(len(logfilepaths))

    p = Pool(args.nprocesses)
    r = []
    try:
        mr = p.map_async(process_tgen_log, logfilepaths)
        p.close()
        while not mr.ready(): mr.wait(1)
        r = mr.get()
    except KeyboardInterrupt:
        print >> sys.stderr, "interrupted, terminating process pool"
        p.terminate()
        p.join()
        sys.exit()

    d = {'nodes':{}}
    name_count, noname_count, success_count, error_count = 0, 0, 0, 0
    for item in r:
        if item is None:
            continue
        name, data = item[0], item[1]
        if name is None:
            noname_count += 1
            continue
        name_count += 1
        d['nodes'][name] = data
        success_count += item[2]
        error_count += item[3]

    print >> sys.stderr, "done processing input: {0} total successes, {1} total errors, {2} files with names, {3} files without names".format(success_count, error_count, name_count, noname_count)
    print >> sys.stderr, "dumping stats in {0}".format(args.prefix)
    dump(d, args.prefix, TGENJSON)
    print >> sys.stderr, "all done!"
Example #13
def main():

    parser = ArgumentParser(description="Speed up your SHA. A different hash style.")
    parser.add_argument("-1", "--sha1", action="store_true")
    parser.add_argument("-2", "--sha224", action="store_true")
    parser.add_argument("-3", "--sha256", action="store_true")
    parser.add_argument("-4", "--sha384", action="store_true")
    parser.add_argument("-5", "--sha512", action="store_true")
    parser.add_argument("-f", "--file", type=str, help="The path to the file")

    if len(sys.argv) == 1:
        parser.print_help()
        return

    global args
    args = parser.parse_args()

    hashtree = ""

    big_file = open(args.file, "rb")
    pool = Pool(multiprocessing.cpu_count())

    for chunk_hash in pool.imap(hashing, chunks(big_file)):
        hashtree = hashtree + chunk_hash

    pool.terminate()

    if os.path.getsize(args.file) < 20971520:
        print(hashtree)
    else:
        print(str(hashing(hashtree)))
Example #14
def call_scrape_func(siteList, db_collection, pool_size, db_auth, db_user,
                     db_pass):
    """
    Helper function to iterate over a list of RSS feeds and scrape each.

    Parameters
    ----------

    siteList: dictionary
                Dictionary of sites, with a nickname as the key and RSS URL
                as the value.

    db_collection : collection
                    Mongo collection to put stories

    pool_size : int
                Number of processes to distribute work
    """
    pool = Pool(pool_size)
    results = [pool.apply_async(scrape_func, (address, lang, website,
                                              db_collection, db_auth, db_user,
                                              db_pass))
               for address, (website, lang) in siteList.iteritems()]
    timeout = [r.get(9999999) for r in results]
    pool.terminate()
    logger.info('Completed full scrape.')
Example #15
 def loop(self, argv = sys.argv):
   # Parse arguments
   self.parse(argv)
   # Check for help option
   if self.isParameter('info'):
     self.info()
     return
   # Call start function
   self.start()
   # Number of processors
   nproc = int(self.getParameter('nproc', '1')) 
   # Run in multiprocessing if requested
   if nproc > 1:
     # Create the log directory if needed
     if not os.path.exists('%s/logs' % Common.NeatDirectory):
       os.makedirs('%s/logs' % Common.NeatDirectory)
     # Create a pool of workers
     pool = Pool(processes = nproc)
     try:
       # Loop over the channels  
       for set in self.__loopsets:
         pool.apply_async(self.wrapper, (set,))
         time.sleep(1)
       pool.close(); pool.join()
     except KeyboardInterrupt:
       pool.terminate(); pool.join()
   else:
     # Run the processes sequentially
     for set in self.__loopsets:      
       self.process(set)
   # Call end function
   self.end()
Example #16
    def run(self):
        """
        Run the Monte carlo algorithm using multi-thread techniques
        @return:
        """
        self.cancel = False

        self.initialize()

        continue_run = True

        print('Monte Carlo run, Not implemented')
        prog = 0.0
        iter = 0
        err = 0
        std_sum = 0
        self.emit(SIGNAL('progress(float)'), prog)

        # setup the multi-treading variables
        cores = cpu_count()
        pool = Pool(processes=cores)

        pf_instances = list()
        for i in range(cores):
            pf_instances.append(self.time_series.pf)

        while continue_run:

            # Execute time series of group runs in parallel
            mx_stdev_arr = pool.map(self.worker, pf_instances)

            # Increase iteration
            iter += 1

            std_sum += max(mx_stdev_arr)
            err = std_sum / iter
            if err == 0:
                err = 1e-200  # to avoid division by zero
            self.error_series.append(err)

            # emit the progress signal
            prog = 100 * self.tolerance / err
            if prog > 100:
                prog = 100
            self.emit(SIGNAL('progress(float)'), prog)
            # self.emit(SIGNAL('progress(float)'), 100 * (iter+1)/self.max_iterations)

            if self.cancel:
                continue_run = False

            # check if to stop
            if iter >= self.max_iterations or err <= self.tolerance:
                continue_run = False

        # close pools
        pool.close()
        pool.terminate()

        # send the finish signal
        self.emit(SIGNAL('done()'))
Example #17
def run(args):
    print >> sys.stderr, "processing input from {0}...".format(args.logpath)
    source, xzproc = source_prepare(args.logpath)

    d = {'ticks':{}, 'nodes':{}}
    m = {'mem':0, 'hours':0}
    p = Pool(args.nprocesses)
    try:
        lines = []
        for line in source:
            if args.tee: sys.stdout.write(line)
            lines.append(line)
            if len(lines) > args.nprocesses*NUMLINES:
                d, m = do_reduce(d, m, do_map(p, lines))
                lines = []
        if len(lines) > 0: d, m = do_reduce(d, m, do_map(p, lines))
        p.close()
    except KeyboardInterrupt:
        print >> sys.stderr, "interrupted, terminating process pool"
        p.terminate()
        p.join()
        sys.exit()

    source_cleanup(args.logpath, source, xzproc)

    print >> sys.stderr, "done processing input: simulation ran for {0} hours and consumed {1} GiB of RAM".format(m['hours'], m['mem'])
    print >> sys.stderr, "dumping stats in {0}".format(args.prefix)
    dump(d, args.prefix, SHADOWJSON)
    print >> sys.stderr, "all done!"
Example #18
class MultiProcessScheduler(LocalScheduler):
    def __init__(self, threads):
        LocalScheduler.__init__(self)
        self.threads = threads
        self.tasks = {}
        from multiprocessing import Pool
        self.pool = Pool(self.threads or 2)

    def start(self):
        pass

    def submitTasks(self, tasks):
        total, self.finished = len(tasks), 0
        def callback(args):
            logger.debug("got answer: %s", args)
            tid, reason, result, update = args
            task = self.tasks.pop(tid)
            self.finished += 1
            logger.info("Task %s finished (%d/%d)        \x1b[1A",
                tid, self.finished, total)
            if self.finished == total:
                logger.info("\r" + " "*80 + "\x1b[1A") # erase the progress bar
            self.taskEnded(task, reason, result, update)

        for task in tasks:
            logger.debug("put task async: %s", task)
            self.tasks[task.id] = task
            self.pool.apply_async(run_task_in_process,
                [task, self.nextAttempId(), env.environ],
                callback=callback)

    def stop(self):
        self.pool.terminate()
        self.pool.join()
        logger.debug("process pool stopped")
Example #19
def main_internal(args, name='mxsniff'):
    """
    Console script

    >>> main_internal(['*****@*****.**'])
    [email protected]: google-gmail
    """
    import argparse
    import json
    from multiprocessing import Pool

    parser = argparse.ArgumentParser(
        prog=name,
        description='Identify email service providers given an email address, URL or domain name',
        fromfile_prefix_chars='@')
    parser.add_argument('names', metavar='email_or_url', nargs='+',
        help="email or URL to look up; use @filename to load from a file")
    parser.add_argument('-v', '--verbose', action='store_true',
        help="show both provider name and mail server names")
    parser.add_argument('-i', '--ignore-errors', action='store_true',
        help="ignore DNS lookup errors and continue with next item")
    args = parser.parse_args(args)

    pool = Pool(processes=10)
    it = pool.imap(multiprocess_mxsniff, args.names, 10)
    try:
        for result in it:
            if args.verbose:
                print(json.dumps(result) + ',')
            else:
                print("{item}: {provider}".format(item=result['query'], provider=', '.join(result['match'])))
    except KeyboardInterrupt:
        pool.terminate()
 def processBody(self):
     queue = ready_queue(self.url, self.body)
     #print "found %i links to queue" % len(queue)
     self.text = stripPunctuation(self.remove_html_tags(stripScript(self.body)))
     if len(self.text) > 5000:
         offset = 0
         i = 0
         l = []
         while True:
             j = self.findnth(self.text[i:],' ',500)
             offset += j
             if j == -1:
                 break
             l.append(self.text[i:j])
             i = offset + j+1
         logger.debug("processing with %i threads" % len(l))
         try:
             if len(l) == 0:
                 return []
             pool = Pool(processes=(len(l)))
             self.keyword_dicts = pool.map(rankKeywords, l)
         except KeyboardInterrupt:
             pool.terminate()
             pool.join()
             sys.exit()
         else:
             pool.close()
             pool.join()
         logger.debug("processed, returned %i dicts" % len(self.keyword_dicts))
     else:
         self.keyword_dicts.append(rankKeywords(self.text))
     return queue
def call_func_parallel(func, args_iterator, workers=-1):
    from multiprocessing import Pool
    if workers == -1:
        workers = get_core_count()
    pool = Pool(workers)
    pool.starmap(func, args_iterator)
    pool.terminate()
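
call_func_parallel above (like applyGrid earlier) relies on Pool.starmap, which unpacks each argument tuple before calling the worker; a minimal sketch with a placeholder scale function:

from itertools import repeat
from multiprocessing import Pool, cpu_count

def scale(value, factor):
    # placeholder worker: starmap calls scale(value, factor) for every tuple
    return value * factor

if __name__ == '__main__':
    pool = Pool(cpu_count())
    results = pool.starmap(scale, zip(range(10), repeat(3)))
    pool.terminate()    # mirrors the example above; close() + join() is the gentler shutdown
    print(results)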
Example #22
    def Execute(self, num_parallel):
        """Starts to run tests.

    This method watches all test command until all of them are finished.

    Args:
      num_parallel: Allows "num_parallel" tasks at once.
    Returns:
      An failed command list. Each command represents as one-line
      command-line string. If this list is empty, all tests are passed.
    """
        # TODO(nona): Show progress report for debugging.
        try:
            pool = Pool(processes=num_parallel)
            params = [(command, self._gtest_report_dir) for command in self._test_commands]
            # Workaround against http://bugs.python.org/issue8296
            # See also http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
            async_results = pool.map_async(_ExecuteTest, params)
            while True:
                try:
                    results = async_results.get(1000000)
                    break
                except TimeoutError:
                    pass
            pool.close()
            return [" ".join(result["command"]) for result in results if not result["result"]]
        except:
            pool.terminate()
            logging.fatal("Exception occurred.")
            raise
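
The Execute method above works around CPython issue 8296 by using map_async plus get() with a huge timeout so that KeyboardInterrupt still reaches the main process; a minimal sketch of that workaround, with run_test standing in for _ExecuteTest and placeholder commands:

from multiprocessing import Pool, TimeoutError

def run_test(command):
    # stand-in for _ExecuteTest: pretend every command passes
    return {'command': command, 'result': True}

if __name__ == '__main__':
    pool = Pool(processes=4)
    try:
        # a plain pool.map() can swallow Ctrl-C; get() with a large timeout
        # keeps the main process interruptible (the issue referenced above)
        async_results = pool.map_async(run_test, ['cmd_a', 'cmd_b', 'cmd_c'])
        while True:
            try:
                results = async_results.get(1000000)
                break
            except TimeoutError:
                pass
        pool.close()
        print(results)
    except:
        pool.terminate()
        raise
    finally:
        pool.join()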
Example #23
def _doFastPoW(target, initialHash):
    logger.debug("Fast PoW start")
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(pool.apply_async(_pool_worker, args = (i, initialHash, target, pool_size)))
    while True:
        if shutdown >= 1:
            pool.terminate()
            raise Exception("Interrupted")
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join() #Wait for the workers to exit...
                logger.debug("Fast PoW done")
                return result[0], result[1]
        time.sleep(0.2)
Example #24
def replaceText(text_test,apikey):
  urls = []
  urls_in_order = []
  for url in  re.findall(r'(https?://[^\s]+)', text_test):
    newurl = url.split('"')[0].split('<')[0]
    while newurl[-1] == '.' or newurl[-1] == ')' or newurl[-1] == '!':
      newurl = newurl[:-1]
    if not apikey:
      urls.append(newurl)
    else:
      urls.append((newurl,apikey))
    urls_in_order.append(newurl)


  f = getWebArchiveLink
  if apikey:
    f = getPermaccLink
  p = Pool(cpu_count())
  conversion = {}
  for result in p.map(f, list(set(urls))):
    conversion[result[0]] = result[1]    
  p.terminate()

  print conversion
  curPos = 0
  for url in urls_in_order:
    if url in text_test[curPos:]:
      print url
      print conversion[url]
      print text_test[curPos:]
      newPos = text_test.index(url)
      text_test = text_test[0:curPos] + text_test[curPos:].replace(url,conversion[url],1)
      curPos = newPos

  return text_test  
def pricing(dual):
    cpus = cpu_count() - int(argv[2])
    '''process for getting new columns'''
    final = pow(2, K)
    if K < 23:
        section = final
    else:
        section = 100 * cpus # try different values
    to = 0
    since = 1
    manager = Manager()
    elements = manager.list([RETAILERS, DCS, PLANTS])
    out = manager.Queue() # queue with the result from each worker
    while to < final:
        p = Pool(cpus)
        to = min(since + section, final)
        boss = p.apply_async(coordinator, (out,))
        workers = [p.apply_async(work, (k, elements, dual, out))  for k in xrange(since, to)]
        enviados = 0
        for w in workers:
            enviados += w.get()
        out.put('ok')
        a = boss.get()
        assert a.counter == enviados
        since = to + 1
        p.terminate()
    return a
Example #26
def plot_zphot_zspec(Nthreads):

    from multiprocessing import Pool

    match = filter('GAMA-MATCHED')
    n_samples = match['ID'].shape[0]

    pool = Pool(Nthreads)
    mapfn = pool.map
    Nchunk = np.ceil(1. / Nthreads * n_samples).astype(np.int)
    arglist = [None] * Nthreads
    for i in range(Nthreads):
        s = int(i * Nchunk)
        e = int(s + Nchunk)
        if i == Nthreads - 1: e = 203024
        print s, e
        arglist[i] = (s, e)
    result = list(mapfn(match_index, [ars for ars in arglist]))
    result = np.concatenate(result)
    #print result.flatten()
    np.savetxt("zphot_matched.txt" , result.flatten())
    pool.close()
    pool.terminate()
    pool.join()

    return None
Example #27
def main():
    logging.basicConfig(level=logging.DEBUG)
    urls = []
    with open('urls-50k.csv') as csvfile:
        reader = csv.DictReader(csvfile, ['url', 'cnt'])
        urls = [row['url'] for row in reader]
        urls = urls[1:]     # strip out header row
    # normalize URL encoding of '://'
    urls = [url.replace('%3A%2F%2F', '://') for url in urls]
    # dedupe
    urls = set(urls)
    pool = Pool(processes=200)
    promise = pool.map_async(consume, urls)
    results = []
    try:
        results = promise.get()
    except KeyboardInterrupt:
        logging.error('Terminating worker pool')
        pool.terminate()
        pool.join()
        return
    print "--ALL RESULTS--"
    print results
    print "--RESULTS THAT MATCH--"
    print [result['url'] for result in results
           if result and result['magic_viewport']]
    print "Number of URLs scanned:", len(urls)
    print "Failed checks", len([True for result in results if not result])
    print "Have it:", len([True for result in results
                           if result and result['magic_viewport']])
    print "Don't have it:", len([False for result in results
                                if result and not result['magic_viewport']])

if __name__ == '__main__':
    main()
def harmony_search(objective_function, num_processes, num_iterations, initial_harmonies=None):
    """
        Here, we use multiprocessing.Pool to do multiple harmony searches simultaneously. Since HS is stochastic (unless random_seed is set),
        multiple runs can find different results. We run the specified number of iterations on the specified number of processes and return
        an instance of HarmonySearchResults.
    """
    pool = Pool(num_processes)
    try:
        start = datetime.now()
        pool_results = [pool.apply_async(worker, args=(objective_function, initial_harmonies,)) for i in range(num_iterations)]
        pool.close()  # no more tasks will be submitted to the pool
        pool.join()  # wait for all tasks to finish before moving on
        end = datetime.now()
        elapsed_time = end - start

        # find best harmony from all iterations
        best_harmony = None
        best_fitness = float('-inf') if objective_function.maximize() else float('+inf')
        harmony_memories = list()
        harmony_histories = list()
        for result in pool_results:
            harmony, fitness, harmony_memory, harmony_history = result.get()  # multiprocessing.pool.AsyncResult is returned for each process, so we need to call get() to pull out the value
            if (objective_function.maximize() and fitness > best_fitness) or (not objective_function.maximize() and fitness < best_fitness):
                best_harmony = harmony
                best_fitness = fitness
            harmony_memories.append(harmony_memory)
            harmony_histories.append(harmony_history)

        return HarmonySearchResults(elapsed_time=elapsed_time, best_harmony=best_harmony, best_fitness=best_fitness,\
                                    harmony_memories=harmony_memories, harmony_histories=harmony_histories)
    except KeyboardInterrupt:
        pool.terminate()
def crack(ssid, clientMac, APMac, Anonce, Snonce, mic, data, passQueue):
    foundPassQ = Queue()
    try:
        timeA = datetime.now()
        startSize = passQueue.qsize()
    except:
        pass
    pool = Pool(numOfPs, crackProcess, (ssid, clientMac, APMac, Anonce, Snonce, mic, data, passQueue, foundPassQ))
    while True:
        sleep(1)
        try:
            timeB = datetime.now()
            currentSize = passQueue.qsize()
            print str(100 - 100.0 * currentSize / startSize) + "% done. " + str((startSize - currentSize) / (timeB - timeA).total_seconds()) + " hashes per second"
        except:
            pass
        if foundPassQ.empty():
            if passQueue.empty():
                returnVal = False
                break
        else:
            passphrase = foundPassQ.get()
            returnVal = passphrase
            break
    pool.terminate()
    return returnVal
Example #30
def run_ntuples(analysis, channel, period, samples, loglevel, **kwargs):
    '''Run a given analyzer for the analysis'''
    logger = logging.getLogger(__name__)
    test = kwargs.pop('test',False)
    metShift = kwargs.pop('metShift','')
    ntup_dir = './ntuples/%s_%iTeV_%s' % (analysis, period, channel)
    python_mkdir(ntup_dir)
    root_dir, sample_names = get_sample_names(analysis,period,samples,**kwargs)


    filelists = {}
    for sample in sample_names:
        sampledir = '%s/%s' % (root_dir, sample)
        filelists[sample] = ['%s/%s' % (sampledir, x) for x in os.listdir(sampledir)]

    if len(sample_names)==1 or test: # only one, or it's a test; don't use map
        name = sample_names[0]
        outname =  "%s/%s.root" % (ntup_dir, name)
        if test: outname = 'test.root'
        run_analyzer((analysis, channel, name, filelists[name], outname, period, metShift, loglevel))
        return 0

    p = Pool(8)
    try:
        p.map_async(run_analyzer, [(analysis, channel, name, filelists[name], "%s/%s.root" % (ntup_dir, name), period, metShift, loglevel) for name in sample_names]).get(999999)
    except KeyboardInterrupt:
        p.terminate()
        logger.info('Analyzer cancelled')
        sys.exit(1)
   
    return 0
Example #31
    def say(self, msg, dynamic=False, end=False, cmd=None):
        """Method to give text-to-speech output(using **The Festival Speech Synthesis System**), print the response into console and **send a tweet**.

        Args:
            msg (str):  Message to be read by Dragonfire or turned into a tweet.

        Keyword Args:
            dynamic (bool):     Dynamically print the output into console?
            end (bool):         Is it the end of the dynamic printing?
            cmd (str):          Bash command.

        Returns:
            bool:  True or False

        .. note::

            This method is extremely polymorphic so use it carefully.
             - If you call it in `--server` mode it tweets. Otherwise it prints the response into the console.
             - If the `--silent` option is not fed then it also gives text-to-speech output. Otherwise it remains silent.
             - If response is more than 10000 characters it does not print.
             - If `--headless` option is not fed then it shows a speaking female head animation on the screen using `realhud` Python C extension.

        """

        if self.server:
            text = "@" + self.twitter_user + " " + msg  # .upper()
            text = (text[:TWITTER_CHAR_LIMIT]) if len(text) > TWITTER_CHAR_LIMIT else text
            if cmd:
                if len(cmd) > 1:
                    if cmd[0] == "sensible-browser":
                        reduction = len(text + " " + cmd[1]) - TWITTER_CHAR_LIMIT
                        if reduction < 1:
                            reduction = None
                        text = text[:reduction] + " " + cmd[1]
                        page = metadata_parser.MetadataParser(url=cmd[1])
                        img_url = page.get_metadata_link('image')
                        if img_url:
                            response = urllib.request.urlopen(img_url)
                            img_data = response.read()
                            img_extension = mimetypes.guess_extension(response.headers['content-type'])
                            filename = "/tmp/tmp" + uuid.uuid4().hex + img_extension
                            with open(filename, 'wb') as f:
                                f.write(img_data)

                            try:
                                self.twitter_api.update_with_media(filename, text)
                                if randint(1, 3) == 1:
                                    self.twitter_api.create_friendship(self.twitter_user, follow=True)
                            except TweepError as e:
                                print("Warning: " + e.response.text)
                            finally:
                                os.remove(filename)
                            return msg
            try:
                self.twitter_api.update_status(text)
                if randint(1, 10) == 1:
                    self.twitter_api.create_friendship(self.twitter_user, follow=True)
            except TweepError as e:
                print("Warning: " + e.response.text)
            return msg
        # if songRunning == True:
        #   subprocess.Popen(["rhythmbox-client","--pause"])
        if len(msg) < 10000:
            (columns, lines) = shutil.get_terminal_size()
            if dynamic:
                if end:
                    print(msg.upper())
                    print(columns * "_" + "\n")
                else:
                    print("Dragonfire: " + msg.upper(), end=' ')
                    stdout.flush()
            else:
                print("Dragonfire: " + msg.upper())
                print(columns * "_" + "\n")
        if not self.silent:
            subprocess.call(["pkill", "flite"], stdout=FNULL, stderr=FNULL)
            tts_proc = subprocess.Popen(
                "flite -voice slt -f /dev/stdin",
                stdin=subprocess.PIPE,
                stdout=FNULL,
                stderr=FNULL,
                shell=True)
            msg = "".join([i if ord(i) < 128 else ' ' for i in msg])
            tts_proc.stdin.write(msg.encode())
            tts_proc.stdin.close()
            # print "TTS process started."

        pool = Pool(processes=1)
        if not self.headless:
            pool.apply_async(realhud.play_gif, [0.5, True])
            # print "Avatar process started."

        if not self.silent:
            tts_proc.wait()
        pool.terminate()
        # if songRunning == True:
        #   subprocess.Popen(["rhythmbox-client","--play"])
        return msg
Example #32
def run():
    # get a connection for the controlling processes
    master_conn = get_pg_conn()

    if master_conn is None or master_conn == ERROR:
        return NO_CONNECTION

    comment("Connected to %s:%s:%s as %s" % (db_host, db_port, db, db_user))
    if table_name is not None:
        snippet = "Table '%s'" % table_name
    else:
        snippet = "Schema '%s'" % schema_name

    comment(
        "Analyzing %s for Columnar Encoding Optimisations with %s Threads..." %
        (snippet, threads))

    if do_execute:
        if drop_old_data:
            really_go = getpass.getpass(
                "This will make irreversible changes to your database, and cannot be undone. Type 'Yes' to continue: "
            )

            if not really_go == 'Yes':
                print("Terminating on User Request")
                return TERMINATED_BY_USER

        comment(
            "Recommended encoding changes will be applied automatically...")
    else:
        pass

    # process the table name to support multiple items
    if table_name is not None:
        tables = ""
        if table_name is not None and ',' in table_name:
            for t in table_name.split(','):
                tables = tables + "'" + t + "',"

            tables = tables[:-1]
        else:
            tables = "'" + table_name + "'"

    if table_name is not None:
        statement = '''select pgn.nspname::text as schema, trim(a.name) as table, b.mbytes, a.rows, decode(pgc.reldiststyle,0,'EVEN',1,'KEY',8,'ALL') dist_style, TRIM(pgu.usename) "owner", pgd.description
from (select db_id, id, name, sum(rows) as rows from stv_tbl_perm a group by db_id, id, name) as a
join pg_class as pgc on pgc.oid = a.id
left outer join pg_description pgd ON pgd.objoid = pgc.oid and pgd.objsubid = 0
join pg_namespace as pgn on pgn.oid = pgc.relnamespace
join pg_user pgu on pgu.usesysid = pgc.relowner
join (select tbl, count(*) as mbytes
from stv_blocklist group by tbl) b on a.id=b.tbl
and pgn.nspname::text ~ '%s' and pgc.relname in (%s)        
        ''' % (schema_name, tables)
    else:
        # query for all tables in the schema ordered by size descending
        comment("Extracting Candidate Table List...")

        statement = '''select pgn.nspname::text as schema, trim(a.name) as table, b.mbytes, a.rows, decode(pgc.reldiststyle,0,'EVEN',1,'KEY',8,'ALL') dist_style, TRIM(pgu.usename) "owner", pgd.description
from (select db_id, id, name, sum(rows) as rows from stv_tbl_perm a group by db_id, id, name) as a
join pg_class as pgc on pgc.oid = a.id
left outer join pg_description pgd ON pgd.objoid = pgc.oid and pgd.objsubid = 0
join pg_namespace as pgn on pgn.oid = pgc.relnamespace
join pg_user pgu on pgu.usesysid = pgc.relowner 
join (select tbl, count(*) as mbytes
from stv_blocklist group by tbl) b on a.id=b.tbl
where pgn.nspname::text ~ '%s'
  and a.name::text SIMILAR TO '[A-Za-z0-9_]*'
order by 2;
        ''' % (schema_name, )

    if debug:
        comment(statement)

    query_result = execute_query(statement)

    if query_result is None:
        comment("Unable to issue table query - aborting")
        return ERROR

    table_names = []
    for row in query_result:
        table_names.append(row)

    comment("Analyzing %s table(s) which contain allocated data blocks" %
            (len(table_names)))

    if debug:
        [comment(str(x)) for x in table_names]

    result = []

    if table_names is not None:
        # we'll use a Pool to process all the tables with multiple threads, or just sequentially if 1 thread is requested
        if threads > 1:
            # setup executor pool
            p = Pool(threads)

            try:
                # run all concurrent steps and block on completion
                result = p.map(analyze, table_names)
            except KeyboardInterrupt:
                # To handle Ctrl-C from user
                p.close()
                p.terminate()
                cleanup(master_conn)
                return TERMINATED_BY_USER
            except:
                print(traceback.format_exc())
                p.close()
                p.terminate()
                cleanup(master_conn)
                return ERROR

            p.terminate()
        else:
            for t in table_names:
                result.append(analyze(t))
    else:
        comment("No Tables Found to Analyze")

    # return any non-zero worker output statuses
    modified_tables = 0
    for ret in result:
        if isinstance(ret, (list, tuple)):
            return_code = ret[0]
            fk_commands = ret[1]
            modified_tables = modified_tables + 1 if ret[2] else modified_tables
        else:
            return_code = ret
            fk_commands = None

        if fk_commands is not None and len(fk_commands) > 0:
            print_statements(fk_commands)

            if do_execute:
                if not run_commands(master_conn, fk_commands):
                    if not ignore_errors:
                        print("Error running commands %s" % (fk_commands, ))
                        return ERROR

        if return_code != OK:
            print("Error in worker thread: return code %d. Exiting." %
                  (return_code, ))
            return return_code

    comment("Performed modification of %s tables" % modified_tables)

    if do_execute:
        if not master_conn.commit():
            return ERROR

    comment('Processing Complete')
    cleanup(master_conn)

    return OK
Example #33
        return wrd

# The ordinals for alphanums
chars = []
[chars.extend(l) for l in [range(97, 123), range(65, 91), range(48, 58)]]

# Create a pool of subprocesses to sidestep the GIL and iterate through in 
# chunks of 1000
p = Pool(8)

# Loop through alphanums odometer style
for i in range(8, len(chars) + 1):
    wrds = []
    for wrd in product(chars, repeat=i):
        wrds.append(''.join([chr(c) for c in wrd]))
        if len(wrds) >= 1000:
            try:
                results = p.map(verify_hash, wrds)
                for result in results:
                    if result:
                        print(result)
                        raise KeyboardInterrupt
                print(wrds[-1])
                wrds = []
            except KeyboardInterrupt:
                break
        else:
            continue
    p.terminate()
    break
Example #34
def hotspotsscan_withcontrol(chipfile, maxinsert, windowscare, countchr,
                             inputgloablumbda, bayesfactorthreshold, nthreads,
                             chipfregion, jobtype, ratio, inputfile,
                             inputfregion):

    pool = Pool(nthreads)

    try:

        pars = list()

        hotspots = list()

        print("gloablumbda", inputgloablumbda, "readlengthmean",
              inputfregion.readlengthmean)

        bayesfactorthresholdcount = 2

        i = 2

        while True:

            nowbayesfactor = bayesfactor(inputgloablumbda, i)

            if nowbayesfactor > bayesfactorthreshold:

                break

            bayesfactorthresholdcount = i

            i = i + 1

        print("bayesfactorthresholdcount", bayesfactorthresholdcount)

        windowsize = 100000

        for chromosmoe in countchr:

            chr_length = chipfregion.chrs_length[chromosmoe]

            for scare in range(0, int(chr_length / windowsize) + 1):

                nowstart = scare * windowsize + 1 - 200

                nowend = (scare + 1) * windowsize + 200

                if nowend > chr_length:

                    nowend = chr_length

                if nowstart < 1:

                    nowstart = 1

                nowregion = chromosmoe + ":" + str(nowstart) + "-" + str(
                    nowend)

                par = dict()

                par['region'] = nowregion

                par['maxinsert'] = maxinsert

                par['bamfile'] = chipfile

                par['jobtype'] = jobtype

                par['chrlength'] = chr_length

                par['regionchromosome'] = chromosmoe

                par['regionstart'] = nowstart

                par['regionend'] = nowend

                par['ratio'] = ratio

                # par['bayesfactordic'] = bayesfactordic

                par['bayesfactorcount'] = bayesfactorthresholdcount

                par['readlengthmean'] = chipfregion.readlengthmean

                pars.append(par)

        enrichedinthreads = pool.map(hotspot_control_worker, pars)

        chrenrichedpotin = dict()

        for enrichedinthread in enrichedinthreads:

            nowchr = enrichedinthread['chromosome']

            if nowchr in chrenrichedpotin:

                chrenrichedpotin[nowchr].append(enrichedinthread['list'])

            else:

                chrenrichedpotin[nowchr] = list()

                chrenrichedpotin[nowchr].append(enrichedinthread['list'])

        chrhotpars = list()

        for nowchr in chrenrichedpotin:

            hotpar = dict()

            hotpar['chromosome'] = nowchr

            hotpar['preregion'] = chrenrichedpotin[nowchr]

            hotpar['chr_length'] = chipfregion.chrs_length[nowchr]

            hotpar['fregion'] = chipfregion

            chrhotpars.append(hotpar)

        hotsptosinthreads = pool.map(hotspots_chromsome_merge, chrhotpars)

        for hotinth in hotsptosinthreads:

            for hotspotnow in hotinth:

                hotspots.append(hotspotnow)

        pool.close()

        return hotspots

    except KeyboardInterrupt:

        pool.terminate()

        print("You cancelled the program!")

        sys.exit(1)

    except Exception as e:

        print(
            'got exception in Jazzlib.hotspotsscan.hotspotsscan_withcontrol: %r, terminating the pool'
            % (e, ))

        pool.terminate()

        print('pool is terminated')

    finally:

        pool.join()
Example #35
def is_absolute_url(url):
    return bool(urlparse(url).netloc)


if __name__ == "__main__":
    page_numbers = range(1, 50)
    search_url = 'https://www.menulog.com.au/area/3057-brunswick-east'
    url_list = [search_url]

    # Make the Pool of workers
    pool = ThreadPool(10)
    # Open the urls in their own threads and return the results
    scraped_data = pool.map(parse_listing, url_list)
    # Close the pool and wait for the work to finish
    pool.terminate()
    pool.join()

    if scraped_data:
        name_file = "menulog_business_details"
        print("Writing scraped data to %s.csv" % name_file)
        with open('%s.csv' % name_file, 'wb') as csvfile:
            fieldnames = [
                'business_name', 'rating', 'categories', 'address', 'url'
            ]
            writer = csv.DictWriter(csvfile,
                                    fieldnames=fieldnames,
                                    quoting=csv.QUOTE_ALL)
            writer.writeheader()
            for datas in scraped_data:
                for data in datas:
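
The scraper above builds its pool with ThreadPool (presumably multiprocessing.pool.ThreadPool; the import is not shown), which shares the Pool API but runs tasks in threads, a better fit for I/O-bound scraping. A minimal sketch, with fetch standing in for parse_listing and placeholder URLs:

from multiprocessing.pool import ThreadPool

def fetch(url):
    # stand-in for parse_listing: pretend to scrape one listing page
    return [{'url': url, 'business_name': 'example', 'rating': None}]

if __name__ == '__main__':
    pool = ThreadPool(10)
    scraped_data = pool.map(fetch, ['https://example.com/a', 'https://example.com/b'])
    pool.terminate()
    pool.join()
    for datas in scraped_data:
        for data in datas:
            print(data)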
Example #36
def main():
    #conn = sqlite3.connect("picinfo.db",check_same_thread = False) 
    # If the link-crawling step uses a pool, the queue must be a Manager.Queue
    # A pool is used for crawling mainly because there are many search keywords
    m = Manager()
    INIT_QUE = m.Queue()
    DOWNLOAD_QUE = JoinableQueue()
    CROP_QUE = JoinableQueue()
    UPLOAD_QUE = JoinableQueue()
    conn = sqlite3.connect("picinfo.db") 
    mkdirpath(os.path.join("imgs","unkown","unkown"))
    #cursor = conn.cursor()
    lock = {
        "sql": m.Lock()
        ,"id": m.Lock()
    }
    #1. Use mysql to initialize each queue with previously unfinished tasks
    if IF_INIT:
        init_que(INIT_QUE,DOWNLOAD_QUE,CROP_QUE,UPLOAD_QUE)
    start_id = get_id()
    manager = Manager()
    id_m = manager.list([start_id])
    #2. Crawl links
    get_link_task = []
    
    process_number_dict = m.dict()
    process_number_dict["init"] = len(main_word) * len(countrys)
    process_number_dict["download"] = max_process_num
    process_number_dict["crop"] = max_process_num
    pool = Pool(max_process_num)

    if IF_CRAW:
        for q1 in main_word:
            for q2 in countrys:
                mkdirpath(os.path.join("imgs",q2,q1))
                mkdirpath(os.path.join("crop_imgs",q2,q1))
                #pool.apply_async(hello,args=(q1,q2,max_number,id_m,INIT_QUE,lock,))#,process_number_dict,))
                pool.apply_async(get_link, args=(q1,q2,max_number,id_m,INIT_QUE,lock,process_number_dict,))
#                p = Process(target=get_link, args=(q1,q2,max_number,id_m,INIT_QUE,lock))
#                get_link_task.append(p)
    else:
        for _ in range(max_process_num):
            INIT_QUE.put(None)

    #3. Download images
    download_link_task = []
    if IF_DOWNLOAD:
        for _ in range(max_process_num):
            p = Process(target=download_img,args=(INIT_QUE,DOWNLOAD_QUE,lock,process_number_dict))
            #p.start()
            download_link_task.append(p)
    else:
        for _ in range(max_process_num):
            DOWNLOAD_QUE.put(None)

    
    #4. Crop faces
    print("*-"*10)
    crop_task = []
    if IF_CROP:
         for _ in range(max_process_num):
            p = Process(target=crop_img,args=(DOWNLOAD_QUE,CROP_QUE,lock,0.5,process_number_dict))
            #p.start()
            crop_task.append(p)
    else:
        for _ in range(max_process_num):
            CROP_QUE.put(None)

    print(len(get_link_task))
    print("main_pid",os.getpid()," ",os.getppid())
    for each in get_link_task + download_link_task + crop_task:
        print(each,"start")
        each.start()
    pool.close()
    pool.join()
    pool.terminate()
    for each in get_link_task + download_link_task + crop_task:
        each.join()
    print(DOWNLOAD_QUE.qsize())
    release_que([INIT_QUE,DOWNLOAD_QUE,CROP_QUE,UPLOAD_QUE])
    print("job done")
Example #37
def main(*args, **kwargs):
    """
    Main program
    """
    locals().update(kwargs)

    # Format the print messages and make it thread safe
    hijack_print()

    # Identified pairs of related images
    similar_pairs = list()

    # Find all files under directory
    images = []
    for d in (start_dir, compare_to):
        if d:
            for root, dirnames, filenames in os.walk(d):
                for filename in fnmatch.filter(filenames, '*.*'):
                    # Don't include any thumbnail or other junk from iPhoto/Photos
                    if not '.photoslibrary' in root or 'Masters' in root:
                        if not str(filename).startswith('.') and not str(
                                filename).endswith('.CR2'):
                            images.append(os.path.join(root, filename))

    file_count = len(images)
    if not compare_to:
        print "%d files to process in %s" % (file_count, start_dir)
    else:
        print "Comparing %d images between %s and %s" % (file_count, start_dir,
                                                         compare_to)

    if file_count == 0:
        print "No images found"
        exit(0)

    # Prehash
    print "Please wait for initial image scan to complete..."

    # Create a worker pool to hash the images over multiple cpus
    worker_pool = Pool(processes=cpus,
                       initializer=init_worker,
                       maxtasksperchild=100)
    worker_results = []

    # Cache all the image hashes ahead of time so user can see progress
    for idx, image_path in enumerate(images):

        # Don't process last image as it will not have anything to compare against
        if idx < file_count:
            new_callback_function = partial(
                lambda x, key: ImageUtils.save_hash(key, x), key=image_path)
            worker_results.append(
                worker_pool.apply_async(MethodProxy(ImageUtils,
                                                    ImageUtils.hash),
                                        [image_path, image_path],
                                        callback=new_callback_function))

    # This block basically prints out the progress until hashing is done and allows graceful exit if user quits
    try:
        done, elapsed, total, started = 0, 0, len(worker_results), time.time()
        worker_pool.close()
        while True:
            done = sum(r.ready() for r in worker_results)
            elapsed = time.time() - started
            rate = int(ImageUtils.new_hash_count / elapsed)
            eta = int((total - done) /
                      float(rate)) if done > 0 and rate > 0 else None
            print_progress(int(float(done) / total * 100), rate, eta)
            # if all(r.ready() for r in worker_results):
            if done == total:
                print "Hashing completed"
                break
            time.sleep(1)
    except (KeyboardInterrupt, SystemExit):
        print '\n'
        print "Caught KeyboardInterrupt, terminating workers"
        worker_pool.terminate()
        worker_pool.join()
        ImageUtils.persistent_store.commit()
        exit(1)
    else:
        worker_pool.join()
        ImageUtils.persistent_store.commit()

    if only_index:
        return

    # Comparison
    print ""
    print "Comparing the images..."

    target_dir1 = os.path.expanduser(start_dir)
    target_dir2 = os.path.expanduser(compare_to) if compare_to else None

    # Compare each image to every other image
    for idx, image_path in enumerate(tqdm(images)):

        # Don't process last image as it will not have anything to compare against
        if idx == file_count - 1:
            continue

        hash1 = ImageUtils.hash(image_path, image_path)

        if not hash1:
            continue

        # Compare to all images following
        for jdx in xrange(idx + 1, len(images)):
            image_path2 = images[jdx]

            # Skip same image paths if it happens
            if image_path == image_path2:
                continue

            # If comparing two directories instead of one to itself, then check the images belong to different parents
            if compare_to and any([
                    all([
                        str(image_path).startswith(target_dir1),
                        str(image_path2).startswith(target_dir1)
                    ]),
                    all([
                        str(image_path).startswith(target_dir2),
                        str(image_path2).startswith(target_dir2)
                    ])
            ]):
                continue

            hash2 = ImageUtils.hash(image_path2, image_path2)

            if not hash2:
                continue

            # Compute the similarity values
            dist = ImageUtils.hamming_score(hash1, hash2)
            similarity = (64 - dist) * 100 / 64

            if not inverse:
                if similarity > confidence_threshold:
                    similar_pairs.append(
                        OutputRecord(image_path, image_path2, dist,
                                     similarity))
            else:
                if similarity <= confidence_threshold:
                    similar_pairs.append(
                        OutputRecord(image_path, image_path2, dist,
                                     similarity))

    # Print the results
    outputter_for_format(output).output(similar_pairs)

    print '\n'
Example #38
def optimize_for_order(conv_params, pool_kernel=None, pool_stride=None, sequential=True):
    # Generate permutations for the order
    loops = ['B/b', 'OW/ow', 'OH/oh', 'IC/ic', 'OC/oc']
    order = set(permutations(loops))

    return_dict = {}
    acc_obj, K, O, S, IC, OC, B, iprec, wprec, im2col, energy_cost = conv_params

    #print('optimizing for convolution layer: weights {}x{}x{}x{}'.format(OC,IC,K,K))
    #print('Batch size: {}'.format(B))

    if pool_kernel is None:
        pool_kernel = (1,1,1,1)
    if pool_stride is None:
        pool_stride = (1,1,1,1)
    conv_params_with_pool = acc_obj, K, O, S, IC, OC, B, iprec, wprec, im2col, energy_cost, pool_kernel, pool_stride

    if not sequential:
        _bound_optimizer_method = functools.partial(_optimize_for_order, conv_params_with_pool)

        try:
            pool = Pool(cpu_count())
            results = pool.map_async(_bound_optimizer_method, order).get(10000)
            pool.close()
            pool.join()

            # for o in order:
            #     _bound_optimizer_method(o)
            # exit()

            best_cycles = None
            best_energy = None
            min_cycles = min([x[-4] for x in results])
            min_energy = min([x[-3] for x in results])
            cycles_list = [x[-2] for x in results]
            energy_list = [x[-1] for x in results]
            energy_array = np.stack(energy_list)
            cycles_array = np.stack(cycles_list)
            for r in results:
                tiling, order_type, cycles, energy, _, _ = r
                # print('{}:\n{}\n\t{:1.2f}, {:1.2f}'.format(order_type, tiling, cycles/float(min_cycles), energy/float(min_energy)))
                if best_cycles is None or best_cycles > cycles or (best_cycles == cycles and best_energy > energy):
                    best_cycles = cycles
                    best_energy = energy
                    best_tiling = tiling
                    best_order = order_type
            return best_tiling, best_order, cycles_array, energy_array

        except KeyboardInterrupt:
            pool.terminate()
            pool.join()
            return

    else:
        best_cycles = None
        best_energy = None
        best_tiling = None
        best_order  = None
        for o in order:
            tiling, order_type, cycles, energy, _, _ = _optimize_for_order(conv_params_with_pool, o)
            if best_cycles is None or best_cycles > cycles:
                best_cycles = cycles
                best_energy = energy
                best_tiling = tiling
                best_order  = order_type
            elif best_cycles == cycles and best_energy > energy:
                best_cycles = cycles
                best_energy = energy
                best_tiling = tiling
                best_order  = order_type
        return best_tiling, best_order, None, None
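# A minimal sketch of the search pattern used above: enumerate loop-order
# permutations and keep the order with the fewest cycles, breaking ties by
# energy. evaluate_order is a hypothetical stand-in for _optimize_for_order
# and is assumed to return a (cycles, energy) pair.
from itertools import permutations

def pick_best_order(loops, evaluate_order):
    best_order, best_key = None, None
    for candidate in permutations(loops):
        cycles, energy = evaluate_order(candidate)
        key = (cycles, energy)
        if best_key is None or key < best_key:
            best_order, best_key = candidate, key
    return best_order, best_key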
def real_time(args):
    """
    Read and apply object detection to input real time system (webcam)
    """

    # If display is off and no frame-count limit has been defined, turn the
    # display on
    if (not args["display"]) and (args["num_frames"] < 0):
        print("\nSet display to on\n")
        args["display"] = 1

    # Set the multiprocessing logger to debug if required
    if args["logger_debug"]:
        logger = multiprocessing.log_to_stderr()
        logger.setLevel(multiprocessing.SUBDEBUG)

    # Multiprocessing: Init input and output Queue and pool of workers
    input_q = Queue(maxsize=args["queue_size"])
    output_q = Queue(maxsize=args["queue_size"])
    pool = Pool(args["num_workers"], worker, (input_q, output_q))

    # Create a threaded video stream and start the FPS counter
    vs = WebcamVideoStream(src=args["input_device"]).start()
    fps = FPS().start()

    # Define the output codec and create VideoWriter object
    if args["output"]:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('outputs/{}.mp4'.format(args["output_name"]),
                              fourcc,
                              vs.getFPS() / args["num_workers"],
                              (vs.getWidth(), vs.getHeight()))
    # Start reading and treating the video stream

    if args["display"] > 0:
        print()
        print(
            "====================================================================="
        )
        print(
            "Starting video acquisition. Press 'q' (on the video windows) to stop."
        )
        print(
            "====================================================================="
        )
        print()

    count_frame = 0
    while True:
        #Capture frame-by-frame
        ret, frame = vs.read()
        count_frame = count_frame + 1

        if ret:
            input_q.put(frame)
            output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)

            # write the frame
            if args["output"]:
                out.write(output_rgb)

            # Display the resulting frame
            if args["display"]:
                cv2.imshow('frame', output_rgb)
                fps.update()
            elif count_frame >= args["num_frames"]:
                break

        else:
            break

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    pool.terminate()
    vs.stop()
    if args["output"]:
        out.release()
    cv2.destroyAllWindows()
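# The Pool(args["num_workers"], worker, (input_q, output_q)) call above relies
# on a `worker` function that runs in every pool process, pulling frames from
# input_q and pushing processed frames to output_q. A minimal sketch of that
# contract, with detect_objects as a hypothetical stand-in for the real
# detection step:
def detect_objects(frame):
    # placeholder for the real model inference; returns the frame unchanged
    return frame

def worker(input_q, output_q):
    # runs inside each pool process: block on the input queue, process, emit
    while True:
        frame = input_q.get()
        output_q.put(detect_objects(frame))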
Exemple #40
0
        time.sleep(random.expovariate(0.25))
        me.__setattr__(stuff, True)


def updateClimber(me):
    stufftodo = ['RockSchool2012', 'Seneca', 'V2']  # more later
    for stuff in stufftodo:
        time.sleep(random.expovariate(0.5))
        me.__setattr__(stuff, True)


if __name__ == '__main__':
    mamma, baba = Parents(), Parents()
    me = SubhodeepMoitra(mamma, baba)
    lifeStart = time.time()
    gradSchoolStart = 21  # Life begins at gradschool ?

    # Don't want to live beyond 70
    lifeEnd = lifeStart + 70 - random.expovariate(lambd=1.0)
    life = Pool(processes=4)
    # apply_async expects the positional arguments as a tuple
    life.apply_async(updateProteinArchitect, (me,))
    life.apply_async(updatePythonista, (me,))
    life.apply_async(updateTriathlete, (me,))
    life.apply_async(updateClimber, (me,))

    while time.time() + gradSchoolStart < lifeEnd:
        time.sleep(1)  # Check annually if I am alive
    me.alive = False
    life.terminate()
    print("What a ride..!!")
Exemple #41
0
def Calculate_D_Matrix_WGS84_mp(satlatlonalt, az, ze, azze_def=1):
    '''
    Returns the Distance matrix calculated given the satellite coordinates and viewing geometry for the WGS84 model.
    INPUTS:
        satlatlonalt -  vector containing the satellite coordinates [lat-lon-alt] (km)
        az -  azimuth vector (deg)
        ze -  zenith vector (deg)
        azze_def - flag indicating whether the default az and ze FOV angles from the instrument parameters will be used
    OUTPUT:
        S  - Distance matrix assuming WGS84 (km 10^{-1})
    NOTES:
    HISTORY:
        03-Jun-2015: Written by Dimitrios Iliou ([email protected])
        26-Aug-2015: Added the exception for the Pool
    '''
    try:
        rbot = np.zeros(np.size(ze, 0))

        for i in range(0, np.size(ze, 0)):
            _, _, rbot[i] = ic.tangent_point(satlatlonalt, az[i], ze[i])

        rtop = rbot.copy()
        rtop[1:] = rbot[:-1]
        rtop[0] = satlatlonalt[2] - 1
        #rtop[0] = rtop[1] + (rtop[1] - rtop[2])
        rmid = (rbot + rtop) / 2
        S = np.zeros((np.size(ze), np.size(rbot)))
        k = 0

        N = multiprocessing.cpu_count()

        # Create the pool.  Be nice.  Don't use all the cores!
        pool = Pool(processes=16)
        t0 = time.time()
        for i in range(0, np.size(ze)):

            job_args = [(satlatlonalt, az[i], ze[i], rtop[j])
                        for j in range(0, len(rbot))]
            job_args2 = [(satlatlonalt, az[i], ze[i], rbot[j])
                         for j in range(0, len(rbot))]
            job_args3 = [(satlatlonalt, az[i], ze[i], rtop[j], 'second')
                         for j in range(0, len(rbot))]

            ub = pool.map(Calculate_D_Matrix_WGS84_mp_star, job_args)
            lb = pool.map(Calculate_D_Matrix_WGS84_mp_star, job_args2)
            ub1 = np.array(ub)
            '''
            if np.sum(np.isnan(lb)) == len(lb):
                lb2 = pool.map(Calculate_D_Matrix_WGS84_mp_star,job_args3)
                lb21 = array(lb2)
                S[i,np.where(np.isnan(ub)==True)and(np.where(np.isnan(lb)==True))]=0
                diff = (ub1[np.where(np.isnan(ub)==False)and(np.where(np.isnan(lb2)==False))] - lb21[np.where(np.isnan(ub)==False)and(np.where(np.isnan(lb2)==False))])
                S[i,np.where(np.isnan(ub)==False)and(np.where(np.isnan(lb2)==False))]=abs(diff)
            else:
                lb1 = array(lb)
                S[i,np.where(np.isnan(ub)==True)and(np.where(np.isnan(lb)==True))]=0
                diff = (ub1[np.where(np.isnan(ub)==False)and(np.where(np.isnan(lb)==False))] - lb1[np.where(np.isnan(ub)==False)and(np.where(np.isnan(lb)==False))])
                S[i,np.where(np.isnan(ub)==False)and(np.where(np.isnan(lb)==False))]=2*abs(diff)
            '''
            lb2 = pool.map(Calculate_D_Matrix_WGS84_mp_star, job_args3)
            lb21 = np.array(lb2)
            S[i,
              np.where(np.isnan(ub) == True) and
              (np.where(np.isnan(lb) == True))] = 0
            diff2 = (ub1[np.where(np.isnan(ub) == False)
                         and (np.where(np.isnan(lb2) == False))] -
                     lb21[np.where(np.isnan(ub) == False)
                          and (np.where(np.isnan(lb2) == False))])
            lb1 = np.array(lb)
            diff = (ub1[np.where(np.isnan(ub) == False)
                        and (np.where(np.isnan(lb) == False))] -
                    lb1[np.where(np.isnan(ub) == False)
                        and (np.where(np.isnan(lb) == False))])
            S[i,
              np.where(np.isnan(ub) == False) and
              (np.where(np.isnan(lb) == False))] = 2 * abs(diff)
            diago = np.where(np.isnan(ub) == False) and (np.where(
                np.isnan(lb) == True))[0][0]
            S[i, diago] = abs(diff2[diago])

        t1 = time.time()
        #print t1-t0
        pool.close()
        pool.join()
        S = S * 1e-1
    except (KeyboardInterrupt, SystemExit, ZeroDivisionError,
            BaseException) as inst:

        if 'pool' in vars():
            pool.terminate()

        #print("You cancelled the program!")
        print(type(inst))
        print(inst)
        exit(1)

    # Note: this handler is unreachable, because BaseException above already
    # catches every exception.
    except Exception as inst:

        print("Something Happened :(")
        print(type(inst))
        print(inst)
        exit(1)

    return S, rmid
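# Calculate_D_Matrix_WGS84_mp_star (not shown here) is presumably the usual
# star-unpacking shim that lets Pool.map fan one tuple of arguments out to a
# multi-argument function. A minimal sketch of that pattern, with
# distance_along_ray as an illustrative stand-in for the real geometry call:
from multiprocessing import Pool

def distance_along_ray(satlatlonalt, az, ze, r):
    # placeholder computation; the real code intersects a line of sight with a shell
    return r

def distance_along_ray_star(args):
    # unpack one job tuple and call the multi-argument function
    return distance_along_ray(*args)

if __name__ == '__main__':
    jobs = [((0.0, 0.0, 600.0), 10.0, 95.0, r) for r in range(100, 600, 100)]
    with Pool(4) as pool:
        print(pool.map(distance_along_ray_star, jobs))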
Exemple #42
0
class MapWrapper:
    """
    Parallelisation wrapper for working with map-like callables, such as
    `multiprocessing.Pool.map`.

    Parameters
    ----------
    pool : int or map-like callable
        If `pool` is an integer, then it specifies the number of threads to
        use for parallelization. If ``int(pool) == 1``, then no parallel
        processing is used and the map builtin is used.
        If ``pool == -1``, then the pool will utilize all available CPUs.
        If `pool` is a map-like callable that follows the same
        calling sequence as the built-in map function, then this callable is
        used for parallelization.
    """
    def __init__(self, pool=1):
        self.pool = None
        self._mapfunc = map
        self._own_pool = False

        if callable(pool):
            self.pool = pool
            self._mapfunc = self.pool
        else:
            from multiprocessing import Pool
            # user supplies a number
            if int(pool) == -1:
                # use as many processors as possible
                self.pool = Pool()
                self._mapfunc = self.pool.map
                self._own_pool = True
            elif int(pool) == 1:
                pass
            elif int(pool) > 1:
                # use the number of processors requested
                self.pool = Pool(processes=int(pool))
                self._mapfunc = self.pool.map
                self._own_pool = True
            else:
                raise RuntimeError("Number of workers specified must be -1,"
                                   " an int >= 1, or an object with a 'map' "
                                   "method")

    def __enter__(self):
        return self

    def terminate(self):
        if self._own_pool:
            self.pool.terminate()

    def join(self):
        if self._own_pool:
            self.pool.join()

    def close(self):
        if self._own_pool:
            self.pool.close()

    def __exit__(self, exc_type, exc_value, traceback):
        if self._own_pool:
            self.pool.close()
            self.pool.terminate()

    def __call__(self, func, iterable):
        # only accept one iterable because that's all Pool.map accepts
        try:
            return self._mapfunc(func, iterable)
        except TypeError as e:
            # wrong number of arguments
            raise TypeError("The map-like callable must be of the"
                            " form f(func, iterable)") from e
Exemple #43
0
    def loop_core(self,tsample=50.,tmax=None, path='.',**kwargs):
        import pandas as pd
        for i, j in self.attrs.items():
            kwargs.setdefault(i,j)


        path=Path(path)
        path.norm()
        path.mkdir(rmdir=kwargs.get('clean_directory',False))
        table=[]

        dic={
            'tsample':tsample,
            'tmax':tmax,
            'path':path,
            'sys':kwargs.get('sys',None),
            }


        if 'model' in kwargs:
            model=kwargs['model']
            model.renew()
            model.set_params(**kwargs)
        else:
            model=self.Model(**kwargs)
        if not kwargs.get('reseed',1):
            kwargs['model']=model #transmit the model further down the loop

        dic.update(model.export_params())

        if 'replicas' in kwargs:
            '''Multiprocessing test'''
            from multiprocessing import Pool,freeze_support
            freeze_support()
            systems=range(kwargs['replicas'])

            array=[(path.copy(),self.Model,model.export_params(),model.results,tmax,tsample,kwargs.get('keep','endpoint'),sys)
                     for sys in systems  ]

            pool=Pool()

            pool.map(mp_run,array )
            pool.close()
            pool.join()
            pool.terminate()
            model.compress_results(path)
        else:
            kwargs.setdefault('converge',1)
            kwargs.setdefault('keep','endpoint')
            model,success=self.evol(model=model,tmax=tmax,tsample=tsample,path=path,**kwargs)
            if not success and 'model' not in kwargs:
                kwargs['loop_trials']=kwargs.get('loop_trials',0)+1

                if kwargs['loop_trials']<self.MAX_TRIALS:
                    print('### model diverged, trying again! ###')
                    return self.loop_core(tsample=tsample,tmax=tmax, path=path,**kwargs)
                else:
                    print('### model diverged {} times, setting results to 0! ###'.format(kwargs['loop_trials']))
                    for var in model.results:
                        model.results[var].matrix[:]=0
            if 'loop_trials' in kwargs:
                del kwargs['loop_trials']
            model.save(path,overwrite=1 )

        table.append(dic)
        pd.DataFrame(table).to_csv(path+'files.csv')
        return table
def __main__():
    """
    Feature Extractor: MobileNets
    Meta-architecture: SSD
    Dataset: MS-COCO
    """
    # flags
    use_image, use_video = True, False
    ckpt_number = "_{}".format(FLAGS.CKPT_NUMBER) if int(FLAGS.CKPT_NUMBER) > 0 else ""
    # directory of images/videos
    path_to_test = {
        "root": "object_detection/test_images/",
        "pet": 'object_detection/test_images/pet-bottles/',
        "krones": "object_detection/test_images/" + FLAGS.folder
    }
    # Path to frozen detection graph. This is the actual model that is used for the object detection.
    models = {
        "ssd_mobilenet": "object_detection/weights/ssd_mobilenet_v1_coco_2017_11_17",
        "ssd_inception": "object_detection/weights/ssd_inception_v2_coco_2017_11_17",
        "rcnn_inception": "object_detection/weights/faster_rcnn_inception_v2_coco_2017_11_08",
        "rcnn_resnet": "object_detection/weights/faster_rcnn_resnet50_coco_2017_11_08",
        "raccoon": "object_detection/weights/ssd_inception_v2_racoon",
        "krones": "{}{}".format("krones/models/ssd_mobilenet_v1_coco/frozen_graph", ckpt_number)
    }
    # label maps for different datasets
    labels = {
        "mscoco": "object_detection/data/mscoco_label_map.pbtxt",
        "kitti": "object_detection/data/kitti_label_map.pbtxt",
        "pet": "object_detection/data/pet_label_map.pbtxt",
        "raccoon": "object_detection/data/racoon_label_map.pbtxt",
        "krones": "krones/data/label_map.pbtxt"
    }
    checkpoint_path = models["krones"] + '/frozen_inference_graph.pb'
    # List of the strings that is used to add correct label for each box.
    path_to_labels = labels["krones"]
    num_classes = 90
    # resolution -- has impact on inference time
    scale_factor = 0.5
    # ROI -- has impact on inference time
    x_boundary = (1 * scale_factor, 1270 * scale_factor)
    y_boundary = (1 * scale_factor, 700 * scale_factor)
    xy_boundary = x_boundary, y_boundary
    # test images
    image_paths = [name for name in glob.glob(path_to_test["krones"] + '*')]
    label_map = label_map_util.load_labelmap(path_to_labels)
    categories = label_map_util.convert_label_map_to_categories(label_map,
                                                                max_num_classes=num_classes,
                                                                use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    queue_size = 5
    input_q, output_q = Queue(maxsize=queue_size), Queue(maxsize=queue_size)
    pool = Pool(
        # number of queues
        2,
        # worker
        worker,
        # arguments of worker
        (input_q, output_q, checkpoint_path, xy_boundary, category_index, scale_factor)
    )
    # for images
    if use_image:
        for image_path in image_paths:
            image_name = image_path.rsplit('/', 1)[-1]
            if os.path.isfile(image_path):
                handle_queues(input_q, output_q, cv2.imread(image_path), image_name)
                cv2.waitKey(0)
    # for video
    if use_video:
        video_cap = imageio.get_reader(path_to_test["root"] + "project_video.mp4")
        # for each frame, get bounding boxes
        for index, frame in enumerate(video_cap):
            # drop every 2nd frame to improve FPS
            if index % 2 == 0:
                handle_queues(input_q, output_q, frame)
        video_cap.close()  # imageio readers are closed, not stopped
    # cleaning tasks
    pool.terminate()
    cv2.destroyAllWindows()
Exemple #45
0
def scale_and_get_IFFT_on_VTA_array(S_vector, num_of_proc, name_sol, d,
                                    FREQ_vector_signal, Xs_signal_normalized,
                                    t_vect, arrays_shape, i_start_octv):

    start_IFFT = time_lib.time()

    global solution_sort_octv

    hf = h5py.File(name_sol[:-4] + '.h5', 'r')
    solution_over_contacts = hf.get('dataset_1')
    solution_over_contacts = np.array(solution_over_contacts)
    hf.close()

    num_segments = sum(arrays_shape)

    Max_field_on_VTA_array = np.ctypeslib.as_ctypes(
        np.zeros(num_segments, float))
    global shared_array
    shared_array = sharedctypes.RawArray(Max_field_on_VTA_array._type_,
                                         Max_field_on_VTA_array)

    Fr_corresp_ar = np.genfromtxt(os.environ['PATIENTDIR'] +
                                  '/Stim_Signal/Fr_corresp_array' +
                                  str(d["trunc_param"] * 1.0) + '.csv',
                                  delimiter=' ')
    FR_vec_sign_octv = np.genfromtxt(os.environ['PATIENTDIR'] +
                                     '/Stim_Signal/FR_vector_signal_octaves' +
                                     str(d["trunc_param"] * 1.0) + '.csv',
                                     delimiter=' ')

    Fr_corresp_ar = np.round(Fr_corresp_ar, 6)
    N_freq_octv = (FR_vec_sign_octv.shape[0])

    hf = h5py.File(name_sol[:-4] + '.h5', 'r')
    solution_over_contacts = hf.get('dataset_1')
    solution_over_contacts = np.array(solution_over_contacts)
    hf.close()

    solution_sort_octv = np.zeros((solution_over_contacts.shape[0], 2), float)

    #now we want to go over all points of this data set and scale the solutions with S_vector
    #print("S_factor: ",S_vector)
    N_contacts = solution_over_contacts.shape[1] - 1

    for j in range(len(S_vector)):
        if S_vector[j] is None:
            # Important: set it to 0 to drop its contribution. This does not affect
            # the field solution, because it was not treated as a ground in FFEM.
            S_vector[j] = 0.0

    for point_i in np.arange(0, num_segments * FR_vec_sign_octv.shape[0],
                             FR_vec_sign_octv.shape[0]):
        #solution_sort_octv[point_i:(point_i+FR_vec_sign_octv.shape[0]),:8]=solution_sort_octv[point_i:(point_i+FR_vec_sign_octv.shape[0]),:8]*S_vector
        #solution_sort_octv[point_i:(point_i+FR_vec_sign_octv.shape[0]),0]=np.sum(solution_over_contacts[point_i:(point_i+FR_vec_sign_octv.shape[0]),:8]*S_vector, axis=1)
        solution_sort_octv[point_i:(
            point_i + FR_vec_sign_octv.shape[0]), 0] = np.sum(
                solution_over_contacts[point_i:(
                    point_i + FR_vec_sign_octv.shape[0]), :N_contacts] *
                S_vector,
                axis=1)

    p = Pool(num_of_proc)
    res = p.map(
        partial(scaled_ifft_on_VTA_array, Xs_signal_normalized,
                FREQ_vector_signal.shape[0], N_freq_octv, FR_vec_sign_octv,
                Fr_corresp_ar, t_vect, d["T"], i_start_octv),
        np.arange(num_segments))
    Max_field_on_VTA_array = np.ctypeslib.as_array(shared_array)
    p.terminate()

    minutes = int((time_lib.time() - start_IFFT) / 60)
    seconds = int(time_lib.time() - start_IFFT) - minutes * 60
    print("----- IFFT took ", minutes, " min ", seconds, " s -----")

    return Max_field_on_VTA_array
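# The shared_array global above relies on fork-style process creation: the
# sharedctypes.RawArray is built before the Pool, inherited by the workers, and
# written in place by scaled_ifft_on_VTA_array (not shown). A minimal sketch of
# that pattern, assuming the fork start method (the default on Linux); the
# helper names here are illustrative:
import numpy as np
from multiprocessing import Pool, sharedctypes

shared = None

def init_shared(n):
    global shared
    shared = sharedctypes.RawArray('d', n)

def fill_slot(i):
    # each worker writes only its own slot, so no locking is needed
    buf = np.ctypeslib.as_array(shared)
    buf[i] = i * i

if __name__ == '__main__':
    n = 8
    init_shared(n)
    with Pool(4) as pool:
        pool.map(fill_slot, range(n))
    print(np.ctypeslib.as_array(shared))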
Exemple #46
0
class ImageDataset(object):
    def __init__(self,
                 name,
                 datadir,
                 batch_size,
                 im_processor,
                 processes=3,
                 shuffle=True,
                 dst_size=None):
        self._name = name
        self._data_dir = datadir
        self._batch_size = batch_size
        self.dst_size = dst_size

        self._epoch = -1
        self._num_classes = 0
        self._classes = []

        # load by self.load_dataset()
        self._image_indexes = []
        self._image_names = []
        self._annotations = []
        # Use this dict for storing dataset specific config options
        self.config = {}

        # Pool
        self._shuffle = shuffle
        self._pool_processes = processes
        self.pool = Pool(self._pool_processes)
        self.gen = None
        self._im_processor = im_processor

    def next_batch(self, size_index):
        batch = {
            'images': [],
            'gt_boxes': [],
            'gt_classes': [],
            'dontcare': [],
            'origin_im': []
        }
        i = 0
        if self.gen is None:
            indexes = np.arange(len(self.image_names), dtype=int)
            if self._shuffle:
                np.random.shuffle(indexes)
            self.gen = self.pool.imap(
                partial(self._im_processor, size_index=None),
                ([self.image_names[i],
                  self.get_annotation(i), self.dst_size] for i in indexes),
                chunksize=self.batch_size)
            self._epoch += 1
            print(('epoch {} start...'.format(self._epoch)))

        while i < self.batch_size:
            try:
                images, gt_boxes, classes, dontcare, origin_im = next(self.gen)

                # multi-scale
                w, h = cfg.multi_scale_inp_size[size_index]
                gt_boxes = np.asarray(gt_boxes, dtype=float)
                if len(gt_boxes) > 0:
                    gt_boxes[:, 0::2] *= float(w) / images.shape[1]
                    gt_boxes[:, 1::2] *= float(h) / images.shape[0]
                images = cv2.resize(images, (w, h))

                batch['images'].append(images)
                batch['gt_boxes'].append(gt_boxes)
                batch['gt_classes'].append(classes)
                batch['dontcare'].append(dontcare)
                batch['origin_im'].append(origin_im)
                i += 1
            except (StopIteration, ):
                indexes = np.arange(len(self.image_names), dtype=int)
                if self._shuffle:
                    np.random.shuffle(indexes)
                self.gen = self.pool.imap(partial(
                    self._im_processor, size_index=None), ([
                        self.image_names[i],
                        self.get_annotation(i), self.dst_size
                    ] for i in indexes),
                                          chunksize=self.batch_size)
                self._epoch += 1
                print(('epoch {} start...'.format(self._epoch)))
        batch['images'] = np.asarray(batch['images'])
        return batch

    def close(self):
        self.pool.terminate()
        self.pool.join()
        self.gen = None

    def load_dataset(self):
        raise NotImplementedError

    def evaluate_detections(self, all_boxes, output_dir=None):
        """
        all_boxes is a list of length number-of-classes.
        Each list element is a list of length number-of-images.
        Each of those list elements is either an empty list []
        or a numpy array of detection.

        all_boxes[class][image] = [] or np.array of shape #dets x 5
        """
        raise NotImplementedError

    def get_annotation(self, i):
        if self.annotations is None:
            return None
        return self.annotations[i]

    @property
    def name(self):
        return self._name

    @property
    def num_classes(self):
        return len(self._classes)

    @property
    def classes(self):
        return self._classes

    @property
    def image_names(self):
        return self._image_names

    @property
    def image_indexes(self):
        return self._image_indexes

    @property
    def annotations(self):
        return self._annotations

    @property
    def cache_path(self):
        cache_path = os.path.join(self._data_dir, 'cache')
        mkdir(cache_path)
        return cache_path

    @property
    def num_images(self):
        return len(self.image_names)

    @property
    def epoch(self):
        return self._epoch

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def batch_per_epoch(self):
        return self.num_images // self.batch_size
Exemple #47
0
def main():
    """
        Description: Main function
    """

    # Argument parsing
    args = parse_arguments()

    # Create the directory if it does not exist.
    try:
        os.makedirs(args.output_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    # Creating word list
    if args.dict:
        lang_dict = []
        if os.path.isfile(args.dict):
            with open(args.dict, "r", encoding="utf8", errors="ignore") as d:
                lang_dict = [l for l in d.read().splitlines() if len(l) > 0]
        else:
            sys.exit("Cannot open dict")
    else:
        lang_dict = load_dict(args.language)

    # Create font (path) list
    if args.font_dir:
        fonts = [
            os.path.join(args.font_dir, p) for p in os.listdir(args.font_dir)
            if os.path.splitext(p)[1] == ".ttf"
        ]
    elif args.font:
        if os.path.isfile(args.font):
            fonts = [args.font]
        else:
            sys.exit("Cannot open font")
    else:
        fonts = load_fonts(args.language)

    # Creating synthetic sentences (or word)
    strings = []

    if args.use_wikipedia:
        strings = create_strings_from_wikipedia(args.length, args.count,
                                                args.language)
    elif args.input_file != "":
        strings = create_strings_from_file(args.input_file, args.count)
    elif args.random_sequences:
        strings = create_strings_randomly(
            args.length,
            args.random,
            args.count,
            args.include_letters,
            args.include_numbers,
            args.include_symbols,
            args.language,
        )
        # Set a name format compatible with special characters automatically if they are used
        if args.include_symbols or True not in (
                args.include_letters,
                args.include_numbers,
                args.include_symbols,
        ):
            args.name_format = 2
    else:
        strings = create_strings_from_dict(args.length, args.random,
                                           args.count, lang_dict)

    if args.language == "ar":
        from arabic_reshaper import ArabicReshaper

        arabic_reshaper = ArabicReshaper()
        strings = [
            " ".join([arabic_reshaper.reshape(w) for w in s.split(" ")[::-1]])
            for s in strings
        ]
    if args.case == "upper":
        strings = [x.upper() for x in strings]
    if args.case == "lower":
        strings = [x.lower() for x in strings]

    string_count = len(strings)

    p = Pool(args.thread_count)
    for _ in tqdm(
            p.imap_unordered(
                FakeTextDataGenerator.generate_from_tuple,
                zip(
                    [i for i in range(0, string_count)],
                    strings,
                    [
                        fonts[rnd.randrange(0, len(fonts))]
                        for _ in range(0, string_count)
                    ],
                    [args.output_dir] * string_count,
                    [args.format] * string_count,
                    [args.extension] * string_count,
                    [args.skew_angle] * string_count,
                    [args.random_skew] * string_count,
                    [args.blur] * string_count,
                    [args.random_blur] * string_count,
                    [args.background] * string_count,
                    [args.distorsion] * string_count,
                    [args.distorsion_orientation] * string_count,
                    [args.handwritten] * string_count,
                    [args.name_format] * string_count,
                    [args.width] * string_count,
                    [args.alignment] * string_count,
                    [args.text_color] * string_count,
                    [args.orientation] * string_count,
                    [args.space_width] * string_count,
                    [args.character_spacing] * string_count,
                    [args.margins] * string_count,
                    [args.fit] * string_count,
                    [args.output_mask] * string_count,
                    [args.word_split] * string_count,
                    [args.image_dir] * string_count,
                ),
            ),
            total=args.count,
    ):
        pass
    p.terminate()

    if args.name_format == 2:
        # Create file with filename-to-label connections
        with open(os.path.join(args.output_dir, "labels.txt"),
                  "a",
                  encoding="utf8") as f:
            for i in range(string_count):
                file_name = str(i) + "." + args.extension
                f.write("{} {}\n".format(file_name, strings[i]))
Exemple #48
0
def loadData(start_date, end_date):
    # ============= the first wave of data merge ================
    input_pipe, output_pipe = Pipe(True)
    final_input_pipe, final_output_pipe = Pipe(True)

    pool = Pool(processes=4)
    process_merge_data = []

    # stock quotes
    process_basic_data = basicDataClass(input_pipe=input_pipe, output_pipe=output_pipe,
                                        final_output_pipe=final_output_pipe,
                                        sql_config=ConfigQuant, start_date=start_date, end_date=end_date, **stockQuoteConf)
    # process_basic_data.start()
    # tmp_process = pool.apply_async(process_basic_data.run)
    # process_merge_data.append(tmp_process)
    process_basic_data.run()

    # data to be merged
    # print(len(allMergeDataConf),"to be merged")
    for conf in allMergeDataConf:
        # tmp_process = mergeDataClass(output_pipe=output_pipe, sql_config=ConfigQuant, start_date=start_date, end_date=end_date, **conf)
        # tmp_process.start()
        tmp_ins = mergeDataClass(output_pipe=output_pipe, sql_config=ConfigQuant, start_date=start_date, end_date=end_date, **conf)
        tmp_process = pool.apply_async(tmp_ins.run)
        process_merge_data.append(tmp_process)

    for tmp_process in process_merge_data:
        merge_dict = tmp_process.get()
        process_basic_data.mergeData(merge_dict)



    # output_pipe.close()
    # final_output_pipe.close()
    #
    # try:
    #     final_data = final_input_pipe.recv()
    # except EOFError:
    #     pass
    # print(process_basic_data.data)
    # print('end of the first wave')
    pool.terminate()
    # ========== the second wave of merge ===========
    input_pipe2, output_pipe2 = Pipe(True)
    final_input_pipe2, final_output_pipe2 = Pipe(True)

    pool = Pool(processes=4)
    # receiver process
    # receiver_process = Process(target=receiveMergeData, args=(final_data, input_pipe, final_output_pipe))
    # receiver_process.start()
    # receiver_process = pool.apply_async(receiveMergeData, (final_data, input_pipe2, len(allMergeDataConf2), final_output_pipe2))

    # data to be merge (send loaded data to receiver process to be merged)
    # print(len(allMergeDataConf2), 'to be merged')
    process_merge_data = []
    for conf in allMergeDataConf2:
        tmp_ins = mergeDataClass(output_pipe=output_pipe2, sql_config=ConfigQuant, start_date=start_date,
                                     end_date=end_date, **conf)
        tmp_process = pool.apply_async(tmp_ins.run)
        # tmp_process.start()
        process_merge_data.append(tmp_process)

    for tmp_process in process_merge_data:
        merge_dict = tmp_process.get()
        process_basic_data.mergeData(merge_dict)

    # print('ready for final')
    #
    # # output_pipe2.close()
    # # final_output_pipe2.close()
    #
    # try:
    #     final_data = final_input_pipe2.recv()
    # except EOFError:
    #     pass
    # print('final data shape:', process_basic_data.data.shape)
    # pass

    # convert industry to dummy variables
    chunk_size = 50000
    chunk_num = int(process_basic_data.data.shape[0] / chunk_size)
    if chunk_num * chunk_size < process_basic_data.data.shape[0]:
        chunk_num += 1

    pool = Pool(processes=4)
    tot_child_process = []
    for i in range(chunk_num):
        tmp_data = process_basic_data.data.iloc[i*chunk_size : (i+1)*chunk_size]
        tmp_process = pool.apply_async(pivotStockIndustry, (tmp_data, SW_industry))
        tot_child_process.append(tmp_process)

    tot_data = pd.DataFrame([])
    for tmp_process in tot_child_process:
        tmp_data = tmp_process.get()
        tot_data = tot_data.append(tmp_data)

    # return process_basic_data.data
    return tot_data
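# The chunked pivot above is a generic "split a DataFrame, transform the chunks
# in parallel, reassemble" pattern (note that DataFrame.append used above is
# deprecated in recent pandas; pd.concat is the current idiom). A minimal
# sketch with transform_chunk as an illustrative stand-in for
# pivotStockIndustry, which is not shown here:
import pandas as pd
from multiprocessing import Pool

def transform_chunk(df):
    # placeholder transform; the real code pivots an industry column into dummies
    return df.assign(doubled=df['value'] * 2)

if __name__ == '__main__':
    data = pd.DataFrame({'value': range(10)})
    chunk_size = 3
    chunks = [data.iloc[i:i + chunk_size] for i in range(0, len(data), chunk_size)]
    with Pool(4) as pool:
        parts = pool.map(transform_chunk, chunks)
    print(pd.concat(parts, ignore_index=True))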
Exemple #49
0
def verify_acasxu_style(
    network_file: str,
    means: np.ndarray,
    stds: np.ndarray,
    input_box: List[Tuple[np.ndarray, np.ndarray]],
    output_constraints: List[List[Tuple[int, int, float]]],
    timeout_lp=1,
    timeout_milp=1,
    max_depth=10,
    permitted_depth_extensions=3,
    use_default_heuristic=True,
    complete=True,
    progress_bar=True
) -> Optional[List[Tuple[np.ndarray, Tuple[int, int, float],
                         Optional[float]]]]:
    """
    Verifies an fully-connected (non-convolutional) neural network.

    Returns None if an no counterexamples could be found and the network could not be verified.
    Returns an empty list if the network could be verified.
    Returns a list of inputs for which the network violates the constraints if the network could not be verified.
    """
    domain = "deeppoly"

    model, is_conv = read_onnx_net(network_file)
    eran = ERAN(model, is_onnx=True)

    # total will be updated later
    progress_bar = tqdm(total=1, disable=not progress_bar)

    specLB = [interval[0] for interval in input_box]
    specUB = [interval[1] for interval in input_box]
    _normalize(specLB, means, stds)
    _normalize(specUB, means, stds)

    counterexample_list = []
    # adex_holds stores whether x_adex (below) is actually a counterexample
    # if adex_holds is True, then x_adex is a spurious counterexample
    adex_holds = True

    verified_flag, nn, nlb, nub, _, x_adex = eran.analyze_box(
        specLB, specUB, _init_domain(domain), timeout_lp, timeout_milp,
        use_default_heuristic, output_constraints)

    if not verified_flag and x_adex is not None:
        adex_holds, _, _, _, _, _ = eran.analyze_box(x_adex, x_adex,
                                                     "deeppoly", timeout_lp,
                                                     timeout_milp,
                                                     use_default_heuristic,
                                                     output_constraints)
        if not adex_holds:
            verified_flag = False
            # we need to undo the input normalisation, that was applied to the counterexamples
            counterexample_list.append(
                (np.array(x_adex) * stds + means, output_constraints))

    if not verified_flag and adex_holds:
        # expensive min/max gradient calculation
        nn.set_last_weights(output_constraints)
        grads_lower, grads_upper = nn.back_propagate_gradient(nlb, nub)
        assert len(grads_lower) == len(specLB), 'back_propagate_gradient did not yield gradients for inputs. ' \
                                                '(verify_acasxu_style only supports non-convolutional networks).'

        smears = [
            max(-grad_l, grad_u) * (u - l) for grad_l, grad_u, l, u in zip(
                grads_lower, grads_upper, specLB, specUB)
        ]
        smears = [1e-8 if smear == 0 else smear for smear in smears]
        split_multiple = 20 / np.sum(smears)

        num_splits = [int(np.ceil(smear * split_multiple)) for smear in smears]
        step_size = []
        for i in range(len(specLB)):  # for each input dimension
            if num_splits[i] == 0:
                num_splits[i] = 1
            step_size.append((specUB[i] - specLB[i]) / num_splits[i])

        start_val = np.copy(specLB)
        end_val = np.copy(specUB)
        # _, nn, _, _, _, _ = eran.analyze_box(
        #     specLB, specUB, domain,
        #     timeout_lp, timeout_milp, use_default_heuristic, output_constraints
        # )

        # generate all combinations of splits of the input dimensions
        multi_bounds = [(specLB.copy(), specUB.copy())]
        for d in range(len(specLB)):  # for each input dimension
            # for each split from the previous dimensions
            new_multi_bounds = []
            for specLB_, specUB_ in multi_bounds:
                for i in range(num_splits[d]):
                    specLB_ = specLB_.copy()
                    specUB_ = specUB_.copy()
                    specLB_[d] = start_val[d] + i * step_size[d]
                    specUB_[d] = np.fmin(end_val[d],
                                         start_val[d] + (i + 1) * step_size[d])
                    new_multi_bounds.append((specLB_, specUB_))
            multi_bounds = new_multi_bounds

        # print(f"len(multi_bounds)={len(multi_bounds)}\n"
        #       f"means={means}\n"
        #       f"stds={stds}\n"
        #       f"grads_lower={grads_lower}\n"
        #       f"grads_upper={grads_upper}\n"
        #       f"smeans={smears}\n"
        #       f"num_splits={num_splits}\n"
        #       f"step_size={step_size}\n"
        #       f"start_val={start_val}\n"
        #       f"end_val={end_val}")

        progress_bar.reset(total=len(multi_bounds) + 1)
        progress_bar.update()  # for the first analyze_box run

        failed_already = Value('i', 1)
        pool = None
        try:
            # sequential version
            # res = itertools.starmap(
            #    lambda lb, ub: _acasxu_recursive(lb, ub, model, eran, output_constraints, failed_already,
            #                                     25, 0, domain, timeout_lp, timeout_milp, use_default_heuristic,
            #                                     complete),
            #    multi_bounds
            # )
            arguments = [{
                'specLB': lb,
                'specUB': ub,
                'network_file': network_file,
                'constraints': output_constraints,
                'max_depth': max_depth,
                'permitted_depth_extensions': permitted_depth_extensions,
                'domain': domain,
                'timeout_lp': timeout_lp,
                'timeout_milp': timeout_milp,
                'use_default_heuristic': use_default_heuristic,
                'complete': complete
            } for lb, ub in multi_bounds]
            # using only half of the CPUs sped up the computation on the computers it was tested on
            # and also kept CPU utilisation high for all CPUs.
            pool = Pool(processes=os.cpu_count() // 2,
                        initializer=_init,
                        initargs=(failed_already, ))
            res = pool.imap_unordered(_start_acasxu_recursive, arguments)

            counterexample_list = []
            verified_flag = True
            for verified, counterexamples in res:
                if not verified:
                    verified_flag = False
                    if counterexamples is not None:
                        # convert counterexamples to numpy
                        counterexamples = [
                            (np.array(cx), constraint, value)
                            for cx, constraint, value in counterexamples
                        ]
                        # we need to undo the input normalisation, that was applied to the counterexamples
                        counterexamples = [
                            (cx * stds + means, constraint, value)
                            for cx, constraint, value in counterexamples
                        ]
                        counterexample_list.extend(counterexamples)
                    else:
                        warning(
                            "Property not verified, but no counterexample was found."
                        )
                progress_bar.update()

        except Exception as ex:
            warning(f"Property not verified because of an exception: {ex}.")
            raise ex
        finally:
            progress_bar.close()
            if pool is not None:
                # make sure the Pool is properly closed
                pool.terminate()
                pool.join()
    else:
        # property has been verified in first analyze_box run.
        progress_bar.update()
        progress_bar.close()

    if not verified_flag and len(counterexample_list) > 0:
        info(
            f"Property not verified with counterexamples: {counterexample_list}."
        )
        return counterexample_list
    elif not verified_flag:
        info(f"Property not verified without counterexamples.")
        # raise RuntimeError("Property disproven, but no counterexample found.")
        return None
    else:
        info(f"Property verified.")
        return []
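# The failed_already Value combined with initializer=_init above is the usual
# way to hand a shared flag to pool workers: the flag is passed once at worker
# start-up and stashed in a module-level global. A minimal sketch of that
# mechanism; the names below are chosen for illustration only:
from multiprocessing import Pool, Value

_flag = None

def _init_worker(flag):
    # runs once in every worker process and stores the shared flag
    global _flag
    _flag = flag

def check_and_maybe_clear(i):
    # workers can read and atomically update the shared value
    with _flag.get_lock():
        was_set = _flag.value
        if i == 3:
            _flag.value = 0
    return i, was_set

if __name__ == '__main__':
    flag = Value('i', 1)
    with Pool(2, initializer=_init_worker, initargs=(flag,)) as pool:
        print(pool.map(check_and_maybe_clear, range(6)))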
Exemple #50
0
def handle_title_query(query):
    query = title_beautify(query)
    log_info("Get title query: {0}".format(query))

    #starts search
    res = search_startswith(query)  # and the idf is large
    if res:
        log_info("Found {0} results in db: {1}".format(
            len(res), str([x['_id'] for x in res])))
        return res
    # similar search
    res = similar_search(query)
    if res:
        log_info(u"Found similar results in db: {0}".format(res['_id']))
        return [res]

    # search on web
    searchers = searcher.register_searcher.get_searcher_list()
    parsers = fetcher.register_parser.get_parser_list()
    ctx = JobContext(query)

    args = zip(searchers, [ctx] * len(searchers))
    pool = Pool()
    async_results = [pool.apply_async(searcher_run, arg) for arg in args]

    # Search and get all the results item
    all_search_results = []
    for s in async_results:
        s = s.get()
        if s is None:
            continue
        srs = s['results']

        # try search database with updated title
        try:
            updated_title = s['ctx_update']['title']
        except KeyError:
            pass
        else:
            if updated_title != query:
                query = updated_title
                res = search_exact(query)
                if res:
                    log_info("Found {0} results in db: {1}".format(
                        len(res), str([x['_id'] for x in res])))
                    return res
        all_search_results.extend(srs)

        meta = s.get('ctx_update')
        if meta:
            log_info('Meta update from searcher: {0}'.format(str(meta.keys())))
            ctx.update_meta_dict(meta)
    pool.close()
    pool.terminate()

    # Analyse each result and try to parse info
    download_candidates = []
    parser_used = set()
    found = False
    for sr in all_search_results:
        for parser in parsers:
            if parser.can_handle(sr):
                download_candidates.append((parser, sr))
                if ctx.need_field(parser.support_meta_field):
                    # Already tried this fetcher
                    if not parser.repeatable and \
                            parser.name in parser_used:
                        continue
                    else:
                        parser_used.add(parser.name)

                    succ = parser.fetch_info(ctx, sr)
                    if not succ:
                        continue
                    found = True
                    if ctx.existing is not None:
                        log_info("Found {0} results in db".format(
                            len(ctx.existing)))
                        return [ctx.existing]

    # no metadata or downloadable source found
    if not found and len(download_candidates) == 0:
        return None
    # Save data, return data and start downloading
    try:
        pid = new_paper(ctx)
        ret = [{
            '_id': pid,
            'title': ctx.title,
            'view_cnt': 1,
            'download_cnt': 0
        }]
        ret[0].update(ctx.meta)

        progress_dict[pid] = 0.0
        if len(download_candidates) > 0:
            thread = Thread(target=start_download,
                            args=(download_candidates, ctx, pid))
            thread.start()
        return ret
    except:
        log_exc("Failed to save to db")
Exemple #51
0
def web():
    run = True
    parser = argparse.ArgumentParser()
    parser.add_argument('-src',
                        '--source',
                        dest='video_source',
                        type=int,
                        default=0,
                        help='Device index of the camera.')
    parser.add_argument('-wd',
                        '--width',
                        dest='width',
                        type=int,
                        default=480,
                        help='Width of the frames in the video stream.')
    parser.add_argument('-ht',
                        '--height',
                        dest='height',
                        type=int,
                        default=360,
                        help='Height of the frames in the video stream.')
    parser.add_argument('-num-w',
                        '--num-workers',
                        dest='num_workers',
                        type=int,
                        default=2,
                        help='Number of workers.')
    parser.add_argument('-q-size',
                        '--queue-size',
                        dest='queue_size',
                        type=int,
                        default=5,
                        help='Size of the queue.')
    args = parser.parse_args()

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while run is True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
        cv2.imshow('Video', output_rgb)
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
    video_capture.stop()
    cv2.destroyAllWindows()
Exemple #52
0
from contextlib import contextmanager

@contextmanager
def poolcontext(*args, **kwargs):
    # context manager that terminates the pool when the with-block exits
    pool = Pool(*args, **kwargs)
    yield pool
    pool.terminate()
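# A hedged usage sketch for the helper above; _work is an illustrative task:
def _work(x):
    return x + 1

if __name__ == '__main__':
    with poolcontext(processes=2) as pool:
        print(pool.map(_work, range(5)))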
Exemple #53
0
def performSearch(domains=[], nThreads=16, launchWhois=False):
    """
    Method to perform the mail verification process.

    Arguments
    ---------
        domains: List of domains to check.
        nThreads: Number of threads to use.
        launchWhois: Sets if whois queries will be launched.

    Returns
    -------
        list: A list containing the results as i3visio entities.
    """
    results = []

    # Using threads in a pool if we are not running the program in main
    args = []

    # Returning None if no valid domain has been returned
    if len(domains) == 0:
        return results

    # If the process is executed by the current app, we use the Processes. It is faster than pools.
    if nThreads <= 0 or nThreads > len(domains):
        nThreads = len(domains)

    # Launching the Pool
    # ------------------
    # Example catched from: https://stackoverflow.com/questions/11312525/catch-ctrlc-sigint-and-exit-multiprocesses-gracefully-in-python
    try:
        original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
        pool = Pool(nThreads)
        signal.signal(signal.SIGINT, original_sigint_handler)
    except ValueError:
        # To avoid: ValueError: signal only works in main thread
        pool = Pool(nThreads)

    poolResults = []
    try:

        def log_result(result):
            # This is called whenever foo_pool(i) returns a result.
            # result_list is modified only by the main process, not the pool workers.
            poolResults.append(result)

        for d in domains:
            # We need to create all the arguments that will be needed
            parameters = (
                d,
                launchWhois,
            )
            pool.apply_async(pool_function,
                             args=parameters,
                             callback=log_result)

        # Waiting for results to be finished
        while len(poolResults) < len(domains):
            pass
        # Closing normal termination
        pool.close()
    except KeyboardInterrupt:
        print(
            general.warning(
                "\nProcess manually stopped by the user. Terminating workers.\n"
            ))
        pool.terminate()
        print(general.warning("The following domains were not processed:"))
        pending_tld = ""
        for d in domains:
            processed = False
            for processedDomain in poolResults:
                if str(d) == processedDomain["platform"]:
                    processed = True
                    break
            if not processed:
                print(general.warning("\t- " + str(d["domain"])))
                pending_tld += " " + str(d["tld"])
        print(
            general.warning(
                "[!] If you want to relaunch the app with these domains you can always run the command with: "
            ))
        print(general.warning("\t domainfy ... -t none -u " + pending_tld))
        print(
            general.warning(
                "[!] If you prefer to avoid these platforms you can manually evade them for whatever reason with: "
            ))
        print(general.warning("\t domainfy ... -x " + pending_tld))
    pool.join()

    # Processing the results
    # ----------------------
    for serArray in poolResults:
        data = serArray["data"]
        # We need to recover the results and check if they are not an empty json or None
        if data is not None and data != {}:
            results.append(data)
    return results
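# The SIGINT dance above (ignore SIGINT while the pool forks, then restore the
# handler in the parent) keeps Ctrl-C in the main process so it can terminate
# the workers cleanly. A minimal sketch of that pattern, assuming fork-based
# worker start-up:
import signal
import time
from multiprocessing import Pool

def slow_task(x):
    time.sleep(1)
    return x

if __name__ == '__main__':
    original = signal.signal(signal.SIGINT, signal.SIG_IGN)
    pool = Pool(4)  # workers inherit the "ignore SIGINT" handler
    signal.signal(signal.SIGINT, original)  # parent handles Ctrl-C again
    try:
        print(pool.map(slow_task, range(8)))
        pool.close()
    except KeyboardInterrupt:
        pool.terminate()  # Ctrl-C: kill the workers immediately
    finally:
        pool.join()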
Exemple #54
0
def get_pars_fr(wavst,
                flxst,
                model_patht='../../data/COELHO2014/',
                npools=4,
                fixG=1.0):
    for order in range(len(flxst)):
        flxst[order] = clean_strong_lines(wavst[order], flxst[order], mode=1)

    t0 = time.time()

    global wavs, flxs
    global model_path

    wavs, flxs = wavst.copy(), flxst.copy()
    model_path = model_patht

    gt = np.array([6000, 7000, 8000, 9000, 10000])
    gg = np.array([2.5, 3.0, 3.5, 4.0, 4.5])
    if fixG != -1:
        gg = np.array([fixG])
    gz = np.array([-1, -0.5, 0.0, 0.2])
    gr = np.array([10., 50., 100., 150., 200., 250., 300.])

    #"""
    tr = np.tile(gr, len(gt) * len(gg) * len(gz))
    tg = np.repeat(np.tile(gg, len(gt)), len(gr) * len(gz))
    tz = np.repeat(np.tile(gz, len(gt) * len(gg)), len(gr))
    tt = np.repeat(gt, len(gg) * len(gr) * len(gz))
    tot = np.vstack((tt, tg, tz, tr)).T

    #for pars in tot:
    #	pars = [8000,4.0,-0.5,40.0]
    #	print pars, multiccf(pars)

    p = Pool(npools)
    vals = np.array((p.map(multiccf, list(tot))))
    p.terminate()
    I = np.argmin(vals)
    best_vals = tot[I]
    bt, bg, bz, br = best_vals[0], best_vals[1], best_vals[2], best_vals[3]
    #"""
    t1 = time.time()
    print(bt, bg, bz, br, (t1 - t0) / 60., 'mins')

    #bt,bg,bz,br = 7000.,4.5, 0.2, 100.0
    gt = np.arange(bt - 1000, bt + 1001, 250)
    I = np.where((gt >= 6000) & (gt <= 10000))[0]
    gt = gt[I]
    gr = np.arange(br - 60., br + 61., 20.)
    I = np.where(gr >= 10)[0]
    gr = gr[I]

    tr = np.tile(gr, len(gt) * len(gg) * len(gz))
    tg = np.repeat(np.tile(gg, len(gt)), len(gr) * len(gz))
    tz = np.repeat(np.tile(gz, len(gt) * len(gg)), len(gr))
    tt = np.repeat(gt, len(gg) * len(gr) * len(gz))
    tot = np.vstack((tt, tg, tz, tr)).T

    p = Pool(npools)
    vals = np.array((p.map(multiccf, list(tot))))
    p.terminate()
    I = np.argmin(vals)
    best_vals = tot[I]
    bt, bg, bz, br = best_vals[0], best_vals[1], best_vals[2], best_vals[3]
    t2 = time.time()
    print(bt, bg, bz, br, (t2 - t1) / 60., 'mins')
    #np.savetxt('temp_grid.txt',vals)

    if fixG == -1:
        grid = np.reshape(vals, (len(gt), len(gg), len(gz), len(gr)))
        tckt = interpolate.splrep(gt, np.arange(len(gt)), k=1)
        tckg = interpolate.splrep(gg, np.arange(len(gg)), k=1)
        tckz = interpolate.splrep(gz, np.arange(len(gz)), k=1)
        tckr = interpolate.splrep(gr, np.arange(len(gr)), k=1)

        itckt = interpolate.splrep(np.arange(len(gt)), gt, k=1)
        itckg = interpolate.splrep(np.arange(len(gg)), gg, k=1)
        itckz = interpolate.splrep(np.arange(len(gz)), gz, k=1)
        itckr = interpolate.splrep(np.arange(len(gr)), gr, k=1)

        st = np.arange(gt[0], gt[-1] + 1, 10.)
        sg = np.arange(gg[0], gg[-1] + 0.01, 0.1)
        sz = np.arange(gz[0], gz[-1] + 0.01, 0.1)
        sr = np.arange(gr[0], gr[-1] + 1., 5.)

        st = interpolate.splev(st, tckt)
        sg = interpolate.splev(sg, tckg)
        sz = interpolate.splev(sz, tckz)
        sr = interpolate.splev(sr, tckr)

        tr2 = np.tile(sr, len(st) * len(sg) * len(sz))
        tg2 = np.repeat(np.tile(sg, len(st)), len(sr) * len(sz))
        tz2 = np.repeat(np.tile(sz, len(st) * len(sg)), len(sr))
        tt2 = np.repeat(st, len(sg) * len(sr) * len(sz))
        tot2 = np.vstack((tt2, tg2, tz2, tr2))

        zi = ndimage.map_coordinates(grid, tot2, order=3, mode='nearest')
        I = np.argmin(zi)
        minval = tot2[:, I]

        mint = interpolate.splev(minval[0], itckt)
        ming = interpolate.splev(minval[1], itckg)
        minz = interpolate.splev(minval[2], itckz)
        minr = interpolate.splev(minval[3], itckr)

    else:
        grid = np.reshape(vals, (len(gt), len(gz), len(gr)))
        tckt = interpolate.splrep(gt, np.arange(len(gt)), k=1)
        tckz = interpolate.splrep(gz, np.arange(len(gz)), k=1)
        tckr = interpolate.splrep(gr, np.arange(len(gr)), k=1)

        itckt = interpolate.splrep(np.arange(len(gt)), gt, k=1)
        itckz = interpolate.splrep(np.arange(len(gz)), gz, k=1)
        itckr = interpolate.splrep(np.arange(len(gr)), gr, k=1)

        st = np.arange(gt[0], gt[-1] + 1, 10.)
        sz = np.arange(gz[0], gz[-1] + 0.01, 0.1)
        sr = np.arange(gr[0], gr[-1] + 1., 5.)

        st = interpolate.splev(st, tckt)
        sz = interpolate.splev(sz, tckz)
        sr = interpolate.splev(sr, tckr)

        tr2 = np.tile(sr, len(st) * len(sz))
        tz2 = np.repeat(np.tile(sz, len(st)), len(sr))
        tt2 = np.repeat(st, len(sr) * len(sz))
        tot2 = np.vstack((tt2, tz2, tr2))

        zi = ndimage.map_coordinates(grid, tot2, order=3, mode='nearest')
        I = np.argmin(zi)
        minval = tot2[:, I]

        mint = interpolate.splev(minval[0], itckt)
        ming = fixG
        minz = interpolate.splev(minval[1], itckz)
        minr = interpolate.splev(minval[2], itckr)

    #d = {'grid':grid, 'zi':zi, 'tot2':tot2, 'gt':gt, 'gg':gg, 'gz':gz, 'gr':gr}
    #pickle.dump(d,open('temp_dict.pkl'))

    return float(mint), float(ming), float(minz), float(minr)
Exemple #55
0
def fit_TRA_holy_nksqr_BFGS(lamda_list, TR_pair_list_generator, parameter_list_generator,  nk_f_guess, delta_weight = 0.1, tolerance = 1e-4, no_negative = True):
	'''n_front and n_back must be real valued for this to work without caveats.
thickness and lambda can be any units, so long as they are the same, lamda_list must be sorted'''


	from numpy import pi,exp,abs,sqrt, array, matmul, loadtxt, zeros, savetxt, inf, diff, ones
	from scipy.optimize import root, least_squares, minimize
	from TRANK import extrap


	point_multiplicity = len(TR_pair_list_generator(lamda_list[0]))
	#print(point_multiplicity)
	# 3.0  is from T, R, and A
	abs_delta_weight = sqrt(delta_weight**2  * point_multiplicity * 3.0 * (len(lamda_list)/(len(lamda_list)-1.0)))

	from multiprocessing import Pool, cpu_count
	my_pool = Pool(cpu_count())


	def TR_error(nk_list):
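		# nk_list is the flat optimizer vector [n0, k0, n1, k1, ...]; rebuild the complex
		# refractive index at each wavelength and queue the per-wavelength model inputs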

		c_nk_list = []
		muh_inputs = []
		for i in range(len(lamda_list)):
			nk = nk_list[2*i] + 1.0j*nk_list[2*i+1]
			c_nk_list.append(nk)
			muh_inputs.append( (lamda_list[i], nk, TR_pair_list_generator, parameter_list_generator ) )

		#print (zip(lamda_list, c_nk_list))
		error_list_lists = my_pool.map(TRA_lamda_error, muh_inputs)
		#error_list_lists =my_pool.map(lamda_error, zip(lamda_list, c_nk_list))
		#print (error_list_lists)

		#error_list = []
		#for sub_error_list in error_list_lists:
		#	error_list = error_list + sub_error_list

		base_line_error_square = (array(error_list_lists)**2).sum()


		delta_array = diff(c_nk_list)*abs_delta_weight
		#delta_errors =(delta_array.real**2 + delta_array.imag**2) * abs_delta_weight

		#error_list = error_list + list(delta_errors)
		#error_list = error_list + list(delta_array.real) + list(delta_array.imag)

		delta_errors_square = (delta_array.real**2 + delta_array.imag**2).sum() * abs_delta_weight
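		# unlike the vector-residual fitters below, this objective is a single scalar
		# (data misfit plus smoothness penalty), as expected by scalar minimizers like L-BFGS-B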

		return base_line_error_square + delta_errors_square

	####### now for a guess list ##############

	nk_guess_list = []
	for i in range(len(lamda_list)):
		nk = nk_f_guess(lamda_list[i])
		nk_guess_list.append(abs(nk.real))
		nk_guess_list.append(abs(nk.imag))

	######### test
	#print(TR_error(nk_guess_list))
	############ nk guessing over, time for creating and minimizing error function
	if no_negative:
		solution = minimize(TR_error,
					x0 = nk_guess_list,
					method = 'L-BFGS-B',
					bounds = 2*len(lamda_list)*[(0,inf)] ,
					tol = tolerance,
					options = {'disp' : True } ).x
	else:
		solution = minimize(TR_error,
					x0 = nk_guess_list,
					method = 'L-BFGS-B',
					tol = tolerance,
					options = {'disp' : True } ).x

	my_pool.terminate()
	my_pool.close()
	n_list=[]
	k_list=[]
	for i in range(len(lamda_list)):
		n_list.append(solution[2*i]  )
		k_list.append(solution[2*i+1])

	nf = extrap(lamda_list, n_list, kind = 'cubic')
	kf = extrap(lamda_list, k_list, kind = 'cubic')

	def fit_nk_f(lamda):
		return nf(lamda) + 1.0j*kf(lamda)

	return fit_nk_f
Exemple #56
class TaskPool(object):
    """ A Task Pool provides a thread safe mechanism for running long lived operations
    inside a separate process to avoid blocking the main game loop.

    The event handler can submit a task at any time. When the task completes the task pool update
    will process the callbacks.

    The event handler events Update and Shutdown should call the appropriate task pool method.
    """
    def __init__(self, processes=1, maxtasksperchild=None):
        super(TaskPool, self).__init__()
        mplogger.info("creating multiprocessing pool (n=%s)", processes)
        self.pool = Pool(processes, maxtasksperchild=maxtasksperchild)

        self._lk_result = Lock()
        self._results = []

    def submit(self, fn, args=(), kwargs={}, callback=None, error_callback=None):
        """ submit a task to be run in a background process

        :param fn: a function to be run in a background process
        :param args: the positional arguments to fn, if any
        :param kwargs: the keyword arguments to fn, if any
        :param callback: a callback function which accepts a single argument, the return value from fn.
        The callback is called if the function exits without an exception.
        :param error_callback: a callback function which accepts a single argument, the exception value from fn.
        The callback is called if the function exits because of an unhandled exception.
        """

        self.pool.apply_async(fn, args, kwargs,
            lambda result: self._onSuccess(result, callback),
            lambda ex: self._onFailure(ex, error_callback))

    def _onSuccess(self, result, callback):
        with self._lk_result:
            self._results.append( (result, callback) )

    def _onFailure(self, ex, callback):
        with self._lk_result:
            self._results.append( (ex, callback) )

    def update(self):
        """ check for completed tasks and process the callbacks.
        """

        results = []
        if self._results:
            with self._lk_result:
                results = self._results
                self._results = []

            for result, callback in results:
                if callback:
                    try:
                        callback(result)
                    except Exception as e:
                        mplogger.exception("task callback failed")

    def shutdown(self):
        """ cancel running tasks and stop the task pool """

        self.pool.terminate()
        self.pool.join()
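# A minimal usage sketch (not from the original source) showing how the TaskPool above
# is meant to be driven from a main loop: submit() hands work to a background process,
# and update() runs the callbacks on the calling thread. `load_level` and
# `on_level_loaded` are hypothetical names; the submitted function must be picklable
# (defined at module level), and on spawn-based platforms this should run under an
# `if __name__ == '__main__':` guard.
import time

_done = []

def load_level(path):
    # stand-in for a long-lived operation run in a worker process
    return {'path': path, 'tiles': 1024}

def on_level_loaded(result):
    print('loaded %s' % (result,))
    _done.append(True)

tasks = TaskPool(processes=2)
tasks.submit(load_level, args=('maps/level1.dat',), callback=on_level_loaded)

while not _done:
    # ... handle events, render a frame ...
    tasks.update()      # runs on_level_loaded here once the worker finishes
    time.sleep(0.05)

tasks.shutdown()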
Exemple #57
def fit_TRA_nk_sqr(lamda_list, TR_pair_list_generator, parameter_list_generator,  nk_f_guess, delta_weight = 0.1, tolerance = 1e-4, no_negative = True, interpolation_type = 'cubic', method = 'least_squares'):
	'''n_front and n_back must be real valued for this to work without caveats.
	Thickness and lambda can be in any units, so long as they are the same; lamda_list must be sorted.'''


	from numpy import pi,exp,abs,sqrt, array, matmul, loadtxt, zeros, savetxt, inf, diff, ones
	from scipy.optimize import root, least_squares, minimize
	from TRANK import extrap


	point_multiplicity = len(TR_pair_list_generator(lamda_list[0]))
	#print(point_multiplicity)
	# 3.0  is from T, R, and A
	abs_delta_weight = sqrt(delta_weight**2  * point_multiplicity * 3.0 * (len(lamda_list)/(len(lamda_list)-1.0)))

	from multiprocessing import Pool, cpu_count
	my_pool = Pool(cpu_count())
	#my_pool = Pool()
	#my_pool = Pool(1)


	def TR_error(nk_list):

		c_nk_list = []
		muh_inputs = []
		for i in range(len(lamda_list)):
			nk = nk_list[2*i] + 1.0j*nk_list[2*i+1]
			c_nk_list.append(nk)
			muh_inputs.append( (lamda_list[i], nk, TR_pair_list_generator, parameter_list_generator ) )
			
		#print (zip(lamda_list, c_nk_list))
		error_list_lists = my_pool.map(TRA_lamda_error, muh_inputs)
		#error_list_lists =my_pool.map(lamda_error, zip(lamda_list, c_nk_list))
		#print (error_list_lists)

		error_list = []
		for sub_error_list in error_list_lists:
			error_list = error_list + sub_error_list

		delta_array = diff(c_nk_list)*abs_delta_weight
		#delta_errors =(delta_array.real**2 + delta_array.imag**2) * abs_delta_weight

		#error_list = error_list + list(delta_errors)
		error_list = error_list + list(delta_array.real) + list(delta_array.imag)

		return array(error_list)

	####### now for a guess list ##############

	nk_guess_list = []
	for i in range(len(lamda_list)):
		nk = nk_f_guess(lamda_list[i])
		nk_guess_list.append(abs(nk.real))
		nk_guess_list.append(abs(nk.imag))

	######### test
	#print(TR_error(nk_guess_list))
	############ nk guessing over, time for creating and minimizing error function
	if method == 'least_squares':
		inputs = dict(fun = TR_error,
					x0 = nk_guess_list,
					ftol = tolerance,
					xtol = tolerance,
					gtol = tolerance,
					verbose = 2)
		if no_negative:
			inputs.update(dict( bounds = [zeros(2*len(lamda_list)),inf*ones(2*len(lamda_list))] ))
		solution = least_squares(**inputs ).x

	elif method == 'L-BFGS-B':
		inputs = dict(fun = lambda x: 0.5*sum(TR_error(x)**2),
				x0 = nk_guess_list,
				method = 'L-BFGS-B',
				tol = tolerance,
				options = {'disp' : True, 'iprint': 2}  )
		if no_negative:
			inputs.update(dict( bounds = 2*len(lamda_list)*[(0,inf)]))
		solution = minimize(**inputs ).x

	elif method == 'SLSQP':
		inputs = dict(fun = lambda x: 0.5*sum(TR_error(x)**2),
				x0 = nk_guess_list,
				method = 'SLSQP',
				tol = tolerance,
				options = {'disp' : True, 'iprint': 2}  )
		if no_negative:
			inputs.update(dict( bounds = 2*len(lamda_list)*[(0,inf)]))
		solution = minimize(**inputs ).x

	elif method == 'TNC':
		inputs = dict(fun = lambda x: 0.5*sum(TR_error(x)**2),
				x0 = nk_guess_list,
				method = 'TNC',
				tol = tolerance,
				options = {'disp' : True, 'iprint': 2}  )
		if no_negative:
			inputs.update(dict( bounds = 2*len(lamda_list)*[(0,inf)]))
		solution = minimize(**inputs ).x

	else:
		raise ValueError("Invalid minimization method!")

	my_pool.terminate()
	my_pool.close()
	n_list=[]
	k_list=[]
	for i in range(len(lamda_list)):
		n_list.append(solution[2*i]  )
		k_list.append(solution[2*i+1])

	nf = extrap(lamda_list, n_list, kind = interpolation_type)
	kf = extrap(lamda_list, k_list, kind = interpolation_type)

	def fit_nk_f(lamda):
		return nf(lamda) + 1.0j*kf(lamda)

	return fit_nk_f
def multithreaded_afree(files=[],
                        output='',
                        NOG=False,
                        log=False,
                        threshold=0,
                        threads=1):
    """Multi-thread Afree with filtering
    
    Can do NOGs. Can do threshold filtering. 
    This function is probably unnecessary as file I/O is the limit. Use afree_unique instead.
    """
    if not output:
        return
    if not (files and len(files)):
        return

    # sort files by file size
    # this tries to optimize runtime by grouping similarly sized files into the same blocks
    try:
        temp = sorted(files, key=os.path.getsize)
        files = temp
    except Exception as e:
        print e

    # use blocks to conserve memory!
    # write values out at end of each block
    # Block size may need to be optimized...
    blocks = split_to_n_files(files, threads)

    final = set()
    p = Pool(threads)
    for i, block in enumerate(blocks):
        try:
            args = [(file, NOG, log, threshold, True) for file in block]
            results = None
            afree = p.map_async(__multi_wrapper, args)
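            # poll get() with a timeout instead of blocking so the parent process stays
            # responsive to KeyboardInterrupt and can terminate the pool cleanly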
            while not results:
                try:
                    results = afree.get(10)
                except TimeoutError:
                    pass
                except KeyboardInterrupt:
                    p.terminate()
                    return
                except Exception as e:
                    print e
                    p.terminate()
                    return

            for result in results:
                final.update(result)
        except Exception as e:
            print e
            break
    p.close()

    if final:
        with open(output, 'wb') as w:
            # write line by line, in case something goes wrong
            # have fail safe, really want to try to save results!
            for line in final:
                for attempt in range(10):
                    try:
                        w.write(line)
                    except:
                        pass
                    else:
                        break
                else:
                    "Failed to write to file"
                    break
Exemple #59
def fit_spectra_nk_sqr(lamda_list, spectrum_list_generator, parameter_list_generator,  
					nk_f_guess, delta_weight = 0.1, k_weight_fraction = 1.0, tolerance = 1e-5, 
					no_negative = True, interpolation_type = 'cubic', method = 'least_squares', threads = 0,
					input_data=None, test_setup=None, TB1=None
					):
	'''n_front and n_back must be real valued for this to work without caveats.
	Thickness and lambda can be in any units, so long as they are the same; lamda_list must be sorted.'''


	from numpy import pi,exp,abs,sqrt, array, matmul, loadtxt, zeros, savetxt, inf, diff, ones
	from scipy.optimize import root, least_squares, minimize
	from TRANK import extrap


	#point_multiplicity = len(spectrum_list_generator(lamda_list[0]))
	#print(point_multiplicity)

	#point_multiplicity_list = [len(spectrum_list_generator(lamda)) for lamda in lamda_list ]
	#point_multiplicity = point_multiplicity_list[0]

	abs_delta_weight = sqrt(delta_weight**2  * (len(lamda_list)/(len(lamda_list)-1.0)))

	from multiprocessing import Pool, cpu_count
	if threads <= 0:
		threads = cpu_count()

	TB1.append('Using %i Processes' % threads)

	my_pool = Pool(threads)

	def F_error(nk_list):
		
		c_nk_list = []
		muh_inputs = []
		for i in range(len(lamda_list)):
			nk = nk_list[2*i] + 1.0j*nk_list[2*i+1]				
			c_nk_list.append(nk)
			muh_inputs.append( (lamda_list[i], nk, test_setup) )
			'''
			ctx=mp.get_context('spawn')
			p=ctx.Process(target=spectrum_lamda_error,args=(child_conn,muh_inputs[i],))
			p.start()
			print (parent_conn.recv())
			p.join()
			
			'''
		
		#lock=RLock()	
		
		error_list_lists =my_pool.map(spectrum_lamda_error, muh_inputs)
		
		
		#error_list_lists = map(spectrum_lamda_error, muh_inputs)

		# combine the per-wavelength error lists into one flat residual list
		error_list = []
		for sub_error_list in error_list_lists:
			error_list = error_list + sub_error_list
		
		
		delta_array = diff(c_nk_list)*abs_delta_weight

		error_list = error_list + list(delta_array.real) + list(k_weight_fraction * delta_array.imag)  # smoothness residuals: penalize point-to-point jumps in n and (weighted) k
		
		return error_list

	####### now for a guess list ##############

	nk_guess_list = []
	for i in range(len(lamda_list)):
		nk = nk_f_guess(lamda_list[i])
		if no_negative:
			n = nk.real
			k = nk.imag
			if n < 0.0 : n = 0.0
			if k < 0.0 : k = 0.0
			nk_guess_list.append(n)
			nk_guess_list.append(k)
		else:
			nk_guess_list.append(nk.real)
			nk_guess_list.append(nk.imag)


	######### test
	if False: print(F_error(nk_guess_list))
	############ nk guessing over, time for creating and minimizing error function
	if method == 'least_squares':
		inputs = dict(fun = F_error,
					x0 = nk_guess_list,
					ftol = tolerance,
					xtol = tolerance,
					gtol = tolerance,
					verbose = 2)
		if no_negative:
			inputs.update(dict( bounds = [zeros(2*len(lamda_list)),inf*ones(2*len(lamda_list))] ))
		solution = least_squares(**inputs ).x

	elif method == 'L-BFGS-B':
		inputs = dict(fun = lambda x: 0.5*sum(array(F_error(x))**2),
				x0 = nk_guess_list,
				method = 'L-BFGS-B',
				tol = tolerance,
				options = {'disp' : True, 'iprint': 2}  )
		if no_negative:
			inputs.update(dict( bounds = 2*len(lamda_list)*[(0,inf)]))
		solution = minimize(**inputs ).x

	elif method == 'SLSQP':
		inputs = dict(fun = lambda x: 0.5*sum(array(F_error(x))**2),
				x0 = nk_guess_list,
				method = 'SLSQP',
				tol = tolerance,
				options = {'disp' : True, 'iprint': 2}  )
		if no_negative:
			inputs.update(dict( bounds = 2*len(lamda_list)*[(0,inf)]))
		solution = minimize(**inputs ).x

	elif method == 'TNC':
		inputs = dict(fun = lambda x: 0.5*sum(array(F_error(x))**2),
				x0 = nk_guess_list,
				method = 'TNC',
				tol = tolerance,
				options = {'disp' : True, 'iprint': 2}  )
		if no_negative:
			inputs.update(dict( bounds = 2*len(lamda_list)*[(0,inf)]))
		solution = minimize(**inputs ).x

	else:
		raise ValueError("Invalid minimization method!")

	
	my_pool.terminate()
	my_pool.close()


	nk_list=[]
	for i in range(len(lamda_list)):
		nk_list.append(solution[2*i] + 1.0j*solution[2*i+1]  )

	fit_nk_f = extrap(lamda_list, nk_list, kind = interpolation_type)


	return fit_nk_f
Exemple #60
def fit_TRA_nk_sqr_KK_compliant(lamda_list, lamda_fine, TR_pair_list_generator, parameter_list_generator,  nk_f_guess,
								delta_weight = 0.1, tolerance = 1e-5, no_negative = True, interpolation_type = 'cubic', method = 'least_squares'):
	'''n_front and n_back must be real valued for this to work without caveats.
	Thickness and lambda can be in any units, so long as they are the same; lamda_list must be sorted.'''


	from numpy import pi,exp,abs,sqrt, array, matmul, loadtxt, zeros, savetxt, inf, diff, ones, mean
	from scipy.optimize import root, least_squares, minimize
	from TRANK import extrap


	point_multiplicity = len(TR_pair_list_generator(lamda_list[0]))
	#print(point_multiplicity)
	# 3.0  is from T, R, and A
	abs_delta_weight = sqrt(delta_weight**2  * point_multiplicity * 3.0 * (len(lamda_list)/(len(lamda_list)-1.0)))

	from multiprocessing import Pool, cpu_count
	my_pool = Pool(cpu_count())
	#my_pool = Pool()
	#my_pool = Pool(1)


	def TR_error(k_and_p_list):
		# the last value is the principal value
		#FYI -> k = array(k_and_p_list[0:-1])
		k = k_and_p_list[0:-1]
		p = k_and_p_list[-1]
		n = p + KK_lamda(lamda_list = lamda_list, lamda_fine = lamda_fine,  k = k )
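		# only k (plus the constant offset p) is free; n is reconstructed from k through the
		# Kramers-Kronig transform in KK_lamda, which keeps n and k mutually consistent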



		muh_inputs = []
		for i in range(len(lamda_list)):
			nk = n[i]+1.0j*k[i]# double check this works properly later

			muh_inputs.append( (lamda_list[i], nk, TR_pair_list_generator, parameter_list_generator ) )

		#print (zip(lamda_list, c_nk_list))
		error_list_lists = my_pool.map(TRA_lamda_error, muh_inputs)
		#error_list_lists =my_pool.map(lamda_error, zip(lamda_list, c_nk_list))
		#print (error_list_lists)

		error_list = []
		for sub_error_list in error_list_lists:
			error_list = error_list + sub_error_list


		error_list = error_list + list( abs_delta_weight*diff(n) )   + list( abs_delta_weight * diff(k))

		return array(error_list)  # as an array so the scalar objectives below can square it elementwise

	####### now for a guess list ##############

	guess_k_and_p_list= []
	p = 0.0
	for i in range(len(lamda_list)):
		nk = nk_f_guess(lamda_list[i])
		if no_negative and nk.imag < 0.0:
			k = 0
		else:
			k = nk.imag

		guess_k_and_p_list.append( k)
		p+= nk.real
	# now we put p at the end
	p = p/len(lamda_list) - 1.0   # this is a guess for the principal value
	print ('principal value guess:',p)
	guess_k_and_p_list.append(p)


	######### test
	if False: print(TR_error(guess_k_and_p_list)) #use this to see if the TR_error works
	############ nk guessing over, time for creating and minimizing error function

	if method == 'least_squares':
		inputs = dict(fun = TR_error,
					x0 =  guess_k_and_p_list,
					ftol = tolerance,
					xtol = tolerance,
					gtol = tolerance,
					verbose = 2)
		if no_negative:
			inputs.update(dict( bounds = [len(lamda_list)*[0.0]+[-inf],len(lamda_list)*[inf]+[inf]] ))
		solution = least_squares(**inputs ).x

	elif method == 'SLSQP':
		inputs = dict(fun = lambda x: 0.5*sum(TR_error(x)**2),
				x0 = guess_k_and_p_list,
				method = 'SLSQP',
				tol = tolerance,
				options = {'disp' : True, 'iprint': 2}  )
		if no_negative:
			inputs.update(dict( bounds = len(lamda_list)*[(0,inf)]  + [(-inf,inf)] ))
		solution = minimize(**inputs ).x

	elif method == 'L-BFGS-B':
		inputs = dict(fun = lambda x: 0.5*sum(TR_error(x)**2),
				x0 = guess_k_and_p_list,
				method = 'L-BFGS-B',
				tol = tolerance,
				options = {'disp' : True, 'iprint': 2}  )
		if no_negative:
			inputs.update(dict( bounds = len(lamda_list)*[(0,inf)]  + [(-inf,inf)] ))
		solution = minimize(**inputs ).x

	elif method == 'TNC':
		inputs = dict(fun = lambda x: 0.5*sum(TR_error(x)**2),
				x0 = guess_k_and_p_list,
				method = 'TNC',
				tol = tolerance,
				options = {'disp' : True, 'iprint': 2}  )
		if no_negative:
			inputs.update(dict( bounds = len(lamda_list)*[(0,inf)]  + [(-inf,inf)] ))
		solution = minimize(**inputs ).x

	else:
		raise ValueError("Invalid minimization method!")

	my_pool.terminate()
	my_pool.close()

	k_and_p_list = solution
	k = k_and_p_list[0:-1]
	p = k_and_p_list[-1]
	n = p + KK_lamda(lamda_list = lamda_list, lamda_fine = lamda_fine,  k = k )
	print ('Final principal value:',p)

	nf = extrap(lamda_list, n, kind = interpolation_type)
	kf = extrap(lamda_list, k, kind = interpolation_type)

	def fit_nk_f(lamda):
		return nf(lamda) + 1.0j*kf(lamda)

	return fit_nk_f