Example #1
import logging
import multiprocessing
import sys


def init_logging():
    """Initialize the multiprocessing logging system."""
    debug = '--debug' in sys.argv
    level = logging.DEBUG if debug else logging.INFO
    # log_to_stderr() returns the multiprocessing logger with a stderr handler attached.
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(level)
    def __init__(self, cmd_param):
        '''
        Main-process class; implemented as a Process subclass this way for Windows compatibility.
        '''
        super(Mtask, self).__init__()
        self.start_time = datetime.datetime.now()
        self.parent_timeout = 180
        self.parent_timeout_flag = 0
        self.child_timeout = 120
        self.child_num = 10
        self.slice_num = 20
        self.process_list = []
        self.result = []
        self.batch_id = 0
        self.print_flag = 1
        self.mult_debug_flag = 0
        self.cmd_param = cmd_param

        if self.mult_debug_flag:
            # set up the multiprocessing log
            multiprocessing.log_to_stderr()
            logger = multiprocessing.get_logger()
            logger.setLevel(logging.INFO)
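
The docstring above alludes to Windows, where child processes are spawned rather than forked: a Process subclass like this has to live at module level and its start()/join() calls belong under an if __name__ == '__main__': guard. A minimal hedged sketch of that pattern (the Mtask name echoes the snippet, but the constructor arguments, run body and debug flag here are illustrative, not taken from the original):

import datetime
import logging
import multiprocessing


class Mtask(multiprocessing.Process):
    """Module-level Process subclass so Windows' spawn start method can re-import it."""

    def __init__(self, cmd_param, debug=False):
        super(Mtask, self).__init__()
        self.cmd_param = cmd_param
        self.start_time = datetime.datetime.now()
        if debug:
            # Attach a stderr handler to the shared multiprocessing logger.
            multiprocessing.log_to_stderr()
            multiprocessing.get_logger().setLevel(logging.INFO)

    def run(self):
        # Runs in the child; multiprocessing re-applies the stderr handler for spawned children too.
        multiprocessing.get_logger().info('running command: %s', self.cmd_param)


if __name__ == '__main__':
    # The guard is required on Windows: re-importing the module must not re-start processes.
    task = Mtask('echo hello', debug=True)
    task.start()
    task.join()
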
Example #3
def main():
  multiprocessing.log_to_stderr()
  logger = multiprocessing.get_logger()
  logger.setLevel(logging.INFO)
  pool = RedisPool(30)
  p = WorkerEngine(pool, 'Fetcher', 25)
  p.start(logger)
Example #4
def setup_logger(loglevel=conf.DAEMON_LOG_LEVEL, logfile=None,
        format=conf.LOG_FORMAT, **kwargs):
    """Setup the ``multiprocessing`` logger. If ``logfile`` is not specified,
    ``stderr`` is used.

    Returns logger object.
    """
    if not _monkeypatched[0]:
        monkeypatch()
        _monkeypatched[0] = True

    logger = get_default_logger(loglevel=loglevel)
    if logger.handlers:
        # Logger already configured
        return logger
    if logfile:
        if hasattr(logfile, "write"):
            log_file_handler = logging.StreamHandler(logfile)
        else:
            log_file_handler = logging.FileHandler(logfile)
        formatter = logging.Formatter(format)
        log_file_handler.setFormatter(formatter)
        logger.addHandler(log_file_handler)
    else:
        import multiprocessing
        multiprocessing.log_to_stderr()
    return logger
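
For comparison, a self-contained sketch of the same fallback logic (file handler when a path or stream is given, otherwise log_to_stderr()), without the config globals and monkeypatching used above; setup_mp_logger and its defaults are assumptions, not part of the original:

import logging
import multiprocessing


def setup_mp_logger(loglevel=logging.INFO, logfile=None,
                    fmt="[%(asctime)s %(processName)s %(levelname)s] %(message)s"):
    """Configure the multiprocessing logger; fall back to stderr when no logfile is given."""
    logger = multiprocessing.get_logger()
    logger.setLevel(loglevel)
    if logger.handlers:
        return logger  # already configured, do not add duplicate handlers
    if logfile:
        handler = (logging.StreamHandler(logfile) if hasattr(logfile, "write")
                   else logging.FileHandler(logfile))
        handler.setFormatter(logging.Formatter(fmt))
        logger.addHandler(handler)
    else:
        multiprocessing.log_to_stderr()
    return logger
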
Example #5
def test_log():
    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)
    t1 = time.time()
    print(time.time() - t1)
    logger.info("done")
def run_prevalence(out_dir, remove_rt, working_dir, produce_prevalence):
    """
    Create exact prevalences over a list of DRMs provided by a sequence
    object when provided with simulated resistant and susceptible files.

    Args:
        out_dir: The final folder into which to place the anonymized files.
        remove_rt: Are we removing a piece of the RT gene?
        working_dir: The folder in which to place temporary files.
        produce_prevalence: what prevalence to produce

    Returns:
        True on completion.
    """

    print "Building exact prevalences."

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    csv_rows = []
    threads = []

    with open(os.path.join(working_dir, error_filename), 'rb') as handle:
        error_data = pickle.load(handle)
        platform = error_data['platform']
        paired_end = error_data['paired_end']

        manifest_queue = multiprocessing.Manager().Queue()
        multiprocessing.log_to_stderr()
        p = LoggingPool(len(error_data['files']))
    
        for result in error_data['files']:

            if remove_rt:
                result['sequence'].remove_rt = True

            process_result = p.apply_async(run_prevalence_thread, [
                manifest_queue, platform, paired_end,
                    result, working_dir, out_dir, produce_prevalence
            ])

        p.close()
        p.join()

        with open(os.path.join(out_dir, 
            final_csv_filename), 'w') as csv_handle,\
            open(os.path.join(out_dir, 
            final_json_filename), 'w') as json_handle:
            csv_handle.write(','.join(csv_header_rows) + '\n')
            test = sample.header(plat.platforms[platform],
                paired_end)
            test['samples'] = []
            while not manifest_queue.empty():
                s = manifest_queue.get()
                test['samples'].append(s.encode(plat.platforms[platform]))
                csv_handle.write(s.dump_csv())
            json_handle.write(json.dumps(test, indent=2))
                
    return True
def SumData(roi_start_list, roi_end_list, file_list, file_path, istart, iend, queue):
    logger = mp.get_logger()
    mp.log_to_stderr(logging.INFO)

    roi_sums = mproc.SHARED_ARRAY
    data_array = XYDataArray()
    num_rois = len(roi_start_list)
    logger.info("Reading files from %d to %d" % (istart, iend))
    if istart > 0:
        istart = istart - 1

    # Process each file in the list that falls in the range istart to iend
    for i in range(istart, iend):

        # Read in the information from the file and create numpy arrays from that information.
        data_array.CreateArrays(os.path.join(file_path, file_list[i]))

        # Sum the data in the arrays that lie between the roi values.  Do this
        # for each roi that was created.
        for j in range(num_rois):
#           logger.info("Summing roi %d from file %d" % (j, i))
            roi_sums[j][i] = roi_sums[j][i] + data_array.SumROIData(roi_start_list[j], roi_end_list[j])

        # Add a value of 1 to the queue so that the user interface can be updated
        # with the latest progress.
        queue.put(1)
    return roi_sums
def SumPixels(file_list, file_path, istart, iend, queue):
    logger = mp.get_logger()
    mp.log_to_stderr(logging.INFO)

    roi_sums = mproc.SHARED_ARRAY
    data_array = TiffDatatArray()
    logger.info("Reading files from %d to %d" % (istart, iend))
    if istart > 0:
        istart = istart - 1

    # Process each file in the list that falls in the range istart to iend
    for i in range(istart, iend):

        # Read in the information from the file and create numpy arrays from that information.
        data_array.CreateArrays(os.path.join(file_path, file_list[i]))

        # Add this file's full data array to the running pixel sums.
        new_sum = data_array.GetDataArray()
        roi_sums = numpy.add(roi_sums, new_sum)

        # Add a value of 1 to the queue so that the user interface can be updated
        # with the latest progress.
        queue.put(1)
    return roi_sums
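
Both SumData and SumPixels read mproc.SHARED_ARRAY, a module-level array the parent process is expected to allocate before the workers are forked. One plausible way that global could be created; this is a hedged sketch, and everything except the SHARED_ARRAY name is an assumption:

import ctypes
import multiprocessing as mp

import numpy

SHARED_ARRAY = None  # filled in by the parent before any worker starts


def create_shared_array(num_rois, num_files):
    """Allocate a lock-free shared buffer and expose it to workers as a numpy view."""
    global SHARED_ARRAY
    raw = mp.RawArray(ctypes.c_double, num_rois * num_files)
    # Works with the fork start method, where children inherit this module global.
    SHARED_ARRAY = numpy.frombuffer(raw, dtype=numpy.float64).reshape(num_rois, num_files)
    return SHARED_ARRAY
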
	def init(self, nprocs=None, spawnonce=True):
		multiprocessing.log_to_stderr(logging.WARN)
		if nprocs is None:
			self.nprocs = multiprocessing.cpu_count()
		else:
			self.nprocs = multiprocessing.cpu_count() + nprocs if nprocs < 0 else nprocs
		self.proc = []
		self.spawnonce = spawnonce
def main():
    if len(sys.argv) > 1:
        threads = sys.argv[1]
    else:
        threads = 1
    print('Threads to process :', threads)
    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)

    # while True:
    Threadstart(int(threads))
    def which_host(self, urllist, attr):
        """check every url in the given list against all regular expressions
        and extract the value of the chosen html attribute.
        Then use a queue and enough processes to download all matched urls"""
        # make a queue and enough processes as numprocs
        self.q = Queue()
        self.ps = (Process(target=self.use_queue, args=()) for i in range(self.numprocs))

        # enable multiprocessing logging feature
        if debug:
            logger.setLevel(logging.DEBUG)
            log_to_stderr(logging.DEBUG)


        for p in self.ps:
            # start all processes
            p.start()
        
        # piping the urllist urls into a set to purge duplicates
        finalset = set()
        for L in urllist:
            self.stringl = L.get(attr, None)
            # remove the anonym.to string before urls
            if self.stringl.startswith("http://anonym.to/?"):
                self.stringl = re.sub('http://anonym.to/\?', '', self.stringl)
            finalset.add(self.stringl)


        for L in finalset:
            # iterate over the regexp dictionary items; when finding a url
            # matching, put the the class name, url and self.basedir in the queue
            for k, v in regexp_dict.items():
                if k.search(L):
                    self.logger.info("downloading %s" % L)
                    # instantiate and then pass the parse method to the queue.
                    # it downloads but doesn't make the queue do its job
#                    parser = v(L, self.basedir)
#                    self.q.put((parser.parse()))

                    # add the class name and the parameters needed for its __init__
                    # into the queue
                    self.q.put((v, (L, self.basedir)))
                    self.img_counter = self.img_counter + 1
                else:
                    continue

        for i in range(self.numprocs):
            # put a STOP to end the iter builtin inside use_queue
            self.q.put("STOP")

        self.logger.info('%d images were present' % self.img_counter)
        print("%d images were present" % self.img_counter)
def main():
    global csvout
    if len(sys.argv) > 1:
        threads = sys.argv[1]
    else:
        threads = 1
    print('Threads to run :', threads)

    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)

    Threadstart(threads)
def multiprocess(args):
    step = ((args.token_hi - args.token_lo) // args.worker_count) + 1  # integer step so range() accepts it
    tr1 = range(args.token_lo, args.token_hi, step)  # intermediate points
    tr2 = [(t, t + 1) for t in tr1[1:]]  # add adjacent points
    tr3 = [t for st in tr2 for t in st]  # flatten
    tr4 = [args.token_lo] + tr3 + [args.token_hi]  # add end points
    token_ranges = [tr4[i:i + 2] for i in range(0, len(tr4), 2)]  # make pairs
 
    rate = args.throttle_rate / args.worker_count
 
    multiprocessing.log_to_stderr().setLevel(logging.INFO)
    manager = Manager()
    results = manager.dict()  # create a special shared dict to gather results
 
    workers = [
        Process(
            target=main,
            args=(
                args, worker_index, token_ranges[worker_index], rate, results
            )
        )
        for worker_index in range(args.worker_count)
    ]
 
    os_times_start = os.times()
 
    for worker in workers:
        worker.start()
 
    for worker in workers:
        worker.join()
 
    os_times_stop = os.times()
    exitcode = 0
 
    for worker in workers:
        if worker.exitcode:
            exitcode = worker.exitcode
            break
 
    if results:
        # transform the special dict
        results_dict = analyze_results(results, os_times_stop, os_times_start)
 
        if args.json_output:
            print_json(args, results_dict)
        else:
            print_arguments(args)
            print_results(results_dict)
 
    return(exitcode)
def run_error(platform, working_dir, pcr_error, env_error, 
    human_error, paired_end):
    """
    Simulate sequencing error from the output generated by run_diversity.

    Args:
        platform: string One of "roche", "illumina" or "ion".
        working_dir: path The folder in which to place temporary files.
        pcr_error: bool Should we include a PCR error
        env_error: bool Should we include an ENV error
        human_error: bool Should we include human DNA
        paired_end: bool Are we simulating paired_end data?

    Returns:
        True on completion.
    """

    print "Simulating reads from sequence sets."

    error_data = {}
    # properties of the simulation
    error_data['paired_end'] = paired_end
    error_data['platform'] = platform

    with open(os.path.join(working_dir, evolved_filename), 'rb') as handle:

        evolved_data = pickle.load(handle)

        multiprocessing.log_to_stderr()
        p = LoggingPool(len(evolved_data))
        file_queue = multiprocessing.Manager().Queue()
       
        for result in evolved_data:

            p.apply_async(run_error_thread, [
                file_queue, result, platform, working_dir, 
                pcr_error, env_error, human_error, paired_end
            ])

        p.close()
        p.join()

        error_data['files'] = []
        while not file_queue.empty():
            error_data['files'].append(file_queue.get())

    with open(os.path.join(working_dir, error_filename), 'wb') as handle:
        pickle.dump(error_data, handle, protocol=pickle.HIGHEST_PROTOCOL)

    return True
def doMultiprocess(app):
    applist = {'pdb2pqr':doPDB2PQR,
               'apbs':doAPBS,
               'chimera':doChimera}
    multiprocessing.log_to_stderr(logging.INFO)               # set logging to info level rather than DEBUG
    pdb_ids = glbl.model_input.keys()
    manager = Manager()                                       # creates shared memory manager object
    nextPDBid = Queue()                                       # Create Queue object to serve as shared id generator across processes
    for pid in pdb_ids: nextPDBid.put(pid)                    # Load the ids to be tested into the Queue
    for x in range(0,multiprocessing.cpu_count()):            # Create one process per logical CPU
        p = Process(target=applist[app], args=(nextPDBid,))   # Assign process to app function, passing in the Queue
        glbl.jobs.append(p)                                   # Add the process to a list of running processes
        p.start()                                             # Start process running
    for j in glbl.jobs:
        j.join()  
    return     
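
The per-app worker functions in applist are not shown; a worker of the expected shape would drain the shared id queue until it is empty, something like this sketch (hypothetical; the real doPDB2PQR/doAPBS/doChimera bodies differ):

import logging
import multiprocessing
import queue  # Queue on Python 2


def doPDB2PQR(nextPDBid):
    """Pull PDB ids off the shared queue until it is drained, then exit."""
    logger = multiprocessing.get_logger()
    while True:
        try:
            pdb_id = nextPDBid.get(timeout=1)
        except queue.Empty:
            break  # queue drained: let the process finish so join() returns
        logger.info("processing %s", pdb_id)
        # ... run pdb2pqr for pdb_id here ...
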
Example #16
def main():
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(logging.DEBUG)

    logger.info('Start main module')

    managerSpeech = multiprocessing.Manager()
    managerSpeechDict = managerSpeech.dict()
    managerSpeechEvent = managerSpeech.Event()

    juliusProc = multiprocessing.Process(target=coreJulius, args=(logger, managerSpeechDict, managerSpeechEvent))
    juliusProc.name = "CoreJulius"
    juliusProc.daemon = True
    juliusProc.start()

    speechProc = multiprocessing.Process(target=coreSpeech, args=(logger, managerSpeechDict, managerSpeechEvent))
    speechProc.name = "CoreSpeech"
    speechProc.daemon = True
    speechProc.start()

    core_speech.play_sound("do_start.wav", logger)

    while True:
        try:
            time.sleep(1)
        except KeyboardInterrupt:
            juliusProc.join(2)
            if juliusProc.is_alive():
                juliusProc.terminate()
            speechProc.join(2)
            if speechProc.is_alive():
                speechProc.terminate()
            managerSpeech.shutdown()
            break
Example #17
def main():
    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)

    path = ''
    if len(sys.argv) not in (1, 2):
        print('error: wrong number of arguments.')
        print('usage: {}          (to use hackrf)'.format(sys.argv[0]))
        print('usage: {} <file>   (to use raw file)'.format(sys.argv[0]))
        sys.exit(1)
    else:
        if len(sys.argv) == 2:
            path = sys.argv[1]

    spectacle = Spectacle(path)
Example #18
 def setupParallelLogging( self ) :
     # ---------------------------------------------------
     # set up Logging
     # ----------------
     # from multiprocessing import enableLogging, getLogger
     import multiprocessing
     # preliminaries for handlers/output files, etc.
     from time import ctime
     datetime = ctime()
     datetime = datetime.replace(' ', '_')
     outfile = open( 'gaudirun-%s.log'%(datetime), 'w' )
     # two handlers, one for a log file, one for terminal
     streamhandler = logging.StreamHandler(stream=outfile)
     console       = logging.StreamHandler()
     # create formatter : the params in parentheses are variable names available via logging
     formatter = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(message)s" )
     # add formatter to Handler
     streamhandler.setFormatter(formatter)
     console.setFormatter(formatter)
     # now, configure the logger
     # enableLogging( level=0 )
     # self.log = getLogger()
     self.log = multiprocessing.log_to_stderr()
     self.log.setLevel( logging.INFO )
     self.log.name = 'Gaudi/Main.py Logger'
     self.log.handlers = []
     # add handlers to logger : one for output to a file, one for console output
     self.log.addHandler(streamhandler)
     self.log.addHandler(console)
     self.log.removeHandler(console)
     # set level!!
     self.log.setLevel( logging.INFO )
def InternalSet(Achild:Queue, Bchild:Queue, outqueue:Queue):
    """Take the output of two LeafSet's and take the union."""
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(logging.INFO)
    AminusB = set()
    BminusA = set()
    morestuff = True
    while morestuff:
        a = Achild.get()
        b = Bchild.get()
        logger.info("Internal:%s:%s" % (a, b))
        if a in BminusA:
            BminusA.remove(a)
        elif a not in AminusB:
            AminusB.add(a)
            outqueue.put(a)
        if b in AminusB:
            AminusB.remove(b)
        elif b not in BminusA:
            BminusA.add(b)
            outqueue.put(b)
        Achild.task_done()
        Bchild.task_done()
        if (a == SIGOBJ) or (b == SIGOBJ):
            outqueue.put(SIGOBJ)
            morestuff = False
    logger.info("internal done")
Example #20
def main(at_once=100):
    # file detailing what to download
    # each line should be of format:
    # fileType, color, brand, model, url, hash
    fname = sys.argv[1]

    # multiprocessing.Pool 33sec per 100 photos
    # multiprocessing.dummy.Pool ncpu*3 22sec per 100 photos
    # 500 for 120sec, 3*ncpu

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)
    # NUM_WORKERS = multiprocessing.cpu_count() * 2
    p = Pool()
    # get listing of files to download
    task_list = read_tasking(fname)
    print('processed {0} lines to download'.format(len(task_list)))
    print('tasking loaded')

    # download in batches
    good = open(fname + '.good', 'w')
    bad = open(fname + '.bad', 'w')
    for batch in tqdm(grouper(at_once, task_list)):
        retval = p.map(work_func, batch)
        for r, t in retval:
            if r:
                good.write(json.dumps(t)+'\n')
            else:
                bad.write(json.dumps(t)+'\n')
        good.flush()
        bad.flush()

    good.close()
    bad.close()
Example #21
def par_fit( K, ChunkGenerator, nProc=1, timeout=120, doVerbose=False): 
    logger = mp.log_to_stderr()
    if doVerbose:
      logger.setLevel(logging.INFO)
       
    item1 = ChunkGenerator.next()
    if type( item1 ) == str:
      Xinit = np.loadtxt( item1 ) 
      funcChunk = fitChunkFromFile
    else:
      funcChunk = fitChunk
      Xinit = item1
    D = Xinit.shape[1]
    initShared( Xinit )
    
    # Run in parallel!        
    workerpool = mp.Pool( processes=nProc )
    R = workerpool.map_async( funcChunk, ChunkGenerator )

    R.get( timeout=timeout )  
    
    workerpool.close() # just in case...
    
    # Build mixture model with resulting parameters!
    mygmm = sklearn.mixture.GMM( n_components=K )

    mygmm.weights_ = sh2np( shWeights )
    mygmm.covars_ = sh2np( shCovars, (K,D) )
    mygmm.means_ = sh2np( shMeans, (K,D) )
    return mygmm
def Estep_parallel( Xin, w, MuList, SigmaList, nProc=2, chunksize=1, doVerbose=False ):
  ''' Returns
      -------
        logResp : N x K vector of log posterior probabilities 

                  logResp[n,k] : n-th data point's posterior under k-th comp
  '''
  '''def obs_comp_generator( MuList, SigmaList):
    for k in xrange( len(MuList) ):
      yield k,MuList[k], SigmaList[k]
  GMMCompIterator = obs_comp_generator( MuList, SigmaList )
  '''
  if doVerbose:
    logger = mp.log_to_stderr()
    logger.setLevel( logging.INFO )

  global X
  X = Xin

  GMMCompIterator = [ (k,MuList[k],SigmaList[k]) for k in xrange( len(MuList) )]
  mypool = mp.Pool( processes=nProc )
  myParOp = mypool.map_async( loggausspdf_globaldata, GMMCompIterator, chunksize=chunksize )
  resultList = myParOp.get()
  #st = time.time()
  logResp = np.vstack(resultList)
  #print '  Reduction: %.2f sec' % (time.time()-st)
  # Time to agg results into single matrix: 0.07 sec for N=250000,K=25
  # Conclusion: agg results takes almost no time relative to each individual job
  return logResp.T + np.log(w)
def main():

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    if len(sys.argv) >= 3:
        #directory containing samples which contains fastq
        samplesDir = getAbsPath(sys.argv[1])
        
        #quality scores file
        qScoreFile = getAbsPath(sys.argv[2]).rstrip('/')

        #perl script for qscore calculation
        qScorePerlScript = getAbsPath(sys.argv[3]).rstrip('/')
        
        #get all sample directory inside samplesDir
        allSamplesDir = allSampleDirsPath(samplesDir)
        
        #call child workers to do the job
        results = prepAndRunWorkers(allSamplesDir, qScoreFile, qScorePerlScript)

        #print final results
        print "avgQScoreComputation success: ", all(results)
    else:
        print 'err: files missing'
    def broadFirst(self, key):
        ''' True means no need to dig more
        False means need to dig to next level
        '''
        count = 0
        logger = multiprocessing.log_to_stderr(multiprocessing.SUBDEBUG)
        state = downloadTile(key, self.config.basic_url, self.config.image_floder)
        count = count + 1
        bottom = 0
        l = self.moveToNextLevel(key)
        q = Queue.Queue()
        for i in l:
            q.put(i)
        while not q.empty():
            tl = []
            for _ in range(4):
                tl.append(q.get())
                count = count + 1
            for t in tl:
                state = downloadTile(t, self.config.basic_url, self.config.image_floder)
#                 state = True
                if state and len(t)< self.config.max_z:
                    l = self.moveToNextLevel(t)
                    for _ in l:
                        q.put(_)
            if count - bottom >= 1000:
                logger.debug('%s: Roughly Process: %d/21845' % (time.ctime(), count))
                bottom = count
#             logger.debug('%d' % count)
        return True
Example #25
    def setUp(self):
        """Start the agent and wait for it to start"""
        super(FunctionalBase, self).setUp()
        mpl = multiprocessing.log_to_stderr()
        mpl.setLevel(logging.INFO)
        self.test_port = os.environ.get('TEST_PORT', '9999')
        # Build a basic standalone agent using the config option defaults.
        # 127.0.0.1:6835 is the fake Ironic client.
        self.agent = agent.IronicPythonAgent(
            'http://127.0.0.1:6835', 'localhost',
            ('0.0.0.0', int(self.test_port)), 3, 10, None, 300, 1,
            'agent_ipmitool', True)
        self.process = multiprocessing.Process(
            target=self.agent.run)
        self.process.start()
        self.addCleanup(self.process.terminate)

        # Wait for process to start, otherwise we have a race for tests
        sleep_time = 0.1
        tries = 0
        max_tries = int(os.environ.get('IPA_WAIT_TRIES', '100'))
        while tries < max_tries:
            try:
                return self.request('get', 'commands')
            except requests.ConnectionError:
                time.sleep(sleep_time)
                tries += 1

        raise IOError('Agent did not start after %s seconds.' % (max_tries *
                                                                 sleep_time))
Example #26
def dispatcher(success_fp, fail_fp, partitions):
    """Dispatch execution over a pool of processors

    Parameters
    ----------
    success_fp : file-like object
        A file-like object to write a list of successful sample IDs to
    fail_fp : file-like object
        A file-like object to write a list of unsuccessful sample IDs to,
        and any associated error information
    partitions : Iterable of (function, Iterable of str)
        Yields a function and an iterable of IDs. It is expected that the
        functions yielded will have the following signature:

        {str: list} <- function(list of str)
    """
    if ag.is_test_env():
        logger = mp.log_to_stderr()
        logger.setLevel(logging.INFO)

    pool = mp.Pool(processes=agenv.get_cpu_count())

    success_fp.write('%s\n' % '#SampleID')
    fail_fp.write('%s\t%s\n' % ('#SampleID', 'Error(s)'))

    for func, ids in partitions:
        functor = partial(run_functor, func)
        for success_details in pool.map(functor, list(agru.chunk_list(ids))):
            for id_, detail in success_details.items():
                if detail:
                    fail_fp.write("%s\t%s\n" % (id_, '\t'.join(detail)))
                else:
                    success_fp.write("%s\n" % id_)
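
A hedged sketch of what one partitions entry and a call to dispatcher might look like, given the documented {str: list} <- function(list of str) contract. The validate_samples function and the output file names are made up for illustration, and running it assumes the original's run_functor and helper modules are importable:

def validate_samples(sample_ids):
    """Map each sample ID to a list of error strings; an empty list means success."""
    results = {}
    for id_ in sample_ids:
        problems = []
        if not id_.strip():
            problems.append('blank sample ID')
        results[id_] = problems
    return results


if __name__ == '__main__':
    partitions = [(validate_samples, ['000001', '000002', '   '])]
    with open('success.txt', 'w') as ok_fp, open('failures.txt', 'w') as fail_fp:
        dispatcher(ok_fp, fail_fp, partitions)
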
Example #27
 def processQueueThread(self, event_queue, notifier, timeout,
                        processLock, done_sending, log_level):
     """Send events without blocking swift proxy."""
     log = multiprocessing.log_to_stderr()
     log.setLevel(log_level)
     event_buffer = self.start_sender_process(self._notifier)
     while True:
         try:
             log.debug('Wait for event from send queue')
             event = event_queue.get()
             if event:
                 log.debug('Got event %s from queue - trigger '
                           'send', event.id)
                 self.trigger_send(event, event_buffer, done_sending,
                                   log)
                 while done_sending.wait(timeout) is False:
                     log.warning('Timeout sending event %s - '
                                 'retry in new thread.', event.id)
                     processLock.acquire()
                     self.event_sender_die(event_buffer,
                                           done_sending)
                     event_buffer = self.start_sender_process(notifier)
                     processLock.release()
                     self.trigger_send(event, event_buffer,
                                       done_sending, log)
             else:
                 event_buffer.put(None)
                 break
         except BaseException:
             LOG.error("Exception in sending thread",
                       exc_info=True)
             LOG.exception("SendEventThread loop exception")
def reader(readq, resultq):
	logger = multiprocessing.log_to_stderr()

	logger.info("Start reading")
	BLOCKSIZE = 1024*1024
	while True:
		item = readq.get()
		if item[0] == 'Q':
			logger.warning("Got reader quit")
			break
		elif item[0] == 'R':
			path = item[1]
			bytesRead = 0
			fileSize = item[2].st_size
			try:
				with open(path, 'rb') as f:
					while bytesRead < fileSize:
						buf = f.read(BLOCKSIZE)
						resultq.put(['W'])
						if len(buf) == 0:
							break
						bytesRead += len(buf)
					buf = f.read(BLOCKSIZE)
					if len(buf) != 0:
						raise Exception("Expected EOF for %s" % path)
				resultq.put(['E'])
			except Exception as inst:
				print("open() failed:", inst)

	logger.info("Finished reading")
	resultq.put(['Q'])
def main():

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    if len(sys.argv) >= 4:
        #directory containing fastq library
        fastqsDir = workerForBam.getAbsPath(sys.argv[1])
        
        #directory containing other directories with fasta names
        fastaDir = workerForBam.getAbsPath(sys.argv[2])

        #directory containing file locks
        lockDirPath = workerForBam.getAbsPath(sys.argv[3])
        
        #directory containing temp output -> fastQ's, jobsFile 
        outDir = workerForBam.getAbsPath(sys.argv[4])

        #write all fastq's processing in job file
        combineJobPath = writeCombineBAMJobsFromSAI(outDir, fastqsDir,\
                                                        fastaDir,\
                                                        lockDirPath)

        #call workers to generate paired BAMs from SAIs
        #results = callPairedSAIToBAMWorkers(fastqsDir, fastaDir)
        #print results

    else:
        print 'err: files missing'
def run(host, port, params, force):
    # configure logging
    mplogger = mp.log_to_stderr()
    mplogger.setLevel(params['loglevel'])
    logger.setLevel(params['loglevel'])

    if logger.getEffectiveLevel() <= 10:
        logRequests = True
    else:
        logRequests = False

    # create the server
    manager = TaskManager(params, force)
    server = TaskManagerServer(
        (host, port), logRequests=logRequests, allow_none=True)

    server.register_function(manager.load_tasks, 'panda_reload')
    server.register_function(manager.get_sim_root, 'panda_connect')
    server.register_function(manager.get_next_task, 'panda_request')
    server.register_function(manager.set_complete, 'panda_complete')
    server.register_function(manager.set_error, 'panda_error')
    server.register_function(manager.get_status, 'panda_status')
    server.register_multicall_functions()
    server.register_introspection_functions()

    logger.info("Started XML-RPC server at %s:%d" % (host, port))

    server.serve_forever()
Example #31
def main():
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(logging.INFO)
    logger.info('Odometry processor started')
    odometryprocessor = OdometryProcessor(logger)
    rospy.spin()
import datetime
import json
import logging
import multiprocessing as mp
import sys
import time

import paho.mqtt.client as mqtt

MQTT_SERVER = "127.0.0.1"
MQTT_TOPIC_PREFIX = "home/rtl_433"
TIMEOUT_STALE_SENSOR = 600  # Seconds before showing a timeout indicator

# log = logging.getLogger()  # Single process logger
log = mp.log_to_stderr()  # Multiprocessing capable logger
mqtt_client = mqtt.Client("RTL_433_Test")

sensor_state = dict()  # Dictionary containing accumulated sensor state


def print_sensor_state():
    """ Print accumulated sensor state """
    time_now = datetime.datetime.utcnow().replace(microsecond=0)
    print("\nUpdate per {} UTC".format(time_now.isoformat(sep=' ')))
    for model in sensor_state:
        print(model)
        for ID in sensor_state[model]:
            data = sensor_state[model][ID]['data'].copy()
            timestamp = data.pop('time')
            timedelta = (time_now - timestamp).total_seconds()
Example #33
                self.dose_duration_405_seconds *
                self.analog_out.rate,  #Autorounds
                self.analog_out.num_channels),
            dtype=np.float64)
        voltages['dose_405'][:-1, 2] = self.dose_voltage_405
        self.voltages = voltages
        self.last_played_voltage = None
        return None

    def close(self):
        self.idp.close()
        self.analog_out.close()
        self.stage.close()


if __name__ == '__main__':
    logger = mp.log_to_stderr()
    logger.setLevel(logging.INFO)

    scanner = Scanner(zaber_stage_port_name='COM3')

    try:
        for x in range(1, 100000, 1000):
            print("New x position:", x)
            for i in range(3):
                scanner.snap('488', x=x, y=20000, stage_cooldown_seconds=0.1)
##                input("Hit enter...")
    finally:
        scanner.close()
    input("Hit enter to finish")
Example #34
import logging
import multiprocessing

from django.core.management.base import BaseCommand
from django.db import close_old_connections, reset_queries
from django.utils.encoding import force_text, smart_bytes
from django.utils.timezone import now

from haystack import connections as haystack_connections
from haystack.exceptions import NotHandled
from haystack.query import SearchQuerySet
from haystack.utils.app_loading import haystack_get_models, haystack_load_apps

DEFAULT_BATCH_SIZE = None
DEFAULT_AGE = None
DEFAULT_MAX_RETRIES = 5

LOG = multiprocessing.log_to_stderr(level=logging.WARNING)


def update_worker(args):
    if len(args) != 10:
        LOG.error('update_worker received incorrect arguments: %r', args)
        raise ValueError('update_worker received incorrect arguments')

    model, start, end, total, using, start_date, end_date, verbosity, commit, max_retries = args

    # FIXME: confirm that this is still relevant with modern versions of Django:
    # We need to reset the connections, otherwise the different processes
    # will try to share the connection, which causes things to blow up.
    from django.db import connections

    for alias, info in connections.databases.items():
Example #35
                else:
                    raise RuntimeError(
                        "User choose option {} , close the program!".format(
                            op))

    #test 3.5.1- multiprocessing loop
    #hrv - 0.114s - 100 samples
    #warning: only works with amp object initiated inside the child process
    #warning: passing amp object as an arg to the child process, or directly to the worker function does not work
    TEST_35_1 = False
    if TEST_35_1:
        import multiprocessing
        import logging
        import sys

        multiprocessing.log_to_stderr()
        logger = multiprocessing.get_logger()
        logger.setLevel(logging.DEBUG)

        # Sharing state between processes - Method: shared memory map Value
        run = multiprocessing.Value('b', False)  #start False
        print("run.value", run.value)

        starttime = time.time()

        p = multiprocessing.Process(name="loop_get_data",
                                    target=hrv_worker,
                                    args=(run, ))
        p.daemon = True  #To exit the main process even if the child process p didn't finished

        p.start()  #start process
def synchronous_pull_with_lease_management(project_id, subscription_id):
    """Pulling messages synchronously with lease management"""
    # [START pubsub_subscriber_sync_pull_with_lease]
    import logging
    import multiprocessing
    import random
    import time

    from google.cloud import pubsub_v1

    # TODO(developer)
    # project_id = "your-project-id"
    # subscription_id = "your-subscription-id"

    subscriber = pubsub_v1.SubscriberClient()
    subscription_path = subscriber.subscription_path(project_id,
                                                     subscription_id)

    NUM_MESSAGES = 2
    ACK_DEADLINE = 30
    SLEEP_TIME = 10

    # The subscriber pulls a specific number of messages.
    response = subscriber.pull(request={
        "subscription": subscription_path,
        "max_messages": NUM_MESSAGES
    })

    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)

    def worker(msg):
        """Simulates a long-running process."""
        RUN_TIME = random.randint(1, 60)
        logger.info("{}: Running {} for {}s".format(
            time.strftime("%X", time.gmtime()), msg.message.data, RUN_TIME))
        time.sleep(RUN_TIME)

    # `processes` stores process as key and ack id and message as values.
    processes = dict()
    for message in response.received_messages:
        process = multiprocessing.Process(target=worker, args=(message, ))
        processes[process] = (message.ack_id, message.message.data)
        process.start()

    while processes:
        for process in list(processes):
            ack_id, msg_data = processes[process]
            # If the process is still running, reset the ack deadline as
            # specified by ACK_DEADLINE once every while as specified
            # by SLEEP_TIME.
            if process.is_alive():
                # `ack_deadline_seconds` must be between 10 to 600.
                subscriber.modify_ack_deadline(
                    request={
                        "subscription": subscription_path,
                        "ack_ids": [ack_id],
                        "ack_deadline_seconds": ACK_DEADLINE,
                    })
                logger.info("{}: Reset ack deadline for {} for {}s".format(
                    time.strftime("%X", time.gmtime()),
                    msg_data,
                    ACK_DEADLINE,
                ))

            # If the process is finished, acknowledges using `ack_id`.
            else:
                subscriber.acknowledge(request={
                    "subscription": subscription_path,
                    "ack_ids": [ack_id]
                })
                logger.info("{}: Acknowledged {}".format(
                    time.strftime("%X", time.gmtime()), msg_data))
                processes.pop(process)

        # If there are still processes running, sleeps the thread.
        if processes:
            time.sleep(SLEEP_TIME)

    print("Received and acknowledged {} messages. Done.".format(
        len(response.received_messages)))

    # Close the underlying gRPC channel. Alternatively, wrap subscriber in
    # a 'with' block to automatically call close() when done.
    subscriber.close()
Example #37
def get_mp_logger(level=logging.DEBUG):
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(level)
    return logger
    while True:
        _, img = cap.read()
        if img is not None:
            mainQ.put(img)


def show(mainQ):
    cv2.namedWindow('test1')
    while True:
        frame1 = mainQ.get()
        frame1 = cv2.flip(frame1, 1)
        cv2.imshow('test1', frame1)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break


if __name__ == '__main__':
    logMessages = multiprocessing.log_to_stderr()
    logMessages.setLevel(multiprocessing.SUBDEBUG)
    mainQ = multiprocessing.Queue()

    read_process = multiprocessing.Process(target=read, args=(mainQ, ))
    read_process.start()

    show_process = multiprocessing.Process(target=show, args=(mainQ, ))
    show_process.start()

    read_process.join()
    show_process.join()
Example #39
def main():
    logger = mp.log_to_stderr()
    #logger.setLevel(logging.CRITICAL)
    logger.setLevel(logging.INFO)
    #logger.setLevel(logging.FATAL)

    # create shared array
    mgr = mp.Manager()
    #d = mgr.dict()
    d = {}

    N, M = 15000, 11
    #base_ptr = mp.RawArray(ctypes.c_double, N)
    #arr = tonumpyarray(base_ptr)
    #arr[:] = np.array(np.random.uniform(size=N)*1000, np.int)
    #arr_orig = arr.copy()

    arr = np.array(np.random.uniform(size=N) * 1000, np.int)

    tmp = np.ctypeslib.as_ctypes(arr)
    shared_arr = mp.RawArray(tmp._type_, tmp)
    print(shared_arr)
    d["array"] = shared_arr

    lst_event = []
    for i in range(5):
        lst_event.append(mp.Event())

    lst_event[0].set()

    value_ptr = mp.RawArray(ctypes.c_int, 2)
    value_ptr[0] = 0
    value_ptr[1] = 0

    d["value"] = [0, 0]

    p1 = mp.Process(target=exec1, args=(
        d,
        lst_event,
    ))
    p2 = mp.Process(target=exec2, args=(
        d,
        lst_event,
    ))

    t = time.time()
    p1.start()
    p2.start()

    info("Starting")
    lst_event[3].wait()
    lst_event[4].wait()
    info("All Started")

    lst_event[0].clear()

    lst_event[1].wait()
    lst_event[2].wait()

    time.sleep(1)

    arr = np.ctypeslib.as_array(d["array"])
    #info(arr)
    crit(arr[0])
    crit(arr[arr.size // 2 - 1])
    crit(arr[arr.size // 2])
    crit(arr[arr.size - 1])

    # write to arr from different processes
    # with closing(mp.Pool(initializer=init, initargs=(shared_arr,))) as p:
    #     # many processes access the same slice
    #     stop_f = N // 10
    #     p.map_async(f, [slice(stop_f)]*M)

    #     # many processes access different slices of the same array
    #     assert M % 2 # odd
    #     step = N // 10
    #     p.map_async(g, [slice(i, i + step) for i in range(stop_f, N, step)])
    # p.join()
    # assert np.allclose(((-1)**M)*tonumpyarray(shared_arr), arr_orig)
    return t
import logging
import multiprocessing
import time

def daemon():
    p = multiprocessing.current_process()
    print('Starting:', p.name, p.pid, flush=True)
    time.sleep(2)
    print('Exiting :', p.name, p.pid, flush=True)


def non_daemon():
    p = multiprocessing.current_process()
    print('Starting:', p.name, p.pid, flush=True)
    print('Exiting :', p.name, p.pid, flush=True)


if __name__ == '__main__':
    print('==='*15 + ' < ' + 'MAIN PROCESS' + ' > ' + '==='*15)
    logger = multiprocessing.log_to_stderr(logging.INFO)
    d = multiprocessing.Process(name='MyDaemon', target=daemon)
    d.daemon = True

    n = multiprocessing.Process(name='non-daemon', target=non_daemon)
    n.daemon = False

    d.start()
    print('==='*15)
    time.sleep(1)
    n.start()

    d.join()
    n.join()
Example #41
# a python entry point for a function
if __name__ == "__main__":
    start = time()
    path = r"/Users/jake/Desktop/test"
    fanout_unziptar(path)
    end = time()
    print('script ended after {} mins'.format((end-start)/60))


# set special list for results of multiprocess to pump within
distances = mp.Manager().list()


# set error logging
import logging
mpl = mp.log_to_stderr()
mpl.setLevel(logging.INFO)




# Using pandas ----------------
#  IMPT: does not work when applying on string columns

def some_func(x):
    x = x*29/2
    return x

import swifter
train['new_col'] = train['non_string'].swifter.apply(some_func)
Example #42
def get_stderr_logger(level=multiprocessing.SUBDEBUG):
    lug = multiprocessing.log_to_stderr()
    lug.setLevel(level)
    return lug
Example #43
            for pair in self.prod:
                params = f'{pair[0]}_{pair[1]}'
                t2.append(
                    Thread(name=params,
                           target=self.translate,
                           args=(params, self.requests[params], sock)))
            t1.start()
            for t in t2:
                t.start()


if __name__ == '__main__':

    app.url_map.converters['uuid'] = UUIDConverter
    app.config['JSON_SORT_KEYS'] = False
    logger = log_to_stderr()
    logger.setLevel(logging.INFO)
    logging.basicConfig(
        level=logging.DEBUG,
        format='[%(levelname)s](%(threadName)-10s)%(message)s',
    )

    parser = ConfigParser()
    parser.read('dev.ini')
    params = ['text', 'auth', 'olang', 'odomain']
    odomain_code_mapping = {
        'fml': 'Formal',
        'inf': 'Informal',
        'auto': 'Auto',
        'tt': 'tt',
        'cr': 'cr'
Example #44
def multiprocessing_debug_info():
    """
    Run this to see what multiprocessing is doing.
    :return:
    """
    multiprocessing.log_to_stderr().setLevel(logging.DEBUG)
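
A brief usage sketch, assuming the function above sits in a module with these imports: one call near startup is enough, after which multiprocessing's own debug records for any Process or Pool activity go to stderr.

import logging
import multiprocessing

if __name__ == "__main__":
    multiprocessing_debug_info()  # one call near startup is enough
    p = multiprocessing.Process(target=print, args=("hello from child",))
    p.start()
    p.join()
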
Example #45
#     """thread worker function"""
#     start = time.time()
#     time.sleep(num)
#     print 'Worker:', num, time.time() - start
#     # return

# if __name__ == '__main__':
#     # jobs = []
#     start = time.time()
#     for i in range(5):
#         p = Process(target=worker, args=(i,))
#         # jobs.append(p)
#         p.start()
#     print "finished", time.time()-start

# ==============================================
import multiprocessing
import logging
import sys


def worker():
    print 'Doing some work'
    sys.stdout.flush()


if __name__ == '__main__':
    multiprocessing.log_to_stderr(logging.DEBUG)
    p = multiprocessing.Process(target=worker)
    p.start()
    p.join()
Example #46
def main(argv=None):
	"""Command line interface to fragment extraction."""
	if argv is None:
		argv = sys.argv[2:]
	try:
		opts, args = gnu_getopt(argv, 'ho:', FLAGS + OPTIONS)
	except GetoptError as err:
		print('error:', err, file=sys.stderr)
		print(SHORTUSAGE)
		sys.exit(2)
	opts = dict(opts)

	for flag in FLAGS:
		PARAMS[flag] = '--' + flag in opts
	PARAMS['disc'] = opts.get('--fmt', 'bracket') != 'bracket'
	PARAMS['fmt'] = opts.get('--fmt', 'bracket')
	numproc = int(opts.get('--numproc', 1))
	if numproc == 0:
		numproc = cpu_count()
	if not numproc:
		raise ValueError('numproc should be an integer > 0. got: %r' % numproc)
	limit = int(opts.get('--numtrees', 0)) or None
	PARAMS['cover'] = None
	if '--cover' in opts and ',' in opts['--cover']:
		a, b = opts['--cover'].split(',')
		PARAMS['cover'] = int(a), int(b)
	elif '--cover' in opts:
		PARAMS['cover'] = int(opts.get('--cover', 0)), 999
	PARAMS['twoterms'] = opts.get('--twoterms')
	encoding = opts.get('--encoding', 'utf8')
	batchdir = opts.get('--batch')

	if len(args) < 1:
		print('missing treebank argument')
	if batchdir is None and len(args) not in (1, 2):
		print('incorrect number of arguments:', args, file=sys.stderr)
		print(SHORTUSAGE)
		sys.exit(2)
	if batchdir:
		if numproc != 1:
			raise ValueError('Batch mode only supported in single-process '
				'mode. Use the xargs command for multi-processing.')
	tmp = None
	for n, fname in enumerate(args):
		if fname == '-':
			if numproc != 1:
				# write to temp file so that contents can be read
				# in multiple processes
				if tmp is not None:
					raise ValueError('can only read from stdin once.')
				tmp = tempfile.NamedTemporaryFile()
				tmp.write(open(sys.stdin.fileno(), 'rb').read())
				tmp.flush()
				args[n] = tmp.name
		elif not os.path.exists(fname):
			raise ValueError('not found: %r' % fname)
	if PARAMS['complete']:
		if len(args) < 2:
			raise ValueError('need at least two treebanks with --complete.')
		if PARAMS['twoterms'] or PARAMS['adjacent']:
			raise ValueError('--twoterms and --adjacent are incompatible '
					'with --complete.')
		if PARAMS['approx'] or PARAMS['nofreq']:
			raise ValueError('--complete is incompatible with --nofreq '
					'and --approx')

	level = logging.WARNING if PARAMS['quiet'] else logging.DEBUG
	logging.basicConfig(level=level, format='%(message)s')
	if PARAMS['debug'] and numproc > 1:
		logger = multiprocessing.log_to_stderr()
		logger.setLevel(multiprocessing.SUBDEBUG)

	logging.info('Disco-DOP Fragment Extractor')

	logging.info('parameters:\n%s', '\n'.join('    %s:\t%r' % kv
		for kv in sorted(PARAMS.items())))
	logging.info('\n'.join('treebank%d: %s' % (n + 1, a)
		for n, a in enumerate(args)))

	if numproc == 1 and batchdir:
		batch(batchdir, args, limit, encoding, '--debin' in opts)
	else:
		fragmentkeys, counts = regular(args, numproc, limit, encoding)
		out = (io.open(opts['-o'], 'w', encoding=encoding)
				if '-o' in opts else None)
		if '--debin' in opts:
			fragmentkeys = debinarize(fragmentkeys)
		printfragments(fragmentkeys, counts, out=out)
	if tmp is not None:
		del tmp
Example #47
        '--process_names',
        help=
        ('space delimited list of file of process names, used for model saving filepaths, '
         'one per review file'),
        nargs='+',
        type=str)
    parser.add_argument(
        '-c',
        '--categories',
        help=('space delimited list of categories to map to review files, '
              'one per review file'),
        nargs='+',
        type=str)
    args = parser.parse_args()
    loglevel = args.loglevel.upper()
    if args.logfile is None:
        logging.basicConfig(datefmt='%Y-%m-%d %H:%M:%S',
                            format='%(asctime)s %(levelname)-8s %(message)s',
                            level=loglevel)
    else:
        logging.basicConfig(datefmt='%Y-%m-%d %H:%M:%S',
                            format='%(asctime)s %(levelname)-8s %(message)s',
                            filename=args.logfile,
                            level=loglevel)
    LOGGER = multiprocessing.log_to_stderr()
    LOGGER.info(args)
    # hbase_table_prefix = None if str.lower(args.hbase_table_prefix) == 'none' else args.hbase_table_prefix
    main(args.hbase_table, args.model_prefix, args.predictions_prefix,
         args.reviews_files, args.process_names, args.hdfs_base_url,
         args.categories)
            for container in active_containers:
                logger.info("Container {0} running".format(container.name))

        time.sleep(3)
        status, description = grp.check_health()
        if description:
            print "\n**** WEB APP HEALTH STATUS ****"
            print description
            print "*******************************\n"
        logs_stream_generators.update(grp.get_logs_stream())

    if results.containers_logs:
        jobs = []
        for container_name, log_stream_generator in logs_stream_generators.items(
        ):
            multiprocessing.log_to_stderr(logging.ERROR)
            p = multiprocessing.Process(name=container_name,
                                        target=print_log_stream,
                                        args=(log_stream_generator, ))
            p.daemon = True
            p.start()
            jobs.append(p)  # track it so the KeyboardInterrupt handler can terminate it
        try:
            while (True):
                time.sleep(1)
        except KeyboardInterrupt:
            for p in jobs:
                p.terminate()
                p.join()
            logger.info("Bye Bye.. Have a nice day.")

    if results.containers_stats:
Example #49
def enable_debug():
    """
    Enables the full debug, including for sub processes.
    """
    logger = multiprocessing.log_to_stderr(logging.DEBUG)
    logger.setLevel(multiprocessing.SUBDEBUG)
Example #50
import logging
import multiprocessing

from PIL import Image, ImageDraw, ImageFont
from tqdm import tqdm

FONT_SIZE = 32
FONT = ImageFont.truetype("NotoSansCJKsc-Regular.otf", size=FONT_SIZE)
LOGGER = multiprocessing.get_logger()
FOLDER = "char_img"
CHUNKSIZE = 50

def gen_img(codepoint, font=FONT, folder=FOLDER):
    char = chr(int(codepoint, 16))
    test_img = Image.new("L", (FONT_SIZE, 4 * FONT_SIZE), 0)
    out_img = Image.new("L", (FONT_SIZE, FONT_SIZE), 0)
    d = ImageDraw.Draw(test_img)
    d.text((0, 0), char, fill=255, font=font)
    bbox = test_img.getbbox()
    d = ImageDraw.Draw(out_img)
    y = FONT_SIZE // 2 - (bbox[1] + bbox[3]) // 2
    if (bbox[3] - bbox[1]) > FONT_SIZE:
        LOGGER.warning(f"Character {char} at codepoint {codepoint} has height {bbox[3] - bbox[1]}")
    d.text((0, y), char, fill=255, font=font)
    out_img.save(f"{folder}/{codepoint}.png")

if __name__ == "__main__":
    multiprocessing.log_to_stderr().setLevel(logging.WARNING)
    with open("TGSCC-Unicode.txt", "r") as f:
        codepoints = [line.split()[1][2:] for line in f.readlines()[2:]]
    pool = multiprocessing.Pool()
    results = pool.imap_unordered(gen_img, codepoints, chunksize=CHUNKSIZE)
    for _ in tqdm(results, total=len(codepoints)):
        pass
    pool.close()
Example #51
        cfg_from_list(['DATASET', args.dataset])
    if args.exp is not None:
        cfg_from_list(['TEST.EXP_NAME', args.exp])
    if args.diff_backprop is not None:
        cfg_from_list(['TRAIN.DIFF_BACKPROP', args.diff_backprop])
    if args.stabilizer is not None:
        cfg_from_list(['TRAIN.STABILIZER', args.stabilizer])
    if args.out_path is not None:
        cfg_from_list(['DIR.OUT_PATH', args.out_path])
    if args.weights is not None:
        cfg_from_list([
            'CONST.WEIGHTS', args.weights, 'TRAIN.RESUME_TRAIN', True,
            'TRAIN.INITIAL_ITERATION',
            int(args.init_iter)
        ])

    print('Using config:')
    pprint.pprint(cfg)

    if not args.test:
        train_net()
    else:
        test_net()


if __name__ == '__main__':
    mp.log_to_stderr()
    logger = mp.get_logger()
    logger.setLevel(logging.INFO)
    main()
Example #52
################################################################################

import packages.buskill
from packages.garden.navigationdrawer import NavigationDrawer
from packages.garden.progressspinner import ProgressSpinner
from buskill_version import BUSKILL_VERSION

import os, sys, re, webbrowser

import multiprocessing
from multiprocessing import util

import logging
logger = logging.getLogger(__name__)
util.get_logger().setLevel(util.DEBUG)
multiprocessing.log_to_stderr().setLevel(logging.DEBUG)
#from multiprocessing import get_context

import kivy
#kivy.require('1.0.6') # replace with your current kivy version !

from kivy.app import App
from kivy.properties import ObjectProperty, StringProperty
from kivy.clock import Clock

from kivy.core.window import Window
Window.size = (300, 500)

from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.button import Button
Example #53
from __future__ import print_function
from future.utils import iteritems
from builtins import range, input

import multiprocessing
import random
import sys
import traceback
import operator
import logging
import argparse
from collections import namedtuple
from rpn import RPN, RPNError, is_number
from time import time

logger = multiprocessing.log_to_stderr()
logger.setLevel(logging.ERROR)


class EquationError(Exception):
    pass


class UnsolvableError(Exception):
    pass


def manually():
    number_of_variables = None
    while number_of_variables is None:
        number_of_variables = int(input("Number of variables: "))
"""
import logging
import multiprocessing
import os


def printer(item_, lock_):
    # Locking
    lock_.acquire()
    try:
        print('{} is printing: {}'.format(os.getpid(), item_))
    finally:
        # Unlocking
        lock_.release()


if __name__ == '__main__':
    # Lock object
    lock = multiprocessing.Lock()

    items = ['tango', 'foxtrot', 10]

    multiprocessing.log_to_stderr()  # To redirect to stderr pipe

    logger = multiprocessing.get_logger()  # Getting logger object
    logger.setLevel(logging.INFO)

    for item in items:
        p = multiprocessing.Process(target=printer, args=(item, lock))
        p.start()
Example #55
 def __init__(self, blockchain, mempool):
     mp.log_to_stderr()
     mp_logger = mp.get_logger()
     mp_logger.setLevel(logging.DEBUG)
     self.blockchain = blockchain
     self.mempool = mempool
Example #56
        lock.release()


def sub_500_lock(total, lock):
    for i in range(100):
        time.sleep(0.01)
        lock.acquire()
        total.value -= 5
        lock.release()

if __name__ == '__main__':

    total = Value('i', 500)
    lock = Lock()

    log_to_stderr()
    logger = get_logger()
    logger.setLevel(logging.INFO)

    add_proc = Process(target=add_500_lock, args=(total, lock))
    sub_proc = Process(target=sub_500_lock, args=(total, lock))

    add_proc.start()
    sub_proc.start()

    add_proc.join()
    sub_proc.join()
    print(total.value)


Example #57
def main():
    # This incantation is forced on us so the IDP won't print everything twice:
    import logging
    import multiprocessing as mp
    logger = mp.log_to_stderr()
    logger.setLevel(logging.INFO)

    # Set parameters for IDP (Image Data Pipeline)
    set_num_buffers = 3
    image_height_pixels = 128
    image_width_pixels = 380

    # Set parameters for DAQ (analog out card)
    num_daq_channels = 3
    daq_rate = 8e5

    ##############################################################
    # Set exposure parameters for camera and laser illumination: #
    ##############################################################
    
    green_AOM_mV = [
        300,
        ] #calibrated
    green_powers = [
        '1010mW',
        ]
    red_AOM_mV = [
        269,
        ] #calibrated
    red_powers = [
        '0mW',
        ]
    angle_string = '14'

    # Set laser pulse duration VERY SHORT
    green_pulse_duration_pixels = 1
    red_pulse_duration_pixels = 1

    # Set green pulse train repetition time short enough to
    # thermally stabilize the sample
    green_rep_time_us = 600
    green_rep_time_pixels = int(np.ceil(
        green_rep_time_us * 1e-6 * daq_rate))

    # how many red laser shots in an exposure?
    pulses_per_exposure = 8
    # you don't want red light leaking into next exposure so set this to
    # 1 if you're imaging 720 nm.
    # set to zero if you're looking for depletion, because you need
    # every green pulse matched with a red for that measurement
    less_red_pulses = 0
    
    desired_effective_exposure_time_pixels = (green_rep_time_pixels *
                                              pulses_per_exposure)
    assert desired_effective_exposure_time_pixels > 0

    #define red/green pulse delays
    red_start_pixel_array = np.array([-2, 0, 2])
    num_delays = red_start_pixel_array.shape[0]
    print('Red/green delay (us) =', red_start_pixel_array / daq_rate * 1e6)
    # number of exposures should be the first dimension of the idp buffer
    num_delay_scan_repetitions = 1
    num_exposures = num_delays * num_delay_scan_repetitions

    # actual roll time is 640 us, which should be a multiple of
    # green_rep_time_us, but may not always be
    # this only works for the current field of view height 128 pixels
    # 10 us per line, rolling is symmetrical around middle of chip
    rolling_time_us = 640 #experimentally determined for this field of view
    rolling_time_pixels = int(np.ceil(
        rolling_time_us * 1e-6 * daq_rate))
    extra_time_after_roll_pixels = (green_rep_time_pixels -
                                    rolling_time_pixels %
                                    green_rep_time_pixels)
    effective_exposure_time_pixels = (extra_time_after_roll_pixels +
                                      desired_effective_exposure_time_pixels)
    # reminder: negative delay values (red before green) are only valid if the
    # camera roll finishes before the red pulse gets there
    assert extra_time_after_roll_pixels > -min(red_start_pixel_array)
    set_exposure_time_pixels = (rolling_time_pixels +
                                effective_exposure_time_pixels)
    # set exposure time must be an integer multiple of green rep time
    assert (set_exposure_time_pixels % green_rep_time_pixels) == 0
    set_exposure_time_us = int(np.ceil(
        set_exposure_time_pixels / daq_rate * 1e6))
    


    # Initialize the IDP:
    idp = image_data_pipeline.Image_Data_Pipeline(
        num_buffers=set_num_buffers,
        buffer_shape=(num_exposures, image_height_pixels, image_width_pixels),
        camera_child_process=pco_edge_camera_child_process)
    assert idp.buffer_shape[0] == num_exposures
    
    # Initialize the DAQ:
    daq = ni.PCI_6733(
        num_channels=num_daq_channels,
        rate=daq_rate,
        verbose=True)
    assert daq.rate == daq_rate

    try:
        # Apply camera settings:
        idp.display.set_intensity_scaling('median_filter_autoscale')
        idp.apply_camera_settings(
            trigger='external_trigger',
            exposure_time_microseconds = set_exposure_time_us,
            region_of_interest  ={'bottom': 1088,
                                  'top': 961,
                                  'left': 841,
                                  'right': 1220},
            preframes=0)
        # UNCOMMON COMMAND: the daq voltage string can get very long, so
        # Andy wrote a new part of pco.py that adjusts the set timeout
        # for waiting for the FIRST camera trigger (Oct 4, 2016)
        idp.camera.commands.send(('set_first_trigger_timeout_seconds',
                                  {'first_trigger_timeout_seconds': 3}))
        assert idp.camera.commands.recv() == 3 # clear command queue
        # Figure out some basic timing information: This is what the
        # camera thinks it's doing. Is it what we want it to do?
        exposure_time_us = idp.camera.get_setting('exposure_time_microseconds')
        print('I want exposure time to be (us)',set_exposure_time_us)
        print('Exposure time actually is (us)',exposure_time_us)
        assert exposure_time_us == set_exposure_time_us
        rolling_time_us = idp.camera.get_setting('rolling_time_microseconds')
        rolling_time_jitter_us = 15 #experimentally measured and also in spec
        rolling_time_us += rolling_time_jitter_us
        pulse_tail_us = 25 #experimentally measured response of buffer amp and AOM
        print("\nCamera exposure time:", exposure_time_us, "(us)\n")
        print("\nCamera rolling time:", rolling_time_us, "(us)\n")
        effective_exposure_us = exposure_time_us - rolling_time_us
        print("\nCamera effective exposure:", effective_exposure_us, "(us)\n")

        for red_voltage_num, my_red_voltage_mV in enumerate(red_AOM_mV):
            for green_voltage_num, my_green_voltage_mV in enumerate(green_AOM_mV):


                # Calculate DAQ voltages

                # Set voltages to play on analog out card
                green_voltage = my_green_voltage_mV/1000
                red_voltage = my_red_voltage_mV/1000
                trig_voltage = 3

                # time between exposures must be greater than camera trigger
                # jitter and a multiple of the green rep time
                # trigger jitter is about 10 us
                time_between_exposures_pixels = 2 * green_rep_time_pixels
                camera_rep_time_pixels = (set_exposure_time_pixels +
                                          time_between_exposures_pixels)
                camera_rep_time_us = camera_rep_time_pixels / daq_rate * 1e6

                voltages = np.zeros((camera_rep_time_pixels * num_exposures,
                                     num_daq_channels))

                # green laser pulses on for the duration of the daq play
                green_chunk = np.zeros(green_rep_time_pixels)
                green_chunk[0:green_pulse_duration_pixels] = green_voltage
                voltages[:,1] = np.tile(
                    green_chunk, int(voltages.shape[0]/green_rep_time_pixels))

                # camera trigger duration should be 3us or greater
                trigger_duration_us = 3
                trigger_duration_pixels = int(np.ceil(
                    trigger_duration_us / 1e6 * daq_rate))

                # loop used to define camera trigger and red laser pulse
                # voltages
                for which_exposure in range(num_exposures):
                    cursor = which_exposure * camera_rep_time_pixels
                    # Camera triggers:
                    voltages[cursor:cursor + trigger_duration_pixels, 0] = (
                        trig_voltage)
                    # Red laser pulses
                    red_start_pixel = (
                        red_start_pixel_array[which_exposure % num_delays])
                    red_series_start = (cursor +
                                        rolling_time_pixels +
                                        extra_time_after_roll_pixels +
                                        red_start_pixel)
                    red_chunk = np.zeros(green_rep_time_pixels)
                    red_chunk[0:red_pulse_duration_pixels] = red_voltage

                    red_exposure_array = np.tile(red_chunk, (
                        pulses_per_exposure - less_red_pulses))

                    voltages[red_series_start:(red_series_start + red_exposure_array.shape[0]), 2] = red_exposure_array

                # save voltages that will be sent to daq
                with open('voltages_green_' + green_powers[green_voltage_num] +
                          '_red_' + red_powers[red_voltage_num] +
                          '_depletion.pickle', 'wb') as f:
                    pickle.dump(voltages, f)
                


                # Put it all together
                idp.load_permission_slips(
                    num_slips=1,
                    file_saving_info=[
                        {'filename': (
                            'STE_depletion_angle_' + angle_string +
                            '_green_' + green_powers[green_voltage_num] +
                            '_red_' + red_powers[red_voltage_num] +
                            '.tif'),
                         'channels': num_delays,
                         'slices': num_delay_scan_repetitions,
                         }])
                daq.play_voltages(voltages, block=True)
    finally:
        # Shut everything down. This can be important!
        daq.close()
        idp.close()
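The voltage-train construction above reduces to two NumPy moves: tile a short per-repetition chunk across the whole play buffer, then stamp a per-exposure trigger at a fixed offset for each exposure. A stripped-down sketch of just that pattern; all numbers are illustrative, not the experiment's calibrated values, and no hardware code is involved.

import numpy as np

# Illustrative sizes only.
rep_time_pixels = 480               # one repetition of the pulse train, in DAQ samples
pulse_duration_pixels = 1
num_exposures = 3
camera_rep_time_pixels = 4 * rep_time_pixels
trigger_duration_pixels = 3
num_channels = 3

voltages = np.zeros((camera_rep_time_pixels * num_exposures, num_channels))

# Channel 1: a short pulse at the start of every repetition, tiled over the buffer.
chunk = np.zeros(rep_time_pixels)
chunk[:pulse_duration_pixels] = 0.3
voltages[:, 1] = np.tile(chunk, voltages.shape[0] // rep_time_pixels)

# Channel 0: one camera trigger per exposure, stamped at that exposure's first sample.
for which_exposure in range(num_exposures):
    cursor = which_exposure * camera_rep_time_pixels
    voltages[cursor:cursor + trigger_duration_pixels, 0] = 3.0

print(voltages.shape)               # (5760, 3)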
def main():
    # send it all to stderr
    mp.log_to_stderr()
    # get access to a logger and set its logging level to INFO
    logger = mp.get_logger()
    logger.setLevel(logging.INFO)

    dataset = read_data(DATASET_PATH)
    # global train_x, test_x, train_y, test_y

    train_x, test_x, train_y, test_y = split_dataset(dataset, 0.25)

    # print("--- Testing Sequence DE ---")
    # start_time_seq = time.time()
    # result_seq = list(de_sequence(fobj, bounds=[(-100, 100)] * 6))
    # print(result_seq[-1])
    # print("")
    # print("--- %s seconds ---" % (time.time() - start_time_seq))
    #
    # sleep(5)

    print("--- Tuning Random Forest with Parallel DE ---")
    start_time_rf_tuning_para = time_RF.time()

    # result_para = list(de_parallel(fobj, bounds=[(-100, 100)] * 6))
    # print(result_para[-1])

    # initialization
    bounds = [(10, 150), (1, 20), (2, 20), (2, 50), (0.01, 1), (1, 10)]
    mut = 0.8
    crossp = 0.7
    popsize = 60
    its = 100

    dimensions = len(bounds)
    pop = np.random.rand(popsize, dimensions)

    # pdb.set_trace()
    min_b, max_b = np.asarray(bounds).T
    diff = np.fabs(min_b - max_b)
    pop_denorm = min_b + pop * diff

    # convert from float to integer
    pop_denorm_convert = pop_denorm.tolist()

    result_list = []
    temp_list = []

    for index in pop_denorm_convert:
        temp_list.append(np.int_(np.round_(index[0])))
        temp_list.append(np.int_(np.round_(index[1])))
        temp_list.append(np.int_(np.round_(index[2])))
        temp_list.append(np.int_(np.round_(index[3])))
        temp_list.append(float('%.2f' % index[4]))
        temp_list.append(np.int_(np.round_(index[5])))
        result_list.append(temp_list)
        temp_list = []

    fitness = np.asarray([
        rf_tuning(index[0], index[1], index[2], index[3], index[4], index[5],
                  train_x, test_x, train_y, test_y) for index in result_list
    ])

    best_idx = np.argmax(fitness)
    best = pop_denorm[best_idx]

    print("Dimension:", dimensions)
    print("pop:", pop)
    print("min_b:", min_b)
    print("max_b:", max_b)
    print("diff:", diff)
    print("pop_denorm:", pop_denorm)
    print("fitness:", fitness)
    print("best_idx:", best_idx)
    print("best:", best)

    lock = mp.Lock()
    # execute loops in each process
    processes = []
    for x in range(mp.cpu_count()):
        processes.append(
            mp.Process(target=de_innerloop,
                       args=(output, its, popsize, pop, mut, dimensions,
                             crossp, min_b, diff, lock, fitness, best_idx,
                             best, train_x, test_x, train_y, test_y)))

    # Run processes
    for p in processes:
        p.start()

    # Wait for each process to finish
    # Without join(), the parent would continue past this point before the workers are done
    for p in processes:
        p.join()

    # Get process results from the output queue
    results = [output.get() for p in processes]
    print(results)

    print("")
    print("--- %s seconds ---" % (time_RF.time() - start_time_rf_tuning_para))
    print("")
def realtime(args):
    """
    Read and apply object detection to input real time stream (webcam)
    """

    # If display is off and no frame-count limit has been defined, turn display on
    if (not args["display"]) and (args["num_frames"] < 0):
        print("\nSet display to on\n")
        args["display"] = 1

    # Set the multiprocessing logger to debug if required
    if args["logger_debug"]:
        logger = multiprocessing.log_to_stderr()
        logger.setLevel(multiprocessing.SUBDEBUG)

    # Multiprocessing: Init input and output Queue and pool of workers
    input_q = Queue(maxsize=args["queue_size"])
    output_q = Queue(maxsize=args["queue_size"])
    crop_q = Queue(maxsize=args["queue_size"])
    pool = Pool(1, worker, (input_q, output_q, crop_q))
    pool2 = Pool(1, crop_worker, (crop_q, ))
    # Create a threaded video stream and start the FPS counter
    vs = WebcamVideoStream(src=args["input_device"]).start()
    fps = FPS().start()

    # Define the output codec and create VideoWriter object
    if args["output"]:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('outputs/{}.avi'.format(args["output_name"]),
                              fourcc,
                              vs.getFPS() / args["num_workers"],
                              (vs.getWidth(), vs.getHeight()))

    # Start reading and treating the video stream
    if args["display"] > 0:
        print()
        print(
            "====================================================================="
        )
        print(
            "Starting video acquisition. Press 'q' (on the video windows) to stop."
        )
        print(
            "====================================================================="
        )
        print()

    countFrame = 0
    while True:
        # Capture frame-by-frame
        ret, frame = vs.read()
        countFrame = countFrame + 1
        if ret:
            input_q.put(frame)
            output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
            #print("size of crop_q:", crop_q.qsize())
            # write the frame
            if args["output"]:
                out.write(output_rgb)

            # Display the resulting frame
            if args["display"]:
                ## full screen
                if args["full_screen"]:
                    cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
                    cv2.setWindowProperty("frame", cv2.WND_PROP_FULLSCREEN,
                                          cv2.WINDOW_FULLSCREEN)
                cv2.imshow("frame", output_rgb)

                fps.update()
            elif countFrame >= args["num_frames"]:
                break

        else:
            break

        k = cv2.waitKey(1)
        if k == ord('q'):
            #path = "/home/mohak/Desktop/image/"
            #test = os.listdir(path)
            #for item in test:
            #if item.endswith(".jpg"):
            #os.remove(os.path.join(path, item))
            #else:
            #pass
            cv2.destroyAllWindows()
            break

    # When everything done, release the capture
    fps.stop()
    pool.terminate()
    pool2.terminate()
    vs.stop()
    if args["output"]:
        out.release()
    cv2.destroyAllWindows()
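The Pool(1, worker, (input_q, output_q, crop_q)) calls above use the pool's initializer slot to run a long-lived consumer loop rather than mapping a function over a dataset. A minimal sketch of that streaming pattern; the doubling "detector" and the queue sizes are placeholders, not the detection code used above.

import multiprocessing


def worker(input_q, output_q):
    # Long-lived consumer: the pool runs this as its "initializer", so the single
    # pool process just streams items from one queue to the other.
    while True:
        frame = input_q.get()
        output_q.put(frame * 2)     # stand-in for the object-detection step


if __name__ == '__main__':
    input_q = multiprocessing.Queue(maxsize=5)
    output_q = multiprocessing.Queue(maxsize=5)
    pool = multiprocessing.Pool(1, worker, (input_q, output_q))

    for frame in range(10):         # pretend these are video frames
        input_q.put(frame)
        print(output_q.get())

    pool.terminate()                # like the example above, tear the pool down when done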
Exemple #60
0
overwrite_scores = parser.parse_args().overwrite_scores

# This will re-run plots
parser.add_argument('-oo','--overwrite_output',action='store_true',default=False)
overwrite_output = parser.parse_args().overwrite_output

parser.add_argument('-T','--tests',action='store_true',default=False)
basic_tests = parser.parse_args().tests
parser.add_argument('-C','--check_only',action='store_true',default=False)
check_only = parser.parse_args().check_only
parser.add_argument('-S','--subcpus',dest='subcpus',default=1,type=int)
subcpus = parser.parse_args().subcpus

##### ERROR LOGGING #####

mp_log = multiprocessing.log_to_stderr()
mp_log.setLevel(logging.INFO)

### SETTINGS ###

# Folder key for final experimental wrfouts
key_wrf = 'ForReal_nco'
# Folder key for post-processed fields (objects, lat/lon, etc)
key_pp = 'Xmas'
# Folder key for scores (FSS, etc)
key_scores = 'Xmas'
# Folder key for plots
key_plot = 'Xmas'

ensroot = '/scratch/john.lawson/WRF/VSE_reso/{}'.format(key_wrf)
reproj_obs_root = '/work/john.lawson/VSE_reso/reproj_obs/{}'.format(key_pp)