Example #1
    def run(self):  
        # initialize windows side   
        tosdb.admin_init(self._addr, AINIT_TIMEOUT)   
        tosdb.vinit(root=self._dll_root)

        # create block
        blk = tosdb.VTOSDB_DataBlock(self._addr, BLOCK_SIZE, date_time=True)
        blk.add_items(*(self._symbols))
        blk.add_topics('last')
        if args.vol:
            blk.add_topics('volume')
     
        # generate filename             
        dprfx = _strftime("%Y%m%d", _localtime())
        isec = int(self._intrvl * 60)  
        for s in self._symbols:
            #
            # create GetOnTimeInterval object for each symbol
            # 
            p = self._out_dir + '/' + dprfx + '_' \
                + s.replace('/','-S-').replace('$','-D-').replace('.','-P-') \
                +'_' + _val_type + '_' + str(self._intrvl) + 'min.tosdb'           
            iobj = _Goti.send_to_file(blk, s, p, _TI.vals[isec], isec/10)
            print(repr(iobj), file=_stderr)
            self._iobjs.append(iobj)

        for i in self._iobjs:
            if i:
                i.join()

        while True:  # keep the process alive
            _sleep(10)
Example #2
 def _init_buffer(self, topic, item):
     for _ in range(int(self._isec / self._psec)):
         dat = self._ssfunc(item, topic, date_time=True, throw_if_data_lost=False)
         if dat:
             return dat
         _sleep(self._psec)
     raise TOSDB_Error("failed to get any data for first interval: %s, %s" % (topic,item))
Example #3
def DeadlockWrap(function, *_args, **_kwargs):
    """DeadlockWrap(function, *_args, **_kwargs) - automatically retries
    function in case of a database deadlock.

    This is a function intended to be used to wrap database calls such
    that they perform retries with exponentially backing-off sleeps in
    between when a DBLockDeadlockError exception is raised.

    A 'max_retries' parameter may optionally be passed to prevent it
    from retrying forever (in which case the exception will be reraised).

        d = DB(...)
        d.open(...)
        DeadlockWrap(d.put, "foo", data="bar")  # set key "foo" to "bar"
    """
    sleeptime = _deadlock_MinSleepTime
    max_retries = _kwargs.pop("max_retries", -1)
    while True:
        try:
            return function(*_args, **_kwargs)
        except db.DBLockDeadlockError:
            if _deadlock_VerboseFile:
                _deadlock_VerboseFile.write("dbutils.DeadlockWrap: sleeping %1.3f\n" % sleeptime)
            _sleep(sleeptime)
            # exponential backoff in the sleep time
            sleeptime *= 2
            if sleeptime > _deadlock_MaxSleepTime:
                sleeptime = _deadlock_MaxSleepTime
            max_retries -= 1
            if max_retries == -1:
                raise
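The optional cap is worth showing explicitly; a minimal usage sketch extending the docstring's own example (it assumes `d` is an already-opened bsddb-style DB handle, as above):

    # retry the put at most 8 times; after that the DBLockDeadlockError propagates
    DeadlockWrap(d.put, "foo", data="bar", max_retries=8)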
Example #4
    def run(self):
        self.log.debug('[ws] start serving')
        self.__wd.serve()
        self.__clean.serve()
        self._set_server_available()
        # monitor = _Moniter(self.log)
        while not self.isSetStop():
            # monitor.report(self.info())
            curr_task = None
            start_at = _time()
            try:
                if not self.__task_buff.empty():
                    curr_task = self.__task_buff.get()
                    self.__curr_tasks.append(curr_task)
                    curr_task._call_by_ws_set_status(ST_WORKING)
                    self.__wd.addWorks(curr_task.subWorks)
                    self.log.debug('[ws] pop a Task: %s', curr_task.name)
                    curr_task = None

                # iterate over a snapshot so removals don't skip entries
                for tk in list(self.__curr_tasks):
                    wkarchived = True
                    for wk in tk.subWorks:
                        # check terminal states first so these branches are reachable
                        if wk.status == ST_ERROR:
                            tk._call_by_ws_set_status(ST_ERROR)
                            tk.setToStop()
                            tk.waitForStop()
                            self.log.debug('[ws] Task err: %s', tk.name)
                            self.__curr_tasks.remove(tk)
                            wkarchived = False
                            break
                        elif wk.status == ST_CANCEL:
                            tk._call_by_ws_set_status(ST_CANCEL)
                            tk.setToStop()
                            tk.waitForStop()
                            self.log.debug('[ws] Task canceled: %s', tk.name)
                            self.__curr_tasks.remove(tk)
                            wkarchived = False
                            break
                        elif wk.status != ST_FINISHED:
                            wkarchived = False
                    if wkarchived:
                        tk._call_by_ws_set_status(ST_FINISHED)
                        self.log.debug('[ws] Task done: %s', tk.name)
                        self.__curr_tasks.remove(tk)
                    if tk.status > ST_WORKING:
                        self.log.debug('[ws] cleanup')
                        self.__clean.addWork(WorkShop.SerWork(tk))
                    if tk.status > ST_WORKING:
                        self.log.debug('[ws] cleanup')
                        self.__clean.addWork(WorkShop.SerWork(tk))
            except Exception as e:
                # TODO: fetal err, need handle and report
                if curr_task:
                    curr_task._call_by_ws_set_status(ST_ERROR)
                self.log.exception(e)
            finally:
                duration = _time() - start_at
                if duration < 0.8:
                    _sleep(0.5)

        self._set_server_available(flag=False)
        self.__wd.setToStop()
        self.__wd.joinAll()
        self.__cleanUp()
        self.__clean.setToStop()
        self.__clean.joinAll()
        self.log.debug('[ws] stop serving')
Example #5
 def run(self):
     from random import random
     counter = 0
     while counter < self.quota:
         counter = counter + 1
         self.queue.put("%s.%d" % (self.name, counter))
         _sleep(random() * 0.00001)
Example #6
def wd_test(log):
    ws = WorkDispatcher(tmin=5, tmax=10, log=log)
    i = 0
    total = 0
    works = []
    try:
        ws.serve()
        while True:
            wk = WorkTest(name='work_%05d' % i)
            # wk.cancel()
            ws.addWork(wk)
            works.append(wk)
            if i > 50:
                ws.mgr.pauseAll()
            if i > 100:
                ws.mgr.resumeAll()
            i += 1
            total += 1
            log.error('workers = %d', ws.mgr.count())
            if i > 200:
                break
            if i < 190:
                _sleep(0.3)
    except Exception as e:
        log.exception(e)
        raise
    finally:
        # _sleep(1)
        ws.setToStop()
        ws.joinAll()
        for wk in works:
            log.error('[%s] status=%d', wk.name, wk.status)
        log.error('total=%d, count=%d', total, WorkTest.TOTAL)
Example #7
 def wait(self, timeout=None):
     if not self._is_owned():
         raise RuntimeError("cannot wait on un-acquired lock")
     waiter = Lock()
     waiter.acquire()
     self.__waiters.append(waiter)
     saved_state = self._release_save()
     try:    # restore state no matter what (e.g., KeyboardInterrupt)
         if timeout is None:
             waiter.acquire()
         else:
             # Balancing act:  We can't afford a pure busy loop, so we
             # have to sleep; but if we sleep the whole timeout time,
             # we'll be unresponsive.  The scheme here sleeps very
             # little at first, longer as time goes on, but never longer
             # than 20 times per second (or the timeout time remaining).
             endtime = _time() + timeout
             delay = 0.0005  # 500 us -> initial delay of 1 ms
             while True:
                 gotit = waiter.acquire(0)
                 if gotit:
                     break
                 remaining = endtime - _time()
                 if remaining <= 0:
                     break
                 delay = min(delay * 2, remaining, .05)
                 _sleep(delay)
             if not gotit:
                 try:
                     self.__waiters.remove(waiter)
                 except ValueError:
                     pass
     finally:
         self._acquire_restore(saved_state)
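The timed branch above is an instance of a general exponential-backoff polling pattern. A minimal standalone sketch of that pattern (the helper name and predicate are illustrative, not part of the example):

    from time import time as _time, sleep as _sleep

    def poll_with_backoff(predicate, timeout, max_delay=0.05):
        # Poll predicate() with exponentially growing sleeps, capped at max_delay
        # and at the time remaining; return True if it succeeded within timeout.
        endtime = _time() + timeout
        delay = 0.0005
        while True:
            if predicate():
                return True
            remaining = endtime - _time()
            if remaining <= 0:
                return False
            delay = min(delay * 2, remaining, max_delay)
            _sleep(delay)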
Example #8
def main(cfg, log):
    if cfg.playlist:
        for url in cfg.urls:
            outpath, cfg.urls = parsers.getPlayListParser(url).info(url)
            cfg.outpath = pjoin(cfg.outpath, outpath)
            util.assure_path(cfg.outpath)
            with open(pjoin(cfg.outpath, 'url.txt'), 'w') as fp:
                fp.writelines([url + "\n\n"])
                for i, clip in enumerate(cfg.urls):
                    fp.writelines(["[%03d] %s\n"%(i, clip)])

    bar = ProgressBar()
    ws = WorkShop(tmin=cfg.tmin, tmax=cfg.tmax, log=log)
    dlvs = []
    for i, url in enumerate(cfg.urls):
        dlvideo = VUrlTask(url, vidfmt=cfg.vidfmt, npf=cfg.npf,
                           outpath=cfg.outpath, bar=bar, log=log)
        dlvs.append(dlvideo)
    try:
        ws.serve()
        ws.addTasks(dlvs)
        while len(dlvs) > 0:
            # rebuild the list rather than deleting from it while iterating
            dlvs = [dlv for dlv in dlvs if not (dlv.isArchived() or dlv.isError())]
            _sleep(1)
    except Exception as e:
        log.exception(e)
    finally:
        ws.setToStop()
        ws.join()
Example #9
 def _check(self):
     while self.running:
         step = self._q.get()
         self._doupdate(step=step)
         self._q.task_done()
         _sleep(0.1)
     self._running = False
Example #10
    def put(self, item, block=True, timeout=None):
        """Put item into the queue"""

        if self.maxsize > 0:
            if not block:
                if self.qsize() >= self.maxsize:
                    raise Full('Redis queue is full')
            elif timeout is None:
                while self.qsize() >= self.maxsize:
                    _sleep(0.1)
            elif timeout < 0:
                raise ValueError("'timeout' must be a positive number")
            else:
                endtime = _time() + timeout
                while self.qsize() >= self.maxsize:
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Full('Redis queue is full')
                    _sleep(0.1)
        
        if type(item) is not list:
            item = [item, ]
        elif len(item)<1:
            return False
            
        pipe = self.__db.pipeline()   
        for i in item:
            i = self.__serialize(i)
            pipe.lpush(self.key, i)    
        pipe.execute()      
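A hedged usage sketch of this put() (here `q` stands for an instance of the Redis-backed queue class and the payloads are made up):

    q.put({"job": 1})                    # a single item is wrapped in a list internally
    q.put([{"job": 2}, {"job": 3}])      # a batch is pushed through one pipeline
    q.put({"job": 4}, block=False)       # raises Full immediately if maxsize is reached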
Example #11
 def shutdown(self):
     if self._shutdown_called:
         return
     self._shutdown_called = True
     if not self._shutdown:
         self._shutdown = msg.Shutdown()
     sd = self._shutdown
     self._send_q.put(sd)
     self._main_q.put(sd)
     
     if _poll_for(self, '_main_dead', timeout=sd['before_int']):
         return
         
     # currently executing something.  Try interrupting.
     for _ in range(sd['int_retries']):
         self._interrupt_main(0)
         if _poll_for(self, '_main_dead', timeout=sd['int_poll']):
             return
         
     # interrupt didn't work.  Kill ourselves
     from signal import SIGKILL
     self._log.warn("[shutdown] commiting suicide.")
     _sleep(0.1) # sleep so that the logging completes
     os.kill(os.getpid(), SIGKILL)
Example #12
    def preset_recall(self, preset_number, muted=False):
        """Recalls a preset on the Protea 4.24C device"""

        if not (1 <= preset_number <= 30):
            raise ValueError("Recalled preset must be between 1 and 31")

        self.write_message(21, bytes([preset_number-1, 1]))
        response = self._serial.read(10)

        if not muted:
            # This technique is necessary because of limitations in the 4.24C,
            # in which during recall, it seems to assign values to its DSP in a
            # sequential fashion, sometimes leading to temporarily insane
            # values, such as high gains in both inputs and outputs, if the
            # previous preset had high gain in outputs and the next preset has
            # high gain in inputs.
            #
            # Furthermore, recalling the preset a second time allows us to
            # safely mute all outputs, then to recall without muting (i.e. as
            # they were saved in memory), since the input gains are already at a
            # sane level by respect to the outputs.

            _sleep(3.5)
            self.write_message(21, bytes([preset_number-1, 0]))
            response = self._serial.read(10)
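A hedged usage sketch of the double-recall behaviour described above (`dsp` stands for an instance of this driver class):

    dsp.preset_recall(12)              # recalls with outputs muted, waits ~3.5 s, then recalls unmuted
    dsp.preset_recall(12, muted=True)  # single recall, outputs are left muted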
Example #13
def _blockUntilConditionMet(
		getValue,
		giveUpAfterSeconds,
		shouldStopEvaluator=lambda value: bool(value),
		intervalBetweenSeconds=0.1,
		errorMessage=None):
	"""Repeatedly tries to get a value up until a time limit expires. Tries are separated by
	a time interval. The call will block until shouldStopEvaluator returns True when given the value;
	the default evaluator just returns the value converted to a boolean.
	@return A tuple, (True, value) if evaluator condition is met, otherwise (False, None)
	@raises AssertionError if the time limit expires and an errorMessage is given.
	"""
	assert callable(getValue)
	assert callable(shouldStopEvaluator)
	assert intervalBetweenSeconds > 0.001
	SLEEP_TIME = intervalBetweenSeconds * 0.5
	startTime = _timer()
	lastRunTime = startTime
	firstRun = True  # ensure we start immediately
	while (_timer() - startTime) < giveUpAfterSeconds:
		if firstRun or (_timer() - lastRunTime) > intervalBetweenSeconds:
			firstRun = False
			lastRunTime = _timer()
			val = getValue()
			if shouldStopEvaluator(val):
				return True, val
		_sleep(SLEEP_TIME)

	if errorMessage:
		raise AssertionError(errorMessage)
	return False, None
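A hedged usage sketch (the poll function and message below are illustrative):

	success, value = _blockUntilConditionMet(
		getValue=lambda: fetch_status(),            # hypothetical poll function
		giveUpAfterSeconds=5,
		shouldStopEvaluator=lambda v: v == "ready",
		intervalBetweenSeconds=0.2,
		errorMessage="status never became ready")   # on timeout: AssertionError instead of (False, None)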
Example #14
 def _background_worker(self):   
     try:     
         ni = self._isec / self._psec
         itbeg = _perf_counter()    
         count = 0
         self._rflag = True
         while self._rflag:
             tbeg = _perf_counter()               
             with self._buffers_lock:
                 self._manage_buffers()
                 for (t,i), b in self._buffers.items():
                     b.incr()
                     dat = self._ssfunc(i, t, date_time=True, throw_if_data_lost=False)                    
                     if dat:
                         self._parse_data(t,i,dat)
                     if b.count == 0:
                         continue
                     if (b.count % ni) == 0:
                         #print("b.count mod interval: %i" % b.count)
                         self._handle_null_interval(t, i, b)
             count += 1
             tend = _perf_counter()
             trem = self._psec - (tend - tbeg)
             if trem < 0:
                 ## TODO :: this will create problems handling nulls as we wont be able
                 ##         to speed up by using WAIT_ADJ_DOWN (below)
                 ##         considering adjusting _psec 
                 print("WARN: _background_worker taking longer than _psec (%i) seconds"
                       % self._psec)
             _sleep( max(trem,0) * (self.WAIT_ADJ_DOWN ** self._wait_adj_down_exp) )
             if (count % ni) == 0:
                 self._tune_background_worker(count,ni,itbeg)
     finally:
         self._rflag = False
Example #15
    def wait(self, timeout = None):
        if not self._is_owned():
            raise RuntimeError('cannot wait on un-acquired lock')
        waiter = _allocate_lock()
        waiter.acquire()
        self.__waiters.append(waiter)
        saved_state = self._release_save()
        try:
            if timeout is None:
                waiter.acquire()
                self._note('%s.wait(): got it', self)
            else:
                endtime = _time() + timeout
                delay = 0.0005
                while True:
                    gotit = waiter.acquire(0)
                    if gotit:
                        break
                    remaining = endtime - _time()
                    if remaining <= 0:
                        break
                    delay = min(delay * 2, remaining, 0.05)
                    _sleep(delay)

                if not gotit:
                    self._note('%s.wait(%s): timed out', self, timeout)
                    try:
                        self.__waiters.remove(waiter)
                    except ValueError:
                        pass

                else:
                    self._note('%s.wait(%s): got it', self, timeout)
        finally:
            self._acquire_restore(saved_state)
Example #16
    def get_return(self, wait=1, timeout=None, raise_exception=1, alt_return=None):
        """delivers the return value or (by default) echoes the exception of 
           the call job

           wait: 0 = no waiting; an AttributeError is raised if the result is not ready yet
                 1 = waits for the return value or exception
                 callable -> waits, calling wait() repeatedly while waiting for the return
        """
        if not self.done and wait:
            starttime=_time()
            delay=0.0005
            while not self.done:
                if timeout:
                    remaining = starttime + timeout - _time()
                    if remaining <= 0:  #time is over
                        if raise_exception:
                            raise Empty("return timed out")
                        else:
                            return alt_return
                    delay = min(delay * 2, remaining, .05)
                else:
                    delay = min(delay * 2, .05)
                if callable(wait): wait()
                _sleep(delay)       #reduce CPU usage by using a sleep
        if self.done==2:    #we had an exception
            exc=self.exc
            del self.exc
            if raise_exception & 1:    #by default exception is raised
                raise exc[1].with_traceback(exc[2])
            else:
                return alt_return
        return self.ret
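A hedged usage sketch (`job` stands for an instance of the class this method belongs to):

    # wait up to 2 s; on timeout (or on a stored exception) return None instead of raising
    result = job.get_return(wait=1, timeout=2.0, raise_exception=0, alt_return=None)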
Example #17
    def daemon(cls, please_stop):
        global next_ping

        Till.enabled = True
        try:
            while not please_stop:
                now = _time()
                with Till.locker:
                    if next_ping > now:
                        _sleep(min(next_ping - now, INTERVAL))
                        continue

                    next_ping = now + INTERVAL
                    work = None
                    if Till.all_timers:
                        Till.all_timers.sort(key=lambda r: r[0])
                        for i, (t, s) in enumerate(Till.all_timers):
                            if now < t:
                                work, Till.all_timers[:i] = Till.all_timers[:i], []
                                next_ping = min(next_ping, Till.all_timers[0][0])
                                break
                        else:
                            work, Till.all_timers = Till.all_timers, []

                if work:
                    for t, s in work:
                        s.go()

        except Exception as e:
            from pyLibrary.debugs.logs import Log

            Log.warning("timer shutdown", cause=e)
Example #18
 def run(self):
     from random import random
     counter = 0
     while counter < self.quota:
         counter = counter + 1
         self.queue.put('%s.%d' % (self.name, counter))
         _sleep(random() * 1e-05)
Example #19
def main():
  my_cool_parser = argparse.ArgumentParser(description="Mock application to test Gooey's functionality")
  my_cool_parser.add_argument("filename", help="Name of the file you want to read")  # positional
  my_cool_parser.add_argument("outfile", help="Name of the file where you'll save the output")  # positional
  my_cool_parser.add_argument('-c', '--countdown', default=10, type=int, help='sets the time to count down from')
  my_cool_parser.add_argument("-s", "--showtime", action="store_true", help="display the countdown timer")
  my_cool_parser.add_argument("-d", "--delay", action="store_true", help="Delay execution for a bit")
  my_cool_parser.add_argument('--verbose', '-v', action='count')
  my_cool_parser.add_argument("-o", "--obfuscate", action="store_true", help="obfuscate the countdown timer!")
  my_cool_parser.add_argument('-r', '--recursive', choices=['yes', 'no'], help='Recurse into subfolders')
  my_cool_parser.add_argument("-w", "--writelog", default="No, NOT whatevs", help="write log to some file or something")
  my_cool_parser.add_argument("-e", "--expandAll", action="store_true", help="expand all processes")

  print('inside of main(), my_cool_parser =', my_cool_parser)
  args = my_cool_parser.parse_args()

  print(sys.argv)
  print(args.countdown)
  print(args.showtime)

  start_time = _time()
  print('Counting down from %s' % args.countdown)
  while _time() - start_time < args.countdown:
    if args.showtime:
      print('printing message at: %s' % _time())
    else:
      print('printing message at: %s' % hashlib.md5(str(_time()).encode()).hexdigest())
    _sleep(.5)
  print('Finished running the program. Byeeeeesss!')
Example #20
def main():
    urls = [
        # 'http://v.youku.com/v_show/id_XNzUyNDE4MTQw.html'
        # 'http://i.youku.com/u/UNTc4NzI3MjY0',
        # 'http://v.youku.com/v_show/id_XNzQ5NDAwMDIw.html?from=y1.1-2.10001-0.1-1',
        # 'http://v.youku.com/v_show/id_XNzUwMTE2MDQw.html?f=22611771',
        # 'http://v.youku.com/v_show/id_XNzQ3MjMxMTYw.html',
        'http://video.sina.com.cn/p/ent/v/m/2014-08-14/102164094039.html'
    ]
    log = util.get_logger()
    bar = ProgressBar()
    ws = WorkShop(tmin=1, tmax=2, log=log)
    dlvs = []
    for i, url in enumerate(urls):
        dlvideo = VUrlTask(url, 0, 3, './tmp', bar=bar, log=log)
        dlvs.append(dlvideo)
    try:
        ws.serve()
        ws.addTasks(dlvs)
        while len(dlvs) > 0:
            # rebuild the list rather than deleting from it while iterating
            dlvs = [dlv for dlv in dlvs if not (dlv.isArchived() or dlv.isError())]
            _sleep(1)
    except KeyboardInterrupt:
        pass
    except Exception as e:
        log.exception(e)
    finally:
        ws.setToStop()
        ws.join()
Example #21
    def wait(self, timeout=None):
        """Wait until notified or until a timeout occurs.
        
        If the calling thread has not acquired the lock when this method is
        called, a RuntimeError is raised.
        
        This method releases the underlying lock, and then blocks until it is
        awakened by a notify() or notifyAll() call for the same condition
        variable in another thread, or until the optional timeout occurs. Once
        awakened or timed out, it re-acquires the lock and returns.
        
        When the timeout argument is present and not None, it should be a
        floating point number specifying a timeout for the operation in seconds
        (or fractions thereof).
        
        When the underlying lock is an RLock, it is not released using its
        release() method, since this may not actually unlock the lock when it
        was acquired multiple times recursively. Instead, an internal interface
        of the RLock class is used, which really unlocks it even when it has
        been recursively acquired several times. Another internal interface is
        then used to restore the recursion level when the lock is reacquired.
        
        """
        if not self._is_owned():
            raise RuntimeError("cannot wait on un-acquired lock")
        waiter = _allocate_lock()
        waiter.acquire()
        self.__waiters.append(waiter)
        saved_state = self._release_save()
        try:
            if timeout is None:
                waiter.acquire()
                self._note("%s.wait(): got it", self)
            else:
                endtime = _time() + timeout
                delay = 0.0005
                while True:
                    gotit = waiter.acquire(0)
                    if gotit:
                        break
                    remaining = endtime - _time()
                    if remaining <= 0:
                        break
                    delay = min(delay * 2, remaining, 0.05)
                    _sleep(delay)

                if not gotit:
                    self._note("%s.wait(%s): timed out", self, timeout)
                    try:
                        self.__waiters.remove(waiter)
                    except ValueError:
                        pass

                else:
                    self._note("%s.wait(%s): got it", self, timeout)
        finally:
            self._acquire_restore(saved_state)

        return
Example #22
 def makeSubWorks(self):
     TaskTest.addme()
     subworks = []
     for i in range(self.sub_size):
         subworks.append(WorkTest('t_%s_%d' % (self.name, i)))
     self.addSubWorks(subworks)
     _sleep(0.1)
     self.log.error(' ........... am Task %s, subworks=%d', self.name, len(subworks))
Example #23
    def wait(self, timeout=30.0):
        DELAY = 0.0001  # 0.1 ms

        stopwatch = Stopwatch(timeout=timeout)

        while not self.__state and stopwatch.check():
            _sleep(DELAY)
            stopwatch += 1
Example #24
    def acquire(self, blocking=1):
        me = threading.currentThread()
        if self.__owner is me:
            self.__count = self.__count + 1
            if __debug__:
                self._note("%s.acquire(%s): recursive success", self, blocking)
            return 1

        if not blocking or not self.__timeout:
            rc = self.__block.acquire(blocking)
            if rc:
                self.__owner = me
                self.__count = 1
#                 self.__acquiredStackTrace = traceback.extract_stack()
                if __debug__:
                    self._note("%s.acquire(%s): initial success", self, blocking)
            else:
                if __debug__:
                    self._note("%s.acquire(%s): failure", self, blocking)
            return rc
        else:
            # This comes from threading._Condition.wait()

            # Balancing act:  We can't afford a pure busy loop, so we
            # have to sleep; but if we sleep the whole timeout time,
            # we'll be unresponsive.  The scheme here sleeps very
            # little at first, longer as time goes on, but never longer
            # than 20 times per second (or the timeout time remaining).
            endtime = _time() + self.__timeout
            delay = 0.0005 # 500 us -> initial delay of 1 ms

            while True:
                gotit = self.__block.acquire(0)
                if gotit:
                    break
                remaining = endtime - _time()
                if remaining <= 0:
                    break
#                 delay = min(delay * 2, remaining, .05)
                delay = min(delay * 2, remaining, .02)
                _sleep(delay)
            if not gotit:
                if __debug__:
                    self._note("%s.wait(%s): timed out", self, self.__timeout)
                
#                 print "----Lock acquired by"
#                 print "".join(traceback.format_list(self.__acquiredStackTrace))

                raise DeadBlockPreventionTimeOutError()
            else:
                if __debug__:
                    self._note("%s.wait(%s): got it", self, self.__timeout)
                
                self.__owner = me
                self.__count = 1
#                 self.__acquiredStackTrace = traceback.extract_stack()
                
                return gotit
Example #25
def main():
    server = RSServer(8000)
    server.start()
    while True:
        if not _keep_running:
            server.shutdown()
            break
        _sleep(0.1)
    return
Example #26
 def start(self):
     _ImmortalThread.start(self)
     log_startup(self,
                 'Thread Pool Thread With Name %s and Index %d' %
                 (self.__name, self.__index),
                 'TPT-%s-%d' % (self.__name, self.__index))
     while not self.__started:
         _sleep(0.001)
     return
Example #27
    def experiment(self, ag_mode, ag_sp, flow_rate, volume):
        """
        @param flow_rate: flow rate in *mL per min*
        """
        app = self.app
        app.login()
        time = _time

        self.logger.info("Initializing Agitation with mode=%s sp=%s." % (ag_mode, ag_sp))
        app.setag(ag_mode, ag_sp)

        # if setpoint is auto mode, wait for pv to reach correct value
        if ag_mode == 0:
            timeout = time() + 10 * 60
            log_time = time() + 10
            while True:
                pv = app.getagpv()
                if ag_sp - 1 < pv < ag_sp + 1:
                    break
                t = time()
                if t > log_time:
                    log_time = int(t) + 10
                    self.logger.info("Waiting for Agitation to reach setpoint. PV = %d." % app.getagpv())
                if t > timeout:
                    raise KLAError("Agitation didn't reach setpoint.")
                _sleep(1)

        app.setmg(1, flow_rate / 1000)

        self.logger.info("Beginning KLA Experiment.")

        batch_name = "kla%s-%s-%s-%s" % (ag_mode, volume, ag_sp, flow_rate)

        self.logger.info("Starting new batch named '%s'." % batch_name)
        if app.batchrunning():
            app.endbatch()
        app.startbatch(batch_name)

        start = time()
        end = start + 14 * 60
        log_time = start + 10
        while True:
            t = time()
            pv = app.getdopv()
            if t > log_time:
                self.logger.info("Test running, %d seconds passed. DO PV = %.1f." % (t - start, pv))
                log_time += 10
            if t > end:
                break
            if pv > 90:
                break

        self.logger.info("Test finished. DO PV = %.1f after %d seconds." % (app.getdopv(), time() - start))

        self.logger.info("Ending batch")
        app.endbatch()
        return batch_name
Example #28
    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        """
        if block:
            if timeout is None:
                # blocking, w/o timeout, i.e. forever
                self.fsema.acquire()
            elif timeout >= 0:
                # waiting max. 'timeout' seconds.
                # this code snippet is from threading.py: _Event.wait():
                # Balancing act:  We can't afford a pure busy loop, so we
                # have to sleep; but if we sleep the whole timeout time,
                # we'll be unresponsive.  The scheme here sleeps very
                # little at first, longer as time goes on, but never longer
                # than 20 times per second (or the timeout time remaining).
                delay = 0.0005 # 500 us -> initial delay of 1 ms
                endtime = _time() + timeout
                while True:
                    if self.fsema.acquire(0):
                        break
                    remaining = endtime - _time()
                    if remaining <= 0:  #time is over and no slot was free
                        raise Full
                    delay = min(delay * 2, remaining, .05)
                    _sleep(delay)       #reduce CPU usage by using a sleep
            else:
                raise ValueError("'timeout' must be a positive number")
        elif not self.fsema.acquire(0):
            raise Full
        self.mutex.acquire()
        release_fsema = True
        try:
            was_empty = self._empty()
            self._put(item)
            # If we fail before here, the empty state has
            # not changed, so we can skip the release of esema
            if was_empty:
                self.esema.release()
            # If we fail before here, the queue can not be full, so
            # release_full_sema remains True
            release_fsema = not self._full()
        finally:
            # Catching system level exceptions here (RecursionDepth,
            # OutOfMemory, etc) - so do as little as possible in terms
            # of Python calls.
            if release_fsema:
                self.fsema.release()
            self.mutex.release()
Example #29
def _fake_sleep(t, sleep_time=0.25):
    """
    Replaces a long ``time.sleep(t)`` with repeated calls to
    ``time.sleep(0.25)``, because ``time.sleep(t)`` is uninterruptable with
    ``thread.interrupt_main()``.
    """
    while t > sleep_time:
        _sleep(sleep_time)
        t -= sleep_time
    _sleep(t)
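A hedged demonstration of why the slicing matters (the watchdog thread below is illustrative): a KeyboardInterrupt injected from another thread can only surface between the short sleeps.

    import threading, _thread
    from time import sleep as _sleep

    def _watchdog(limit):
        # hypothetical helper: simulate Ctrl-C in the main thread after 'limit' seconds
        _sleep(limit)
        _thread.interrupt_main()

    threading.Thread(target=_watchdog, args=(2,), daemon=True).start()
    try:
        _fake_sleep(60)   # returns early because the 0.25 s slices let the interrupt through
    except KeyboardInterrupt:
        print("interrupted after ~2 seconds")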
Example #30
    def producer(self, count = 100, stop = None):
        from random import random as _random
        from time import sleep as _sleep

        counter = 0
        while counter < count:
            self.pipe.put(counter)
            _sleep(_random() * 0.00001)
            counter = counter + 1
        self.pipe.done_sending()
Example #31
    def wait(self, timeout=None):
        """Wait until notified or until a timeout occurs.

        If the calling thread has not acquired the lock when this method is
        called, a RuntimeError is raised.

        This method releases the underlying lock, and then blocks until it is
        awakened by a notify() or notifyAll() call for the same condition
        variable in another thread, or until the optional timeout occurs. Once
        awakened or timed out, it re-acquires the lock and returns.

        When the timeout argument is present and not None, it should be a
        floating point number specifying a timeout for the operation in seconds
        (or fractions thereof).

        When the underlying lock is an RLock, it is not released using its
        release() method, since this may not actually unlock the lock when it
        was acquired multiple times recursively. Instead, an internal interface
        of the RLock class is used, which really unlocks it even when it has
        been recursively acquired several times. Another internal interface is
        then used to restore the recursion level when the lock is reacquired.

        """
        if not self._is_owned():
            raise RuntimeError("cannot wait on un-acquired lock")
        waiter = _allocate_lock()
        waiter.acquire()
        self.__waiters.append(waiter)
        saved_state = self._release_save()
        try:    # restore state no matter what (e.g., KeyboardInterrupt)
            if timeout is None:
                waiter.acquire()
                if __debug__:
                    self._note("%s.wait(): got it", self)
            else:
                # Balancing act:  We can't afford a pure busy loop, so we
                # have to sleep; but if we sleep the whole timeout time,
                # we'll be unresponsive.  The scheme here sleeps very
                # little at first, longer as time goes on, but never longer
                # than 20 times per second (or the timeout time remaining).
                endtime = _time() + timeout
                delay = 0.0005 # 500 us -> initial delay of 1 ms
                while True:
                    gotit = waiter.acquire(0)
                    if gotit:
                        break
                    remaining = endtime - _time()
                    if remaining <= 0:
                        break
                    delay = min(delay * 2, remaining, .05)
                    _sleep(delay)
                if not gotit:
                    if __debug__:
                        self._note("%s.wait(%s): timed out", self, timeout)
                    try:
                        self.__waiters.remove(waiter)
                    except ValueError:
                        pass
                else:
                    if __debug__:
                        self._note("%s.wait(%s): got it", self, timeout)
        finally:
            self._acquire_restore(saved_state)
Example #32
def _test():
    class BoundedQueue(_Verbose):
        def __init__(self, limit):
            _Verbose.__init__(self)
            self.mon = RLock()
            self.rc = Condition(self.mon)
            self.wc = Condition(self.mon)
            self.limit = limit
            self.queue = _deque()

        def put(self, item):
            self.mon.acquire()
            while len(self.queue) >= self.limit:
                self._note('put(%s): queue full', item)
                self.wc.wait()

            self.queue.append(item)
            self._note('put(%s): appended, length now %d', item,
                       len(self.queue))
            self.rc.notify()
            self.mon.release()

        def get(self):
            self.mon.acquire()
            while not self.queue:
                self._note('get(): queue empty')
                self.rc.wait()

            item = self.queue.popleft()
            self._note('get(): got %s, %d left', item, len(self.queue))
            self.wc.notify()
            self.mon.release()
            return item

    class ProducerThread(Thread):
        def __init__(self, queue, quota):
            Thread.__init__(self, name='Producer')
            self.queue = queue
            self.quota = quota

        def run(self):
            from random import random
            counter = 0
            while counter < self.quota:
                counter = counter + 1
                self.queue.put('%s.%d' % (self.name, counter))
                _sleep(random() * 1e-05)

    class ConsumerThread(Thread):
        def __init__(self, queue, count):
            Thread.__init__(self, name='Consumer')
            self.queue = queue
            self.count = count

        def run(self):
            while self.count > 0:
                item = self.queue.get()
                print(item)
                self.count = self.count - 1

    NP = 3
    QL = 4
    NI = 5
    Q = BoundedQueue(QL)
    P = []
    for i in range(NP):
        t = ProducerThread(Q, NI)
        t.name = 'Producer-%d' % (i + 1)
        P.append(t)

    C = ConsumerThread(Q, NI * NP)
    for t in P:
        t.start()
        _sleep(1e-06)

    C.start()
    for t in P:
        t.join()

    C.join()
Example #33
 def all_metrics(self, symbol):
     _metrics = {}
     for _metric in self.available_metrics:
         _metrics[_metric] = self.metrics(symbol, _metric)
         _sleep(0.1)
     return concat(_metrics)
Example #34
 def _play_freq(pin, freq, duration):
     pin.set_analog_period_microseconds(int(1000000 / freq))
     pin.write_analog(511)
     # with pin_class.tlock:
     pin_class.tones.appendleft((pin, int(freq), _perf() + duration / 1000))
     _sleep(duration / 1000)
Example #35
    def Activated(self):
        try:
            # Save object name that will be divided.
            selection = Gui.Selection.getSelectionEx()
            if (len(selection) < 1):
                # An object must be selected
                errMessage = "Select an object to use Split Tool"
                faced.getInfo(selection).errorDialog(errMessage)
                return
            App.ActiveDocument.openTransaction(
                translate("Design456", "Split Object"))
            shape = selection[0].Object.Shape
            bb = shape.BoundBox
            length = max(bb.XLength, bb.YLength, bb.ZLength)

            nameOfselectedObject = selection[0].ObjectName
            totalName = nameOfselectedObject + '_cs'
            """ slow function . . you need to use wait before getting 
                the answer as the execution is continuing down """
            Gui.runCommand('Part_CrossSections', 0)
            gcompund = App.ActiveDocument.addObject("Part::Compound",
                                                    "Compound")

            App.ActiveDocument.recompute()

            # get object name
            # We need this delay to let the user choose the split form.
            getExtrude_cs = None  # dummy variable used to wait for the Extrude_cs object to be made
            while (getExtrude_cs is None):
                getExtrude_cs = App.ActiveDocument.getObject(totalName)
                _sleep(.1)
                Gui.updateGui()
            # Begin command Part_Compound
            gcompund.Links = [
                getExtrude_cs,
            ]

            # Begin command Part_BooleanFragments
            j = SPLIT.makeBooleanFragments(name='BooleanFragments')
            j.Objects = [
                gcompund,
                App.ActiveDocument.getObject(nameOfselectedObject)
            ]
            j.Mode = 'Standard'
            j.Proxy.execute(j)
            j.purgeTouched()
            App.ActiveDocument.recompute()
            if not j.isValid():
                App.ActiveDocument.removeObject(j.Name)
                # Shape is not OK
                errMessage = "Failed to fillet the objects"
                faced.getInfo(selection).errorDialog(errMessage)
            else:
                # Make a simple copy
                newShape = Part.getShape(j,
                                         '',
                                         needSubElement=False,
                                         refine=False)
                NewJ = App.ActiveDocument.addObject(
                    'Part::Feature', 'SplitedObject').Shape = newShape
                # Remove Old objects
                for obj in j.Objects:
                    App.ActiveDocument.removeObject(obj.Name)
                App.ActiveDocument.removeObject(totalName)
                App.ActiveDocument.removeObject(j.Name)
            App.ActiveDocument.commitTransaction()  #undo reg.
            App.ActiveDocument.recompute()
        except Exception as err:
            App.Console.PrintError("'SplitObject' Failed. "
                                   "{err}\n".format(err=str(err)))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)