def put(self, item, block=True, timeout=None):
    """Put an item (or a list of items) onto the Redis-backed queue.

    ``block``/``timeout`` mimic queue.Queue.put, implemented by polling
    ``qsize()`` every 0.1 s (Redis offers no server-side "not full"
    notification to wait on).

    Raises Full when the queue is full and we are non-blocking or the
    timeout expires; raises ValueError for a negative timeout.  A list
    argument pushes every element in a single pipeline; an empty list is
    a no-op that returns False.
    """
    if self.maxsize > 0:
        if not block:
            # NOTE(review): check-then-push is inherently racy across
            # concurrent producers; the queue can overshoot maxsize.
            if self.qsize() >= self.maxsize:
                raise Full('Redis queue is full')
        elif timeout is None:
            # Block indefinitely, polling for free space.
            while self.qsize() >= self.maxsize:
                _sleep(0.1)
        elif timeout < 0:
            raise ValueError("'timeout' must be a positive number")
        else:
            endtime = _time() + timeout
            while self.qsize() >= self.maxsize:
                remaining = endtime - _time()
                if remaining <= 0.0:
                    raise Full('Redis queue is full')
                # Never sleep past the deadline (the original always
                # slept 0.1 s and could overshoot the timeout).
                _sleep(min(0.1, remaining))

    # Normalise to a list so single items and batches share one path.
    if type(item) is not list:
        item = [item, ]
    elif len(item) < 1:
        return False

    pipe = self.__db.pipeline()
    for i in item:
        i = self.__serialize(i)
        pipe.lpush(self.key, i)
    pipe.execute()
Exemple #2
0
def checkMultipleCommands(scpiObj):
    """Query a growing number of attributes in one message and verify the
    answer contains exactly one element per requested attribute."""
    _printHeader("Requesting more than one attribute per query")
    try:
        log = []
        for nCmds in range(2, concatenatedCmds + 1):
            commands = [_buildCommand2Test() for _ in range(nCmds)]
            cmds = ";".join("%s" % c for c in commands)
            cmdsSplitted = "".join("\t\t%s\n" % cmd for cmd in cmds.split(';'))
            start_t = _time()
            answer = _send2Input(scpiObj, cmds)
            nAnswers = len(_cutMultipleAnswer(answer))
            log.append(_time() - start_t)
            print("\tRequest %d attributes in a single query: \n%s\tAnswer: "
                  "%r (%d, %g ms)\n" % (nCmds, cmdsSplitted, answer, nAnswers,
                                        log[-1]*1000))
            if nAnswers != nCmds:
                raise AssertionError("The answer doesn't have the %d expected "
                                     "elements" % (nCmds))
            _interTestWait()
        # TODO: multiple writes
        result = True, "Many commands per query test PASSED"
    except Exception as e:
        print("\tUnexpected kind of exception! %s" % e)
        print_exc()
        result = False, "Many commands per query test FAILED"
    _printFooter(result[1])
    return result
 def _expect_with_poll(self, expect_list, timeout=None):
     """Wait until one of a list of regular expressions matches the input,
     using select.poll() to honour the timeout.

     Returns (index, match, text) on success, (-1, None, text) on
     timeout/EOF; raises EOFError at EOF with nothing left to read.

     Fixes vs. the previous revision:
     - the ``continue`` before ``raise`` made the re-raise unreachable,
       silently swallowing every select error (now only EINTR retries);
     - an inner ``while True:`` spun forever when no pattern matched;
     - ``while m: ... break`` obfuscated simple ``if`` tests.
     """
     re = None
     expect_list = expect_list[:]
     indices = range(len(expect_list))
     # Compile any plain strings into regex objects, importing re lazily.
     for i in indices:
         if not hasattr(expect_list[i], 'search'):
             if not re:
                 import re
             expect_list[i] = re.compile(expect_list[i])
     call_timeout = timeout
     if timeout is not None:
         time_start = _time()
     self.process_rawq()
     # Fast path: something already buffered may match.
     m = None
     for i in indices:
         m = expect_list[i].search(self.cookedq)
         if m:
             e = m.end()
             text = self.cookedq[:e]
             self.cookedq = self.cookedq[e:]
             break
     if not m:
         poller = select.poll()
         poll_in_or_priority_flags = select.POLLIN | select.POLLPRI
         poller.register(self, poll_in_or_priority_flags)
         while not m and not self.eof:
             try:
                 # poll() takes milliseconds; None blocks indefinitely.
                 ready = poller.poll(None if timeout is None
                                     else 1000 * call_timeout)
             except InterruptedError:
                 # Interrupted system call: shrink the timeout and retry.
                 if timeout is not None:
                     elapsed = _time() - time_start
                     call_timeout = timeout - elapsed
                 continue
             for fd, mode in ready:
                 if mode & poll_in_or_priority_flags:
                     self.fill_rawq()
                     self.process_rawq()
                     for i in indices:
                         m = expect_list[i].search(self.cookedq)
                         if m:
                             e = m.end()
                             text = self.cookedq[:e]
                             self.cookedq = self.cookedq[e:]
                             break
             if timeout is not None:
                 elapsed = _time() - time_start
                 if elapsed >= timeout:
                     break
                 call_timeout = timeout - elapsed
         poller.unregister(self)
     if m:
         return (i, m, text)
     text = self.read_very_lazy()
     if not text and self.eof:
         raise EOFError
     return (-1, None, text)
 def _expect_with_select(self, list, timeout=None):
     """Wait until one of a list of regular expressions matches the input,
     implementing the timeout with select.select().

     Returns (index, match, text) on a match; (-1, None, text) on
     timeout/EOF; raises EOFError at EOF with nothing left to read.
     """
     re = None
     list = list[:]
     indices = range(len(list))
     # Compile plain strings to patterns; import re only if needed.
     for idx in indices:
         if not hasattr(list[idx], 'search'):
             if not re:
                 import re
             list[idx] = re.compile(list[idx])
     if timeout is not None:
         time_start = _time()
     while True:
         self.process_rawq()
         for idx in indices:
             found = list[idx].search(self.cookedq)
             if found is not None:
                 end = found.end()
                 matched_text = self.cookedq[:end]
                 self.cookedq = self.cookedq[end:]
                 return (idx, found, matched_text)
         if self.eof:
             break
         if timeout is not None:
             elapsed = _time() - time_start
             if elapsed >= timeout:
                 break
             r, w, x = select.select([self.fileno()], [], [],
                                     timeout - elapsed)
             if not r:
                 break
         self.fill_rawq()
     leftover = self.read_very_lazy()
     if not leftover and self.eof:
         raise EOFError
     return (-1, None, leftover)
Exemple #5
0
 def put(self, item, block=True, timeout=None):
     """
     look at Queue.LifoQueue doc; instead of raising Queue.Full exception,
     removes least recently added item and puts new item.

     Fix: in the timed branch, after evicting the oldest entry the
     original still executed ``self.not_full.wait(remaining)`` with
     ``remaining <= 0`` — a pointless wait with a non-positive timeout.
     We now break out immediately once room has been made.
     """
     with self.not_full:
         if self.maxsize > 0:
             if not block:
                 if self._qsize() == self.maxsize:
                     # this replaces "raise Queue.Full"
                     self.queue.pop(0)
             elif timeout is None:
                 while self._qsize() == self.maxsize:
                     self.not_full.wait()
             elif timeout < 0:
                 raise ValueError("'timeout' must be a positive number")
             else:
                 endtime = _time() + timeout
                 while self._qsize() == self.maxsize:
                     remaining = endtime - _time()
                     if remaining <= 0.0:
                         # this replaces "raise Queue.Full"
                         self.queue.pop(0)
                         break
                     self.not_full.wait(remaining)
         self._put(item)
         self.unfinished_tasks += 1
         self.not_empty.notify()
 def _read_until_with_select(self, match, timeout=None):
     """Read until *match* is encountered or the timeout expires, using
     select.select() to implement the timeout.

     Fixes vs. the previous revision: the trailing
     ``while timeout is not None: ... continue`` busy-spun at 100% CPU
     until the deadline, and the mangled ``while select.select(...)``
     nesting could re-enter select with a stale timeout.  Restored the
     canonical single read loop.
     """
     n = len(match)
     self.process_rawq()
     i = self.cookedq.find(match)
     if i >= 0:
         i = i + n
         buf = self.cookedq[:i]
         self.cookedq = self.cookedq[i:]
         return buf
     s_reply = ([self], [], [])
     s_args = s_reply
     if timeout is not None:
         s_args = s_args + (timeout,)
         time_start = _time()
     # Loop while data is readable; a select() miss means timeout.
     while not self.eof and select.select(*s_args) == s_reply:
         # Resume the search just before the tail we already scanned.
         i = max(0, len(self.cookedq) - n)
         self.fill_rawq()
         self.process_rawq()
         i = self.cookedq.find(match, i)
         if i >= 0:
             i = i + n
             buf = self.cookedq[:i]
             self.cookedq = self.cookedq[i:]
             return buf
         if timeout is not None:
             elapsed = _time() - time_start
             if elapsed >= timeout:
                 break
             # Shrink the select timeout by the time already spent.
             s_args = s_reply + (timeout - elapsed,)
     return self.read_very_lazy()
Exemple #7
0
def resolve(host, port=80, host_string='', path=None, timeout=1):
    """Issue a minimal HTTP GET to ``host:port`` and return the elapsed
    seconds until the first response bytes arrive, or None on any failure.

    Fixes: Python 2 ``print`` statement; ``send()`` (which may write
    partially) replaced by ``sendall()``; HTTP/1.1 lines terminated with
    CRLF as RFC 7230 requires (bare LF is only tolerated by lenient
    servers); the always-true ``if sock:`` guard dropped.
    """
    import socket
    sock = socket.socket(socket.AF_INET)
    sock.settimeout(timeout or None)
    request = 'GET / HTTP/1.1\r\n'
    if path:
        if not path.startswith('/'):
            path = '/' + path
        request = 'GET %s HTTP/1.1\r\n' % path
    if host_string:
        request += 'Host: %s\r\n' % host_string
        request += 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:31.0) Gecko/20100101 Firefox/31.0\r\n'
        request += 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n'
    request += '\r\n'
    try:
        start = _time()
        sock.connect((host, port))
        sock.sendall(request.encode('latin-1'))
        data = sock.recv(1024)
        if not data:
            return None
        print(data)  # kept from original: debug dump of the first chunk
        return _time() - start
    except Exception:
        return None
    finally:
        sock.close()
Exemple #8
0
    def get_return(self, wait=1, timeout=None, raise_exception=1, alt_return=None):
        """delivers the return value or (by default) echoes the exception of
           the call job

           wait: 0=no waiting; Attribute error raised if no
                 1=waits for return value or exception
                 callable -> waits and wait()-call's while waiting for return

           Fix: the Python 2 ``raise X, y, z`` forms were syntax errors
           under Python 3; replaced with ``raise Empty(...)`` and
           ``raise value.with_traceback(tb)`` (``self.exc`` is assumed to
           be a sys.exc_info() triple — TODO confirm at the setter).
        """
        if not self.done and wait:
            starttime = _time()
            delay = 0.0005
            while not self.done:
                if timeout:
                    remaining = starttime + timeout - _time()
                    if remaining <= 0:  # time is over
                        if raise_exception:
                            raise Empty("return timed out")
                        else:
                            return alt_return
                    delay = min(delay * 2, remaining, .05)
                else:
                    delay = min(delay * 2, .05)
                if callable(wait):
                    wait()
                _sleep(delay)  # reduce CPU usage by using a sleep
        if self.done == 2:  # we had an exception
            exc = self.exc
            del self.exc
            if raise_exception & 1:  # by default exception is raised
                raise exc[1].with_traceback(exc[2])
            else:
                return alt_return
        return self.ret
Exemple #9
0
    def put(self, item, block = True, timeout = None):
        """Insert *item*, honouring the block/timeout protocol of
        queue.Queue.put; raises Full when no slot frees up in time."""
        with self.not_full:
            if not block:
                if self._full():
                    raise Full
            elif timeout is None:
                while self._full():
                    self.not_full.wait()
            else:
                if timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                deadline = _time() + timeout
                while self._full():
                    time_left = deadline - _time()
                    if time_left <= 0.0:
                        raise Full
                    self.not_full.wait(time_left)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
Exemple #10
0
    def get(self, block = True, timeout = None):
        """Remove and return the next item, honouring the block/timeout
        protocol of queue.Queue.get; raises Empty when nothing arrives."""
        with self.not_empty:
            if not block:
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                while not self._qsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a positive number")
            else:
                deadline = _time() + timeout
                while not self._qsize():
                    time_left = deadline - _time()
                    if time_left <= 0.0:
                        raise Empty
                    self.not_empty.wait(time_left)
            item = self._get()
            self.not_full.notify()
            return item
Exemple #11
0
	def prepend(self, item, block=True, timeout=None):
		"""Insert *item* at the head of the queue (Queue.put semantics).

		Bug fix: the default was ``timeout=True``, so calls relying on the
		default took the timed branch with ``endtime = _time() + True`` and
		raised queue.Full after ~1 second instead of blocking indefinitely.
		``None`` restores the standard contract.  Size checks unified to
		``>=`` (the original mixed ``==`` and ``>=``).
		"""
		from time import time as _time

		self.not_full.acquire()
		try:
			if self.maxsize > 0:
				if not block:
					if self._qsize() >= self.maxsize:
						raise queue.Full
				elif timeout is None:
					while self._qsize() >= self.maxsize:
						self.not_full.wait()
				elif timeout < 0:
					raise ValueError("'timeout' must be a non-negative number")
				else:
					endtime = _time() + timeout
					while self._qsize() >= self.maxsize:
						remaining = endtime - _time()
						if remaining <= 0.0:
							raise queue.Full
						self.not_full.wait(remaining)
			self._prepend(item)
			self.unfinished_tasks += 1
			self.not_empty.notify()
		finally:
			self.not_full.release()
Exemple #12
0
    def put_and_notify(self, item, block=True, timeout=None):
        """Insert *item* like Queue.put, then wake first_element_changed
        waiters if the heap's smallest element changed as a result."""
        log.debug("Adding Event:" + str(item))
        with self.not_full:
            head_before = None
            if self._qsize() > 0:
                head_before = heapq.nsmallest(1, self.queue)[0]

            if self.maxsize > 0:
                if not block:
                    if self._qsize() == self.maxsize:
                        raise Full
                elif timeout is None:
                    while self._qsize() == self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    deadline = _time() + timeout
                    while self._qsize() == self.maxsize:
                        time_left = deadline - _time()
                        if time_left <= 0.0:
                            raise Full
                        self.not_full.wait(time_left)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()

            head_after = heapq.nsmallest(1, self.queue)[0]
            if head_before != head_after:
                self.first_element_changed.notify()
Exemple #13
0
    def join(self, timeout=None):
        """Wait until the thread terminates or *timeout* seconds elapse."""
        if not self._initialized:
            raise RuntimeError("Thread.__init__() not called")
        if not self._started.is_set():
            raise RuntimeError("cannot join thread before it is started")
        if self is current_thread():
            raise RuntimeError("cannot join current thread")

        if __debug__:
            if not self._stopped:
                self._note("%s.join(): waiting until thread stops", self)

        with self._block:
            if timeout is None:
                while not self._stopped:
                    self._block.wait()
                if __debug__:
                    self._note("%s.join(): thread stopped", self)
            else:
                deadline = _time() + timeout
                while not self._stopped:
                    time_left = deadline - _time()
                    if time_left <= 0:
                        if __debug__:
                            self._note("%s.join(): timed out", self)
                        break
                    self._block.wait(time_left)
                else:
                    # while/else: loop ended because the thread stopped.
                    if __debug__:
                        self._note("%s.join(): thread stopped", self)
Exemple #14
0
    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until an item is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Empty exception if no item was available within that time.
        Otherwise ('block' is false), return an item if one is immediately
        available, else raise the Empty exception ('timeout' is ignored
        in that case).
        """
        with self.not_empty:
            if not block:
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                while not self._qsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a positive number")
            else:
                deadline = _time() + timeout
                while not self._qsize():
                    time_left = deadline - _time()
                    if time_left <= 0.0:
                        raise Empty
                    self.not_empty.wait(time_left)
            item = self._get()
            self.not_full.notify()
            return item
Exemple #15
0
 def wait(self, timeout=None):
     """Release the underlying lock and wait to be notified, or until
     *timeout* seconds pass; then re-acquire the lock."""
     if not self._is_owned():
         raise RuntimeError("cannot wait on un-acquired lock")
     waiter = Lock()
     waiter.acquire()
     self.__waiters.append(waiter)
     saved_state = self._release_save()
     try:    # restore state no matter what (e.g., KeyboardInterrupt)
         if timeout is None:
             waiter.acquire()
         else:
             # We cannot block on the waiter with a deadline, so poll it
             # with an exponentially growing sleep, capped at 50 ms and
             # at the time remaining, to balance latency against CPU use.
             deadline = _time() + timeout
             delay = 0.0005  # 500 us -> initial delay of 1 ms
             gotit = waiter.acquire(0)
             while not gotit:
                 remaining = deadline - _time()
                 if remaining <= 0:
                     break
                 delay = min(delay * 2, remaining, .05)
                 _sleep(delay)
                 gotit = waiter.acquire(0)
             if not gotit:
                 # Timed out: withdraw our waiter; a racing notify may
                 # already have removed it, hence the ValueError guard.
                 try:
                     self.__waiters.remove(waiter)
                 except ValueError:
                     pass
     finally:
         self._acquire_restore(saved_state)
Exemple #16
0
 def wait_for(self, predicate, timeout=None):
     """Repeatedly wait on this condition until predicate() is true or
     *timeout* expires; return the last value of predicate()."""
     endtime = None
     remaining = timeout
     result = predicate()
     while not result:
         if remaining is not None:
             if endtime is None:
                 # First pass: fix the absolute deadline.
                 endtime = _time() + remaining
             else:
                 remaining = endtime - _time()
                 if remaining <= 0:
                     if __debug__:
                         self._note("%s.wait_for(%r, %r): Timed out.",
                                    self, predicate, timeout)
                     break
         if __debug__:
             self._note("%s.wait_for(%r, %r): Waiting with timeout=%s.",
                        self, predicate, timeout, remaining)
         self.wait(remaining)
         result = predicate()
     else:
         # while/else: predicate became true without timing out.
         if __debug__:
             self._note("%s.wait_for(%r, %r): Success.",
                        self, predicate, timeout)
     return result
Exemple #17
0
 def acquire(self, blocking=True, timeout=None):
     """Decrement the semaphore, blocking up to *timeout* seconds when the
     counter is zero; return True on success, False otherwise."""
     if not blocking and timeout is not None:
         raise ValueError("can't specify timeout for non-blocking acquire")
     rc = False
     endtime = None
     with self._cond:
         while self._value == 0:
             if not blocking:
                 break
             if __debug__:
                 self._note("%s.acquire(%s): blocked waiting, value=%s",
                            self, blocking, self._value)
             if timeout is not None:
                 if endtime is None:
                     # Fix the absolute deadline on the first wait.
                     endtime = _time() + timeout
                 else:
                     timeout = endtime - _time()
                     if timeout <= 0:
                         break
             self._cond.wait(timeout)
         else:
             # while/else: counter is positive — take one unit.
             self._value -= 1
             if __debug__:
                 self._note("%s.acquire: success, value=%s",
                            self, self._value)
             rc = True
     return rc
Exemple #18
0
def msg_block(msg, level=LEVEL_INFO, log_time=True):
    """Generator body for a logging context: emit "<msg>... " on entry,
    yield to the caller, then emit "<msg> done." (with elapsed time when
    log_time is true) on normal exit."""
    if log_time:
        start_time = _time()
    message(level, '{}... '.format(msg))
    yield
    if log_time:
        suffix = ', time: {:.4f}s'.format(_time() - start_time)
    else:
        suffix = ''
    message(level, '{} done{}.'.format(msg, suffix))
def main():
  """Mock CLI entry point: parse the demo arguments, then count down,
  printing a (optionally obfuscated) message every half second.

  Fixes: Python 2 print statements converted to print() calls, and
  hashlib.md5 fed bytes (Python 3 hash functions reject str).
  """
  my_cool_parser = argparse.ArgumentParser(description="Mock application to test Gooey's functionality")
  my_cool_parser.add_argument("filename", help="Name of the file you want to read")  # positional
  my_cool_parser.add_argument("outfile", help="Name of the file where you'll save the output")  # positional
  my_cool_parser.add_argument('-c', '--countdown', default=10, type=int, help='sets the time to count down from')
  my_cool_parser.add_argument("-s", "--showtime", action="store_true", help="display the countdown timer")
  my_cool_parser.add_argument("-d", "--delay", action="store_true", help="Delay execution for a bit")
  my_cool_parser.add_argument('--verbose', '-v', action='count')
  my_cool_parser.add_argument("-o", "--obfuscate", action="store_true", help="obfuscate the countdown timer!")
  my_cool_parser.add_argument('-r', '--recursive', choices=['yes', 'no'], help='Recurse into subfolders')
  my_cool_parser.add_argument("-w", "--writelog", default="No, NOT whatevs", help="write log to some file or something")
  my_cool_parser.add_argument("-e", "--expandAll", action="store_true", help="expand all processes")

  print('inside of main(), my_cool_parser =', my_cool_parser)
  args = my_cool_parser.parse_args()

  print(sys.argv)
  print(args.countdown)
  print(args.showtime)

  start_time = _time()
  print('Counting down from %s' % args.countdown)
  while _time() - start_time < args.countdown:
    if args.showtime:
      print('printing message at: %s' % _time())
    else:
      # md5 requires bytes on Python 3
      print('printing message at: %s' % hashlib.md5(str(_time()).encode()).hexdigest())
    _sleep(.5)
  print('Finished running the program. Byeeeeesss!')
Exemple #20
0
    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.
 
        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        """
        # The mutex/condition objects here expose a pthread-style API
        # (lock/unlock, wait(mutex), timedwait(mutex, t), signal), so a
        # `with` block cannot be used.
        self.mutex.lock()
        try:
            if self.maxsize > 0:
                if not block:
                    if len(self.queue) == self.maxsize:
                        raise Full
                elif timeout is None:
                    while len(self.queue) == self.maxsize:
                        self.not_full.wait(self.mutex)
                elif timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                else:
                    deadline = _time() + timeout
                    while len(self.queue) == self.maxsize:
                        time_left = deadline - _time()
                        if time_left <= 0.0:
                            raise Full
                        self.not_full.timedwait(self.mutex, time_left)
            self.queue.append(item)
            self.not_empty.signal()
        finally:
            self.mutex.unlock()
Exemple #21
0
    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        """
        with self.not_full:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() == self.maxsize:
                        raise Queue.Full
                elif timeout is None:
                    while self._qsize() == self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    deadline = _time() + timeout
                    while self._qsize() == self.maxsize:
                        time_left = deadline - _time()
                        if time_left <= 0.0:
                            raise Queue.Full
                        self.not_full.wait(time_left)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
 def put(self, item, block=True, timeout=None):
     """Queue.put variant that raises AbortRequested as soon as the
     abort_requested flag is observed, instead of blocking forever."""
     with self.not_full:
         if self.abort_requested:
             raise AbortRequested
         if self.maxsize > 0:
             if not block:
                 if self._qsize() == self.maxsize:
                     raise Q.Full
             elif timeout is None:
                 while self._qsize() == self.maxsize and not self.abort_requested:
                     self.not_full.wait()
                 if self.abort_requested:
                     raise AbortRequested
             elif timeout < 0:
                 raise ValueError("'timeout' must be a non-negative number")
             else:
                 deadline = _time() + timeout
                 while self._qsize() == self.maxsize and not self.abort_requested:
                     time_left = deadline - _time()
                     if time_left <= 0.0:
                         raise Q.Full
                     self.not_full.wait(time_left)
                 if self.abort_requested:
                     raise AbortRequested
         self._put(item)
         self.unfinished_tasks += 1
         self.not_empty.notify()
    def wait(self, timeout = None):
        """Release the lock and wait to be notified (or until *timeout*
        seconds pass), then re-acquire the lock."""
        if not self._is_owned():
            raise RuntimeError('cannot wait on un-acquired lock')
        waiter = _allocate_lock()
        waiter.acquire()
        self.__waiters.append(waiter)
        saved_state = self._release_save()
        try:
            if timeout is None:
                waiter.acquire()
                self._note('%s.wait(): got it', self)
            else:
                # Poll the waiter lock with exponential backoff capped at
                # 50 ms and at the time remaining before the deadline.
                deadline = _time() + timeout
                delay = 0.0005
                gotit = waiter.acquire(0)
                while not gotit:
                    remaining = deadline - _time()
                    if remaining <= 0:
                        break
                    delay = min(delay * 2, remaining, 0.05)
                    _sleep(delay)
                    gotit = waiter.acquire(0)

                if gotit:
                    self._note('%s.wait(%s): got it', self, timeout)
                else:
                    self._note('%s.wait(%s): timed out', self, timeout)
                    # A racing notify may have removed our waiter already.
                    try:
                        self.__waiters.remove(waiter)
                    except ValueError:
                        pass
        finally:
            self._acquire_restore(saved_state)
Exemple #24
0
    def wait(self, timeout = None):
        """Wait to be notified, or until *timeout* seconds elapse."""
        waiter = _allocate_lock()
        waiter.acquire()
        self.__waiters.append(waiter)
        saved_state = self._release_save()
        try:
            if timeout is None:
                waiter.acquire()
            else:
                # Poll the waiter lock with exponential backoff capped at
                # 50 ms and at the time remaining before the deadline.
                deadline = _time() + timeout
                delay = 0.0005
                gotit = waiter.acquire(0)
                while not gotit:
                    remaining = deadline - _time()
                    if remaining <= 0:
                        break
                    delay = min(delay * 2, remaining, 0.05)
                    _sleep(delay)
                    gotit = waiter.acquire(0)

                if not gotit:
                    # A racing notify may have removed our waiter already.
                    try:
                        self.__waiters.remove(waiter)
                    except ValueError:
                        pass
        finally:
            self._acquire_restore(saved_state)
Exemple #25
0
    def put(self, item, block=True, timeout=None, front=False):
        """Put an item into the queue, at either end.

        If the optional argument front is True, then the new item
        is inserted at the front of the queue, rather than
        at the back.  This is useful for high-priority items.

        See queue.Queue.put() for remaining documentation.
        """
        with self.not_full:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() == self.maxsize:
                        raise Full
                elif timeout is None:
                    while self._qsize() == self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                else:
                    deadline = _time() + timeout
                    while self._qsize() == self.maxsize:
                        time_left = deadline - _time()
                        if time_left <= 0.0:
                            raise Full
                        self.not_full.wait(time_left)
            inserter = self._putleft if front else self._put
            inserter(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
 def join(self, timeout=None):
     """Wait until the thread terminates, or until *timeout* seconds pass.

     Fix: the original acquired ``self.__block`` without a try/finally,
     so an exception while waiting (e.g. KeyboardInterrupt) leaked the
     lock and deadlocked every subsequent join().  The asserts are kept
     as-is so callers catching AssertionError keep working.
     """
     assert self.__initialized, "Thread.__init__() not called"
     assert self.__started, "cannot join thread before it is started"
     assert self is not currentThread(), "cannot join current thread"
     if __debug__:
         if not self.__stopped:
             self._note("%s.join(): waiting until thread stops", self)
     self.__block.acquire()
     try:
         if timeout is None:
             while not self.__stopped:
                 self.__block.wait()
             if __debug__:
                 self._note("%s.join(): thread stopped", self)
         else:
             deadline = _time() + timeout
             while not self.__stopped:
                 delay = deadline - _time()
                 if delay <= 0:
                     if __debug__:
                         self._note("%s.join(): timed out", self)
                     break
                 self.__block.wait(delay)
             else:
                 # while/else: the thread stopped before the deadline.
                 if __debug__:
                     self._note("%s.join(): thread stopped", self)
     finally:
         self.__block.release()
 def toFile(self, filename):
     """
     Save the suffix array instance including all features attached in
     filename. Accept any filename following the _open conventions,
     for example if it ends with .gz the file created will be a compressed
     GZip file.

     Fixes: Python 2 ``print >> _stderr`` converted to print(file=...);
     the file descriptor is now closed in a finally block so errors while
     pickling no longer leak it.
     """
     start = _time()
     # NOTE(review): the payload is a pickle stream (bytes); mode "w" is
     # kept because _open's conventions are defined elsewhere — confirm
     # whether "wb" is expected for binary output.
     fd = _open(filename, "w")

     savedData = [self.string, self.unit, self.voc, self.vocSize, self.SA, self.features]

     # Each attached feature contributes its values and its default.
     for featureName in self.features:
         featureValues = getattr(self, "_%s_values" % featureName)
         featureDefault = getattr(self, "%s_default" % featureName)

         savedData.append((featureValues, featureDefault))

     try:
         fd.write(_dumps(savedData, _HIGHEST_PROTOCOL))
         fd.flush()
         try:
             self.sizeOfSavedFile = getsize(fd.name)
         except OSError:  # if stdout is used
             self.sizeOfSavedFile = "-1"
     finally:
         fd.close()
     self.toFileTime = _time() - start
     if _trace:
         print("toFileTime %.2fs" % self.toFileTime, file=_stderr)
     if _trace:
         print("sizeOfSavedFile %sb" % self.sizeOfSavedFile, file=_stderr)
Exemple #28
0
    def clear_headspace(self):
        """
        Run "headspace" gas to pass headspace volume * 5 L of air.

        Headspace is vessel capacity minus current liquid volume; the purge
        duration in minutes scales it by hs_purge_factor at the maximum
        main-gas flow rate.  Prints a countdown on one terminal line.
        """
        headspace = self.reactor_ctx.vessel_capacity - self.volume
        t_min = headspace / self.reactor_ctx.main_gas_max * self.test_ctx.hs_purge_factor

        self.app.login()
        # Presumably puts pH and DO control in a manual/off mode (2, 0, 0)
        # so the controllers don't fight the forced gas flow — TODO confirm
        # against the app API.
        self.app.setph(2, 0, 0)
        self.app.setdo(2, 0, 0)
        self.set_gas(1, self.reactor_ctx.main_gas_max)

        now = _time()
        end = now + 60 * t_min
        while True:
            left = end - _time()
            left = max(left, 0)
            if left < 15:
                # Final stretch: print the countdown once, sleep out the
                # remainder (skipped when exactly 0), and stop looping.
                if left:
                    self.print("\r                                          ", end="")
                    self.print("\rHeadspace purge: %s seconds remain" % (int(end - _time())), end="")
                    _sleep(left)
                break
            else:
                # NOTE(review): sleeping int(left) % 15 looks like an attempt
                # to align status updates to 15 s boundaries — verify intent.
                _sleep(int(left) % 15)
                self.print("\r                                          ", end="")
                self.print("\rHeadspace purge: %s seconds remain" % (int(end - _time())), end="")
                _sleep(1)

        self.print("\nPurge Finished")
        self.app.login()
        self.set_gas(2, 0)
    def _read_until_with_select(self, match, timeout=None):
        """Read until a given string is encountered or until timeout.

        The timeout is implemented using select.select().

        Returns cooked data up to and including *match*; on timeout or
        EOF it falls back to whatever read_very_lazy() can supply.
        """
        n = len(match)
        self.process_rawq()
        # Fast path: the match may already be in the cooked queue.
        i = self.cookedq.find(match)
        if i >= 0:
            i = i+n
            buf = self.cookedq[:i]
            self.cookedq = self.cookedq[i:]
            return buf
        # s_reply is the select() result meaning "self is readable";
        # s_args are the arguments, optionally extended with a timeout.
        s_reply = ([self], [], [])
        s_args = s_reply
        if timeout is not None:
            s_args = s_args + (timeout,)
            time_start = _time()
        while not self.eof and select.select(*s_args) == s_reply:
            # Rescan starting n-1 chars before the tail, so a match that
            # straddles the old/new data boundary is still found.
            i = max(0, len(self.cookedq)-n)
            self.fill_rawq()
            self.process_rawq()
            i = self.cookedq.find(match, i)
            if i >= 0:
                i = i+n
                buf = self.cookedq[:i]
                self.cookedq = self.cookedq[i:]
                return buf
            if timeout is not None:
                # Shrink the remaining timeout for the next select() call.
                elapsed = _time() - time_start
                if elapsed >= timeout:
                    break
                s_args = s_reply + (timeout-elapsed,)
        return self.read_very_lazy()
Exemple #30
0
    def put(self, item, block=True, timeout=None):
        """Add *item* to the queue, honoring the block/timeout contract.

        block=True, timeout=None  -- wait as long as needed for a free slot.
        block=True, timeout=T     -- wait at most T seconds, then raise Full.
        block=False               -- raise Full immediately when no slot is
                                     free (timeout is ignored in that case).
        A negative timeout raises ValueError.
        """
        self.not_full.acquire()
        try:
            if block:
                if timeout is None:
                    # Wait indefinitely; consumers signal not_full on get.
                    while self._full():
                        self.not_full.wait()
                else:
                    if timeout < 0:
                        raise ValueError("'timeout' must be a positive number")
                    deadline = _time() + timeout
                    while self._full():
                        time_left = deadline - _time()
                        if time_left <= 0.0:
                            raise Full
                        self.not_full.wait(time_left)
            elif self._full():
                # Non-blocking mode with no free slot: fail fast.
                raise Full
            self._put(item)
            self.not_empty.notify()
        finally:
            self.not_full.release()
Exemple #31
0
from typing import Union

import numba
import numpy as _np
import numpy as np
from numpy.core.multiarray import ndarray
from scipy.spatial import Delaunay
from skimage.draw import polygon
from skimage.io import imread
from skimage.transform import pyramid_reduce

from triangler.color import ColorMethod
from triangler.edges import EdgePoints, EdgeMethod
from triangler.sampling import SampleMethod

_np.random.seed(int(_time()))
_warnings.filterwarnings("ignore")


@numba.jit(fastmath=True, parallel=True)
def process(
    img: Union[ndarray, str],
    coloring: ColorMethod,
    sampling: SampleMethod,
    edging: EdgeMethod,
    points: int,
    blur: int,
    reduce: bool,
) -> np.array:
    """Triangulate an image (array or file path) into a low-poly rendering."""
    # Accept either an in-memory image array or a path to load from disk.
    if isinstance(img, str):
        img = imread(img)
    # NOTE(review): the body ends here in this file even though the
    # signature promises an np.array return — the function appears
    # truncated. Also, numba.jit over skimage calls will presumably fall
    # back from nopython mode — confirm.
def time():
    """Current time, delegating to `mocked_time_fn` when one is installed."""
    return mocked_time_fn() if mocked_time_fn else _time()
Exemple #33
0
    return "localhost" not in t


def map_to_list(*args) -> list:
    """Run ``map`` over *args* and collect the results into a list."""
    return [*map(*args)]


def safe_int(idx) -> int:
    """Return *idx* as an int; genuine ints pass through unconverted."""
    return idx if isinstance(idx, int) else int(idx)


# Removes every character outside [a-zA-Z0-9] (underscores included).
# TODO(review): original note asked whether only whitespace should go.
_sub = _compile(r"([^\w]|_)").sub


def sanitize(x: str) -> str:
    """Lower-case *x* with all non-alphanumeric characters removed.

    Named function instead of a lambda assignment (PEP 8 E731) so it
    carries a real __name__ in tracebacks and room for documentation.
    """
    return _sub("", x).strip().lower()


def js_time() -> float:
    """Current epoch time in milliseconds (JavaScript Date.now() scale)."""
    return _time() * 1e3


def get_client_ip(headers: dict, remote_addr: str, *, num_proxies: int = 1) -> str:
    """Best-effort client IP from X-Forwarded-For, else *remote_addr*.

    With several comma-separated hops, trusts the rightmost *num_proxies*
    entries (our own proxies) and returns the address just before them,
    i.e. the one added by the client-facing proxy.  Returns "" when no
    usable address can be determined (missing/empty/malformed input).
    """
    try:
        xfwd = headers.get("x-forwarded-for", remote_addr)
        if not xfwd:
            return ""
        hops = [part.strip() for part in xfwd.split(",") if part.strip()]
        if len(hops) == 1:
            return hops[0]
        # Entries appended by trusted proxies sit at the end; the real
        # client address is the one immediately before them.
        return hops[-(1 + num_proxies)]
    except Exception:
        # Narrowed from a bare except: still swallows AttributeError /
        # IndexError from malformed input, but not KeyboardInterrupt etc.
        return ""
Exemple #34
0
    def _expect_with_poll(self, expect_list, timeout=None):
        """Read until one from a list of a regular expressions matches.

        This method uses select.poll() to implement the timeout.

        Returns (index, match, text) for the first pattern that matches,
        or (-1, None, leftover) on timeout; raises EOFError when the
        connection closed with nothing left to return.
        """
        re = None
        expect_list = expect_list[:]
        indices = range(len(expect_list))
        for i in indices:
            if not hasattr(expect_list[i], "search"):
                # Lazily import the re module into the local name only
                # when a plain string needs compiling.
                if not re: import re
                expect_list[i] = re.compile(expect_list[i])
        call_timeout = timeout
        if timeout is not None:
            time_start = _time()
        self.process_rawq()
        m = None
        # Fast path: one of the patterns may already match cooked data.
        for i in indices:
            m = expect_list[i].search(self.cookedq)
            if m:
                e = m.end()
                text = self.cookedq[:e]
                self.cookedq = self.cookedq[e:]
                break
        if not m:
            poller = select.poll()
            poll_in_or_priority_flags = select.POLLIN | select.POLLPRI
            poller.register(self, poll_in_or_priority_flags)
            while not m and not self.eof:
                try:
                    # poll() takes milliseconds (or None for no timeout).
                    ready = poller.poll(None if timeout is None
                                        else 1000 * call_timeout)
                except select.error as e:
                    if e.errno == errno.EINTR:
                        # Interrupted by a signal: shrink the timeout and
                        # retry instead of failing.
                        if timeout is not None:
                            elapsed = _time() - time_start
                            call_timeout = timeout-elapsed
                        continue
                    raise
                for fd, mode in ready:
                    if mode & poll_in_or_priority_flags:
                        self.fill_rawq()
                        self.process_rawq()
                        for i in indices:
                            m = expect_list[i].search(self.cookedq)
                            if m:
                                e = m.end()
                                text = self.cookedq[:e]
                                self.cookedq = self.cookedq[e:]
                                break
                if timeout is not None:
                    elapsed = _time() - time_start
                    if elapsed >= timeout:
                        break
                    call_timeout = timeout-elapsed
            poller.unregister(self)
        if m:
            return (i, m, text)
        text = self.read_very_lazy()
        if not text and self.eof:
            raise EOFError
        return (-1, None, text)
Exemple #35
0
class TextureLoader(object):
    """Placeholder for a future texture-loading implementation."""
    pass


if __name__ == "__main__":

    # Smoke test: load a mesh from an OBJ file and print basic stats
    # (Python 2 print statements throughout).
    name = "TEST"
    filename = "../res/cube.obj"
    #filename = "res/Birch1.obj"
    #filename = "res/stall.obj"
    #filename = "res/dragon.obj"

    test_loader = ModelLoader()

    # Time a single load_mesh call.
    from time import time as _time
    time_start = _time()
    data = test_loader.load_mesh(name, filename)
    print data
    time_end = _time()
    print time_end - time_start

    for name, mesh in data.iteritems():
        print name, mesh.get_vertex_count()
        print mesh.vertices[0]

    # Load a second model into the same loader, then list all cached meshes.
    name = "TEST"
    filename = "../res/Birch1.obj"
    data = test_loader.load_mesh(name, filename)

    for name, bundle in test_loader.iter_meshes():
        print name, len(bundle)
Exemple #36
0
    def makeObs(self, t_step=1, \
            delays=True, \
            dopplerPhaseCor=False, \
            dopplers=False, \
            staz='all'):
        '''
        Parse vex-file and create obs-objects for vint.

        t_step          -- time step in seconds between observation epochs
        delays          -- gate for the Doppler-phase-correction section
        dopplerPhaseCor -- also build geocentric Doppler-correction objects
        dopplers        -- appears unused in this body (kept for interface
                           compatibility — TODO confirm)
        staz            -- list of station short names to process, or 'all'
        '''

        if self.showTiming:
            tic = _time()

        # not 'all'? check that it's a list then:
        if staz != 'all' and type(staz) != type([]):
            raise Exception('staz should be \'all\' or a list')
        if staz != 'all':
            staz = [st.title() for st in staz]  # make 'em all St

        # initialize obs-objects:
        obsz = []

        # set input switches for delays
        #        inp_swchs = self.inp.get_section('Switches')
        # get all sections!
        inp_swchs = self.inp.get_section('all')
        inp_swchs['delay_calc'] = True
        inp_swchs['uvw_calc'] = True
        inp_swchs['sc_rhophitheta'] = True
        # turn off forced eph recalculation - it's done one when run self.updates()
        inp_swchs['sc_eph_force_update'] = False

        # RadioAstron-Ground session? Get tracking station then:
        # [could be 'Pu' or 'Gt']:
        if 'Ra' in self.stations.keys():
            if 'Pu' in self.stations.keys():
                sta_ra_ts = self.stations['Pu']
            elif 'Gt' in self.stations.keys():
                sta_ra_ts = self.stations['Gt']
            else:
                print 'could not guess RadioAstron tracking station. set to Pu.'
                sta_ra_ts = 'PUSHCHIN'

        # known (deep space) spacecraft:
        spacecraft = ['vex', 'mex', 'rosetta', 'her', 'gaia', 'ce3', 'mro']

        # obs-objects for delay calculation:
        # one obs per (station, source) pair, typed by source category.
        for staSh, sta in self.stations.iteritems():
            # check whether this station is actually wanted:
            if staz == 'all' or staSh in staz:
                for sou, sou_radec in self.sources.iteritems():
                    # source type:
                    if sou.lower() == 'ra':
                        obs_type = 'R'  # radioastron observations
                    elif sou.lower()[0:2] == 'pr' or sou.lower()[0:2] == 'pg':
                        obs_type = 'G'  # gnss observations
                    elif sou.lower() in spacecraft:
                        obs_type = 'S'  # vex/mex/herschel/gaia/ce3 observations
                    else:
                        obs_type = 'C'  # calibrator observations
                    # append
                    if staSh != 'Ra':
                        obsz.append( obs([self.inp.phase_center, sta], \
                                         sou, obs_type, \
                                         self.exp_name, sou_radec, inp=inp_swchs) )
                    else:
                        # if RA was observing, add Pu or Gt to the station list,
                        # it'll be used for calculating formatter time offset for
                        # the RA downlink stream
                        obsz.append( obs([self.inp.phase_center, sta, sta_ra_ts], \
                                         sou, obs_type, \
                                         self.exp_name, sou_radec, inp=inp_swchs) )

        # set input switches for Doppler correction
        if delays and dopplerPhaseCor:
            #            inp_swchs = self.inp.get_section('Switches') # reset to default Falses
            inp_swchs = self.inp.get_section('all')  # reset to default Falses
            inp_swchs['doppler_calc'] = True
            inp_swchs['sc_eph_force_update'] = False

            # obs-objects for GC Doppler correction calculation:
            for sou, sou_radec in self.sources.iteritems():
                # source type:
                if sou.lower() == 'ra':
                    obs_type = 'R'  # radioastron observations
                elif sou.lower()[0:2] == 'pr' or sou.lower()[0:2] == 'pg':
                    obs_type = 'G'  # gnss observations
                elif sou.lower() in spacecraft:
                    obs_type = 'S'  # vex/mex/rosetta/herschel/gaia/ce3 observations
                else:
                    obs_type = 'C'  # calibrator observations
                    continue  # no correction applies
                # append
                # phase center = 'GEOCENTR'
                obsz.append( obs([self.inp.phase_center], sou, obs_type, \
                                     self.exp_name, sou_radec, inp=inp_swchs) )
        ''' parse scans '''
        for s in self.vex['SCHED']:
            scan = self.vex['SCHED'][s].getall('station')
            t_scan_start = \
                datetime.datetime.strptime(self.vex['SCHED'][s]['start'], \
                                                      '%Yy%jd%Hh%Mm%Ss')
            mode = self.mods[self.vex['SCHED'][s]['mode']]
            # multiple sources per scan are possible (mult.ph.cen, off-beamGNSS)
            sources = self.vex['SCHED'][s].getall('source')
            for sta in scan:
                st = sta[0]  # sta short name
                # offset from t_scan_start:
                #                beg = int(sta[1].split()[0])
                # nominal scan start time is wanted instead
                beg = datetime.timedelta(seconds=0)
                t_start = t_scan_start + beg
                scanLength = int(sta[2].split()[0])
                end = datetime.timedelta(seconds=scanLength)
                t_stop = t_scan_start + end

                # add the scan to the proper obs object:
                # [delay calculation]
                N_sec = (end - beg).total_seconds()
                #                scanLength = max(scanLength, N_sec)
                nobs = N_sec / t_step + 1  # Number of observations
                # check if this station is desired (I am Venused, I am fired):
                if st in self.stations.keys():
                    # if scan is shorter than ~1.5 minute, use t_step=1s
                    step = 1 if nobs < 10 and t_step != 1 else t_step
                    for sou in sources:
                        [ob.addScan(t_start,step=step,stop=t_stop,freq=mode) \
                             for ob in obsz \
                                 if not ob.inp['doppler_calc'] and\
                                    ob.sta[1]==self.stations[st] and \
                                    ob.source==sou]

            # [doppler correction calculation]
            # should be done for one 'station' only (Geocentre)
            nobs = scanLength / t_step + 1  # use max N_sec
            # if 1-way
            step = 1 if nobs < 10 and t_step != 1 else t_step
            for sou in sources:
                [ob.addScan(t_start, step=step, stop=t_stop) \
                     for ob in obsz \
                         if ob.source==sou and ob.sou_type!='C' and \
                            ob.inp['doppler_calc'] and \
                            (self.inp.dop_model=='bary1way' or \
                             self.inp.dop_model=='geo1way')]
            # 2(3)-way
            step = 1 if nobs < 10 and t_step != 1 else t_step
            for sou in sources:
                [ob.addScan(t_start, step=step, stop=t_stop) \
                     for ob in obsz \
                         if ob.source==sou and ob.sou_type!='C' and \
                            ob.inp['doppler_calc'] and \
                            self.inp.dop_model=='bary3way']
        ''' remove empty obs-objects '''
        obsz = [ob for ob in obsz if len(ob.tstamps) > 0]

        self.obsz = obsz

        if self.showTiming:
            toc = _time()
            print 'Creating obs-objects took {:.1f} seconds.'.format(toc - tic)
Exemple #37
0
    def get_time(self):
        """Return the current wall-clock time via time.time()."""
        if _debug: TaskManager._debug("get_time")

        # return the real time
        return _time()
def _fit_model_mxnet(model, data_iter, valid_iter, max_iterations, num_gpus, verbose):
    """Train *model* with Adam for *max_iterations* epochs over *data_iter*.

    Optionally evaluates on *valid_iter* each epoch and, when *verbose*,
    prints a fixed-width progress table.  Returns the metrics dict from
    the final epoch ('train_loss'/'train_acc', plus 'valid_loss'/
    'valid_acc' when validating); returns {} when max_iterations == 0.
    """
    from time import time as _time

    model.bind(data_shapes=data_iter.provide_data,
               label_shapes=data_iter.provide_label)
    model.init_params(initializer=_mx.init.Xavier())
    model.init_optimizer(optimizer='adam',
                         optimizer_params={'learning_rate': 1e-3,
                                           'rescale_grad': 1.0})

    if verbose:
        # Print progress table header
        column_names = ['Iteration', 'Train Accuracy', 'Train Loss']
        if valid_iter:
            column_names += ['Validation Accuracy', 'Validation Loss']
        column_names.append('Elapsed Time')
        num_columns = len(column_names)
        column_width = max(len(name) for name in column_names) + 2
        hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
        print(hr)
        print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1))
        print(hr)

    # Initialized before the loop so `return log` is safe even when
    # max_iterations == 0 (previously raised UnboundLocalError).
    log = {}

    begin = _time()
    for iteration in range(max_iterations):
        log = {
            'train_loss': 0.,
            'train_acc': 0.
        }

        # Training iteration: averages per-batch loss/accuracy over all
        # batches (scaled by GPU count).
        data_iter.reset()
        train_batches = float(data_iter.num_batches) * max(num_gpus, 1)
        for batch in data_iter:
            model.forward_backward(batch)
            loss, acc, loss_per_seq, acc_per_seq = model.get_outputs()
            log['train_loss'] += _mx.nd.sum(loss).asscalar() / train_batches
            log['train_acc'] += _mx.nd.sum(acc).asscalar() / train_batches
            model.update()

        # Validation iteration
        if valid_iter is not None:
            valid_num_seq = valid_iter.num_rows
            valid_metrics = model.iter_predict(valid_iter)
            valid_metrics = [(_mx.nd.sum(m[0][2]).asscalar(),
                              _mx.nd.sum(m[0][3]).asscalar())
                             for m in valid_metrics]
            valid_loss, valid_acc = zip(*valid_metrics)
            log['valid_loss'] = sum(valid_loss) / valid_num_seq
            log['valid_acc'] = sum(valid_acc) / valid_num_seq

        if verbose:
            elapsed_time = _time() - begin
            if valid_iter is None:
                # print progress row without validation info
                print("| {cur_iter:<{width}}| {train_acc:<{width}.3f}| {train_loss:<{width}.3f}| {time:<{width}.1f}|".format(
                          cur_iter = iteration + 1, train_acc = log['train_acc'], train_loss = log['train_loss'],
                          time = elapsed_time, width = column_width-1))
            else:
                # print progress row with validation info
                print("| {cur_iter:<{width}}| {train_acc:<{width}.3f}| {train_loss:<{width}.3f}"
                      "| {valid_acc:<{width}.3f}| {valid_loss:<{width}.3f}| {time:<{width}.1f}| ".format(
                          cur_iter = iteration + 1, train_acc = log['train_acc'], train_loss = log['train_loss'],
                          valid_acc = log['valid_acc'], valid_loss = log['valid_loss'], time = elapsed_time,
                          width = column_width-1))

    if verbose:
        print(hr)
        print('Training complete')
        end = _time()
        print('Total Time Spent: %gs' % (end - begin))

    return log
Exemple #39
0
    def confirmation(self, pdu):
        """Handle a PDU arriving from the layer below.

        Registration Results update the foreign-device registration state;
        table-read acks are routed to the service access point; unicast
        and forwarded NPDUs are unwrapped into plain PDUs and passed
        upstream.  While not successfully registered with the BBMD,
        everything except Results is dropped.
        """
        if _debug: BIPForeign._debug("confirmation %r", pdu)

        # check for a registration request result
        if isinstance(pdu, Result):
            # if we are unbinding, do nothing
            if self.registrationStatus == -2:
                return

            ### make sure we have a bind request in process

            # make sure the result is from the bbmd
            if pdu.pduSource != self.bbmdAddress:
                if _debug:
                    BIPForeign._debug(
                        "    - packet dropped, not from the BBMD")
                return

            # save the result code as the status
            self.registrationStatus = pdu.bvlciResultCode

            # check for success
            if pdu.bvlciResultCode == 0:
                # schedule a re-registration before the TTL lapses
                self.install_task(_time() + self.bbmdTimeToLive)

            return

        # check the BBMD registration status, we may not be registered
        if self.registrationStatus != 0:
            if _debug: BIPForeign._debug("    - packet dropped, unregistered")
            return

        if isinstance(pdu, ReadBroadcastDistributionTableAck):
            # send this to the service access point
            self.sap_response(pdu)

        elif isinstance(pdu, ReadForeignDeviceTableAck):
            # send this to the service access point
            self.sap_response(pdu)

        elif isinstance(pdu, OriginalUnicastNPDU):
            # build a vanilla PDU
            xpdu = PDU(pdu.pduData,
                       source=pdu.pduSource,
                       destination=pdu.pduDestination,
                       user_data=pdu.pduUserData)

            # send it upstream
            self.response(xpdu)

        elif isinstance(pdu, ForwardedNPDU):
            # build a PDU with the source from the real source
            xpdu = PDU(pdu.pduData,
                       source=pdu.bvlciAddress,
                       destination=LocalBroadcast(),
                       user_data=pdu.pduUserData)

            # send it upstream
            self.response(xpdu)

        else:
            BIPForeign._warning("invalid pdu type: %s", type(pdu))
def cross_val_score(datasets,
                    model_factory,
                    model_parameters,
                    evaluator=_default_evaluator,
                    environment=None,
                    return_model=True):
    """
    Evaluate model performance via cross validation for a given set of
    parameters.

    Parameters
    ----------
    {param_data}
    {param_model_factory}

    model_parameters : dict
        The params argument takes a dictionary containing parameters that will
        be passed to the provided model factory.

    {param_evaluator}
    {param_environment}
    {param_return_model}
    {param_returns}

    See Also
    --------
    graphlab.toolkits.model_parameter_search.create

    Examples
    --------
    >>> url = 'https://static.turi.com/datasets/xgboost/mushroom.csv'
    >>> data = gl.SFrame.read_csv(url)
    >>> data['label'] = (data['label'] == 'p')
    >>> folds = gl.cross_validation.KFold(data, 5)
    >>> params = dict([('target', 'label'), ('max_depth', 5)])
    >>> job = gl.cross_validation.cross_val_score(folds,
                                                  gl.boosted_trees_classifier.create,
                                                  params)
    >>> print job.get_results()
    """
    _get_metric_tracker().track('cross_validation.cross_val_score')

    # Normalize the input into a KFold: a single SFrame becomes one
    # (train, None) fold; a (train, test) tuple becomes one fold;
    # anything else is assumed to be an iterable of folds.
    if isinstance(datasets, _graphlab.SFrame):
        folds = [(datasets, None)]
    elif isinstance(datasets, tuple):
        if len(datasets) != 2:
            raise ValueError("Provided dataset tuple must be train/test pair.")
        folds = [datasets]
    else:
        folds = datasets

    if (not isinstance(folds, KFold)):
        folds = KFold.from_list(folds)

    num_folds = folds.num_folds
    # Only tag per-fold metadata when there is more than one fold.
    include_fold_id = num_folds > 1

    params = []
    model_id = 0

    # One training task per fold; all share the same model parameters.
    for fold_id in range(num_folds):

        metadata = {'model_id': model_id}
        if include_fold_id:
            metadata['fold_id'] = fold_id
        model_id += 1

        params.append({
            'model_factory':
            _check_if_sklearn_factory(model_factory, model_parameters),
            'model_parameters':
            model_parameters,
            'folds':
            folds,
            'evaluator':
            evaluator,
            'return_model':
            return_model,
            'metadata':
            metadata
        })
    now = _datetime.now().strftime('%b-%d-%Y-%H-%M-%S-%f')

    # Unique-ish job-name suffix derived from the folds' identity and the
    # current time.
    random_hash = str(hash((id(folds), ("%.21f" % _time()))))[:8]

    job_name = "Cross-Validation-%s-%s" % (now, random_hash)

    return _map_job.create(_train_test_model,
                           parameter_set=params,
                           name=job_name,
                           environment=environment,
                           combiner_function=_combiner)
Exemple #41
0
    def updates(self):
        '''
        Run pre-processing checks and download/update auxiliary data:
        EOP section of the vex-file, GNSS sp3 files, spacecraft and
        RadioAstron ephemerides, plus EOP/meteo/iono catalogues (when the
        network is reachable).  Requires makeObs() to have populated
        self.obsz first.
        '''
        if self.showTiming:
            tic = _time()

        # add the eop section to the vex-file if it is not there:
        self.eop2vex(self.inp.cat_eop)

        # sources that are stated in the vex "header" need not necessarily be in
        # the scan list:
        try:
            sources_obs = [ob.source for ob in self.obsz]
        # NOTE(review): bare except hides the real error (AttributeError
        # when self.obsz is missing) — consider narrowing.
        except:
            raise Exception('Can\'t run update, run makeObs first')

        # get GNSS sp3 files (otherwise it could go mad if run in parallel..)
        for source in sources_obs:
            if source[0:2].lower() == 'pr' or source[0:2].lower() == 'pg':
                beg = datetime.datetime(self.date_start.year,
                                        self.date_start.month,
                                        self.date_start.day)
                end = datetime.datetime(self.date_stop.year,
                                        self.date_stop.month,
                                        self.date_stop.day)
                dd = (end - beg).days
                for d in range(dd + 1):
                    load_sp3(self.inp.sc_eph_cat, source, \
                             beg+datetime.timedelta(days=d), load=False)

        # spacecraft ephs, if they were observed:
        sou = ['VEX', 'MEX', 'HER', 'ROSETTA', 'RA', 'GAIA', 'CE3', 'MRO']
        soutyp = ['S', 'S', 'S', 'S', 'R', 'S', 'S', 'S']
        # gnss
        # make a list with GLONASS/GPS satellite names (with a margin, up to 40)
        for gg in range(1, 41):
            sou.append('PR' + '{:02d}'.format(gg))
            soutyp.append('G')
            sou.append('PG' + '{:02d}'.format(gg))
            soutyp.append('G')

        # For each observed spacecraft, find the full observed time span
        # across all scans, then queue an ephemeris-generation task.
        inps = []
        for sousou, stst in zip(sou, soutyp):
            if sousou in sources_obs:  # S/C was actually observed?
                t_begin, t_end = None, None
                for s in self.vex['SCHED']:
                    sou_scan = self.vex['SCHED'][s].getall('source')
                    if sousou in [ss.upper() for ss in sou_scan]:
                        t_start = datetime.datetime.strptime(\
                                            self.vex['SCHED'][s]['start'], \
                                                      '%Yy%jd%Hh%Mm%Ss')
                        if t_begin is None:
                            t_begin = t_start
                        N_sec = int(self.vex['SCHED'][s].\
                                    getall('station')[0][2].split()[0])
                        t_end = t_start + datetime.timedelta(seconds=N_sec)
                # make the ephem
                if t_begin is not None:
                    inp_swchs = self.inp.get_section('all')
                    inp_swchs['sc_eph_force_update'] = False
                    inps.append([stst, sousou, t_begin, t_end, \
                                  inp_swchs, False])

        n_sc = len(inps)  # number of sc for which to make ephs
        if n_sc > 0:
            if self.parallel:  # Parallel way
                n_cpu = multiprocessing.cpu_count()
                # create pool
                pool = multiprocessing.Pool(np.min((n_cpu, n_sc)))
                # asyncronously apply sc_eph to each of inps
                pool.map_async(sc_eph, inps)
                # close bassejn
                pool.close()  # we are not adding any more processes
                # tell it to wait until all threads are done before going on
                pool.join()
            else:  # Serial way
                for inp in inps:
                    sc_eph(inp)

        # check if RadioAstron observed, download/update its eph:
        sou = ['RA']

        for sousou in sou:
            if sousou in self.stations.values():  # RA observing scheduled?
                t_begin, t_end = None, None
                for s in self.vex['SCHED']:
                    sta = [st for st in self.vex['SCHED'][s].getall('station') \
                            if st[0].upper()==sousou]
                    if len(sta) > 0:
                        sta = sta[0]
                        t_start = datetime.datetime.strptime(\
                                            self.vex['SCHED'][s]['start'], \
                                                      '%Yy%jd%Hh%Mm%Ss')
                        if t_begin is None:
                            t_begin = t_start

                        N_sec = int(sta[2].split()[0])
                        t_end = t_start + datetime.timedelta(seconds=N_sec)
                # make the ephem
                if t_begin is not None:
                    inp_swchs = self.inp.get_section('all')
                    inp_swchs['sc_eph_force_update'] = False
                    load_sc_eph('C', sousou, t_begin, t_end, \
                                inp_swchs, load=False)
        ''' update/(down)load eops, meteo and iono data '''
        # check internet connection
        if internet_on():
            try:
                doup(self.inp.do_trp_calc, self.inp.do_ion_calc, \
                     self.inp.cat_eop, self.inp.meteo_cat, self.inp.ion_cat,\
                     self.date_start, self.date_stop, self.inp.iono_model)
            except Exception, err:
                print str(err)
                print 'catalogue updates failed'
def time() -> int:
    """Return the current Unix timestamp truncated to whole seconds."""
    now = _time()
    return int(now)
Exemple #43
0
    def _do_auto_corr(self):
        """Run the closed orbit-correction loop until it opens or beam is lost.

        Each iteration: read the orbit, compute corrector kick increments via
        the response matrix, filter them through the PID stage, and apply
        them, while collecting per-step timing statistics.  Publishes the
        loop state through ``run_callbacks``.
        """
        # Announce "loop closed" status to clients.
        self.run_callbacks('LoopState-Sts', 1)
        times, rets = [], []
        count = 0
        bpmsfreq = self._csorb.BPMsFreq
        zer = _np.zeros(self._csorb.nr_corrs, dtype=float)
        # PID error history: [current, previous, before-previous] samples.
        self._pid_errs = [zer, zer.copy(), zer.copy()]
        while self._loop_state == self._csorb.LoopState.Closed:
            if not self.havebeam:
                msg = 'ERR: Cannot Correct, We do not have stored beam!'
                self._update_log(msg)
                _log.info(msg)
                break
            # Every 100 iterations, report timing/return-code statistics in a
            # background thread and reset the accumulators.
            if count >= 100:
                _Thread(
                    target=self._print_auto_corr_info,
                    args=(times, rets), daemon=True).start()
                times, rets = [], []
                count = 0
            count += 1
            tims = []

            interval = 1/self._loop_freq
            use_pssofb = self.correctors.use_pssofb
            norbs = 1
            if use_pssofb:
                # Number of orbit acquisitions that fit in one loop period.
                norbs = max(int(bpmsfreq*interval), 1)

            tims.append(_time())
            orb = self.orbit.get_orbit(synced=True)
            for i in range(1, norbs):
                interval = 1/self._loop_freq
                # NOTE(review): this recomputation divides bpmsfreq by the
                # interval, while the pre-loop estimate above multiplies them
                # -- confirm which formula is intended.
                norbs = max(int(bpmsfreq/interval), 1)
                if i >= norbs:
                    break
                orb = self.orbit.get_orbit(synced=True)
            tims.append(_time())

            # Reference kicks against which the increments are applied.
            self._ref_corr_kicks = self.correctors.get_strength()
            tims.append(_time())

            dkicks = self.matrix.calc_kicks(orb)
            tims.append(_time())

            # Bad orbit reading -> open the loop and stop correcting.
            if not self._check_valid_orbit(orb):
                self._loop_state = self._csorb.LoopState.Open
                self.run_callbacks('LoopState-Sel', 0)
                break
            dkicks = self._process_pid(dkicks, interval)
            kicks = self._process_kicks(
                self._ref_corr_kicks, dkicks, apply_gain=False)
            tims.append(_time())
            if kicks is None:
                self._loop_state = self._csorb.LoopState.Open
                self.run_callbacks('LoopState-Sel', 0)
                break

            ret = self.correctors.apply_kicks(kicks)
            rets.append(ret)
            tims.append(_time())
            tims.append(tims[1])  # to compute total time - get_orbit
            times.append(tims)
            if ret == -2:
                # Fatal corrector error: open the loop.
                self._loop_state = self._csorb.LoopState.Open
                self.run_callbacks('LoopState-Sel', 0)
                break
            elif ret == -1:
                # means that correctors are not ready yet
                # skip this iteration
                continue

            # tims[0] - tims[-1] is minus the elapsed time of this iteration;
            # adding the loop period leaves the time still to wait.
            dtime = tims[0] - tims[-1]
            dtime += interval
            if not use_pssofb and dtime > 0:
                _sleep(dtime)
        msg = 'Loop opened!'
        self._update_log(msg)
        _log.info(msg)
        # Announce "loop open" status to clients.
        self.run_callbacks('LoopState-Sts', 0)
Exemple #44
0
                                        for d in dup}
        # remove duplicates from self.stations
        for _, vdup in self.duplicate_stations.iteritems():
            # keep the first short name only:
            for shn in vdup[1:]:
                del self.stations[shn]
        ''' get sources:'''
        self.sources = {}
        for s in vex['SOURCE']:
            c = SkyCoord(vex['SOURCE'][s]['ra'], \
                         vex['SOURCE'][s]['dec'], frame='icrs')
            #            self.sources[s] = [c.ra.hms, c.dec.dms]
            self.sources[s] = [c.ra.rad, c.dec.rad]

        if self.showTiming:
            toc = _time()
            print 'Initialising vispy took {:.1f} seconds.'.format(toc - tic)


    def makeObs(self, t_step=1, \
            delays=True, \
            dopplerPhaseCor=False, \
            dopplers=False, \
            staz='all'):
        ''' 
            Parse vex-file and create obs-objects for vint 
            staz - list of station short names to process or 'all'
        '''

        if self.showTiming:
            tic = _time()
Exemple #45
0
def run_single_fit(
        img,  # pylint: disable=too-many-statements, too-many-locals
        j2d,
        scale,
        do_degrees=None):
    """Run the body-model fit for one specific image.

    Parameters:
        img: the input image (only its shape is used, for rendering).
        j2d: (3, 91) array of 2D landmarks; rows are x, y, confidence.
        scale: scale factor applied to the landmarks before estimation and
            undone on the recovered depth.
        do_degrees: optional list of yaw angles (degrees) at which to render
            the fitted mesh; one output image per angle.

    Returns a tuple ``(param_dict, images)`` with the fitted model
    parameters and the rendered views.

    Estimation is delegated to four persistent worker processes (depth,
    shape, global rotation, body pose) that communicate through shared
    arrays and queues; they are spawned lazily on first call.
    """
    global _DEPTH_EST, _SHAPE_EST, _ROT_EST, _POSE_EST  # pylint: disable=global-statement
    assert j2d.shape[0] == 3
    assert j2d.shape[1] == 91
    # Split off the confidence row; keep only x, y coordinates.
    conf = j2d[2, :].copy().reshape((-1, ))
    j2d = j2d[:2, :].copy()
    j2d_norm = j2d * scale
    # Center the data.
    # NOTE(review): 513 looks like the estimators' input resolution -- confirm.
    mean = _np.mean(j2d_norm, axis=1)
    j2d_norm = (j2d_norm.T - mean + 513. / 2.).T
    _LOGGER.debug("Running fit...")
    if do_degrees is None:
        do_degrees = []
    # Prepare the estimators if necessary.
    # Each *_EST list holds: [0] worker process, [1] shared input landmarks,
    # [2] shared result array, [3] request queue, [4] lock guarding the
    # shared arrays, [5] acknowledgement/results-ready queue.
    if _DEPTH_EST is None:
        _DEPTH_EST = [
            None,
            _pymp.shared.array(j2d.shape, dtype='float32'),
            _pymp.shared.array((3, ), dtype='float32'),
            _pymp.shared.queue(),
            _pymp.shared.lock(),
            _pymp.shared.queue()
        ]
        _DEPTH_EST[0] = _multiprocessing.Process(target=_depth_estimator,
                                                 args=tuple(_DEPTH_EST[1:]))
        _DEPTH_EST[0].start()
        # Block until the worker signals it is initialized.
        _DEPTH_EST[5].get()
    if _ROT_EST is None:
        _ROT_EST = [
            None,
            _pymp.shared.array(j2d.shape, dtype='float32'),
            _pymp.shared.array((3, ), dtype='float32'),
            _pymp.shared.queue(),
            _pymp.shared.lock(),
            _pymp.shared.queue()
        ]
        _ROT_EST[0] = _multiprocessing.Process(target=_rot_estimator,
                                               args=tuple(_ROT_EST[1:]))
        _ROT_EST[0].start()
        _ROT_EST[5].get()
    if _SHAPE_EST is None:
        _SHAPE_EST = [
            None,
            _pymp.shared.array(j2d.shape, dtype='float32'),
            _pymp.shared.array((10, ), dtype='float32'),
            _pymp.shared.queue(),
            _pymp.shared.lock(),
            _pymp.shared.queue()
        ]
        _SHAPE_EST[0] = _multiprocessing.Process(target=_shape_estimator,
                                                 args=tuple(_SHAPE_EST[1:]))
        _SHAPE_EST[0].start()
        _SHAPE_EST[5].get()
    if _POSE_EST is None:
        _POSE_EST = [
            None,
            _pymp.shared.array(j2d.shape, dtype='float32'),
            _pymp.shared.array((69, ), dtype='float32'),
            _pymp.shared.queue(),
            _pymp.shared.lock(),
            _pymp.shared.queue()
        ]
        _POSE_EST[0] = _multiprocessing.Process(target=_pose_estimator,
                                                args=tuple(_POSE_EST[1:]))
        _POSE_EST[0].start()
        _POSE_EST[5].get()
    # Copy the data to the processes.
    with _POSE_EST[4]:
        _POSE_EST[1][...] = j2d_norm
    with _SHAPE_EST[4]:
        _SHAPE_EST[1][...] = j2d_norm
    with _ROT_EST[4]:
        _ROT_EST[1][...] = j2d_norm
    with _DEPTH_EST[4]:
        _DEPTH_EST[1][...] = j2d_norm
    # Run it.
    before_fit = _time()
    # Kick off all four estimators in parallel, then wait for each ack.
    _POSE_EST[3].put('go')
    _ROT_EST[3].put('go')
    _SHAPE_EST[3].put('go')
    _DEPTH_EST[3].put('go')
    _LOGGER.info("Running...")
    _DEPTH_EST[5].get()
    _POSE_EST[5].get()
    _SHAPE_EST[5].get()
    _ROT_EST[5].get()
    _LOGGER.info("Prediction available in %ss.", str(_time() - before_fit))
    # Extract the results.
    # pose: 3 global-rotation values followed by 69 body-pose values.
    pose = _np.zeros((72, ), dtype='float32')
    betas = _np.zeros((10, ), dtype='float32')
    trans = _np.zeros((3, ), dtype='float32')
    with _POSE_EST[4]:
        pose[3:] = _POSE_EST[2]
    with _SHAPE_EST[4]:
        betas[:] = _SHAPE_EST[2]
    with _ROT_EST[4]:
        pose[:3] = _ROT_EST[2]
    with _DEPTH_EST[4]:
        trans[:] = _DEPTH_EST[2]
    # Undo the landmark scaling on the estimated depth component.
    trans[2] *= scale
    # Get the projected landmark locations from the model.
    param_dict = {
        't': [0, 0, 0],
        'rt': [0, 0, 0],
        'f': _FLENGTH_GUESS,
        'pose': pose,
        'trans': trans,
        'betas': betas
    }
    # Optimize depth and global rotation.
    # dmin/dmax are depth bounds, later used for the renderer's clip planes.
    opt_globrot, opt_trans, dmin, dmax = _fit_rot_trans(
        _MODEL_NEUTRAL, j2d, [img.shape[1] // 2, img.shape[0] // 2], trans,
        pose, conf, _FLENGTH_GUESS)
    pose[:3] = opt_globrot
    trans[:] = opt_trans
    """
    proj_landmark_positions = get_landmark_positions(param_dict,
                                                     (513, 513),
                                                     _LANDMARK_MAPPING)
    # Get the right offset to match the original.
    offset = _np.mean(j2d, axis=1) - _np.mean(proj_landmark_positions, axis=1)
    """
    # Render the optimized mesh.
    _LOGGER.info("Rendering...")
    mesh = _copy(_TEMPLATE_MESH)
    model = _MODEL_NEUTRAL
    model.betas[:len(betas)] = betas
    # Get the full rendered mesh.
    model.pose[:] = pose
    model.trans[:] = trans
    mesh.v = model.r
    mesh.vc = [.7, .7, .9]
    base_mesh_v = mesh.v.copy()
    images = []
    # Render one image per requested yaw angle.
    for deg in do_degrees:
        mesh.v = _rotateY(base_mesh_v.copy(), deg)
        rn = create_renderer(w=img.shape[1],
                             h=img.shape[0],
                             near=dmin - 1.,
                             far=dmax + 1.,
                             rt=[0., 0., 0.],
                             t=[0., 0., 0.],
                             f=[_FLENGTH_GUESS, _FLENGTH_GUESS],
                             c=[img.shape[1] // 2,
                                img.shape[0] // 2])  # + offset[1]])
        light_yrot = _np.radians(120)
        im = (simple_renderer(rn=rn, meshes=[mesh], yrot=light_yrot) *
              255.).astype('uint8')
        images.append(im)
    #param_dict['j2d'] = (proj_landmark_positions.T + offset).T
    _LOGGER.info("Estimation done.")
    return param_dict, images
Exemple #46
0
def _strftime():
    return "%011.6f" % (_time() % 3600, )
Exemple #47
0
# Controller states; must stay in sync with _ctrlStateNames below.
CTRL_IDLE = 0  # nothing happening
CTRL_ACTIVE = 1  # working on an iocb
CTRL_WAITING = 2  # waiting between iocb requests (throttled)

# Human-readable names for the controller states above.
_ctrlStateNames = {
    0: 'IDLE',
    1: 'ACTIVE',
    2: 'WAITING',
}

# special abort error
# NOTE: intentionally shadows the builtin so callers can catch this
# module's TimeoutError instance.
TimeoutError = RuntimeError("timeout")


# current time formatting (short version)
def _strftime():
    """Return the seconds elapsed within the current hour as '%011.6f'."""
    return "%011.6f" % (_time() % 3600, )


# monotonically increasing IOCB identifier and the lock protecting it.
_identNext = 1
_identLock = threading.Lock()


class IOCB(DebugContents):

    _debug_contents = \
        ( 'args', 'kwargs'
        , 'ioState', 'ioResponse-', 'ioError'
        , 'ioController', 'ioServerRef', 'ioControllerRef', 'ioClientID', 'ioClientAddr'
def time_left(tmpl, nowtime=None):
    """Return the seconds until *tmpl* expires, or 0 if it already has.

    ``tmpl`` must carry ``_time_rcvd`` (receipt timestamp) and ``expires``
    (lifetime in seconds).  ``nowtime`` defaults to the current time.
    """
    if nowtime is None:
        nowtime = _time()
    elapsed = nowtime - tmpl._time_rcvd
    remaining = tmpl.expires - elapsed
    return remaining if remaining > 0 else 0
Exemple #49
0
 def close(self):
     m, s = divmod(_time() - self.t0, 60)
     h, m = divmod(m, 60)
     _stdout.write("{0} finished after {1:d}h {2:d}m {3:.1f}s\n".format(self.desc, int(h), int(m), s))
     _stdout.flush()
Exemple #50
0
def _strftime(cur_time=None):
    if cur_time is None:
        cur_time = _time()
    time_dec = str(round(cur_time - int(cur_time), 6))[1:]
    time_struct = localtime(cur_time)
    return strftime('%X' + time_dec + ' %x', time_struct)
Exemple #51
0
def _fit_model_mps(model, data_iter, valid_iter, max_iterations, verbose):
    """Train *model* on the MPS backend for *max_iterations* epochs.

    Iterates ``data_iter`` (and optionally ``valid_iter``) with
    double-buffered batches, accumulating per-epoch loss/accuracy into
    ``log``; when *verbose*, prints an ASCII progress table.  Returns the
    ``log`` dict of the final iteration.
    """
    from time import time as _time

    # NOTE: 'initalize' typo is the model object's actual API name.
    model.initalize_weights()

    if verbose:
        # Print progress table header
        column_names = ['Iteration', 'Train Accuracy', 'Train Loss']
        if valid_iter:
            column_names += ['Validation Accuracy', 'Validation Loss']
        column_names.append('Elapsed Time')
        num_columns = len(column_names)
        column_width = max(map(lambda x: len(x), column_names)) + 2
        hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
        print(hr)
        print(('| {:<{width}}' * num_columns + '|').format(*column_names,
                                                           width=column_width -
                                                           1))
        print(hr)

    begin = _time()
    for iteration in range(max_iterations):
        # Per-epoch metric accumulators; finish_batch adds into these.
        log = {
            'train_loss': 0.,
            'train_acc': 0.,
            'valid_loss': 0.,
            'valid_acc': 0.
        }

        # Training iteration
        data_iter.reset()
        train_batches = float(data_iter.num_batches)

        # Encapsulates the work for feeding a batch into the model.
        def start_batch(batch, batch_idx, is_train):
            input_data = batch.data
            labels = batch.labels
            weights = batch.weights
            # Sequences with all-zero weights are padding; exclude them.
            actual_seq_len = _np.sum(weights, axis=1)
            actual_batch_len = _np.sum((actual_seq_len > 0))
            if (is_train and actual_batch_len > 0):
                # Normalize weights by the number of real sequences.
                weights /= actual_batch_len

            # MPS model requires 4-dimensional NHWC input
            model_fn = model.train if is_train else model.predict_with_loss
            (fwd_out, loss_out) = model_fn(_np.expand_dims(input_data, 1),
                                           _np.expand_dims(labels, 1),
                                           _np.expand_dims(weights, 1))

            return {
                'labels': labels,
                'weights': weights,
                'actual_seq_len': actual_seq_len,
                'actual_batch_len': actual_batch_len,
                'fwd_out': fwd_out,
                'loss_out': loss_out
            }

        # Encapsulates the work for processing a response from the model.
        def finish_batch(batch_idx, is_train, labels, weights, actual_seq_len,
                         actual_batch_len, fwd_out, loss_out):
            # MPS yields 4-dimensional NHWC output. Collapse the H dimension,
            # which should have size 1.
            forward_output = _np.squeeze(fwd_out.asnumpy(), axis=1)
            loss_per_sequence = _np.squeeze(loss_out.asnumpy(), axis=1)

            batch_loss, batch_accuracy, acc_per_sequence = _calc_batch_metrics(
                forward_output, labels, weights, actual_seq_len,
                actual_batch_len, loss_per_sequence)
            if is_train:
                # Average over batches for training metrics.
                log['train_loss'] += batch_loss / train_batches
                log['train_acc'] += batch_accuracy / train_batches
            else:
                # Average over sequences for validation metrics.
                log['valid_loss'] += _np.sum(
                    loss_per_sequence) / valid_num_seq_in_epoch
                log['valid_acc'] += _np.sum(
                    acc_per_sequence) / valid_num_seq_in_epoch

        # Perform the following sequence of calls, effectively double buffering:
        # start_batch(1)
        # start_batch(2)    # Two outstanding batches
        # finish_batch(1)
        # start_batch(3)
        # finish_batch(2)
        # ...
        # start_batch(n)
        # finish_batch(n-1)
        # finish_batch(n)
        def perform_batches(data_iter, is_train=True):
            batch_count = 0
            prev_batch_info = None
            last_batch_info = None
            for batch in data_iter:
                (prev_batch_info,
                 last_batch_info) = (last_batch_info,
                                     start_batch(batch, batch_count, is_train))
                if batch_count > 0:
                    finish_batch(batch_count - 1, is_train, **prev_batch_info)
                batch_count += 1
            # Drain the last outstanding batch, if any.
            if batch_count > 0:
                finish_batch(batch_count - 1, is_train, **last_batch_info)

        perform_batches(data_iter, is_train=True)

        # Validation iteration
        if valid_iter is not None:
            valid_iter.reset()
            valid_num_seq_in_epoch = valid_iter.num_rows
            perform_batches(valid_iter, is_train=False)

        if verbose:
            elapsed_time = _time() - begin
            if valid_iter is None:
                # print progress row without validation info
                print(
                    "| {cur_iter:<{width}}| {train_acc:<{width}.3f}| {train_loss:<{width}.3f}| {time:<{width}.1f}|"
                    .format(cur_iter=iteration + 1,
                            train_acc=log['train_acc'],
                            train_loss=log['train_loss'],
                            time=elapsed_time,
                            width=column_width - 1))
            else:
                # print progress row with validation info
                print(
                    "| {cur_iter:<{width}}| {train_acc:<{width}.3f}| {train_loss:<{width}.3f}"
                    "| {valid_acc:<{width}.3f}| {valid_loss:<{width}.3f}| {time:<{width}.1f}| "
                    .format(cur_iter=iteration + 1,
                            train_acc=log['train_acc'],
                            train_loss=log['train_loss'],
                            valid_acc=log['valid_acc'],
                            valid_loss=log['valid_loss'],
                            time=elapsed_time,
                            width=column_width - 1))

    if verbose:
        print(hr)
        print('Training complete')
        end = _time()
        print('Total Time Spent: %gs' % (end - begin))

    # Metrics of the final epoch only.
    return log
def get_current_timestamp():
    """Return the current Unix time in whole milliseconds."""
    millis = _time() * 1_000
    return int(round(millis))
Exemple #53
0
    def _cpu_search(self):
        """Perform the exhaustive search using CPU resources.

        For every rotation, the scanning chain is rotated, FFT-based
        correlations give the clashing and interaction volumes, positions
        consistent with the distance restraints are counted, and the
        results are accumulated into ``self.data``.
        """

        d = self.data
        c = self.cpu_data

        # initialize the number of total sampled complexes and the number of
        # complexes consistent with exactly N restraints
        tot_complex = 0
        list_total_allowed = np.zeros(max(2, d['nrestraints'] + 1),
                                      dtype=np.float64)

        # initalize the time
        time0 = _time()

        for n in xrange(c['rotmat'].shape[0]):

            # rotate the scanning chain object. The rotation needs to be
            # inverted, as we are rotating the array, instead of the object.
            rotate_image3d(c['im_lsurf'], c['vlength'],
                           np.linalg.inv(c['rotmat'][n]), d['im_center'],
                           c['lsurf'])

            # calculate the clashing and interaction volume at every position
            # in space using FFTs.
            np.conj(rfftn(c['lsurf']), c['ft_lsurf'])
            c['clashvol'] = irfftn(c['ft_lsurf'] * c['ft_rcore'], s=c['shape'])
            c['intervol'] = irfftn(c['ft_lsurf'] * c['ft_rsurf'], s=c['shape'])

            # Calculate the accessible interaction space for the current
            # rotation. The clashing volume should not be too high, and the
            # interaction volume of a reasonable size
            np.logical_and(c['clashvol'] < c['max_clash'],
                           c['intervol'] > c['min_interaction'],
                           c['interspace'])

            # Calculate the number of complexes and multiply with the weight
            # for the orientation to correct for rotational/orientational bias
            tot_complex += c['weights'][n] * c['interspace'].sum()

            # if distance-restraints are available
            if self.distance_restraints:
                c['restspace'].fill(0)

                # determine the center of the distance-restraint consistent
                # spheres
                rest_center = d['restraints'][:, :3] - \
                        (np.mat(c['rotmat'][n]) * \
                        np.mat(d['restraints'][:,3:6]).T).T

                mindis = d['restraints'][:, 6]
                maxdis = d['restraints'][:, 7]
                # Mark the space that is consistent with the distance restraints
                distance_restraint(rest_center, mindis, maxdis, c['restspace'])

                # Multiply the interaction space with the distance-restraint
                # consistent space
                c['interspace'] *= c['restspace']

                # Now count which violation has been violated
                count_violations(rest_center, mindis, maxdis, c['interspace'],
                                 c['weights'][n], c['violations'])

            # To visualize the accessible interaction space, keep the maximum
            # number of consistent restraints found at every position in space
            np.maximum(c['interspace'], c['access_interspace'],
                       c['access_interspace'])

            # Keep track of the number of accessible complexes consistent with
            # EXACTLY N restraints. Again, correct for the
            # rotational/orientation bias
            list_total_allowed += c['weights'][n] *\
                        np.bincount(c['interspace'].ravel(),
                        minlength=(max(2, d['nrestraints']+1)))

            # Give the user information on progress if it is used interactively
            if _stdout.isatty():
                self._print_progress(n, c['nrot'], time0)

        # attach the output on the self.data dictionary
        # the accessible interaction space which will be visualized
        d['accessible_interaction_space'] = c['access_interspace']
        # the number of accessible complexes consistent with EXACTLY a certain
        # number of restraints. To account for this, the number of total
        # sampled complexes needs to be reduced by the number of complexes
        # consistent with 1 or more restraints
        d['accessible_complexes'] = [
            tot_complex - sum(list_total_allowed[1:])
        ] + list(list_total_allowed[1:])
        # the violation matrix
        d['violations'] = c['violations']
Exemple #54
0
    def _gpu_search(self):
        """Perform the exhaustive search on the GPU via OpenCL kernels.

        Mirrors ``_cpu_search``: for every rotation the scanning chain is
        rotated, FFT-based correlations yield the clashing and interaction
        volumes, restraint-consistent positions are accumulated, and the
        final histograms/violation matrix are copied back to ``self.data``.
        """

        # make shortcuts
        d = self.data
        g = self.gpu_data
        q = self.queue
        k = g['k']

        # initalize the total number of sampled complexes
        tot_complexes = cl_array.sum(g['interspace'], dtype=np.float32)

        # initialize time
        time0 = _time()

        # loop over all rotations
        for n in xrange(g['nrot']):

            # rotate the scanning chain object
            k.rotate_image3d(q, g['sampler'], g['im_lsurf'], self.rotations[n],
                             g['lsurf'], d['im_center'])

            # perform the FFTs and calculate the clashing and interaction volume
            k.rfftn(q, g['lsurf'], g['ft_lsurf'])
            k.c_conj_multiply(q, g['ft_lsurf'], g['ft_rcore'],
                              g['ft_clashvol'])
            k.irfftn(q, g['ft_clashvol'], g['clashvol'])

            k.c_conj_multiply(q, g['ft_lsurf'], g['ft_rsurf'],
                              g['ft_intervol'])
            k.irfftn(q, g['ft_intervol'], g['intervol'])

            # determine at every position if the conformation is a proper complex
            k.touch(q, g['clashvol'], g['max_clash'], g['intervol'],
                    g['min_interaction'], g['interspace'])

            if self.distance_restraints:
                k.fill(q, g['restspace'], 0)

                # determine the space that is consistent with a number of
                # distance restraints
                k.distance_restraint(q, g['restraints'], self.rotations[n],
                                     g['restspace'])

                # get the accessible interaction space also consistent with a
                # certain number of distance restraints
                k.multiply(q, g['restspace'], g['interspace'],
                           g['access_interspace'])

            # calculate the total number of complexes, while taking into
            # account orientational/rotational bias
            tot_complexes += cl_array.sum(g['interspace'],
                                          dtype=np.float32) * np.float32(
                                              self.weights[n])

            # take at every position in space the maximum number of consistent
            # restraints for later visualization
            cl_array.maximum(g['best_access_interspace'],
                             g['access_interspace'],
                             g['best_access_interspace'])

            # calculate the number of accessable complexes consistent with
            # EXACTLY N distance restraints
            k.histogram(q, g['access_interspace'], g['subhists'],
                        self.weights[n], d['nrestraints'])

            # Count the violations of each restraint for all complexes
            # consistent with EXACTLY N restraints
            k.count_violations(q, g['restraints'], self.rotations[n],
                               g['access_interspace'], g['viol_counter'],
                               self.weights[n])

            # inform user
            if _stdout.isatty():
                self._print_progress(n, g['nrot'], time0)

        # wait for calculations to finish
        self.queue.finish()

        # transfer the data from GPU to CPU
        # get the number of accessible complexes and reduce the subhistograms
        # to the final histogram
        access_complexes = g['subhists'].get().sum(axis=0)
        # account for the fact that we are counting the number of accessible
        # complexes consistent with EXACTLY N restraints
        access_complexes[0] = tot_complexes.get() - sum(access_complexes[1:])
        d['accessible_complexes'] = access_complexes
        d['accessible_interaction_space'] = g['best_access_interspace'].get()

        # get the violation submatrices and reduce it to the final violation
        # matrix
        d['violations'] = g['viol_counter'].get().sum(axis=0)
Exemple #55
0
 def _remaining_time(self):
     if self._endtime is None:
         return None
     return self._endtime - _time()
    def rb_minmax(self, depth, time_left, board, my_turn=True):
        """Depth-limited minimax search over the game board.

        Returns ``(best_move, score, best_new_loc)``; ``best_move`` is None
        when the state is final or the depth budget is exhausted.  Raises
        TimeoutError when the elapsed time exceeds *time_left* seconds.

        Board cells as written by this routine: 0 = free, 1 = this player,
        2 = the rival, -1 = a vacated (blocked) cell.  The board and the
        player/rival positions are mutated during the search and restored
        before returning (backtracking).
        """
        start = _time()
        is_final, score = self.is_final(my_turn, board)
        if is_final:  # no move left
            return None, score, None
        if depth == 0:
            # Depth budget exhausted: fall back to the heuristic evaluation.
            return None, self.state_score(my_turn, board), None
        # NOTE: my_turn flips between 1 and 0 via `1 - my_turn` below, so it
        # is compared against 1 here (True == 1 also matches the default).
        if my_turn == 1:
            # Maximizing player: try every direction from our location.
            best_move, max_score, best_new_loc = None, float('-inf'), None
            prev_loc = self.loc
            board[prev_loc] = -1
            for d in self.directions:
                # Abort the whole search when over the time budget.
                if _time() - start > time_left:
                    raise TimeoutError()
                i = prev_loc[0] + d[0]
                j = prev_loc[1] + d[1]
                if 0 <= i < len(board) and 0 <= j < len(
                        board[0]) and board[i][j] == 0:  # then move is legal
                    new_loc = (i, j)
                    board[new_loc] = 1
                    self.loc = new_loc
                    self.available -= 1
                    # Recurse with the remaining time budget and flipped turn.
                    _, score, _ = self.rb_minmax(depth=depth - 1,
                                                 time_left=time_left -
                                                 _time() + start,
                                                 board=board,
                                                 my_turn=1 - my_turn)
                    self.available += 1
                    board[new_loc] = 0
                    if score > max_score or max_score == float('-inf'):
                        best_move, max_score, best_new_loc = d, score, new_loc

            # Backtrack: restore our position on the board.
            self.loc = prev_loc
            board[self.loc] = 1
            return best_move, max_score, best_new_loc

        else:
            # Minimizing player: try every direction from the rival's location.
            best_move, min_score, best_new_loc = None, float('inf'), None
            prev_loc = self.rival_position
            board[prev_loc] = -1
            for d in self.directions:
                if _time() - start > time_left:
                    raise TimeoutError()
                i = prev_loc[0] + d[0]
                j = prev_loc[1] + d[1]
                if 0 <= i < len(board) and 0 <= j < len(
                        board[0]) and board[i][j] == 0:  # then move is legal
                    new_loc = (i, j)
                    self.rival_position = new_loc
                    board[new_loc] = 2
                    self.available -= 1
                    _, score, _ = self.rb_minmax(depth - 1,
                                                 time_left - _time() + start,
                                                 board, 1 - my_turn)
                    self.available += 1
                    board[new_loc] = 0

                    if score < min_score or min_score == float('inf'):
                        best_move, min_score, best_new_loc = d, score, new_loc

            # Backtrack: restore the rival's position on the board.
            self.rival_position = prev_loc
            board[self.rival_position] = 2
            return best_move, min_score, best_new_loc
Exemple #57
0
    def wait(self, timeout=None):
        """Wait until notified or until a timeout occurs.

        If the calling thread has not acquired the lock when this method is
        called, a RuntimeError is raised.

        This method releases the underlying lock, and then blocks until it is
        awakened by a notify() or notifyAll() call for the same condition
        variable in another thread, or until the optional timeout occurs. Once
        awakened or timed out, it re-acquires the lock and returns.

        When the timeout argument is present and not None, it should be a
        floating point number specifying a timeout for the operation in seconds
        (or fractions thereof).

        When the underlying lock is an RLock, it is not released using its
        release() method, since this may not actually unlock the lock when it
        was acquired multiple times recursively. Instead, an internal interface
        of the RLock class is used, which really unlocks it even when it has
        been recursively acquired several times. Another internal interface is
        then used to restore the recursion level when the lock is reacquired.

        """
        if not self._is_owned():
            raise RuntimeError("cannot wait on un-acquired lock")
        # Each waiter blocks on a fresh lock, acquired once here so that the
        # second acquire below blocks until notify() releases it.
        waiter = _allocate_lock()
        waiter.acquire()
        self.__waiters.append(waiter)
        saved_state = self._release_save()
        try:    # restore state no matter what (e.g., KeyboardInterrupt)
            if timeout is None:
                # Block indefinitely until a notifier releases our waiter.
                waiter.acquire()
                if __debug__:
                    self._note("%s.wait(): got it", self)
            else:
                # Balancing act:  We can't afford a pure busy loop, so we
                # have to sleep; but if we sleep the whole timeout time,
                # we'll be unresponsive.  The scheme here sleeps very
                # little at first, longer as time goes on, but never longer
                # than 20 times per second (or the timeout time remaining).
                endtime = _time() + timeout
                delay = 0.0005 # 500 us -> initial delay of 1 ms
                while True:
                    # Non-blocking probe: did a notifier release our waiter?
                    gotit = waiter.acquire(0)
                    if gotit:
                        break
                    remaining = endtime - _time()
                    if remaining <= 0:
                        break
                    # Exponential backoff, capped at 50 ms and at the
                    # remaining timeout.
                    delay = min(delay * 2, remaining, .05)
                    _sleep(delay)
                if not gotit:
                    if __debug__:
                        self._note("%s.wait(%s): timed out", self, timeout)
                    # Timed out: withdraw our waiter; a concurrent notify()
                    # may already have removed it, hence the ValueError guard.
                    try:
                        self.__waiters.remove(waiter)
                    except ValueError:
                        pass
                else:
                    if __debug__:
                        self._note("%s.wait(%s): got it", self, timeout)
        finally:
            # Always re-acquire the lock (restoring any RLock recursion level).
            self._acquire_restore(saved_state)
Exemple #58
0
 def now():
     """Return the current moment, converted through _unix2Date."""
     timestamp = _time()
     return _unix2Date(timestamp)
Exemple #59
0
    def packDelays(self, delay_type = 'group', \
              smoothing=True, ionPhaseCor=False, dopplerPhaseCor=False,\
              noIonDelay=False):
        '''
            Pack dudes into binary SFXC del-files and human-readable txt-files.

            delay_type      -- delay flavour to pack (default 'group')
            smoothing       -- smooth each observation's dude before packing
            ionPhaseCor     -- include ionospheric phase correction
            dopplerPhaseCor -- build per-scan phase corrections from s/c
                               Doppler frequency predictions
            noIonDelay      -- exclude the ionospheric delay contribution
            NOTE(review): flag semantics are inferred from how they are
            forwarded to packie() below -- confirm against packie().
        '''
        # load input sittings:
        inp = self.inp

        # experiment name:
        exp_name = self.exp_name

        # output directory:
        out_path = inp.out_path
        #        for ob in self.obsz:
        #            print ob.dude.delay[:,0]
        # get station names:
        stations = []
        # 'uniqueify'
        # (Doppler-only observations are excluded; first occurrence wins)
        [stations.append(ob.sta[1]) for ob in self.obsz \
                      if not ob.inp['doppler_calc'] and ob.sta[1] not in stations]

        # get station short names:
        # reverse lookup in the {short_name: full_name} station catalogue
        stations_short = []
        for st in stations:
            stations_short.append([k for k,v in self.stations.iteritems() \
                                    if v==st][0])

        # append duplicates:
        # every extra short name in vdup[1:] maps back to the same full name
        for kdup, vdup in self.duplicate_stations.iteritems():
            for key in vdup[1:]:
                stations.append(kdup)
                stations_short.append(key)

        # get short name for the phase center
        phase_center_short = shname(inp.phase_center, inp.shnames_cat,\
                                    inp.shnames_cat_igs)[0]

        # uplink station info (for text ouput?)
        #    if dopplerPhaseCor:
        #        uplink_sta = [ob.sta[1] for ob in obsz if ob.inp['doppler_calc']][0]
        #        uplink_sta_short = shname(uplink_sta, inp.shnames_cat)

        # load clock offsets/rates at stations from vex-file:
        clock = self.clocks()

        # smooth dudes
        if smoothing:
            if self.showTiming:
                tic = _time()
            if self.parallel:
                # one worker per core; smoothie() handles each observation
                # independently and map_async preserves input order
                n_cpu = multiprocessing.cpu_count()
                # create pool
                pool = multiprocessing.Pool(n_cpu)
                # asyncronously apply smoothie to each of obsz
                result = pool.map_async(smoothie, self.obsz)
                # close bassejn
                pool.close()  # we are not adding any more processes
                pool.join()  # wait until all threads are done before going on
                # get the ordered results back into obsz
                self.obsz = result.get()
            else:
                for ob in self.obsz:
                    ob.smoothDude(tstep=1)
            if self.showTiming:
                toc = _time()
                print 'Smoothing took {:.1f} seconds.'.format(toc - tic)

        if self.showTiming:
            tic = _time()

        # integrate freq predictions into phase correction on a scan basis
        # 2nd condition: durak-protection -- check that a s/c was actually observed
        sou_scans = {}  # init here for parallelisation
        if dopplerPhaseCor and len(
            [ob for ob in self.obsz if ob.sou_type != 'C']) > 0:
            # dictionary with [tstamps, gc doppler freq prediction] for each s/c
            dpc = {ob.source: np.array(zip(ob.tstamps, np.sum(ob.dude.doppler, 1))) \
                          for ob in self.obsz \
                           if len(ob.tstamps)>0 and ob.inp['doppler_calc']}
            # add int sec t stamps for interpolation
            # (seconds since the midnight preceding the first time stamp;
            #  runs past 86400 s for multi-day spans)
            for k in dpc.keys():
                t_0 = datetime.datetime(dpc[k][0,0].year, \
                                        dpc[k][0,0].month, dpc[k][0,0].day)
                t = np.array([t.hour*3600.0 + t.minute*60.0 + t.second + \
                                (t-t_0).days*86400.0 for t in dpc[k][:,0]])
                dpc[k] = np.column_stack((t, dpc[k]))
            # result: [int sec tstamps, datetime tstamps, gc doppler freq prediction]

    #        print dpc

    # list of s/c - dpc.keys()

    # scan start/stop times + source names
            sstz_dpc = np.array([ [[scanStartTime, scanStopTime, ob.source] \
                    for scanStartTime, scanStopTime in \
                            zip(ob.scanStartTimes,ob.scanStopTimes)] \
                    for ob in self.obsz if ob.inp['doppler_calc'] and len(ob.tstamps)>0 ])
            # flatten list, because it can be irregular
            sstz_dpc = flatten(sstz_dpc)

            if len(sstz_dpc) > 0:  # there actually exist non-empty scans
                # reshape
                sstz_dpc = np.reshape(sstz_dpc, (-1, 3))
                # sort according to 0th column, which is time stamp
                sstz_dpc = sstz_dpc[np.argsort(sstz_dpc[:, 0])]

            # make a dictionary out of sstz_dpc
            # {sou_name: [ [t_start, t_stop, [poly]],
            #               ...
            #              [t_start, t_stop, [poly]] ]}
#            sou_scans = {}
            for sc in dpc.keys():
                sou_scans[sc] = []
            for sstz in sstz_dpc:
                sc = sstz[2]
                # allocate space for phase polies and mu_t for rescaling
                sou_scans[sc].append([sstz[0], sstz[1], [], []])

    #        print sou_scans

    # make optimal poly interpolants for freq predictions
    # iterate over s/c
            for sc in sou_scans.keys():
                # iterate over each scan:
                for ii, scan in enumerate(sou_scans[sc]):
                    # create mask using using current scan t_start, t_stop
                    maska = np.ma.masked_outside(dpc[sc][:, 1], scan[0],
                                                 scan[1]).mask
                    # mask t [sec] -> masked np.ma.array
                    t = np.ma.array(dpc[sc][:, 0], mask=maska, dtype=float)
                    # -> np.array
                    t = np.ma.compressed(t)
                    # scale t for a more robust fit:
                    # (standardise to zero mean / unit std; mu_t is kept so the
                    #  fit can be rescaled back later)
                    mu_t = np.array([np.mean(t), np.std(t)])
                    t = (t - mu_t[0]) / mu_t[1]
                    # mask f [Hz] -> masked np.ma.array
                    f = np.ma.array(dpc[sc][:, 2], mask=maska, dtype=float)
                    # -> np.array
                    f = np.ma.compressed(f)
                    # subtract f at t[0] (to decrease dynamic range):
                    f -= f[0]
                    # make an optimal polyfit:
                    ffit = optimalFit(t,
                                      f,
                                      min_order=3,
                                      max_order=7,
                                      fit_type='poly')
                    # integrate it to a phase poly
                    pint = np.polyint(ffit.best_estimator_.coef_)
                    # save fit to phase by integrating p.best_estimator_.coef_
                    sou_scans[sc][ii][2] = pint
                    sou_scans[sc][ii][3] = mu_t


#            print sou_scans

# obs-obj w Doppler predictions for phase correction not needed further:
#    obsz_dop = [ob for ob in self.obsz if ob.inp['doppler_calc']]
        self.obsz = [ob for ob in self.obsz if not ob.inp['doppler_calc']]

        if self.parallel:
            n_cpu = multiprocessing.cpu_count()
            # create pool
            pool = multiprocessing.Pool(n_cpu)
            # asyncronously apply packie to each of inps
            # (one argument tuple per station; packie does the actual writing)
            inps = [(st, st_sh, self.obsz, phase_center_short, out_path, \
                     exp_name, sou_scans, ionPhaseCor, dopplerPhaseCor, \
                     noIonDelay, delay_type, clock, self.vex) \
                     for (st, st_sh) in zip(stations, stations_short)]
            pool.map_async(packie, inps)
            # close bassejn
            pool.close()  # we are not adding any more processes
            pool.join()  # wait until all threads are done before going on

        else:  # serial way
            # for each station stick all the data together, then sort it by datetime
            inps = [(st, st_sh, self.obsz, phase_center_short, out_path, \
                     exp_name, sou_scans, ionPhaseCor, dopplerPhaseCor, \
                     noIonDelay, delay_type, clock, self.vex) \
                     for (st, st_sh) in zip(stations, stations_short)]
            for inpu in inps:
                packie(inpu)

        if self.showTiming:
            toc = _time()
            print 'Output took {:.1f} seconds.'.format(toc - tic)
Exemple #60
0
 def time_ns():
     # type: () -> int
     """Nanoseconds since the Unix epoch as an int.

     Substitute for time.time_ns(): time.time() only resolves to roughly a
     microsecond, so the value is truncated to whole microseconds and then
     padded back up to nanoseconds.
     """
     micros = int(_time() * 10e5)
     return micros * 1000