Example #1
class XBeeTransparentListener(Thread):

    def __init__(self, xbee_serial):
        super().__init__()
        self.xbser = xbee_serial
        self.daemon = True
        self.stopped = False
        self._pause_lock = RLock()  # renamed so the lock does not shadow the pause() method below

    def run(self):
        while not self.stopped and self.xbser.is_open:
            with self._pause_lock:
                try:
                    line = self.xbser.readline()
                    if line:
                        print('>', line.strip())
                except Exception as ex:
                    print(str(ex))

    def stop(self):
        self.stopped = True

    def pause(self):
        self._pause_lock.acquire()

    def unpause(self):
        self._pause_lock.release()
Example #2
    def open_dynamic_queue(self):
        if self._disconnecting:
            self.logger.info("Connection factory disconnecting, aborting open_dynamic_queue")
            return
        else:
            self.logger.log(TRACE1, "open_dynamic_queue -> not disconnecting")

        if not self._is_connected:
            self.logger.log(TRACE1, "open_dynamic_queue -> _is_connected1 %s" % self._is_connected)
            self._connect()
            self.logger.log(TRACE1, "open_dynamic_queue -> _is_connected2 %s" % self._is_connected)

        dynamic_queue = self.mq.Queue(self.mgr, self.dynamic_queue_template,
            self.CMQC.MQOO_INPUT_SHARED)

        # A bit hackish, but there's no other way to get its name.
        dynamic_queue_name = dynamic_queue._Queue__qDesc.ObjectName.strip()

        lock = RLock()
        lock.acquire()
        try:
            self._open_dynamic_queues_cache[dynamic_queue_name] = dynamic_queue
        finally:
            lock.release()

        self.logger.log(TRACE1, "Successfully created a dynamic queue, descriptor [%s]" % (
            dynamic_queue._Queue__qDesc))

        return dynamic_queue_name
Example #3
class DataSignaler(object):
    def __init__(self, name, pruneFunc, data):
        super(DataSignaler, self).__init__()

        assert isinstance(data,TreeFunctioned)

        self.data = data
        self.event_signaler = EventSignaler(key=name)

        if pruneFunc is not None:
            assert callable(pruneFunc)

        self.prune_func = pruneFunc
        self._lock = RLock()

    def add(self, value):
        self._lock.acquire()

        try:
            self.data.addToTreeByFunction(value)
        finally:
            self._lock.release()

        data = {self.event_signaler.key : {'data': self.data}}
        self.event_signaler.signalEvent(data)

    def prune(self):
        if self.prune_func is not None:
            return criticalSection(self._lock, lambda: self.prune_func(dataStructure=self.data))

    def inByFunction(self, value, hashFuncList=None, depth=0):
        return criticalSection(self._lock, lambda: self.data.inByFunction(value, hashFuncList, depth))

    def getOriginalByFunction(self, value, hashFuncList=None, depth=0):
        return criticalSection(self._lock, lambda: self.data.getOriginalByFunction(value, hashFuncList, depth))
Example #4
class SimpleCSVWriter:
    def __init__(self, filename, fields=None):
        self.filename = filename
        self.lock = RLock()
        self.isFirstRow = True
        self.fields = fields
        
    def writerow(self, d):
        self.lock.acquire()
        try:
            fields = self.fields if self.fields is not None else d.keys()
            if self.isFirstRow:
                # dump the header row
                f = open(self.filename, "w")
                writer = csv.writer(f, lineterminator="\n", quoting=csv.QUOTE_ALL)
                writer.writerow([k for k in fields])
                f.close()
                self.isFirstRow = False
            # dump the object as a row
            row = [d.get(k, '') for k in fields]
            f = open(self.filename, "a")
            writer = csv.writer(f, lineterminator="\n", quoting=csv.QUOTE_ALL)
            writer.writerow(row)
            f.close()
        finally:
            # release even if a write fails, so other threads are not blocked forever
            self.lock.release()
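A minimal usage sketch for SimpleCSVWriter (an illustration, not part of the original project): two threads append rows through the shared writer while the RLock serializes the header and row writes. The file name is a placeholder, and the imports shown are the ones the class above relies on.

import csv
from threading import RLock, Thread

writer = SimpleCSVWriter("metrics.csv", fields=["name", "value"])  # placeholder file name

def worker(n):
    for i in range(5):
        writer.writerow({"name": "thread-%d" % n, "value": i})

threads = [Thread(target=worker, args=(n,)) for n in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()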
Example #5
class VM_Pool(object):

    def __init__(self, vm_map):
        self.proc_mgr = proc_mgmt.ProcMgr()
        self.vm_map = vm_map
        self.vm_rdy = {}
        self.init_map()
        self.pool_gate = RLock()
        
    def acquire(self, *names):
        self.pool_gate.acquire()
        try:
            for name in names:
                if self.vm_rdy.get(name):
                    self.vm_rdy[name] = False
                    return self.vm_map.get(name)
            return None
        finally:
            # always release the gate; the original only released it on the miss path
            self.pool_gate.release()
    
    def release(self, name):
        self.vm_rdy[name] = True
      
    def init_map(self):
        for name, vm_obj in self.vm_map.items():
            self.vm_rdy[name] = True

    def __str__(self):
        string = 'Pool:'
        for vm in self.vm_map.keys():
            string += vm + ": " + str(self.vm_rdy.get(vm)) + ", "
        return string
Example #6
class ZenitherClient():
    def __init__(self, robot):
        try:
            rospy.init_node('ZenitherClient')
            rospy.logout('ZenitherClient: Initialized Node')
        except rospy.ROSException:
            pass

        if robot not in zc.calib:
            raise RuntimeError('unknown robot')
        self.calib = zc.calib[robot]

        srv = '/zenither/move_position'
        rospy.wait_for_service(srv)
        self.move_position = rospy.ServiceProxy(srv, Float_Int)
        
        srv = '/zenither/stop'
        rospy.wait_for_service(srv)
        self.stop = rospy.ServiceProxy(srv, Float_Int)
        
        srv = '/zenither/apply_torque'
        rospy.wait_for_service(srv)
        self.apply_torque = rospy.ServiceProxy(srv, Float_Int)

        srv = '/zenither/torque_move_position'
        rospy.wait_for_service(srv)
        self.torque_move_position = rospy.ServiceProxy(srv, Float_Int)

        zenither_pose_topic = 'zenither_pose'
        self.h = None
        self.lock = RLock()
        rospy.Subscriber(zenither_pose_topic, FloatArray, self.pose_cb)
        
    #---------- functions to send zenither commands. -------------
    def estop(self):
        self.stop(0)

    def zenith(self, torque=None):
        if torque == None:
            torque=self.calib['zenith_torque']
        self.apply_torque(torque)

    def nadir(self, torque=None):
        if torque == None:
            torque=self.calib['nadir_torque']
        self.apply_torque(torque)


    #--------- zenither height functions --------------
    def pose_cb(self, fa):
        self.lock.acquire()
        self.h = fa.data[0]
        self.lock.release()

    ## return the current height of the zenither.
    def height(self):
        self.lock.acquire()
        h = self.h
        self.lock.release()
        return h
Example #7
class ThreadSafeFSM(InstrumentFSM):
    """
    A FSM class that provides thread locking in on_event to
    prevent simultaneous thread reentry.
    """

    def __init__(self, states, events, enter_event, exit_event):
        """
        """
        super(ThreadSafeFSM, self).__init__(states, events, enter_event, exit_event)
        self._lock = RLock()

    def on_event(self, event, *args, **kwargs):
        """
        """

        self._lock.acquire(True)
        ex = None

        try:
            result = super(ThreadSafeFSM, self).on_event(event, *args, **kwargs)

        except Exception as e:
            # Keep a reference: in Python 3 the "as" target is cleared after the except block.
            ex = e
            result = None
            log.error("Unhandled Exception")
            log.exception(e)

        finally:
            self._lock.release()

        if ex:
            raise ex

        return result
Example #8
class PrintingThread(Thread):
    def __init__(self, printer, cmds):
        log.info("New printing thread, printer %s, len %d" %
                 (printer, len(cmds)))
        self.cmds=cmds
        self.printer=printer
        self.lock=RLock()
        self.state=0
        Thread.__init__(self)
        
    def run(self):
        printer=self.printer
        printer.zero()
        from time import time, sleep
        from os import path
        
        #wait=time()
        #log.info("Waiting for load page button.")
        #while path.getmtime(job_dir+"load_new_page")<wait:
        #    sleep(1.0)
        #log.info("Load page button press detected. Starting to print.")
        
        for i, step in enumerate(self.cmds):
            printer.do(step)
            self.lock.acquire()
            self.state=i
            self.lock.release()
        printer.eject_page()
        
    def progress(self):
        self.lock.acquire()
        res=self.state
        self.lock.release()
        return res/float(len(self.cmds)-1)
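A hedged driver for PrintingThread: DummyPrinter and the module-level log are stand-ins (the class above logs through a name called log and drives a printer object offering zero(), do() and eject_page()); progress() is polled from the main thread while the RLock guards the shared state counter.

import logging
import time
from threading import RLock, Thread

log = logging.getLogger(__name__)  # assumed module-level logger used by PrintingThread

class DummyPrinter(object):
    """Hypothetical stand-in for the real printer driver."""
    def zero(self):
        pass
    def do(self, step):
        time.sleep(0.01)
    def eject_page(self):
        pass

job = PrintingThread(DummyPrinter(), cmds=list(range(50)))
job.start()
while job.is_alive():
    print('progress: %.0f%%' % (100 * job.progress()))
    time.sleep(0.1)
job.join()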
Example #9
    def close_dynamic_queue(self, dynamic_queue_name):
        if self._disconnecting:
            self.logger.info("Connection factory disconnecting, aborting close_dynamic_queue")
            return
        else:
            self.logger.log(TRACE1, "close_dynamic_queue -> not disconnecting")

        if not self._is_connected:
            # If we're not connected then all dynamic queues have already been closed.
            self.logger.log(TRACE1, "close_dynamic_queue -> _is_connected1 %s" % self._is_connected)
            return
        else:
            self.logger.log(TRACE1, "close_dynamic_queue -> _is_connected2 %s" % self._is_connected)
            lock = RLock()
            lock.acquire()
            try:
                dynamic_queue = self._open_dynamic_queues_cache[dynamic_queue_name]
                dynamic_queue.close()

                self._open_dynamic_queues_cache.pop(dynamic_queue_name, None)
                self._open_send_queues_cache.pop(dynamic_queue_name, None)
                self._open_receive_queues_cache.pop(dynamic_queue_name, None)

                self.logger.log(TRACE1, "Successfully closed a dynamic queue [%s]" % (
                    dynamic_queue_name))

            finally:
                lock.release()
Example #10
class ThreadData(object):
  def __init__(self):
    self._data = {}
    self._lock = RLock()
    
  def __setitem__(self, key, value):
    self._lock.acquire()
    thread = current_thread()
    if thread not in self._data:
      self._data[thread] = {}
    self._data[thread][key] = value
    self._lock.release()
    
  def __getitem__(self, key):
    thread = current_thread()
    return self._data[thread][key]
  
  def __delitem__(self, key):
    # delete from the per-thread dict; "del self[key]" would recurse forever
    thread = current_thread()
    del self._data[thread][key]
  
  def __contains__(self, key):
    thread = current_thread()
    return key in self._data[thread]
  
  def update(self, data):
    self._lock.acquire()
    thread = current_thread()
    self._data[thread].update(data)
    self._lock.release()
  
  def clean(self):
    thread = current_thread()
    if thread in self._data:
      del(self._data[thread])
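A small usage sketch for ThreadData (hypothetical, not from the source project): each thread sees only its own entries, and the RLock guards the shared outer dict during writes.

from threading import RLock, Thread, current_thread

shared = ThreadData()

def worker(tag):
    shared["tag"] = tag              # stored under the current thread
    shared.update({"count": 1})
    print(current_thread().name, shared["tag"], "count" in shared)
    shared.clean()                   # drop this thread's entries

threads = [Thread(target=worker, args=(t,)) for t in ("a", "b")]
for t in threads:
    t.start()
for t in threads:
    t.join()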
Example #11
class XBeeTransparentListener(Thread):

    def __init__(self, on_received=None):
        super().__init__()
        self.xbser = None
        self.on_received = on_received
        self.daemon = True
        self.stopped = False
        self._pause_lock = RLock()  # renamed so the lock does not shadow the pause() method below

    def run(self):
        while not self.stopped and self.xbser.is_open:
            with self._pause_lock:
                try:
                    line = self.xbser.readline()
                    if line:
                        self.received(line)
                except Exception as ex:
                    print(str(ex))

    def received(self, line):
        """Subclasses may override this method, or provide a callback function when instance is created"""
        if self.on_received:
            self.on_received(line)
        else:
            print('[XBee]', line.strip())

    def stop(self):
        self.stopped = True

    def pause(self):
        self._pause_lock.acquire()

    def unpause(self):
        self._pause_lock.release()
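A hedged usage sketch for this listener: it assumes pyserial is installed and that '/dev/ttyUSB0' is a placeholder port; the xbser attribute is assigned after construction, as the class above expects.

import time
import serial  # pyserial, assumed to be available

listener = XBeeTransparentListener(on_received=lambda line: print('[XBee]', line.strip()))
listener.xbser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)  # placeholder port
listener.start()

time.sleep(10)      # let it report incoming lines for a while
listener.stop()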
Example #12
File: trigger.py Project: pborky/pyneuro
class TriggerDevice(NeuroDevice):
    def __init__(self, freq = 256, channels = 1):
        self.freq = freq
        self.channels = channels
        self.header = Header(TRIGGER_HEADER)
        self.header.channelCount = channels
        for i in range(channels):
            self.header.channels[i].samplingFrequency = freq
            self.header.channels[i].label = 'TRIGGER%d' % i
        self.values = [0,]*self.channels
        self.valLock = RLock()
        self.queue = Queue(15)
        self.thread = TriggerDeviceThread(self)
        self.thread.start()
    
    def getValues(self):
        self.valLock.acquire()
        try:
            return tuple(self.values)
        finally:
            self.valLock.release()
    
    def setValues(self, val):
        self.valLock.acquire()
        try:
            self.values[:] = val
        finally:
            self.valLock.release()

    def getHeader(self):
        return self.header.text()
    
    def getData(self):
        # Queue.get takes (block, timeout); the 10.0 here reads as an intended timeout
        return self.queue.get(timeout=10.0)
Example #13
File: framer.py Project: ChugR/qpid-python
class Framer(Packer):

  HEADER="!4s4B"

  def __init__(self, sock):
    self.sock = sock
    self.sock_lock = RLock()
    self.tx_buf = ""
    self.rx_buf = ""
    self.security_layer_tx = None
    self.security_layer_rx = None
    self.maxbufsize = 65535

  def aborted(self):
    return False

  def write(self, buf):
    self.tx_buf += buf

  def flush(self):
    self.sock_lock.acquire()
    try:
      if self.security_layer_tx:
        try:
          cipher_buf = self.security_layer_tx.encode(self.tx_buf)
        except SASLError, e:
          raise Closed(str(e))
        self._write(cipher_buf)
      else:
Example #14
File: utils.py Project: vienin/python-ufo
class CacheDict(dict):
  
  _cacheTimeout = 0
  _accesslock   = None

  def __init__(self, timeout):
    self._cacheTimeout = timeout
    self._accesslock   = RLock()

  def get(self, key):
    return self[key]['value']

  def cache(self, key, value):
    self[key] = { 'time' : time.time(), 'value' : value }
    
  def isObsolete(self, key):
    return (not self.has_key(key) or
            time.time() - self[key]['time'] > self._cacheTimeout)
    
  def invalidate(self, key):
    if self.has_key(key):
      return self.pop(key)['value']

  def acquire(self):
    self._accesslock.acquire()

  def release(self):
    self._accesslock.release()
Example #15
    def resetMysqlMACAndSN(self,mac):
        mysql = MySQLCommand(host=self.sysXMLDict['mysqlhost'], port=int(self.sysXMLDict['mysqlport']),
                             user=self.sysXMLDict['mysqluser'], passwd=self.sysXMLDict['mysqlpassword'],
                             db=self.sysXMLDict['mysqldatabase'], table=self.sysXMLDict['mysqltable'])

        mysqlConFlag = mysql.connectMysql()
        if not mysqlConFlag:
            logging.info('reset status connect failed.')
            return False

        locker = RLock()
        locker.acquire()

        resetFlag = mysql.resetMysqlMACStatusAndSN(mac=mac,stbType=self.sysXMLDict['mysqlstbtype'], poNumber=self.poNumber)
        if resetFlag:
            logging.info('reset mysql status success.')
        else:
            logging.info('reset mysql status failed.')
            mysql.closeMysql()
            locker.release()
            return False

        mysql.closeMysql()
        locker.release()

        return True
Example #16
File: model.py Project: loles/solar
class SingleIndexCache(object):
    def __init__(self):
        self.lock = RLock()
        self.cached_vals = []

    def __enter__(self):
        self.lock.acquire()
        return self

    def fill(self, values):
        self.cached_vals = values

    def wipe(self):
        self.cached_vals = []

    def get_index(self, real_funct, ind_name, **kwargs):
        kwargs.setdefault("max_results", 999999)
        if not self.cached_vals:
            recvs = real_funct(ind_name, **kwargs).results
            self.fill(recvs)

    def filter(self, startkey, endkey, max_results=1):
        c = self.cached_vals
        for (curr_val, obj_key) in c:
            if max_results == 0:
                break
            if curr_val >= startkey:
                if curr_val <= endkey:
                    max_results -= 1
                    yield (curr_val, obj_key)
                else:
                    break

    def __exit__(self, *args, **kwargs):
        self.lock.release()
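A hedged sketch of how SingleIndexCache might be driven: fake_index stands in for the index query the original code wraps, and the with-block shows the RLock being held for the whole lookup. IndexPage and fake_index are assumptions, not the project's API.

from collections import namedtuple
from threading import RLock

IndexPage = namedtuple('IndexPage', 'results')

def fake_index(ind_name, max_results=None, **kwargs):
    # Stand-in for the real index query; returns (value, key) pairs.
    return IndexPage(results=[(1, 'a'), (2, 'b'), (3, 'c')])

cache = SingleIndexCache()
with cache as c:                       # __enter__ acquires the RLock
    c.get_index(fake_index, 'height')  # fills cached_vals on first use
    print(list(c.filter(1, 2, max_results=2)))   # [(1, 'a'), (2, 'b')]
# __exit__ releases the RLock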
Example #17
class AndroidSocket(asynchat.async_chat):
    
    def __init__(self):
        self.logger = logging.getLogger("AndroidSocket")
        
        # Connect to the Android (when we start the async loop - this is asynchronous)
        asynchat.async_chat.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect( ANDROID_HOST )
        
        # Set up input buffer and define message terminator
        self.input_buffer = []
        self.set_terminator("\n")
        self.socket_lock = RLock()
    
    # Making async_chat thread-safe
    def push(self, data):
        try:
            self.socket_lock.acquire()
            asynchat.async_chat.push(self, data)
        finally:
            self.socket_lock.release()
    
    # Making async_chat thread-safe
    def initiate_send(self):
        try:
            self.socket_lock.acquire()
            asynchat.async_chat.initiate_send(self)
        finally:
            self.socket_lock.release()
        
            
    def handle_error(self):
        self.logger.error("================ ERROR! Failed to send something! ================ ")
        etype, value, tb = sys.exc_info()
        traceback.print_exception(etype, value, tb)
        
    def handle_connect(self):
        # Init all the devices we need to control
        try:
            self.commandDispatcher = CommandDispatcher(self)
        except Exception as e:
            print e
            raise e
        
        self.logger.info("Sending CREEPER_READY status")
        self.push("CREEPER_READY:\n")
        
        
    def collect_incoming_data(self, data):
        self.input_buffer.append(data)
        
    def found_terminator(self):
        self.handle_android_command(self.input_buffer[0])
        self.input_buffer = []
        
    def handle_android_command(self, command_data):
        self.logger.info("Received command: %s" % command_data)
        self.commandDispatcher.process_command(command_data)
Example #18
File: common.py Project: akrmn/gggom
class _Spinner(Thread):
    def __init__(self, message):
        Thread.__init__(self)
        self.rlock = RLock()
        self.cv = Condition()
        self.__chars = u"⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
        if len(message) > 0:
            self.__message = " " + message
        else:
            self.__message = ""
        self.__message_length = len(self.__message)

    def __clear(self):
        stdout.write(
            '\b' * (self.__message_length + 2) +
            ' '  * (self.__message_length + 2) +
            '\b' * (self.__message_length + 2))
        stdout.flush()

    def __call__(self):
        self.start()

    def start(self):
        self.stopFlag = 0
        Thread.start(self)

    def stop(self):
        """To be called by the 'main' thread: Will block and wait for the
        thread to stop before returning control to 'main'."""

        self.stopFlag = 1

        # Wake up ahead of time if needed
        self.cv.acquire()
        self.cv.notify()
        self.cv.release()

        # Block and wait here until the thread fully exits its run method.
        self.rlock.acquire()

    def run(self):
        self.rlock.acquire()
        self.cv.acquire()
        stdout.write('  ' + self.__message)
        stdout.write('\b' * self.__message_length)
        stdout.flush()
        while 1:
            for char in self.__chars:
                self.cv.wait(0.1)
                if self.stopFlag:
                    self.__clear()
                    try :
                        return
                    finally :
                        # release the lock immediately after returning
                        self.rlock.release()
                stdout.write('\b')
                stdout.write(char)
                stdout.flush()
Example #19
File: db.py Project: stereohead/wsgi-cahin
class MysqlDb(BaseDb):
    
    def __init__(self, db_uri):
        parts = urlsplit(db_uri)
        
        self.host = parts.hostname
        self.port = parts.port or 3306
        self.user = parts.username
        self.pwd = parts.password or ""
        self.dbname = parts.path[1:]
        
        self.db_con_lock = RLock()
        self.db_con = None
        self.db_cur = None
    
    
    def acquire_cursor(self):
        try:
            self.db_con_lock.acquire()
            
            self.__get_connection()
            self.db_cur = self.db_con.cursor()
        
        except StandardError:
            if self.db_con is not None:
                self.db_con.close()
                
            if self.db_cur is not None:
                self.db_cur.close()
            
            self.db_con_lock.release()
        
        return self.db_cur


    def release_cursor(self):
        self.db_cur.close()
        self.db_con.commit()
        self.db_con.close()
        self.db_con = None
        self.db_con_lock.release()
    
    
    def __get_connection(self):
        if self.db_con is None:
            con = MySQLdb.connect(self.host, self.user, self.pwd, self.dbname, self.port)
        
        else:
            self.db_con.ping(True)
            con = self.db_con
    
        self.db_con = con            
            
        return con
            

    def cursor(self):
        c = Cursor(self)
        return c
Example #20
class Main(HTTPServlet):
    registerShutdown = 1
    
    def __init__(self):
        HTTPServlet.__init__(self)
        
        self.blogDirectory = WeblogDirectory("../blogs.xml")    
        self.mutex = RLock()
             
    def awake(self, transaction):
        # Register our shutdown handler if it hasn't already been done. This is to
        # make sure the databases are properly closed when the system is shut down.
        self.mutex.acquire()
        try:        
            if (Main.registerShutdown == 1):
                transaction.application().addShutDownHandler(self.blogDirectory.shutdown)
                Main.registerShutdown = 0
        finally:
            self.mutex.release()
            
    def respondToGet(self, transaction):
        request = transaction.request()
        response = transaction.response()
        
        pathInfo = request.extraURLPath() 

        try:
            (blog, pathInfo) = self._parsePathInfo(pathInfo)
            weblog = self.blogDirectory.getBlog(blog)
        
            try:
                stylesheet = request.field('t', "")
                # Extra optional argument that can be passed to the stylesheet
                arg = request.field('a', "")
            
                # Content query that can be applied as a final step to extract
                # something from the rendered content
                contentQuery = request.field('c', "")
            
                result = weblog.handleRequest(pathInfo, stylesheet, arg, contentQuery)
            
                # Determine the content-type for the result
                if (result.startswith("<?xml")):                             
                    contentType = "text/xml"         
                elif (result.startswith("<html")):
                    contentType = "text/html"
                else:
                    contentType = "text/plain"
                #print result
                
                response.setStatus(200, 'OK')
                response.setHeader('Content-type', contentType)
                response.setHeader('Content-length', str(len(result)))
                response.write(result)
            except NotFoundError:
                response.setStatus(404, 'Not Found')
        except (KeyError, IndexError):
            response.setStatus(404, 'Weblog Not Found')
Example #21
File: Logging.py Project: myusuf3/hellanzb
class ScrollableHandler(StreamHandlerNoLF):
    """ ScrollableHandler is a StreamHandler that specially handles scrolling (log
    messages at the SCROLL level). It allows you to temporarily interrupt the constant
    scroll with other log messages of different levels (printed at the top of the scroll
    area) """

    # the SCROLL level (a class var)
    LOGFILE = 11
    SCROLL = 12
    SHUTDOWN = 13
    NOLOGFILE = 14
    
    def __init__(self, *args, **kwargs):
        self.scrollLock = RLock()
        self.scrollFlag = False
        StreamHandlerNoLF.__init__(self, *args, **kwargs)

    def handle(self, record):
        """ The 'scroll' level is a constant scroll that can be interrupted. This interruption is
        done via prepending text to the scroll area """
        rv = self.filter(record)
        if rv:

            if record.levelno == ScrollableHandler.SCROLL:
                self.emitSynchronized(record)
            elif record.levelno == ScrollableHandler.SHUTDOWN:
                record.msg = '\n\n\n%s\n' % record.msg
                self.emitSynchronized(record)
            else:
                self.scrollLock.acquire()
                # If scroll is on, interrupt scroll
                if ScrollableHandler.scrollFlag:
                    self.scrollHeader(record)
                else:
                    # otherwise if scroll isn't on, just log the message normally
                    self.emitSynchronized(record)
                self.scrollLock.release()
                            
        return rv

    def emitSynchronized(self, record):
        """ Write a log message atomically. Normal python logging Handler behavior """
        self.acquire()
        try:
            self.emit(record)
        finally:
            self.release()

    def scrollHeader(self, record):
        """ Print a log message so that the user can see it during a SCROLL """
        msg = self.format(record).rstrip() # Scroller appends newline for us
        from twisted.internet import reactor
        if inMainThread():
            # FIXME: scrollBegin() should really be creating the scroller instance
            # FIXME: no unicode crap from normal python log emit
            Hellanzb.scroller.scrollHeader(msg)
        else:
            reactor.callFromThread(Hellanzb.scroller.scrollHeader, msg)
Example #22
 def exit(self):
     global command_output_thread
     print("Exiting thread {0}...".format(self))
     lock = RLock()
     lock.acquire()
     self.EXIT_FLAG = True
     pgid = self.get_pgid(True)
     if pgid is not None:
         call(["ionice", "-c", "0", "-P", str(self.get_pgid(True))])
     lock.release()
Example #23
File: coderunner.py Project: zennro/pyview
 def _threadCallback(self,thread):
   """
   A callback function which gets called when a code thread terminates.
   """
   lock = RLock()
   lock.acquire()
   if thread.failed():
     self._exceptions[thread._id] = thread.exceptionInfo()
     self._tracebacks[thread._id] = thread.tracebackInfo()
   lock.release()
Example #24
class MonitorLock(object):
	def __init__(self):
		self.lock = RLock()
	def acquire(self, flag=True):
		return self.lock.acquire(flag)  # honor the non-blocking flag instead of ignoring it
	def release(self):
		self.lock.release()
	def MONITOR_ENTER(self):
		self.acquire()
	def MONITOR_EXIT(self):
		self.release()
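A tiny illustrative sketch (not from the source project): the MONITOR_ENTER/MONITOR_EXIT aliases simply bracket a critical section around shared state.

from threading import RLock

monitor = MonitorLock()
shared_counter = 0

def bump():
    global shared_counter
    monitor.MONITOR_ENTER()
    try:
        shared_counter += 1
    finally:
        monitor.MONITOR_EXIT()

bump()
print(shared_counter)  # 1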
Example #25
class ThreadSafeDict(dict) :
    def __init__(self, * p_arg, ** n_arg) :
        dict.__init__(self, * p_arg, ** n_arg)
        self._lock = RLock()

    def __enter__(self) :
        self._lock.acquire()
        return self

    def __exit__(self, type, value, traceback) :
        self._lock.release()
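A short usage sketch (hypothetical): because __enter__/__exit__ wrap the RLock, the with-block makes a read-modify-write on the dict atomic across threads.

from threading import RLock, Thread

counters = ThreadSafeDict(hits=0)

def work():
    for _ in range(1000):
        with counters as c:          # lock held for the whole update
            c['hits'] = c['hits'] + 1

threads = [Thread(target=work) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counters['hits'])  # 4000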
Example #26
class SerialWriteHandler:
    def __init__(self, ser, incomingDataHandler, input):
        self._incomingDataHandler = incomingDataHandler
        self._input = input
        self._serial = ser
        self._lock = RLock()

    def write(self, data):
        self._lock.acquire()
        self._serial.write(chr(HEADER_START))
        self._serial.write(str(data))
        self._lock.release()

    def writeAndWaitForAck(self, data, idToAck):
        self._lock.acquire()
        resend = True
        while resend:
            self.write(data)
            ack = self.waitForACK()
            if ack != None and ack.getIdToAck() == idToAck and ack.getReqLen() == len(data):
                resend = False
        self._lock.release()

    def waitForACK(self):
        gotHeaderStart = False
        incomingLength = 0
        headerId = 0
        data = []
        timeoutCount = 3
        try:
            while timeoutCount > 0:
                if gotHeaderStart:
                    if len(data) < 1:
                        data.append(self._input.read())
                        incomingLength, headerId = self._incomingDataHandler.getIncomingHeaderSizeAndId(data)
                    elif incomingLength >= 1 and headerId == ACK_RES:
                        for i in range(1, incomingLength):
                            data.append(self._input.read())
                        ack = ACKResponse()
                        ack.buildRequest(data)
                        if ack.checkPackage():
                            return ack
                        data = []
                        timeoutCount -= 1
                        gotHeaderStart = False
                    else:
                        data = []
                        timeoutCount -= 1
                        gotHeaderStart = False
                elif ord(self._input.read()) == HEADER_START:
                    gotHeaderStart = True
        except TypeError:
            rospy.logerr('ACK not received, retransmitting...')
        return None
Example #27
File: dhtlib.py Project: Shu-Ji/dht
class Client(KRPC):
    def __init__(self, table):
        self.table = table
        self.lock = RLock()

        timer(KRPC_TIMEOUT, self.timeout)
        KRPC.__init__(self)

    def find_node(self, address, nid=None):
        nid = self.get_neighbor(nid) if nid else self.table.nid
        tid = entropy(TID_LENGTH)
        msg = {
            "t": tid,
            "y": "q",
            "q": "find_node",
            "a": {"id": nid, "target": random_id()}
        }
        self.send_krpc(msg, address)

    def bootstrap(self):
        for address in BOOTSTRAP_NODES:
            self.find_node(address)

    def timeout(self):
        if not self.join_successed:
            self.bootstrap()
        timer(KRPC_TIMEOUT, self.timeout)

    def run(self):
        self.bootstrap()
        while 1:
            time.sleep(.001)
            try:
                data, address = self.ufd.recvfrom(65536)
                msg = bdecode(data)
                self.types[msg["y"]](msg, address)
            except Exception:
                pass

    def foreverloop(self):
        self.start()
        while 1:
            time.sleep(.001)
            if not self.table.nodes:
                self.join_successed = False
                time.sleep(1)
                continue

            for node in self.table.nodes:
                self.find_node((node.ip, node.port), node.nid)

            self.lock.acquire()
            self.table.nodes = []
            self.lock.release()
Example #28
class RateManager:

    def __init__(self):
        self.lock = RLock()
        self.statusmap = {}
        self.currenttotal = {}
        self.dset = Set()
        self.clear_downloadstates()

    def add_downloadstate(self, ds):
        if DEBUG:
            print >> sys.stderr, 'RateManager: add_downloadstate', `(ds.get_download().get_def().get_infohash())`
        self.lock.acquire()
        try:
            d = ds.get_download()
            if d not in self.dset:
                self.statusmap[ds.get_status()].append(ds)
                for dir in [UPLOAD, DOWNLOAD]:
                    self.currenttotal[dir] += ds.get_current_speed(dir)

                self.dset.add(d)
            return len(self.dset)
        finally:
            self.lock.release()

    def add_downloadstatelist(self, dslist):
        for ds in dslist:
            self.add_downloadstate(ds)

    def adjust_speeds(self):
        self.lock.acquire()
        try:
            self.calc_and_set_speed_limits(DOWNLOAD)
            self.calc_and_set_speed_limits(UPLOAD)
            self.clear_downloadstates()
        finally:
            self.lock.release()

    def clear_downloadstates(self):
        self.statusmap[DLSTATUS_ALLOCATING_DISKSPACE] = []
        self.statusmap[DLSTATUS_WAITING4HASHCHECK] = []
        self.statusmap[DLSTATUS_HASHCHECKING] = []
        self.statusmap[DLSTATUS_DOWNLOADING] = []
        self.statusmap[DLSTATUS_SEEDING] = []
        self.statusmap[DLSTATUS_STOPPED] = []
        self.statusmap[DLSTATUS_STOPPED_ON_ERROR] = []
        self.statusmap[DLSTATUS_REPEXING] = []
        for dir in [UPLOAD, DOWNLOAD]:
            self.currenttotal[dir] = 0

        self.dset.clear()

    def calc_and_set_speed_limits(self, direct):
        pass
Example #29
        def wrapper(self, *args, **kwargs):
            retval = getattr(self, attr, None)
            if retval is None:
                retval = fn(self, *args, **kwargs)
                _lock = RLock()
                try:
                    _lock.acquire()
                    setattr(self, attr, retval)
                finally:
                    _lock.release()

            return retval
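The snippet above is only the inner wrapper of a caching decorator; a hedged reconstruction of the kind of decorator it could sit in is sketched below (cached_on and the Example class are assumptions, not the original project's API). Note that, as in the excerpt, the RLock is created per call, so it only makes the setattr atomic rather than guarding the whole check.

from functools import wraps
from threading import RLock

def cached_on(attr):
    """Hypothetical decorator: cache fn's result on the instance under `attr`."""
    def decorator(fn):
        @wraps(fn)
        def wrapper(self, *args, **kwargs):
            retval = getattr(self, attr, None)
            if retval is None:
                retval = fn(self, *args, **kwargs)
                _lock = RLock()
                try:
                    _lock.acquire()
                    setattr(self, attr, retval)
                finally:
                    _lock.release()
            return retval
        return wrapper
    return decorator

class Example(object):
    @cached_on('_answer')
    def answer(self):
        print('computing...')
        return 42

e = Example()
print(e.answer(), e.answer())  # computes once, then reuses the cached value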
Example #30
class Observable:
    """The Observable object.  Observable objects allow registration
    management of observers via the register() and unregister() events.

    To implement an Observable object:
        - subclass your class from Observable
        - whenever you want to notify observers of a relevant event
          use the broadcastEvent() method, passing an event name
          and a variable number of arguments.  These will be
          broadcast to all observing objects, in the Observable's
          thread context."""

    def __init__(self):
        self.__registeredObservers = []
        self.__registerLock = RLock()

    def register(self, observerObject):
        """This method is private and subject to modifications.
        Avoid calling or overriding it."""
        self.__registerLock.acquire()
        try:
            logger.debug("%s registering %s as observer", self, observerObject)
            if not observerObject in self.__registeredObservers:
                self.__registeredObservers.append(observerObject)
        finally:
            self.__registerLock.release()

    def unregister(self, observerObject):
        """This method is private and subject to modifications.
        Avoid calling or overriding it."""
        self.__registerLock.acquire()
        try:
            logger.debug("%s unregistering %s", self, observerObject)
            if observerObject in self.__registeredObservers:
                self.__registeredObservers.remove(observerObject)
        finally:
            self.__registerLock.release()

    def broadcastEvent(self, eventName, *args):
        """Call this method within your Observable instance to
        have all Observers be notified about the event.

        Pass an event name (usually a string) and a variable number
        of arguments.  They will be relayed to observing Observers.
        """
        logger.debug("%s broadcasting event %s,%s", self, eventName, args)
        for o in self.__registeredObservers[:]:
            try:
                o.processEvent(self, eventName, *args)
            except:
                logger.exception("Uncaught exception while relaying event %s%s from %s to %s.", eventName, args, self,
                                 o)
                logger.error("The event will still be relayed to the rest of observers")
Example #31
class Rcon:
    BUFFER_SIZE = 2**10

    def __init__(self, address, port, password, logger=None):
        self.logger = logger
        self.address = address
        self.port = port
        self.password = password
        self.socket = None
        self.command_lock = RLock()

    def __del__(self):
        self.disconnect()

    def send(self, data):
        if type(data) is Packet:
            data = data.flush()
        self.socket.send(data)
        time.sleep(0.03)  # MC-72390

    def receive(self, length):
        data = bytes()
        while len(data) < length:
            data += self.socket.recv(min(self.BUFFER_SIZE, length - len(data)))
        return data

    def receive_packet(self):
        length = struct.unpack('<i', self.receive(4))[0]
        data = self.receive(length)
        packet = Packet()
        packet.packet_id = struct.unpack('<i', data[0:4])[0]
        packet.packet_type = struct.unpack('<i', data[4:8])[0]
        packet.payload = data[8:-2].decode('utf8')
        return packet

    def connect(self):
        if self.socket is not None:
            try:
                self.disconnect()
            except:
                pass
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((self.address, self.port))
        self.send(Packet(PacketType.LOGIN_REQUEST, self.password))
        success = self.receive_packet().packet_id != PacketType.LOGIN_FAIL
        if not success:
            self.disconnect()
        return success

    def disconnect(self):
        if self.socket is None:
            return
        self.socket.close()
        self.socket = None

    def __send_command(self, command, depth, max_retry_time):
        self.command_lock.acquire()
        try:
            self.send(Packet(PacketType.COMMAND_REQUEST, command))
            self.send(Packet(PacketType.ENDING_PACKET, 'lol'))
            result = ''
            while True:
                packet = self.receive_packet()
                if packet.payload == 'Unknown request {}'.format(
                        hex(PacketType.ENDING_PACKET)[2:]):
                    break
                result += packet.payload
            return result
        except:
            if self.logger is not None:
                self.logger.warning('Rcon failed to receive packet')
            try:
                self.disconnect()
                if self.connect() and depth < max_retry_time:
                    return self.__send_command(command, depth + 1,
                                               max_retry_time)
            except:
                pass
            return None
        finally:
            self.command_lock.release()

    def send_command(self, command, max_retry_time=3):
        return self.__send_command(command, 0, max_retry_time)
Example #32
class Connection(Framer):
    def __init__(self, sock, delegate=client, **args):
        Framer.__init__(self, sock)
        self.lock = RLock()
        self.attached = {}
        self.sessions = {}

        self.condition = Condition()
        # XXX: we should combine this into a single comprehensive state
        # model (whatever that means)
        self.opened = False
        self.failed = False
        self.closed = False
        self.close_code = (None, "connection aborted")

        self.thread = Thread(target=self.run)
        self.thread.setDaemon(True)

        self.channel_max = 65535
        self.user_id = None

        self.op_enc = OpEncoder()
        self.seg_enc = SegmentEncoder()
        self.frame_enc = FrameEncoder()

        self.delegate = delegate(self, **args)

    def attach(self, name, ch, delegate, force=False):
        self.lock.acquire()
        try:
            ssn = self.attached.get(ch.id)
            if ssn is not None:
                if ssn.name != name:
                    raise ChannelBusy(ch, ssn)
            else:
                ssn = self.sessions.get(name)
                if ssn is None:
                    ssn = Session(name, delegate=delegate)
                    self.sessions[name] = ssn
                elif ssn.channel is not None:
                    if force:
                        del self.attached[ssn.channel.id]
                        ssn.channel = None
                    else:
                        raise SessionBusy(ssn)
                self.attached[ch.id] = ssn
                ssn.channel = ch
            ch.session = ssn
            return ssn
        finally:
            self.lock.release()

    def detach(self, name, ch):
        self.lock.acquire()
        try:
            self.attached.pop(ch.id, None)
            ssn = self.sessions.pop(name, None)
            if ssn is not None:
                ssn.channel = None
                ssn.closed()
                return ssn
        finally:
            self.lock.release()

    def __channel(self):
        for i in xrange(1, self.channel_max):
            if not self.attached.has_key(i):
                return i
        else:
            raise ChannelsBusy()

    def session(self, name, timeout=None, delegate=session.client):
        self.lock.acquire()
        try:
            ch = Channel(self, self.__channel())
            ssn = self.attach(name, ch, delegate)
            ssn.channel.session_attach(name)
            if wait(ssn.condition, lambda: ssn.channel is not None, timeout):
                return ssn
            else:
                self.detach(name, ch)
                raise Timeout()
        finally:
            self.lock.release()

    def detach_all(self):
        self.lock.acquire()
        self.failed = True
        try:
            for ssn in self.attached.values():
                if self.close_code[0] != 200:
                    ssn.exceptions.append(self.close_code)
                self.detach(ssn.name, ssn.channel)
        finally:
            self.lock.release()

    def start(self, timeout=None):
        self.delegate.start()
        self.thread.start()
        if not wait(self.condition, lambda: self.opened or self.failed,
                    timeout):
            self.thread.join()
            raise Timeout()
        if self.failed:
            self.thread.join()
            raise ConnectionFailed(*self.close_code)

    def run(self):
        frame_dec = FrameDecoder()
        seg_dec = SegmentDecoder()
        op_dec = OpDecoder()

        while not self.closed:
            try:
                data = self.sock.recv(64 * 1024)
                if not data:
                    self.detach_all()
                    break
                # If we have a security layer and it sends us no decoded data,
                # that's OK as long as its return code is happy.
                if self.security_layer_rx:
                    try:
                        data = self.security_layer_rx.decode(data)
                    except:
                        self.detach_all()
                        break
            # When we do not use SSL transport, we get periodic
            # spurious timeout events on the socket.  When using SSL,
            # these events show up as timeout *errors*.  Both should be
            # ignored unless we have aborted.
            except socket.timeout:
                if self.aborted():
                    self.close_code = (None, "connection timed out")
                    self.detach_all()
                    break
                else:
                    continue
            except socket.error, e:
                if self.aborted() or str(e) != "The read operation timed out":
                    self.close_code = (None, str(e))
                    self.detach_all()
                    break
                else:
                    continue
            frame_dec.write(data)
            seg_dec.write(*frame_dec.read())
            op_dec.write(*seg_dec.read())
            for op in op_dec.read():
                try:
                    self.delegate.received(op)
                except Closed, e:
                    self.close_code = (None, str(e))
                    if not self.opened:
                        self.failed = True
                        self.closed = True
                        notify(self.condition)
Example #33
class TwitterInstances(object):
    def __init__(self, dataCollection, tweetProvider):
        super(TwitterInstances, self).__init__()
        assert isinstance(dataCollection, DataCollection)

        self._by_oauth = dict()
        self._by_instance_key = dict()
        self._lock = RLock()
        self.data_collection = dataCollection
        self.tweet_provider = tweetProvider

    def add(self, twitterInstance):
        assert isinstance(twitterInstance, TwitterInstance)

        self._lock.acquire()
        try:
            self._by_instance_key[
                twitterInstance.instance_key] = twitterInstance
            self._by_oauth[twitterInstance.oauth] = twitterInstance
        finally:
            self._lock.release()

    def getUniqueInstanceKey(self):
        def func():
            instanceKey = unicode(getUniqueId())
            while instanceKey in self._by_instance_key:
                instanceKey = unicode(getUniqueId())
            return instanceKey

        return criticalSection(self._lock, func)

    def createInstance(self, twitterAuthentication, geographic_setup_string,
                       keywords, instance_setup_code):
        def func():
            twitterInstance = TwitterInstance(self.getUniqueInstanceKey(),
                                              self, twitterAuthentication,
                                              geographic_setup_string,
                                              keywords, instance_setup_code)

            return twitterInstance

        return criticalSection(self._lock, func)

    def getInstanceList(self):
        return criticalSection(self._lock,
                               lambda: list(self._by_instance_key.values()))

    def isInstanceKeyInUse(self, instanceKey):
        return criticalSection(self._lock,
                               lambda: instanceKey in self._by_instance_key)

    def isAuthInUse(self, oauth):
        return criticalSection(self._lock, lambda: oauth in self._by_oauth)

    def getInstanceByInstanceKey(self, instanceKey):
        result = criticalSection(
            self._lock, lambda: self._by_instance_key.get(instanceKey, None))
        return result

    def getInstanceByAuth(self, oauth):
        result = criticalSection(self._lock,
                                 lambda: self._by_oauth.get(oauth, None))
        return result

    def removeTwitterInstanceByInstanceKey(self, instanceKey):
        self._lock.acquire()
        try:
            instance = self._by_instance_key.get(instanceKey)
            if instance is None:
                return None

            assert isinstance(instance, TwitterInstance)

            # Remove from dictionaries first so that it is no
            # longer accessible from the rest of the application.
            del self._by_instance_key[instanceKey]
            del self._by_oauth[instance.oauth]
        finally:
            self._lock.release()

        # Cleanup instance.
        instance.shutdownInstance(False)
        self.data_collection.removeInstanceData(instanceKey)

        return instance

    def removeTwitterInstanceByAuth(self, oauth):
        self._lock.acquire()
        try:
            instance = self._by_oauth.get(oauth)
            if instance is None:
                return None

            assert isinstance(instance, TwitterInstance)

            # Remove from dictionaries first so that it is no
            # longer accessible from the rest of the application.
            del self._by_oauth[oauth]
            del self._by_instance_key[instance.instance_key]

            print unicode(self._by_instance_key)
        finally:
            self._lock.release()

        # Cleanup instance.
        instance.shutdownInstance(False)
        self.data_collection.removeInstanceData(unicode(instance.instance_key))

        return instance
Example #34
class Cache:
    """
    An implementation of a cache.
    See https://en.wikipedia.org/wiki/Cache_algorithms
    """
    class Item:
        """
        Cache-private class representing an item in the cache.
        """
        def __init__(self):
            self.key = None
            self.stored_value = None
            self.stored_size = 0
            self.creation_time = 0
            self.access_time = 0
            self.access_count = 0

        def access(self):
            self.access_time = time.clock() - _T0
            self.access_count += 1

        def store(self, store, key, value):
            self.key = key
            self.access_count = 0
            self.access()
            stored_value, stored_size = store.store_value(key, value)
            self.stored_value = stored_value
            self.stored_size = stored_size

        def restore(self, store, key):
            self.access()
            return store.restore_value(key, self.stored_value)

        def discard(self, store, key):
            store.discard_value(key, self.stored_value)
            self.__init__()

    def __init__(self,
                 store=MemoryCacheStore(),
                 capacity=1000,
                 threshold=0.75,
                 policy=POLICY_LRU,
                 parent_cache=None):
        """
        Constructor.
    
        :param policy: cache replacement policy: LRU, MRU, LFU, or RR
        :param store: the cache store, see CacheStore interface
        :param capacity: the size capacity in units used by the store's store() method
        :param threshold: a number greater than zero and less than one
        """
        self._store = store
        self._capacity = capacity
        self._threshold = threshold
        self._policy = policy
        self._parent_cache = parent_cache
        self._size = 0
        self._max_size = self._capacity * self._threshold
        self._item_dict = {}
        self._item_list = []
        self._lock = RLock()

    @property
    def policy(self):
        return self._policy

    @property
    def store(self):
        return self._store

    @property
    def capacity(self):
        return self._capacity

    @property
    def threshold(self):
        return self._threshold

    @property
    def size(self):
        return self._size

    @property
    def max_size(self):
        return self._max_size

    def get_value(self, key):
        self._lock.acquire()
        item = self._item_dict.get(key)
        value = None
        if item:
            value = item.restore(self._store, key)
        elif self._parent_cache:
            item = self._parent_cache.get_value(key)
            if item:
                value = item.restore(self._parent_cache.store, key)
        self._lock.release()
        return value

    def put_value(self, key, value):
        self._lock.acquire()
        if self._parent_cache:
            # remove value from parent cache, because this cache will now take over
            self._parent_cache.remove_value(key)
        item = self._item_dict.get(key)
        if item:
            self._remove_item(item)
            self._size -= item.stored_size
            item.discard(self._store, key)
        else:
            item = Cache.Item()
        item.store(self._store, key, value)
        if self._size + item.stored_size > self._max_size:
            self.trim(item.stored_size)
        self._size += item.stored_size
        self._add_item(item)
        self._lock.release()

    def remove_value(self, key):
        self._lock.acquire()
        if self._parent_cache:
            self._parent_cache.remove_value(key)
        item = self._item_dict.get(key)
        if item:
            self._remove_item(item)
            self._size -= item.stored_size
            item.discard(self._store, key)
        self._lock.release()

    def _add_item(self, item):
        self._item_dict[item.key] = item
        self._item_list.append(item)

    def _remove_item(self, item):
        self._item_dict.pop(item.key)
        self._item_list.remove(item)

    def trim(self, extra_size=0):
        self._lock.acquire()
        self._item_list.sort(key=self._policy)
        keys = []
        size = self._size
        max_size = self._max_size
        for item in self._item_list:
            if size + extra_size > max_size:
                keys.append(item.key)
                size -= item.stored_size
        self._lock.release()
        # release the lock to give another thread a chance, then reacquire it
        self._lock.acquire()
        for key in keys:
            if self._parent_cache:
                # Before discarding item fully, put its value into the parent cache
                value = self.get_value(key)
                self.remove_value(key)
                if value:
                    self._parent_cache.put_value(key, value)
            else:
                self.remove_value(key)
        self._lock.release()

    def clear(self, clear_parent=True):
        self._lock.acquire()
        if self._parent_cache and clear_parent:
            self._parent_cache.clear(clear_parent)
        keys = list(self._item_dict.keys())
        self._lock.release()
        for key in keys:
            if self._parent_cache and not clear_parent:
                value = self.get_value(key)
                if value:
                    self._parent_cache.put_value(key, value)
            self.remove_value(key)
Example #35
class RingBuffer:
    def __init__(self, max_length):
        self._max_length = max_length  # Buffer size
        self._data = deque()  # Data array
        self._closed = False  # Buffer closed flag
        self._buffer_lock = RLock()  # Mutex

    # Write data to the buffer (overwriting the oldest element when full)
    def write(self, dados):
        if len(self._data) == self._max_length:  # Buffer is full, overwrite
            self._data.popleft()
        self._data.append(dados)

    # Read the oldest element from the buffer
    def read(self):
        return self._data.popleft()

    def readAll(self):
        _tmpdata = self._data.copy()
        self._data.clear()
        return _tmpdata

    # Open the ring buffer
    def open(self):
        self._closed = False

    # Close the ring buffer
    def close(self):
        self._closed = True

    # Return buffer status (True when the buffer is closed)
    def isClosed(self):
        return self._closed

    # Return the buffer data
    def getData(self):
        return self._data

    # Return the current buffer length
    def getLength(self):
        return len(self._data)

    # Lock the buffer (acquire the buffer's mutex)
    def lock(self):
        self._buffer_lock.acquire()

    # Unlock the buffer (release the buffer's mutex)
    def unlock(self):
        self._buffer_lock.release()

    # Return the maximum buffer size
    def getMaxLength(self):
        return self._max_length
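A hedged producer/consumer sketch for RingBuffer: callers are expected to take the buffer's mutex around compound read/write sequences, which is what lock()/unlock() expose. The deque import is what the class above relies on; the thread names and sample data are illustrative only.

from collections import deque
from threading import RLock, Thread

buf = RingBuffer(max_length=8)

def producer():
    for sample in range(20):
        buf.lock()
        try:
            buf.write(sample)        # oldest sample is dropped when full
        finally:
            buf.unlock()
    buf.close()

def consumer():
    while not buf.isClosed() or buf.getLength() > 0:
        buf.lock()
        try:
            if buf.getLength() > 0:
                print('read', buf.read())
        finally:
            buf.unlock()

t1, t2 = Thread(target=producer), Thread(target=consumer)
t1.start(); t2.start()
t1.join(); t2.join()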
Example #36
class Blockchain(object):
    def __init__(self):
        self.db = Config.getFilePath("CHAIN_DIRECTORY", "BLOCKCHAIN_DB")
        self.subDb = Config.getValue("BLOCKS_SUB_DB")
        
        self.storage = Storage(self.db, self.subDb)
           
        self.indexDb = Config.getFilePath("CHAIN_DIRECTORY", "INDEX_DB")
        self.subIndexDb = Config.getValue("INDEX_SUB_DB")
        
        self.index = Storage(self.indexDb, self.subIndexDb)
        
        self.CHAIN_HEAD_INDEX = DataType.serialize("CHAIN_HEAD_INDEX")
        self.BLOCK_HEIGHT_KEY = "BLOCK_HEIGHT_KEY"
        
        self.blkValidator = ValidatorFactory.getInstance(ValidatorType.BLOCK)
        self.txValidator = ValidatorFactory.getInstance(ValidatorType.TX)
        
        self.blkLock = RLock()
        self.orphanLock = RLock()
        
        self.miningPool = {}
        
    def init(self):
        if self.getIndexBlockByHash(self.CHAIN_HEAD_INDEX) == None:
            genesisBlockGasLimit = Config.getIntValue("GENESIS_BLOCK_GAS_LIMIT")
            genesisBlockGasUsed = Config.getIntValue("GENESIS_BLOCK_GAS_USED")
            genesisBlockGasPrice = Config.getIntValue("GENESIS_BLOCK_GAS_PRICE")

            genesisBlock = Block()
            genesisBlock.previousHash = Config.getBytesValue("GENESIS_BLOCK_PREVIOUS_HASH", False)
            genesisBlock.gasLimit = genesisBlockGasLimit
            genesisBlock.gasUsed = genesisBlockGasUsed
            genesisBlock.nonce = Config.getIntValue("GENESIS_BLOCK_NONCE")
            genesisBlock.bits = Config.getIntValue("GENESIS_BLOCK_DIFFICULTY_BITS", 16)
            genesisBlock.timestamp = Config.getIntValue("GENESIS_BLOCK_TIMESTAMP")
            
            transaction = Transaction(genesisBlockGasLimit, genesisBlockGasPrice)
            coinbaseData = []
            coinbaseData.append(Config.getValue("GENESIS_BLOCK_COINBASE_DATA"))
            transaction.addCoinbaseInput(coinbaseData)
            genesisBlockRewards = Config.getDecimalValue("GENESIS_BLOCK_REWARDS")
            genesisBlockRewards = Units.toUnits(genesisBlockRewards)
            genesisBlockPublicAddress = Config.getValue("GENESIS_BLOCK_PUBLIC_ADDRESS")
            genesisBlockPublicAddress = Address.toAddressBytes(genesisBlockPublicAddress)
            genesisBlockScript = Script.verifySignature()
            transaction.addOutput(genesisBlockPublicAddress, genesisBlockScript, genesisBlockRewards)
            transaction.hash()
            
            genesisBlock.transactions.append(transaction) 
            genesisBlock.merkleRoot = MerkleTree.getMerkleRoot(genesisBlock.transactions, False)
            genesisBlock.witnessRoot = MerkleTree.getMerkleRoot(genesisBlock.transactions, True)
            
            self.addBlock(genesisBlock)
    
    def acquireBlockLock(self):
        self.blkLock.acquire()
        
    def releaseBlockLock(self):
        self.blkLock.release()

    def acquireOrphanLock(self, blocking=True):
        return self.orphanLock.acquire(blocking)
        
    def releaseOrphanLock(self):
        self.orphanLock.release()

    def getBlockHashByHeight(self, height):
        blockHeightKey = "{0}{1}{2}".format(self.BLOCK_HEIGHT_KEY, '_', height)
        blockHeightKey = DataType.serialize(blockHeightKey)
        return self.index.get(blockHeightKey)

    def getBlockByHash(self, blockHash):
        blockHashBytes = DataType.serialize(blockHash)
        blockBytes = self.storage.get(blockHashBytes)
        return self.getBlockFromBytes(blockBytes)
    
    def getChainHeadBlock(self):
        chainHeadIndexBlockBytes = self.index.get(self.CHAIN_HEAD_INDEX)
        chainHeadIndexBlock = self.getIndexBlockFromBytes(chainHeadIndexBlockBytes)
        if chainHeadIndexBlock != None:
            return self.getBlockByHash(chainHeadIndexBlock.previousHash)
        else:
            return None
        
    def getBlockFromBytes(self, blockBytes):
        if blockBytes != None:
            block = Block()
            block.deserialize(blockBytes)
            return block
        return None
    
    def getIndexBlockByHash(self, blockHash):
        blockHashBytes = DataType.serialize(blockHash)
        indexBlockBytes = self.index.get(blockHashBytes)
        return self.getIndexBlockFromBytes(indexBlockBytes)
    
    def getChainHeadIndexBlock(self):
        chainHeadIndexBlockBytes = self.index.get(self.CHAIN_HEAD_INDEX)
        return self.getIndexBlockFromBytes(chainHeadIndexBlockBytes)
    
    def getIndexBlockFromBytes(self, indexBlockBytes):
        if indexBlockBytes != None:
            indexBlock = IndexBlock()
            indexBlock.deserialize(indexBlockBytes)
            return indexBlock
        return None

    def getNewBlock(self, address, previousHash, bits, extraNonce):
        previousIndexBlock = self.getIndexBlockByHash(previousHash)
        block = Block()

        gasLimit = Config.getIntValue("BLOCK_REWARDS_GAS_LIMIT")
        gasPrice = Config.getIntValue("BLOCK_REWARDS_GAS_PRICE")
        transaction = Transaction(gasLimit, gasPrice)

        height = previousIndexBlock.height + 1
        coinbaseData = [
            DataType.asString(height), 
            DataType.asString(bits), 
            DataType.asString(extraNonce)
        ]
        transaction.addCoinbaseInput(coinbaseData)
        block.transactions.append(transaction) 
        txFees = 0
        totalTxGasUsed = 0
        unconfirmedTransactions = MemoryPool.getMemoryPool()
        for txId in unconfirmedTransactions:
            unconfirmedTransaction = unconfirmedTransactions[txId]
            block.transactions.append(unconfirmedTransaction)  
            txFees += unconfirmedTransaction.calculateTxFee()
            totalTxGasUsed += unconfirmedTransaction.calculateTxGasUsed()
        blockRewards = Config.getDecimalValue("BLOCK_REWARDS")
        blockRewards = Units.toUnits(blockRewards)
        coinbaseValue = blockRewards + txFees
        script = Script.verifySignature()

        transaction.addOutput(address, script, coinbaseValue)
        transaction.hash()

        #Include coinbase tx gas used
        totalTxGasUsed += transaction.calculateTxGasUsed()

        block.merkleRoot = MerkleTree.getMerkleRoot(block.transactions, False)
        block.witnessRoot = MerkleTree.getMerkleRoot(block.transactions, True)

        blockGasLimit = previousIndexBlock.gasLimit + (previousIndexBlock.gasLimit * (1 / 1024))
        blockGasLimit = math.ceil(blockGasLimit)
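        # Worked example (illustrative numbers): if the parent block's gasLimit is
        # 1_000_000, the limit grows by 1_000_000 / 1024 = 976.5625 to 1_000_976.5625,
        # and math.ceil rounds that up to 1_000_977.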

        block.gasLimit = blockGasLimit
        block.gasUsed = totalTxGasUsed
        block.nonce = 0
        block.bits = bits
        block.previousHash = previousHash
        return block

    '''
        Add block into the tree. There are three cases: 
            1. block further extends the main branch; 
            2. block extends a side branch but does not add enough difficulty to make it become the new main branch; 
            3. block extends a side branch and makes it the new main branch.
    '''   
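    # Decision sketch (informal summary of the code below), with W_head the current
    # chain head's cumulative work and W_new = chainwork(parent) + work(bits):
    #   case 1: parent is the current head (or this is the genesis block) -> extend the main branch
    #   case 2: side branch and W_new <= W_head                           -> store the block, keep the current head
    #   case 3: side branch and W_new >  W_head                           -> reorganize onto the side branch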
    def addBlock(self, block):
        if self.blkValidator.validate(block):
            try:
                self.acquireBlockLock()
            
                blockHash = block.hash()
                blockHashBytes = DataType.serialize(blockHash)
                
                if self.index.get(blockHashBytes) == None:
                    if not OrphanManager.hasBlock(blockHash):
                        bits = block.bits
                        previousChainWork = None
                        previousHash = block.previousHash
                        blockGasLimit = block.gasLimit
                        blockHeight = 0
                        
                        chainHeadBlock = self.getChainHeadBlock()
                        chainHeadIndexBlock = self.getChainHeadIndexBlock()
                        previousIndexBlock = self.getIndexBlockByHash(previousHash)
                        
                        if chainHeadIndexBlock == None:
                            chainHeadIndexBlock = IndexBlock()
                        if previousIndexBlock != None:
                            blockHeight = previousIndexBlock.height + 1
                            previousChainWork = previousIndexBlock.chainWork
                            
                        if blockHash == Config.getBytesValue('GENESIS_BLOCK_HASH'):
                            previousChainWork = Config.getIntValue('GENESIS_BLOCK_CHAIN_WORK', 16)
                        
                        '''        
                            For case 1, adding to main branch:
                        '''
                        if previousHash == chainHeadIndexBlock.previousHash or blockHash == Config.getBytesValue('GENESIS_BLOCK_HASH'):                
                            '''
                                For all but the coinbase transaction, apply the following:
                            '''
                            if not self.blkValidator.verifyNonCoinbaseTransactions(block):
                                return False
                                 
                            '''
                                Reject if coinbase value > sum of block creation fee and transaction fees
                            '''   
                            if not self.blkValidator.verifyCoinbaseValue(block):
                                return False
                                    
                            for transaction in block.transactions:
                                for txIn in transaction.inputs:
                                    UXTO.removeUnspentTransactionCoin(txIn.outpoint)
                                uxtoOutputs = []
                                uxtoOutputs.extend(transaction.outputs)
                                uxtoOutputs.extend(transaction.internalOutputs)
                                txOutputSize = 0
                                for txOut in uxtoOutputs:
                                    if txOut.store:
                                        txOutputSize += 1
                                outputIndex = 0
                                for txOut in uxtoOutputs:
                                    UXTO.removeStaleUnspentTransactionScript(txOut)
                                    if txOut.store:
                                        coin = Coin()
                                        coin.output = txOut
                                        coin.txOutputSize = txOutputSize
                                        coin.height = blockHeight
                                        coin.coinbase = transaction.isCoinbase()
                                        UXTO.addUnspentTransactionCoin(Outpoint(transaction.hash(), outputIndex), coin)
                                        outputIndex += 1
                                        '''
                                            For each transaction, "Add to wallet if mine"
                                        '''

                                '''
                                    For each transaction in the block, delete any matching transaction from the transaction pool
                                '''
                                MemoryPool.removeTransaction(transaction)
                                
                            chainHeadIndexBlock.chainWork = Bits.getChainworkFromBits(previousChainWork, bits)
                            chainHeadIndexBlock.previousHash = blockHash
                            chainHeadIndexBlock.gasLimit = blockGasLimit
                            chainHeadIndexBlock.height = blockHeight
                            self.index.set(self.CHAIN_HEAD_INDEX, chainHeadIndexBlock.serialize())
                        else:
                            blockChainWork = Bits.getChainworkFromBits(previousChainWork, bits)
                            chainHeadWork = chainHeadIndexBlock.chainWork
                            
                            hasNewMainChain = blockChainWork > chainHeadWork

                            '''
                                For case 2, adding to a side branch, we don't do anything.
                                For case 3, a side branch becoming the main branch:
                            '''
                            if hasNewMainChain:
                                '''
                                    Find the fork block on the main branch which this side branch forks off of
                                '''
                                forkBlockHash = self.searchForkBlockHash(previousIndexBlock, chainHeadIndexBlock)
                                
                                '''
                                    Redefine the main branch to only go up to this fork block
                                    
                                    We will set new main chain head below
                                '''
                                
                                isNewMainChainValid = True
                                
                                '''
                                    For each block on the side branch, from the child of the fork block to the leaf, add to the main branch:
                                '''
                                prevBlock = self.getBlockByHash(block.previousHash)
                                while prevBlock.hash() != forkBlockHash:
                                    '''
                                        Do "branch" checks 3-11
                                    '''
                                    '''
                                        3) Transaction list must be non-empty
                                    '''
                                    if not self.blkValidator.verifyTransactionsNonEmpty(prevBlock):
                                        isNewMainChainValid = False
                                    
                                    '''
                                        4) Block hash must satisfy claimed nBits proof of work
                                    '''
                                    if not self.blkValidator.validateBlockBits(prevBlock.serializeHeader(), prevBlock.bits):
                                        isNewMainChainValid = False
                                    
                                    '''
                                        5) Block timestamp must not be more than two hours in the future
                                    '''
                                    if not self.blkValidator.verifyFutureTimestamp(prevBlock):
                                        isNewMainChainValid = False
                                        
                                    '''
                                        6) First transaction must be coinbase (i.e. only 1 input, with hash=0, n=-1), the rest must not be
                                    '''
                                    if not self.blkValidator.verifyInitialCoinbaseTransaction(prevBlock):
                                        isNewMainChainValid = False
                                        
                                    '''
                                        7) For each transaction, apply "tx" checks 2-4
                                            2) Make sure neither in or out lists are empty
                                            3) Size in bytes <= TRANSACTION_SIZE_LIMIT
                                            4) Each output value, as well as the total, must be in legal money range
                                    '''
                                    for transaction in prevBlock.transactions:
                                        if not self.txValidator.verifyInputOutputNonEmpty(transaction):
                                            isNewMainChainValid = False
                                        if not self.txValidator.verifyTransactionSizeLimit(transaction):
                                            isNewMainChainValid = False
                                        if not self.txValidator.verifyAllowedOutputValueRange(transaction):
                                            isNewMainChainValid = False
                                        
                                        '''
                                            8) For the coinbase (first) transaction, scriptSig length must be 2-100
                                        '''
                                        if not self.blkValidator.verifyCoinbaseWitnessLength(transaction):
                                            isNewMainChainValid = False
                                        
                                        '''
                                            9) Reject if sum of transaction sig opcounts > MAX_BLOCK_SIGOPS
                                        '''
                                        if not self.blkValidator.verifyMaxBlockSigOps(transaction):
                                            isNewMainChainValid = False
                                        
                                    '''
                                        10) Verify Merkle hash
                                    '''
                                    if not self.blkValidator.verifyMerkleHash(prevBlock):
                                        isNewMainChainValid = False
                                        
                                    '''
                                        Verify Witness hash
                                    '''
                                    if not self.blkValidator.verifyWitnessHash(prevBlock):
                                        isNewMainChainValid = False
                                        
                                    '''
                                        11) Check if prev block (matching prev hash) is in main branch or side branches. If not, add this to orphan blocks, 
                                        then query peer we got this from for 1st missing orphan block in prev chain; done with block
                                    '''
                                    if blockHash != Config.getBytesValue('GENESIS_BLOCK_HASH') and self.getBlockByHash(prevBlock.previousHash) == None:
                                        OrphanManager.addBlock(prevBlock)
                                        isNewMainChainValid = False
                                
                                    '''
                                        For all but the coinbase transaction, apply the following:
                                    '''
                                    if not self.blkValidator.verifyNonCoinbaseTransactions(prevBlock):
                                        isNewMainChainValid = False
                                    
                                    '''
                                        Reject if coinbase value > sum of block creation fee and transaction fees
                                    '''   
                                    if not self.blkValidator.verifyCoinbaseValue(prevBlock):
                                        isNewMainChainValid = False
                                    
                                    '''
                                        (If we have not rejected):
                                    '''
                                    if not isNewMainChainValid:
                                        break
                                    
                                    '''         
                                        For each transaction, "Add to wallet if mine"
                                    '''
                                        
                                    prevBlock = self.getBlockByHash(prevBlock.previousHash)
                                
                                '''
                                    If we reject at any point, leave the main branch as what it was originally, done with block
                                '''
                                if isNewMainChainValid:
                                    chainHeadIndexBlock.chainWork = blockChainWork
                                    chainHeadIndexBlock.previousHash = blockHash 
                                    chainHeadIndexBlock.gasLimit = blockGasLimit
                                    chainHeadIndexBlock.height = blockHeight
                                    self.index.set(self.CHAIN_HEAD_INDEX, chainHeadIndexBlock.serialize())
                                    
                                    '''
                                        For each block in the old main branch, from the leaf down to the child of the fork block:
                                    '''
                                    oldBlock = chainHeadBlock
                                    while oldBlock.hash() != forkBlockHash:
                                        '''
                                            For each non-coinbase transaction in the block:
                                        '''
                                        for transaction in oldBlock.transactions:
                                            if not transaction.isCoinbase():
                                                '''
                                                    Apply "tx" checks 2-9
                                                '''
                                                isTxValid = True
                                                
                                                '''
                                                    2) Make sure neither in or out lists are empty
                                                '''
                                                if not self.txValidator.verifyInputOutputNonEmpty(transaction):
                                                    isTxValid = False
                                                
                                                '''
                                                    3) Size in bytes <= TRANSACTION_SIZE_LIMIT
                                                '''
                                                if not self.txValidator.verifyTransactionSizeLimit(transaction):
                                                    isTxValid = False
                                                
                                                '''
                                                    4) Each output value, as well as the total, must be in legal money range
                                                '''
                                                if not self.txValidator.verifyAllowedOutputValueRange(transaction):
                                                    isTxValid = False
                                                
                                                '''
                                                    5) Make sure none of the inputs have hash=0, n=-1 (coinbase transactions)
                                                '''
                                                if not self.txValidator.verifyInputsNonCoinbase(transaction):
                                                    isTxValid = False
                                                
                                                '''
                                                    6) size in bytes >= 100[2]
                                                '''
                                                if not self.txValidator.verifyTransactionRequiredSize(transaction):
                                                    isTxValid = False
                                                
                                                '''
                                                    sig opcount <= 2[3]
                                                    3) The number of signature operands in the signature (no, that is not redundant) for standard transactions will never exceed two
                                                    7) Reject "nonstandard" transactions: scriptSig doing anything other than pushing numbers on the stack, 
                                                    or script not matching the two usual forms[4]
                                                '''
                                                if not self.txValidator.verifyAddress(transaction):
                                                    isTxValid = False
                                                if not self.txValidator.verifyExtraData(transaction):
                                                    isTxValid = False
                                                if not self.txValidator.verifyScript(transaction):
                                                    isTxValid = False
                                                if not self.txValidator.verifyWitness(transaction):
                                                    isTxValid = False
                                                
                                                '''
                                                    8) Reject if we already have matching tx in the pool,
                                                    except in step 8, only look in the transaction pool for duplicates, not the main branch
                                                '''
                                                if not self.txValidator.verifyTransactionDuplicateInPool(transaction):
                                                    isTxValid = False
                                                
                                                '''
                                                    9) For each input, if the referenced output exists in any other tx in the pool, reject this transaction
                                                '''
                                                if not self.txValidator.verifyTxOutputDuplicateInPool(transaction):
                                                    isTxValid = False
                                                
                                                '''
                                                    Add to transaction pool if accepted, else go on to next transaction
                                                '''
                                                if isTxValid:
                                                    MemoryPool.addSignedTransaction(transaction)

                                            outputIndex = 0
                                            for txOut in transaction.outputs:
                                                UXTO.removeUnspentTransactionCoin(Outpoint(transaction.hash(), outputIndex))
                                                outputIndex += 1

                                        oldBlock = self.getBlockByHash(oldBlock.previousHash)
                                    
                                    '''
                                        For each block in the new main branch, from the child of the fork node to the leaf:
                                    '''
                                    newMainBranchBlocks = []
                                        
                                    prevBlock = block
                                    while prevBlock.hash() != forkBlockHash:
                                        newMainBranchBlocks.insert(0, prevBlock)
                                        prevBlock = self.getBlockByHash(prevBlock.previousHash)
                                        
                                    for newMainBranchBlock in newMainBranchBlocks:
                                        newMainBranchBlockHash = newMainBranchBlock.hash()
                                        newMainBranchIndexBlock = None
                                        if newMainBranchBlockHash == blockHash:
                                            newMainBranchIndexBlock = chainHeadIndexBlock
                                        else:
                                            newMainBranchIndexBlock = self.getIndexBlockByHash(newMainBranchBlockHash)
                                        for transaction in newMainBranchBlock.transactions:
                                            for txIn in transaction.inputs:
                                                UXTO.removeUnspentTransactionCoin(txIn.outpoint)
                                            uxtoOutputs = []
                                            uxtoOutputs.extend(transaction.outputs)
                                            uxtoOutputs.extend(transaction.internalOutputs)
                                            txOutputSize = 0
                                            for txOut in uxtoOutputs:
                                                if txOut.store:
                                                    txOutputSize += 1
                                            outputIndex = 0
                                            for txOut in uxtoOutputs:
                                                UXTO.removeStaleUnspentTransactionScript(txOut)
                                                if txOut.store:
                                                    coin = Coin()
                                                    coin.output = txOut
                                                    coin.txOutputSize = txOutputSize
                                                    coin.height = newMainBranchIndexBlock.height
                                                    coin.coinbase = transaction.isCoinbase()
                                                    UXTO.addUnspentTransactionCoin(Outpoint(transaction.hash(), outputIndex), coin)
                                                    outputIndex += 1
                                                    '''
                                                        For each transaction, "Add to wallet if mine"
                                                    '''

                                            '''
                                                For each transaction in the block, delete any matching transaction from the transaction pool
                                            '''
                                            MemoryPool.removeTransaction(transaction)
                        '''
                            (If we have not rejected):
                        '''
                        self.storage.set(blockHashBytes, block.serialize())

                        blockHeightKey = "{0}{1}{2}".format(self.BLOCK_HEIGHT_KEY, '_', blockHeight)
                        blockHeightKey = DataType.serialize(blockHeightKey)
                        self.index.set(blockHeightKey, blockHashBytes)
                        
                        indexBlock = IndexBlock()
                        indexBlock.chainWork = Bits.getChainworkFromBits(previousChainWork, bits)
                        indexBlock.previousHash = previousHash 
                        indexBlock.gasLimit = blockGasLimit
                        indexBlock.height = blockHeight
                        self.index.set(blockHashBytes, indexBlock.serialize())
        
                        '''
                            Relay block to our peers
                        '''
                        Sync.inv(InventoryType.BLOCK, blockHash)
                    
                    '''
                        For each orphan block for which this block is its prev, run all these steps (including this one) recursively on that orphan
                    '''
                    self.syncOrphanBlocks()
                    
                    return True
            finally:
                self.releaseBlockLock()
        '''
            For each orphan block for which this block is its prev, run all these steps (including this one) recursively on that orphan
        '''
        self.syncOrphanBlocks()
            
        '''     
            If we rejected, the block is not counted as part of the main branch
        '''
        return False
    
    '''
        For each orphan block for which this block is its prev, run all these steps (including this one) recursively on that orphan
    '''
    def syncOrphanBlocks(self):
        hasLock = False
        try:
            hasLock = self.acquireOrphanLock(False)
            if hasLock:
                orphanBlocks = []
                for orphanBlockHash in OrphanManager.getBlocks():
                    orphanBlock = OrphanManager.getBlock(orphanBlockHash)
                    previousHash = orphanBlock.previousHash
                    previousHashBytes = DataType.serialize(previousHash)
                    if self.index.get(previousHashBytes) != None:
                        orphanBlocks.append(orphanBlock)
                    elif not OrphanManager.hasBlock(previousHash):
                        Sync.getdata(InventoryType.BLOCK, previousHash)
                for orphanBlock in orphanBlocks:
                    OrphanManager.removeBlock(orphanBlock)
                for orphanBlock in orphanBlocks:
                    self.addBlock(orphanBlock)
        finally:
            if hasLock:
                self.releaseOrphanLock()
    
    def searchForkBlockHash(self, sideChainIndexBlock, chainHeadIndexBlock):
        sideChainBlock = sideChainIndexBlock
        chainHeadBlock = chainHeadIndexBlock
        while chainHeadBlock.previousHash != sideChainBlock.previousHash:
            if chainHeadBlock.height > sideChainBlock.height:
                chainHeadBlock = self.getIndexBlockByHash(chainHeadBlock.previousHash)
            else:
                sideChainBlock = self.getIndexBlockByHash(sideChainBlock.previousHash)
        return chainHeadBlock.previousHash

    def addMiningWorker(self, account, enabled):
        miningWorker = MiningWorker(self, account, enabled)
        miningWorker.start()
        
        self.miningPool[account.address] = miningWorker
        
    def exitsMiningWorker(self, account):
        return account.address in self.miningPool

    def stopMiningWorker(self, account):
        miningWorker = self.miningPool[account.address]
        miningWorker.enabled = False
        self.miningPool.pop(account.address)
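
searchForkBlockHash above finds the common ancestor by walking whichever chain is currently higher back one block at a time until both sides share the same previous hash. A self-contained sketch of that walk over a plain dict index (the hashes and heights are invented for illustration):

# index maps block hash -> (previousHash, height), mirroring what IndexBlock stores.
index = {
    "A": (None, 0),
    "B": ("A", 1),   # main branch: A -> B -> C
    "C": ("B", 2),
    "X": ("A", 1),   # side branch: A -> X -> Y -> Z
    "Y": ("X", 2),
    "Z": ("Y", 3),
}

def find_fork(side_tip, head_tip):
    side, head = side_tip, head_tip
    # Step whichever chain is higher back one block until the parents match.
    while index[head][0] != index[side][0]:
        if index[head][1] > index[side][1]:
            head = index[head][0]
        else:
            side = index[side][0]
    return index[head][0]

print(find_fork("Z", "C"))   # "A", the block where the side branch forked off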
Example #37
class Plot(object):
    """
    Plot Examples
    =============

    See examples/plotting.py for many more examples.


    >>> from sympy import Plot
    >>> from sympy.abc import x, y, z

    >>> Plot(x*y**3-y*x**3)

    >>> p = Plot()
    >>> p[1] = x*y
    >>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4)

    >>> p = Plot()
    >>> p[1] =  x**2+y**2
    >>> p[2] = -x**2-y**2


    Variable Intervals
    ==================

    The basic format is [var, min, max, steps], but the
    syntax is flexible and arguments left out are taken
    from the defaults for the current coordinate mode:

    >>> Plot(x**2) # implies [x,-5,5,100]
    >>> Plot(x**2, [], []) # [x,-1,1,40], [y,-1,1,40]
    >>> Plot(x**2-y**2, [100], [100]) # [x,-1,1,100], [y,-1,1,100]
    >>> Plot(x**2, [x,-13,13,100])
    >>> Plot(x**2, [-13,13]) # [x,-13,13,100]
    >>> Plot(x**2, [x,-13,13]) # [x,-13,13,100]
    >>> Plot(1*x, [], [x], mode='cylindrical')
    ... # [unbound_theta,0,2*Pi,40], [x,-1,1,20]


    Coordinate Modes
    ================

    Plot supports several curvilinear coordinate modes, and
    they are independent for each plotted function. You can specify
    a coordinate mode explicitly with the 'mode' named argument,
    but it can be automatically determined for Cartesian or
    parametric plots, and therefore must only be specified for
    polar, cylindrical, and spherical modes.

    Specifically, Plot(function arguments) and Plot[n] =
    (function arguments) will interpret your arguments as a
    Cartesian plot if you provide one function and a parametric
    plot if you provide two or three functions. Similarly, the
    arguments will be interpreted as a curve if one variable is
    used, and a surface if two are used.

    Supported mode names by number of variables:

    1: parametric, cartesian, polar
    2: parametric, cartesian, cylindrical = polar, spherical

    >>> Plot(1, mode='spherical')


    Calculator-like Interface
    =========================

    >>> p = Plot(visible=False)
    >>> f = x**2
    >>> p[1] = f
    >>> p[2] = f.diff(x)
    >>> p[3] = f.diff(x).diff(x)
    >>> p
    [1]: x**2, 'mode=cartesian'
    [2]: 2*x, 'mode=cartesian'
    [3]: 2, 'mode=cartesian'
    >>> p.show()
    >>> p.clear()
    >>> p
    <blank plot>
    >>> p[1] =  x**2+y**2
    >>> p[1].style = 'solid'
    >>> p[2] = -x**2-y**2
    >>> p[2].style = 'wireframe'
    >>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4)
    >>> p[1].style = 'both'
    >>> p[2].style = 'both'
    >>> p.close()


    Plot Window Keyboard Controls
    =============================

    Screen Rotation:
        X,Y axis      Arrow Keys, A,S,D,W, Numpad 4,6,8,2
        Z axis        Q,E, Numpad 7,9

    Model Rotation:
        Z axis        Z,C, Numpad 1,3

    Zoom:             R,F, PgUp,PgDn, Numpad +,-

    Reset Camera:     X, Numpad 5

    Camera Presets:
        XY            F1
        XZ            F2
        YZ            F3
        Perspective   F4

    Sensitivity Modifier: SHIFT

    Axes Toggle:
        Visible       F5
        Colors        F6

    Close Window:     ESCAPE

    =============================
    """
    def __init__(self, *fargs, **win_args):
        """
        Positional Arguments
        ====================

        Any given positional arguments are used to
        initialize a plot function at index 1. In
        other words...

        >>> from sympy.core import Symbol
        >>> from sympy.abc import x
        >>> p = Plot(x**2, visible=False)

        ...is equivalent to...

        >>> p = Plot(visible=False)
        >>> p[1] = x**2

        Note that in earlier versions of the plotting
        module, you were able to specify multiple
        functions in the initializer. This functionality
        has been dropped in favor of better automatic
        plot_mode detection.


        Named Arguments
        ===============

        axes
            An option string of the form
            "key1=value1; key2 = value2" which
            can use the following options:

            style = ordinate
                none OR frame OR box OR ordinate

            stride = 0.25
                val OR (val_x, val_y, val_z)

            overlay = True (draw on top of plot)
                True OR False

            colored = False (False uses Black,
                             True uses colors
                             R,G,B = X,Y,Z)
                True OR False

            label_axes = False (display axis names
                                at endpoints)
                True OR False

        visible = True (show immediately)
            True OR False


        The following named arguments are passed as
        arguments to window initialization:

        antialiasing = True
            True OR False

        ortho = False
            True OR False

        invert_mouse_zoom = False
            True OR False

        """
        self._win_args = win_args
        self._window = None

        self._render_lock = RLock()

        self._functions = {}
        self._pobjects = []
        self._screenshot = ScreenShot(self)

        axe_options = parse_option_string(win_args.pop('axes', ''))
        self.axes = PlotAxes(**axe_options)
        self._pobjects.append(self.axes)

        self[0] = fargs
        if win_args.get('visible', True):
            self.show()

    ## Window Interfaces

    def show(self):
        """
        Creates and displays a plot window, or activates it
        (gives it focus) if it has already been created.
        """
        if self._window and not self._window.has_exit:
            self._window.activate()
        else:
            self._win_args['visible'] = True
            self.axes.reset_resources()
            self._window = PlotWindow(self, **self._win_args)

    def close(self):
        """
        Closes the plot window.
        """
        if self._window:
            self._window.close()

    def saveimage(self, outfile=None, format='', size=(600, 500)):
        """
        Saves a screen capture of the plot window to an
        image file.

        If outfile is given, it can either be a path
        or a file object. Otherwise a png image will
        be saved to the current working directory.
        If the format is omitted, it is determined from
        the filename extension.
        """
        self._screenshot.save(outfile, format, size)

    ## Function List Interfaces

    def clear(self):
        """
        Clears the function list of this plot.
        """
        self._render_lock.acquire()
        self._functions = {}
        self.adjust_all_bounds()
        self._render_lock.release()

    def __getitem__(self, i):
        """
        Returns the function at position i in the
        function list.
        """
        return self._functions[i]

    def __setitem__(self, i, args):
        """
        Parses and adds a PlotMode to the function
        list.
        """
        if not (isinstance(i, (int, Integer)) and i >= 0):
            raise ValueError("Function index must " "be an integer >= 0.")

        if isinstance(args, PlotObject):
            f = args
        else:
            if (not ordered_iter(args)) or isinstance(args, GeometryEntity):
                args = [args]
            if len(args) == 0:
                return  # no arguments given
            kwargs = dict(bounds_callback=self.adjust_all_bounds)
            f = PlotMode(*args, **kwargs)

        if f:
            self._render_lock.acquire()
            self._functions[i] = f
            self._render_lock.release()
        else:
            raise ValueError("Failed to parse '%s'." %
                             ', '.join(str(a) for a in args))

    def __delitem__(self, i):
        """
        Removes the function in the function list at
        position i.
        """
        self._render_lock.acquire()
        del self._functions[i]
        self.adjust_all_bounds()
        self._render_lock.release()

    def firstavailableindex(self):
        """
        Returns the first unused index in the function list.
        """
        i = 0
        self._render_lock.acquire()
        while i in self._functions:
            i += 1
        self._render_lock.release()
        return i

    def append(self, *args):
        """
        Parses and adds a PlotMode to the function
        list at the first available index.
        """
        self.__setitem__(self.firstavailableindex(), args)

    def __len__(self):
        """
        Returns the number of functions in the function list.
        """
        return len(self._functions)

    def __iter__(self):
        """
        Allows iteration of the function list.
        """
        return self._functions.itervalues()

    def __repr__(self):
        return str(self)

    def __str__(self):
        """
        Returns a string containing a new-line separated
        list of the functions in the function list.
        """
        s = ""
        if len(self._functions) == 0:
            s += "<blank plot>"
        else:
            self._render_lock.acquire()
            s += "\n".join([
                "%s[%i]: %s" % ("", i, str(self._functions[i]))
                for i in self._functions
            ])
            self._render_lock.release()
        return s

    def adjust_all_bounds(self):
        self._render_lock.acquire()
        self.axes.reset_bounding_box()
        for f in self._functions:
            self.axes.adjust_bounds(self._functions[f].bounds)
        self._render_lock.release()

    def wait_for_calculations(self):
        sleep(0)
        self._render_lock.acquire()
        for f in self._functions:
            a = self._functions[f]._get_calculating_verts
            b = self._functions[f]._get_calculating_cverts
            while a() or b():
                sleep(0)
        self._render_lock.release()
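
The Plot methods above pair _render_lock.acquire() and release() by hand; an RLock is also a context manager, so a with block releases it even if the body raises. A small sketch of the same pattern (the class and names below are illustrative, not sympy's):

from threading import RLock

class FunctionList:
    def __init__(self):
        self._render_lock = RLock()
        self._functions = {}

    def set(self, i, f):
        with self._render_lock:          # equivalent to acquire()/release() in a try/finally
            self._functions[i] = f

    def first_available_index(self):
        with self._render_lock:
            i = 0
            while i in self._functions:
                i += 1
            return i

fl = FunctionList()
fl.set(0, "x**2")
print(fl.first_available_index())        # 1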
Example #38
class Server:
	
	def __init__(self, daemon=False):
		self.__loggingLock = Lock()
		self.__lock = RLock()
		self.__jails = Jails()
		self.__db = None
		self.__daemon = daemon
		self.__transm = Transmitter(self)
		self.__reload_state = {}
		#self.__asyncServer = AsyncServer(self.__transm)
		self.__asyncServer = None
		self.__logLevel = None
		self.__logTarget = None
		self.__verbose = None
		self.__syslogSocket = None
		self.__autoSyslogSocketPaths = {
			'Darwin':  '/var/run/syslog',
			'FreeBSD': '/var/run/log',
			'Linux': '/dev/log',
		}
		self.__prev_signals = {}
		# replace real thread name with short process name (for top/ps/pstree or diagnostic):
		prctl_set_th_name('f2b/server')

	def __sigTERMhandler(self, signum, frame): # pragma: no cover - indirect tested
		logSys.debug("Caught signal %d. Exiting", signum)
		self.quit()
	
	def __sigUSR1handler(self, signum, fname): # pragma: no cover - indirect tested
		logSys.debug("Caught signal %d. Flushing logs", signum)
		self.flushLogs()

	def _rebindSignal(self, s, new):
		"""Bind new signal handler while storing old one in _prev_signals"""
		self.__prev_signals[s] = signal.getsignal(s)
		signal.signal(s, new)

	def start(self, sock, pidfile, force=False, observer=True, conf={}):
		# First set the mask to only allow access to owner
		os.umask(0077)
		# Second daemonize before logging etc, because it will close all handles:
		if self.__daemon: # pragma: no cover
			logSys.info("Starting in daemon mode")
			ret = self.__createDaemon()
			# If forked parent - return here (parent process will configure server later):
			if ret is None:
				return False
			# If error:
			if not ret[0]:
				err = "Could not create daemon %s", ret[1:]
				logSys.error(err)
				raise ServerInitializationError(err)
			# We are daemon.
		
		# Set all logging parameters (or use default if not specified):
		self.__verbose = conf.get("verbose", None)
		self.setSyslogSocket(conf.get("syslogsocket", 
			self.__syslogSocket if self.__syslogSocket is not None else DEF_SYSLOGSOCKET))
		self.setLogLevel(conf.get("loglevel", 
			self.__logLevel if self.__logLevel is not None else DEF_LOGLEVEL))
		self.setLogTarget(conf.get("logtarget", 
			self.__logTarget if self.__logTarget is not None else DEF_LOGTARGET))

		logSys.info("-"*50)
		logSys.info("Starting Fail2ban v%s", version.version)
		
		if self.__daemon: # pragma: no cover
			logSys.info("Daemon started")

		# Install signal handlers
		if _thread_name() == '_MainThread':
			for s in (signal.SIGTERM, signal.SIGINT):
				self._rebindSignal(s, self.__sigTERMhandler)
			self._rebindSignal(signal.SIGUSR1, self.__sigUSR1handler)

		# Ensure unhandled exceptions are logged
		sys.excepthook = excepthook

		# Creates a PID file.
		try:
			logSys.debug("Creating PID file %s", pidfile)
			pidFile = open(pidfile, 'w')
			pidFile.write("%s\n" % os.getpid())
			pidFile.close()
		except (OSError, IOError) as e: # pragma: no cover
			logSys.error("Unable to create PID file: %s", e)
		
		# Create observers and start it:
		if observer:
			if Observers.Main is None:
				Observers.Main = ObserverThread()
				Observers.Main.start()

		# Start the communication
		logSys.debug("Starting communication")
		try:
			self.__asyncServer = AsyncServer(self.__transm)
			self.__asyncServer.onstart = conf.get('onstart')
			self.__asyncServer.start(sock, force)
		except AsyncServerException as e:
			logSys.error("Could not start server: %s", e)

		# Stop (if not yet already executed):
		self.quit()

		# Removes the PID file.
		try:
			logSys.debug("Remove PID file %s", pidfile)
			os.remove(pidfile)
		except (OSError, IOError) as e: # pragma: no cover
			logSys.error("Unable to remove PID file: %s", e)

	def quit(self):
		# Prevent quit from being called twice:
		self.quit = lambda: False

		logSys.info("Shutdown in progress...")

		# Stop communication first because if jail's unban action
		# tries to communicate via fail2ban-client we get a lockup
		# among threads.  So the simplest resolution is to stop all
		# communications first (which should be ok anyways since we
		# are exiting)
		# See https://github.com/fail2ban/fail2ban/issues/7
		if self.__asyncServer is not None:
			self.__asyncServer.stop_communication()

		# Restore default signal handlers:
		if _thread_name() == '_MainThread':
			for s, sh in self.__prev_signals.iteritems():
				signal.signal(s, sh)

		# Give observer a small chance to complete its work before exit
		obsMain = Observers.Main
		if obsMain is not None:
			if obsMain.stop(forceQuit=False):
				obsMain = None
			Observers.Main = None

		# Now stop all the jails
		self.stopAllJail()

		# Stop observer ultimately
		if obsMain is not None:
			obsMain.stop()

		# Explicitly close the database (the server can exit in a thread,
		# so delayed GC can prevent committing changes)
		if self.__db:
			self.__db.close()
			self.__db = None

		# Stop async and exit
		if self.__asyncServer is not None:
			self.__asyncServer.stop()
			self.__asyncServer = None
		logSys.info("Exiting Fail2ban")


	def addJail(self, name, backend):
		addflg = True
		if self.__reload_state.get(name) and self.__jails.exists(name):
			jail = self.__jails[name]
			# if backend switch - restart instead of reload:
			if jail.backend == backend:
				addflg = False
				logSys.info("Reload jail %r", name)
				# prevent reloading the same jail twice (temporarily keep it in the state, needed to commit the reload):
				self.__reload_state[name] = None
			else:
				logSys.info("Restart jail %r (reason: %r != %r)", name, jail.backend, backend)
				self.delJail(name, stop=True)
				# prevent starting the same jail twice (no longer a reload, but a restart):
				del self.__reload_state[name]
		if addflg:
			self.__jails.add(name, backend, self.__db)
		if self.__db is not None:
			self.__db.addJail(self.__jails[name])
		
	def delJail(self, name, stop=True, join=True):
		jail = self.__jails[name]
		if join or jail.isAlive():
			jail.stop(stop=stop, join=join)
		if join:
			if self.__db is not None:
				self.__db.delJail(jail)
			del self.__jails[name]

	def startJail(self, name):
		with self.__lock:
			jail = self.__jails[name]
			if not jail.isAlive():
				jail.start()
			elif name in self.__reload_state:
				logSys.info("Jail %r reloaded", name)
				del self.__reload_state[name]
			if jail.idle:
				jail.idle = False
	
	def stopJail(self, name):
		with self.__lock:
			self.delJail(name, stop=True)
	
	def stopAllJail(self):
		logSys.info("Stopping all jails")
		with self.__lock:
			# 1st stop all jails (signal and stop actions/filter thread):
			for name in self.__jails.keys():
				self.delJail(name, stop=True, join=False)
			# 2nd wait for end and delete jails:
			for name in self.__jails.keys():
				self.delJail(name, stop=False, join=True)

	def reloadJails(self, name, opts, begin):
		if begin:
			# begin reload:
			if self.__reload_state and (name == '--all' or self.__reload_state.get(name)): # pragma: no cover
				raise ValueError('Reload already in progress')
			logSys.info("Reload " + (("jail %s" % name) if name != '--all' else "all jails"))
			with self.__lock:
				# if single jail:
				if name != '--all':
					jail = None
					# test jail exists (throws exception if not):
					if "--if-exists" not in opts or self.__jails.exists(name):
						jail = self.__jails[name]
					if jail:
						# first unban all IPs (they will not be restored after (re)start):
						if "--unban" in opts:
							self.setUnbanIP(name)
						# stop if expected:
						if "--restart" in opts:
							self.stopJail(name)
				else:
					# first unban all IPs (they will not be restored after (re)start):
					if "--unban" in opts:
						self.setUnbanIP()
					# stop if expected:
					if "--restart" in opts:
						self.stopAllJail()
				# first set all affected jail(s) to idle and reset filter regex and other lists/dicts:
				for jn, jail in self.__jails.iteritems():
					if name == '--all' or jn == name:
						jail.idle = True
						self.__reload_state[jn] = jail
						jail.filter.reload(begin=True)
						jail.actions.reload(begin=True)
				pass
		else:
			# end reload, all affected (or new) jails have already all new parameters (via stream) and (re)started:
			with self.__lock:
				deljails = []
				for jn, jail in self.__jails.iteritems():
					# still in reload state:
					if jn in self.__reload_state:
						# remove jails that are not reloaded (untouched, so not in new configuration)
						deljails.append(jn)
					else:
						# commit (reload was finished):
						jail.filter.reload(begin=False)
						jail.actions.reload(begin=False)
				for jn in deljails:
					self.delJail(jn)
			self.__reload_state = {}
			logSys.info("Reload finished.")

	def setIdleJail(self, name, value):
		self.__jails[name].idle = value
		return True

	def getIdleJail(self, name):
		return self.__jails[name].idle
	
	# Filter
	def setIgnoreSelf(self, name, value):
		self.__jails[name].filter.ignoreSelf = _as_bool(value)
	
	def getIgnoreSelf(self, name):
		return self.__jails[name].filter.ignoreSelf

	def addIgnoreIP(self, name, ip):
		self.__jails[name].filter.addIgnoreIP(ip)
	
	def delIgnoreIP(self, name, ip):
		self.__jails[name].filter.delIgnoreIP(ip)
	
	def getIgnoreIP(self, name):
		return self.__jails[name].filter.getIgnoreIP()
	
	def addLogPath(self, name, fileName, tail=False):
		filter_ = self.__jails[name].filter
		if isinstance(filter_, FileFilter):
			filter_.addLogPath(fileName, tail)
	
	def delLogPath(self, name, fileName):
		filter_ = self.__jails[name].filter
		if isinstance(filter_, FileFilter):
			filter_.delLogPath(fileName)
	
	def getLogPath(self, name):
		filter_ = self.__jails[name].filter
		if isinstance(filter_, FileFilter):
			return filter_.getLogPaths()
		else: # pragma: systemd no cover
			logSys.info("Jail %s is not a FileFilter instance" % name)
			return []
	
	def addJournalMatch(self, name, match): # pragma: systemd no cover
		filter_ = self.__jails[name].filter
		if isinstance(filter_, JournalFilter):
			filter_.addJournalMatch(match)
	
	def delJournalMatch(self, name, match): # pragma: systemd no cover
		filter_ = self.__jails[name].filter
		if isinstance(filter_, JournalFilter):
			filter_.delJournalMatch(match)
	
	def getJournalMatch(self, name): # pragma: systemd no cover
		filter_ = self.__jails[name].filter
		if isinstance(filter_, JournalFilter):
			return filter_.getJournalMatch()
		else:
			logSys.info("Jail %s is not a JournalFilter instance" % name)
			return []
	
	def setLogEncoding(self, name, encoding):
		filter_ = self.__jails[name].filter
		filter_.setLogEncoding(encoding)
	
	def getLogEncoding(self, name):
		filter_ = self.__jails[name].filter
		return filter_.getLogEncoding()
	
	def setFindTime(self, name, value):
		self.__jails[name].filter.setFindTime(value)
	
	def getFindTime(self, name):
		return self.__jails[name].filter.getFindTime()

	def setDatePattern(self, name, pattern):
		self.__jails[name].filter.setDatePattern(pattern)

	def getDatePattern(self, name):
		return self.__jails[name].filter.getDatePattern()

	def setLogTimeZone(self, name, tz):
		self.__jails[name].filter.setLogTimeZone(tz)

	def getLogTimeZone(self, name):
		return self.__jails[name].filter.getLogTimeZone()

	def setIgnoreCommand(self, name, value):
		self.__jails[name].filter.ignoreCommand = value

	def getIgnoreCommand(self, name):
		return self.__jails[name].filter.ignoreCommand

	def setIgnoreCache(self, name, value):
		value, options = extractOptions("cache["+value+"]")
		self.__jails[name].filter.ignoreCache = options

	def getIgnoreCache(self, name):
		return self.__jails[name].filter.ignoreCache

	def setPrefRegex(self, name, value):
		flt = self.__jails[name].filter
		logSys.debug("  prefregex: %r", value)
		flt.prefRegex = value

	def getPrefRegex(self, name):
		return self.__jails[name].filter.prefRegex
	
	def addFailRegex(self, name, value, multiple=False):
		flt = self.__jails[name].filter
		if not multiple: value = (value,)
		for value in value:
			logSys.debug("  failregex: %r", value)
			flt.addFailRegex(value)
	
	def delFailRegex(self, name, index=None):
		self.__jails[name].filter.delFailRegex(index)
	
	def getFailRegex(self, name):
		return self.__jails[name].filter.getFailRegex()
	
	def addIgnoreRegex(self, name, value, multiple=False):
		flt = self.__jails[name].filter
		if not multiple: value = (value,)
		for value in value:
			logSys.debug("  ignoreregex: %r", value)
			flt.addIgnoreRegex(value)
	
	def delIgnoreRegex(self, name, index):
		self.__jails[name].filter.delIgnoreRegex(index)
	
	def getIgnoreRegex(self, name):
		return self.__jails[name].filter.getIgnoreRegex()
	
	def setUseDns(self, name, value):
		self.__jails[name].filter.setUseDns(value)
	
	def getUseDns(self, name):
		return self.__jails[name].filter.getUseDns()
	
	def setMaxMatches(self, name, value):
		self.__jails[name].filter.failManager.maxMatches = value
	
	def getMaxMatches(self, name):
		return self.__jails[name].filter.failManager.maxMatches
	
	def setMaxRetry(self, name, value):
		self.__jails[name].filter.setMaxRetry(value)
	
	def getMaxRetry(self, name):
		return self.__jails[name].filter.getMaxRetry()
	
	def setMaxLines(self, name, value):
		self.__jails[name].filter.setMaxLines(value)
	
	def getMaxLines(self, name):
		return self.__jails[name].filter.getMaxLines()
	
	# Action
	def addAction(self, name, value, *args):
		## create (or reload) jail action:
		self.__jails[name].actions.add(value, *args, 
			reload=name in self.__reload_state)
	
	def getActions(self, name):
		return self.__jails[name].actions
	
	def delAction(self, name, value):
		del self.__jails[name].actions[value]
	
	def getAction(self, name, value):
		return self.__jails[name].actions[value]
	
	def setBanTime(self, name, value):
		self.__jails[name].actions.setBanTime(value)
	
	def addAttemptIP(self, name, *args):
		return self.__jails[name].filter.addAttempt(*args)

	def setBanIP(self, name, value):
		return self.__jails[name].actions.addBannedIP(value)

	def setUnbanIP(self, name=None, value=None, ifexists=True):
		if name is not None:
			# single jail:
			jails = [self.__jails[name]]
		else:
			# in all jails:
			jails = self.__jails.values()
		# unban given or all (if value is None):
		cnt = 0
		ifexists |= (name is None)
		for jail in jails:
			cnt += jail.actions.removeBannedIP(value, ifexists=ifexists)
		return cnt
		
	def getBanTime(self, name):
		return self.__jails[name].actions.getBanTime()

	def getBanList(self, name, withTime=False):
		"""Returns the list of banned IP addresses for a jail.

		Parameters
		----------
		name : str
			The name of a jail.

		Returns
		-------
		list
			The list of banned IP addresses.
		"""
		return self.__jails[name].actions.getBanList(withTime)

	def setBanTimeExtra(self, name, opt, value):
		self.__jails[name].setBanTimeExtra(opt, value)

	def getBanTimeExtra(self, name, opt):
		return self.__jails[name].getBanTimeExtra(opt)
	
	def isStarted(self):
		return self.__asyncServer is not None and self.__asyncServer.isActive()

	def isAlive(self, jailnum=None):
		if jailnum is not None and len(self.__jails) != jailnum:
			return 0
		for jail in self.__jails.values():
			if not jail.isAlive():
				return 0
		return 1

	# Status
	def status(self):
		try:
			self.__lock.acquire()
			jails = list(self.__jails)
			jails.sort()
			jailList = ", ".join(jails)
			ret = [("Number of jail", len(self.__jails)),
				   ("Jail list", jailList)]
			return ret
		finally:
			self.__lock.release()
	
	def statusJail(self, name, flavor="basic"):
		return self.__jails[name].status(flavor=flavor)

	# Logging
	
	##
	# Set the logging level.
	#
	# CRITICAL
	# ERROR
	# WARNING
	# NOTICE
	# INFO
	# DEBUG
	# @param value the level
	
	def setLogLevel(self, value):
		value = value.upper()
		with self.__loggingLock:
			if self.__logLevel == value:
				return
			ll = str2LogLevel(value)
			# don't change real log-level if running from the test cases:
			getLogger("fail2ban").setLevel(
				ll if DEF_LOGTARGET != "INHERITED" or ll < logging.DEBUG else DEF_LOGLEVEL)
			self.__logLevel = value
	
	##
	# Get the logging level.
	#
	# @see setLogLevel
	# @return the log level
	
	def getLogLevel(self):
		with self.__loggingLock:
			return self.__logLevel

	##
	# Sets the logging target.
	#
	# target can be a file, SYSLOG, STDOUT or STDERR.
	# @param target the logging target
	
	def setLogTarget(self, target):
		# check reserved targets in uppercase; don't change the target itself, because it can be a file path:
		target, logOptions = extractOptions(target)
		systarget = target.upper()
		with self.__loggingLock:
			# don't set new handlers if already the same
			# or if "INHERITED" (foreground worker of the test cases, to prevent stop logging):
			if self.__logTarget == target:
				return True
			if systarget == "INHERITED":
				self.__logTarget = target
				return True
			padding = logOptions.get('padding')
			# set a format which is simpler for console use
			if systarget == "SYSLOG":
				facility = logOptions.get('facility', 'DAEMON').upper()
				# backwards compatibility - default no padding for syslog handler:
				if padding is None: padding = '0'
				try:
					facility = getattr(logging.handlers.SysLogHandler, 'LOG_' + facility)
				except AttributeError: # pragma: no cover
					logSys.error("Unable to set facility %r, using 'DAEMON'", logOptions.get('facility'))
					facility = logging.handlers.SysLogHandler.LOG_DAEMON
				if self.__syslogSocket == "auto":
					import platform
					self.__syslogSocket = self.__autoSyslogSocketPaths.get(
						platform.system())
				if self.__syslogSocket is not None\
						and os.path.exists(self.__syslogSocket)\
						and stat.S_ISSOCK(os.stat(
								self.__syslogSocket).st_mode):
					hdlr = logging.handlers.SysLogHandler(
						self.__syslogSocket, facility=facility)
				else:
					logSys.error(
						"Syslog socket file: %s does not exists"
						" or is not a socket" % self.__syslogSocket)
					return False
			elif systarget in ("STDOUT", "SYSOUT"):
				hdlr = logging.StreamHandler(sys.stdout)
			elif systarget == "STDERR":
				hdlr = logging.StreamHandler(sys.stderr)
			else:
				# Target should be a file
				try:
					open(target, "a").close()
					hdlr = logging.handlers.RotatingFileHandler(target)
				except IOError:
					logSys.error("Unable to log to %r", target)
					logSys.info("Logging to previous target %r", self.__logTarget)
					return False
			# Remove previous handlers -- in reverse order, since removeHandler
			# alters the list in-place and that can confuse the iteration
			logger = getLogger("fail2ban")
			for handler in logger.handlers[::-1]:
				# Remove the handler.
				logger.removeHandler(handler)
				# And try to close -- it might be closed already
				try:
					handler.flush()
					handler.close()
				except (ValueError, KeyError): # pragma: no cover
					# Is known to be thrown after logging was shutdown once
					# with older Pythons -- seems to be safe to ignore there
					# At least it was still failing on 2.6.2-0ubuntu1 (jaunty)
					if (2, 6, 3) <= sys.version_info < (3,) or \
							(3, 2) <= sys.version_info:
						raise
			# use a detailed format for deep log levels (DEBUG=10 and below):
			if logger.getEffectiveLevel() <= logging.DEBUG: # pragma: no cover
				if self.__verbose is None:
					self.__verbose = logging.DEBUG - logger.getEffectiveLevel() + 1
			# If the handler doesn't already add the date to the message:
			addtime = logOptions.get('datetime')
			if addtime is not None:
				addtime = _as_bool(addtime)
			else:
				addtime = systarget not in ("SYSLOG", "SYSOUT")
			if padding is not None:
				padding = _as_bool(padding) 
			else:
				padding = True
			# If log-format is redefined in options:
			if logOptions.get('format', '') != '':
				fmt = logOptions.get('format')
			else:
				# verbose log-format:
				verbose = 0
				if self.__verbose is not None and self.__verbose > 2: # pragma: no cover
					verbose = self.__verbose-1
				fmt = getVerbosityFormat(verbose, addtime=addtime, padding=padding)
			# tell the handler to use this format
			hdlr.setFormatter(logging.Formatter(fmt))
			logger.addHandler(hdlr)
			# Don't display this message at startup.
			if self.__logTarget is not None:
				logSys.info("Start Fail2ban v%s", version.version)
				logSys.info(
					"Changed logging target to %s for Fail2ban v%s"
					% ((target
						if target != "SYSLOG"
						else "%s (%s)"
							 % (target, self.__syslogSocket)),
					   version.version))
			# Sets the logging target.
			self.__logTarget = target
			return True

	##
	# Sets the syslog socket.
	#
	# syslogsocket is the full path to the syslog socket
	# @param syslogsocket the syslog socket path
	def setSyslogSocket(self, syslogsocket):
		with self.__loggingLock:
			if self.__syslogSocket == syslogsocket:
				return True
			self.__syslogSocket = syslogsocket
		# Conditionally reload, logtarget depends on socket path when SYSLOG
		return self.__logTarget != "SYSLOG"\
			   or self.setLogTarget(self.__logTarget)

	def getLogTarget(self):
		with self.__loggingLock:
			return self.__logTarget

	def getSyslogSocket(self):
		with self.__loggingLock:
			return self.__syslogSocket

	def flushLogs(self):
		if self.__logTarget not in ['STDERR', 'STDOUT', 'SYSLOG']:
			for handler in getLogger("fail2ban").handlers:
				try:
					handler.doRollover()
					logSys.info("rollover performed on %s" % self.__logTarget)
				except AttributeError:
					handler.flush()
					logSys.info("flush performed on %s" % self.__logTarget)
			return "rolled over"
		else:
			for handler in getLogger("fail2ban").handlers:
				handler.flush()
				logSys.info("flush performed on %s" % self.__logTarget)
			return "flushed"
			
	def setThreadOptions(self, value):
		for o, v in value.items():
			if o == 'stacksize':
				threading.stack_size(int(v)*1024)
			else: # pragma: no cover
				raise KeyError("unknown option %r" % o)

	def getThreadOptions(self):
		return {'stacksize': threading.stack_size() // 1024}

	def setDatabase(self, filename):
		# if not changed - nothing to do
		if self.__db and self.__db.filename == filename:
			return
		if not self.__db and filename.lower() == 'none':
			return
		if len(self.__jails) != 0:
			raise RuntimeError(
				"Cannot change database when there are jails present")
		if filename.lower() == "none":
			self.__db = None
		else:
			if Fail2BanDb is not None:
				self.__db = Fail2BanDb(filename)
				self.__db.delAllJails()
			else: # pragma: no cover
				logSys.error(
					"Unable to import fail2ban database module as sqlite "
					"is not available.")
		if Observers.Main is not None:
			Observers.Main.db_set(self.__db)
	
	def getDatabase(self):
		return self.__db

	def __createDaemon(self): # pragma: no cover
		""" Detach a process from the controlling terminal and run it in the
			background as a daemon.
		
			http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
		"""
	
		# When the first child terminates, all processes in the second child
		# are sent a SIGHUP, so it's ignored.

		# We need to set this in the parent process, so it gets inherited by the
		# child process, and this makes sure that it is in effect even if the parent
		# terminates quickly.
		self._rebindSignal(signal.SIGHUP, signal.SIG_IGN)

		try:
			# Fork a child process so the parent can exit.  This will return control
			# to the command line or shell.  This is required so that the new process
			# is guaranteed not to be a process group leader.  We have this guarantee
			# because the process GID of the parent is inherited by the child, but
			# the child gets a new PID, making it impossible for its PID to equal its
			# PGID.
			pid = os.fork()
		except OSError as e:
			return (False, (e.errno, e.strerror))	 # ERROR (return a tuple)
		
		if pid == 0:	   # The first child.
	
			# Next we call os.setsid() to become the session leader of this new
			# session.  The process also becomes the process group leader of the
			# new process group.  Since a controlling terminal is associated with a
			# session, and this new session has not yet acquired a controlling
			# terminal our process now has no controlling terminal.  This shouldn't
			# fail, since we're guaranteed that the child is not a process group
			# leader.
			os.setsid()
		
			try:
				# Fork a second child to prevent zombies.  Since the first child is
				# a session leader without a controlling terminal, it's possible for
				# it to acquire one by opening a terminal in the future.  This second
				# fork guarantees that the child is no longer a session leader, thus
				# preventing the daemon from ever acquiring a controlling terminal.
				pid = os.fork()		# Fork a second child.
			except OSError as e:
				return (False, (e.errno, e.strerror))  # ERROR (return a tuple)
		
			if (pid == 0):	  # The second child.
				# Ensure that the daemon doesn't keep any directory in use.  Failure
				# to do this could make a filesystem unmountable.
				os.chdir("/")
			else:
				os._exit(0)	  # Exit parent (the first child) of the second child.
		else:
			# Signal to exit, parent of the first child.
			return None
	
		# Close all open files.  Try the system configuration variable, SC_OPEN_MAX,
		# for the maximum number of open files to close.  If it doesn't exist, use
		# the default value (configurable).
		try:
			maxfd = os.sysconf("SC_OPEN_MAX")
		except (AttributeError, ValueError):
			maxfd = 256	   # default maximum
	
		# urandom should not be closed in Python 3.4.0. Fixed in 3.4.1
		# http://bugs.python.org/issue21207
		if sys.version_info[0:3] == (3, 4, 0): # pragma: no cover
			urandom_fd = os.open("/dev/urandom", os.O_RDONLY)
			for fd in range(0, maxfd):
				try:
					if not os.path.sameopenfile(urandom_fd, fd):
						os.close(fd)
				except OSError:   # ERROR (ignore)
					pass
			os.close(urandom_fd)
		else:
			os.closerange(0, maxfd)
	
		# Redirect the standard file descriptors to /dev/null.
		os.open("/dev/null", os.O_RDONLY)	# standard input (0)
		os.open("/dev/null", os.O_RDWR)		# standard output (1)
		os.open("/dev/null", os.O_RDWR)		# standard error (2)
		return (True,)
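
For reference, a minimal sketch of the same double-fork daemonization idea, heavily simplified (hypothetical daemonize() helper; the implementation above additionally closes every descriptor and reports fork errors back to the caller):

import os
import sys

def daemonize():
    # First fork: the parent returns control to the shell.
    if os.fork() > 0:
        sys.exit(0)
    # Become session leader, dropping the controlling terminal.
    os.setsid()
    # Second fork: the session leader exits, so the daemon can never
    # re-acquire a controlling terminal.
    if os.fork() > 0:
        os._exit(0)
    # Don't keep any directory in use.
    os.chdir("/")
    # Redirect the standard streams to /dev/null.
    devnull = os.open(os.devnull, os.O_RDWR)
    for fd in (0, 1, 2):
        os.dup2(devnull, fd)
    if devnull > 2:
        os.close(devnull)
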
Example #39
0
class QueryRunner(StreamListener):
    def __init__(self):
        register_default_functions()
        StreamListener.__init__(self)
        try:
            self.consumer_key = settings.CONSUMER_KEY
            self.consumer_secret = settings.CONSUMER_SECRET
            self.access_token = settings.ACCESS_TOKEN
            self.access_token_secret = settings.ACCESS_TOKEN_SECRET
        except AttributeError:
            print "Check if CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, and ACCESS_TOKEN_SECRET are defined in settings.py"
            self.consumer_key = raw_input('Consumer key: ')
            self.consumer_secret = getpass('Consumer secret: ')
            self.access_token = raw_input('Access token: ')
            self.access_token_secret = getpass('Access token secret: ')
        self.status_lock = RLock()
        self.statuses = []
        self.query_builder = gen_query_builder()
        self.stream = None

    def build_stream(self):
        if self.stream != None:
            self.stop_query()
            time.sleep(.01)  # make sure old stream has time to disconnect
        oauth = OAuthHandler(self.consumer_key, self.consumer_secret)
        oauth.set_access_token(self.access_token, self.access_token_secret)
        self.stream = Stream(
            oauth,  # do OAuthentication for stream
            self,  # this object implements StreamListener
            timeout=600,  # reconnect if no messages in 600s
            retry_count=20,  # try reconnecting 20 times
            retry_time=10.0,  # wait 10s if no HTTP 200
            snooze_time=1.0)  # wait 1s if timeout in 600s

    def run_built_query(self, query_built, async):
        self.build_stream()
        self.query = query_built
        self.query.handler.set_tuple_descriptor(
            self.query.get_tuple_descriptor())
        if self.query.source == StatusSource.TWITTER_FILTER:
            no_filter_exception = QueryException(
                "You haven't specified any filters that can query Twitter.  Perhaps you want to query TWITTER_SAMPLE?"
            )
            try:
                (follow_ids,
                 track_words) = self.query.query_tree.filter_params()
                if (follow_ids == None) and (track_words == [None]):
                    raise no_filter_exception
                self.stream.filter(follow_ids, track_words, async)
            except NotImplementedError:
                raise no_filter_exception
        elif self.query.source == StatusSource.TWITTER_SAMPLE:
            self.stream.sample(None, async)

    def run_query(self, query_str, async):
        if isinstance(query_str, str):
            query_str = unicode(query_str, 'utf-8')
        query_built = self.query_builder.build(query_str)
        self.run_built_query(query_built, async)

    def stop_query(self):
        if self.stream != None:
            self.stream.disconnect()
            self.flush_statuses()

    def filter_statuses(self, statuses, query):
        (passes, fails) = query.query_tree.filter(statuses, True, False)
        query.handler.handle_statuses(passes)

    def flush_statuses(self):
        self.status_lock.acquire()
        if len(self.statuses) > 0:
            filter_func = lambda s=self.statuses, q=self.query: self.filter_statuses(
                s, q)
            t = Thread(target=filter_func)
            t.start()
            self.statuses = []
        self.status_lock.release()

    """ StreamListener methods """

    def on_status(self, status):
        self.status_lock.acquire()
        t = Tuple()
        t.set_tuple_descriptor(None)
        t.set_data(status.__dict__)
        self.statuses.append(t)
        if len(self.statuses) >= self.query.handler.batch_size:
            self.flush_statuses()
        self.status_lock.release()

    def on_error(self, status_code):
        print 'An error has occurred! Status code = %s' % status_code
        return True  # keep stream alive

    def on_timeout(self):
        print 'Snoozing Zzzzzz'

    def on_limit(self, limit_data):
        print "Twitter rate-limited this query.  Since query start, Twitter dropped %d messages." % (
            limit_data)
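
The on_status/flush_statuses pair above is a batch-and-flush pattern: statuses accumulate under the RLock, and once a batch fills they are handed off to a worker thread. A condensed, hypothetical sketch of the same idea (the Batcher class and the process callback are assumptions, not part of the example):

from threading import RLock, Thread

class Batcher(object):
    def __init__(self, batch_size, process):
        self.lock = RLock()
        self.items = []
        self.batch_size = batch_size
        self.process = process          # callable applied to each flushed batch

    def add(self, item):
        with self.lock:
            self.items.append(item)
            if len(self.items) >= self.batch_size:
                # RLock is re-entrant, so calling flush() while already
                # holding the lock is safe.
                self.flush()

    def flush(self):
        with self.lock:
            batch, self.items = self.items, []
        if batch:
            # Process outside the lock so add() is never blocked for long.
            Thread(target=self.process, args=(batch,)).start()
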
Example #40
0
File: worker.py Project: dynilath/xeHentai
class Monitor(Thread):
    def __init__(self, req, proxy, logger, task, exit_check=None, ignored_errors=[]):
        Thread.__init__(self, name = "monitor%s" % task.guid)
        Thread.setDaemon(self, True)
        # the count of votes per error code
        self.vote_result = {}
        # the error codes to be ignored
        self.vote_cleared = set().union(ignored_errors)
        self.thread_last_seen = {}
        self.dctlock = RLock()
        self.votelock = RLock()
        self.thread_ref = {}
        self.thread_zombie = set()
        # HttpReq instance
        self.req = req
        # proxy.Pool instance
        self.proxy = proxy
        self.logger = logger
        self.task = task
        self._exit = exit_check if exit_check else lambda x: False
        self._cleaning_up = False

        if os.name == "nt":
            self.set_title = lambda s:os.system("TITLE %s" % (
                s if PY3K else s.encode(CODEPAGE, 'replace')))
        elif os.name == 'posix':
            import sys
            self.set_title = lambda s:sys.stdout.write("\033]2;%s\007" % (
                s if PY3K else s.encode(CODEPAGE, 'replace')))

    def set_vote_ns(self, tnames):
        t = time.time()
        self.thread_last_seen = {k:t for k in tnames}

    def vote(self, tname, code):
        # thread_id, result_code
        self.votelock.acquire()
        if code != ERR_NO_ERROR:
            self.logger.verbose("t-%s vote:%s" % (tname, code))
        if code not in self.vote_result:
            self.vote_result[code] = 1
        else:
            self.vote_result[code] += 1
        self.votelock.release()

    def wrk_keepalive(self, wrk_thread, _exit = False):
        tname = wrk_thread.name
        if tname in self.thread_zombie:
            self.thread_zombie.remove(tname)
        # all images downloaded
        # task is finished or failed
        # monitor is exiting or worker notified its exit
        _ = self.task.meta['finished'] == self.task.meta['total'] or \
            self.task.state in (TASK_STATE_FINISHED, TASK_STATE_FAILED) or \
            self._exit("mon") or _exit
        # self.logger.verbose("mon#%s %s ask, %s, %s" % (self.task.guid, tname, _,
        #    self.thread_last_seen))

        if _ or not wrk_thread.is_alive():
            self.dctlock.acquire()
            if tname in self.thread_last_seen:
                del self.thread_last_seen[tname]
            if tname in self.thread_ref:
                del self.thread_ref[tname]
            self.dctlock.release()
        else:
            self.thread_last_seen[tname] = time.time()
            if tname not in self.thread_ref:
                self.thread_ref[tname] = wrk_thread
        return _

    # def _rescan_pages(self):
    #     # not using
    #     # throw away existing page urls
    #     while True:
    #         try:
    #             self.task.page_q.get(False)
    #         except Empty:
    #             break
    #     # put page into task.list_q
    #     [self.task.list_q.put("%s/?p=%d" % (self.task.url, x)
    #         for x in range(1, 1 + int(math.ceil(self.task.meta['total']/20.0))))
    #     ]
    #     print(self.task.list_q.qsize())

    def _check_vote(self):
        if False and ERR_IMAGE_RESAMPLED in self.vote_result and ERR_IMAGE_RESAMPLED not in self.vote_cleared:
            self.logger.warning(i18n.TASK_START_PAGE_RESCAN % self.task.guid)
            self._rescan_pages()
            self.task.meta['has_ori'] = True
            self.vote_cleared.add(ERR_IMAGE_RESAMPLED)
        elif ERR_QUOTA_EXCEEDED in self.vote_result and \
            ERR_QUOTA_EXCEEDED not in self.vote_cleared and \
            self.vote_result[ERR_QUOTA_EXCEEDED] >= len(self.thread_last_seen):
            self.logger.error(i18n.TASK_STOP_QUOTA_EXCEEDED % self.task.guid)
            self.task.state = TASK_STATE_FAILED

    def run(self):
        CHECK_INTERVAL = 10
        STUCK_INTERVAL = 90
        intv = 0
        self.set_title(i18n.TASK_START % self.task.guid)
        last_change = time.time()
        last_finished = -1
        while len(self.thread_last_seen) > 0:
            intv += 1
            thread_working = 0
            thread_with_pages = 0
            self._check_vote()
            for k in list(self.thread_last_seen.keys()):
                _zombie_threshold = self.thread_ref[k].zombie_threshold if k in self.thread_ref else 30
                if time.time() - self.thread_last_seen[k] > _zombie_threshold:
                    if k in self.thread_ref and self.thread_ref[k].is_alive():
                        self.logger.warning(i18n.THREAD_MAY_BECOME_ZOMBIE % k)
                        self.thread_zombie.add(k)
                    else:
                        self.logger.warning(i18n.THREAD_SWEEP_OUT % k)
                    del self.thread_last_seen[k]

            for t in self.thread_ref.values():
                if t.is_working():
                    thread_working += 1

            if intv == CHECK_INTERVAL:
                _ = "%s %dW/%dR/%dZ, %s %dR/%dD/%dA" % (
                    i18n.THREAD,
                    thread_working, len(self.thread_last_seen), len(self.thread_zombie),
                    i18n.QUEUE,
                    self.task.img_q.qsize() if self.task.img_q else 0,
                    self.task.meta['finished'], self.task.meta['total'])
                self.logger.info(_)
                self.set_title(_)
                intv = 0
                # if not downloading any new images in 1.5 min, exit
                if last_finished != self.task.meta['finished']:
                    last_change = time.time()
                    last_finished = self.task.meta['finished']
                else:
                    if time.time() - last_change > STUCK_INTERVAL:
                        self.logger.warning(i18n.TASK_STUCK % self.task.guid)
                        last_change = time.time()
                        CHECK_INTERVAL *= 2
                        # break
            time.sleep(0.5)
        if self.task.meta['finished'] == self.task.meta['total']:
            # renaming finishes along with the download process
            self.set_title(i18n.TASK_FINISHED % self.task.guid)
            self.logger.info(i18n.TASK_FINISHED % self.task.guid)
            self.task.state = TASK_STATE_FINISHED
        self.task.cleanup()
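
A note on style: the acquire()/release() pairs in vote() and wrk_keepalive() work, but a with statement releases the lock even if the body raises. A hedged rewrite of vote() in that form (same behavior, not the project's code):

    def vote(self, tname, code):
        # thread_id, result_code
        with self.votelock:
            if code != ERR_NO_ERROR:
                self.logger.verbose("t-%s vote:%s" % (tname, code))
            self.vote_result[code] = self.vote_result.get(code, 0) + 1
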
Example #41
0
class EventLog(NilEventLog):
    """An EventLog records events, aggregates them according to some time
       periods, and logs the totals to disk.

       Currently we retain two log files: one holds an interval-by-interval
       human-readable record of past intervals; the other holds a pickled
       record of events in the current interval.

       We take some pains to avoid flushing the statistics when too
       little time has passed.  We only rotate an aggregated total to disk
       when:
           - An interval has passed since the last rotation time
         AND
           - We have accumulated events for at least 75% of an interval's
             worth of time.

       The second requirement prevents the following unpleasant failure mode:
           - We set the interval to '1 day'.  At midnight on Monday,
             we rotate.  At 00:05, we go down.  At 23:55 we come back
             up.  At midnight on Tuesday, we notice that it's been one
             day since the last rotation, and rotate again -- thus making
             a permanent record that reflects 10 minutes worth of traffic,
             potentially exposing more about individual users than we should.
    """

    ### Fields:
    # count: a map from event name -> argument|None -> total events received.
    # lastRotation: the time at which we last flushed the log to disk and
    #     reset the log.
    # filename, historyFile: Names of the pickled and long-term event logs.
    # rotateInterval: Interval after which to flush the current statistics
    #     to disk.
    # _lock: a threading.RLock object that must be held when modifying this
    #     object.
    # accumulatedTime: number of seconds since last rotation that we have
    #     been logging events.
    # lastSave: last time we saved the file.
    ### Pickled format:
    # Map from {"count","lastRotation","accumulatedTime"} to the values
    # for those fields.
    def __init__(self, filename, historyFile, interval):
        """Initializes an EventLog that caches events in 'filename', and
           periodically writes to 'historyFile' every 'interval' seconds."""
        NilEventLog.__init__(self)
        if os.path.exists(filename):
            self.__dict__.update(readPickled(filename))
            assert self.count is not None
            assert self.lastRotation is not None
            assert self.accumulatedTime is not None
            for e in _EVENTS:
                if not self.count.has_key(e):
                    self.count[e] = {}
        else:
            self.count = {}
            for e in _EVENTS:
                self.count[e] = {}
            self.lastRotation = time()
            self.accumulatedTime = 0
        self.filename = filename
        self.historyFilename = historyFile
        for fn in filename, historyFile:
            parent = os.path.split(fn)[0]
            createPrivateDir(parent)
        self.rotateInterval = interval
        self.lastSave = time()
        self._setNextRotation()
        self._lock = RLock()
        self.save()

    def save(self, now=None):
        """Write the statistics in this log to disk, rotating if necessary."""
        try:
            self._lock.acquire()
            self._save(now)
        finally:
            self._lock.release()

    def _save(self, now=None):
        """Implements 'save' method.  For internal use.  Must hold self._lock
           to invoke."""
        LOG.debug("Syncing statistics to disk")
        if not now: now = time()
        tmpfile = self.filename + "_tmp"
        tryUnlink(tmpfile)
        self.accumulatedTime += int(now - self.lastSave)
        self.lastSave = now
        writePickled(
            self.filename, {
                'count': self.count,
                'lastRotation': self.lastRotation,
                'accumulatedTime': self.accumulatedTime,
            })

    def _log(self, event, arg=None):
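        # Count one occurrence of (event, arg): a missing arg bucket is created
        # on demand, while an unknown event raises KeyError.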
        try:
            self._lock.acquire()
            try:
                self.count[event][arg] += 1
            except KeyError:
                try:
                    self.count[event][arg] = 1
                except KeyError:
                    raise KeyError("No such event: %r" % event)
        finally:
            self._lock.release()

    def getNextRotation(self):
        return self.nextRotation

    def rotate(self, now=None):
        if now is None: now = time()
        if now < self.nextRotation:
            raise MixError("Not ready to rotate event stats")
        try:
            self._lock.acquire()
            self._rotate(now)
        finally:
            self._lock.release()

    def _rotate(self, now=None):
        """Flush all events since the last rotation to the history file,
           and clears the current event log."""

        # Must hold lock
        LOG.debug("Flushing statistics log")
        if now is None: now = time()

        starting = not os.path.exists(self.historyFilename)
        f = open(self.historyFilename, 'a')
        if starting:
            f.write(BOILERPLATE)
        self.dump(f, now)
        f.close()

        self.count = {}
        for e in _EVENTS:
            self.count[e] = {}
        self.lastRotation = now
        self._save(now)
        self.accumulatedTime = 0
        self._setNextRotation(now)

    def dump(self, f, now=None):
        """Write the current data to a file handle 'f'."""
        if now is None: now = time()
        try:
            self._lock.acquire()
            startTime = self.lastRotation
            endTime = now
            print >> f, "========== From %s to %s:" % (formatTime(
                startTime, 1), formatTime(endTime, 1))
            for event in _EVENTS:
                count = self.count[event]
                if len(count) == 0:
                    print >> f, "  %s: 0" % event
                    continue
                elif len(count) == 1 and count.keys()[0] is None:
                    print >> f, "  %s: %s" % (event, count[None])
                    continue
                print >> f, "  %s:" % event
                total = 0
                args = count.keys()
                args.sort()
                length = max([len(str(arg)) for arg in args])
                length = max((length, 10))
                fmt = "    %" + str(length) + "s: %s"
                for arg in args:
                    v = count[arg]
                    if arg is None: arg = "{Unknown}"
                    print >> f, fmt % (arg, v)
                    total += v
                print >> f, fmt % ("Total", total)
        finally:
            self._lock.release()

    def _setNextRotation(self, now=None):
        """Helper function: calculate the time when we next rotate the log."""
        # ???? Lock to 24-hour cycle

        # This is a little weird.  We won't save *until*:
        #       - .75 * rotateInterval seconds are accumulated.
        #  AND  - rotateInterval seconds have elapsed since the last
        #         rotation.
        #
        # IF the rotation interval is divisible by one hour, we also
        #  round to the hour, up to 5 minutes down and 55 up.
        if not now: now = time()

        accumulatedTime = self.accumulatedTime + (now - self.lastSave)
        secToGo = max(0, self.rotateInterval * 0.75 - accumulatedTime)
        self.nextRotation = max(self.lastRotation + self.rotateInterval,
                                now + secToGo)

        if self.nextRotation < now:
            self.nextRotation = now

        if (self.rotateInterval % 3600) == 0:
            mid = previousMidnight(self.nextRotation)
            rest = self.nextRotation - mid
            self.nextRotation = mid + 3600 * floorDiv(rest + 55 * 60, 3600)
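
The rotation policy described in the class docstring reduces to a single predicate; a standalone sketch of it (hypothetical helper, assuming the same field meanings as above):

def ready_to_rotate(now, last_rotation, accumulated_time, rotate_interval):
    # Rotate only when a full interval has elapsed since the last rotation
    # AND events have actually been logged for at least 75% of an interval;
    # the second condition keeps a short-lived restart from producing a
    # near-empty, potentially privacy-leaking record.
    return (now - last_rotation >= rotate_interval
            and accumulated_time >= 0.75 * rotate_interval)
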
Example #42
0
File: door_epc.py Project: wklharry/hrl
class Door_EPC(epc.EPC):
    def __init__(self, robot):
        epc.EPC.__init__(self, robot)

        self.mech_kinematics_lock = RLock()
        self.fit_circle_lock = RLock()

        rospy.Subscriber('mechanism_kinematics_rot', MechanismKinematicsRot,
                         self.mechanism_kinematics_rot_cb)
        rospy.Subscriber('epc/stop', Empty, self.stop_cb)
        # used in the ROS stop_cb and equi_pt_generator_control_radial_force
        self.force_traj_pub = rospy.Publisher('epc/force_test',
                                              ForceTrajectory)
        self.mech_traj_pub = rospy.Publisher('mechanism_trajectory', Point32)

    def init_log(self):
        self.f_list = []
        self.f_list_ati = []
        self.f_list_estimate = []
        self.f_list_torques = []
        self.cep_list = []
        self.ee_list = []
        self.ft = ForceTrajectory()
        if self.mechanism_type != '':
            self.ft.type = self.mechanism_type
        else:
            self.ft.type = 'rotary'

    def log_state(self, arm):
        # only logging the right arm.
        f = self.robot.get_wrist_force_ati(arm, base_frame=True)
        self.f_list_ati.append(f.A1.tolist())

        f = self.robot.get_wrist_force_estimate(arm, base_frame=True)
        self.f_list_estimate.append(f.A1.tolist())

        f = self.robot.get_force_from_torques(arm)
        self.f_list_torques.append(f.A1.tolist())

        f = self.robot.get_wrist_force(arm, base_frame=True)
        self.f_list.append(f.A1.tolist())

        cep, _ = self.robot.get_cep_jtt(arm, hook_tip=True)
        self.cep_list.append(cep.A1.tolist())
        #        ee, _ = self.robot.get_ee_jtt(arm)
        ee, _ = self.robot.end_effector_pos(arm)
        self.ee_list.append(ee.A1.tolist())

        if self.started_pulling_on_handle == False:
            if f[0, 0] > 10.:
                self.started_pulling_on_handle_count += 1
            else:
                self.started_pulling_on_handle_count = 0
                self.init_log(
                )  # reset logs until started pulling on the handle.
                self.init_tangent_vector = None

            if self.started_pulling_on_handle_count > 1:
                self.started_pulling_on_handle = True

        return ''

    ## ROS callback. Stop and maintain position.
    def stop_cb(self, cmd):
        self.stopping_string = 'stop_cb called.'

    def common_stopping_conditions(self):
        stop = ''
        # right arm only.
        wrist_force = self.robot.get_wrist_force(0, base_frame=True)
        mag = np.linalg.norm(wrist_force)
        if mag > self.eq_force_threshold:
            stop = 'force exceed'

        if mag < 1.2 and self.hooked_location_moved:
            if (self.prev_force_mag - mag) > 30.:
                stop = 'slip: force step decrease and below threshold.'
            else:
                self.slip_count += 1
        else:
            self.slip_count = 0

        if self.slip_count == 10:
            stop = 'slip: force below threshold for too long.'
        return stop

    def mechanism_kinematics_rot_cb(self, mk):
        self.fit_circle_lock.acquire()
        self.cx_start = mk.cx
        self.cy_start = mk.cy
        self.cz_start = mk.cz
        self.rad = mk.rad
        self.fit_circle_lock.release()

    ## constantly update the estimate of the kinematics and move the
    # equilibrium point along the tangent of the estimated arc, and
    # try to keep the radial force constant.
    # @param h_force_possible - True (hook side) or False (hook up).
    # @param v_force_possible - False (hook side) or True (hook up).
    # Is maintaining a radial force possible or not (based on hook
    # geometry and orientation)
    # @param cep_vel - tangential velocity of the cep in m/s
    def cep_gen_control_radial_force(self, arm, cep, cep_vel):
        self.log_state(arm)
        if self.started_pulling_on_handle == False:
            cep_vel = 0.02

        #step_size = 0.01 * cep_vel
        step_size = 0.1 * cep_vel  # 0.1 is the time interval between calls to the equi_generator function (see pull)
        stop = self.common_stopping_conditions()
        wrist_force = self.robot.get_wrist_force(arm, base_frame=True)
        mag = np.linalg.norm(wrist_force)

        curr_pos, _ = self.robot.get_ee_jtt(arm)
        if len(self.ee_list) > 1:
            start_pos = np.matrix(self.ee_list[0]).T
        else:
            start_pos = curr_pos

        #mechanism kinematics.
        if self.started_pulling_on_handle:
            self.mech_traj_pub.publish(
                Point32(curr_pos[0, 0], curr_pos[1, 0], curr_pos[2, 0]))

        self.fit_circle_lock.acquire()
        rad = self.rad
        cx_start, cy_start = self.cx_start, self.cy_start
        cz_start = self.cz_start
        self.fit_circle_lock.release()
        cx, cy = cx_start, cy_start
        cz = cz_start
        print 'cx, cy, r:', cx, cy, rad

        radial_vec = curr_pos - np.matrix([cx, cy, cz]).T
        radial_vec = radial_vec / np.linalg.norm(radial_vec)
        if cy_start < start_pos[1, 0]:
            tan_x, tan_y = -radial_vec[1, 0], radial_vec[0, 0]
        else:
            tan_x, tan_y = radial_vec[1, 0], -radial_vec[0, 0]

        if tan_x > 0. and (start_pos[0, 0] - curr_pos[0, 0]) < 0.09:
            tan_x = -tan_x
            tan_y = -tan_y

        if cy_start > start_pos[1, 0]:
            radial_vec = -radial_vec  # axis to the left, want force in
            # anti-radial direction.
        rv = radial_vec
        force_vec = np.matrix([rv[0, 0], rv[1, 0], 0.]).T
        tangential_vec = np.matrix([tan_x, tan_y, 0.]).T

        tangential_vec_ts = tangential_vec
        radial_vec_ts = radial_vec
        force_vec_ts = force_vec

        if arm == 'right_arm' or arm == 0:
            if force_vec_ts[1, 0] < 0.:  # only allowing force to the left
                force_vec_ts = -force_vec_ts
        else:
            if force_vec_ts[1, 0] > 0.:  # only allowing force to the right
                force_vec_ts = -force_vec_ts

        f_vec = -1 * np.array(
            [wrist_force[0, 0], wrist_force[1, 0], wrist_force[2, 0]])
        f_rad_mag = np.dot(f_vec, force_vec.A1)
        err = f_rad_mag - 4.
        if err > 0.:
            kp = -0.1
        else:
            kp = -0.2
        radial_motion_mag = kp * err  # radial_motion_mag in cm (depends on eq_motion step size)
        radial_motion_vec = force_vec * radial_motion_mag
        print 'tangential_vec:', tangential_vec.A1
        eq_motion_vec = copy.copy(tangential_vec)
        eq_motion_vec += radial_motion_vec

        self.prev_force_mag = mag

        if self.init_tangent_vector == None or self.started_pulling_on_handle == False:
            self.init_tangent_vector = copy.copy(tangential_vec_ts)
        c = np.dot(tangential_vec_ts.A1, self.init_tangent_vector.A1)
        ang = np.arccos(c)
        if np.isnan(ang):
            ang = 0.

        tangential_vec = tangential_vec / np.linalg.norm(
            tangential_vec)  # paranoia about vectors not being unit vectors.
        dist_moved = np.dot((curr_pos - start_pos).A1, tangential_vec_ts.A1)
        ftan = abs(np.dot(wrist_force.A1, tangential_vec.A1))
        self.ft.tangential_force.append(ftan)
        self.ft.radial_force.append(f_rad_mag)

        if self.ft.type == 'rotary':
            self.ft.configuration.append(ang)
        else:  # drawer
            print 'dist_moved:', dist_moved
            self.ft.configuration.append(dist_moved)

        if self.started_pulling_on_handle:
            self.force_traj_pub.publish(self.ft)


#        if self.started_pulling_on_handle == False:
#            ftan_pull_test = -np.dot(wrist_force.A1, tangential_vec.A1)
#            print 'ftan_pull_test:', ftan_pull_test
#            if ftan_pull_test > 5.:
#                self.started_pulling_on_handle_count += 1
#            else:
#                self.started_pulling_on_handle_count = 0
#                self.init_log() # reset logs until started pulling on the handle.
#                self.init_tangent_vector = None
#
#            if self.started_pulling_on_handle_count > 1:
#                self.started_pulling_on_handle = True

        if abs(dist_moved) > 0.09 and self.hooked_location_moved == False:
            # change the force threshold once the hook has started pulling.
            self.hooked_location_moved = True
            self.eq_force_threshold = ut.bound(mag + 30., 20., 80.)
            self.ftan_threshold = 1.2 * self.ftan_threshold + 20.
        if self.hooked_location_moved:
            if abs(tangential_vec_ts[2,
                                     0]) < 0.2 and ftan > self.ftan_threshold:
                stop = 'ftan threshold exceed: %f' % ftan
        else:
            self.ftan_threshold = max(self.ftan_threshold, ftan)

        if self.hooked_location_moved and ang > math.radians(85.):
            print 'Angle:', math.degrees(ang)
            self.open_ang_exceed_count += 1
            if self.open_ang_exceed_count > 2:
                stop = 'opened mechanism through large angle: %.1f' % (
                    math.degrees(ang))
        else:
            self.open_ang_exceed_count = 0

        cep_t = cep + eq_motion_vec * step_size
        cep[0, 0] = cep_t[0, 0]
        cep[1, 0] = cep_t[1, 0]
        cep[2, 0] = cep_t[2, 0]

        print 'CEP:', cep.A1

        stop = stop + self.stopping_string
        return stop, (cep, None)

    def pull(self, arm, force_threshold, cep_vel, mechanism_type=''):
        self.mechanism_type = mechanism_type
        self.stopping_string = ''
        self.eq_pt_not_moving_counter = 0

        self.init_log()

        self.init_tangent_vector = None
        self.open_ang_exceed_count = 0.

        self.eq_force_threshold = force_threshold
        self.ftan_threshold = 2.
        self.hooked_location_moved = False  # flag to indicate when the hooking location started moving.
        self.prev_force_mag = np.linalg.norm(self.robot.get_wrist_force(arm))
        self.slip_count = 0

        self.started_pulling_on_handle = False
        self.started_pulling_on_handle_count = 0

        ee_pos, _ = self.robot.get_ee_jtt(arm)

        self.cx_start = ee_pos[0, 0]
        self.rad = 10.0
        self.cy_start = ee_pos[1, 0] - self.rad
        self.cz_start = ee_pos[2, 0]

        cep, _ = self.robot.get_cep_jtt(arm)
        arg_list = [arm, cep, cep_vel]
        result, _ = self.epc_motion(
            self.cep_gen_control_radial_force,
            0.1,
            arm,
            arg_list,
            self.log_state,
            #0.01, arm, arg_list,
            control_function=self.robot.set_cep_jtt)

        print 'EPC motion result:', result
        print 'Original force threshold:', force_threshold
        print 'Adapted force threshold:', self.eq_force_threshold
        print 'Adapted ftan threshold:', self.ftan_threshold

        d = {
            'f_list': self.f_list,
            'ee_list': self.ee_list,
            'cep_list': self.cep_list,
            'ftan_list': self.ft.tangential_force,
            'config_list': self.ft.configuration,
            'frad_list': self.ft.radial_force,
            'f_list_ati': self.f_list_ati,
            'f_list_estimate': self.f_list_estimate,
            'f_list_torques': self.f_list_torques
        }
        ut.save_pickle(d, 'pr2_pull_' + ut.formatted_time() + '.pkl')

    def search_and_hook(self,
                        arm,
                        hook_loc,
                        hooking_force_threshold=5.,
                        hit_threshold=15.,
                        hit_motions=1,
                        hook_direction='left'):
        # this needs to be debugged. Hardcoded for now.
        #if arm == 'right_arm' or arm == 0:
        #    hook_dir = np.matrix([0., 1., 0.]).T # hook direc in home position
        #    offset = -0.03
        #elif arm == 'left_arm' or arm == 1:
        #    hook_dir = np.matrix([0., -1., 0.]).T # hook direc in home position
        #    offset = -0.03
        #else:
        #    raise RuntimeError('Unknown arm: %s', arm)
        #start_loc = hook_loc + rot_mat.T * hook_dir * offset

        if hook_direction == 'left':
            #offset = np.matrix([0., -0.03, 0.]).T
            offset = np.matrix([0., -0.0, 0.]).T
            move_dir = np.matrix([0., 1., 0.]).T
        elif hook_direction == 'up':
            #offset = np.matrix([0., 0., -0.03]).T
            offset = np.matrix([0., 0., -0.0]).T
            move_dir = np.matrix([0., 0., 1.]).T
        start_loc = hook_loc + offset

        # vector normal to surface and pointing into the surface.
        normal_tl = np.matrix([1.0, 0., 0.]).T

        pt1 = start_loc - normal_tl * 0.1
        self.robot.go_cep_jtt(arm, pt1)

        #        raw_input('Hit ENTER to go')

        vec = normal_tl * 0.2
        rospy.sleep(1.)
        for i in range(hit_motions):
            s = self.move_till_hit(arm,
                                   vec=vec,
                                   force_threshold=hit_threshold,
                                   speed=0.07)

        cep_start, _ = self.robot.get_cep_jtt(arm)
        cep = copy.copy(cep_start)
        arg_list = [arm, move_dir, hooking_force_threshold, cep, cep_start]
        print 'Hi there.'
        s = self.epc_motion(self.cep_gen_surface_follow,
                            0.1,
                            arm,
                            arg_list,
                            control_function=self.robot.set_cep_jtt)
        print 'result:', s
        return s
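
mechanism_kinematics_rot_cb and cep_gen_control_radial_force above form a snapshot-under-lock pair: the ROS callback writes several related fields, and the control loop copies them under the same RLock so it never reads a half-updated estimate. A generic, hypothetical sketch of that pattern:

from threading import RLock

class SharedEstimate(object):
    def __init__(self, cx=0., cy=0., cz=0., rad=1.0):
        self._lock = RLock()
        self._values = (cx, cy, cz, rad)

    def update(self, cx, cy, cz, rad):
        # Called from the subscriber callback thread.
        with self._lock:
            self._values = (cx, cy, cz, rad)

    def snapshot(self):
        # Called from the control loop; the returned tuple is immutable,
        # so it can be used freely after the lock is released.
        with self._lock:
            return self._values
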
Example #43
0
class KernelConnection(object):
    """Interact with a Jupyter kernel."""

    class MessageReceiver(Thread):

        def __init__(self, kernel):
            """Initialize AsyncCommunicator class."""
            super().__init__()
            self._kernel = kernel
            self.exit = Event()

        def shutdown(self):
            self.exit.set()

    class ShellMessageReceiver(MessageReceiver):
        """Communicator that runs asynchroniously."""

        def run(self):
            """Main routine."""
            # TODO: log
            # TODO: remove view and regions from id2region
            while not self.exit.is_set():
                try:
                    msg = self._kernel.client.get_shell_msg(timeout=1)
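                    # Route the reply to the queue registered under its parent
                    # msg_id; the lock only guards the defaultdict lookup.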
                    self._kernel.shell_msg_queues_lock.acquire()
                    try:
                        queue = self._kernel.shell_msg_queues[msg['parent_header']['msg_id']]
                    finally:
                        self._kernel.shell_msg_queues_lock.release()
                    queue.put(msg)
                except Empty:
                    pass
                except Exception as ex:
                    self._kernel._logger.exception(ex)

    class IOPubMessageReceiver(MessageReceiver):
        """Receive and process IOPub messages."""

        def run(self):
            """Main routine."""
            # TODO: log, handle other message types.
            while not self.exit.is_set():
                try:
                    msg = self._kernel.client.get_iopub_msg(timeout=1)
                    self._kernel._logger.info(msg)
                    content = msg.get("content", dict())
                    execution_count = content.get("execution_count", None)
                    msg_type = msg['msg_type']
                    view, region = self._kernel.id2region.get(
                        msg['parent_header'].get('msg_id', None),
                        (None, None)
                    )
                    if msg_type == MSG_TYPE_STATUS:
                        self._kernel._execution_state = content['execution_state']
                    elif msg_type == MSG_TYPE_EXECUTE_INPUT:
                        self._kernel._write_text_to_view("\n\n")
                        self._kernel._output_input_code(content['code'], content['execution_count'])
                    elif msg_type == MSG_TYPE_ERROR:
                        self._kernel._logger.info("Handling error")
                        self._kernel._handle_error(
                            execution_count,
                            content["ename"],
                            content["evalue"],
                            content["traceback"],
                            region,
                            view,
                        )
                    elif msg_type == MSG_TYPE_DISPLAY_DATA:
                        self._kernel._write_mime_data_to_view(content["data"], region, view)
                    elif msg_type == MSG_TYPE_EXECUTE_RESULT:
                        self._kernel._write_mime_data_to_view(content["data"], region, view)
                    elif msg_type == MSG_TYPE_STREAM:
                        self._kernel._handle_stream(
                            content["name"],
                            content["text"],
                            region,
                            view,
                        )
                except Empty:
                    pass
                except Exception as ex:
                    self._kernel._logger.exception(ex)

    class StdInMessageReceiver(MessageReceiver):
        """Receive and process IOPub messages."""

        def _handle_input_request(self, prompt, password):
            def interrupt():
                self._kernel.interrupt_kernel()

            if password:
                show_password_input(prompt, self._kernel.client.input, interrupt)
            else:
                (sublime
                 .active_window()
                 .show_input_panel(
                     prompt,
                     "",
                     self._kernel.client.input,
                     lambda x: None,
                     interrupt
                 ))

        def run(self):
            """Main routine."""
            # TODO: log, handle other message types.
            while not self.exit.is_set():
                try:
                    msg = self._kernel.client.get_stdin_msg(timeout=1)
                    msg_type = msg['msg_type']
                    content = msg['content']
                    if msg_type == MSG_TYPE_INPUT_REQUEST:
                        self._handle_input_request(content["prompt"], content["password"])
                except Empty:
                    pass
                except Exception as ex:
                    self._kernel._logger.exception(ex)

    def _init_receivers(self):
        # Set the attributes referred to by the receivers before they start.
        self._shell_msg_receiver = self.ShellMessageReceiver(self)
        self._shell_msg_receiver.start()
        self._iopub_msg_receiver = self.IOPubMessageReceiver(self)
        self._iopub_msg_receiver.start()
        self._stdin_msg_receiver = self.StdInMessageReceiver(self)
        self._stdin_msg_receiver.start()

    def __init__(
        self,
        kernel_id,
        kernel_manager,
        parent,
        connection_name=None,
        logger=None,
    ):
        """Initialize KernelConnection class.

        Parameters
        ----------
        kernel_id : str
            Kernel ID.
        kernel_manager
            Manager of the kernel to connect to.
        parent
            Parent kernel manager.
        connection_name : str, optional
            Name of the connection shown in the view title.
        logger : optional
            Logger instance.
        """
        self._logger = logger
        self.shell_msg_queues = defaultdict(Queue)
        self._kernel_id = kernel_id
        self.parent = parent
        self.kernel_manager = kernel_manager
        self.client = self.kernel_manager.client()
        self.client.start_channels()
        self.shell_msg_queues_lock = RLock()
        self.id2region = {}
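        # maps an execute request's msg_id to the (view, region) its output goes to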
        self._connection_name = connection_name
        self._execution_state = 'unknown'
        self._init_receivers()

    def __del__(self):
        self._shell_msg_receiver.shutdown()
        self._iopub_msg_receiver.shutdown()
        self._stdin_msg_receiver.shutdown()

    @property
    def lang(self):
        """Language of kernel."""
        return self.kernel_manager.kernel_name

    @property
    def kernel_id(self):
        """ID of kernel."""
        return self._kernel_id

    def shutdown_kernel(self):
        self.kernel_manager.shutdown_kernel()

    def restart_kernel(self):
        self.kernel_manager.restart_kernel()

    def interrupt_kernel(self):
        self.kernel_manager.interrupt_kernel()

    def get_connection_name(self):
        return self._connection_name

    def set_connection_name(self, new_name):
        # We also have to change the view name now.
        view = self.get_view()
        self._connection_name = new_name
        view.set_name(self.view_name)

    def del_connection_name(self):
        self._connection_name = None

    connection_name = property(
        get_connection_name,
        set_connection_name,
        del_connection_name,
        "Name of kernel connection shown in a view title.")

    @property
    def view_name(self):
        """The name of output view."""
        return "*Hermes Output* {repr}".format(repr=self.repr)

    @property
    def repr(self):
        """A string used as the representation of the connection"""
        if self.connection_name:
            return "{connection_name} ([{lang}] {kernel_id})".format(
                connection_name=self.connection_name,
                lang=self.lang,
                kernel_id=self.kernel_id)
        else:
            return "[{lang}] {kernel_id}".format(
                lang=self.lang,
                kernel_id=self.kernel_id)

    @property
    def execution_state(self):
        return self._execution_state

    @property
    def _show_inline_output(self):
        return (sublime
                .load_settings("Hermes.sublime-settings")
                .get("inline_output"))

    def activate_view(self):
        """Activate view to show the output of kernel."""
        view = self.get_view()
        current_view = sublime.active_window().active_view()
        sublime.active_window().focus_view(view)
        view.set_scratch(True)  # avoids prompting to save
        view.settings().set("word_wrap", "false")
        sublime.active_window().focus_view(current_view)

    def _output_input_code(self, code, execution_count):
        line = "In[{execution_count}]: {code}".format(
            execution_count=execution_count,
            code=code)
        self._write_text_to_view(line)

    def _handle_error(
        self,
        execution_count,
        ename,
        evalue,
        traceback,
        region: sublime.Region = None,
        view: sublime.View = None
    ) -> None:
        try:
            lines = "\nError[{execution_count}]: {ename}, {evalue}.\nTraceback:\n{traceback}".format(
                execution_count=execution_count,
                ename=ename,
                evalue=evalue,
                traceback="\n".join(traceback))
            lines = remove_ansi_escape(lines)
            self._write_text_to_view(lines)
            if region is not None:
                phantom_html = STREAM_PHANTOM.format(
                    name='error',
                    content=fix_whitespace_for_phantom(lines)
                )
                self._write_inline_html_phantom(phantom_html, region, view)
        except AttributeError:
            # There simply was no error to report.
            pass

    def _handle_stream(
        self,
        name,
        text,
        region: sublime.Region = None,
        view: sublime.View = None
    ) -> None:
        # Currently don't consider real time catching of streams.
        try:
            lines = "\n({name}):\n{text}".format(name=name, text=text)
            phantom_html = STREAM_PHANTOM.format(name=name, content=fix_whitespace_for_phantom(text))
            self._write_text_to_view(lines)
            if phantom_html and (region is not None):
                self._write_inline_html_phantom(phantom_html, region, view)
        except AttributeError:
            # There simply was no error to report.
            pass

    def _write_out_execution_count(self, execution_count) -> None:
        self._write_text_to_view("\nOut[{}]: ".format(execution_count))

    def _write_text_to_view(self, text: str) -> None:
        if self._show_inline_output:
            return
        self.activate_view()
        view = self.get_view()
        view.set_read_only(False)
        view.run_command(
            'append',
            {'characters': text})
        view.set_read_only(True)
        view.show(view.size())

    def _write_phantom(self, content: str):
        if self._show_inline_output:
            return
        self.activate_view()
        file_size = self.get_view().size()
        region = sublime.Region(file_size, file_size)
        self.get_view().add_phantom(
            HERMES_FIGURE_PHANTOMS,
            region,
            content,
            sublime.LAYOUT_BLOCK)
        self._logger.info("Created phantom {}".format(content))

    def _write_inline_html_phantom(self, content: str, region: sublime.Region, view: sublime.View):
        if self._show_inline_output:
            # region = self._inline_view.sel()[-1]
            id = HERMES_FIGURE_PHANTOMS + datetime.now().isoformat()
            html = TEXT_PHANTOM.format(content=content)
            view.add_phantom(
                id,
                region,
                html,
                sublime.LAYOUT_BLOCK,
                on_navigate=lambda href, id=id: view.erase_phantoms(id))
            self._logger.info("Created inline phantom {}".format(html))

    def _write_inline_image_phantom(self, data: str, region: sublime.Region, view: sublime.View):
        if self._show_inline_output:
            # region = self._inline_view.sel()[-1]
            id = HERMES_FIGURE_PHANTOMS + datetime.now().isoformat()
            html = IMAGE_PHANTOM.format(data=data)
            view.add_phantom(
                id,
                region,
                html,
                sublime.LAYOUT_BLOCK,
                on_navigate=lambda href, id=id: view.erase_phantoms(id))
            self._logger.info("Created inline phantom image")

    def _write_mime_data_to_view(self, mime_data: dict, region: sublime.Region, view: sublime.View) -> None:
        # For text output we basically use text/plain.
        # Jupyter kernels often emit HTML that minihtml cannot render.
        if 'text/plain' in mime_data:
            content = mime_data["text/plain"]
            lines = "\n(display data): {content}".format(content=content)
            self._write_text_to_view(lines)
            self._write_inline_html_phantom(
                fix_whitespace_for_phantom(content),
                region,
                view
            )
        elif 'text/html' in mime_data:
            self._logger.info("Caught 'text/html' output without plain text. Try to show with phantom.")
            content = mime_data["text/html"]
            self._write_phantom(content)
            self._write_inline_html_phantom(content, region, view)

        if "image/png" in mime_data:
            data = mime_data["image/png"].strip()
            self._logger.info("Caught image.")
            self._logger.info("RELOADED -------------=================")
            content = (
                '<body style="background-color:white">' +
                '<img alt="Out" src="data:image/png;base64,{data}" />' +
                '</body>'
            ).format(
                data=data,
                bgcolor="white")
            self._write_phantom(content)
            self._write_inline_image_phantom(data, region, view)

    def _handle_inspect_reply(self, reply: dict):
        window = sublime.active_window()
        if window.find_output_panel(HERMES_OBJECT_INSPECT_PANEL) is not None:
            window.destroy_output_panel(HERMES_OBJECT_INSPECT_PANEL)
        view = window.create_output_panel(HERMES_OBJECT_INSPECT_PANEL)
        try:
            self._logger.debug(reply)
            text = remove_ansi_escape(reply["text/plain"])
            view.run_command(
                'append',
                {'characters': text})
            window.run_command(
                'show_panel',
                dict(panel="output." + HERMES_OBJECT_INSPECT_PANEL))
        except KeyError as ex:
            self._logger.exception(ex)

    def get_view(self):
        """Get the view corresponding to this KernelConnection, creating it if needed."""
        view_name = self.view_name
        window = sublime.active_window()
        for view_candidate in window.views():
            if view_candidate.name() == view_name:
                return view_candidate
        # No matching view exists yet: create one and, if there are several
        # groups, place it in a neighboring group.
        active_group = window.active_group()
        view = window.new_file()
        view.set_name(view_name)
        num_group = window.num_groups()
        if num_group != 1:
            if active_group + 1 < num_group:
                new_group = active_group + 1
            else:
                new_group = active_group - 1
            window.set_view_index(
                view,
                new_group,
                len(window.sheets_in_group(new_group))
            )
        return view

    def execute_code(self, code, phantom_region, view):
        """Run code with Jupyter kernel."""
        msg_id = self.client.execute(code)
        self.id2region[msg_id] = view, sublime.Region(phantom_region.end(), phantom_region.end())
        info_message = "Kernel executed code ```{code}```.".format(code=code)
        self._logger.info(info_message)

    def is_alive(self):
        """Return True if kernel is alive."""
        return self.client.hb_channel.is_beating()

    def get_complete(self, code, cursor_pos, timeout=None):
        """Generate complete request."""
        if self.execution_state != 'idle':
            return []
        msg_id = self.client.complete(code, cursor_pos)
        self.shell_msg_queues_lock.acquire()
        try:
            queue = self.shell_msg_queues[msg_id]
        finally:
            self.shell_msg_queues_lock.release()

        try:
            recv_msg = queue.get(timeout=timeout)
            recv_content = recv_msg['content']
            self._logger.info(recv_content)
            if '_jupyter_types_experimental' in recv_content.get('metadata', {}):
                # If the reply has typing metadata, use it.
                # This metadata for typing is obviously experimental
                # and not documented yet.
                return [
                    (match['text'] + '\t' + ('<no type info>' if match['type'] is None else match['type']),
                     match['text'])
                    for match
                    in recv_content['metadata']['_jupyter_types_experimental']
                ]
            else:
                # Otherwise, just mark that the completion came from this plugin.
                return [
                    (match + '\tHermes', match)
                    for match
                    in recv_content['matches']
                ]
        except Empty:
            self._logger.info("Completion timeout.")
        except Exception as ex:
            self._logger.exception(ex)
        finally:
            self.shell_msg_queues_lock.acquire()
            try:
                self.shell_msg_queues.pop(msg_id, None)
            finally:
                self.shell_msg_queues_lock.release()

        return []

    def get_inspection(self, code, cursor_pos, detail_level=0, timeout=None):
        """Get object inspection by sending a `inspect_request` message to kernel."""
        msg_id = self.client.inspect(code, cursor_pos, detail_level)
        self.shell_msg_queues_lock.acquire()
        try:
            queue = self.shell_msg_queues[msg_id]
        finally:
            self.shell_msg_queues_lock.release()

        try:
            recv_msg = queue.get(timeout=timeout)
            self._handle_inspect_reply(recv_msg['content']['data'])
        except Empty:
            self._logger.info("Object inspection timeout.")

        finally:
            self.shell_msg_queues_lock.acquire()
            try:
                self.shell_msg_queues.pop(msg_id, None)
            finally:
                self.shell_msg_queues_lock.release()
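
Both get_complete() and get_inspection() above share one pattern: each shell request gets its own reply queue inside a dict guarded by an RLock, and the entry is always popped in a finally block, even when the wait times out. Below is a minimal sketch of just that pattern, stripped of the Sublime/Jupyter specifics; ReplyRouter and its method names are made up for illustration.

# Minimal, self-contained sketch of the RLock-guarded reply-queue pattern.
# ReplyRouter is a hypothetical name, not part of the Hermes plugin.
from collections import defaultdict
from queue import Empty, Queue
from threading import RLock


class ReplyRouter:
    def __init__(self):
        self._queues = defaultdict(Queue)
        self._lock = RLock()

    def deliver(self, msg_id, reply):
        # Called from the thread that listens on the shell channel.
        with self._lock:
            queue = self._queues[msg_id]
        queue.put(reply)

    def wait_for(self, msg_id, timeout=None):
        # Called from the thread that issued the request.
        with self._lock:
            queue = self._queues[msg_id]
        try:
            return queue.get(timeout=timeout)
        except Empty:
            return None
        finally:
            # Always drop the per-request entry, even on timeout.
            with self._lock:
                self._queues.pop(msg_id, None)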
예제 #44
0
class TelegramClient(TelegramBareClient):
    """Full featured TelegramClient meant to extend the basic functionality -

       As opposed to the TelegramBareClient, this one  features downloading
       media from different data centers, starting a second thread to
       handle updates, and some very common functionality.

       This should be used when the (slight) overhead of having locks,
       threads, and possibly multiple connections is not an issue.
    """

    # region Initialization

    def __init__(self,
                 session,
                 api_id,
                 api_hash,
                 proxy=None,
                 device_model=None,
                 system_version=None,
                 app_version=None,
                 lang_code=None,
                 timeout=timedelta(seconds=5),
                 session_base_path=None):
        """Initializes the Telegram client with the specified API ID and Hash.

           Session can either be a `str` object (filename for the .session)
           or it can be a `Session` instance (in which case list_sessions()
           would probably not work). Pass 'None' for it to be a temporary
           session - remember to '.log_out()'!

           Default values for the optional parameters if left as None are:
             device_model   = platform.node()
             system_version = platform.system()
             app_version    = TelegramClient.__version__
             lang_code      = 'en'
        """
        if not api_id or not api_hash:
            raise PermissionError(
                "Your API ID or Hash cannot be empty or None. "
                "Refer to Telethon's README.rst for more information.")

        # Determine what session object we have
        # TODO JsonSession until migration is complete (by v1.0)
        if isinstance(session, str) or session is None:
            session = JsonSession.try_load_or_create_new(
                session, base_path=session_base_path)
        elif not isinstance(session, Session):
            raise ValueError(
                'The given session must be a str or a Session instance.')

        super().__init__(session, api_id, api_hash, proxy, timeout=timeout)

        # Safety across multiple threads (for the updates thread)
        self._lock = RLock()

        # Updates-related members
        self._update_handlers = []
        self._updates_thread_running = Event()
        self._updates_thread_receiving = Event()

        self._next_ping_at = 0
        self.ping_interval = 60  # Seconds

        self._session_base_path = session_base_path

        # Used on connection - the user may modify these and reconnect
        if device_model:
            self.session.device_model = device_model

        if system_version:
            self.session.system_version = system_version

        self.session.app_version = \
            app_version if app_version else self.__version__

        if lang_code:
            self.session.lang_code = lang_code

        # Cache "exported" senders 'dc_id: MtProtoSender' and
        # their corresponding sessions not to recreate them all
        # the time since it's a (somewhat expensive) process.
        self._cached_clients = {}
        self._updates_thread = None
        self._phone_code_hashes = {}

    # endregion

    # region Connecting

    def connect(self, *args):
        """Connects to the Telegram servers, executing authentication if
           required. Note that authenticating to the Telegram servers is
           not the same as authenticating the desired user itself, which
           may require a call (or several) to 'sign_in' for the first time.

           The specified timeout will be used on internal .invoke()'s.

           *args will be ignored.
        """
        return super().connect()

    def disconnect(self):
        """Disconnects from the Telegram server
           and stops all the spawned threads"""
        self._set_updates_thread(running=False)
        super().disconnect()

        # Also disconnect all the cached senders
        for sender in self._cached_clients.values():
            sender.disconnect()

        self._cached_clients.clear()

    # endregion

    # region Working with different connections

    def _get_exported_client(self,
                             dc_id,
                             init_connection=False,
                             bypass_cache=False):
        """Gets a cached exported TelegramBareClient for the desired DC.

           If it's the first time retrieving the TelegramBareClient, the
           current authorization is exported to the new DC so that
           it can be used there, and the connection is initialized.

           If after using the sender a ConnectionResetError is raised,
           this method should be called again with init_connection=True
           in order to perform the reconnection.

           If bypass_cache is True, a new client will be exported and
           it will not be cached.
        """
        # Thanks badoualy/kotlogram on /telegram/api/DefaultTelegramClient.kt
        # for clearly showing how to export the authorization! ^^

        client = self._cached_clients.get(dc_id)
        if client and not bypass_cache:
            if init_connection:
                client.reconnect()
            return client
        else:
            dc = self._get_dc(dc_id)

            # Export the current authorization to the new DC.
            export_auth = self.invoke(ExportAuthorizationRequest(dc_id))

            # Create a temporary session for this IP address, which needs
            # to be different because each auth_key is unique per DC.
            #
            # Construct this session with the connection parameters
            # (system version, device model...) from the current one.
            session = JsonSession(self.session,
                                  base_path=self._session_base_path)
            session.server_address = dc.ip_address
            session.port = dc.port
            client = TelegramBareClient(session,
                                        self.api_id,
                                        self.api_hash,
                                        timeout=self._timeout)
            client.connect(exported_auth=export_auth)

            if not bypass_cache:
                # Don't go through this expensive process every time.
                self._cached_clients[dc_id] = client
            return client

    def create_new_connection(self, on_dc=None):
        """Creates a new connection which can be used in parallel
           with the original TelegramClient. A TelegramBareClient
           will be returned already connected, and the caller is
           responsible to disconnect it.

           If 'on_dc' is None, the new client will run on the same
           data center as the current client (most common case).

           If the client is meant to be used on a different data
           center, the data center ID should be specified instead.

           Note that TelegramBareClients will not handle automatic
           reconnection (i.e. switching to another data center to
           download media), and InvalidDCError will be raised in
           such case.
        """
        if on_dc is None:
            client = TelegramBareClient(self.session,
                                        self.api_id,
                                        self.api_hash,
                                        proxy=self.proxy)
            client.connect()
        else:
            client = self._get_exported_client(on_dc, bypass_cache=True)

        return client

    # endregion

    # region Telegram requests functions

    def invoke(self, request, *args):
        """Invokes (sends) a MTProtoRequest and returns (receives) its result.

           An optional timeout can be specified to cancel the operation if no
           result is received within such time, or None to disable any timeout.

           *args will be ignored.
        """
        if not issubclass(type(request), MTProtoRequest):
            raise ValueError('You can only invoke MtProtoRequests')

        if not self._sender:
            raise ValueError('You must be connected to invoke requests!')

        if self._updates_thread_receiving.is_set():
            self._sender.cancel_receive()

        # Acquire before entering the try block so the `finally` clause
        # never releases a lock this thread failed to acquire.
        self._lock.acquire()
        try:
            updates = [] if self._update_handlers else None
            result = super().invoke(request, updates=updates)

            if updates:
                for update in updates:
                    for handler in self._update_handlers:
                        handler(update)

            # TODO Retry if 'result' is None?
            return result

        except (PhoneMigrateError, NetworkMigrateError, UserMigrateError) as e:
            self._logger.info('DC error when invoking request, '
                              'attempting to reconnect at DC {}'.format(
                                  e.new_dc))

            self.reconnect(new_dc=e.new_dc)
            return self.invoke(request)

        finally:
            self._lock.release()

    def invoke_on_dc(self, request, dc_id, reconnect=False):
        """Invokes the given request on a different DC
           by making use of the exported MtProtoSenders.

           If 'reconnect=True', then a reconnection will be performed and
           ConnectionResetError will be raised if it occurs a second time.
        """
        try:
            client = self._get_exported_client(dc_id,
                                               init_connection=reconnect)

            return client.invoke(request)

        except ConnectionResetError:
            if reconnect:
                raise
            else:
                return self.invoke_on_dc(request, dc_id, reconnect=True)

    # region Authorization requests

    def is_user_authorized(self):
        """Has the user been authorized yet
           (code request sent and confirmed)?"""
        return self.session and self.get_me() is not None

    def send_code_request(self, phone_number):
        """Sends a code request to the specified phone number"""
        result = self.invoke(
            SendCodeRequest(phone_number, self.api_id, self.api_hash))

        self._phone_code_hashes[phone_number] = result.phone_code_hash

    def sign_in(self,
                phone_number=None,
                code=None,
                password=None,
                bot_token=None):
        """Completes the sign in process with the phone number + code pair.

           If no phone or code is provided, then the sole password will be used.
           The password should be used after a normal authorization attempt
           has happened and an RPCError with `.password_required = True` was
           raised.

           To login as a bot, only `bot_token` should be provided.
           This should be equal to the bot token provided by
           https://t.me/BotFather during your bot creation.

           If the login succeeds, the logged in user is returned.
        """
        if phone_number and code:
            if phone_number not in self._phone_code_hashes:
                raise ValueError(
                    'Please make sure to call send_code_request first.')

            try:
                result = self.invoke(
                    SignInRequest(phone_number,
                                  self._phone_code_hashes[phone_number], code))

            except (PhoneCodeEmptyError, PhoneCodeExpiredError,
                    PhoneCodeHashEmptyError, PhoneCodeInvalidError):
                return None

        elif password:
            salt = self.invoke(GetPasswordRequest()).current_salt
            result = self.invoke(
                CheckPasswordRequest(utils.get_password_hash(password, salt)))

        elif bot_token:
            result = self.invoke(
                ImportBotAuthorizationRequest(flags=0,
                                              bot_auth_token=bot_token,
                                              api_id=self.api_id,
                                              api_hash=self.api_hash))

        else:
            raise ValueError(
                'You must provide a phone_number and a code the first time, '
                'and a password only if an RPCError was raised before.')

        return result.user

    def sign_up(self, phone_number, code, first_name, last_name=''):
        """Signs up to Telegram. Make sure you sent a code request first!"""
        result = self.invoke(
            SignUpRequest(
                phone_number=phone_number,
                phone_code_hash=self._phone_code_hashes[phone_number],
                phone_code=code,
                first_name=first_name,
                last_name=last_name))

        self.session.user = result.user
        self.session.save()

    def log_out(self):
        """Logs out and deletes the current session.
           Returns True if everything went okay."""

        # Special flag when logging out (so the ack request confirms it)
        self._sender.logging_out = True
        try:
            self.invoke(LogOutRequest())
            self.disconnect()
            if not self.session.delete():
                return False

            self.session = None
            return True
        except (RPCError, ConnectionError):
            # Something happened when logging out, restore the state back
            self._sender.logging_out = False
            return False

    def get_me(self):
        """Gets "me" (the self user) which is currently authenticated,
           or None if the request fails (hence, not authenticated)."""
        try:
            return self.invoke(GetUsersRequest([InputUserSelf()]))[0]
        except UnauthorizedError:
            return None

    # endregion

    # region Dialogs ("chats") requests

    def get_dialogs(self,
                    limit=10,
                    offset_date=None,
                    offset_id=0,
                    offset_peer=InputPeerEmpty()):
        """Returns a tuple of lists ([dialogs], [entities])
           with at least 'limit' items each.

           If `limit` is 0, all dialogs will (should) be retrieved.
           The `entities` represent the user, chat or channel
           corresponding to that dialog.
        """

        r = self.invoke(
            GetDialogsRequest(offset_date=offset_date,
                              offset_id=offset_id,
                              offset_peer=offset_peer,
                              limit=limit))
        return (r.dialogs, [
            find_user_or_chat(d.peer, r.users, r.chats) for d in r.dialogs
        ])

    # endregion

    # region Message requests

    def send_message(self, entity, message, no_web_page=False):
        """Sends a message to the given entity (or input peer)
           and returns the sent message ID"""
        request = SendMessageRequest(peer=get_input_peer(entity),
                                     message=message,
                                     entities=[],
                                     no_webpage=no_web_page)
        result = self.invoke(request)
        msg_id = None
        if isinstance(result, UpdatesTg):
            if result.updates is not None:
                msg_id = next((update.id for update in result.updates
                               if isinstance(update, UpdateMessageID)), None)
        elif hasattr(result, 'id'):
            msg_id = result.id

        if msg_id is None:  # log a warning if no message id was found
            self._logger.warning(
                'send_message did not find a message id in the result: {}'.format(
                    str(result)))

        return msg_id

    def get_message_history(self,
                            entity,
                            limit=20,
                            offset_date=None,
                            offset_id=0,
                            max_id=0,
                            min_id=0,
                            add_offset=0):
        """
        Gets the message history for the specified entity

        :param entity:      The entity (or input peer) from whom to retrieve the message history
        :param limit:       Number of messages to be retrieved
        :param offset_date: Offset date (messages *previous* to this date will be retrieved)
        :param offset_id:   Offset message ID (only messages *previous* to the given ID will be retrieved)
        :param max_id:      All the messages with a higher (newer) ID or equal to this will be excluded
        :param min_id:      All the messages with a lower (older) ID or equal to this will be excluded
        :param add_offset:  Additional message offset (all of the specified offsets + this offset = older messages)

        :return: A tuple containing total message count and two more lists ([messages], [senders]).
                 Note that the sender can be null if it was not found!
        """
        result = self.invoke(
            GetHistoryRequest(get_input_peer(entity),
                              limit=limit,
                              offset_date=offset_date,
                              offset_id=offset_id,
                              max_id=max_id,
                              min_id=min_id,
                              add_offset=add_offset))

        # The result may be a messages slice (not all messages were retrieved)
        # or simply a messages TLObject. In the latter case, no "count"
        # attribute is specified, so the total message count is simply
        # the number of retrieved messages.
        total_messages = getattr(result, 'count', len(result.messages))

        # Iterate over all the messages and find the sender User
        entities = [
            find_user_or_chat(m.from_id, result.users, result.chats)
            if m.from_id is not None else find_user_or_chat(
                m.to_id, result.users, result.chats) for m in result.messages
        ]

        return total_messages, result.messages, entities

    def send_read_acknowledge(self, entity, messages=None, max_id=None):
        """Sends a "read acknowledge" (i.e., notifying the given peer that we've
           read their messages, also known as the "double check").

           Either a list of messages (or a single message) can be given,
           or the maximum message ID (until which message we want to send the read acknowledge).

           Returns an AffectedMessages TLObject"""
        if max_id is None:
            if not messages:
                raise InvalidParameterError(
                    'Either a message list or a max_id must be provided.')

            if isinstance(messages, list):
                max_id = max(msg.id for msg in messages)
            else:
                max_id = messages.id

        return self.invoke(
            ReadHistoryRequest(peer=get_input_peer(entity), max_id=max_id))

    # endregion

    # region Uploading files

    def send_photo_file(self, input_file, entity, caption=''):
        """Sends a previously uploaded input_file
           (which should be a photo) to the given entity (or input peer)"""
        msg_id = self.send_media_file(
            InputMediaUploadedPhoto(input_file, caption), entity)

        return msg_id

    def send_document_file(self, input_file, entity, caption=''):
        """Sends a previously uploaded input_file
           (which should be a document) to the given entity (or input peer)"""

        # Determine mime-type and attributes
        # Take the first element by using [0] since it returns a tuple
        mime_type = guess_type(input_file.name)[0]
        attributes = [
            DocumentAttributeFilename(input_file.name)
            # TODO If the input file is an audio, find out:
            # Performer and song title and add DocumentAttributeAudio
        ]
        # Ensure we have a mime type, any; but it cannot be None
        # 'The "octet-stream" subtype is used to indicate that a body
        # contains arbitrary binary data.'
        if not mime_type:
            mime_type = 'application/octet-stream'
        msg_id = self.send_media_file(
            InputMediaUploadedDocument(file=input_file,
                                       mime_type=mime_type,
                                       attributes=attributes,
                                       caption=caption), entity)
        return msg_id

    def send_media_file(self, input_media, entity):
        """Sends any input_media (contact, document, photo...) to the given entity"""
        result = self.invoke(
            SendMediaRequest(peer=get_input_peer(entity), media=input_media))
        msg_id = None
        if isinstance(result, UpdatesTg):
            if result.updates is not None:
                msg_id = next((update.id for update in result.updates
                               if isinstance(update, UpdateMessageID)), None)
        if msg_id is None:  # log a warning if no message id was found
            self._logger.warning(
                'send_media_file did not find a message id in the result: {}'.format(
                    str(result)))
        return msg_id

    # endregion

    # region Downloading media requests

    def download_profile_photo(self,
                               profile_photo,
                               file_path,
                               add_extension=True,
                               download_big=True):
        """Downloads the profile photo for an user or a chat (including channels).
           Returns False if no photo was provided, or if it was Empty"""

        if (not profile_photo
                or isinstance(profile_photo, UserProfilePhotoEmpty)
                or isinstance(profile_photo, ChatPhotoEmpty)):
            return False

        if add_extension:
            file_path += get_extension(profile_photo)

        if download_big:
            photo_location = profile_photo.photo_big
        else:
            photo_location = profile_photo.photo_small

        # Download the media with the largest size input file location
        self.download_file(
            InputFileLocation(volume_id=photo_location.volume_id,
                              local_id=photo_location.local_id,
                              secret=photo_location.secret), file_path)
        return True

    def download_msg_media(self,
                           message_media,
                           file_path,
                           add_extension=True,
                           progress_callback=None):
        """Downloads the given MessageMedia (Photo, Document or Contact)
           into the desired file_path, optionally finding its extension automatically
           The progress_callback should be a callback function which takes two parameters,
           uploaded size (in bytes) and total file size (in bytes).
           This will be called every time a part is downloaded"""
        if type(message_media) == MessageMediaPhoto:
            return self.download_photo(message_media, file_path, add_extension,
                                       progress_callback)

        elif type(message_media) == MessageMediaDocument:
            return self.download_document(message_media, file_path,
                                          add_extension, progress_callback)

        elif type(message_media) == MessageMediaContact:
            return self.download_contact(message_media, file_path,
                                         add_extension)

    def download_photo(self,
                       message_media_photo,
                       file_path,
                       add_extension=False,
                       progress_callback=None):
        """Downloads MessageMediaPhoto's largest size into the desired
           file_path, optionally finding its extension automatically
           The progress_callback should be a callback function which takes two parameters,
           uploaded size (in bytes) and total file size (in bytes).
           This will be called every time a part is downloaded"""

        # Determine the photo and its largest size
        photo = message_media_photo.photo
        largest_size = photo.sizes[-1]
        file_size = largest_size.size
        largest_size = largest_size.location

        if add_extension:
            file_path += get_extension(message_media_photo)

        # Download the media with the largest size input file location
        self.download_file(InputFileLocation(volume_id=largest_size.volume_id,
                                             local_id=largest_size.local_id,
                                             secret=largest_size.secret),
                           file_path,
                           file_size=file_size,
                           progress_callback=progress_callback)
        return file_path

    def download_document(self,
                          message_media_document,
                          file_path=None,
                          add_extension=True,
                          progress_callback=None):
        """Downloads the given MessageMediaDocument into the desired
           file_path, optionally finding its extension automatically.
           If no file_path is given, it will try to be guessed from the document
           The progress_callback should be a callback function which takes two parameters,
           uploaded size (in bytes) and total file size (in bytes).
           This will be called every time a part is downloaded"""
        document = message_media_document.document
        file_size = document.size

        # If no file path was given, try to guess it from the attributes
        if file_path is None:
            for attr in document.attributes:
                if type(attr) == DocumentAttributeFilename:
                    file_path = attr.file_name
                    break  # This attribute has higher preference

                elif type(attr) == DocumentAttributeAudio:
                    file_path = '{} - {}'.format(attr.performer, attr.title)

            if file_path is None:
                raise ValueError('Could not infer a file_path for the document'
                                 '. Please provide a valid file_path manually')

        if add_extension:
            file_path += get_extension(message_media_document)

        self.download_file(InputDocumentFileLocation(
            id=document.id,
            access_hash=document.access_hash,
            version=document.version),
                           file_path,
                           file_size=file_size,
                           progress_callback=progress_callback)
        return file_path

    @staticmethod
    def download_contact(message_media_contact, file_path, add_extension=True):
        """Downloads a media contact using the vCard 4.0 format"""

        first_name = message_media_contact.first_name
        last_name = message_media_contact.last_name
        phone_number = message_media_contact.phone_number

        # The only format that phones reliably understand for saved
        # contacts is the vCard format.
        if add_extension:
            file_path += '.vcard'

        # Ensure that we'll be able to download the contact
        utils.ensure_parent_dir_exists(file_path)

        with open(file_path, 'w', encoding='utf-8') as file:
            file.write('BEGIN:VCARD\n')
            file.write('VERSION:4.0\n')
            file.write('N:{};{};;;\n'.format(first_name,
                                             last_name if last_name else ''))
            file.write('FN:{}\n'.format(' '.join((first_name, last_name))))
            file.write(
                'TEL;TYPE=cell;VALUE=uri:tel:+{}\n'.format(phone_number))
            file.write('END:VCARD\n')

        return file_path

    def download_file(self,
                      input_location,
                      file,
                      part_size_kb=None,
                      file_size=None,
                      progress_callback=None,
                      on_dc=None):
        """Downloads the given InputFileLocation to file (a stream or str).

           If 'progress_callback' is not None, it should be a function that
           takes two parameters, (bytes_downloaded, total_bytes). Note that
           'total_bytes' simply equals 'file_size', and may be None.
        """
        if on_dc is None:
            try:
                super().download_file(input_location,
                                      file,
                                      part_size_kb=part_size_kb,
                                      file_size=file_size,
                                      progress_callback=progress_callback)
            except FileMigrateError as e:
                on_dc = e.new_dc

        if on_dc is not None:
            client = self._get_exported_client(on_dc)
            client.download_file(input_location,
                                 file,
                                 part_size_kb=part_size_kb,
                                 file_size=file_size,
                                 progress_callback=progress_callback)

    # endregion

    # endregion

    # region Updates handling

    def add_update_handler(self, handler):
        """Adds an update handler (a function which takes a TLObject,
          an update, as its parameter) and listens for updates"""
        if not self._sender:
            raise RuntimeError("You can't add update handlers until you've "
                               "successfully connected to the server.")

        first_handler = not self._update_handlers
        self._update_handlers.append(handler)
        if first_handler:
            self._set_updates_thread(running=True)

    def remove_update_handler(self, handler):
        self._update_handlers.remove(handler)
        if not self._update_handlers:
            self._set_updates_thread(running=False)

    def list_update_handlers(self):
        return self._update_handlers[:]

    def _set_updates_thread(self, running):
        """Sets the updates thread status (running or not)"""
        if running == self._updates_thread_running.is_set():
            return

        # Different state, update the saved value and behave as required
        self._logger.info('Changing updates thread running status to %s',
                          running)
        if running:
            self._updates_thread_running.set()
            if not self._updates_thread:
                self._updates_thread = Thread(
                    name='UpdatesThread',
                    daemon=True,
                    target=self._updates_thread_method)

            self._updates_thread.start()
        else:
            self._updates_thread_running.clear()
            if self._updates_thread_receiving.is_set():
                self._sender.cancel_receive()

    def _updates_thread_method(self):
        """This method will run until specified and listen for incoming updates"""

        # Set a reasonable timeout when checking for updates
        timeout = timedelta(minutes=1)

        while self._updates_thread_running.is_set():
            # Always sleep a bit before each iteration to relax the CPU,
            # since it's possible to 'continue' the loop early to reach
            # the next iteration, but we should still sleep.
            sleep(0.1)

            with self._lock:
                self._logger.debug('Updates thread acquired the lock')
                try:
                    self._updates_thread_receiving.set()
                    self._logger.debug(
                        'Trying to receive updates from the updates thread')

                    if time() > self._next_ping_at:
                        self._next_ping_at = time() + self.ping_interval
                        self.invoke(PingRequest(utils.generate_random_long()))

                    updates = self._sender.receive_updates(timeout=timeout)

                    self._updates_thread_receiving.clear()
                    self._logger.info(
                        'Received {} update(s) from the updates thread'.format(
                            len(updates)))
                    for update in updates:
                        for handler in self._update_handlers:
                            handler(update)

                except ConnectionResetError:
                    self._logger.info(
                        'Server disconnected us. Reconnecting...')
                    self.reconnect()

                except TimeoutError:
                    self._logger.debug('Receiving updates timed out')

                except ReadCancelledError:
                    self._logger.info('Receiving updates cancelled')

                except BrokenPipeError:
                    self._logger.info('Tcp session is broken. Reconnecting...')
                    self.reconnect()

                except InvalidChecksumError:
                    self._logger.info(
                        'MTProto session is broken. Reconnecting...')
                    self.reconnect()

                except OSError:
                    self._logger.warning(
                        'OSError on updates thread, %s logging out',
                        'was' if self._sender.logging_out else 'was not')

                    if self._sender.logging_out:
                        # This error is okay when logging out, means we got disconnected
                        # TODO Not sure why this happens because we call disconnect()...
                        self._set_updates_thread(running=False)
                    else:
                        raise

            self._logger.debug('Updates thread released the lock')

        # The thread is over, so unset its variable
        self._updates_thread = None
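
Read together, the docstrings above imply a simple interactive flow for this client: connect, request a login code, sign in with it, then register an update handler (which starts the updates thread). The sketch below is a hypothetical usage example built only from the methods shown in this example; the API credentials and phone number are placeholders.

# Hypothetical usage of the TelegramClient shown above.
# API_ID, API_HASH and the phone number are placeholders, not real values.
API_ID = 12345
API_HASH = '0123456789abcdef0123456789abcdef'

client = TelegramClient('my_session', API_ID, API_HASH)
client.connect()

if not client.is_user_authorized():
    phone = '+10000000000'
    client.send_code_request(phone)   # must be called before sign_in()
    code = input('Enter the login code: ')
    client.sign_in(phone_number=phone, code=code)


def print_update(update):
    # Runs on the updates thread for every received update.
    print('Update:', type(update).__name__)


client.add_update_handler(print_update)  # first handler starts the updates thread
# ... use the client ...
client.disconnect()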
예제 #45
0
class Connection(object):
    """The RPyC *connection* (AKA *protocol*).

    :param root: the :class:`~rpyc.core.service.Service` object to expose
    :param channel: the :class:`~rpyc.core.channel.Channel` over which messages are passed
    :param config: the connection's configuration dict (overriding parameters
                   from the :data:`default configuration <DEFAULT_CONFIG>`)
    """
    def __init__(self, root, channel, config={}):
        self._closed = True
        self._config = DEFAULT_CONFIG.copy()
        self._config.update(config)
        if self._config["connid"] is None:
            self._config["connid"] = "conn%d" % (
                next(_connection_id_generator), )

        self._HANDLERS = self._request_handlers()
        self._channel = channel
        self._seqcounter = itertools.count()
        self._recvlock = Lock()
        self._sendlock = Lock()
        self._sync_replies = {}
        self._sync_lock = RLock()
        self._sync_event = Event()
        self._async_callbacks = {}
        self._local_objects = RefCountingColl()
        self._last_traceback = None
        self._proxy_cache = WeakValueDict()
        self._netref_classes_cache = {}
        self._remote_root = None
        self._send_queue = []
        self._local_root = root
        self._closed = False

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()

    def __repr__(self):
        a, b = object.__repr__(self).split(" object ")
        return "%s %r object %s" % (a, self._config["connid"], b)

    #
    # IO
    #
    def _cleanup(self, _anyway=True):
        if self._closed and not _anyway:
            return
        self._closed = True
        self._channel.close()
        self._local_root.on_disconnect(self)
        self._sync_replies.clear()
        self._async_callbacks.clear()
        self._local_objects.clear()
        self._proxy_cache.clear()
        self._netref_classes_cache.clear()
        self._last_traceback = None
        self._remote_root = None
        self._local_root = None
        #self._seqcounter = None
        #self._config.clear()
        del self._HANDLERS

    def close(self, _catchall=True):
        """closes the connection, releasing all held resources"""
        if self._closed:
            return
        self._closed = True
        try:
            self._async_request(consts.HANDLE_CLOSE)
        except EOFError:
            pass
        except Exception:
            if not _catchall:
                raise
        finally:
            self._cleanup(_anyway=True)

    @property
    def closed(self):
        """Indicates whether the connection has been closed or not"""
        return self._closed

    def fileno(self):
        """Returns the connectin's underlying file descriptor"""
        return self._channel.fileno()

    def ping(self, data=None, timeout=3):
        """
        Asserts that the other party is functioning properly, by making sure
        the *data* is echoed back before the *timeout* expires

        :param data: the data to send (leave ``None`` for the default buffer)
        :param timeout: the maximal time to wait for echo

        :raises: :class:`PingError` if the echoed data does not match
        """
        if data is None:
            data = "abcdefghijklmnopqrstuvwxyz" * 20
        res = self.async_request(consts.HANDLE_PING, data, timeout=timeout)
        if res.value != data:
            raise PingError("echo mismatches sent data")

    def _get_seq_id(self):
        return next(self._seqcounter)

    def _send(self, msg, seq, args):
        data = brine.dump((msg, seq, args))
        # GC might run while sending data
        # if so, a BaseNetref.__del__ might be called
        # BaseNetref.__del__ must call asyncreq,
        # which will cause a deadlock
        # Solution:
        # Add the current request to a queue and let the thread that currently
        # holds the sendlock send it when it's done with its current job.
        # NOTE: Atomic list operations should be thread safe,
        # please call me out if they are not on all implementations!
        self._send_queue.append(data)
        # It is crucial to check the queue each time AFTER releasing the lock:
        while self._send_queue:
            if not self._sendlock.acquire(False):
                # Another thread holds the lock. It will send the data after
                # it's done with its current job. We can safely return.
                return
            try:
                # Can happen if another consumer was scheduled in between
                # `while` and `acquire`:
                if not self._send_queue:
                    # Must `continue` to ensure that `send_queue` is checked
                    # after releasing the lock! (in case another producer is
                    # scheduled before `release`)
                    continue
                data = self._send_queue.pop(0)
                self._channel.send(data)
            finally:
                self._sendlock.release()

    def _send_request(self, seq, handler, args):
        self._send(consts.MSG_REQUEST, seq, (handler, self._box(args)))

    def _send_reply(self, seq, obj):
        self._send(consts.MSG_REPLY, seq, self._box(obj))

    def _send_exception(self, seq, exctype, excval, exctb):
        exc = vinegar.dump(
            exctype,
            excval,
            exctb,
            include_local_traceback=self._config["include_local_traceback"])
        self._send(consts.MSG_EXCEPTION, seq, exc)

    #
    # boxing
    #
    def _box(self, obj):
        """store a local object in such a way that it could be recreated on
        the remote party either by-value or by-reference"""
        if brine.dumpable(obj):
            return consts.LABEL_VALUE, obj
        if type(obj) is tuple:
            return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj)
        elif isinstance(obj, netref.BaseNetref) and obj.____conn__() is self:
            return consts.LABEL_LOCAL_REF, obj.____oid__
        else:
            self._local_objects.add(obj)
            try:
                cls = obj.__class__
            except Exception:
                # see issue #16
                cls = type(obj)
            if not isinstance(cls, type):
                cls = type(obj)
            return consts.LABEL_REMOTE_REF, (id(obj), cls.__name__,
                                             cls.__module__)

    def _unbox(self, package):
        """recreate a local object representation of the remote object: if the
        object is passed by value, just return it; if the object is passed by
        reference, create a netref to it"""
        label, value = package
        if label == consts.LABEL_VALUE:
            return value
        if label == consts.LABEL_TUPLE:
            return tuple(self._unbox(item) for item in value)
        if label == consts.LABEL_LOCAL_REF:
            return self._local_objects[value]
        if label == consts.LABEL_REMOTE_REF:
            oid, clsname, modname = value
            if oid in self._proxy_cache:
                proxy = self._proxy_cache[oid]
                proxy.____refcount__ += 1  # other side increased refcount on boxing,
                # if I'm returning from cache instead of new object,
                # must increase refcount to match
                return proxy
            proxy = self._netref_factory(oid, clsname, modname)
            self._proxy_cache[oid] = proxy
            return proxy
        raise ValueError("invalid label %r" % (label, ))

    def _netref_factory(self, oid, clsname, modname):
        typeinfo = (clsname, modname)
        if typeinfo in self._netref_classes_cache:
            cls = self._netref_classes_cache[typeinfo]
        elif typeinfo in netref.builtin_classes_cache:
            cls = netref.builtin_classes_cache[typeinfo]
        else:
            info = self.sync_request(consts.HANDLE_INSPECT, oid)
            cls = netref.class_factory(clsname, modname, info)
            self._netref_classes_cache[typeinfo] = cls
        return cls(weakref.ref(self), oid)

    #
    # dispatching
    #
    def _dispatch_request(self, seq, raw_args):
        try:
            handler, args = raw_args
            args = self._unbox(args)
            res = self._HANDLERS[handler](self, *args)
        except:
            # need to catch old style exceptions too
            t, v, tb = sys.exc_info()
            self._last_traceback = tb
            logger = self._config["logger"]
            if logger and t is not StopIteration:
                logger.debug("Exception caught", exc_info=True)
            if t is SystemExit and self._config["propagate_SystemExit_locally"]:
                raise
            if t is KeyboardInterrupt and self._config[
                    "propagate_KeyboardInterrupt_locally"]:
                raise
            self._send_exception(seq, t, v, tb)
        else:
            self._send_reply(seq, res)

    def _dispatch_reply(self, seq, raw):
        obj = self._unbox(raw)
        if seq in self._async_callbacks:
            self._async_callbacks.pop(seq)(False, obj)
        else:
            self._sync_replies[seq] = (False, obj)

    def _unbox_exception(self, raw):
        return vinegar.load(
            raw,
            import_custom_exceptions=self._config["import_custom_exceptions"],
            instantiate_custom_exceptions=self.
            _config["instantiate_custom_exceptions"],
            instantiate_oldstyle_exceptions=self.
            _config["instantiate_oldstyle_exceptions"])

    def _dispatch_exception(self, seq, raw):
        obj = self._unbox_exception(raw)
        if seq in self._async_callbacks:
            self._async_callbacks.pop(seq)(True, obj)
        else:
            self._sync_replies[seq] = (True, obj)

    #
    # serving
    #
    def _recv(self, timeout, wait_for_lock):
        if not self._recvlock.acquire(wait_for_lock):
            return None
        try:
            if self._channel.poll(timeout):
                data = self._channel.recv()
            else:
                data = None
        except EOFError:
            self.close()
            raise
        finally:
            self._recvlock.release()
        return data

    def _dispatch(self, data):
        msg, seq, args = brine.load(data)
        if msg == consts.MSG_REQUEST:
            self._dispatch_request(seq, args)
        elif msg == consts.MSG_REPLY:
            self._dispatch_reply(seq, args)
        elif msg == consts.MSG_EXCEPTION:
            self._dispatch_exception(seq, args)
        else:
            raise ValueError("invalid message type: %r" % (msg, ))

    def sync_recv_and_dispatch(self, timeout, wait_for_lock):
        # lock or wait for signal
        if self._sync_lock.acquire(False):
            try:
                self._sync_event.clear()
                data = self._recv(timeout, wait_for_lock=False)
                if not data:
                    return False
                self._dispatch(data)
                return True
            finally:
                self._sync_lock.release()
                self._sync_event.set()
        else:
            self._sync_event.wait()

    def poll(self, timeout=0):
        """Serves a single transaction, should one arrives in the given
        interval. Note that handling a request/reply may trigger nested
        requests, which are all part of a single transaction.

        :returns: ``True`` if a transaction was served, ``False`` otherwise"""
        return self.sync_recv_and_dispatch(timeout, wait_for_lock=False)

    def serve(self, timeout=1):
        """Serves a single request or reply that arrives within the given
        time frame (default is 1 sec). Note that the dispatching of a request
        might trigger multiple (nested) requests, thus this function may be
        reentrant.

        :returns: ``True`` if a request or reply was received, ``False``
                  otherwise.
        """
        return self.sync_recv_and_dispatch(timeout, wait_for_lock=True)

    def serve_all(self):
        """Serves all requests and replies for as long as the connection is
        alive."""
        try:
            while True:
                self.serve(None)
        except (socket.error, select_error, IOError):
            if not self.closed:
                raise
        except EOFError:
            pass
        finally:
            self.close()

    def serve_threaded(self, thread_count=10):
        """Serves all requests and replies for as long as the connection is
        alive."""
        def _thread_target():
            try:
                while True:
                    self.serve(None)
            except (socket.error, select_error, IOError):
                if not self.closed:
                    raise
            except EOFError:
                pass

        try:
            threads = [spawn(_thread_target) for _ in range(thread_count)]

            for thread in threads:
                thread.join()
        finally:
            self.close()

    def poll_all(self, timeout=0):
        """Serves all requests and replies that arrive within the given interval.

        :returns: ``True`` if at least a single transaction was served, ``False`` otherwise
        """
        at_least_once = False
        t0 = time.time()
        duration = timeout
        try:
            while True:
                if self.poll(duration):
                    at_least_once = True
                if timeout is not None:
                    duration = t0 + timeout - time.time()
                    if duration < 0:
                        break
        except EOFError:
            pass
        return at_least_once

    #
    # requests
    #
    def sync_request(self, handler, *args):
        """Sends a synchronous request (waits for the reply to arrive)

        :raises: any exception that the request may generate
        :returns: the result of the request
        """
        seq = self._get_seq_id()
        self._send_request(seq, handler, args)

        timeout = self._config["sync_request_timeout"]
        while seq not in self._sync_replies:
            self.sync_recv_and_dispatch(timeout, True)

        isexc, obj = self._sync_replies.pop(seq)
        if isexc:
            raise obj
        else:
            return obj

    def _async_request(self, handler, args=(), callback=(lambda a, b: None)):
        seq = self._get_seq_id()
        self._async_callbacks[seq] = callback
        try:
            self._send_request(seq, handler, args)
        except:
            if seq in self._async_callbacks:
                del self._async_callbacks[seq]
            raise

    def async_request(self, handler, *args, **kwargs):
        """Send an asynchronous request (does not wait for it to finish)

        :returns: an :class:`rpyc.core.async_.AsyncResult` object, which will
                  eventually hold the result (or exception)
        """
        timeout = kwargs.pop("timeout", None)
        if kwargs:
            raise TypeError("got unexpected keyword argument(s) %s" %
                            (list(kwargs.keys()), ))
        res = AsyncResult(weakref.proxy(self))
        self._async_request(handler, args, res)
        if timeout is not None:
            res.set_expiry(timeout)
        return res

    @property
    def root(self):
        """Fetches the root object (service) of the other party"""
        if self._remote_root is None:
            self._remote_root = self.sync_request(consts.HANDLE_GETROOT)
        return self._remote_root

    #
    # attribute access
    #
    def _check_attr(self, obj, name, perm):
        config = self._config
        if not config[perm]:
            raise AttributeError("cannot access %r" % (name, ))
        prefix = config["allow_exposed_attrs"] and config["exposed_prefix"]
        plain = (config["allow_all_attrs"]
                 or config["allow_exposed_attrs"] and name.startswith(prefix)
                 or config["allow_safe_attrs"] and name in config["safe_attrs"]
                 or config["allow_public_attrs"] and not name.startswith("_"))
        has_exposed = prefix and hasattr(obj, prefix + name)
        if plain and (not has_exposed or hasattr(obj, name)):
            return name
        if has_exposed:
            return prefix + name
        if plain:
            return name  # chance for better traceback
        raise AttributeError("cannot access %r" % (name, ))

    def _access_attr(self, obj, name, args, overrider, param, default):
        if is_py3k:
            if type(name) is bytes:
                name = str(name, "utf8")
            elif type(name) is not str:
                raise TypeError("name must be a string")
        else:
            if type(name) not in (str, unicode):
                raise TypeError("name must be a string")
            name = str(name)  # IronPython issue #10 + py3k issue
        accessor = getattr(type(obj), overrider, None)
        if accessor is None:
            accessor = default
            name = self._check_attr(obj, name, param)
        return accessor(obj, name, *args)

    #
    # request handlers
    #
    @classmethod
    def _request_handlers(cls):
        return {
            consts.HANDLE_PING: cls._handle_ping,
            consts.HANDLE_CLOSE: cls._handle_close,
            consts.HANDLE_GETROOT: cls._handle_getroot,
            consts.HANDLE_GETATTR: cls._handle_getattr,
            consts.HANDLE_DELATTR: cls._handle_delattr,
            consts.HANDLE_SETATTR: cls._handle_setattr,
            consts.HANDLE_CALL: cls._handle_call,
            consts.HANDLE_CALLATTR: cls._handle_callattr,
            consts.HANDLE_REPR: cls._handle_repr,
            consts.HANDLE_STR: cls._handle_str,
            consts.HANDLE_CMP: cls._handle_cmp,
            consts.HANDLE_HASH: cls._handle_hash,
            consts.HANDLE_DIR: cls._handle_dir,
            consts.HANDLE_PICKLE: cls._handle_pickle,
            consts.HANDLE_DEL: cls._handle_del,
            consts.HANDLE_INSPECT: cls._handle_inspect,
            consts.HANDLE_BUFFITER: cls._handle_buffiter,
            consts.HANDLE_OLDSLICING: cls._handle_oldslicing,
            consts.HANDLE_CTXEXIT: cls._handle_ctxexit,
        }

    def _handle_ping(self, data):
        return data

    def _handle_close(self):
        self._cleanup()

    def _handle_getroot(self):
        return self._local_root

    def _handle_del(self, obj, count=1):
        self._local_objects.decref(id(obj), count)

    def _handle_repr(self, obj):
        return repr(obj)

    def _handle_str(self, obj):
        return str(obj)

    def _handle_cmp(self, obj, other):
        # cmp() might enter recursive resonance... yet another workaround
        #return cmp(obj, other)
        try:
            return type(obj).__cmp__(obj, other)
        except (AttributeError, TypeError):
            return NotImplemented

    def _handle_hash(self, obj):
        return hash(obj)

    def _handle_call(self, obj, args, kwargs=()):
        return obj(*args, **dict(kwargs))

    def _handle_dir(self, obj):
        return tuple(dir(obj))

    def _handle_inspect(self, oid):
        return tuple(netref.inspect_methods(self._local_objects[oid]))

    def _handle_getattr(self, obj, name):
        return self._access_attr(obj, name, (), "_rpyc_getattr",
                                 "allow_getattr", getattr)

    def _handle_delattr(self, obj, name):
        return self._access_attr(obj, name, (), "_rpyc_delattr",
                                 "allow_delattr", delattr)

    def _handle_setattr(self, obj, name, value):
        return self._access_attr(obj, name, (value, ), "_rpyc_setattr",
                                 "allow_setattr", setattr)

    def _handle_callattr(self, obj, name, args, kwargs=()):
        obj = self._handle_getattr(obj, name)
        return self._handle_call(obj, args, kwargs)

    def _handle_ctxexit(self, obj, exc):
        if exc:
            try:
                raise exc
            except:
                exc, typ, tb = sys.exc_info()
        else:
            typ = tb = None
        return self._handle_getattr(obj, "__exit__")(exc, typ, tb)

    def _handle_pickle(self, obj, proto):
        if not self._config["allow_pickle"]:
            raise ValueError("pickling is disabled")
        return pickle.dumps(obj, proto)

    def _handle_buffiter(self, obj, count):
        return tuple(itertools.islice(obj, count))

    def _handle_oldslicing(self, obj, attempt, fallback, start, stop, args):
        try:
            # first try __xxxitem__
            getitem = self._handle_getattr(obj, attempt)
            return getitem(slice(start, stop), *args)
        except Exception:
            # fallback to __xxxslice__. see issue #41
            if stop is None:
                stop = maxint
            getslice = self._handle_getattr(obj, fallback)
            return getslice(start, stop, *args)
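The Connection methods above are rpyc's internal protocol layer; a caller reaches the same machinery through the library's public helpers. A minimal usage sketch, assuming a reachable rpyc server whose service defines a hypothetical exposed_compute method (the host, port and method name are illustrative, not taken from the code above):

import rpyc

conn = rpyc.connect("localhost", 18861)      # hypothetical host/port
print(conn.root.compute(21))                 # synchronous: sync_request under the hood

acompute = rpyc.async_(conn.root.compute)    # wrap the netref callable
res = acompute(21)                           # returns an AsyncResult immediately
res.set_expiry(5)                            # same effect as async_request(..., timeout=5)
res.wait()                                   # block until the result (or an error) arrives
print(res.value)                             # re-raises the remote exception, if any
conn.close()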
Example #46
0
class PipcoDaten:
    __m_instance = None
    m_lock = None

    def __init__(self):
        if self.__m_instance is not None:
            raise Exception("Error - Trying to init second instance")
        else:
            self.m_data_persistence = DataPersistence.DataPersistence(self)
            self.__m_emails_lock = RLock()
            self.__m_log_lock = RLock()
            self.__m_log_fr_lock = RLock()
            self.__m_database_lock = RLock()
            self.__m_setting_lock = RLock()
            self.__m_settings = self.m_data_persistence.load_settings()
            self.__m_emails = self.m_data_persistence.load_emails()
            self.__m_log = self.m_data_persistence.load_logs()
            self.__m_log_fr = self.m_data_persistence.load_logs_fr()
            if not self.__m_log_fr:
                self.__m_log_fr = AutoIdDict()
            if not self.__m_log:
                self.__m_log = AutoIdDict()
            if not self.__m_emails:
                self.__m_emails = AutoIdDict()
            if not self.__m_settings:
                self.__m_settings = Settings()
            self.__m_instance = self
            self.__m_image = None
            self.__m_image_fr = None
            self.__m_image_without = None
            self.__m_user = USER
            self.__m_password = PASSWORD
            interfacedb.initialize(DATABASE_PATH, TEMPORARY_PATH)
            self.m_stream_fps = 30
            self.m_data_persistence.save_settings(self.__m_settings)

    @staticmethod
    def get_instance():
        if PipcoDaten.__m_instance is None:
            PipcoDaten.__m_instance = PipcoDaten()
        return PipcoDaten.__m_instance

    @contextmanager
    def lock_all(self):
        self.__m_log_lock.acquire()
        self.__m_emails_lock.acquire()
        self.__m_setting_lock.acquire()
        try:
            yield self.__m_log_lock and self.__m_emails_lock and self.__m_setting_lock
        finally:
            self.__m_log_lock.release()
            self.__m_emails_lock.release()
            self.__m_setting_lock.release()

    def toggle_mail_notify(self, id):
        with self.__m_emails_lock:
            state = not self.__m_emails[int(id)].notify
            self.__m_emails[int(id)].notify = state
            self.m_data_persistence.save_emails(self.__m_emails)
            return state

    def add_mail(self, address):
        with self.__m_emails_lock:
            ret = self.__m_emails.append(Mail(address))
            self.m_data_persistence.save_emails(self.__m_emails)
            return ret

    def create_person(self, person):
        name = person.get('name')
        surname = person.get('surname')
        comment = person.get('comment')
        file = person.get('file')

        # Strip the data URI prefix first, then restore any missing base64 padding;
        # padding computed on the prefixed string would be off by the prefix length.
        file = file.replace('data:image/jpeg;base64,', '', 1)
        missing_padding = len(file) % 4
        if missing_padding:
            file += '=' * (4 - missing_padding)

        file = base64.b64decode(file)

        with self.__m_database_lock:
            ret = interfacedb.insert_person(name, surname, comment)
            ret_file = interfacedb.insert_picture_as_bytes(ret, file)

            return ret

    def remove_mail(self, id):
        with self.__m_emails_lock:
            self.__m_emails.__delitem__(id)
            self.m_data_persistence.save_emails(self.__m_emails)
            return id

    def get_mails(self):
        with self.__m_emails_lock:
            ret = copy.deepcopy(self.__m_emails)
            return ret

    def get_settings(self):
        with self.__m_setting_lock:
            ret = copy.copy(self.__m_settings)
            return ret

    def change_settings(self,
                        sensitivity=None,
                        brightness=None,
                        contrast=None,
                        streamaddress=None,
                        global_notify=None,
                        log_enabled=None,
                        fr_log_enabled=None,
                        cliplength=None,
                        max_logs=None,
                        max_storage=None,
                        cam_mode=None):
        with self.__m_setting_lock:
            ret = {}
            if sensitivity is not None:
                ret["sensitivity"] = sensitivity
                self.__m_settings.sensitivity = float(sensitivity)
            if brightness is not None:
                ret["brightness"] = brightness
                self.__m_settings.brightness = float(brightness)
            if contrast is not None:
                ret["contrast"] = contrast
                self.__m_settings.contrast = float(contrast)
            if streamaddress is not None:
                ret["streamaddress"] = streamaddress
                self.__m_settings.streamaddress = streamaddress
            if global_notify is not None:
                ret["global_notify"] = global_notify
                self.__m_settings.global_notify = bool(global_notify)
            if log_enabled is not None:
                ret["log_enabled"] = log_enabled
                self.__m_settings.log_enabled = bool(log_enabled)
            if fr_log_enabled is not None:
                ret["fr_log_enabled"] = fr_log_enabled
                self.__m_settings.fr_log_enabled = bool(fr_log_enabled)
            if cliplength is not None:
                ret["cliplength"] = cliplength
                self.__m_settings.cliplength = int(cliplength)
            if max_logs is not None:
                ret["max_logs"] = max_logs
                self.__m_settings.max_logs = int(max_logs)
            if max_storage is not None:
                ret["max_storage"] = max_storage
                self.__m_settings.max_storage = int(max_storage)
            if cam_mode is not None:
                ret["cam_mode"] = cam_mode
                self.__m_settings.cam_mode = int(cam_mode)
            self.m_data_persistence.save_settings(self.__m_settings)
            return ret

    def set_image(self, image):
        self.__m_image = image

    def get_image(self):
        return self.__m_image

    # Images for the face recognition stream
    def set_image_fr(self, image):
        self.__m_image_fr = image

    def get_image_fr(self):
        return self.__m_image_fr

    # Images for the stream without face recognition or movement detection
    def set_image_without(self, image):
        self.__m_image_without = image

    def get_image_without(self):
        return self.__m_image_without

    def get_log_page(self, page, batchsize):
        with self.__m_log_lock:
            selected = {}
            for idx, key in enumerate(
                    sorted(self.__m_log.keys(),
                           reverse=True)[int(page) * int(batchsize):]):
                selected[key] = copy.copy(self.__m_log[key])
                if int(batchsize) - 1 == idx:
                    return selected
            return selected

    def get_log_page_fr(self, page, batchsize):
        with self.__m_log_fr_lock:
            selected = {}
            for idx, key in enumerate(
                    sorted(self.__m_log_fr.keys(),
                           reverse=True)[int(page) * int(batchsize):]):
                selected[key] = copy.copy(self.__m_log_fr[key])
                if int(batchsize) - 1 == idx:
                    return selected
            return selected

    def get_free_index(self):
        with self.__m_log_lock:
            return self.__m_log.get_free_index()

    def get_free_index_fr(self):
        with self.__m_log_fr_lock:
            return self.__m_log_fr.get_free_index()

    def add_log(self):
        max_logs = self.get_settings().max_logs
        with self.__m_log_lock:
            if len(self.__m_log) >= max_logs:
                idx = self.__m_log.get_oldest_key()
                self.remove_log(idx)
            idx = self.__m_log.get_free_index()
            idx = self.__m_log.append(Log(idx))
            self.m_data_persistence.save_logs(self.__m_log)
            return idx

    def add_log_fr(self):
        max_logs = self.get_settings().max_logs
        with self.__m_log_fr_lock:
            if len(self.__m_log_fr) >= max_logs:
                idx = self.__m_log_fr.get_oldest_key()
                self.remove_log_fr(idx)
            idx = self.__m_log_fr.get_free_index()
            idx = self.__m_log_fr.append(Log(idx))
            self.m_data_persistence.save_logs_fr(self.__m_log_fr)
            return idx

    def check_login(self, user, password):
        return self.__m_user == user and self.__m_password == password

    def remove_log(self, id):
        from src.ImageProcessing import THUMBNAIL_TYPE, RECORDING_TYPE
        with self.__m_log_lock:
            try:
                os.remove(THUMBNAIL_PATH + str(id) + THUMBNAIL_TYPE)
            except FileNotFoundError as e:
                print(e)
            try:
                os.remove(RECORDINGS_PATH + str(id) + RECORDING_TYPE)
            except FileNotFoundError as e:
                print(e)
            self.__m_log.__delitem__(id)
            self.m_data_persistence.save_logs(self.__m_log)
        return id

    def remove_log_fr(self, id):

        from src.Gesichtsreidentifikation import Gesichtsreidentifikation
        with self.__m_log_fr_lock:
            try:
                os.remove('data/videos/' + 'thumbnails/' + str(id) + '.jpg')
            except FileNotFoundError as e:
                print(e)
            try:
                os.remove('data/videos/' + str(id) + '.mp4')
            except FileNotFoundError as e:
                print(e)
            self.__m_log_fr.__delitem__(id)
            self.m_data_persistence.save_logs_fr(self.__m_log_fr)
        return id
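Every shared structure in the PipcoDaten singleton above is guarded by its own RLock. A minimal usage sketch of that API, calling only methods defined above and assuming the project modules it imports (DataPersistence, interfacedb) and the USER/PASSWORD constants are available; the e-mail address is illustrative:

data = PipcoDaten.get_instance()                        # lazily creates the single instance
settings = data.get_settings()                          # copy taken under the settings lock
data.change_settings(brightness=0.8, log_enabled=True)  # persisted while the settings lock is held
mail_id = data.add_mail('alice@example.com')
data.toggle_mail_notify(mail_id)
with data.lock_all():                                   # log, e-mail and settings locks held together
    pass                                                # take a consistent multi-structure snapshot here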
Example #47
0
class GameModel(GObject.GObject, Thread):
    """ GameModel contains all available data on a chessgame.
        It also has the task of controlling players actions and moves """

    __gsignals__ = {
        # game_started is emitted when control is given to the players for the
        # first time. Notice this is after players.start has been called.
        "game_started": (GObject.SignalFlags.RUN_FIRST, None, ()),
        # game_changed is emitted when a move has been made.
        "game_changed": (GObject.SignalFlags.RUN_FIRST, None, (int, )),
        # moves_undoing is emitted when an undoMoves call has been accepted, but
        # before any work has been done to execute it.
        "moves_undoing": (GObject.SignalFlags.RUN_FIRST, None, (int, )),
        # moves_undone is emitted after n moves have been undone in the
        # gamemodel and the players.
        "moves_undone": (GObject.SignalFlags.RUN_FIRST, None, (int, )),
        # game_unended is emitted if moves have been undone such that the game,
        # which had previously ended, is now active again.
        "game_unended": (GObject.SignalFlags.RUN_FIRST, None, ()),
        # game_loading is emitted if the GameModel is about to load in a chess
        # game from a file.
        "game_loading": (GObject.SignalFlags.RUN_FIRST, None, (object, )),
        # game_loaded is emitted after the chessformat handler has loaded in
        # all the moves from a file to the game model.
        "game_loaded": (GObject.SignalFlags.RUN_FIRST, None, (object, )),
        # game_saved is emitted at the end of model.save()
        "game_saved": (GObject.SignalFlags.RUN_FIRST, None, (str, )),
        # game_ended is emitted if the model's state has been changed to an
        # "ended state"
        "game_ended": (GObject.SignalFlags.RUN_FIRST, None, (int, )),
        # game_terminated is emitted if the game was terminated. That is all
        # players and clocks were stopped, and it is no longer possible to
        # resume the game, even by undo.
        "game_terminated": (GObject.SignalFlags.RUN_FIRST, None, ()),
        # game_paused is emitted if the game was successfully paused.
        "game_paused": (GObject.SignalFlags.RUN_FIRST, None, ()),
        # game_resumed is emitted if the game was successfully resumed from a
        # pause.
        "game_resumed": (GObject.SignalFlags.RUN_FIRST, None, ()),
        # action_error is currently only emitted by ICGameModel, in the case
        # the "web model" didn't accept the action you were trying to do.
        "action_error": (GObject.SignalFlags.RUN_FIRST, None, (object, int)),
        # players_changed is emitted if the players list was changed.
        "players_changed": (GObject.SignalFlags.RUN_FIRST, None, ()),
        "analyzer_added": (GObject.SignalFlags.RUN_FIRST, None, (object, str)),
        "analyzer_removed":
        (GObject.SignalFlags.RUN_FIRST, None, (object, str)),
        "analyzer_paused":
        (GObject.SignalFlags.RUN_FIRST, None, (object, str)),
        "analyzer_resumed":
        (GObject.SignalFlags.RUN_FIRST, None, (object, str)),
        # opening_changed is emitted if the move changed the opening.
        "opening_changed": (GObject.SignalFlags.RUN_FIRST, None, ()),
        # variation_added is emitted if a variation was added.
        "variation_added":
        (GObject.SignalFlags.RUN_FIRST, None, (object, object, str, str)),
        # variation_extended is emitted if a new move was added to a variation.
        "variation_extended": (GObject.SignalFlags.RUN_FIRST, None, (object,
                                                                     object)),
        # analysis_changed is emitted if the analysis scores were changed.
        "analysis_changed": (GObject.SignalFlags.RUN_FIRST, None, (int, )),
        # FICS games can get kibitz/whisper messages
        "message_received": (GObject.SignalFlags.RUN_FIRST, None, (str, str)),
        # FICS games can have observers
        "observers_received": (GObject.SignalFlags.RUN_FIRST, None, (str, )),
    }

    def __init__(self, timemodel=None, variant=NormalBoard):
        GObject.GObject.__init__(self)
        Thread.__init__(self, name=fident(self.run))
        self.daemon = True
        self.variant = variant
        self.boards = [variant(setup=True)]

        self.moves = []
        self.scores = {}
        self.spy_scores = {}
        self.players = []

        self.gameno = None
        self.variations = [self.boards]

        self.terminated = False
        self.status = WAITING_TO_START
        self.reason = UNKNOWN_REASON
        self.curColor = WHITE

        if timemodel is None:
            self.timemodel = TimeModel()
        else:
            self.timemodel = timemodel
        self.timemodel.gamemodel = self

        self.connections = defaultdict(list)  # mainly for IC subclasses
        self.analyzer_cids = {}
        self.examined = False

        now = datetime.datetime.now()
        self.tags = {
            "Event": _("Local Event"),
            "Site": _("Local Site"),
            "Round": 1,
            "Year": now.year,
            "Month": now.month,
            "Day": now.day,
            "Time": "%02d:%02d:00" % (now.hour, now.minute),
            "Result": "*",
        }

        self.endstatus = None
        self.timed = self.timemodel.minutes != 0 or self.timemodel.gain != 0
        if self.timed:
            self.zero_reached_cid = self.timemodel.connect(
                'zero_reached', self.zero_reached)

            self.tags["TimeControl"] = \
                "%d+%d" % (self.timemodel.minutes * 60, self.timemodel.gain)
            # Notice: tags["WhiteClock"] and tags["BlackClock"] are never set
            # on the gamemodel, but simply written or read during saving/
            # loading from pgn. If you want to know the time left for a player,
            # check the time model.

        # Keeps track of offers, so that accepts can be spotted
        self.offers = {}
        # True if the game has been changed since last save
        self.needsSave = False
        # The uri the current game was loaded from, or None if not a loaded
        # game
        self.uri = None

        self.spectators = {}

        self.applyingMoveLock = RLock()
        self.undoLock = RLock()
        self.undoQueue = Queue()

    def zero_reached(self, timemodel, color):
        if conf.get('autoCallFlag',
                    False) and self.players[1 - color].__type__ == ARTIFICIAL:
            if self.status == RUNNING and timemodel.getPlayerTime(color) <= 0:
                log.info(
                    'Automatically sending flag call on behalf of player %s.' %
                    self.players[1 - color].name)
                self.players[1 - color].emit("offer", Offer(FLAG_CALL))

    def __repr__(self):
        string = "<GameModel at %s" % id(self)
        string += " (ply=%s" % self.ply
        if len(self.moves) > 0:
            string += ", move=%s" % self.moves[-1]
        string += ", variant=%s" % self.variant.name.encode('utf-8')
        string += ", status=%s, reason=%s" % (str(self.status), str(
            self.reason))
        string += ", players=%s" % str(self.players)
        string += ", tags=%s" % str(self.tags)
        if len(self.boards) > 0:
            string += "\nboard=%s" % self.boards[-1]
        return string + ")>"

    @property
    def display_text(self):
        if self.variant == NormalBoard and not self.timed:
            return "[ " + _("Untimed") + " ]"
        else:
            text = "[ "
            if self.variant != NormalBoard:
                text += self.variant.name + " "
            if self.timed:
                text += self.timemodel.display_text + " "
            return text + "]"

    def setPlayers(self, players):
        log.debug("GameModel.setPlayers: starting")
        assert self.status == WAITING_TO_START
        self.players = players
        for player in self.players:
            self.connections[player].append(
                player.connect("offer", self.offerReceived))
            self.connections[player].append(
                player.connect("withdraw", self.withdrawReceived))
            self.connections[player].append(
                player.connect("decline", self.declineReceived))
            self.connections[player].append(
                player.connect("accept", self.acceptReceived))
        self.tags["White"] = str(self.players[WHITE])
        self.tags["Black"] = str(self.players[BLACK])
        log.debug("GameModel.setPlayers: -> emit players_changed")
        self.emit("players_changed")
        log.debug("GameModel.setPlayers: <- emit players_changed")
        log.debug("GameModel.setPlayers: returning")

    def color(self, player):
        if player is self.players[0]:
            return WHITE
        else:
            return BLACK

    def start_analyzer(self, analyzer_type):
        from pychess.Players.engineNest import init_engine
        analyzer = init_engine(analyzer_type, self)
        if analyzer is None:
            return

        analyzer.setOptionInitialBoard(self)
        self.spectators[analyzer_type] = analyzer
        self.emit("analyzer_added", analyzer, analyzer_type)
        self.analyzer_cids[analyzer_type] = analyzer.connect(
            "analyze", self.on_analyze)
        return analyzer

    def remove_analyzer(self, analyzer_type):
        try:
            analyzer = self.spectators[analyzer_type]
        except KeyError:
            return

        analyzer.disconnect(self.analyzer_cids[analyzer_type])
        analyzer.end(KILLED, UNKNOWN_REASON)
        self.emit("analyzer_removed", analyzer, analyzer_type)
        del self.spectators[analyzer_type]

    def resume_analyzer(self, analyzer_type):
        try:
            analyzer = self.spectators[analyzer_type]
        except KeyError:
            analyzer = self.start_analyzer(analyzer_type)
            if analyzer is None:
                return

        analyzer.resume()
        analyzer.setOptionInitialBoard(self)
        self.emit("analyzer_resumed", analyzer, analyzer_type)

    def pause_analyzer(self, analyzer_type):
        try:
            analyzer = self.spectators[analyzer_type]
        except KeyError:
            return

        analyzer.pause()
        self.emit("analyzer_paused", analyzer, analyzer_type)

    def restart_analyzer(self, analyzer_type):
        self.remove_analyzer(analyzer_type)
        self.start_analyzer(analyzer_type)
        if self.isPlayingICSGame():
            self.pause_analyzer(analyzer_type)

    def on_analyze(self, analyzer, analysis):
        if analysis and analysis[0] is not None:
            pv, score, depth = analysis[0]
            ply = analyzer.board.ply
            if score is not None:
                if analyzer.mode == ANALYZING:
                    self.scores[ply] = (pv, score, depth)
                    self.emit("analysis_changed", ply)
                else:
                    self.spy_scores[ply] = (pv, score, depth)

    def setOpening(self, ply=None):
        if ply is None:
            ply = self.ply
        if ply > 40:
            return

        if ply > 0:
            opening = get_eco(self.getBoardAtPly(ply).board.hash)
        else:
            opening = ("", "", "")
        if opening is not None:
            self.tags["ECO"] = opening[0]
            self.tags["Opening"] = opening[1]
            self.tags["Variation"] = opening[2]
            self.emit("opening_changed")

    # Board stuff

    def _get_ply(self):
        return self.boards[-1].ply

    ply = property(_get_ply)

    def _get_lowest_ply(self):
        return self.boards[0].ply

    lowply = property(_get_lowest_ply)

    def _get_curplayer(self):
        try:
            return self.players[self.getBoardAtPly(self.ply).color]
        except IndexError:
            log.error("%s %s" %
                      (self.players, self.getBoardAtPly(self.ply).color))
            raise

    curplayer = property(_get_curplayer)

    def _get_waitingplayer(self):
        try:
            return self.players[1 - self.getBoardAtPly(self.ply).color]
        except IndexError:
            log.error("%s %s" %
                      (self.players, 1 - self.getBoardAtPly(self.ply).color))
            raise

    waitingplayer = property(_get_waitingplayer)

    def _plyToIndex(self, ply):
        index = ply - self.lowply
        if index < 0:
            raise IndexError("%s < %s\n" % (ply, self.lowply))
        return index

    def getBoardAtPly(self, ply, variation=0):
        # Losing on time in a FICS game will undo our last move if it was made
        # too late
        if variation == 0 and ply > self.ply:
            ply = self.ply
        try:
            return self.variations[variation][self._plyToIndex(ply)]
        except IndexError:
            log.error(
                "%d\t%d\t%d\t%d\t%d" %
                (self.lowply, ply, self.ply, variation, len(self.variations)))
            raise

    def getMoveAtPly(self, ply, variation=0):
        try:
            return Move(self.variations[variation][self._plyToIndex(ply) +
                                                   1].board.lastMove)
        except IndexError:
            log.error(
                "%d\t%d\t%d\t%d\t%d" %
                (self.lowply, ply, self.ply, variation, len(self.variations)))
            raise

    def hasLocalPlayer(self):
        if self.players[0].__type__ == LOCAL or self.players[
                1].__type__ == LOCAL:
            return True
        else:
            return False

    def hasEnginePlayer(self):
        if self.players[0].__type__ == ARTIFICIAL or self.players[
                1].__type__ == ARTIFICIAL:
            return True
        else:
            return False

    def isLocalGame(self):
        if self.players[0].__type__ != REMOTE and self.players[
                1].__type__ != REMOTE:
            return True
        else:
            return False

    def isObservationGame(self):
        return not self.hasLocalPlayer()

    def isEngine2EngineGame(self):
        if self.players[0].__type__ == ARTIFICIAL and self.players[
                1].__type__ == ARTIFICIAL:
            return True
        else:
            return False

    def isPlayingICSGame(self):
        if self.players and self.status in (WAITING_TO_START, PAUSED, RUNNING):
            if self.players[0].__type__ == LOCAL and self.players[1].__type__ == REMOTE or \
               self.players[1].__type__ == LOCAL and self.players[0].__type__ == REMOTE:
                return True
        return False

    def isLoadedGame(self):
        return self.gameno is not None

    # Offer management

    def offerReceived(self, player, offer):
        log.debug("GameModel.offerReceived: offerer=%s %s" %
                  (repr(player), offer))
        if player == self.players[WHITE]:
            opPlayer = self.players[BLACK]
        elif player == self.players[BLACK]:
            opPlayer = self.players[WHITE]
        else:
            # Ignore offers from players who are not part of this game; this can
            # happen if a player started a conversation with you prior to
            # observing a game that player is in (see issue #1113)
            return

        if offer.type == HURRY_ACTION:
            opPlayer.hurry()

        elif offer.type == CHAT_ACTION:
            # print("GameModel.offerreceived(player, offer)", player.name, offer.param)
            opPlayer.putMessage(offer.param)

        elif offer.type == RESIGNATION:
            if player == self.players[WHITE]:
                self.end(BLACKWON, WON_RESIGN)
            else:
                self.end(WHITEWON, WON_RESIGN)

        elif offer.type == FLAG_CALL:
            assert self.timed
            if self.timemodel.getPlayerTime(1 - player.color) <= 0:
                if self.timemodel.getPlayerTime(player.color) <= 0:
                    self.end(DRAW, DRAW_CALLFLAG)
                elif not playerHasMatingMaterial(self.boards[-1],
                                                 player.color):
                    if player.color == WHITE:
                        self.end(DRAW, DRAW_WHITEINSUFFICIENTANDBLACKTIME)
                    else:
                        self.end(DRAW, DRAW_BLACKINSUFFICIENTANDWHITETIME)
                else:
                    if player == self.players[WHITE]:
                        self.end(WHITEWON, WON_CALLFLAG)
                    else:
                        self.end(BLACKWON, WON_CALLFLAG)
            else:
                player.offerError(offer, ACTION_ERROR_NOT_OUT_OF_TIME)

        elif offer.type == DRAW_OFFER and isClaimableDraw(self.boards[-1]):
            reason = getStatus(self.boards[-1])[1]
            self.end(DRAW, reason)

        elif offer.type == TAKEBACK_OFFER and offer.param < self.lowply:
            player.offerError(offer, ACTION_ERROR_TOO_LARGE_UNDO)

        elif offer.type in OFFERS:
            if offer not in self.offers:
                log.debug("GameModel.offerReceived: doing %s.offer(%s)" %
                          (repr(opPlayer), offer))
                self.offers[offer] = player
                opPlayer.offer(offer)
            # If we updated an older offer, we want to delete the old one
            keys = list(self.offers.keys())
            for offer_ in keys:
                if offer.type == offer_.type and offer != offer_:
                    del self.offers[offer_]

    def withdrawReceived(self, player, offer):
        log.debug("GameModel.withdrawReceived: withdrawer=%s %s" %
                  (repr(player), offer))
        if player == self.players[WHITE]:
            opPlayer = self.players[BLACK]
        else:
            opPlayer = self.players[WHITE]

        if offer in self.offers and self.offers[offer] == player:
            del self.offers[offer]
            opPlayer.offerWithdrawn(offer)
        else:
            player.offerError(offer, ACTION_ERROR_NONE_TO_WITHDRAW)

    def declineReceived(self, player, offer):
        log.debug("GameModel.declineReceived: decliner=%s %s" %
                  (repr(player), offer))
        if player == self.players[WHITE]:
            opPlayer = self.players[BLACK]
        else:
            opPlayer = self.players[WHITE]

        if offer in self.offers and self.offers[offer] == opPlayer:
            del self.offers[offer]
            log.debug("GameModel.declineReceived: declining %s" % offer)
            opPlayer.offerDeclined(offer)
        else:
            player.offerError(offer, ACTION_ERROR_NONE_TO_DECLINE)

    def acceptReceived(self, player, offer):
        log.debug("GameModel.acceptReceived: accepter=%s %s" %
                  (repr(player), offer))
        if player == self.players[WHITE]:
            opPlayer = self.players[BLACK]
        else:
            opPlayer = self.players[WHITE]

        if offer in self.offers and self.offers[offer] == opPlayer:
            if offer.type == DRAW_OFFER:
                self.end(DRAW, DRAW_AGREE)
            elif offer.type == TAKEBACK_OFFER:
                log.debug("GameModel.acceptReceived: undoMoves(%s)" %
                          (self.ply - offer.param))
                self.undoMoves(self.ply - offer.param)
            elif offer.type == ADJOURN_OFFER:
                self.end(ADJOURNED, ADJOURNED_AGREEMENT)
            elif offer.type == ABORT_OFFER:
                self.end(ABORTED, ABORTED_AGREEMENT)
            elif offer.type == PAUSE_OFFER:
                self.pause()
            elif offer.type == RESUME_OFFER:
                self.resume()
            del self.offers[offer]
        else:
            player.offerError(offer, ACTION_ERROR_NONE_TO_ACCEPT)

    # Data stuff

    def loadAndStart(self, uri, loader, gameno, position, first_time=True):
        if first_time:
            assert self.status == WAITING_TO_START

        uriIsFile = not isinstance(uri, str)
        if not uriIsFile:
            chessfile = loader.load(protoopen(uri))
        else:
            chessfile = loader.load(uri)

        self.gameno = gameno
        self.emit("game_loading", uri)
        try:
            chessfile.loadToModel(gameno, -1, self)
        # Postpone error raising to make games loadable to the point of the
        # error
        except LoadingError as e:
            error = e
        else:
            error = None
        if self.players:
            self.players[WHITE].setName(self.tags["White"])
            self.players[BLACK].setName(self.tags["Black"])
        self.emit("game_loaded", uri)

        self.needsSave = False
        if not uriIsFile:
            self.uri = uri
        else:
            self.uri = None

        # Even if the game "starts ended", the players should still be moved
        # to the last position, so analysis is correct, and a possible "undo"
        # will work as expected.
        for spectator in self.spectators.values():
            spectator.setOptionInitialBoard(self)
        for player in self.players:
            player.setOptionInitialBoard(self)
        if self.timed:
            self.timemodel.setMovingColor(self.boards[-1].color)

        if first_time:
            if self.status == RUNNING:
                if self.timed:
                    self.timemodel.start()

            # Store end status from Result tag
            if self.status in (DRAW, WHITEWON, BLACKWON):
                self.endstatus = self.status
            self.status = WAITING_TO_START
            self.start()

        if error:
            raise error

    def save(self, uri, saver, append, position=None):
        if isinstance(uri, str):
            fileobj = protosave(uri, append)
            self.uri = uri
        else:
            fileobj = uri
            self.uri = None
        saver.save(fileobj, self, position)
        self.needsSave = False
        self.emit("game_saved", uri)

    # Run stuff

    def run(self):
        log.debug("GameModel.run: Starting. self=%s" % self)
        # Avoid a race condition when self.start is called while we are in
        # self.end
        if self.status != WAITING_TO_START:
            return

        if not self.isLocalGame():
            self.timemodel.handle_gain = False

        self.status = RUNNING

        for player in self.players + list(self.spectators.values()):
            player.start()

        log.debug("GameModel.run: emitting 'game_started' self=%s" % self)
        self.emit("game_started")

        # Let GameModel end() itself on games started with loadAndStart()
        self.checkStatus()

        self.curColor = self.boards[-1].color

        while self.status in (PAUSED, RUNNING, DRAW, WHITEWON, BLACKWON):
            curPlayer = self.players[self.curColor]

            if self.timed:
                log.debug(
                    "GameModel.run: id=%s, players=%s, self.ply=%s: updating %s's time"
                    % (id(self), str(self.players), str(
                        self.ply), str(curPlayer)))
                curPlayer.updateTime(
                    self.timemodel.getPlayerTime(self.curColor),
                    self.timemodel.getPlayerTime(1 - self.curColor))

            try:
                log.debug(
                    "GameModel.run: id=%s, players=%s, self.ply=%s: calling %s.makeMove()"
                    % (id(self), str(self.players), self.ply, str(curPlayer)))
                if self.ply > self.lowply:
                    move = curPlayer.makeMove(self.boards[-1], self.moves[-1],
                                              self.boards[-2])
                else:
                    move = curPlayer.makeMove(self.boards[-1], None, None)
                log.debug(
                    "GameModel.run: id=%s, players=%s, self.ply=%s: got move=%s from %s"
                    % (id(self), str(
                        self.players), self.ply, move, str(curPlayer)))
            except PlayerIsDead as e:
                if self.status in (WAITING_TO_START, PAUSED, RUNNING):
                    stringio = StringIO()
                    traceback.print_exc(file=stringio)
                    error = stringio.getvalue()
                    log.error(
                        "GameModel.run: A Player died: player=%s error=%s\n%s"
                        % (curPlayer, error, e))
                    if self.curColor == WHITE:
                        self.kill(WHITE_ENGINE_DIED)
                    else:
                        self.kill(BLACK_ENGINE_DIED)
                break
            except InvalidMove as e:
                if self.curColor == WHITE:
                    self.end(BLACKWON, WON_ADJUDICATION)
                else:
                    self.end(WHITEWON, WON_ADJUDICATION)
                break
            except TurnInterrupt:
                log.debug(
                    "GameModel.run: id=%s, players=%s, self.ply=%s: TurnInterrupt"
                    % (id(self), str(self.players), self.ply))
                self.curColor = self.boards[-1].color
                continue

            log.debug(
                "GameModel.run: id=%s, players=%s, self.ply=%s: acquiring self.applyingMoveLock"
                % (id(self), str(self.players), self.ply))
            assert isinstance(move, Move), "%s" % repr(move)

            self.applyingMoveLock.acquire()
            try:
                log.debug(
                    "GameModel.run: id=%s, players=%s, self.ply=%s: applying move=%s"
                    % (id(self), str(self.players), self.ply, str(move)))
                self.needsSave = True
                newBoard = self.boards[-1].move(move)
                newBoard.board.prev = self.boards[-1].board

                # Variation on next move can exist from the hint panel...
                if self.boards[-1].board.next is not None:
                    newBoard.board.children = self.boards[
                        -1].board.next.children

                self.boards = self.variations[0]
                self.boards[-1].board.next = newBoard.board
                self.boards.append(newBoard)
                self.moves.append(move)

                if self.timed:
                    self.timemodel.tap()

                if not self.terminated:
                    self.emit("game_changed", self.ply)

                for spectator in self.spectators.values():
                    if spectator.board == self.boards[-2]:
                        spectator.putMove(self.boards[-1], self.moves[-1],
                                          self.boards[-2])

                self.setOpening()

                self.checkStatus()
                self.curColor = 1 - self.curColor

            finally:
                log.debug("GameModel.run: releasing self.applyingMoveLock")
                self.applyingMoveLock.release()

    def checkStatus(self):
        """ Updates self.status so it fits with what getStatus(boards[-1])
            would return. That is, if the game is e.g. check mated this will
            call mode.end(), or if moves have been undone from an otherwise
            ended position, this will call __resume and emit game_unended. """

        log.debug("GameModel.checkStatus:")

        # call flag by engine
        if self.isEngine2EngineGame() and self.status in UNDOABLE_STATES:
            return

        status, reason = getStatus(self.boards[-1])

        if self.endstatus is not None:
            self.end(self.endstatus, reason)
            return

        if status != RUNNING and self.status in (WAITING_TO_START, PAUSED,
                                                 RUNNING):
            if status == DRAW and reason in (DRAW_REPITITION, DRAW_50MOVES):
                if self.isEngine2EngineGame():
                    self.end(status, reason)
                    return
            else:
                self.end(status, reason)
                return

        if status != self.status and self.status in UNDOABLE_STATES \
                and self.reason in UNDOABLE_REASONS:
            self.__resume()
            self.status = status
            self.reason = UNKNOWN_REASON
            self.emit("game_unended")

    def __pause(self):
        log.debug("GameModel.__pause: %s" % self)
        if self.isEngine2EngineGame():
            for player in self.players:
                player.end(self.status, self.reason)
            if self.timed:
                self.timemodel.end()
        else:
            for player in self.players:
                player.pause()
            if self.timed:
                self.timemodel.pause()

    @inthread
    def pause(self):
        """ Players will raise NotImplementedError if they doesn't support
            pause. Spectators will be ignored. """

        self.applyingMoveLock.acquire()
        try:
            self.__pause()
            self.status = PAUSED
        finally:
            self.applyingMoveLock.release()
        self.emit("game_paused")

    def __resume(self):
        for player in self.players:
            player.resume()
        if self.timed:
            self.timemodel.resume()
        self.emit("game_resumed")

    @inthread
    def resume(self):
        self.applyingMoveLock.acquire()
        try:
            self.status = RUNNING
            self.__resume()
        finally:
            self.applyingMoveLock.release()

    def end(self, status, reason):
        if self.status not in UNFINISHED_STATES:
            log.info(
                "GameModel.end: Can't end a game that's already ended: %s %s" %
                (status, reason))
            return
        if self.status not in (WAITING_TO_START, PAUSED, RUNNING):
            self.needsSave = True

        log.debug(
            "GameModel.end: players=%s, self.ply=%s: Ending a game with status %d for reason %d"
            % (repr(self.players), str(self.ply), status, reason))
        self.status = status
        self.reason = reason

        self.emit("game_ended", reason)

        self.__pause()

    def kill(self, reason):
        log.debug(
            "GameModel.kill: players=%s, self.ply=%s: Killing a game for reason %d\n%s"
            % (repr(self.players), str(self.ply), reason, "".join(
                traceback.format_list(traceback.extract_stack())).strip()))

        self.status = KILLED
        self.reason = reason

        for player in self.players:
            player.end(self.status, reason)

        for spectator in self.spectators.values():
            spectator.end(self.status, reason)

        if self.timed:
            self.timemodel.end()

        self.emit("game_ended", reason)

    def terminate(self):
        log.debug("GameModel.terminate: %s" % self)
        self.terminated = True

        if self.status != KILLED:
            for player in self.players:
                player.end(self.status, self.reason)

            analyzer_types = list(self.spectators.keys())
            for analyzer_type in analyzer_types:
                self.remove_analyzer(analyzer_type)

            if self.timed:
                log.debug("GameModel.terminate: -> timemodel.end()")
                self.timemodel.end()
                log.debug("GameModel.terminate: <- timemodel.end() %s" %
                          repr(self.timemodel))
                self.timemodel.disconnect(self.zero_reached_cid)

        # ICGameModel may have already done this if the game was a FICS game
        if self.connections is not None:
            for player in self.players:
                for cid in self.connections[player]:
                    player.disconnect(cid)
        self.connections = {}

        self.timemodel.gamemodel = None
        self.players = []
        self.emit("game_terminated")

    # Other stuff

    @inthread
    @undolocked
    def undoMoves(self, moves):
        """ Undo and remove moves number of moves from the game history from
            the GameModel, players, and any spectators """
        if self.ply < 1 or moves < 1:
            return
        if self.ply - moves < 0:
            # There is no way in the current threaded/asynchronous design
            # for the GUI to know that the number of moves it requests to takeback
            # will still be valid once the undo is actually processed. So, until
            # we either add some locking or get a synchronous design, we quietly
            # "fix" the takeback request rather than cause AssertionError or IndexError
            moves = 1

        log.debug(
            "GameModel.undoMoves: players=%s, self.ply=%s, moves=%s, board=%s"
            % (repr(self.players), self.ply, moves, self.boards[-1]))
        log.debug("GameModel.undoMoves: acquiring self.applyingMoveLock")
        self.applyingMoveLock.acquire()
        log.debug("GameModel.undoMoves: self.applyingMoveLock acquired")
        try:
            self.emit("moves_undoing", moves)
            self.needsSave = True

            self.boards = self.variations[0]
            del self.boards[-moves:]
            del self.moves[-moves:]
            self.boards[-1].board.next = None

            for player in self.players:
                player.playerUndoMoves(moves, self)
            for spectator in self.spectators.values():
                spectator.spectatorUndoMoves(moves, self)

            log.debug("GameModel.undoMoves: undoing timemodel")
            if self.timed:
                self.timemodel.undoMoves(moves)

            self.checkStatus()
            self.setOpening()
        finally:
            log.debug("GameModel.undoMoves: releasing self.applyingMoveLock")
            self.applyingMoveLock.release()

        self.emit("moves_undone", moves)

    def isChanged(self):
        if self.ply == 0:
            return False
        if self.needsSave:
            return True
        if not self.uri or not isWriteable(self.uri):
            return True
        return False

    def add_variation(self, board, moves, comment="", score=""):
        board0 = board
        board = board0.clone()
        board.board.prev = None

        variation = [board]

        for move in moves:
            new = board.move(move)
            if len(variation) == 1:
                new.board.prev = board0.board
                variation[0].board.next = new.board
            else:
                new.board.prev = board.board
                board.board.next = new.board
            variation.append(new)
            board = new

        if board0.board.next is None:
            # If we are on the latest played board and want to add a variation,
            # we first have to add a board that has not been played yet,
            # which can hold the variation as its child
            from pychess.Utils.lutils.LBoard import LBoard
            null_board = LBoard()
            null_board.prev = board0.board
            board0.board.next = null_board

        board0.board.next.children.append(
            [vboard.board for vboard in variation])

        head = None
        for vari in self.variations:
            if board0 in vari:
                head = vari
                break

        variation[0] = board0
        self.variations.append(head[:board0.ply - self.lowply] + variation)
        self.needsSave = True
        self.emit("variation_added", board0.board.next.children[-1],
                  board0.board.next, comment, score)
        return self.variations[-1]

    def add_move2variation(self, board, move, variationIdx):
        new = board.move(move)
        new.board.prev = board.board
        board.board.next = new.board

        # Find the variation (low level lboard list) to append
        cur_board = board.board
        vari = None
        while cur_board.prev is not None:
            for child in cur_board.prev.next.children:
                if isinstance(child, list) and cur_board in child:
                    vari = child
                    break
            if vari is None:
                cur_board = cur_board.prev
            else:
                break
        vari.append(new.board)

        self.variations[variationIdx].append(new)
        self.needsSave = True
        self.emit("variation_extended", board.board, new.board)
class RecentlyUsedContainer(dict):
    """
    Provides a dict-like that maintains up to ``maxsize`` keys while throwing
    away the least-recently-used keys beyond ``maxsize``.
    """

    # If len(self.access_log) exceeds self._maxsize * CLEANUP_FACTOR, then we
    # will attempt to cleanup the invalidated entries in the access_log
    # datastructure during the next 'get' operation.
    CLEANUP_FACTOR = 10

    def __init__(self, maxsize=10):
        self._maxsize = maxsize

        self._container = {}

        # We use a deque to store our keys ordered by the last access.
        self.access_log = deque()
        self.access_log_lock = RLock()

        # We look up the access log entry by the key to invalidate it so we can
        # insert a new authoritative entry at the head without having to dig and
        # find the old entry for removal immediately.
        self.access_lookup = {}

        # Trigger a heap cleanup when we get past this size
        self.access_log_limit = maxsize * self.CLEANUP_FACTOR

    def _invalidate_entry(self, key):
        "If exists: Invalidate old entry and return it."
        old_entry = self.access_lookup.get(key)
        if old_entry:
            old_entry.is_valid = False

        return old_entry

    def _push_entry(self, key):
        "Push entry onto our access log, invalidate the old entry if exists."
        self._invalidate_entry(key)

        new_entry = AccessEntry(key)
        self.access_lookup[key] = new_entry

        self.access_log_lock.acquire()
        self.access_log.appendleft(new_entry)
        self.access_log_lock.release()

    def _prune_entries(self, num):
        "Pop entries from our access log until we popped ``num`` valid ones."
        while num > 0:
            self.access_log_lock.acquire()
            p = self.access_log.pop()
            self.access_log_lock.release()

            if not p.is_valid:
                continue  # Invalidated entry, skip

            dict.pop(self, p.key, None)
            self.access_lookup.pop(p.key, None)
            num -= 1

    def _prune_invalidated_entries(self):
        "Rebuild our access_log without the invalidated entries."
        self.access_log_lock.acquire()
        self.access_log = deque(e for e in self.access_log if e.is_valid)
        self.access_log_lock.release()

    def _get_ordered_access_keys(self):
        "Return ordered access keys for inspection. Used for testing."
        self.access_log_lock.acquire()
        r = [e.key for e in self.access_log if e.is_valid]
        self.access_log_lock.release()

        return r

    def __getitem__(self, key):
        # dict.__getitem__ raises KeyError for a missing key and, unlike a
        # truthiness check, does not misreport stored falsy values as missing.
        item = dict.__getitem__(self, key)

        # Insert new entry with new high priority, also implicitly invalidates
        # the old entry.
        self._push_entry(key)

        if len(self.access_log) > self.access_log_limit:
            # Heap is getting too big, try to clean up any tailing invalidated
            # entries.
            self._prune_invalidated_entries()

        return item

    def __setitem__(self, key, item):
        # Add item to our container and access log
        dict.__setitem__(self, key, item)
        self._push_entry(key)

        # Discard invalid and excess entries
        self._prune_entries(len(self) - self._maxsize)

    def __delitem__(self, key):
        self._invalidate_entry(key)
        self.access_lookup.pop(key, None)
        dict.__delitem__(self, key)

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
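A minimal usage sketch of the LRU container above. It relies on an AccessEntry helper (an object with key and is_valid attributes) defined elsewhere in its module, so only the dict-style API is exercised here:

cache = RecentlyUsedContainer(maxsize=3)
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3
_ = cache['a']                 # reading 'a' pushes a fresh entry onto the access log
cache['d'] = 4                 # exceeds maxsize, so the stalest valid entry is pruned
assert 'b' not in cache        # 'b' was the least recently used key and got evicted
assert cache.get('b') is None  # get() swallows the KeyError and returns the default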
class Amp(eg.PluginBase):
    def __init__(self):

        #actions
        group_Connection = self.AddGroup(
            "Connection", "Connect and disconnect to/from Amplifier")
        group_Connection.AddAction(ConnectToAmp)
        group_Connection.AddAction(DisconnectFromAmp)

        group_TimerClock = self.AddGroup(
            "Timer, Clock & Sleep",
            "Set Timer, switch it On/Off and show the Clock")
        group_TimerClock.AddAction(TimerOn)
        group_TimerClock.AddAction(TimerOff)
        group_TimerClock.AddAction(Clock)
        group_TimerClock.AddAction(setSleep)

        group_Power = self.AddGroup(
            "Power", "Actions regarding the Power State of the amplifier")
        group_Power.AddAction(PowerOn)
        group_Power.AddAction(PowerOff)
        group_Power.AddAction(MakeAmpReadyForMP)

        group_Vol = self.AddGroup("Volume", "Actions regarding the Volume")
        group_Vol.AddAction(setVolumeTo)
        group_Vol.AddAction(VolUp)
        group_Vol.AddAction(VolDown)
        group_Vol.AddAction(VolPct)
        group_Vol.AddAction(gradualVolChange)
        group_Vol.AddAction(stopGradualVolChange)
        group_Vol.AddAction(NormalMode)
        group_Vol.AddAction(StadiumMode)
        group_Vol.AddAction(NightMode)
        group_Vol.AddAction(NextAudioMode)
        group_Vol.AddAction(NightModeIfNoStadiumMode)

        group_Other = self.AddGroup(
            "Other",
            "Other Stuff like Reading the display, calling Favourites, setting the display's brightness, etc."
        )
        group_Other.AddAction(PrintCurrentParameters)
        group_Other.AddAction(ReadAmpDisplay)
        group_Other.AddAction(Favourite)
        group_Other.AddAction(setDisplayBrightness)
        group_Other.AddAction(sendCustomCommand)

        #available commands
        self.available_commands = [
            ('PWON', "Power On"), ('PWOFF', "Power Off"),
            ('PW?', "Request Power Status"), ('MVUP', "Volume Up"),
            ('MVDOWN', "Volume Down"), ('MV[0-9][0-9]', "Volume %s"),
            ('MV?', "Request Volume Status"), ('MVVOAUP', "Volume Up"),
            ('MVVOADOWN', "Volume Down"), ('MVVOA[0-9][0-9]', "Volume %s"),
            ('MVVOA?', "Digital In"), ('MUON', "Mute"), ('MUOFF', "Mute Off"),
            ('MU?', "Request Mute Status"), ('MUVOAON', "Mute"),
            ('MUVOAOFF', "Mute Off"), ('MUVOA?', "Request Mute Status"),
            ('SIIRADIO', "Internet Radio"), ('SIBLUETOOTH', "Bluetooth"),
            ('SISERVER', "Server"),
            ('SIUSB', "USB"), ('SIREARUSB', "Rear USB"),
            ('SIDIGITALIN1', "Digital In"), ('SIANALOGIN', "Analog In"),
            ('SLPOFF', "Sleep Off"), ('SLP[0-9][0-9][0-9]', "Sleep %s"),
            ('SLP?', "Request Sleep Status"),
            ('TSONCE @**##-@$$%% [F] [N] VV O', "Timer Once Off"),
            ('TEVERY @**##-@$$%% [F] [N] VV O', "Timer Every Off"),
            ('TSONCE @**##-@$$%% [F] [N] VV O', "Timer Once set to %s "),
            ('TEVERY @**##-@$$%% [F] [N] VV O', "Timer Every set to %s "),
            ('CLK', "toggle Clock"), ('FV $$', "Favourite %s"),
            ('FVMEM [0-9][0-9]', "Set to Favourite %s"),
            ('FVDEL [0-9][0-9]', "Delete Favourite %s"),
            ('FV ?', "Request Favourite List"), ('PSBAS UP', "Bass Up"),
            ('PSBAS DOWN', "Bass Down"),
            ('PSBAS [0-9][0-9]', "Set Bass to %s"),
            ('PSBAS ?', "Request Bass Level"), ('PSTRE UP', "Treble Up"),
            ('PSTRE DOWN', "Treble Down"),
            ('PSTRE [0-9][0-9]', "Set Treble to %s"),
            ('PSTRE ?', "Request Treble Level"),
            ('PSBAL LEFT', "Balance left"), ('PSBAL RIGHT', "Balance right"),
            ('PSBAL [0-9][0-9]', "Set Balance to %s"),
            ('PSBAL ?', "Request Balance Level"),
            ('PSSDB ON', "Dynamic Bass Boost On"),
            ('PSSDB OFF', "Dynamic Bass Boost Off"),
            ('PSSDB ?', "Request Dynamic Bass Boost Status"),
            ('PSSDI ON', "Source Direct On"), ('PSSDI OFF',
                                               "Source Direct Off"),
            ('PSSDI ?', "Request Source Direct Status")
        ]

    def __start__(self, IP_str, Input_str1, Input_str2, Input_str3, Input_str4,
                  Input_str5, Input_str6, Input_str7):
        print "starting"

        #set the configuration variables
        self.HOST = IP_str

        #initiate the dict for the status variables
        self.status_variables = {
            "Power": None,
            "Input": "N/A",
            "Volume": None,
            "Mute": None,
            "SourceDirect": None,
            "Treble": None,
            "Bass": None,
            "Balance": None,
            "Timer": (None, None),  #(once, every)
            "DynamicBassBoost": None,
            "Sleep": None,
            "AudioMode": None,  # 0 is Normal, 1 is Night, 2 is Stadium
            "ConnectStatus": 0,
            "Display": [""] * 9
        }

        # Names for Inputs (for outputting)
        self.InputNames = {
            "Internet Radio": Input_str1,
            "Bluetooth": Input_str2,
            "Server": Input_str3,
            "USB": Input_str4,
            "Rear USB": Input_str5,
            "Digital In": Input_str6,
            "Analog In": Input_str7
        }

        # a dictionary for values which cannot be set at the time of the command because the amplifier is switched off
        self.remember = {}

        # connect to the application
        self.start_connection()

    def __stop__(self):
        print "stopping plugin"
        self.stop_connection()

    def __close__(self):
        print "closing plugin"

    def OnComputerSuspend(self):
        self.stop_connection()

    def OnComputerResume(self):
        self.start_connection()

    def start_connection(self):

        #initiate the socket & lock
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(10)
        self.sockLock = RLock()

        #connect to the amplifier
        port = 23
        self.sock.connect((self.HOST, port))

        # Start the Thread for Receiving
        self.stopThreadEvent = Event()
        thread = Thread(target=self.ThreadLoop, args=(self.stopThreadEvent, ))
        thread.start()

        self.status_variables["ConnectStatus"] = 1

        self.request_status_variables_update()

    def stop_connection(self):
        #stop the ThreadLoop
        self.stopThreadEvent.set()
        #shut down the socket connection
        self.sock.close()

        self.status_variables["ConnectStatus"] = 0
        print "done"

    def Configure(self,
                  IP_str="192.168.1.197",
                  Input_str1="Internet Radio",
                  Input_str2="Bluetooth",
                  Input_str3="Server",
                  Input_str4="USB",
                  Input_str5="Rear USB",
                  Input_str6="Digital In",
                  Input_str7="Analog In"):

        panel = eg.ConfigPanel()

        IP_str_Control2 = panel.TextCtrl(IP_str)
        Input_str1_Ctrl = panel.TextCtrl(Input_str1)
        Input_str2_Ctrl = panel.TextCtrl(Input_str2)
        Input_str3_Ctrl = panel.TextCtrl(Input_str3)
        Input_str4_Ctrl = panel.TextCtrl(Input_str4)
        Input_str5_Ctrl = panel.TextCtrl(Input_str5)
        Input_str6_Ctrl = panel.TextCtrl(Input_str6)
        Input_str7_Ctrl = panel.TextCtrl(Input_str7)

        panel.AddLine("IP address of Amplifier: ", IP_str_Control2)
        panel.AddLine("Customised Names for the various Inputs:")
        panel.AddLine("Internet Radio: ", Input_str1_Ctrl)
        panel.AddLine("Bluetooth: ", Input_str2_Ctrl)
        panel.AddLine("Server: ", Input_str3_Ctrl)
        panel.AddLine("USB: ", Input_str4_Ctrl)
        panel.AddLine("Rear USB: ", Input_str5_Ctrl)
        panel.AddLine("Digital In: ", Input_str6_Ctrl)
        panel.AddLine("Analog In: ", Input_str7_Ctrl)

        while panel.Affirmed():
            panel.SetResult(IP_str_Control2.GetValue(),
                            Input_str1_Ctrl.GetValue(),
                            Input_str2_Ctrl.GetValue(),
                            Input_str3_Ctrl.GetValue(),
                            Input_str4_Ctrl.GetValue(),
                            Input_str5_Ctrl.GetValue(),
                            Input_str6_Ctrl.GetValue(),
                            Input_str7_Ctrl.GetValue())

    def ThreadLoop(self, stopThreadEvent):
        while not stopThreadEvent.isSet():
            received_data_in_cur_round = False  #if we received data, we do not want to wait for the next round
            self.sockLock.acquire()
            readable, writable, exceptional = select([self.sock], [],
                                                     [self.sock], 0)
            if readable:
                receive_data = self.sock.recv(1024)
                self.sockLock.release()
                received_data_in_cur_round = True
                receive_data = receive_data.split("\r")
                for msg in receive_data:
                    if not msg:
                        continue  #only messages with content
                    self.handle_rcv_content(msg)
            else:
                self.sockLock.release()

            if exceptional:
                print "error in socket"
                print exceptional

            if not received_data_in_cur_round:
                stopThreadEvent.wait(0.1)

    #define a function to handle responses
    def receive_responses(self, exp_nb_responses=1):
        #TODO: maybe I should get rid of this function. Is it really needed? (Currently not in use)

        #try four times (the last try has to happen after some time t > 200 milliseconds)
        #after exp_nb_responses or more responses the loop breaks
        n_responses = 0
        for recvTry in range(4):
            readable, writable, exceptional = select([self.sock], [],
                                                     [self.sock], 0)
            if readable:
                receive_data = self.sock.recv(1024)
                receive_data = receive_data.split("\r")
                for msg in receive_data:
                    if not msg:
                        continue  #only messages with content
                    self.handle_rcv_content(msg)
                    print msg
                    n_responses += 1
                if n_responses >= exp_nb_responses:
                    break
            sleep(0.07)
        sleep(0.01)

    def handle_rcv_content(self, msg):
        #print msg

        if msg.startswith("MVVOA"):
            self.status_variables["Volume"] = int(msg[5:7])
            self.TriggerEvent("Vol",
                              payload=str(self.status_variables["Volume"]))
        elif msg.startswith("MV"):
            self.status_variables["Volume"] = int(msg[2:4])
            self.TriggerEvent("Vol",
                              payload=str(self.status_variables["Volume"]))

        elif msg.startswith("MU"):
            if msg == "MUON":
                self.status_variables["Mute"] = True
            elif msg == "MUOFF":
                self.status_variables["Mute"] = False
            #trigger Event
            self.TriggerEvent("Mute",
                              payload=str(self.status_variables["Mute"]))

        elif msg.startswith("PW"):
            if msg == "PWON":
                self.status_variables["Power"] = True
                if len(self.remember) > 0:
                    sleep(4)
                    self.execute_remembered_values()
            elif msg == "PWSTANDBY":
                self.status_variables["Power"] = False
            #trigger Event
            self.TriggerEvent("Power." + str(self.status_variables["Power"]))

        elif msg.startswith("SI"):
            if msg == "SIIRADIO":
                self.status_variables["Input"] = self.InputNames[
                    "Internet Radio"]
            elif msg == "SIBLUETOOTH":
                self.status_variables["Input"] = self.InputNames["Bluetooth"]
            elif msg == "SISERVER":
                self.status_variables["Input"] = self.InputNames["Server"]
            elif msg == "SIUSB":
                self.status_variables["Input"] = self.InputNames["USB"]
            elif msg == "SIREARUSB":
                self.status_variables["Input"] = self.InputNames["Rear USB"]
            elif msg == "SIDIGITALIN1":
                self.status_variables["Input"] = self.InputNames["Digital In"]
            elif msg == "SIANALOGIN":
                self.status_variables["Input"] = self.InputNames["Analog In"]
            #trigger Event
            self.TriggerEvent("Input", payload=self.status_variables["Input"])

        elif msg.startswith("PS"):
            if msg.startswith("PSTRE"):
                self.status_variables["Treble"] = int(msg[6:8])
            elif msg.startswith("PSBAS"):
                self.status_variables["Bass"] = int(msg[6:8])
            elif msg.startswith("PSBAL"):
                self.status_variables["Balance"] = int(msg[6:8])
            elif msg.startswith("PSSDB"):
                if msg == "PSSDB ON":
                    self.status_variables["DynamicBassBoost"] = True
                elif msg == "PSSDB OFF":
                    self.status_variables["DynamicBassBoost"] = False
            elif msg.startswith("PSSDI"):
                if msg == "PSSDI ON":
                    self.status_variables["SourceDirect"] = True
                elif msg == "PSSDI OFF":
                    self.status_variables["SourceDirect"] = False

        elif msg.startswith("SLP"):
            if msg.startswith("SLPOFF"):
                self.status_variables["Sleep"] = 0
            else:
                self.status_variables["Sleep"] = int(msg[3:6])
            #trigger Event
            self.TriggerEvent("SLP",
                              payload=str(self.status_variables["Sleep"]))

        elif msg.startswith("NSE"):
            self.status_variables["Display"][int(msg[3:4])] = msg[4:len(msg)]

    def execute_remembered_values(self):
        if "AudioMode" in self.remember:
            self.activateAudioMode(self.remember["AudioMode"])
            self.remember.pop("AudioMode", None)
        if len(self.remember) > 0:
            print "there are remembered values which have not been executed"

    def request_status_variables_update(self):
        #TODO: Check how large the buffer is (and how it interacts with the 1024 recv length). Maybe need to do a sockLock pause
        with self.sockLock:
            self.sock.sendall(b'PW?\r')
            self.sock.sendall(b'SI?\r')
            self.sock.sendall(b'MV?\r')
            self.sock.sendall(b'MU?\r')
            self.sock.sendall(b'PSSDI ?\r')
            self.sock.sendall(b'PSBAS ?\r')
            self.sock.sendall(b'PSTRE ?\r')
            self.sock.sendall(b'PSBAL ?\r')
            self.sock.sendall(b'PSSDB ?\r')
            self.sock.sendall(b'SLP?\r')
            self.sock.sendall(b'TS?\r')  #TODO: Check Timer request command
            self.sock.sendall(b'NSE\r')

    def activateAudioMode(self, mode):
        #first check whether the AudioMode is already active. If yes, then nothing has to be done
        if not (self.status_variables["AudioMode"] == mode):
            #check if the power is on; if not, we cannot change the audio mode and instead remember to set it as soon as the amplifier is switched on again.
            if not self.status_variables["Power"]:
                self.remember["AudioMode"] = mode
                #trigger Event
                self.TriggerEvent("AudioMode", payload="R" + str(mode))
            else:
                if mode == 0:  #normal
                    with self.sockLock:
                        self.sock.sendall(b'PSSDI ON\r')
                        self.sock.sendall(b'SSDIM100\r')
                        self.sock.sendall(b'PSBAS 50\r')
                        self.sock.sendall(b'PSTRE 50\r')
                        self.sock.sendall(b'PSBAL 50\r')
                        self.sock.sendall(b'PSSDB OFF\r')
                    self.status_variables["AudioMode"] = 0

                elif mode == 1:  #night
                    with self.sockLock:
                        self.sock.sendall(b'PSSDI OFF\r')
                        self.sock.sendall(b'PSBAS 40\r')
                        self.sock.sendall(b'PSTRE 52\r')
                        self.sock.sendall(b'SSDIM050\r')
                    self.status_variables["AudioMode"] = 1

                elif mode == 2:  #stadium
                    with self.sockLock:
                        self.sock.sendall(b'PSSDI OFF\r')
                        self.sock.sendall(b'PSBAS 52\r')
                        self.sock.sendall(b'PSTRE 58\r')
                        self.sock.sendall(b'SSDIM050\r')
                    self.status_variables["AudioMode"] = 2

                #trigger Event
                self.TriggerEvent("AudioMode",
                                  payload=str(
                                      self.status_variables["AudioMode"]))

    def switchToNextAudioMode(self):
        if self.status_variables["AudioMode"] is None:
            self.activateAudioMode(0)
        else:
            newAudioMode = (self.status_variables["AudioMode"] + 1) % 3
            self.activateAudioMode(newAudioMode)

    def sendCommand(self, cmd):
        with self.sockLock:
            self.sock.sendall(cmd)

    def repeatCommandThread(self, stopRepeatingCommand, cmd_str, interval,
                            nb_loops, end_event_string):
        for loop_cur in range(nb_loops):
            if stopRepeatingCommand.isSet():
                break
            self.sendCommand(cmd_str)
            stopRepeatingCommand.wait(interval)
        stopRepeatingCommand.set()
        self.TriggerEvent(end_event_string)
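For reference, the plugin above drives the amplifier with a plain-text, CR-terminated command protocol over TCP port 23. A minimal standalone sketch of that exchange (the IP address is only a placeholder matching the plugin's default; without a reachable amplifier the recv simply times out):

import socket

AMP_IP = "192.168.1.197"  # placeholder IP, same as the plugin's default

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect((AMP_IP, 23))          # the amplifier listens on the telnet port
sock.sendall(b'PW?\r')              # request the power status
try:
    reply = sock.recv(1024)
    for msg in reply.split(b'\r'):  # responses are also CR-separated
        if msg:
            print(msg)              # e.g. b'PWON' or b'PWSTANDBY'
except socket.timeout:
    pass
finally:
    sock.close()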
예제 #50
0
class ReaderTreeCtrl(BaseCardTreeCtrl):
    """The ReaderTreeCtrl monitors inserted cards and readers and notifies the
    application client dialog of any card activation."""
    def __init__(self,
                 parent,
                 ID=wx.NewId(),
                 pos=wx.DefaultPosition,
                 size=wx.DefaultSize,
                 style=0,
                 clientpanel=None):
        """Constructor. Create a reader tree control."""

        BaseCardTreeCtrl.__init__(self, parent, ID, pos, size,
                                  wx.TR_SINGLE | wx.TR_NO_BUTTONS, clientpanel)

        self.mutex = RLock()

        self.root = self.AddRoot("Smartcard Readers")
        self.SetPyData(self.root, None)
        self.SetItemImage(self.root, self.fldrindex, wx.TreeItemIcon_Normal)
        self.SetItemImage(self.root, self.fldropenindex,
                          wx.TreeItemIcon_Expanded)
        self.Expand(self.root)

    def AddATR(self, readernode, atr):
        """Add an ATR to a reader node."""
        capchild = self.AppendItem(readernode, atr)
        self.SetPyData(capchild, None)
        self.SetItemImage(capchild, self.cardimageindex,
                          wx.TreeItemIcon_Normal)
        self.SetItemImage(capchild, self.cardimageindex,
                          wx.TreeItemIcon_Expanded)
        self.Expand(capchild)
        return capchild

    def GetATR(self, reader):
        """Return the ATR of the card inserted into the reader."""
        atr = "no card inserted"
        try:
            if not type(reader) is str:
                connection = reader.createConnection()
                connection.connect()
                atr = toHexString(connection.getATR())
                connection.disconnect()
        except NoCardException:
            pass
        except CardConnectionException:
            pass
        return atr

    def OnAddCards(self, addedcards):
        """Called when a card is inserted.
        Adds the smart card child to the reader node."""
        self.mutex.acquire()
        try:
            parentnode = self.root
            for cardtoadd in addedcards:
                (childReader, cookie) = self.GetFirstChild(parentnode)
                found = False
                while childReader.IsOk() and not found:
                    if self.GetItemText(childReader) == str(cardtoadd.reader):
                        (childCard, cookie2) = self.GetFirstChild(childReader)
                        self.SetItemText(childCard, toHexString(cardtoadd.atr))
                        self.SetPyData(childCard, cardtoadd)
                        found = True
                    else:
                        (childReader,
                         cookie) = self.GetNextChild(parentnode, cookie)

                # reader was not found, add reader node
                # this happens when card monitoring thread signals
                # added cards before reader monitoring thread signals
                # added readers
                if not found:
                    childReader = self.AppendItem(parentnode,
                                                  str(cardtoadd.reader))
                    self.SetPyData(childReader, cardtoadd.reader)
                    self.SetItemImage(childReader, self.readerimageindex,
                                      wx.TreeItemIcon_Normal)
                    self.SetItemImage(childReader, self.readerimageindex,
                                      wx.TreeItemIcon_Expanded)
                    childCard = self.AddATR(childReader,
                                            toHexString(cardtoadd.atr))
                    self.SetPyData(childCard, cardtoadd)
                    self.Expand(childReader)

            self.Expand(self.root)
        finally:
            self.mutex.release()
        self.EnsureVisible(self.root)
        self.Repaint()

    def OnAddReaders(self, addedreaders):
        """Called when a reader is inserted.
        Adds the smart card reader to the smartcard readers tree."""
        self.mutex.acquire()

        try:
            parentnode = self.root
            for readertoadd in addedreaders:
                # is the reader already here?
                found = False
                (childReader, cookie) = self.GetFirstChild(parentnode)
                while childReader.IsOk() and not found:
                    if self.GetItemText(childReader) == str(readertoadd):
                        found = True
                    else:
                        (childReader,
                         cookie) = self.GetNextChild(parentnode, cookie)
                if not found:
                    childReader = self.AppendItem(parentnode, str(readertoadd))
                    self.SetPyData(childReader, readertoadd)
                    self.SetItemImage(childReader, self.readerimageindex,
                                      wx.TreeItemIcon_Normal)
                    self.SetItemImage(childReader, self.readerimageindex,
                                      wx.TreeItemIcon_Expanded)
                    self.AddATR(childReader, self.GetATR(readertoadd))
                    self.Expand(childReader)
            self.Expand(self.root)
        finally:
            self.mutex.release()
        self.EnsureVisible(self.root)
        self.Repaint()

    def OnRemoveCards(self, removedcards):
        """Called when a card is removed.
        Removes the card from the tree."""
        self.mutex.acquire()
        try:
            parentnode = self.root
            for cardtoremove in removedcards:
                (childReader, cookie) = self.GetFirstChild(parentnode)
                found = False
                while childReader.IsOk() and not found:
                    if self.GetItemText(childReader) == \
                            str(cardtoremove.reader):
                        (childCard, cookie2) = self.GetFirstChild(childReader)
                        self.SetItemText(childCard, 'no card inserted')
                        found = True
                    else:
                        (childReader, cookie) = \
                            self.GetNextChild(parentnode, cookie)
            self.Expand(self.root)
        finally:
            self.mutex.release()
        self.EnsureVisible(self.root)
        self.Repaint()

    def OnRemoveReaders(self, removedreaders):
        """Called when a reader is removed.
        Removes the reader from the smartcard readers tree."""
        self.mutex.acquire()
        try:
            parentnode = self.root
            for readertoremove in removedreaders:
                (childReader, cookie) = self.GetFirstChild(parentnode)
                while childReader.IsOk():
                    if self.GetItemText(childReader) == str(readertoremove):
                        self.Delete(childReader)
                    else:
                        (childReader, cookie) = \
                            self.GetNextChild(parentnode, cookie)
            self.Expand(self.root)
        finally:
            self.mutex.release()
        self.EnsureVisible(self.root)
        self.Repaint()
예제 #51
0
class RecentlyUsedContainer(dict):
    """
    Provides a dict-like container that keeps up to ``maxsize`` keys, throwing
    away the least-recently-used keys once ``maxsize`` is exceeded.
    """
    CLEANUP_FACTOR = 10

    def __init__(self, maxsize=10):
        self._maxsize = maxsize
        self._container = {}
        self.access_log = deque()
        self.access_log_lock = RLock()
        self.access_lookup = {}
        self.access_log_limit = maxsize * self.CLEANUP_FACTOR

    def _invalidate_entry(self, key):
        """If exists: Invalidate old entry and return it."""
        old_entry = self.access_lookup.get(key)
        if old_entry:
            old_entry.is_valid = False
        return old_entry

    def _push_entry(self, key):
        """Push entry onto our access log, invalidate the old entry if exists."""
        self._invalidate_entry(key)
        new_entry = AccessEntry(key)
        self.access_lookup[key] = new_entry
        self.access_log_lock.acquire()
        self.access_log.appendleft(new_entry)
        self.access_log_lock.release()

    def _prune_entries(self, num):
        """Pop entries from our access log until we popped ``num`` valid ones."""
        while num > 0:
            self.access_log_lock.acquire()
            p = self.access_log.pop()
            self.access_log_lock.release()
            if not p.is_valid:
                continue
            dict.pop(self, p.key, None)
            self.access_lookup.pop(p.key, None)
            num -= 1

    def _prune_invalidated_entries(self):
        """Rebuild our access_log without the invalidated entries."""
        self.access_log_lock.acquire()
        self.access_log = deque((e for e in self.access_log if e.is_valid))
        self.access_log_lock.release()

    def _get_ordered_access_keys(self):
        """Return ordered access keys for inspection. Used for testing."""
        self.access_log_lock.acquire()
        r = [e.key for e in self.access_log if e.is_valid]
        self.access_log_lock.release()
        return r

    def __getitem__(self, key):
        item = dict.get(self, key)
        if not item:
            raise KeyError(key)
        self._push_entry(key)
        if len(self.access_log) > self.access_log_limit:
            self._prune_invalidated_entries()
        return item

    def __setitem__(self, key, item):
        dict.__setitem__(self, key, item)
        self._push_entry(key)
        self._prune_entries(len(self) - self._maxsize)

    def __delitem__(self, key):
        self._invalidate_entry(key)
        self.access_lookup.pop(key, None)
        dict.__delitem__(self, key)

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
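As a quick illustration of the eviction behaviour described in the class docstring, a usage sketch (it assumes the module's AccessEntry helper and the deque/RLock imports that this class relies on are available):

lru = RecentlyUsedContainer(maxsize=2)
lru['a'] = 1
lru['b'] = 2
lru['a']                      # touching 'a' makes it most recently used
lru['c'] = 3                  # exceeds maxsize, so 'b' (least recently used) is dropped
print(sorted(lru.keys()))     # -> ['a', 'c']
print(lru.get('b', 'gone'))   # -> 'gone'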
예제 #52
0
class PlayerPod():
    """La classe PlayerPod. Représente un joueur."""
    def __init__(self, pseudo, initialPosition, initialAngle, canvasTagId,
                 image, photoImage):
        """constructeur
        
        Arguments:
            pseudo -- Le pseudo du joueur.
            initialPosition  -- La position initiale du joueur.
            initialAngle -- L'angle initial du joueur.
            canvasTagId -- Le tag du joueur dans le canvas.
            image -- L'image du joueur.
        """
        self.pseudo = pseudo
        self.position = initialPosition
        self.angle = initialAngle
        self.canvasTagId = canvasTagId
        self.image = image
        self.photoImage = photoImage
        self.vector = Pair(0, 0)
        self.updateVector()
        self.lock = RLock()
        self.angleCommand = 0
        self.thrustCommand = 0
        self.immobilized = False
        self.immobilizedSince = 0

    def __str__(self):
        return "Position - " + str(self.position) + ", Vecteur - " + str(
            self.vector) + ", Angle - " + str(self.angle)

    def acquire(self, blocking=True, timeout=-1):
        """Tente d'obtenir le verrou du joueur.
        
        Keyword Arguments:
            blocking  -- La tentative doit-elle être bloquante ? (default: {True})
            timeout -- Le délai d'attente maximum pour obtenir le verrou (default: {-1})
        
        Returns:
            True si le verrou à été obtenu, False sinon.
        """
        return self.lock.acquire(blocking=blocking, timeout=timeout)

    def release(self):
        """Relâche le verrou du joueur."""
        self.lock.release()

    def getPseudo(self):
        """Getteur sur le pseudo du joueur.
        
        Returns:
            Le pseudo du joueur.
        """
        return self.pseudo

    def getPosition(self):
        """Getteur sur la position du joueur.
        
        Returns:
            La position du joueur.
        """
        return self.position

    def getPositionX(self):
        """Getteur sur la coordonnée X du joueur.
        
        Returns:
            La coordonnée X du joueur.
        """
        return self.position.getX()

    def setPositionX(self, newX):
        """Setteur de la coordonnée X du joueur.
        
        Arguments:
            newX -- La nouvelle coordonnée X du joueur.
        """
        self.position.setX(newX)

    def getPositionY(self):
        """Getteur sur la coordonnée Y du joueur.
        
        Returns:
            La coordonnée Y du joueur.
        """
        return self.position.getY()

    def setPositionY(self, newY):
        """Setteur de la coordonnée Y du joueur.
        
        Arguments:
            newY -- La nouvelle coordonnée Y du joueur.
        """
        self.position.setY(newY)

    def getVectorX(self):
        """Getteur sur le vecteur X du joueur.
        
        Returns:
            Le vecteur X du joueur.
        """
        return self.vector.getX()

    def setVectorX(self, newX):
        """Setteur sur le vecteur X du joueur.
        
        Arguments:
            newX -- Le vecteur X du joueur.
        """
        self.vector.setX(newX)

    def getVectorY(self):
        """Getteur sur le vecteur Y du joueur.
        
        Returns:
            Le vecteur Y du joueur.
        """
        return self.vector.getY()

    def setVectorY(self, newY):
        """Setteur sur le vecteur Y du joueur.
        
        Arguments:
            newY -- Le vecteur Y du joueur.
        """
        self.vector.setY(newY)

    def getAngle(self):
        """Getteur sur l'angle du joueur.
        
        Returns:
            L'angle du joueur.
        """
        return self.angle

    def setAngle(self, newAngle):
        """Setteur sur l'angle du joueur.
        
        Arguments:
            newAngle -- Le nouvel angle.
        """
        self.angle = newAngle

    def getCanvasTagId(self):
        """Getteur sur le tag du joueur pour le canvas de l'interface graphique.
        
        Returns:
            Le tag du joueur.
        """
        return self.canvasTagId

    def setCanvasTagId(self, newId):
        """Setteur sur le tag du joueur pour le canvas de l'interface graphique.
        
        Arguments:
            newId -- Le nouveau tag du joueur.
        """
        self.canvasTagId = newId

    def getImage(self):
        """Getteur sur l'image du joueur dans l'interface graphique.
        
        Returns:
            L'image du joueur.
        """
        return self.image

    def setImage(self, image):
        """Setteur sur l'image du joueur dans l'interface graphique.
        
        Arguments:
            image -- La nouvelle image du joueur.
        """
        self.image = image

    def setPhotoImage(self, photoImage):
        """Setteur sur la photoImage du joueur dans l'interface graphique.
        
        Arguments:
            photoImage -- La nouvelle photoImage du joueur.
        """
        self.photoImage = photoImage

    def getCommand(self):
        """Retourne les commandes sous forme d'une chaîne de caractère comme spécifié dans le protocole.
        
        Returns:
            Les commandes effectuées sur le pod.
        """
        return "A" + str(radians(self.angleCommand)) + "T" + str(
            self.thrustCommand)

    def resetCommand(self):
        """Reset des commandes du joueur."""
        self.thrustCommand = 0
        self.angleCommand = 0

    def updateVector(self):
        """Met à jour le vecteur du joueur en fonction de son angle."""
        # Met à jour vx
        newVX = self.getVectorX() * cos(radians(self.getAngle()))
        self.setVectorX(newVX)

        # Update vy
        newVY = self.getVectorY() * -sin(radians(self.getAngle()))
        self.setVectorY(newVY)

    def fullyUpdate(self, newX, newY, newVX, newVY, newAngle):
        """Remet entièrement à jour un joueur.
        
        Arguments:
            newX  -- La nouvelle coordonnée X.
            newY -- La nouvelle coordonnée Y.
            newVX -- Le nouveau vecteur X.
            newVY -- Le nouveau vecteur Y.
            newAngle -- Le nouvel angle.
        """

        self.position.setX(newX)
        self.position.setY(newY)
        self.vector.setX(newVX)
        self.vector.setY(newVY)
        self.setAngle(round(degrees(newAngle)))

    def fullyUpdateFromScratch(self, newX, newY, newVX, newVY, newAngle):
        """Remet entièrement à jour un joueur jusque la sans position ni vecteur.
        
        Arguments:
            newX  -- La nouvelle coordonnée X.
            newY -- La nouvelle coordonnée Y.
            newVX -- Le nouveau vecteur X.
            newVY -- Le nouveau vecteur Y.
            newAngle -- Le nouvel angle.
        """
        self.position = Pair(newX, newY)
        self.vector = Pair(newVX, newVY)
        self.angle = round(degrees(newAngle))

    def thrust(self):
        """Applique une impulsion au joueur. Met à jour ses vecteurs."""
        # Met à jour vx.
        newVX = self.getVectorX()
        newVX += dat.THRUST_IT * cos(radians(self.getAngle()))
        self.setVectorX(newVX)

        # Update vy
        newVY = self.getVectorY()
        newVY += dat.THRUST_IT * -sin(radians(self.getAngle()))
        self.setVectorY(newVY)

        self.thrustCommand += 1

    def antiClock(self):
        """Applique une rotation inverse sur le joueur. Met à jour son angle"""
        newAngle = self.getAngle() + dat.TURN_IT
        self.setAngle(newAngle % 360)
        self.angleCommand += dat.TURN_IT

    def clock(self):
        """Applique une rotation sur le joueur. Met à jour son angle"""
        newAngle = self.getAngle() - dat.TURN_IT
        self.setAngle(newAngle % 360)
        self.angleCommand -= dat.TURN_IT

    def reverseVectors(self):
        """Inverse les vecteurs du joueur."""
        self.setVectorX(-self.getVectorX())
        self.setVectorY(-self.getVectorY())

    def isImmobilized(self):
        """ Un joueur est-il actuellement immobilisé ?
        
        Returns:
            True si le joueur est immobilisé, False sinon.
        """
        return self.immobilized

    def immobilize(self):
        """Immobilise un joueur."""
        self.immobilized = True
        self.immobilizedSince = 0

    def stepImmobilize(self):
        """Met à jour le nombre de server_tickrate écoulé depuis que le jouer est immobilisé."""
        self.immobilizedSince += 1

    def unImmobilize(self):
        """Redonne la possibilité à un joueur de bouger."""
        self.immobilized = False
        self.immobilizedSince = 0
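To try PlayerPod outside the game, here is a minimal sketch with hypothetical stand-ins for the Pair helper and the dat constants module used above (neither is shown in this snippet, so their APIs and values are assumptions; the real project supplies its own versions):

from math import cos, sin, radians, degrees
from threading import RLock

class Pair(object):
    """Hypothetical stand-in for the project's mutable 2D pair."""
    def __init__(self, x, y):
        self.x, self.y = x, y
    def getX(self): return self.x
    def getY(self): return self.y
    def setX(self, x): self.x = x
    def setY(self, y): self.y = y
    def __str__(self): return "(%s, %s)" % (self.x, self.y)

class dat(object):
    """Hypothetical stand-in for the game's constants module."""
    THRUST_IT = 0.5   # assumed thrust increment
    TURN_IT = 15      # assumed rotation step in degrees

pod = PlayerPod("alice", Pair(0, 0), 0, None, None, None)
pod.thrust()             # adds a thrust impulse along the current angle
pod.antiClock()          # rotates counterclockwise by TURN_IT degrees
print(pod)               # position, vector components and angle
print(pod.getCommand())  # e.g. 'A0.261799...T1' (angle command in radians, thrust count)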
예제 #53
0
class ParamDictionary(object):
    
    def __init__(self, reg_manager):
        """
        ctor.
        @param reg_manager: registration manager whose param_subscribers
        (of type Registrations) are notified of parameter updates
        """
        self.lock = RLock()
        self.parameters = {}
        self.reg_manager = reg_manager

    def get_param_names(self):
        """
        Get list of all parameter names stored on this server.

        @return: [code, statusMessage, parameterNameList]
        @rtype: [int, str, [str]]
        """
        try:
            self.lock.acquire()
            param_names = []
            _get_param_names(param_names, '/', self.parameters)
        finally:
            self.lock.release()
        return param_names
        
    def search_param(self, ns, key):
        """
        Search for matching parameter key for search param
        key. Search for key starts at ns and proceeds upwards to
        the root. As such, search_param should only be called with a
        relative parameter name.

        search_param's behavior is to search for the first partial match.
        For example, imagine that there are two 'robot_description' parameters:

         - /robot_description
         -   /robot_description/arm
         -   /robot_description/base

         - /pr2/robot_description
         -   /pr2/robot_description/base

        If I start in the namespace /pr2/foo and search for
        'robot_description', search_param will match
        /pr2/robot_description. If I search for 'robot_description/arm'
        it will return /pr2/robot_description/arm, even though that
        parameter does not exist (yet).

        @param ns: namespace to begin search from.
        @type  ns: str
        @param key: Parameter key. 
        @type  key: str
        @return: key of matching parameter or None if no matching
        parameter.
        @rtype: str
        """
        if not key or is_private(key):
            raise ValueError("invalid key")
        if not is_global(ns):
            raise ValueError("namespace must be global")            
        if is_global(key):
            if self.has_param(key):
                return key
            else:
                return None

        # there are more efficient implementations, but our hierarchy
        # is not very deep and this is fairly clean code to read.

        # - we only search for the first namespace in the key to check for a match
        key_namespaces = [x for x in key.split(SEP) if x]
        key_ns = key_namespaces[0]

        #  - corner case: have to test initial namespace first as
        #    negative indices won't work with 0
        search_key = ns_join(ns, key_ns)
        if self.has_param(search_key):
            # resolve to full key
            return ns_join(ns, key) 
        
        namespaces = [x for x in ns.split(SEP) if x]
        for i in range(1, len(namespaces)+1):
            search_key = SEP + SEP.join(namespaces[0:-i] + [key_ns])
            if self.has_param(search_key):
                # we have a match on the namespace of the key, so
                # compose the full key and return it
                full_key = SEP + SEP.join(namespaces[0:-i] + [key]) 
                return full_key
        return None

    def get_param(self, key):
        """
        Get the parameter in the parameter dictionary.

        @param key: parameter key
        @type  key: str
        @return: parameter value
        """
        try:
            self.lock.acquire()
            val = self.parameters
            if key != GLOBALNS:
                # split by the namespace separator, ignoring empty splits
                namespaces = [x for x in key.split(SEP)[1:] if x]
                for ns in namespaces:
                    if not type(val) == dict:
                        raise KeyError(val)
                    val = val[ns]
            return val
        finally:
            self.lock.release()
    
    def set_param(self, key, value, notify_task=None, caller_id=None):
        """
        Set the parameter in the parameter dictionary.

        @param key: parameter key
        @type  key: str
        @param value: parameter value
        @param notify_task: function to call with
        subscriber updates. updates is of the form
        [(subscribers, param_key, param_value)*]. The empty dictionary
        represents an unset parameter.
        @type  notify_task: fn(updates)
        @param caller_id: the caller id
        @type caller_id: str
        """
        try:
            self.lock.acquire()
            if key == GLOBALNS:
                if type(value) != dict:
                    raise TypeError("cannot set root of parameter tree to non-dictionary")
                self.parameters = value
            else:
                namespaces = [x for x in key.split(SEP) if x]
                # - last namespace is the actual key we're storing in
                value_key = namespaces[-1]
                namespaces = namespaces[:-1]
                d = self.parameters
                # - descend tree to the node we're setting
                for ns in namespaces:
                    if not ns in d:
                        new_d = {}
                        d[ns] = new_d
                        d = new_d
                    else:
                        val = d[ns]
                        # implicit type conversion of value to namespace
                        if type(val) != dict:
                            d[ns] = val = {}
                        d = val

                d[value_key] = value

            # ParamDictionary needs to queue updates so that the updates are thread-safe
            if notify_task:
                updates = compute_param_updates(self.reg_manager.param_subscribers, key, value, caller_id)
                if updates:
                    notify_task(updates)
        finally:
            self.lock.release()


    def subscribe_param(self, key, registration_args):
        """
        @param key: parameter key
        @type  key: str
        @param registration_args: additional args to pass to
        subscribers.register. First parameter is always the parameter
        key.
        @type  registration_args: tuple
        """
        if key != SEP:
            key = canonicalize_name(key) + SEP
        try:
            self.lock.acquire()
            # fetch parameter value
            try:
                val = self.get_param(key)
            except KeyError:
                # parameter not set yet
                val = {}
            self.reg_manager.register_param_subscriber(key, *registration_args)
            return val
        finally:
            self.lock.release()
            

    def unsubscribe_param(self, key, unregistration_args):
        """
        @param key str: parameter key
        @type  key: str
        @param unregistration_args: additional args to pass to
        subscribers.unregister. i.e. unregister will be called with
        (key, *unregistration_args)
        @type  unregistration_args: tuple
        @return: return value of subscribers.unregister()
        """
        if key != SEP:
            key = canonicalize_name(key) + SEP
        return self.reg_manager.unregister_param_subscriber(key, *unregistration_args)

    def delete_param(self, key, notify_task=None):
        """
        Delete the parameter in the parameter dictionary.
        @param key str: parameter key
        @param notify_task fn(updates): function to call with
        subscriber updates. updates is of the form
        [(subscribers, param_key, param_value)*]. The empty dictionary
        represents an unset parameter.
        """
        try:
            self.lock.acquire()
            if key == GLOBALNS:
                raise KeyError("cannot delete root of parameter tree")
            else:
                # key is global, so first split is empty
                namespaces = [x for x in key.split(SEP) if x]
                # - last namespace is the actual key we're deleting
                value_key = namespaces[-1]
                namespaces = namespaces[:-1]
                d = self.parameters
                # - descend tree to the node we're setting
                for ns in namespaces:
                    if type(d) != dict or not ns in d:
                        raise KeyError(key)
                    else:
                        d = d[ns]

                if not value_key in d:
                    raise KeyError(key)
                else:
                    del d[value_key]
                    
                # ParamDictionary needs to queue updates so that the updates are thread-safe
                if notify_task:
                    updates = compute_param_updates(self.reg_manager.param_subscribers, key, {})
                    if updates:
                        notify_task(updates)
        finally:
            self.lock.release()
    
    def has_param(self, key):
        """
        Test for parameter existence

        @param key: parameter key
        @type  key: str
        @return: True if parameter set, False otherwise
        @rtype: bool
        """
        try:
            # more efficient implementations are certainly possible,
            # but this guarantees correctness for now
            self.get_param(key)
            return True
        except KeyError:
            return False
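The upward namespace search that search_param documents can be illustrated with a small standalone re-implementation (a simplified sketch of the same idea, not the rosmaster code):

def search_upwards(existing_keys, ns, key):
    # look for the first namespace component of `key`, starting at `ns`
    # and walking up towards the root, then resolve `key` against the
    # namespace where the match was found
    first = [p for p in key.split('/') if p][0]
    parts = [p for p in ns.split('/') if p]
    for i in range(len(parts), -1, -1):
        candidate = '/' + '/'.join(parts[:i] + [first])
        if candidate in existing_keys:
            return '/' + '/'.join(parts[:i] + [key])
    return None

print(search_upwards({'/pr2/robot_description'}, '/pr2/foo', 'robot_description'))
# -> /pr2/robot_description
print(search_upwards({'/pr2/robot_description'}, '/pr2/foo', 'robot_description/arm'))
# -> /pr2/robot_description/arm (namespace matched even though the key is unset)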
예제 #54
0
class InputHandler(ExitNotifyThread):
    """Listens for input via the curses interfaces"""

    #TODO: we need to use the ugly ExitNotifyThread (rather than simply
    #threading.Thread) here, so that exiting this thread via the callback
    #handler kills off all parents too. Otherwise, they would simply
    #continue.
    def __init__(self, ui):
        super(InputHandler, self).__init__()
        self.char_handler = None
        self.ui = ui
        self.enabled = Event()
        # We will only parse input if we are enabled.
        self.inputlock = RLock()
        # denotes whether we should be handling the next char.
        self.start()  #automatically start the thread

    def get_next_char(self):
        """Return the key pressed or -1.

        Wait until `enabled` and loop internally every stdscr.timeout()
        msecs, releasing the inputlock.
        :returns: char or None if disabled while in here"""

        self.enabled.wait()
        while self.enabled.is_set():
            with self.inputlock:
                char = self.ui.stdscr.getch()
            if char != -1: yield char

    def run(self):
        while True:
            char_gen = self.get_next_char()
            for char in char_gen:
                self.char_handler(char)
                #curses.ungetch(char)

    def set_char_hdlr(self, callback):
        """Sets a character callback handler.

        If a key is pressed it will be passed to this handler. Keys
        include the curses.KEY_RESIZE key.

        callback is a function taking a single arg -- the char pressed.
        If callback is None, input will be ignored."""

        with self.inputlock:
            self.char_handler = callback
            # start or stop the parsing of things
            if callback is None:
                self.enabled.clear()
            else:
                self.enabled.set()

    def input_acquire(self):
        """Call this method when you want exclusive input control.

        Make sure to call input_release afterwards! While this lock is
        held, input can go to e.g. the getpass input."""

        self.enabled.clear()
        self.inputlock.acquire()

    def input_release(self):
        """Call this method when you are done getting input."""

        self.inputlock.release()
        self.enabled.set()
예제 #55
0
class AzureCloud(Cloud):
    def __init__(self, *args, **kwargs):
        super(AzureCloud, self).__init__(*args, **kwargs)
        self.vnet_lock = RLock()
        self.pace_lock = RLock()
        self.pace_timer = 300

    # Is only allowed to be called once every 5 seconds
    # TODO: Why is there a limitation like this on Azure ... 5 seconds
    # wait per request is too much ...
    #@rate_limit(0.2)
    def execute(self, command, obj={}):
        ret = super(AzureCloud, self).execute(command, obj)

        # If we are too fast, back off for 5 minutes before continuing again
        if 'Too many requests received' in obj['stderr']:
            print "Sleeping for 5 minutes:\n > %s, %s, %s" % (
                command, obj['stderr'], obj['stdout'])
            time.sleep(self.pace_timer)
            return self.execute(command, obj)

        return ret

    def start_virtual_machine(self, vm):
        """ Start a virtual machine """
        cmd = ['azure', 'vm', 'start', self.unique(vm.name)]
        vm._started = True
        return self.execute(cmd)

    def stop_virtual_machine(self, vm):
        """ Stop a virtual machine """
        cmd = ['azure', 'vm', 'shutdown', self.unique(vm.name)]
        vm._started = False
        return self.execute(cmd)

    def status_virtual_machine(self, vm):
        return vm._started

        # cmd = ['azure', 'vm', 'show', self.unique(vm.name)]
        #
        # output = {}
        # self.execute(cmd, output)
        # if 'ReadyRole' in output['stdout']:
        #     return True
        # if 'Stopped' in output['stdout']:
        #     return False
        # return None

    def exists_virtual_machine(self, vm):
        cmd = ['azure', 'vm', 'show', self.unique(vm.name)]
        output = {}
        self.execute(cmd, output)
        return 'No VMs found' in output['stdout']

    def address_virtual_machine(self, vm):
        """ Returns the address of a vm """
        # TODO: Change the name to address_virtual_machine
        return self.unique(vm.name) + ".cloudapp.net"

    def hashify_22(self, name):
        import hashlib
        return str(hashlib.md5(name).hexdigest())[0:22]

    def create_location(self, group):
        """ Create an affinity group in microsoft terms """
        # cmd  = ['azure', 'storage', 'account', 'create']
        # cmd += ['-a', self.unique(group.name)]
        # cmd += ['--type', group.storage_type]
        # cmd += [self.hashify_22(self.unique(group.name))]

        # return self.execute(cmd)
        cmd = ['azure', 'account', 'affinity-group', 'create']
        cmd += self.if_available('-l', group.location)
        cmd += ['-e', base64.b64encode(self.unique(group.name))]
        cmd += [self.unique(group.name)]

        self.execute(cmd)

        # TODO: creating a location tends to fail because we can't
        # cleanly delete it ... for now return true on creating a
        # location
        return True

    def create_security_group(self, ep):
        """ Create endpoints in the microsoft terms """
        ret = True

        # TODO: Can parallelize here
        def create_endpoint(vm):
            cmd = ['azure', 'vm', 'endpoint', 'create']
            cmd += [self.unique(vm), ep.public_port, ep.private_port]
            cmd += ['--name', self.unique(ep.name)[-15:]
                    ]  # Endpoint name should be at most 15 characters
            cmd += ['--protocol', ep.protocol]
            self.execute(cmd)

        parallel(create_endpoint, ep.virtual_machines())
        return ret

    def create_virtual_machine(self, vm):
        """ Create a virtual machine """
        cmd = ['azure', 'vm', 'create', '-z', vm.type]
        cmd += self.if_available('-a', self.unique(vm.location()))
        cmd += self.if_available('-w', self.unique(vm.virtual_network()))
        cmd += [
            '-e', '22',
            self.unique(vm.name), vm.image, 'cloudbench', '-P', '-t',
            constants.DEFAULT_VM_PUBLIC_KEY
        ]

        ret = self.execute(cmd)
        return True

    def create_virtual_network(self, vnet):
        """ Create a virtual network """
        # Azure cannot create multiple VNets together, lock on creation
        # of each VNet
        self.vnet_lock.acquire()
        ret = False
        try:
            cmd = ['azure', 'network', 'vnet', 'create']
            cmd += self.if_available('-e', vnet.address_range)
            cmd += self.if_available('-a', self.unique(vnet.location()))
            cmd += [self.unique(vnet.name)]

            ret = self.execute(cmd)
        finally:
            self.vnet_lock.release()
        return True

    def delete_security_group(self, _):
        """ Delete an azure 'security-group' a.k.a. an endpoint.

        We do not need to delete anything here; Azure takes care of it when we wipe out the machine."""

        return True

    def delete_virtual_machine(self, virtual_machine):
        """ Delete a virtual machine and the associated storage """
        cmd = [
            'azure', 'vm', 'delete', '-b', '-q',
            self.unique(virtual_machine.name)
        ]
        return self.execute(cmd)

    def delete_virtual_network(self, vnet):
        """ Delete a virtual network """
        # Serialize network creation
        self.vnet_lock.acquire()
        ret = False
        try:
            cmd = [
                'azure', 'network', 'vnet', 'delete', '-q',
                self.unique(vnet.name)
            ]
            ret = self.execute(cmd)
        finally:
            self.vnet_lock.release()

        return ret

    def delete_location(self, group):
        cmd = ['azure', 'account', 'affinity-group', 'delete', '-q']
        cmd += [self.unique(group.name)]
        self.execute(cmd)

        # TODO: deleting a location tends to fail because we can't
        # cleanly delete it ... for now return true on deleting a
        # location
        return True
예제 #56
0
class CursesUtil:
    def __init__(self, *args, **kwargs):
        # iolock protects access to the curses UI
        self.iolock = RLock()
        self.tframe_lock = RLock()
        # tframe_lock ensures that self.threadframes is only manipulated
        # from one thread at a time.
        self.colormap = {}
        """dict, translating color string to curses color pair number"""

    def curses_colorpair(self, col_name):
        """Return the curses color pair, that corresponds to the color."""

        return curses.color_pair(self.colormap[col_name])

    def init_colorpairs(self):
        """Initialize the curses color pairs available."""

        # set special colors 'white' and 'banner'
        self.colormap['white'] = 0  #hardcoded by curses
        curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
        self.colormap['banner'] = 1  # color 'banner' for bannerwin

        bcol = curses.COLOR_BLACK
        colors = (  # name, color, bold?
            ('black', curses.COLOR_BLACK,
             False), ('blue', curses.COLOR_BLUE,
                      False), ('red', curses.COLOR_RED,
                               False), ('purple', curses.COLOR_MAGENTA, False),
            ('cyan', curses.COLOR_CYAN,
             False), ('green', curses.COLOR_GREEN,
                      False), ('orange', curses.COLOR_YELLOW, False))
        #set the rest of all colors starting at pair 2
        i = 1
        for name, fcol, bold in colors:
            i += 1
            self.colormap[name] = i
            curses.init_pair(i, fcol, bcol)

    def lock(self, block=True):
        """Locks the Curses ui thread.

        Can be invoked multiple times from the owning thread. Invoking
        from a non-owning thread blocks and waits until it has been
        unlocked by the owning thread."""

        return self.iolock.acquire(block)

    def unlock(self):
        """Unlocks the Curses ui thread.

        Decrease the lock counter by one and unlock the ui thread if the
        counter reaches 0.  Only call this method when the calling
        thread owns the lock. A RuntimeError is raised if this method is
        called when the lock is unlocked."""

        self.iolock.release()

    def exec_locked(self, target, *args, **kwargs):
        """Perform an operation with full locking."""

        self.lock()
        try:
            target(*args, **kwargs)
        finally:
            self.unlock()

    def refresh(self):
        def lockedstuff():
            curses.panel.update_panels()
            curses.doupdate()

        self.exec_locked(lockedstuff)

    def isactive(self):
        return hasattr(self, 'stdscr')
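A small usage sketch of the locking helpers above (assuming the module's RLock import; the color-pair setup needs a real curses screen, but exec_locked can be exercised on its own):

def redraw():
    # stand-in for the curses drawing and refresh calls that would
    # normally run here while holding the shared iolock
    print("redrawing under iolock")

util = CursesUtil()
util.exec_locked(redraw)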
예제 #57
0
class priority_queue:
    def __init__(self):
        self.heap_list = [0]
        self.size = 0
        self.rlock = RLock()

    def insert(self, item):
        self.rlock.acquire()
        self.heap_list.append(item)
        self.size += 1
        self.__perc_up(self.size)
        self.rlock.release()

    def __perc_up(self, index):
        while index // 2 > 0:
            if self.heap_list[index] < self.heap_list[index // 2]:
                self.heap_list[index // 2], self.heap_list[
                    index] = self.heap_list[index], self.heap_list[index // 2]
            index = index // 2

    def get_min(self):
        if self.size > 0:
            return self.heap_list[1]

    def del_min(self):
        self.rlock.acquire()
        try:
            if self.size > 0:
                ret_val = self.heap_list[1]
                self.heap_list[1] = self.heap_list[self.size]
                self.size -= 1
                self.heap_list.pop()
                self.__perc_down(1)
                return ret_val
        finally:
            # release even on the early return; the original leaked the lock
            # whenever an item was actually removed
            self.rlock.release()

    def __perc_down(self, index):
        def get_min_child(index):
            if index * 2 + 1 > self.size:
                return index * 2
            else:
                if self.heap_list[index * 2] < self.heap_list[index * 2 + 1]:
                    return index * 2
                else:
                    return index * 2 + 1

        while index * 2 <= self.size:
            min_child = get_min_child(index)
            if self.heap_list[index] > self.heap_list[min_child]:
                self.heap_list[index], self.heap_list[
                    min_child] = self.heap_list[min_child], self.heap_list[
                        index]
            index = min_child

    def delete(self, item):
        self.heap_list.remove(item)
        self.size -= 1
        i = self.size // 2
        while i > 0:
            self.__perc_down(i)
            i -= 1

    def __len__(self):
        return self.size

    def __contains__(self, item):
        return item in self.heap_list

    def __iter__(self):  #TODO: also yields the leading 0 sentinel element
        return iter(self.heap_list)
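A quick usage sketch of the min-heap above (it only needs the threading.RLock import already used by the class):

pq = priority_queue()
for value in (5, 1, 3):
    pq.insert(value)
print(pq.get_min())   # -> 1 (smallest item sits at index 1 of the heap list)
print(pq.del_min())   # -> 1
print(len(pq))        # -> 2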
예제 #58
0
class Logger(object):
    # pasted from goagent
    CRITICAL = 5
    FATAL = CRITICAL
    ERROR = 4
    WARNING = 3
    WARN = WARNING
    INFO = 2
    DEBUG = 1
    VERBOSE = 0

    def __init__(self, *args, **kwargs):
        # self.level = self.__class__.INFO
        self.logf = None
        self.__write = __write = lambda x: sys.stdout.write(safestr(x))
        self.isatty = getattr(sys.stdout, 'isatty', lambda: False)()
        self.__set_error_color = lambda: None
        self.__set_warning_color = lambda: None
        self.__set_debug_color = lambda: None
        self.__set_verbose_color = lambda: None
        self.__reset_color = lambda: None
        if self.isatty:
            if os.name == 'nt':
                self._nt_color_lock = RLock()
                import ctypes
                SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
                GetStdHandle = ctypes.windll.kernel32.GetStdHandle
                self.__set_error_color = lambda: (self._nt_color_lock.acquire(
                ), SetConsoleTextAttribute(GetStdHandle(-11), 0x0C))
                self.__set_warning_color = lambda: (
                    self._nt_color_lock.acquire(),
                    SetConsoleTextAttribute(GetStdHandle(-11), 0x06))
                self.__set_debug_color = lambda: (self._nt_color_lock.acquire(
                ), SetConsoleTextAttribute(GetStdHandle(-11), 0x02))
                self.__set_verbose_color = lambda: (
                    self._nt_color_lock.acquire(),
                    SetConsoleTextAttribute(GetStdHandle(-11), 0x08))
                self.__set_bright_color = lambda: (self._nt_color_lock.acquire(
                ), SetConsoleTextAttribute(GetStdHandle(-11), 0x0F))
                self.__reset_color = lambda: (SetConsoleTextAttribute(
                    GetStdHandle(-11), 0x07), self._nt_color_lock.release())
            elif os.name == 'posix':
                self.__set_error_color = lambda: __write('\033[31m')
                self.__set_warning_color = lambda: __write('\033[33m')
                self.__set_debug_color = lambda: __write('\033[32m')
                self.__set_verbose_color = lambda: __write('\033[36m')
                self.__set_bright_color = lambda: __write('\033[32m')
                self.__reset_color = lambda: __write('\033[0m')

    @classmethod
    def getLogger(cls, *args, **kwargs):
        return cls(*args, **kwargs)

    def cleanup(self):
        if self.logf:
            _ = self.logf
            self.logf = None
            _.close()

    def set_logfile(self, fpath):
        if self.logf:
            self.logf.close()
        self.logf = open(fpath, "ab")

    def set_level(self, level):
        # Replace every method below the requested level with a no-op.
        f = ('verbose', 'debug', 'info')
        lv = min(max(level, 0), 3)
        for p in range(lv):
            setattr(self, f[p], self.dummy)

    def log(self, level, fmt, *args, **kwargs):
        # fmt=du8(fmt)
        try:
            try:
                self.__write('%-4s - [%s] %s\n' %
                             (level, datetime.datetime.now(
                                 tz_GMT8()).strftime('%X'), fmt % args))
            except (ValueError, TypeError):
                fmt = fmt.replace('%', '%%')
                self.__write('%-4s - [%s] %s\n' %
                             (level, datetime.datetime.now(
                                 tz_GMT8()).strftime('%X'), fmt % args))
        except IOError:  # fix for Windows console
            pass
        sys.stdout.flush()
        if self.logf:
            line = '[%s] %s\n' % (datetime.datetime.now(
                tz_GMT8()).strftime('%b %d %X'), fmt % args)
            self.logf.write(line.encode("utf-8", 'replace'))

    def dummy(self, *args, **kwargs):
        pass

    def debug(self, fmt, *args, **kwargs):
        self.__set_debug_color()
        self.log('DEBG', fmt, *args, **kwargs)
        self.__reset_color()

    def info(self, fmt, *args, **kwargs):
        self.log('INFO', fmt, *args, **kwargs)

    def verbose(self, fmt, *args, **kwargs):
        self.__set_verbose_color()
        self.log('VERB', fmt, *args, **kwargs)
        self.__reset_color()

    def warning(self, fmt, *args, **kwargs):
        self.__set_warning_color()
        self.log('WARN', fmt, *args, **kwargs)
        self.__reset_color()

    def warn(self, fmt, *args, **kwargs):
        self.warning(fmt, *args, **kwargs)

    def error(self, fmt, *args, **kwargs):
        self.__set_error_color()
        self.log('ERROR', fmt, *args, **kwargs)
        self.__reset_color()

    def exception(self, fmt, *args, **kwargs):
        self.error(fmt, *args, **kwargs)
        traceback.print_exc(file=sys.stderr)

    def critical(self, fmt, *args, **kwargs):
        self.__set_error_color()
        self.log('CRITICAL', fmt, *args, **kwargs)
        self.__reset_color()
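A minimal usage sketch for the logger above; it assumes the helpers the snippet relies on (``safestr``, ``tz_GMT8``) are defined elsewhere in the original project, and the file name, hosts, and messages are illustrative only.

log = Logger.getLogger()
log.set_logfile("app.log")      # hypothetical path: also mirror output to a file
log.set_level(2)                # drop verbose() and debug(), keep info() and up
log.info("listening on %s:%d", "0.0.0.0", 8087)
log.warning("certificate for %s is self-signed", "example.com")
log.error("upstream %s unreachable", "proxy.example.net")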
예제 #59
0
class MotorCtrl:
    def __init__(self):
        self.motor1 = Motor(26, 19)
        self.motor2 = Motor(5, 6)
        self.motor3 = Motor(12, 13)
        self.motor4 = Motor(23, 24)
        self.motors = (None, self.motor1, self.motor2, self.motor3,
                       self.motor4)
        self.loop = asyncio.get_event_loop()
        # Create a lock shared by the worker threads
        self.lock = RLock()

    def MoveMotor(self, num, direc, time):
        # Run one motor for ``time`` seconds in the given direction; the
        # lock keeps concurrent callers from driving the tray at the same time.
        with self.lock:
            self.motors[num].stop()
            if direc:
                self.motors[num].forward()
            else:
                self.motors[num].backward()
            sleep(time)
            self.motors[num].stop()

    async def asyncMoveMotor(self, num, direc):
        times = {1: 2, 2: 2, 3: 2, 4: 2}
        self.motors[num].stop()
        if direc:
            self.motors[num].forward()
        else:
            self.motors[num].backward()
        await asyncio.sleep(times[num])
        self.motors[num].stop()

    # Garbage category id: -1 'unrecognized', 0 'recyclable', 1 'hazardous', 2 'wet (food) waste', 3 'dry waste'
    def MovePan(self, types):
        tasks = []
        if types == 0:
            tasks.append(self.asyncMoveMotor(1, 1))
            tasks.append(self.asyncMoveMotor(2, 1))
            tasks.append(self.asyncMoveMotor(3, 0))
            tasks.append(self.asyncMoveMotor(4, 0))
        elif types == 1:
            tasks.append(self.asyncMoveMotor(1, 0))
            tasks.append(self.asyncMoveMotor(2, 1))
            tasks.append(self.asyncMoveMotor(3, 1))
            tasks.append(self.asyncMoveMotor(4, 0))
        elif types == 2:
            tasks.append(self.asyncMoveMotor(1, 0))
            tasks.append(self.asyncMoveMotor(2, 0))
            tasks.append(self.asyncMoveMotor(3, 1))
            tasks.append(self.asyncMoveMotor(4, 1))
        elif types == 3:
            tasks.append(self.asyncMoveMotor(1, 1))
            tasks.append(self.asyncMoveMotor(2, 0))
            tasks.append(self.asyncMoveMotor(3, 0))
            tasks.append(self.asyncMoveMotor(4, 1))
        else:
            return
        self.loop.run_until_complete(asyncio.wait(tasks))

    # Return the tray to its flat (level) position
    def MovePanFlat(self, types):
        tasks = []
        if types == 0:
            tasks.append(self.asyncMoveMotor(1, 0))
            tasks.append(self.asyncMoveMotor(2, 0))
            tasks.append(self.asyncMoveMotor(3, 1))
            tasks.append(self.asyncMoveMotor(4, 1))
        elif types == 1:
            tasks.append(self.asyncMoveMotor(1, 1))
            tasks.append(self.asyncMoveMotor(2, 0))
            tasks.append(self.asyncMoveMotor(3, 0))
            tasks.append(self.asyncMoveMotor(4, 1))
        elif types == 2:
            tasks.append(self.asyncMoveMotor(1, 1))
            tasks.append(self.asyncMoveMotor(2, 1))
            tasks.append(self.asyncMoveMotor(3, 0))
            tasks.append(self.asyncMoveMotor(4, 0))
        elif types == 3:
            tasks.append(self.asyncMoveMotor(1, 0))
            tasks.append(self.asyncMoveMotor(2, 1))
            tasks.append(self.asyncMoveMotor(3, 1))
            tasks.append(self.asyncMoveMotor(4, 0))
        else:
            return
        self.loop.run_until_complete(asyncio.wait(tasks))

    # Move all four motors at the same time
    def allMove(self, dirc):
        tasks = []
        tasks.append(self.asyncMoveMotor(1, dirc))
        tasks.append(self.asyncMoveMotor(2, dirc))
        tasks.append(self.asyncMoveMotor(3, dirc))
        tasks.append(self.asyncMoveMotor(4, dirc))
        self.loop.run_until_complete(asyncio.wait(tasks))

    # Handle one classified item: tilt the tray toward the matching bin,
    # wait for it to slide off, then bring the tray back level.
    def Garbge(self, types):
        # The RLock is reentrant, so the nested MoveMotor calls below can
        # re-acquire the lock this method already holds.
        with self.lock:
            self.MovePan(types)
            sleep(1)
            self.MovePanFlat(types)
            # The tray does not come back perfectly level, so apply a
            # small per-motor compensation for each category.
            if types == 0:
                self.MoveMotor(1, 1, 0.06)
                self.MoveMotor(2, 1, 0.10)
                self.MoveMotor(3, 1, 0.12)
                self.MoveMotor(4, 1, 0.07)
            elif types == 1:
                self.MoveMotor(1, 1, 0.06)
                self.MoveMotor(2, 1, 0.10)
                self.MoveMotor(3, 1, 0.12)
                self.MoveMotor(4, 1, 0.10)
            elif types == 2:
                self.MoveMotor(1, 1, 0.06)
                self.MoveMotor(2, 1, 0.08)
                self.MoveMotor(3, 1, 0.12)
                self.MoveMotor(4, 1, 0.12)
            elif types == 3:
                self.MoveMotor(1, 1, 0.06)
                self.MoveMotor(2, 1, 0.06)
                self.MoveMotor(3, 1, 0.10)
                self.MoveMotor(4, 1, 0.12)
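A minimal usage sketch for the controller above, assuming a gpiozero-style ``Motor(forward_pin, backward_pin)`` class wired as in ``__init__``. Note that on Python 3.11+ ``asyncio.wait`` no longer accepts bare coroutines, so the ``run_until_complete`` calls would need ``asyncio.gather`` or explicit tasks.

ctrl = MotorCtrl()
ctrl.allMove(1)     # quick self-test: raise every corner once
ctrl.Garbge(0)      # 0 = recyclable: tilt the tray, wait, then level it again
ctrl.Garbge(2)      # 2 = wet (food) waste
# The RLock inside MotorCtrl makes overlapping Garbge() calls run one at a
# time, while asyncio moves the four motors of a single tilt concurrently.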
예제 #60
0
class Client(Object):
    """
    generic client
    it's blocking. 'not responding, still trying'?
  """
    def __init__(self, url, master=None):
        Object.__init__(self)
        self.debug(1, "Client: %s" % (url))
        self._url = URL(url)
        self._dead = False
        self._connected = False
        self._lock = RLock()
        self._socket = None
        self._master = master

    def __del__(self):
        try:
            self.close()
        except:
            # just in case __init__ didn't run completely
            pass

    def url(self):
        return self._url

    def ident(self):
        data = self._socket.getSockName()
        self.write(["i am", "virtue://%s:%d/" % data])
        ans = self.read()

    def read(self):
        return self._socket.read()

    def readData(self, size):
        return self._socket.readData(size)

    def write(self, what, data=None):
        self._socket.write(what, data)

    def writeData(self, data):
        return self._socket.writeData(data)

    def ask(self, what, data=None):
        """
        what should already be a list with message and params
        """
        with self._lock:
            try:
                if not self._connected:
                    # TODO: move this to a method
                    # establish the connection
                    self._socket = RequestSocket(None)
                    self.debug(2, "urlParams: %s %s" % self._url.getParams())
                    self._socket.connect(self._url.getParams())
                    self.debug(
                        1, "created Client socket w/ fd %d to %s" %
                        (self._socket.fileno(), self._url))
                    # throw away the greeting
                    ans = self.read()
                    self._connected = True
                    # identify ourselves so the server knows what we are
                    self.ident()

                if not isinstance(what, list):
                    self.debug(1, "what's not a list!: %s" % str(what))
                self.write(what, data)

                data = self.read()

                if data is not None:
                    return data

                self.debug(1, "dead!: closed")
                self._dead = True
                self.close()
                raise UmbDead

            # other errors?
            except (ValueError, IOError, error) as e:
                self.debug(1, "dead!: %s" % e)
                self._dead = True
                self.close()
                raise UmbDead
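A minimal usage sketch for the client above; the address, port, and message are made up for illustration, and ``URL``, ``RequestSocket``, and ``UmbDead`` are assumed to come from the rest of the original project.

client = Client("virtue://localhost:9000/")   # hypothetical server address
try:
    answer = client.ask(["ping"])             # blocks until the server answers
    print("server said:", answer)
except UmbDead:
    print("server went away; the client marked itself dead and closed")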