Example #1
class TempDirs(object):
    """Tempdir manager."""
    def __init__(self, tmpdir, prefix="rez_"):
        self.tmpdir = tmpdir
        self.prefix = prefix
        self.dirs = set()
        self.lock = Lock()
        # A previous version overrode TempDirs.__del__ in an unsafe way: __del__ is
        # not guaranteed to run before sys.modules is torn down, so the os.path call
        # could fail with a suppressed AttributeError, leaving the directory on disk
        # even when the program exited normally. Registering an atexit callback
        # ensures the directories are cleared at shutdown.
        atexit.register(self.clear)

    def mkdtemp(self, cleanup=True):
        path = mkdtemp(dir=self.tmpdir, prefix=self.prefix)
        if not cleanup:
            return path

        with self.lock:
            self.dirs.add(path)
        return path

    def clear(self):
        # snapshot under the lock so concurrent mkdtemp() calls can't mutate
        # the set while we iterate
        with self.lock:
            dirs = list(self.dirs)
        for path in dirs:
            if os.path.exists(path):
                shutil.rmtree(path)
Example #2
class PriorityQueue:
    """ a non blocking priority queue """

    def __init__(self):
        self.queue = []
        self.lock = Lock()

    def __iter__(self):
        return iter(self.queue)

    def __delitem__(self, key):
        del self.queue[key]

    def put(self, element):
        with self.lock:
            heappush(self.queue, element)

    def get(self):
        """ return the smallest element, or (None, None) if empty """
        with self.lock:
            try:
                return heappop(self.queue)
            except IndexError:
                return None, None
Example #3
class Reader(Thread):

    def __init__(self):
        super(Reader, self).__init__()
        self.buffer = []
        self.lock = Lock()

    def run(self):
        while True:
            self.lock.acquire()
            self.buffer.append(getch())
            self.lock.release()
            sleep(0)

    def read(self):
        self.start()
        while True:
            if self.buffer:
                # The reader may be in the middle of receiving multi-byte
                # input; wait long enough for this to finish.
                sleep(0.1)
                self.lock.acquire()
                chars, self.buffer = self.buffer, []
                self.lock.release()
                yield chars
            else:
                # yield the CPU instead of spinning while no input is buffered
                sleep(0.01)
Example #4
class BatchInsertCollector(object):
    def __init__(self, cur, table_name, header=None, threshold=1000000):
        if not isinstance(cur, MyCursor):
            raise TypeError('cur must be a MyCursor instance')
        self.cur = cur
        self.table_name = table_name
        if header is None:
            self.header = cur.get_header(table_name)
        else:
            self.header = header
        self.sql_header = ''
        self.cur_len = 0
        self.reset_header()
        self.threshold = threshold
        self.values = []
        self.stat_total = 0
        self.mutex = Lock()

    def __del__(self):
        self.flush()
        self.cur.con.commit()

    def reset_header(self):
        self.sql_header = 'insert into %s (%s) values ' % (self.table_name, ','.join(self.header))
        self.cur_len = len(self.sql_header)

    def flush(self):
        if len(self.values) == 0:
            return
        self.cur.cur.execute(self.sql_header + ','.join(self.values))
        self.cur_len = len(self.sql_header)
        self.cur.con.commit()
        print 'flush called: %d records, total %d records' % (len(self.values), self.stat_total)
        self.values = []

    def append(self, data):
        assert isinstance(data, DictItem)

        def find(val):
            if val not in data.fields:
                return u"''"
            else:
                return u"'%s'" % unicode(data[val])

        cvalues = u','.join(map(find, self.header))
        val1 = u"(%s)" % cvalues
        # hold the mutex with a context manager so it is released even if
        # flush() raises
        with self.mutex:
            if self.cur_len + len(val1) > self.threshold:
                self.flush()
            self.values.append(val1)
            self.cur_len += len(val1) + 1
            self.stat_total += 1
Example #5
class WorkerTask(object):
    """
    This class used to represent page loading task
    """
    def __init__(self, args, routine, user_data=None):
        self._lock_complete = Lock()
        self.__complete = False
        self.gathered = False
        self.args = args
        if callable(routine):
            self.routine = routine
        else:
            raise TypeError('<routine> argument must be callable')
        self.user_data = user_data
        self.result = []
        self.thread = None

    def _get_complete(self):
        """complete property getter"""
        with self._lock_complete:
            return self.__complete

    def _set_complete(self, state):
        """complete property setter"""
        if not isinstance(state, bool):
            raise TypeError('state should be boolean')
        with self._lock_complete:
            self.__complete = state

    complete = property(_get_complete, _set_complete)
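
A minimal sketch of the lock-guarded property in use; worker_routine here is a hypothetical stand-in for the real page-loading callable:

def worker_routine(url):
    return url.upper()       # placeholder work

task = WorkerTask(args=("http://example.com",), routine=worker_routine)
task.complete = True         # setter validates the type, then writes under the lock
print(task.complete)         # getter reads under the same lock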
Example #6
class FutureTask(Task):
    def __init__(self, function, *args, **kwargs):
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.lock = Lock()
        self.e = None
        self._result = None
        self.executing = False

    def _execute(self):
        try:
            self.lock.acquire()
            self.executing = True
            # store the outcome in _result; assigning to self.result would
            # shadow the result() method below
            self._result = self.function(*self.args, **self.kwargs)
        except Exception as e:
            self.e = e
        finally:
            self.lock.release()

    def result(self):
        while True:
            if self.executing:  # check the flag first to avoid deadlock
                with self.lock:
                    if self.e is not None:
                        raise self.e
                    return self._result
Example #7
class _CoreScheduleThread(Thread):
    def __init__(self, threadpool):
        self.scheduletasks = []
        self.tasklock = Lock()
        self.condition = Condition(Lock())
        self.threadpool = threadpool
        Thread.__init__(self)

    def run(self):
        while True:
            with self.condition:
                if len(self.scheduletasks) == 0:
                    self.condition.wait()
                else:
                    task = self.scheduletasks.pop(0)
                    if dates.current_timestamps() >= task.nexttime:
                        self.threadpool.execute(task.function, *task.args, **task.kwargs)
                        task.nexttime = dates.current_timestamps() + task.period
                    else:
                        self.condition.wait(task.nexttime - dates.current_timestamps())
                    self.addtask(task)

    
    def addtask(self, task):  # copy on write
        with self.tasklock:
            tasks = list(self.scheduletasks)
            tasks.append(task)
            tasks.sort(key=lambda t: t.nexttime)
            self.scheduletasks = tasks
Example #8
class ProgressBarLogger:
    def __init__(self, msg, total):
        self.msg = msg
        self.total = total
        self.status = 0
        self.lock = Lock()

    def log(self, *_):
        with self.lock:
            self.status += 1
            self._print_progress_bar(self.status, self.total, prefix=self.msg, bar_length=50)

    # from here http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
    # Print iterations progress
    @staticmethod
    def _print_progress_bar(iteration, total, prefix='', suffix='', decimals=2, bar_length=100):
        """
        Call in a loop to create terminal progress bar
        @params:
            iteration   - Required  : current iteration (Int)
            total       - Required  : total iterations (Int)
            prefix      - Optional  : prefix string (Str)
            suffix      - Optional  : suffix string (Str)
            decimals    - Optional  : number of decimals in percent complete (Int)
            bar_length   - Optional  : character length of bar (Int)
        """
        filled_length = int(round(bar_length * iteration / float(total)))
        percents = round(100.00 * (iteration / float(total)), decimals)
        bar_char = '#' * filled_length + '-' * (bar_length - filled_length)
        sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar_char, percents, '%', suffix))
        sys.stdout.flush()
        if iteration == total:
            sys.stdout.write('\n')
            sys.stdout.flush()
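
Because log() takes the lock before touching the shared counter and the terminal, it can be called from many worker threads at once. A small sketch, assuming the class above is in scope:

from threading import Thread

items = range(20)
logger = ProgressBarLogger("processing", total=len(items))

def work(item):
    # ... real work would happen here ...
    logger.log()

threads = [Thread(target=work, args=(i,)) for i in items]
for t in threads:
    t.start()
for t in threads:
    t.join()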
Example #9
	def handle(self):
		data, socket = self.request

		lock = Lock()
		lock.acquire()

		DataOffset    = struct.unpack('<H', data[139:141])[0]
		BrowserPacket = data[82+DataOffset:]
		ReqType       = RequestType(BrowserPacket[0])

		Domain = Decode_Name(data[49:81])
		Name   = Decode_Name(data[15:47])
		Role1  = NBT_NS_Role(data[45:48])
		Role2  = NBT_NS_Role(data[79:82])
		Fprint = WorkstationFingerPrint(data[190:192])
		Roles  = ParseRoles(data[192:196])

		print text("[BROWSER] Request Type : %s" % ReqType)
		print text("[BROWSER] Address      : %s" % self.client_address[0])
		print text("[BROWSER] Domain       : %s" % Domain)
		print text("[BROWSER] Name         : %s" % Name)
		print text("[BROWSER] Main Role    : %s" % Role1)
		print text("[BROWSER] 2nd Role     : %s" % Role2)
		print text("[BROWSER] Fingerprint  : %s" % Fprint)
		print text("[BROWSER] Role List    : %s" % Roles)

		RAPThisDomain(self.client_address[0], Domain)

		lock.release()
Example #10
class Skeleton(object):
    def __init__(self, config_file):
        self.logger = logging.getLogger(self.__class__.__name__)
        config = Config(config_file)
        config.get_configs()
        self.config = config
        self.lock = Lock()
        self.queues = dict()

    def produce_task(self, tasker_name, tasker):
        while True:
            try:
                queue = self.queues.get(tasker_name)
                self.logger.info("old {0} queue size: {1}".format(tasker_name, queue.qsize()))
                queue.put(tasker.size)
                self.logger.info("new {0} queue size: {1}".format(tasker_name, queue.qsize()))
            except Exception as error:
                self.logger.exception("{0} {1}".format(tasker_name, error))
            finally:
                time.sleep(self.config.scan_task_interval)

    def consume_task(self, n, tasker_name, tasker):
        while True:
            handler = Handler()
            queue = self.queues.get(tasker_name)
            if queue.empty():
                time.sleep(self.config.wait_time)
                continue
            try:
                while not queue.empty():
                    size = queue.get()
                    # a context manager releases the lock even if
                    # human_readable() raises; otherwise the next acquire
                    # would deadlock
                    with self.lock:
                        handler.human_readable(size)
            except Exception as error:
                self.logger.exception('Thread-{0}: error {1}'.format(n, error))
            finally:
                del handler

    def do_work(self):
        # Register the SIGTERM handler once, before entering the main loop.
        signal.signal(signal.SIGTERM, sigterm_handler)

        for tasker_name, tasker in self.config.taskers.items():
            self.queues[tasker_name] = Queue()

            # Spawn produce_task thread
            t = Thread(target=self.produce_task, args=(tasker_name, tasker))
            t.daemon = True
            t.start()

            # Spawn consume_task threads
            for n in range(tasker.max_workers):
                t = Thread(target=self.consume_task, args=(n, tasker_name, tasker))
                t.daemon = True
                t.start()

        while True:
            # Round robin and sleep some seconds.
            time.sleep(self.config.scan_task_interval)
Example #11
class WaitCursor(Thread):
    """A waiting cursor for long operation that
    catch output and flush it after waiting"""
    def __init__(self):
        self.state = "WAIT"
        self.lock = Lock()           # Lock used to synchronise IO and cursor stop
        Thread.__init__(self)

    def run(self):
        """Method executed when the thread object start() method is called"""

        realStdout = sys.stdout # Backup stdout
        tmpStdout = StringIO()  # Store here all data output during waiting state
        sys.stdout = tmpStdout  # Capture stdout
        cursorState = ("-", "\\", "|", "/")
        i = 0
        self.lock.acquire()
        while self.state == "WAIT":
            realStdout.write(cursorState[i % 4])
            realStdout.flush()
            sleep(0.1)
            realStdout.write("\b")
            i += 1

        # Restore standard output and print temp data
        sys.stdout = realStdout
        sys.stdout.write(tmpStdout.getvalue())
        sys.stdout.flush()
        self.lock.release()

    def stop(self):
        self.state = "STOP"
        self.lock.acquire() # Wait for the IO flush to finish before returning
Example #12
    def start_manager(self):
        exit_flags[self.tab_id] = 0

        log.info('START | Layers Download Manager')

        thread_list = ['Alpha', 'Bravo', 'Charlie', 'Delta', 'Echo', 'Foxtrot', 'Golf', 'Hotel', 'India', 'Juliet']
        queue_lock = Lock()
        work_queue = Queue.Queue(len(self.file_paths_and_sizes))
        threads = []

        for thread_name in thread_list:
            key = str(uuid.uuid4())
            thread = LayerDownloadThread(self.source, thread_name, work_queue, queue_lock, key, self.target_dir, self.tab_id)
            thread.start()
            if threads_map_key not in thread_manager_processes:
                thread_manager_processes[threads_map_key] = {}
            thread_manager_processes[threads_map_key][key] = thread
            threads.append(thread)

        queue_lock.acquire()
        for word in self.file_paths_and_sizes:
            work_queue.put(word)
        queue_lock.release()

        while not work_queue.empty():
            pass

        exit_flags[self.tab_id] = 1

        for t in threads:
            t.join()

        log.info('DONE | Layers Download Manager')
Example #13
class InMemoryItemValue(object):
  _lock = None
  """:type _lock Lock"""

  def __init__(self, value=None, expire_in=None):
    self._lock = Lock()
    self._value = value
    self._expire_in = None
    self._expire_in_time = None

    self.update_expire_time(expire_in)

  @property
  def value(self):
    return self._value

  @value.setter
  def value(self, val):
    with self._lock:
      self._value = val
      self._expire_in = datetime.now() + timedelta(seconds=float(self._expire_in_time)) if self._expire_in_time else None

  def update_expire_time(self, t):
    self._expire_in_time = t

  @property
  def is_expired(self):
    return (self._expire_in - datetime.now()).days < 0 if self._expire_in else False
Example #14
class Manager:
  def __init__(self):
    self.networks = set()
    self.logs = []
    self.next_log_id = 0
    self.log_lock = Lock()

  def notifyNetUp(self, pno, net_name):
    self.networks.add(net_name)
    sys.stdout.write(">> network: %s is up (%d)\n" % (net_name, len(self.networks)))

  def getNetworks(self):
    return self.networks

  def getLogs(self, since=-1):
    if since >= 0:
      return [l for l in self.logs if l['id'] > since]
    else:
      return self.logs

  def putLog(self, host, log):
    with self.log_lock:
      l = {'id': self.next_log_id, 'host': host, 'log': log}
      self.next_log_id += 1
      sys.stdout.write(">> log: %s\n" % json.dumps(l))
      self.logs.append(l)
Example #15
class Queue:
	"""Command queue class
	"""
	def __init__(self):
		self.lock = Lock()
		self.locks = {}
	
	def queue(self, command, *args):
		check = inspect.getargspec(command)
		cmdname = command.__name__ if command.__name__ else "unknown_cmd"
		if len(check[0]) != len(args):
			logging.warning("Queue command '%s' expected %u args, got %u!" % (cmdname, len(check[0]), len(args)))

		# If we have enough args, try running the command
		if len(args) >= len(check[0]):
			args = args[:len(check[0])] # Resize arg list if needed
			ret = None
			server = args[0]
			with self.lock:
				if server not in self.locks:
					self.locks[server] = Lock()

			# Run in an enclosure, so the per-server lock is released even if the command fails
			with self.locks[server]:
				try:
					ret = command(*args)
				except Exception as err:
					logging.warning("Queue command returned error: %s" % err)

			if ret:
				return ret
		return None
Example #16
class shm:
    def __init__(self, data=None):
        self._data = data
        self._lock = Lock()

    def set(self, data, lock=True):
        if lock:
            with self._lock:
                self._data = data
        else:
            self._data = data

    def get(self, lock=True):
        if lock:
            with self._lock:
                return self._data
        else:
            return self._data

    def acquire(self):
        self._lock.acquire()

    def release(self):
        self._lock.release()

    data = property(get, set)
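
The lock=True defaults make single reads and writes atomic, while the explicit acquire()/release() methods let a caller hold the lock across a read-modify-write. A sketch:

counter = shm(0)
counter.set(10)                # locks internally
print(counter.get())           # 10

counter.acquire()              # hold the lock across a compound update
try:
    counter.set(counter.get(lock=False) + 1, lock=False)
finally:
    counter.release()

print(counter.data)            # 11; the property routes through get()/set()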
Example #17
class DPMClient(object):
    def __init__(self, uid=None, key=None):
        self._lock = Lock()
        self._uid = uid
        self._key = None
        if key:
            self._key = rsa.PublicKey.load_pkcs1(key)
      
    def request(self, addr, port, buf):
        with self._lock:
            return self._request(addr, port, buf)
    
    def _request(self, addr, port, buf):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((addr, port))
        try:
            if self._key:
                stream = Stream(sock, uid=self._uid, key=self._key)
            else:
                stream = Stream(sock)
            stream.write(buf)
            if self._key:
                stream = Stream(sock)
            _, _, res = stream.readall()
            return res
        finally:
            sock.close()
Example #18
class Syncer(object):

    def __init__(self, slave):
        self.slave = slave
        self.lock = Lock()
        self.pb = PostBox()
        for i in range(int(gconf.sync_jobs)):
            t = Thread(target=self.syncjob)
            t.start()

    def syncjob(self):
        while True:
            pb = None
            while True:
                with self.lock:
                    if self.pb:
                        pb, self.pb = self.pb, PostBox()
                if pb:
                    break
                time.sleep(0.5)
            pb.close()
            pb.wakeup(self.slave.rsync(pb))

    def add(self, e):
        while True:
            try:
                self.pb.append(e)
                return self.pb
            except BoxClosedErr:
                pass
Example #19
class BlaLock(object):
    """
    Simple wrapper class for the thread.lock class which only raises an
    exception when trying to release an unlocked lock if it's initialized with
    strict=True.
    """

    def __init__(self, strict=False, blocking=True):
        self.__strict = strict
        self.__blocking = blocking
        self.__lock = Lock()

    def acquire(self):
        return self.__lock.acquire(self.__blocking)

    def release(self):
        try:
            self.__lock.release()
        except ThreadError:
            if self.__strict:
                raise

    def locked(self):
        return self.__lock.locked()

    def __enter__(self, *args):
        self.acquire()
        return self

    def __exit__(self, *args):
        self.release()
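
Since __enter__/__exit__ delegate to acquire()/release(), the wrapper works in a with statement, and with the default strict=False a stray release is swallowed. A sketch (assuming Lock and ThreadError are imported from threading, as the class requires):

lock = BlaLock()
with lock:
    pass               # critical section

lock.release()         # already unlocked: ThreadError is suppressed

strict_lock = BlaLock(strict=True)
strict_lock.acquire()
strict_lock.release()
# a second strict_lock.release() here would raise ThreadError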
Example #20
class ProcessThread(Thread):
	# Initialize this thread
	def __init__(self):
		Thread.__init__(self)
		self.stopped = Event() # Cancel Event
		self.mutex = Lock()
		self.data = None

	# Threaded code
	def run(self):
		while not self.stopped.is_set():
			# Check if status data is available and process it
			data = None
			with self.mutex:
				if self.data:
					data = self.data
					self.data = None

			if data:
				global outputfile
				try:
					fp = open(outputfile, 'wb')
					fp.write(data.encode('utf-8'))
					fp.close()
				except Exception as e:
					console.log(LOG_PYTHON, "Couldn't write status to '%s': %s.\n" % (outputfile, str(e)))

			self.stopped.wait(0.5)
Example #21
class Serial:
  def __init__(self, port='COM5', rate=9600, timeout=10):
      self._serial = serial.Serial(port, rate, timeout=timeout)
      self._mutex = Lock()
      # a context manager ensures the mutex is released even when the
      # handshake fails and we raise
      with self._mutex:
          response = self._serial.readline().strip()
          print(response)
          if response != 'OK':
              raise Exception("Failed to communicate with the serial device!")

  def _shortCommand(self, command):
      self._serial.write(command)
      response = self._serial.readline()
      return response.strip()

  def _longCommand(self, command):
      response = self._shortCommand('RCV ' + str(len(command)) + "\n")
      if response != 'RDY':
          return None
      for i in range(int(math.ceil(len(command) / 128.0))):
          c = command[128*i:128*(i+1)]
          response = self._shortCommand(c)
      return self._serial.readline().strip()

  def command(self, command):
      with self._mutex:
          if len(command) < 128:
              response = self._shortCommand(command + "\n")
          else:
              response = self._longCommand(command)
      return response
Example #22
    def _setup_to_do_n_cycles(self, number_of_cycles: int, updates_each_cycle: UpdateCollection=None):
        """
        Sets up the test so that the retriever will only do n cycles.
        :param number_of_cycles: the number of cycles to do
        """
        if updates_each_cycle is None:
            updates_each_cycle = UpdateCollection([])

        semaphore = Semaphore(0)
        lock_until_counted = Lock()
        lock_until_counted.acquire()

        def increase_counter(*args) -> UpdateCollection:
            semaphore.release()
            lock_until_counted.acquire()
            return updates_each_cycle

        self.retrieval_manager.update_mapper.get_all_since.side_effect = increase_counter
        self.retrieval_manager.start()

        run_counter = 0
        while run_counter < number_of_cycles:
            semaphore.acquire()
            run_counter += 1
            lock_until_counted.release()
            if run_counter == number_of_cycles:
                self.retrieval_manager.stop()

        self.retrieval_manager.update_mapper.get_all_since.side_effect = None
Example #23
class Promise:
  def __init__(self):
    self.value = None
    self.mutex = Lock()
    self.callbacks = []

  def updateValue(self, new_value):
    if not self.mutex.acquire(blocking = False):
      raise RuntimeError("cannot set the value of an already resolved promise")
    try:
      if self.value is not None:
        raise RuntimeError("cannot set the value of an already resolved promise")
      self.value = new_value
      for cb in self.callbacks:
        cb(new_value)
    finally:
      # always release, so a failed resolve can't deadlock addCallback()
      self.mutex.release()

  def addCallback(self, cb):
    with self.mutex:
      self.callbacks.append(cb)
    return self

  def map(self, f):
    fp = Promise()
    def chain(v):
      fp.updateValue(f(v))
    self.addCallback(chain)
    return fp

  def flatMap(self, f):
    fp = Promise()
    def chain(v):
      f(v).addCallback(fp.updateValue)
    self.addCallback(chain)
    return fp
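
map() and flatMap() register callbacks that fire when the parent promise resolves, so a whole chain is driven by one updateValue() call; resolving the same promise twice raises. A sketch:

p = Promise()
doubled = p.map(lambda v: v * 2)
doubled.addCallback(lambda v: print("doubled:", v))

p.updateValue(21)      # resolves p, which resolves doubled: prints "doubled: 42"
# p.updateValue(5)     # would raise RuntimeError: the promise is already resolved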
Example #24
class TfBroadcasterThread(Thread):
    def __init__(self, child_frame, parent_frame, tf_br=None):
        Thread.__init__(self)
        rospy.loginfo("Initializing tf broadcaster with child frame " + child_frame + " and parent frame " + parent_frame)
        if tf_br is None:
            self.tf_br = tf.TransformBroadcaster()
        else:
            self.tf_br = tf_br
        self.translation = None
        self.quaternion = None
        self.child_frame = child_frame
        self.parent_frame = parent_frame
        self.has_transformation = False
        self.lock = Lock()

    def set_transformation(self, translation, quaternion):
        with self.lock:
            self.translation = translation
            self.quaternion = quaternion
            # set the flag inside the lock so readers never see it raised
            # before the transform fields are populated
            self.has_transformation = True

    def run(self):
        while not rospy.is_shutdown():
            try:
                if self.has_transformation:
                    # the context manager releases the lock even if
                    # sendTransform() raises
                    with self.lock:
                        self.tf_br.sendTransform(self.translation, self.quaternion, rospy.Time.now(), self.child_frame, self.parent_frame)
            except Exception as e:
                print 'TfBroadcasterThread:', e
Example #25
class PandoraPool(object):
	def __init__(self, poolSize, proxy=None, expireTime=3600):
		self.size = poolSize
		self.proxy = proxy
		self.expire = expireTime
		self.pool = [self.createPandoraAgent() for i in xrange(self.size)]
		self.mutex = Lock()

	def createPandoraAgent(self):
		return PandoraAgent(datetime.now() + timedelta(0, self.expire), self.proxy)

	def refreshPandoraAgent(self, agent):
		if agent.isExpired():
			agent.authenticate_connection()
			agent.setExpireDate(datetime.now() + timedelta(0, self.expire))
		return agent

	def getAgent(self):
		# pop under the mutex so two threads can't take the same agent
		self.mutex.acquire()
		try:
			agent = self.pool.pop()
		except IndexError:
			agent = None
		finally:
			self.mutex.release()
		if agent is None:
			return self.createPandoraAgent()
		return self.refreshPandoraAgent(agent)

	def hasAvailableConnections(self):
		return len(self.pool) > 0

	def releaseAgent(self, agent):
		with self.mutex:
			if len(self.pool) < self.size:
				self.pool.append(agent)
Example #26
class DataWindow(Thread):
    def __init__(self, data_adapter):
        Thread.__init__(self)
        self.win = N.zeros((100, 3))
        self.winlock = Lock()
        self.data_adapter = data_adapter
        # define the flag up front so stop() works even before run() starts
        self.running = False
    
    def run(self):
        self.data_adapter.start()
        self.running = True
        while self.running:
            self.winlock.acquire()
            try:
                while True:
                    newdata = self.data_adapter.q.get(block=False)
                    self.win[:-1,:] = self.win[1:,:]
                    self.win[-1,:] = newdata[1:]
            except Queue.Empty:
                pass
            finally:
                self.winlock.release()
        self.data_adapter.stop()
    
    def stop(self):
        self.running = False
Example #27
class DebuggingLock:
    def __init__(self, name):
        self.lock = Lock()
        self.name = name

    def acquire(self, blocking = 1):
        self.print_tb("Acquire lock")
        self.lock.acquire(blocking)
        self.logmsg("===== %s: Thread %s acquired lock\n"%
            (self.name, currentThread().getName()))

    def release(self):
        self.print_tb("Release lock")
        self.lock.release()

    def logmsg(self, msg):
        loglock.acquire()
        logfile.write(msg + "\n")
        logfile.flush()
        loglock.release()

    def print_tb(self, msg):
        self.logmsg(".... %s: Thread %s attempting to %s\n"% \
                    (self.name, currentThread().getName(), msg) + \
                    "\n".join(traceback.format_list(traceback.extract_stack())))
Example #28
class TUNERS:
    def __init__(self, spec):
        from threading import Lock

        tuners = "".join(spec.split()) # remove white space
        tuners = tuners.split(',')
        tuners = [tuple(x.split(':')[0:2]) for x in tuners]
        # Add priority
        self.tuner_list = [(i, v[0], v[1]) for i, v in enumerate(tuners)]
        heapq.heapify(self.tuner_list)
        self.lock = Lock()

    def get_tuner(self):
        self.lock.acquire()
        try:
            tuner = heapq.heappop(self.tuner_list)
        except IndexError:
            tuner = None
        finally:
            self.lock.release()
        return tuner

    def put_tuner(self, tuner):
        with self.lock:
            heapq.heappush(self.tuner_list, tuner)
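
The constructor parses a comma-separated name:value string and heapifies the tuples, so get_tuner() always hands out the entry with the lowest priority number. A sketch with made-up tuner names:

tuners = TUNERS("hdhr1:192.168.1.10, hdhr2:192.168.1.11")
t = tuners.get_tuner()     # (0, 'hdhr1', '192.168.1.10'): priority 0 wins
print(t)
tuners.put_tuner(t)        # return the tuner to the pool when finished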
Example #29
def run(conn):
    """Function to handle running implosion generation in separate :py:class:`multithreading.Process`

    :param conn: A connection, i.e. one end of a `Pipe()`
    """
    # Need duck-checking instead of real type-checking...
    assert hasattr(conn, 'send') and hasattr(conn, 'recv')

    # Get the implosion object from the pipe:
    imp = conn.recv()
    assert isinstance(imp, Implosion)

    connLock = Lock()

    # Run in a separate thread in this process:
    def impRun():
        nonlocal imp, conn
        try:
            imp.generate()
        except Exception as e:
            connLock.acquire()
            conn.send(e)
            connLock.release()

    t = Thread(target=impRun)
    t.start()

    while t.is_alive():
        connLock.acquire()
        conn.send(imp.progress())
        connLock.release()
        time.sleep(0.01)

    # When the thread is done, send the Implosion object back:
    conn.send(imp)
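
A sketch of the parent side that would drive run() from another process; the Pipe/Process plumbing is standard multiprocessing, while imp stands in for a real Implosion instance from this project:

from multiprocessing import Pipe, Process

parent_conn, child_conn = Pipe()
p = Process(target=run, args=(child_conn,))
p.start()
parent_conn.send(imp)              # hand the Implosion to the child

while True:
    msg = parent_conn.recv()       # a progress value, an Exception, or the finished Implosion
    if isinstance(msg, Exception):
        raise msg
    if isinstance(msg, Implosion):
        imp = msg                  # generation finished
        break
p.join()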
Example #30
class IndependentWorker(Thread):
    def __init__(self, id, manager, workerArgs = None):
        '''A worker needs a task stack(queue), a name (id) and a boss (manager)'''
        self.id = id
        self.manager = manager
        self.completedTasksLock = Lock()
        self.completedTasks = 0
        Thread.__init__(self)
    def run(self):
        subject = self.manager.getTask()
        while subject is not None:
            if self._task(subject):
                self.completedTasksLock.acquire()
                self.completedTasks += 1
                self.completedTasksLock.release()
            subject = self.manager.getTask()
        self._close()
        sys.exit()
    def getAndResetCompletedTasks(self):
        # read and reset under the lock so no completed task is lost between
        # the snapshot and the reset
        self.completedTasksLock.acquire()
        completedTasks = self.completedTasks
        self.completedTasks = 0
        self.completedTasksLock.release()
        return completedTasks
    def _task(self, subject):
        raise NotImplementedError('Subclasses must override IndependentWorker._task()')
    def _close(self):
        pass
Example #31
class TSSModel(object):
    def __init__(self, width, height, start_time, verbose=True):
        self.width = width
        self.height = height
        self.start_time = start_time
        self.verbose = verbose
        self.lock = Lock()

        self.leadingstate = GameState(self.width, self.height, self.verbose)
        self.trailingstate01 = GameState(self.width, self.height, self.verbose)
        # only used during rollback period
        self.tempstate = None
        self.is_in_rollback = False

        # THE ABSOLUTE SOURCE OF TRUTH OF GAME RUNNING STATE
        # It should be True if running, False if not running
        self.game_running_flag = True

        # game timer in milliseconds
        self.game_timer = 0

        # initialize our lamport clock
        self.event_clock = 0

        # locked cells if in the spawning phase
        self.locked_cells = []
        self.client_last_seen_time = {}

        self.players_and_dragons_have_spawned_flag = False

    def update_client_last_seen_time(self, pl_id):
        self.lock.acquire()
        self.client_last_seen_time[pl_id] = self.game_timer
        self.lock.release()

    def unset_client_last_seen_time(self, pl_id):
        self.lock.acquire()
        self.client_last_seen_time.pop(pl_id, None)
        self.lock.release()

    def get_client_last_seen_time(self, pl_id):
        # get last seen time of player pl_id
        # return None if it doesn't exist
        return self.client_last_seen_time.get(pl_id)

    def get_unactive_player_ids(self):
        # get players whose last seen time is older than a specified timeout
        # return an empty list if none exist
        boundary_time = self.get_current_time() - SERVERSIDE_CLIENT_TIMEOUT
        result = []
        self.lock.acquire()
        try:
            for key, val in self.client_last_seen_time.iteritems():
                obj = self.get_object_by_id(key)
                # only humans can be retrieved
                if obj is not None and obj.get_type() == 'h' and val < boundary_time:
                    result.append(key)
        except RuntimeError as e:
            logging.warning(str(e))
        finally:
            self.lock.release()

        return result

    def get_offline_player_state_by_id(self, obj_id):
        # get info on offline character, return None if it does not exist
        return self.leadingstate.get_offline_player_state_by_id(obj_id)

    def rollback_state(self, command_list):
        self.is_in_rollback = True
        # print "leading_before: %s" % str(self.leadingstate)
        self.tempstate = copy.deepcopy(self.trailingstate01)
        # print "trailing01_before: %s" % str(self.tempstate)
        self.process_action_list(command_list, state_id=TEMP_STATE)
        self.leadingstate = self.tempstate
        # print "leading_after: %s" % str(self.leadingstate)
        self.is_in_rollback = False

    def get_event_clock(self):
        return self.event_clock

    def increase_event_clock(self):
        self.event_clock += 1

    def lock_cell(self, proposal_msg):
        #self.lock.acquire()
        self.locked_cells.append(proposal_msg)
        #self.lock.release()

    def unlock_cell(self, player_id):
        #self.lock.acquire()
        lst = self.locked_cells
        player_idx = next((index for (index, d) in enumerate(lst) if d["player_id"] == player_id), None)
        if player_idx is not None:
            del lst[player_idx]
        #self.lock.release()

    def collide_with_locked(self, x, y):
        #self.lock.acquire()
        for lock in self.locked_cells:
            if lock["x"] == x and lock["y"] == y:
                return True
        #self.lock.release()

        return False
    
    def get_locked(self, player_id):
        #self.lock.acquire()
        lst = self.locked_cells
        player_idx = next((index for (index, d) in enumerate(lst) if d["player_id"] == player_id), None)
        #self.lock.release()
        if player_idx is None:
            return None
        return lst[player_idx]

    def set_event_clock(self, new_clock):
        self.event_clock = new_clock

    def get_current_time(self):
        return self.game_timer

    def get_epoch_time(self):
        return self.start_time

    def advance_game_time_by(self, ms):
        self.game_timer += ms

    def is_game_running(self):
        return self.game_running_flag

    def is_game_finished(self):
        return not self.game_running_flag

    def stop_game(self):
        self.game_running_flag = False

    def check_game_end_condition(self):
        if self.is_in_rollback:
            # we're in rollback, don't end game
            self.game_running_flag = True
            return None

        state = True

        if self.players_and_dragons_have_spawned_flag:
            gs = self.get_leadingstate()

            if gs.get_dragon_count() == 0 and gs.get_human_count() > 0:
                msg = "Humans win!"
                print msg
                logging.info(msg)
                state = False
            elif gs.get_dragon_count() > 0 and gs.get_human_count() == 0:
                msg = "Dragons win!"
                print msg
                logging.info(msg)
                state = False

        self.game_running_flag = state

    def get_rollback_status(self):
        return self.is_in_rollback

    def get_list_of(self, c):
        return self.leadingstate.get_list_of(c)

    def get_leadingstate(self):
        return self.leadingstate

    def get_firsttrailingstate(self):
        return self.trailingstate01

    def process_action(self, action, state_id=LEADING_STATE):
        # check which state the action is going to be applied to
        # state_id possible values: leading (0), trailing1 (1)
        state = None
        state_name = ""

        if state_id == LEADING_STATE:
            state = self.leadingstate
            state_name = "leadingstate"
        elif state_id == TRAILING_01_STATE:
            state = self.trailingstate01
            state_name = "trailingstate01"
        elif state_id == TEMP_STATE:
            state = self.tempstate
            state_name = "tempstate"

        # save action for checking purpose in the trailing states
        state.add_action(action)

        # based on message type, do an action
        action_type = action["type"]

        # now do the action
        if action_type == "gamestart":
            # get start timer indicator from peer
            self.start_time = action["start_time"]
            # update our local game_timer
            self.game_timer = int(round(time.time() * 1000)) - self.start_time

        elif action_type == "spawn":
            # create a new character
            obj_id = action["player_id"]
            obj_name = action["player_id"]
            obj_type = action["player_type"]

            x = action["x"]
            y = action["y"]

            hp = action["hp"]
            max_hp = action["max_hp"]
            ap = action["ap"]

            new_obj = None
            if obj_type == 'h':
                new_obj = Human(obj_id, obj_name,
                    hp, max_hp, ap, x, y, verbose=self.verbose)
                logging.info("[%s] player %s is spawned" % (state_name, obj_id))
            elif obj_type == 'd':
                new_obj = Dragon(obj_id, obj_name,
                    hp, max_hp, ap, x, y, verbose=self.verbose)
                logging.info("[%s] dragon %s is spawned" % (state_name, obj_id))

            state.add_character(new_obj)

            # finally unlock the player field
            try:
                self.unlock_cell(obj_id)
            except Exception:
                pass

            # precondition for game end condition check, a player must be in the game once
            if not self.players_and_dragons_have_spawned_flag:
                if state.get_human_count() > 0 and state.get_dragon_count() > 0:
                    self.players_and_dragons_have_spawned_flag = True

        elif action_type == "off":
            # a player is offline
            obj_id = action["player_id"]
            state.make_offline(obj_id)
            self.unset_client_last_seen_time(obj_id)
            logging.info("[%s] %s is offline" % (state_name, obj_id))

        elif action_type == "move":
            # move a character
            obj_id = action["player_id"]

            x = action["x"]
            y = action["y"]

            if self.collide_with_locked(x,y) == False:
                obj = state.move(obj_id, x, y)
                logging.info("[%s] %s move to (%d,%d)" % (state_name, obj_id, x, y))

        elif action_type == "attack":
            # attack a character
            obj_id = action["player_id"]
            target_id = action["target_id"]

            state.attack(obj_id, target_id)
            logging.info("[%s] %s is attacked by %s" % (state_name, obj_id, target_id))

        elif action_type == "heal":
            # heal a character
            obj_id = action["obj_id"]
            target_id = action["target_id"]

            state.heal(obj_id, target_id)
            logging.info("[%s] %s is healed by %s" % (state_name, obj_id, target_id))

    def process_action_list(self, action_list, state_id=LEADING_STATE):
        for action in action_list:
            self.process_action(action, state_id)

    def get_message_box(self):
        return self.msg_box

    def get_object(self, x, y):
        return self.leadingstate.get_object(x,y)

    def get_width(self):
        return self.width

    def get_height(self):
        return self.height

    def get_object_by_id(self, obj_id):
        gs = self.leadingstate
        return gs.get_object_by_id(obj_id)
Example #32
class _SessionRPC(Session):
    _PULSE_FREQUENCY_SECONDS = 5

    def __init__(self, client, database: str, session_type: SessionType, options: GraknOptions = None):
        if not options:
            options = GraknOptions.core()
        self._client = client
        self._address = client._address
        self._channel = grpc.aio.insecure_channel(client._address)
        self._scheduler = sched.scheduler(time.time, time.sleep)
        self._database = _DatabaseRPC(database_manager=client.databases(), name=database)
        self._session_type = session_type
        self._grpc_stub = GraknStub(self._channel)
        self._lock = Lock()

        open_req = session_proto.Session.Open.Req()
        open_req.database = database
        open_req.type = _session_type_proto(session_type)
        open_req.options.CopyFrom(grakn_proto_builder.options(options))

        self._session_id: bytes = self._grpc_stub.session_open(open_req).session_id
        self._is_open = True
        self._pulse = self._scheduler.enter(delay=self._PULSE_FREQUENCY_SECONDS, priority=1, action=self._transmit_pulse, argument=())
        Thread(target=self._scheduler.run, name="session_pulse_{}".format(self._session_id.hex()), daemon=True).start()

    def transaction(self, transaction_type: TransactionType, options=None) -> Transaction:
        if not options:
            options = GraknOptions.core()
        return Transaction(self._address, self._session_id, transaction_type, options)

    def session_type(self) -> SessionType:
        return self._session_type

    def is_open(self) -> bool:
        return self._is_open

    def close(self) -> None:
        # a context manager guarantees the lock is released even if
        # remove_session() raises
        with self._lock:
            if not self._is_open:
                return
            self._is_open = False
            self._client.remove_session(self)
        self._scheduler.cancel(self._pulse)
        self._scheduler.empty()
        req = session_proto.Session.Close.Req()
        req.session_id = self._session_id
        try:
            self._grpc_stub.session_close(req)
        finally:
            self._channel.close()

    def database(self) -> Database:
        return self._database

    def session_id(self) -> bytes:
        return self._session_id

    def _transmit_pulse(self) -> None:
        if not self._is_open:
            return
        pulse_req = session_proto.Session.Pulse.Req()
        pulse_req.session_id = self._session_id
        res = self._grpc_stub.session_pulse(pulse_req)
        if res.alive:
            self._pulse = self._scheduler.enter(delay=self._PULSE_FREQUENCY_SECONDS, priority=1, action=self._transmit_pulse, argument=())
            Thread(target=self._scheduler.run, name="session_pulse_{}".format(self._session_id.hex()), daemon=True).start()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
Example #33
class History(object):
    """Time series data for Runs.

    See the documentation online: https://docs.wandb.com/wandb/log
    """
    # Only tests are allowed to keep all row history in memory
    keep_rows = False

    def __init__(self, run, add_callback=None, jupyter_callback=None, stream_name="default"):
        self._run = run
        out_dir = run.dir
        fname = wandb.wandb_run.HISTORY_FNAME
        self._start_time = wandb.START_TIME
        self._current_timestamp = None
        self.out_dir = out_dir
        self.fname = os.path.join(out_dir, fname)
        self.rows = []
        self.row = {}
        self.stream_name = stream_name
        # This enables / disables history logging. It's used by the
        # History.step() context manager to avoid compute-heavy computations
        # that are only necessary for logging.
        self.compute = True
        self.batched = False
        # not all rows have the same keys. this is the union of them all.
        self._keys = set()
        self._process = "user" if os.getenv("WANDB_INITED") else "wandb"
        self._streams = {}
        self._steps = 0  # index of the step to which we are currently logging
        self._lock = Lock()
        self._torch = None
        self.load()
        self._file = open(self.fname, 'a')
        self._add_callback = add_callback
        self._jupyter_callback = jupyter_callback

    def load(self):
        self.rows = []
        try:
            # only preload the default stream, TODO: better stream support
            if self.stream_name == "default":
                with open(self.fname) as f:
                    for line in f:
                        try:
                            self.row = json.loads(line)
                            self._index(self.row)
                        except TypeError:
                            print('warning: malformed history line: %s...' %
                                  line[:40])
                # initialize steps and run time based on existing data.
                if '_step' in self.row:
                    self._steps = self.row['_step'] + 1
                # fudge the start_time to compensate for previous run length
                if '_runtime' in self.row:
                    self._start_time = wandb.START_TIME - self.row['_runtime']
                self.row = {}
        except IOError:
            pass

    def keys(self):
        rich_keys = []
        if self.rows:
            rich_keys = [k for k, v in six.iteritems(
                self.rows[-1]) if isinstance(v, dict) and v.get("_type")]
        return [k for k in self._keys - set(rich_keys) if not k.startswith("_")]

    def stream(self, name):
        """Stream can be used to record different time series:

        run.history.stream("batch").add({"gradients": 1})
        """
        if self.stream_name != "default":
            raise ValueError("Nested streams aren't supported")
        if self._streams.get(name) is None:
            self._streams[name] = History(self._run,
                                          add_callback=self._add_callback, stream_name=name)
        return self._streams[name]

    def column(self, key):
        """Iterator over a given column, skipping steps that don't have that key
        """
        for row in self.rows:
            if key in row:
                yield row[key]

    def add(self, row=None, step=None, timestamp=None):
        """Adds or updates a history step.

        If row isn't specified, will write the current state of row.

        If step is specified, the row will be written only when add() is called with
        a different step value.

        run.history.row["duration"] = 1.0
        run.history.add({"loss": 1})
        => {"duration": 1.0, "loss": 1}

        """
        if row is None:
            row = {}
        if not isinstance(row, collections.Mapping):
            raise wandb.Error('history.add expects dict-like object')
        if timestamp and self._current_timestamp and timestamp < self._current_timestamp:
            wandb.termwarn("When passing timestamp, it must be increasing.  Current timestamp is {} but was passed {}".format(
                self._current_timestamp, timestamp))
        self._current_timestamp = timestamp or time.time()
        # Importing data, reset start time to the first timestamp passed in
        if self._start_time > self._current_timestamp:
            self._start_time = timestamp

        if step is None:
            self.update(row)
            if not self.batched:
                self._write()
        else:
            if not isinstance(step, numbers.Number):
                raise wandb.Error(
                    "Step must be a number, not {}".format(step))
            else:
                if step != round(step):
                    # tensorflow just applies `int()`. seems a little crazy.
                    wandb.termwarn('Non-integer history step: {}; rounding.'.format(step))

                # the backend actually handles floats right now. seems a bit weird to let those through though.
                step = int(round(step))

                if step < self._steps:
                    wandb.termwarn(
                        "Adding to old History rows isn't currently supported.  Step {} < {}; dropping {}.".format(step, self._steps, row))
                    return
                elif step == self._steps:
                    pass
                elif self.batched:
                    raise wandb.Error(
                        "Can't log to a particular History step ({}) while in batched mode.".format(step))
                else:  # step > self._steps
                    self._write()
                    self._steps = step

            self.update(row)

    def update(self, new_vals):
        """Add a dictionary of values to the current step without writing it to disk.
        """
        with self._lock:
            for k, v in six.iteritems(new_vals):
                k = k.strip()
                self.row[k] = v

    @contextlib.contextmanager
    def step(self, compute=True):
        """Context manager to gradually build a history row, then commit it at the end.

        To reduce the number of conditionals needed, code can check run.history.compute:

        with run.history.step(batch_idx % log_interval == 0):
            run.history.add({"nice": "ok"})
            if run.history.compute:
                # Something expensive here
        """
        if self.batched:  # we're already in a context manager
            raise wandb.Error("Nested History step contexts aren't supported")
        self.batched = True
        self.compute = compute
        yield self
        if compute:
            self._write()
        self.compute = True

    @property
    def torch(self):
        if self._torch is None:
            self._torch = TorchHistory(self)
        return self._torch

    def log_tf_summary(self, summary_pb_bin):
        from wandb.tensorflow import tf_summary_to_dict
        self.add(tf_summary_to_dict(summary_pb_bin))

    def ensure_jupyter_started(self):
        if self._jupyter_callback:
            self._jupyter_callback()

    def _index(self, row, keep_rows=False):
        """Add a row to the internal list of rows without writing it to disk.

        This function should keep the data structure consistent so it's usable
        for both adding new rows, and loading pre-existing histories.
        """
        if self.keep_rows or keep_rows:
            self.rows.append(row)
        self._keys.update(row.keys())
        self._steps += 1

    def _transform(self):
        """Transforms special classes into the proper format before writing"""
        self.row = data_types.history_dict_to_json(self._run, self.row)

    def _write(self):
        self._current_timestamp = self._current_timestamp or time.time()
        # Saw a race in tests where we closed history and another log was called
        # we check if self._file is set to ensure we don't bomb out
        if self.row and self._file:
            self._lock.acquire()
            # Jupyter starts logging the first time wandb.log is called in a cell.
            # This will resume the run and potentially update self._steps
            self.ensure_jupyter_started()
            try:
                self.row['_runtime'] = self._current_timestamp - self._start_time
                self.row['_timestamp'] = self._current_timestamp
                self.row['_step'] = self._steps
                if self.stream_name != "default":
                    self.row["_stream"] = self.stream_name
                self._transform()
                self._file.write(util.json_dumps_safer_history(self.row))
                self._file.write('\n')
                self._file.flush()
                os.fsync(self._file.fileno())
                if self._add_callback:
                    self._add_callback(self.row)
                self._index(self.row)
                self.row = {}
            finally:
                self._lock.release()
            return True
        else:
            return False

    def close(self):
        self._write()
        self._lock.acquire()
        try:
            if self._file:
                self._file.close()
                self._file = None
        finally:
            self._lock.release()
Example #34
class FileIndex(SegmentDeletionMixin, Index):
    def __init__(self, storage, schema, create=False,
                 indexname=_DEF_INDEX_NAME):
        self.storage = storage
        self.indexname = indexname

        if schema is not None and not isinstance(schema, Schema):
            raise ValueError("%r is not a Schema object" % schema)

        self.generation = self.latest_generation()

        if create:
            if schema is None:
                raise IndexError("To create an index you must specify a schema")

            self.schema = schema
            self.generation = 0
            self.segment_counter = 0
            self.segments = SegmentSet()

            # Clear existing files
            prefix = "_%s_" % self.indexname
            for filename in self.storage:
                if filename.startswith(prefix):
                    storage.delete_file(filename)

            self._write()
        elif self.generation >= 0:
            self._read(schema)
        else:
            raise EmptyIndexError("No index named %r in storage %r" % (indexname, storage))

        # Open a reader for this index. This is used by the
        # deletion methods, but mostly it's to keep the underlying
        # files open so they don't get deleted from underneath us.
        self._searcher = self.searcher()

        self.segment_num_lock = Lock()

    def __repr__(self):
        return "%s(%r, %r)" % (self.__class__.__name__,
                               self.storage, self.indexname)

    def __del__(self):
        if (hasattr(self, "_searcher")
            and self._searcher
            and not self._searcher.is_closed):
            self._searcher.close()

    def close(self):
        self._searcher.close()

    def latest_generation(self):
        pattern = _toc_pattern(self.indexname)

        latest = -1
        for filename in self.storage:
            m = pattern.match(filename)
            if m:
                num = int(m.group(1))
                if num > latest:
                    latest = num
        return latest

    def refresh(self):
        if not self.up_to_date():
            return self.__class__(self.storage, self.schema,
                                  indexname=self.indexname)
        else:
            return self

    def up_to_date(self):
        return self.generation == self.latest_generation()

    def _write(self):
        # Writes the content of this index to the .toc file.
        for field in self.schema:
            field.clean()
        #stream = self.storage.create_file(self._toc_filename())

        # Use a temporary file for atomic write.
        tocfilename = self._toc_filename()
        tempfilename = '%s.%s' % (tocfilename, time())
        stream = self.storage.create_file(tempfilename)

        stream.write_varint(_INT_SIZE)
        stream.write_varint(_FLOAT_SIZE)
        stream.write_int(-12345)

        stream.write_int(_INDEX_VERSION)
        for num in __version__[:3]:
            stream.write_varint(num)

        stream.write_string(cPickle.dumps(self.schema, -1))
        stream.write_int(self.generation)
        stream.write_int(self.segment_counter)
        stream.write_pickle(self.segments)
        stream.close()

        # Rename temporary file to the proper filename
        self.storage.rename_file(tempfilename, self._toc_filename(), safe=True)

    def _read(self, schema):
        # Reads the content of this index from the .toc file.
        stream = self.storage.open_file(self._toc_filename())

        if stream.read_varint() != _INT_SIZE or \
           stream.read_varint() != _FLOAT_SIZE:
            raise IndexError("Index was created on an architecture with different data sizes")

        if not stream.read_int() == -12345:
            raise IndexError("Number misread: byte order problem")

        version = stream.read_int()
        if version != _INDEX_VERSION:
            raise IndexVersionError("Can't read format %s" % version, version)
        self.version = version
        self.release = (stream.read_varint(),
                        stream.read_varint(),
                        stream.read_varint())

        # If the user supplied a schema object with the constructor, don't load
        # the pickled schema from the saved index.
        if schema:
            self.schema = schema
            stream.skip_string()
        else:
            self.schema = cPickle.loads(stream.read_string())

        generation = stream.read_int()
        assert generation == self.generation
        self.segment_counter = stream.read_int()
        self.segments = stream.read_pickle()
        stream.close()

    def _next_segment_name(self):
        # Returns the name of the next segment in sequence.
        if self.segment_num_lock.acquire():
            try:
                self.segment_counter += 1
                return "_%s_%s" % (self.indexname, self.segment_counter)
            finally:
                self.segment_num_lock.release()
        else:
            raise LockError

    def _toc_filename(self):
        # Returns the computed filename of the TOC for this index name and
        # generation.
        return "_%s_%s.toc" % (self.indexname, self.generation)

    def last_modified(self):
        return self.storage.file_modified(self._toc_filename())

    def is_empty(self):
        return len(self.segments) == 0

    def optimize(self):
        if len(self.segments) < 2 and not self.segments.has_deletions():
            return

        from whoosh.filedb.filewriting import OPTIMIZE
        w = self.writer()
        w.commit(OPTIMIZE)

    def commit(self, new_segments=None):
        self._searcher.close()

        if not self.up_to_date():
            raise OutOfDateError

        if new_segments:
            if not isinstance(new_segments, SegmentSet):
                raise ValueError("FileIndex.commit() called with something other than a SegmentSet: %r" % new_segments)
            self.segments = new_segments

        self.generation += 1
        self._write()
        self._clean_files()

        self._searcher = self.searcher()

    def _clean_files(self):
        # Attempts to remove unused index files (called when a new generation
        # is created). If existing Index and/or reader objects have the files
        # open, they may not get deleted immediately (i.e. on Windows) but will
        # probably be deleted eventually by a later call to clean_files.

        storage = self.storage
        current_segment_names = set(s.name for s in self.segments)

        tocpattern = _toc_pattern(self.indexname)
        segpattern = _segment_pattern(self.indexname)

        for filename in storage:
            m = tocpattern.match(filename)
            if m:
                num = int(m.group(1))
                if num != self.generation:
                    try:
                        storage.delete_file(filename)
                    except OSError:
                        # Another process still has this file open
                        pass
            else:
                m = segpattern.match(filename)
                if m:
                    name = m.group(1)
                    if name not in current_segment_names:
                        try:
                            storage.delete_file(filename)
                        except OSError:
                            # Another process still has this file open
                            pass

    def doc_count_all(self):
        return self.segments.doc_count_all()

    def doc_count(self):
        return self.segments.doc_count()

    def field_length(self, fieldid):
        fieldnum = self.schema.to_number(fieldid)
        return sum(s.field_length(fieldnum) for s in self.segments)

    def reader(self):
        return self.segments.reader(self.storage, self.schema)

    def writer(self, **kwargs):
        from whoosh.filedb.filewriting import FileIndexWriter
        return FileIndexWriter(self, **kwargs)
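
The TOC writer above relies on the write-then-rename trick: serialize the
whole table of contents into a uniquely named temporary file, then rename it
over the live .toc so a concurrent reader never sees a half-written file. A
minimal standalone sketch of the same pattern (not Whoosh code; a plain
os.rename stands in for storage.rename_file(..., safe=True), which is what
papers over Windows rename semantics):

import os
import pickle
import tempfile

def atomic_write_pickle(path, obj):
    # The temp file must live in the same directory as the target;
    # a rename across filesystems would not be atomic.
    dirname = os.path.dirname(os.path.abspath(path))
    fd, temppath = tempfile.mkstemp(dir=dirname, prefix='.toc.')
    try:
        with os.fdopen(fd, 'wb') as stream:
            pickle.dump(obj, stream, -1)
        os.rename(temppath, path)  # readers see old or new file, never a mix
    except Exception:
        os.unlink(temppath)
        raise

atomic_write_pickle('index.toc', {'generation': 1, 'segments': []})
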
Example #35
0
class ArcusMCNode(object):
    worker = None
    shutdown = False

    def __init__(self, addr, name, transcoder, node_allocator):
        # mandatory fields
        self.addr = addr
        self.name = name
        self.in_use = False
        self.transcoder = transcoder

        self.handle = Connection(addr)
        self.ops = []
        self.lock = Lock()  # for ordering worker.q and ops

        self.node_allocator = node_allocator

    def __repr__(self):
        return '%s-%s' % (self.addr, self.name)

    def get_fileno(self):
        return self.handle.socket.fileno()

    def disconnect(self):
        # disconnect socket
        self.handle.disconnect()

        # clear existing operation
        for op in self.ops:
            op.set_invalid()

        self.ops = []

    def disconnect_all(self):  # shutdown
        self.node_allocator.shutdown = True
        self.disconnect()

        self.node_allocator.worker.q.put(None)

    def process_request(self, request):
        if self.handle.disconnected():
            ret = self.handle.connect()
            if ret is not None:
                # re-register if node connection is available
                self.node_allocator.worker.register_node(self)

        self.handle.send_request(request)

    ##########################################################################################
    ### commands
    ##########################################################################################
    def get(self, key):
        return self._get('get', key)

    def gets(self, key):
        return self._get('gets', key)

    def set(self, key, val, exptime=0):
        return self._set("set", key, val, exptime)

    def cas(self, key, val, cas_id, exptime=0):
        return self._cas('cas', key, val, cas_id, exptime)

    def incr(self, key, value=1):
        return self._incr_decr("incr", key, value)

    def decr(self, key, value=1):
        return self._incr_decr("decr", key, value)

    def add(self, key, val, exptime=0):
        return self._set("add", key, val, exptime)

    def append(self, key, val, exptime=0):
        return self._set("append", key, val, exptime)

    def prepend(self, key, val, exptime=0):
        return self._set("prepend", key, val, exptime)

    def replace(self, key, val, exptime=0):
        return self._set("replace", key, val, exptime)

    def delete(self, key):
        full_cmd = "delete %s" % key
        return self.add_op('delete', full_cmd, self._recv_delete)

    def flush_all(self):
        full_cmd = 'flush_all'
        return self.add_op('flush_all', full_cmd, self._recv_ok)

    def get_stats(self, stat_args=None):
        if stat_args is None:
            full_cmd = 'stats'
        else:
            full_cmd = 'stats ' + stat_args

        return self.add_op('stats', full_cmd, self._recv_stat)

    def lop_create(self, key, flags, exptime=0, noreply=False, attr=None):
        return self._coll_create('lop create', key, flags, exptime, noreply,
                                 attr)

    def lop_insert(self,
                   key,
                   index,
                   value,
                   noreply=False,
                   pipe=False,
                   attr=None):
        return self._coll_set('lop insert', key, index, value, noreply, pipe,
                              attr)

    def lop_delete(self, key, range, drop=False, noreply=False, pipe=False):
        option = ''
        if drop:
            option += 'drop'

        if noreply:
            option += ' noreply'

        if pipe:
            assert not noreply
            option += ' pipe'

        if isinstance(range, tuple):
            full_cmd = 'lop delete %s %d..%d %s' % (key, range[0], range[1],
                                                    option)
            return self.add_op('lop delete', full_cmd, self._recv_delete,
                               noreply or pipe)
        else:
            full_cmd = 'lop delete %s %d %s' % (key, range, option)
            return self.add_op('lop delete', full_cmd, self._recv_delete,
                               noreply or pipe)

    def lop_get(self, key, range, delete=False, drop=False):
        return self._coll_get('lop get', key, range, self._recv_lop_get,
                              delete, drop)

    def sop_create(self, key, flags, exptime=0, noreply=False, attr=None):
        return self._coll_create('sop create', key, flags, exptime, noreply,
                                 attr)

    def sop_insert(self, key, value, noreply=False, pipe=False, attr=None):
        return self._coll_set('sop insert', key, None, value, noreply, pipe,
                              attr)

    def sop_get(self, key, count=0, delete=False, drop=False):
        return self._coll_get('sop get', key, count, self._recv_sop_get,
                              delete, drop)

    def sop_delete(self, key, val, drop=False, noreply=False, pipe=False):
        flags, length, value = self.transcoder.encode(val)

        option = '%d' % length
        if drop:
            option += ' drop'

        if noreply:
            option += ' noreply'

        if pipe:
            assert not noreply
            option += ' pipe'

        option += '\r\n'

        full_cmd = 'sop delete %s %s' % (key, option) + value
        return self.add_op('sop delete', full_cmd, self._recv_delete,
                           noreply or pipe)

    def sop_exist(self, key, val, pipe=False):
        flags, length, value = self.transcoder.encode(val)

        option = '%d' % length
        if pipe:
            option += ' pipe'

        option += '\r\n'

        full_cmd = 'sop exist %s %s' % (key, option) + value
        return self.add_op('sop exist', full_cmd, self._recv_exist, pipe)

    def bop_create(self, key, flags, exptime=0, noreply=False, attr=None):
        return self._coll_create('bop create', key, flags, exptime, noreply,
                                 attr)

    def bop_insert(self,
                   key,
                   bkey,
                   value,
                   eflag=None,
                   noreply=False,
                   pipe=False,
                   attr=None):
        return self._coll_set('bop insert', key, None, value, noreply, pipe,
                              attr, bkey, eflag)

    def bop_upsert(self,
                   key,
                   bkey,
                   value,
                   eflag=None,
                   noreply=False,
                   pipe=False,
                   attr=None):
        return self._coll_set('bop upsert', key, None, value, noreply, pipe,
                              attr, bkey, eflag)

    def bop_update(self,
                   key,
                   bkey,
                   value,
                   eflag=None,
                   noreply=False,
                   pipe=False,
                   attr=None):
        return self._coll_set('bop update', key, None, value, noreply, pipe,
                              attr, bkey, eflag)

    def bop_delete(self,
                   key,
                   range,
                   filter=None,
                   count=None,
                   drop=False,
                   noreply=False,
                   pipe=False):
        option = ''

        if filter is not None:
            option += filter.get_expr() + ' '

        if count is not None:
            option += '%d ' % count

        if drop:
            option += 'drop'

        if noreply:
            option += ' noreply'

        if pipe:
            assert not noreply
            option += ' pipe'

        if isinstance(range, tuple):
            if isinstance(range[0], str):
                if range[0][:2] != '0x' or range[1][:2] != '0x':
                    raise CollectionHexFormat()

                full_cmd = 'bop delete %s %s..%s %s' % (key, range[0],
                                                        range[1], option)
                return self.add_op('bop delete', full_cmd, self._recv_delete,
                                   noreply or pipe)
            else:
                full_cmd = 'bop delete %s %d..%d %s' % (key, range[0],
                                                        range[1], option)
                return self.add_op('bop delete', full_cmd, self._recv_delete,
                                   noreply or pipe)
        else:
            if isinstance(range, str):
                if range[:2] != '0x':
                    raise CollectionHexFormat()

                full_cmd = 'bop delete %s %s %s' % (key, range, option)
                return self.add_op('bop delete', full_cmd, self._recv_delete,
                                   noreply or pipe)
            else:
                full_cmd = 'bop delete %s %d %s' % (key, range, option)
                return self.add_op('bop delete', full_cmd, self._recv_delete,
                                   noreply or pipe)

    def bop_get(self, key, range, filter=None, delete=False, drop=False):
        return self._coll_get('bop get',
                              key,
                              range,
                              self._recv_bop_get,
                              delete,
                              drop,
                              filter=filter)

    def bop_mget(self, key_list, range, filter=None, offset=None, count=50):
        return self._coll_mget('bop mget', key_list, range, filter, offset,
                               count)

    def bop_smget(self, key_list, range, filter=None, offset=None, count=2000):
        return self._coll_mget('bop smget', key_list, range, filter, offset,
                               count)

    def bop_count(self, key, range, filter):
        return self._coll_get('bop count',
                              key,
                              range,
                              self._recv_bop_get,
                              filter=filter)

    def bop_incr(self, key, bkey, value, noreply=False, pipe=False):
        return self._bop_incrdecr('bop incr', key, bkey, value, noreply, pipe)

    def bop_decr(self, key, bkey, value, noreply=False, pipe=False):
        return self._bop_incrdecr('bop decr', key, bkey, value, noreply, pipe)

    ##########################################################################################
    ### Queue senders
    ##########################################################################################
    def add_op(self, cmd, full_cmd, callback, noreply=False):
        op = ArcusOperation(self, full_cmd, callback)
        ##arcuslog(self, 'add operation %s(%s:%s) to %s' % (full_cmd, callback, hex(id(op)), self))

        if noreply:  # or pipe
            # don't need to receive response, set_result now
            self.node_allocator.worker.q.put(op)
            op.set_result(True)
        else:
            self.lock.acquire()
            self.node_allocator.worker.q.put(op)
            self.ops.append(op)
            self.lock.release()

        return op

    def _get(self, cmd, key):
        full_cmd = "%s %s" % (cmd, key)
        if cmd == 'gets':
            callback = self._recv_cas_value
        else:
            callback = self._recv_value

        op = self.add_op(cmd, full_cmd, callback)
        return op

    def _set(self, cmd, key, val, exptime=0):
        flags, length, value = self.transcoder.encode(val)
        if flags is None:
            return 0

        full_cmd = "%s %s %d %d %d\r\n" % (cmd, key, flags, exptime, length)
        full_cmd += value

        op = self.add_op(cmd, full_cmd, self._recv_set)
        return op

    def _cas(self, cmd, key, val, cas_id, exptime=0):
        flags, length, value = self.transcoder.encode(val)
        if flags is None:
            return 0

        full_cmd = "%s %s %d %d %d %d\r\n" % (cmd, key, flags, exptime,
                                              length, cas_id)
        full_cmd += value

        op = self.add_op(cmd, full_cmd, self._recv_set)
        return op

    def _incr_decr(self, cmd, key, value):
        full_cmd = "%s %s %d" % (cmd, key, value)

        op = self.add_op(cmd, full_cmd, self._recv_set)
        return op

    def _coll_create(self,
                     cmd,
                     key,
                     flags,
                     exptime=0,
                     noreply=False,
                     attr=None):
        if attr is None:
            attr = {}

        # default values
        if 'maxcount' not in attr:
            attr['maxcount'] = 4000
        if 'ovflaction' not in attr:
            attr['ovflaction'] = 'tail_trim'
        if 'readable' not in attr:
            attr['readable'] = True

        option = '%d %d %d' % (flags, exptime, attr['maxcount'])
        if attr['ovflaction'] != 'tail_trim':
            option += ' ' + attr['ovflaction']
        if not attr['readable']:
            option += ' unreadable'

        if noreply:
            option += ' noreply'

        full_cmd = '%s %s %s' % (cmd, key, option)
        return self.add_op(cmd, full_cmd, self._recv_coll_create, noreply)

    def _bop_incrdecr(self, cmd, key, bkey, val, noreply=False, pipe=False):
        if isinstance(val, int):
            value = '%d' % val
        else:
            value = val

        if isinstance(bkey, int):
            bkey_str = '%d' % bkey
        else:
            if bkey[:2] != '0x':
                raise CollectionHexFormat()
            bkey_str = bkey

        option = '%s %s' % (bkey_str, value)

        if noreply:
            option += ' noreply'

        if pipe:
            assert not noreply
            option += ' pipe'

        full_cmd = '%s %s %s' % (cmd, key, option)
        return self.add_op(cmd, full_cmd, self._recv_set, noreply or pipe)

    def _coll_set(self,
                  cmd,
                  key,
                  index,
                  val,
                  noreply=False,
                  pipe=False,
                  attr=None,
                  bkey=None,
                  eflag=None):
        flags, length, value = self.transcoder.encode(val)

        if bkey is not None:  # bop
            assert index is None

            if isinstance(bkey, int):
                bkey_str = '%d' % bkey
            else:
                if bkey[:2] != '0x':
                    raise CollectionHexFormat()
                bkey_str = bkey

            if eflag is not None:
                if eflag[:2] != '0x':
                    raise CollectionHexFormat()
                option = '%s %s %d' % (bkey_str, eflag, length)
            else:
                option = '%s %d' % (bkey_str, length)
        elif index is not None:  # lop
            option = '%d %d' % (index, length)
        else:  # sop
            option = '%d' % length

        if attr is not None:
            # default mandatory values
            if 'flags' not in attr:
                attr['flags'] = 0
            if 'exptime' not in attr:
                attr['exptime'] = 0
            if 'maxcount' not in attr:
                attr['maxcount'] = 4000

            option += ' create %d %d %d' % (attr['flags'], attr['exptime'],
                                            attr['maxcount'])
            if 'ovflaction' in attr:
                option += ' ' + attr['ovflaction']
            if 'readable' in attr and not attr['readable']:
                option += ' unreadable'

        if noreply:
            option += ' noreply'

        if pipe:
            assert not noreply
            option += ' pipe'

        option += '\r\n'

        full_cmd = '%s %s %s' % (cmd, key, option) + value
        return self.add_op(cmd, full_cmd, self._recv_coll_set, noreply or pipe)

    def _coll_get(self,
                  cmd,
                  key,
                  range,
                  callback,
                  delete=None,
                  drop=None,
                  filter=None):
        option = ''
        type = cmd[:3]

        if filter is not None:
            option += filter.get_expr() + ' '

        if delete:
            option += 'delete'

        if drop:
            assert not delete
            option += 'drop'

        if isinstance(range, tuple):
            if type == 'bop' and isinstance(range[0], str):
                if range[0][:2] != '0x' or range[1][:2] != '0x':
                    raise CollectionHexFormat()

                full_cmd = "%s %s %s..%s %s" % (cmd, key, range[0], range[1],
                                                option)
                return self.add_op(cmd, full_cmd, callback)
            else:
                full_cmd = "%s %s %d..%d %s" % (cmd, key, range[0], range[1],
                                                option)
                return self.add_op(cmd, full_cmd, callback)
        else:
            if type == 'bop' and isinstance(range, str):
                if range[:2] != '0x':
                    raise CollectionHexFormat()

                full_cmd = "%s %s %s %s" % (cmd, key, range, option)
                return self.add_op(cmd, full_cmd, callback)
            else:
                full_cmd = "%s %s %d %s" % (cmd, key, range, option)
                return self.add_op(cmd, full_cmd, callback)

    def _coll_mget(self, org_cmd, key_list, range, filter, offset, count):

        comma_sep_keys = ','.join(key_list)

        cmd = '%s %d %d ' % (org_cmd, len(comma_sep_keys), len(key_list))

        if isinstance(range, tuple):
            if isinstance(range[0], str):
                if range[0][:2] != '0x' or range[1][:2] != '0x':
                    raise CollectionHexFormat()

                cmd += '%s..%s' % range
            else:
                cmd += '%d..%d' % range
        else:
            if isinstance(range, str):
                if range[:2] != '0x':
                    raise CollectionHexFormat()

                cmd += '%s' % range
            else:
                cmd += '%d' % range

        if filter is not None:
            cmd += ' ' + filter.get_expr()

        if offset is not None:
            cmd += ' %d' % offset
        cmd += ' %d' % count

        cmd += '\r\n%s' % comma_sep_keys

        if org_cmd == 'bop mget':
            reply = self._recv_mget
        else:
            reply = self._recv_smget

        op = self.add_op(org_cmd, cmd, reply)

        return op

    ##########################################################################################
    ### receivers
    ##########################################################################################
    def do_op(self):
        self.lock.acquire()
        if not self.ops:
            arcuslog('ops empty (%s)' % self.addr)
            self.lock.release()
            return

        op = self.ops.pop(0)
        self.lock.release()

        ret = op.callback()
        op.set_result(ret)

        while self.handle.hasline():  # remaining jobs
            self.lock.acquire()
            op = self.ops.pop(0)
            self.lock.release()

            ret = op.callback()
            op.set_result(ret)

    def _recv_ok(self):
        line = self.handle.readline()
        return line == 'OK'

    def _recv_stat(self):
        data = {}
        while True:
            line = self.handle.readline()
            if line is None or line[:3] == 'END':
                break

            dummy, k, v = line.split(' ', 2)
            data[k] = v

        return data

    def _recv_set(self):
        line = self.handle.readline()
        if line[0:8] == 'RESPONSE':
            dummy, count = line.split()

            ret = []
            for i in xrange(0, int(count)):
                line = self.handle.readline()
                ret.append(line)

            line = self.handle.readline()  # b'END'

            return ret

        if line == 'STORED':
            return True

        if line == 'NOT_FOUND':
            return False

        if line == 'TYPE_MISMATCH':
            raise CollectionType()

        if line == 'OVERFLOWED':
            raise CollectionOverflow()

        if line == 'OUT_OF_RANGE':
            raise CollectionIndex()

        if line.isdigit():  # incr, decr, bop incr, bop decr
            return int(line)

        return False

    def _recv_delete(self):
        line = self.handle.readline()
        if line[0:8] == 'RESPONSE':
            dummy, count = line.split()

            ret = []
            for i in xrange(0, int(count)):
                line = self.handle.readline()
                ret.append(line)

            line = self.handle.readline()  # b'END'

            return ret

        if line == 'DELETED':
            return True

        if line == 'NOT_FOUND':
            return True  # True ?? (or exception)

        if line == 'TYPE_MISMATCH':
            raise CollectionType()

        if line == 'OVERFLOWED':
            raise CollectionOverflow()

        if line == 'OUT_OF_RANGE' or line == 'NOT_FOUND_ELEMENT':
            raise CollectionIndex()

        return False

    def _recv_cas_value(self):
        line = self.handle.readline()
        if not line or line[:5] != 'VALUE':
            return None

        resp, rkey, flags, length, cas_id = line.split()
        flags = int(flags)
        rlen = int(length)
        val = self._decode_value(flags, rlen)
        return (val, cas_id)

    def _recv_value(self):
        line = self.handle.readline()
        if not line or line[:5] != 'VALUE':
            return None

        resp, rkey, flags, length = line.split()
        flags = int(flags)
        rlen = int(length)
        return self._decode_value(flags, rlen)

    def _recv_coll_create(self):
        line = self.handle.readline()
        if line == 'CREATED':
            return True

        if line == 'EXISTS':
            raise CollectionExist()

        return False

    def _recv_coll_set(self):
        line = self.handle.readline()
        if line[0:8] == 'RESPONSE':
            dummy, count = line.split()

            ret = []
            for i in xrange(0, int(count)):
                line = self.handle.readline()
                ret.append(line)

            line = self.handle.readline()  # b'END'

            return ret

        if line == 'STORED':
            return True

        if line == 'NOT_FOUND':
            return False

        if line == 'TYPE_MISMATCH':
            raise CollectionType()

        if line == 'OVERFLOWED':
            raise CollectionOverflow()

        if line == 'OUT_OF_RANGE':
            raise CollectionIndex()

        return False

    def _recv_lop_get(self):
        ret, value = self._decode_collection('lop')
        if ret == 'NOT_FOUND':
            return None

        if ret == 'TYPE_MISMATCH':
            raise CollectionType()

        if ret == 'UNREADABLE':
            raise CollectionUnreadable()

        if ret == 'OUT_OF_RANGE' or ret == 'NOT_FOUND_ELEMENT':
            value = []

        return value

    def _recv_sop_get(self):
        ret, value = self._decode_collection('sop')
        if ret == 'NOT_FOUND':
            return None

        if ret == 'TYPE_MISMATCH':
            raise CollectionType()

        if ret == 'UNREADABLE':
            raise CollectionUnreadable()

        if ret == 'OUT_OF_RANGE' or ret == 'NOT_FOUND_ELEMENT':
            value = set()

        return value

    def _recv_exist(self):
        line = self.handle.readline()
        return line == 'EXIST'

    def _recv_bop_get(self):
        ret, value = self._decode_collection('bop')
        if ret == 'NOT_FOUND':
            return None

        if ret == 'TYPE_MISMATCH':
            raise CollectionType()

        if ret == 'UNREADABLE':
            raise CollectionUnreadable()

        if ret == 'OUT_OF_RANGE' or ret == 'NOT_FOUND_ELEMENT':
            value = {}

        return value

    def _recv_mget(self):
        ret, value, miss = self._decode_bop_mget()
        if ret == 'NOT_FOUND':
            return None

        if ret == 'TYPE_MISMATCH':
            raise CollectionType()

        if ret == 'UNREADABLE':
            raise CollectionUnreadable()

        if ret == 'OUT_OF_RANGE' or ret == 'NOT_FOUND_ELEMENT':
            raise CollectionIndex()

        return (value, miss)

    def _recv_smget(self):
        ret, value, miss = self._decode_bop_smget()
        if ret == 'NOT_FOUND':
            return None

        if ret == 'TYPE_MISMATCH':
            raise CollectionType()

        if ret == 'UNREADABLE':
            raise CollectionUnreadable()

        if ret == 'OUT_OF_RANGE' or ret == 'NOT_FOUND_ELEMENT':
            raise CollectionIndex()

        return (value, miss)

    ##########################################################################################
    ### decoders
    ##########################################################################################
    def _decode_value(self, flags, rlen):
        rlen += 2  # include trailing \r\n
        buf = self.handle.recv(rlen)
        if len(buf) != rlen:
            raise ArcusNodeSocketException(
                "received %d bytes when expecting %d" % (len(buf), rlen))

        buf = buf[:-2]  # strip \r\n

        val = self.transcoder.decode(flags, buf)

        line = self.handle.readline()
        if line != 'END':
            raise ArcusProtocolException(
                'invalid response expect END but recv: %s' % line)

        return val

    def _decode_collection(self, type):
        if type == 'bop':
            values = {}
        elif type == 'sop':
            values = set()
        else:  # lop
            values = []

        while True:
            line = self.handle.readline()
            if line[:5] != 'VALUE' and line[:5] != 'COUNT':
                return (line, values)

            if line[:5] == 'VALUE':
                resp, flags, count = line.split()
                flags = int(flags)
                count = int(count)
            elif line[:5] == 'COUNT':
                cmd, count = line.split('=')
                return (cmd, int(count))

            for i in xrange(0, count):
                line = self.handle.readline()
                if type == 'bop':  # bop get
                    bkey, eflag, length_buf = line.split(' ', 2)

                    if eflag.isdigit():  # no eflag present; this field is the length
                        length = eflag
                        eflag = None
                        buf = length_buf
                    else:
                        length, buf = length_buf.split(' ', 1)

                    if bkey.isdigit():
                        bkey = int(bkey)

                    val = self.transcoder.decode(flags, buf)
                    values[bkey] = (eflag, val)
                elif type == 'lop':
                    length, buf = line.split(' ', 1)
                    val = self.transcoder.decode(flags, buf)
                    values.append(val)
                else:  # sop
                    length, buf = line.split(' ', 1)
                    val = self.transcoder.decode(flags, buf)
                    values.add(val)

    def _decode_bop_mget(self):
        values = {}
        missed_keys = []

        while True:
            line = self.handle.readline()
            if line[:11] == 'MISSED_KEYS':
                dummy, count = line.split(' ')
                count = int(count)
                for i in xrange(0, count):
                    line = self.handle.readline()
                    missed_keys.append(line)

                continue

            if line[:5] != 'VALUE' and line[:5] != 'COUNT':
                return (line, values, missed_keys)

            ret = line.split()
            key = ret[1]
            status = ret[2]

            if status == 'NOT_FOUND':
                missed_keys.append(key)
                continue

            count = 0
            if len(ret) == 5:
                flags = int(ret[3])
                count = int(ret[4])

            val = {}
            for i in xrange(0, count):
                line = self.handle.readline()
                element, bkey, eflag, length_buf = line.split(' ', 3)

                if eflag.isdigit():  # no eflag present; this field is the length
                    length = eflag
                    eflag = None
                    buf = length_buf
                else:
                    length, buf = length_buf.split(' ', 1)

                if bkey.isdigit():
                    bkey = int(bkey)

                ret = self.transcoder.decode(flags, buf)
                val[bkey] = (eflag, ret)

            values[key] = val

    def _decode_bop_smget(self):
        values = []
        missed_keys = []

        while True:
            line = self.handle.readline()
            if line[:11] == 'MISSED_KEYS':
                dummy, count = line.split(' ')
                count = int(count)
                for i in xrange(0, count):
                    line = self.handle.readline()
                    missed_keys.append(line)

                continue

            if line[:5] != 'VALUE' and line[:5] != 'COUNT':
                return (line, values, missed_keys)

            ret = line.split()
            count = int(ret[1])

            for i in xrange(0, count):
                line = self.handle.readline()
                key, flags, bkey, eflag, length_buf = line.split(' ', 4)

                if eflag.isdigit():  # no eflag present; this field is the length
                    length = eflag
                    eflag = None
                    buf = length_buf
                else:
                    length, buf = length_buf.split(' ', 1)

                if bkey.isdigit():
                    bkey = int(bkey)

                val = self.transcoder.decode(int(flags), buf)
                values.append((bkey, key, eflag, val))

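The pairing of add_op() and do_op() above rests on one invariant: the
worker's send queue and the node's ops list change under the same lock, so
the k-th reply read off the socket resolves the k-th outstanding operation,
while noreply operations are resolved immediately and never enqueued for a
reply. A stripped-down sketch of that invariant (the names here are
illustrative, not part of the Arcus client):

from threading import Lock

class Op(object):
    def __init__(self, cmd):
        self.cmd = cmd
        self.result = None

    def set_result(self, result):
        self.result = result

class PendingOps(object):
    def __init__(self):
        self.sendq = []  # stands in for worker.q
        self.ops = []    # operations awaiting replies, strictly FIFO
        self.lock = Lock()

    def submit(self, op, noreply=False):
        if noreply:
            # no reply will arrive, so resolve now and skip self.ops
            self.sendq.append(op)
            op.set_result(True)
            return op
        # both structures must change under one lock; otherwise two
        # submitting threads could interleave and a reply would be
        # matched against the wrong operation
        self.lock.acquire()
        try:
            self.sendq.append(op)
            self.ops.append(op)
        finally:
            self.lock.release()
        return op

    def on_reply(self, reply):
        self.lock.acquire()
        try:
            op = self.ops.pop(0)
        finally:
            self.lock.release()
        op.set_result(reply)

ops = PendingOps()
op = ops.submit(Op('get foo'))
ops.on_reply('END')  # FIFO: resolves the oldest pending operation
print(op.result)
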
Example #36
0
File: m2t.py Project: cuijinyu/m2t
class DHTProtocolHandler:

    def __init__(self):
        self.mm = MsgMaker()
        self.s = create_socket("0.0.0.0", 6882)

        self.info_hash = None
        self.nid = random_nid()

        self.nodes_lock = Lock()
        self.nodes = set()

        self.peers_lock = Lock()
        self.peers = set()

        self.tids = list()

    def send_msg(self, msg, dst):
        self.s.sendto(bencode(msg), dst)

    def bootstrap(self):
        if not self.nodes:
            self.send_get_peers(BOOTSTRAP_ADDR)
            Timer(3, self.bootstrap).start()
        else:
            pass

    def get_torrent(self, info_hash):
        self.info_hash = info_hash
        self.bootstrap()

    def send_get_peers(self, address):
        msg = self.mm.form_query_get_peers(self.nid, self.info_hash)
        self.tids.append(msg['t'])
        self.send_msg(msg, address)

    def recv_msg(self):
        while True:
            try:
                msg_bencode, addr = self.s.recvfrom(65536)
                break
            except Exception:
                pass
        return bdecode(msg_bencode), addr

    def on_query(self, tid, q, addr):
        print('query received: %s' % q)
        if q == 'find_node':
            pass
        elif q == 'get_peers':
            self.on_query_get_peers(tid, addr)
        elif q == 'announce_peer':
            self.on_query_announce_peer(tid, addr)
        elif q == 'ping':
            self.on_query_ping(tid, addr)
        else:
            pass

    def on_query_get_peers(self, tid, addr):
        msg = self.mm.form_response_get_peers(self.nid, tid)
        self.send_msg(msg, addr)

    def on_query_announce_peer(self, tid, addr):
        msg = self.mm.form_response_announce_peer(self.nid, tid)
        self.send_msg(msg, addr)

    def on_query_ping(self, tid, addr):
        msg = self.mm.form_response_ping(self.nid, tid)
        self.send_msg(msg, addr)

    def on_response(self, tid, response):
        if tid in self.tids:
            self.on_response_get_peers(response)
            self.tids.remove(tid)
        else:
            print('tid=%s does not exist.' % str(tid))

    def on_response_get_peers(self, response):
        if 'values' in response:
            self.decode_peers(response['values'])
        if 'nodes' in response:
            self.decode_nodes(response['nodes'])

    def decode_peers(self, peers):
        for peer in peers:
            ip = socket.inet_ntoa(peer[:4])
            port, = struct.unpack('>H', peer[4:])
            self.peers_lock.acquire()
            self.peers.add(Peer(ip, port))
            self.peers_lock.release()

    def decode_nodes(self, nodes):
        for i in range(0, len(nodes), 26):
            nid = nodes[i:i + 20]
            ip = socket.inet_ntoa(nodes[i + 20:i + 24])
            port, = struct.unpack('>H', nodes[i + 24:i + 26])
            node = Node(nid, ip, port)
            self.nodes_lock.acquire()
            self.nodes.add(node)
            self.nodes_lock.release()

    def msg_listener(self):
        while True:
            msg, addr = self.recv_msg()
            try:
                y = msg['y']
                if y == 'q':
                    self.on_query(msg['t'], msg['q'], addr)
                elif y == 'r':
                    self.on_response(msg['t'], msg['r'])
                elif y == 'e':
                    print(msg['e'])
                else:
                    pass
            except Exception:
                print("no dht msg received -> ", msg)

    def auto_get_peers(self):
        while True:
            send_count = 0
            self.nodes_lock.acquire()
            for node in self.nodes:
                if send_count >= 300:
                    break
                if node.queried < 3:
                    self.send_get_peers((node.ip, node.port))
                    node.queried += 1
                    send_count += 1
            self.nodes_lock.release()
            time.sleep(1)

    def auto_get_metadata(self):
        md = MetadataDownloader()
        while True:
            self.peers_lock.acquire()
            peers = self.peers.copy()
            self.peers_lock.release()
            for peer in peers:
                if peer.asked:
                    continue
                print('try to get metadata from (%s, %d)' % peer.addr)
                metadata = md.get_metadata(self.info_hash, peer.addr)
                peer.asked = True
                if metadata is not None:
                    # TODO: metadata got; hand it off to the caller
                    return
            if len(peers) == len(self.peers):
                for peer in peers:
                    peer.asked = False
            time.sleep(1)

    def run(self):
        auto_get_peers_thread = Thread(target=self.auto_get_peers)
        auto_get_metadata_thread = Thread(target=self.auto_get_metadata)
        msg_listener_thread = Thread(target=self.msg_listener)
        auto_get_peers_thread.start()
        auto_get_metadata_thread.start()
        msg_listener_thread.start()
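
decode_nodes() above walks the DHT "compact node info" encoding: each node
is a fixed 26-byte record, a 20-byte node id followed by a 4-byte IPv4
address and a 2-byte big-endian port (BEP 5). A self-contained sketch of the
same parsing, fed one fabricated record:

import socket
import struct

def parse_compact_nodes(blob):
    nodes = []
    # step in 26-byte records, ignoring any trailing partial record
    for i in range(0, len(blob) - len(blob) % 26, 26):
        nid = blob[i:i + 20]
        ip = socket.inet_ntoa(blob[i + 20:i + 24])
        port, = struct.unpack('>H', blob[i + 24:i + 26])
        nodes.append((nid, ip, port))
    return nodes

record = b'x' * 20 + socket.inet_aton('1.2.3.4') + struct.pack('>H', 6881)
print(parse_compact_nodes(record))  # [('xxx...x', '1.2.3.4', 6881)]
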
Example #37
0
class VideoSync:
    def __init__(self, videos, on_reset, scale_callback):
        self.videos = videos
        self.on_reset = on_reset
        self.lock = Lock()
        self.is_playing = False
        self.stop_thread = False
        self.with_skeleton = None
        self.with_image = None
        self.play_speed = 1
        self.frame_skip = 1
        self.stream_thread = None
        self.executor = None
        self.running_tasks = []
        self.scale_callback = scale_callback
        self.scale_focused = False

    def scale_focus(self, is_focused):
        self.scale_focused = is_focused

    def start(self):
        self.lock.acquire()
        self.stop_thread = False
        self.executor = ThreadPoolExecutor(max_workers=6)

        self.stream_thread = threading.Thread(target=self.stream)
        self.stream_thread.daemon = True
        self.stream_thread.start()
        self.lock.release()

    def stop(self):
        self.lock.acquire()
        self.stop_thread = True
        for t in self.running_tasks:
            t.cancel()
        if self.executor:
            self.executor.shutdown(wait=False)
        self.lock.release()

    def stream(self):
        vids = self.videos()
        fps = 60
        delay = 1 / fps
        while True:
            start = timer()
            while not self.is_playing:
                sleep(0.01)

            while (timer() - start) < (delay / self.play_speed):
                sleep(0.00001)

            if not self.stop_thread:
                if not self.scale_focused:
                    self.scale_callback(
                        np.array([v.get_time_sec() for v in vids]).max())
                self.running_tasks = [
                    self.executor.submit(task)
                    for task in [v.next for v in vids]
                ]
                for t in self.running_tasks:
                    t.result()
            else:
                break

    def reset(self):
        self.lock.acquire()
        try:
            self.is_playing = False
            self.stop_thread = False
            self.on_reset()
        finally:
            self.lock.release()

    def set_speed(self, speed):
        self.play_speed = int(np.power(speed, 2))
        self.frame_skip = max(1, speed)
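
The core of stream() above is a per-frame fan-out/join: submit one task per
video to the pool, then block on every future so all streams advance in
lockstep before the next frame is paced. Reduced to its essentials (a
sketch, not the class's API):

from concurrent.futures import ThreadPoolExecutor

def step_all(executor, tasks):
    # submit everything first so the tasks run concurrently...
    futures = [executor.submit(task) for task in tasks]
    # ...then join on each one; result() also re-raises task exceptions
    return [f.result() for f in futures]

executor = ThreadPoolExecutor(max_workers=6)
print(step_all(executor, [lambda i=i: i * i for i in range(4)]))  # [0, 1, 4, 9]
executor.shutdown()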
Example #38
0
class SerializableLock(object):
    """ A Serializable per-process Lock

    This wraps a normal ``threading.Lock`` object and satisfies the same
    interface.  However, this lock can also be serialized and sent to different
    processes.  It will not block concurrent operations between processes (for
    this you should look at ``multiprocessing.Lock`` or ``locket.lock_file``)
    but will consistently deserialize into the same lock.

    So if we make a lock in one process::

        lock = SerializableLock()

    And then send it over to another process multiple times::

        bytes = pickle.dumps(lock)
        a = pickle.loads(bytes)
        b = pickle.loads(bytes)

    Then the deserialized objects will operate as though they were the same
    lock, and collide as appropriate.

    This is useful for consistently protecting resources on a per-process
    level.

    The creation of locks is itself not threadsafe.
    """
    _locks = WeakValueDictionary()
    def __init__(self, token=None):
        self.token = token or str(uuid.uuid4())
        if self.token in SerializableLock._locks:
            self.lock = SerializableLock._locks[self.token]
        else:
            self.lock = Lock()
            SerializableLock._locks[self.token] = self.lock

    def acquire(self, *args, **kwargs):
        return self.lock.acquire(*args, **kwargs)

    def release(self, *args, **kwargs):
        return self.lock.release(*args, **kwargs)

    def __enter__(self):
        self.lock.__enter__()

    def __exit__(self, *args):
        self.lock.__exit__(*args)

    def locked(self):
        return self.lock.locked()

    def __getstate__(self):
        return self.token

    def __setstate__(self, token):
        self.__init__(token)

    def __str__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.token)

    __repr__ = __str__
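
A quick check of the behaviour the docstring promises, assuming the class
above is in scope: deserialized copies share one underlying threading.Lock,
so they collide with each other and with the original:

import pickle

lock = SerializableLock()
data = pickle.dumps(lock)
a = pickle.loads(data)
b = pickle.loads(data)

assert a.lock is b.lock is lock.lock  # same token -> same underlying lock
with a:
    # b cannot take the lock while a holds it
    assert not b.lock.acquire(False)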
Example #39
0
class LRUCache(object):
    """A simple LRU Cache implementation."""

    # this is fast for small capacities (something below 1000) but doesn't
    # scale.  But as long as it's only used as storage for templates this
    # won't do any harm.

    def __init__(self, capacity):
        self.capacity = capacity
        self._mapping = {}
        self._queue = deque()
        self._postinit()

    def _postinit(self):
        # alias all queue methods for faster lookup
        self._popleft = self._queue.popleft
        self._pop = self._queue.pop
        self._remove = self._queue.remove
        self._wlock = Lock()
        self._append = self._queue.append

    def __getstate__(self):
        return {
            'capacity': self.capacity,
            '_mapping': self._mapping,
            '_queue': self._queue
        }

    def __setstate__(self, d):
        self.__dict__.update(d)
        self._postinit()

    def __getnewargs__(self):
        return (self.capacity, )

    def copy(self):
        """Return a shallow copy of the instance."""
        rv = self.__class__(self.capacity)
        rv._mapping.update(self._mapping)
        rv._queue = deque(self._queue)
        return rv

    def get(self, key, default=None):
        """Return an item from the cache dict or `default`"""
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, key, default=None):
        """Set `default` if the key is not in the cache otherwise
        leave unchanged. Return the value of this key.
        """
        self._wlock.acquire()
        try:
            try:
                return self[key]
            except KeyError:
                self[key] = default
                return default
        finally:
            self._wlock.release()

    def clear(self):
        """Clear the cache."""
        self._wlock.acquire()
        try:
            self._mapping.clear()
            self._queue.clear()
        finally:
            self._wlock.release()

    def __contains__(self, key):
        """Check if a key exists in this cache."""
        return key in self._mapping

    def __len__(self):
        """Return the current size of the cache."""
        return len(self._mapping)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self._mapping)

    def __getitem__(self, key):
        """Get an item from the cache. Moves the item up so that it has the
        highest priority then.

        Raise a `KeyError` if it does not exist.
        """
        self._wlock.acquire()
        try:
            rv = self._mapping[key]
            if self._queue[-1] != key:
                try:
                    self._remove(key)
                except ValueError:
                    # if something removed the key from the container
                    # when we read, ignore the ValueError that we would
                    # get otherwise.
                    pass
                self._append(key)
            return rv
        finally:
            self._wlock.release()

    def __setitem__(self, key, value):
        """Sets the value for an item. Moves the item up so that it
        has the highest priority then.
        """
        self._wlock.acquire()
        try:
            if key in self._mapping:
                self._remove(key)
            elif len(self._mapping) == self.capacity:
                del self._mapping[self._popleft()]
            self._append(key)
            self._mapping[key] = value
        finally:
            self._wlock.release()

    def __delitem__(self, key):
        """Remove an item from the cache dict.
        Raise a `KeyError` if it does not exist.
        """
        self._wlock.acquire()
        try:
            del self._mapping[key]
            try:
                self._remove(key)
            except ValueError:
                # __getitem__ is not locked, it might happen
                pass
        finally:
            self._wlock.release()

    def items(self):
        """Return a list of items."""
        result = [(key, self._mapping[key]) for key in list(self._queue)]
        result.reverse()
        return result

    def iteritems(self):
        """Iterate over all items."""
        return iter(self.items())

    def values(self):
        """Return a list of all values."""
        return [x[1] for x in self.items()]

    def itervalues(self):
        """Iterate over all values."""
        return iter(self.values())

    def keys(self):
        """Return a list of all keys ordered by most recent usage."""
        return list(self)

    def iterkeys(self):
        """Iterate over all keys in the cache dict, ordered by
        the most recent usage.
        """
        return reversed(tuple(self._queue))

    __iter__ = iterkeys

    def __reversed__(self):
        """Iterate over the values in the cache dict, oldest items
        coming first.
        """
        return iter(tuple(self._queue))

    __copy__ = copy
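
A short usage sketch for the class above, showing how __getitem__ promotion
decides the eviction victim once capacity is reached:

cache = LRUCache(2)
cache['a'] = 1
cache['b'] = 2
cache['a']        # promotes 'a' to most recently used
cache['c'] = 3    # at capacity: evicts 'b', the least recently used key
assert 'b' not in cache
assert cache.keys() == ['c', 'a']  # most recent first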
Example #40
0
class MapQueue:
    def __init__(self, iterable=None, maxlen=0):
        self.stack = []
        self.maxlen = maxlen
        if iterable is None:
            self.length = 0
        else:
            # maxlen == 0 means unbounded, as with Queue.Queue(maxsize=0)
            self.length = min(len(iterable), maxlen) if maxlen else len(iterable)
            for i in range(self.length):
                self.stack.append(iterable[i])
        self.pending = 0
        self.mainlock = Lock()
        self.innerlock = RLock()

    def qsize(self):
        self.innerlock.acquire()
        try:
            ret = self.length
        finally:
            self.innerlock.release()
        return ret

    def task_done(self):
        self.mainlock.acquire()
        try:
            self.pending -= 1
        finally:
            self.mainlock.release()

    def join(self):
        self.mainlock.acquire()
        try:
            while self.pending > 0:
                self.mainlock.release()
                sleep(0.1)
                self.mainlock.acquire()
        finally:
            self.mainlock.release()

    def get(self, block=True, timeout=None):
        infinite = timeout is None
        timeout = 0 if timeout is None else timeout
        self.mainlock.acquire()
        if self.empty():
            if not block:
                self.mainlock.release()
                raise Queue.Empty
            t = clock()
            curtime = t

            while self.empty() and curtime - t <= timeout:
                self.mainlock.release()
                sleep(0.1)
                curtime = clock()
                self.mainlock.acquire()
                if infinite and self.empty():
                    timeout = curtime + 1

            if (infinite or clock() - t > timeout) and self.empty():
                self.mainlock.release()
                raise Queue.Empty

        ret = self.stack.pop()
        self.pending += 1
        self.length -= 1
        self.mainlock.release()
        return ret

    def get_nowait(self):
        return self.get(False)

    def put(self, item):
        self.mainlock.acquire()
        try:
            self.stack.append(item)
            self.length += 1
        finally:
            self.mainlock.release()

    def empty(self):
        self.innerlock.acquire()
        try:
            ret = self.qsize() == 0
        finally:
            self.innerlock.release()
        return ret
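
A small usage sketch for the class above. Note that get() pops from the tail
of the backing list, so despite the name the container returns items in LIFO
order, while task_done()/join() track completion the way Queue.Queue does:

q = MapQueue()
for item in (1, 2, 3):
    q.put(item)

assert q.get_nowait() == 3  # LIFO: the most recently put item comes back
q.task_done()               # balance the get() so join() can return
assert q.qsize() == 2
q.join()                    # returns immediately: nothing is outstanding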
Example #41
0
class DateDetector(object):
    """Manages one or more date templates to find a date within a log line.

	Attributes
	----------
	templates
	"""
    def __init__(self):
        self.__lock = Lock()
        self.__templates = list()
        self.__known_names = set()

    def _appendTemplate(self, template):
        name = template.name
        if name in self.__known_names:
            raise ValueError("There is already a template with name %s" % name)
        self.__known_names.add(name)
        self.__templates.append(template)

    def appendTemplate(self, template):
        """Add a date template to manage and use in search of dates.

		Parameters
		----------
		template : DateTemplate or str
			Can be either a `DateTemplate` instance, or a string which will
			be used as the pattern for the `DatePatternRegex` template. The
			template will then be added to the detector.

		Raises
		------
		ValueError
			If a template already exists with the same name.
		"""
        if isinstance(template, str):
            template = DatePatternRegex(template)
        self._appendTemplate(template)

    def addDefaultTemplate(self):
        """Add Fail2Ban's default set of date templates.
		"""
        self.__lock.acquire()
        try:
            # asctime with optional day, subsecond and/or year:
            # Sun Jan 23 21:59:59.011 2005
            self.appendTemplate("(?:%a )?%b %d %H:%M:%S(?:\.%f)?(?: %Y)?")
            # asctime with optional day, subsecond and/or year coming after day
            # http://bugs.debian.org/798923
            # Sun Jan 23 2005 21:59:59.011
            self.appendTemplate("(?:%a )?%b %d %Y %H:%M:%S(?:\.%f)?")
            # simple date, optional subsecond (proftpd):
            # 2005-01-23 21:59:59
            # simple date: 2005/01/23 21:59:59
            # custom for syslog-ng 2006.12.21 06:43:20
            self.appendTemplate(
                "%Y(?P<_sep>[-/.])%m(?P=_sep)%d %H:%M:%S(?:,%f)?")
            # simple date too (from x11vnc): 23/01/2005 21:59:59
            # and with optional year given by 2 digits: 23/01/05 21:59:59
            # (See http://bugs.debian.org/537610)
            # 17-07-2008 17:23:25
            self.appendTemplate(
                "%d(?P<_sep>[-/])%m(?P=_sep)(?:%Y|%y) %H:%M:%S")
            # Apache format optional time zone:
            # [31/Oct/2006:09:22:55 -0000]
            # 26-Jul-2007 15:20:52
            self.appendTemplate(
                "%d(?P<_sep>[-/])%b(?P=_sep)%Y[ :]?%H:%M:%S(?:\.%f)?(?: %z)?")
            # CPanel 05/20/2008:01:57:39
            self.appendTemplate("%m/%d/%Y:%H:%M:%S")
            # named 26-Jul-2007 15:20:52.252
            # roundcube 26-Jul-2007 15:20:52 +0200
            # 01-27-2012 16:22:44.252
            # subseconds explicit to avoid possible %m<->%d confusion
            # with previous
            self.appendTemplate("%m-%d-%Y %H:%M:%S\.%f")
            # TAI64N
            template = DateTai64n()
            template.name = "TAI64N"
            self.appendTemplate(template)
            # Epoch
            template = DateEpoch()
            template.name = "Epoch"
            self.appendTemplate(template)
            # ISO 8601
            self.appendTemplate("%Y-%m-%d[T ]%H:%M:%S(?:\.%f)?(?:%z)?")
            # Only time information in the log
            self.appendTemplate("^%H:%M:%S")
            # <09/16/08@05:03:30>
            self.appendTemplate("^<%m/%d/%y@%H:%M:%S>")
            # MySQL: 130322 11:46:11
            self.appendTemplate("^%y%m%d  ?%H:%M:%S")
            # Apache Tomcat
            self.appendTemplate("%b %d, %Y %I:%M:%S %p")
            # ASSP: Apr-27-13 02:33:06
            self.appendTemplate("^%b-%d-%y %H:%M:%S")
            # HLDS: L 27/02/2016 - 00:33:41:
            self.appendTemplate("^L %d/%m/%Y - %H:%M:%S:")
        finally:
            self.__lock.release()

    @property
    def templates(self):
        """List of template instances managed by the detector.
		"""
        return self.__templates

    def matchTime(self, line):
        """Attempts to find date on a log line using templates.

		This uses the templates' `matchDate` method in an attempt to find
		a date. It also increments the match hit count for the winning
		template.

		Parameters
		----------
		line : str
			Line which is searched by the date templates.

		Returns
		-------
		re.MatchObject
			The regex match returned from the first successfully matched
			template.
		"""
        self.__lock.acquire()
        try:
            for template in self.__templates:
                match = template.matchDate(line)
                if match is not None:
                    logSys.debug("Matched time template %s" % template.name)
                    template.hits += 1
                    return match
            return None
        finally:
            self.__lock.release()

    def getTime(self, line):
        """Attempts to return the date on a log line using templates.

		This uses the templates' `getDate` method in an attempt to find
		a date.

		Parameters
		----------
		line : str
			Line which is searched by the date templates.

		Returns
		-------
		float
			The Unix timestamp returned from the first successfully matched
			template.
		"""
        self.__lock.acquire()
        try:
            for template in self.__templates:
                try:
                    date = template.getDate(line)
                    if date is None:
                        continue
                    logSys.debug("Got time %f for \"%r\" using template %s" %
                                 (date[0], date[1].group(), template.name))
                    return date
                except ValueError:
                    pass
            return None
        finally:
            self.__lock.release()

    def sortTemplate(self):
        """Sort the date templates by number of hits

		Sort the template lists using the hits score. This method is not
		called in this object and thus should be called from time to time.
		This ensures the most commonly matched templates are checked first,
		improving performance of matchTime and getTime.
		"""
        self.__lock.acquire()
        try:
            logSys.debug("Sorting the template list")
            self.__templates.sort(key=lambda x: x.hits, reverse=True)
            t = self.__templates[0]
            logSys.debug("Winning template: %s with %d hits" %
                         (t.name, t.hits))
        finally:
            self.__lock.release()
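
An illustrative run of the detector above (assuming fail2ban's template
classes are importable as in the original module): templates are tried in
order, the winner's hit count grows, and sortTemplate() then moves hot
templates to the front:

detector = DateDetector()
detector.addDefaultTemplate()

line = "2005-01-23 21:59:59 sshd[123]: authentication failure"
match = detector.matchTime(line)
if match is not None:
    print(match.group())            # just the date portion of the line

date = detector.getTime(line)       # (timestamp, match data) or None
detector.sortTemplate()             # promote frequently matching templates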
Example #42
class TetrisApp(object):
    def __init__(self, runner=None):
        self.DROPEVENT = pygame.USEREVENT + 1

        pygame.init()
        pygame.display.set_caption("Tetris_AI")
        pygame.key.set_repeat(250, 25)
        self.width = CELL_SIZE * (COLS + 10)
        self.height = CELL_SIZE * ROWS
        self.rlim = CELL_SIZE * COLS
        self.bground_grid = [[8 if x % 2 == y % 2 else 0 for x in range(COLS)]
                             for y in range(ROWS)]
        self.default_font = pygame.font.Font(pygame.font.get_default_font(),
                                             11)
        self.screen = pygame.display.set_mode((self.width, self.height))
        self.next_block = BLOCKS[randrange(len(BLOCKS))]
        self.gameover = False
        self.runner = runner
        self.player_ai = None
        self.lock = Lock()
        self.init_game()

    def start_game(self):
        if self.gameover:
            self.init_game()
            self.gameover = False

    def ai_toggle(self):
        if self.player_ai:
            self.player_ai.instant_play = not self.player_ai.instant_play

    def draw_matrix(self, matrix, offset):
        off_x, off_y = offset
        for y, row in enumerate(matrix):
            for x, val in enumerate(row):
                if val:
                    try:
                        pygame.draw.rect(
                            self.screen, COLORS[val],
                            pygame.Rect((off_x + x) * CELL_SIZE,
                                        (off_y + y) * CELL_SIZE, CELL_SIZE,
                                        CELL_SIZE), 0)
                    except IndexError:
                        print("Corrupted board")
                        print(self.board)

    def run(self):
        key_actions = {
            'ESCAPE': sys.exit,
            'LEFT': lambda: self.move(-1),
            'RIGHT': lambda: self.move(+1),
            'DOWN': self.drop,
            'UP': self.rotate_block,
            'SPACE': self.start_game,
            'RETURN': self.insta_drop,
            'p': self.ai_toggle,
        }

        clock = pygame.time.Clock()
        while True:
            if DRAW:
                self.screen.fill((0, 0, 0))
                if self.gameover:
                    self.center_msg(
                        "Game Over!\nYour score: %d\nPress space to continue" %
                        self.score)
                else:
                    pygame.draw.line(self.screen, (255, 255, 255),
                                     (self.rlim + 1, 0),
                                     (self.rlim + 1, self.height - 1))
                    self.disp_msg("Next:", (self.rlim + CELL_SIZE, 2))
                    self.disp_msg("Score: %d" % self.score,
                                  (self.rlim + CELL_SIZE, CELL_SIZE * 5))
                    self.draw_matrix(self.bground_grid, (0, 0))
                    self.draw_matrix(self.board, (0, 0))
                    self.draw_matrix(self.block, (self.block_x, self.block_y))
                    self.draw_matrix(self.next_block, (COLS + 1, 2))
                pygame.display.update()

            for event in pygame.event.get():
                if event.type == self.DROPEVENT:
                    self.drop()
                elif event.type == pygame.QUIT:
                    sys.exit()
                elif event.type == pygame.KEYDOWN:
                    for key in key_actions:
                        if event.key == getattr(pygame, "K_" + key):
                            key_actions[key]()

            clock.tick(MAX_FPS)

    def disp_msg(self, msg, topleft):
        x, y = topleft
        for line in msg.splitlines():
            self.screen.blit(
                self.default_font.render(line, False, (255, 255, 255),
                                         (0, 0, 0)), (x, y))
            y += 14

    def center_msg(self, msg):
        for i, line in enumerate(msg.splitlines()):
            msg_image = self.default_font.render(line, False, (255, 255, 255),
                                                 (0, 0, 0))

            msgim_center_x, msgim_center_y = msg_image.get_size()
            msgim_center_x //= 2
            msgim_center_y //= 2

            self.screen.blit(msg_image,
                             (self.width // 2 - msgim_center_x,
                              self.height // 2 - msgim_center_y + i * 22))

    def new_block(self):
        self.block = self.next_block
        self.next_block = BLOCKS[randrange(len(BLOCKS))]
        self.block_x = COLS // 2 - len(self.block[0]) // 2
        self.block_y = 0
        self.score += 1

        if check_collision(self.board, self.block,
                           (self.block_x, self.block_y)):
            self.gameover = True
            if self.runner:
                self.runner.on_game_over(self.score)

    def init_game(self):
        self.board = new_board()
        self.score = 0
        self.new_block()
        pygame.time.set_timer(self.DROPEVENT, DROP_TIME)

    def add_cl_lines(self, n):
        linescores = [0, 50, 100, 200, 1000]
        self.score += linescores[n]

    def rotate_block(self):
        if not self.gameover:
            new_block = rotate_clockwise(self.block)
            if not check_collision(self.board, new_block,
                                   (self.block_x, self.block_y)):
                self.block = new_block

    def move_to(self, x):
        self.move(x - self.block_x)

    def move(self, delta_x):
        if not self.gameover:
            new_x = self.block_x + delta_x
            if new_x < 0:
                new_x = 0
            if new_x > COLS - len(self.block[0]):
                new_x = COLS - len(self.block[0])
            if not check_collision(self.board, self.block,
                                   (new_x, self.block_y)):
                self.block_x = new_x

    def drop(self):
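        # Note: the lock guards board/block state and is released manually
        # (rather than via try/finally) so that the AI callback further down
        # runs unlocked and may re-enter drop()/move() without deadlocking
        # on this non-reentrant Lock.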
        self.lock.acquire()
        if not self.gameover:
            self.block_y += 1
            if check_collision(self.board, self.block,
                               (self.block_x, self.block_y)):
                self.board = join_matrices(self.board, self.block,
                                           (self.block_x, self.block_y))
                self.new_block()
                cleared_rows = 0
                for i, row in enumerate(self.board[:-1]):
                    if 0 not in row:
                        self.board = remove_row(self.board, i)
                        cleared_rows += 1
                self.add_cl_lines(cleared_rows)

                self.lock.release()
                if self.player_ai:
                    self.player_ai.do_action()

                return True
        self.lock.release()
        return False

    def insta_drop(self):
        if not self.gameover:
            while not self.drop():
                pass
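The drop() method above shows a pattern worth isolating: with a non-reentrant threading.Lock, the lock must be released before invoking a callback that may call back into the locked object. Below is a minimal sketch of just that pattern; the Stepper/on_landed names are illustrative and not part of the example above.

from threading import Lock

class Stepper:
    def __init__(self, on_landed=None):
        self.lock = Lock()       # non-reentrant, like TetrisApp's lock
        self.y = 0
        self.on_landed = on_landed

    def advance(self):
        self.lock.acquire()
        self.y += 1
        if self.y >= 3:  # "piece landed"
            self.y = 0
            # release before the callback: if on_landed() called advance()
            # while the lock were still held, this thread would deadlock
            self.lock.release()
            if self.on_landed:
                self.on_landed()
            return True
        self.lock.release()
        return False

stepper = Stepper(on_landed=lambda: print("landed"))
while not stepper.advance():   # mirrors insta_drop()'s loop
    pass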
Example #43
class WebsocketServer(object):
    def __init__(self, args, mitm_mapper, db_wrapper, routemanagers, device_mappings, auths, pogoWindowManager,
                 configmode=False):
        self.__current_users = {}
        self.__current_users_mutex = Lock()
        self.__stop_server = Event()

        self.args = args
        self.__listen_address = args.ws_ip
        self.__listen_port = int(args.ws_port)

        self.__send_queue = queue.Queue()

        self.__received = {}
        self.__received_mutex = Lock()
        self.__requests = {}
        self.__requests_mutex = Lock()

        self.__db_wrapper = db_wrapper
        self.__device_mappings = device_mappings
        self.__routemanagers = routemanagers
        self.__auths = auths
        self.__pogoWindowManager = pogoWindowManager
        self.__mitm_mapper = mitm_mapper

        self.__next_id = 0
        self.__id_mutex = Lock()
        self._configmode = configmode

        self.__loop = None

    def start_server(self):
        logger.info("Starting websocket server...")
        self.__loop = asyncio.new_event_loop()
        # build list of origin IDs
        allowed_origins = []
        for device in self.__device_mappings.keys():
            allowed_origins.append(device)

        logger.debug("Device mappings: {}", str(self.__device_mappings))
        logger.debug("Allowed origins derived: {}", str(allowed_origins))

        asyncio.set_event_loop(self.__loop)
        self.__loop.run_until_complete(
            websockets.serve(self.handler, self.__listen_address, self.__listen_port, max_size=2 ** 25,
                             origins=allowed_origins, ping_timeout=10, ping_interval=15))
        self.__loop.run_forever()

    def stop_server(self):
        # TODO: cleanup workers...
        self.__stop_server.set()
        self.__current_users_mutex.acquire()
        for id, worker in self.__current_users.items():
            logger.info('Stopping worker {} to apply new mappings.', id)
            worker[1].stop_worker()
        self.__current_users_mutex.release()

        # wait for all workers to be stopped...
        while True:
            self.__current_users_mutex.acquire()
            if len(self.__current_users) == 0:
                self.__current_users_mutex.release()
                break
            else:
                self.__current_users_mutex.release()
                time.sleep(1)
        for routemanager in self.__routemanagers.keys():
            area = self.__routemanagers.get(routemanager, None)
            if area is None:
                continue
            area["routemanager"].stop_routemanager()

        if self.__loop is not None:
            self.__loop.call_soon_threadsafe(self.__loop.stop)

    async def handler(self, websocket_client_connection, path):
        logger.info("Waiting for connection...")
        # wait for a connection...
        continue_work = await self.__register(websocket_client_connection)
        if not continue_work:
            logger.error("Failed registering client, closing connection")
            await websocket_client_connection.close()
            return

        consumer_task = asyncio.ensure_future(
            self.__consumer_handler(websocket_client_connection))
        producer_task = asyncio.ensure_future(
            self.__producer_handler(websocket_client_connection))
        done, pending = await asyncio.wait(
            [producer_task, consumer_task],
            return_when=asyncio.FIRST_COMPLETED,
        )
        logger.info("consumer or producer of {} stopped, cancelling pending tasks", str(
            websocket_client_connection.request_headers.get_all("Origin")[0]))
        for task in pending:
            task.cancel()
        logger.info("Awaiting unregister of {}", str(
            websocket_client_connection.request_headers.get_all("Origin")[0]))
        await self.__unregister(websocket_client_connection)
        logger.info("All done with {}", str(
            websocket_client_connection.request_headers.get_all("Origin")[0]))

    async def __register(self, websocket_client_connection):
        logger.info("Client {} registering", str(
            websocket_client_connection.request_headers.get_all("Origin")[0]))
        if self.__stop_server.is_set():
            logger.info(
                "MAD is set to shut down, not accepting new connection")
            return False

        try:
            id = str(
                websocket_client_connection.request_headers.get_all("Origin")[0])
        except IndexError:
            # the Origin header is missing, so it cannot be logged here
            logger.warning("Client tried to connect without Origin header")
            return False

        if self.__auths:
            try:
                authBase64 = str(
                    websocket_client_connection.request_headers.get_all("Authorization")[0])
            except IndexError:
                logger.warning("Client from {} tried to connect without auth header", str(
                    websocket_client_connection.request_headers.get_all("Origin")[0]))
                return False

        self.__current_users_mutex.acquire()
        try:
            logger.debug("Checking if {} is already present", str(id))
            user_present = self.__current_users.get(id)
            if user_present is not None:
                logger.warning("Worker with origin {} is already running, killing the running one and have client reconnect",
                               str(websocket_client_connection.request_headers.get_all("Origin")[0]))
                user_present[1].stop_worker()
                return False
            elif self.__auths and authBase64 and not check_auth(authBase64, self.args, self.__auths):
                logger.warning("Invalid auth details received from {}", str(
                    websocket_client_connection.request_headers.get_all("Origin")[0]))
                return False

            if self._configmode:
                worker = WorkerConfigmode(self.args, id, self)
                logger.debug("Starting worker for {}", str(id))
                new_worker_thread = Thread(
                    name='worker_%s' % id, target=worker.start_worker)
                self.__current_users[id] = [
                    new_worker_thread, worker, websocket_client_connection, 0]
                return True

            last_known_state = {}
            client_mapping = self.__device_mappings[id]
            devicesettings = client_mapping["settings"]
            logger.info("Setting up routemanagers for {}", str(id))

            if client_mapping.get("walker", None) is not None:
                if "walker_area_index" not in devicesettings:
                    devicesettings['walker_area_index'] = 0
                    devicesettings['finished'] = False
                    devicesettings['last_action_time'] = None
                    devicesettings['last_cleanup_time'] = None

                walker_index = devicesettings.get('walker_area_index', 0)

                if walker_index > 0:
                    # check status of last area
                    if not devicesettings.get('finished', False):
                        logger.info(
                            'Something went wrong with the last round - going back to the old area')
                        walker_index -= 1
                        devicesettings['walker_area_index'] = walker_index

                walker_area_array = client_mapping["walker"]
                walker_settings = walker_area_array[walker_index]

                # pre-check walker settings
                while not pre_check_value(walker_settings) and walker_index-1 <= len(walker_area_array):
                    walker_area_name = walker_area_array[walker_index]['walkerarea']
                    logger.info(
                        '{} not using area {} - walker value out of range', str(id), str(walker_area_name))
                    if walker_index >= len(walker_area_array) - 1:
                        logger.error(
                            'Could not find any working area - check your config')
                        walker_index = 0
                        devicesettings['walker_area_index'] = walker_index
                        walker_settings = walker_area_array[walker_index]
                        break
                    walker_index += 1
                    devicesettings['walker_area_index'] = walker_index
                    walker_settings = walker_area_array[walker_index]

                if devicesettings['walker_area_index'] >= len(walker_area_array):
                    # check if the array is smaller than expected - e.g. due to on-the-fly changes in mappings.json
                    devicesettings['walker_area_index'] = 0
                    devicesettings['finished'] = False
                    walker_index = devicesettings.get('walker_area_index', 0)

                walker_area_name = walker_area_array[walker_index]['walkerarea']

                if walker_area_name not in self.__routemanagers:
                    raise WrongAreaInWalker()

                logger.debug('Devicesettings {}: {}', str(id), devicesettings)
                logger.info('{} using walker area {} [{}/{}]', str(id), str(
                    walker_area_name), str(walker_index+1), str(len(walker_area_array)))
                walker_routemanager = \
                    self.__routemanagers[walker_area_name].get(
                        "routemanager", None)
                devicesettings['walker_area_index'] += 1
                devicesettings['finished'] = False
                if walker_index >= len(walker_area_array) - 1:
                    devicesettings['walker_area_index'] = 0

                # set global mon_iv
                client_mapping['mon_ids_iv'] = \
                    self.__routemanagers[walker_area_name].get(
                        "routemanager").settings.get("mon_ids_iv", [])

            else:
                walker_routemanager = None

            if "last_location" not in devicesettings:
                devicesettings['last_location'] = Location(0.0, 0.0)

            logger.debug("Setting up worker for {}", str(id))

            if walker_routemanager is None:
                pass
            elif walker_routemanager.mode in ["raids_mitm", "mon_mitm", "iv_mitm"]:
                worker = WorkerMITM(self.args, id, last_known_state, self, walker_routemanager,
                                    self.__mitm_mapper, devicesettings, db_wrapper=self.__db_wrapper,
                                    pogoWindowManager=self.__pogoWindowManager, walker=walker_settings)
            elif walker_routemanager.mode in ["raids_ocr"]:
                from worker.WorkerOCR import WorkerOCR
                worker = WorkerOCR(self.args, id, last_known_state, self, walker_routemanager,
                                   devicesettings, db_wrapper=self.__db_wrapper,
                                   pogoWindowManager=self.__pogoWindowManager, walker=walker_settings)
            elif walker_routemanager.mode in ["pokestops"]:
                worker = WorkerQuests(self.args, id, last_known_state, self, walker_routemanager,
                                      self.__mitm_mapper, devicesettings, db_wrapper=self.__db_wrapper,
                                      pogoWindowManager=self.__pogoWindowManager, walker=walker_settings)
            elif walker_routemanager.mode in ["idle"]:
                worker = WorkerConfigmode(self.args, id, self)
            else:
                logger.error("Mode not implemented")
                sys.exit(1)

            logger.debug("Starting worker for {}", str(id))
            new_worker_thread = Thread(
                name='worker_%s' % id, target=worker.start_worker)

            new_worker_thread.daemon = False

            self.__current_users[id] = [new_worker_thread,
                                        worker, websocket_client_connection, 0]
            new_worker_thread.start()
        except WrongAreaInWalker:
            logger.error('Unknown Area in Walker settings - check config')
        finally:
            self.__current_users_mutex.release()

        return True

    async def __unregister(self, websocket_client_connection):
        worker_id = str(
            websocket_client_connection.request_headers.get_all("Origin")[0])
        self.__current_users_mutex.acquire()
        worker = self.__current_users.get(worker_id, None)
        if worker is not None:
            self.__current_users.pop(worker_id)
        self.__current_users_mutex.release()
        logger.info("Worker {} unregistered", str(worker_id))

    async def __producer_handler(self, websocket_client_connection):
        while websocket_client_connection.open:
            # retrieve the next message from the queue to be sent, block if empty
            next_message = None
            while next_message is None and websocket_client_connection.open:
                logger.debug("Retrieving next message to send")
                next_message = await self.__retrieve_next_send(websocket_client_connection)
                if next_message is None:
                    # the connection was closed while waiting, stop producing
                    return
                await self.__send_specific(websocket_client_connection, next_message.id, next_message.message)

    async def __send_specific(self, websocket_client_connection, id, message):
        # await websocket_client_connection.send(message)
        for key, value in self.__current_users.items():
            if key == id and value[2].open:
                await value[2].send(message)

    async def __retrieve_next_send(self, websocket_client_connection):
        found = None
        while found is None and websocket_client_connection.open:
            try:
                found = self.__send_queue.get_nowait()
            except Exception as e:
                await asyncio.sleep(0.02)
        if not websocket_client_connection.open:
            logger.warning(
                "retrieve_next_send: connection closed, returning None")
        return found

    async def __consumer_handler(self, websocket_client_connection):
        if websocket_client_connection is None:
            return
        worker_id = str(
            websocket_client_connection.request_headers.get_all("Origin")[0])
        logger.info("Consumer handler of {} starting", str(worker_id))
        while websocket_client_connection.open:
            message = None
            try:
                message = await asyncio.wait_for(websocket_client_connection.recv(), timeout=2.0)
            except asyncio.TimeoutError as te:
                await asyncio.sleep(0.02)
            except websockets.exceptions.ConnectionClosed as cc:
                logger.warning(
                    "Connection to {} was closed, stopping worker", str(worker_id))
                self.__current_users_mutex.acquire()
                worker = self.__current_users.get(worker_id, None)
                self.__current_users_mutex.release()
                if worker is not None:
                    # TODO: do it abruptly in the worker, maybe set a flag to be checked for in send_and_wait to
                    # TODO: throw an exception
                    worker[1].stop_worker()
                self.clean_up_user(worker_id, None)
                return

            if message is not None:
                await self.__on_message(message)
        logger.warning(
            "Connection of {} closed in consumer_handler", str(worker_id))

    def clean_up_user(self, worker_id, worker_instance):
        """
        :param worker_id: The ID/Origin of the worker
        :param worker_instance: None if the cleanup is called from within the websocket server
        :return:
        """
        self.__current_users_mutex.acquire()
        if worker_id in self.__current_users.keys() and (worker_instance is None
                                                         or self.__current_users[worker_id][1] == worker_instance):
            if self.__current_users[worker_id][2].open:
                logger.info("Calling close for {}...", str(worker_id))
                asyncio.ensure_future(
                    self.__current_users[worker_id][2].close(), loop=self.__loop)
            self.__current_users.pop(worker_id)
            logger.info("Info of {} removed in websocket", str(worker_id))
        self.__current_users_mutex.release()

    async def __on_message(self, message):
        id = -1
        response = None
        if isinstance(message, str):
            logger.debug("Receiving message: {}", str(message.strip()))
            splitup = message.split(";")
            id = int(splitup[0])
            response = splitup[1]
        else:
            logger.debug("Received binary values.")
            id = int.from_bytes(message[:4], byteorder='big', signed=False)
            response = message[4:]
        await self.__set_response(id, response)
        if not await self.__set_event(id):
            # remove the response again since no request was waiting for it
            self.__pop_response(id)

    async def __set_event(self, id):
        result = False
        self.__requests_mutex.acquire()
        if id in self.__requests:
            self.__requests[id].set()
            result = True
        else:
            # the request has already been deleted due to a timeout...
            logger.error("Request has already been deleted...")
        self.__requests_mutex.release()
        return result

    async def __set_response(self, id, message):
        self.__received_mutex.acquire()
        self.__received[id] = message
        self.__received_mutex.release()

    def __pop_response(self, id):
        self.__received_mutex.acquire()
        message = self.__received.pop(id)
        self.__received_mutex.release()
        return message

    def __get_new_message_id(self):
        self.__id_mutex.acquire()
        # wrap the message id around at 100000 and skip 0
        self.__next_id = int(math.fmod(self.__next_id + 1, 100000))
        if self.__next_id == 0:
            self.__next_id = 1
        toBeReturned = self.__next_id
        self.__id_mutex.release()
        return toBeReturned

    def __send(self, id, to_be_sent):
        next_message = OutgoingMessage(id, to_be_sent)
        self.__send_queue.put(next_message)

    def send_and_wait(self, id, worker_instance, message, timeout):
        logger.debug("{} sending command: {}", str(id), message.strip())
        self.__current_users_mutex.acquire()
        user_entry = self.__current_users.get(id, None)
        self.__current_users_mutex.release()

        if user_entry is None or (user_entry[1] != worker_instance and worker_instance != 'madmin'):
            raise WebsocketWorkerRemovedException

        message_id = self.__get_new_message_id()
        message_event = Event()
        message_event.clear()

        self.__set_request(message_id, message_event)

        to_be_sent = u"%s;%s" % (str(message_id), message)
        logger.debug("To be sent: {}", to_be_sent.strip())
        self.__send(id, to_be_sent)

        # now wait for the response!
        result = None
        logger.debug("Timeout: {}", str(timeout))
        if message_event.wait(timeout):
            logger.debug("Received answer in time, popping response")
            self.__reset_fail_counter(id)
            result = self.__pop_response(message_id)
            if isinstance(result, str):
                logger.debug("Response to {}: {}",
                             str(id), str(result.strip()))
            else:
                logger.debug("Received binary data to {}, starting with {}", str(
                    id), str(result[:10]))
        else:
            # timeout reached
            logger.warning("Timeout, increasing timeout-counter")
            # TODO: why is the user removed here?
            new_count = self.__increase_fail_counter(id)
            if new_count > 5:
                logger.error("5 consecutive timeouts to {}, cleanup", str(id))
                # TODO: signal worker to stop and NOT cleanup the websocket by itself!
                self.clean_up_user(id, None)
                raise WebsocketWorkerTimeoutException

        self.__remove_request(message_id)
        return result

    def __set_request(self, id, event):
        self.__requests_mutex.acquire()
        self.__requests[id] = event
        self.__requests_mutex.release()

    def __reset_fail_counter(self, id):
        self.__current_users_mutex.acquire()
        if id in self.__current_users.keys():
            self.__current_users[id][3] = 0
        self.__current_users_mutex.release()

    def __increase_fail_counter(self, id):
        self.__current_users_mutex.acquire()
        if id in self.__current_users.keys():
            new_count = self.__current_users[id][3] + 1
            self.__current_users[id][3] = new_count
        else:
            new_count = 100
        self.__current_users_mutex.release()
        return new_count

    def __remove_request(self, message_id):
        self.__requests_mutex.acquire()
        self.__requests.pop(message_id)
        self.__requests_mutex.release()

    def update_settings(self, routemanagers, device_mappings, auths):
        for dev in self.__device_mappings:
            if "last_location" in self.__device_mappings[dev]['settings']:
                device_mappings[dev]['settings']["last_location"] = \
                    self.__device_mappings[dev]['settings']["last_location"]
            if "walker_area_index" in self.__device_mappings[dev]['settings']:
                device_mappings[dev]['settings']["walker_area_index"] = \
                    self.__device_mappings[dev]['settings']["walker_area_index"]
            if "last_mode" in self.__device_mappings[dev]['settings']:
                device_mappings[dev]['settings']["last_mode"] = \
                    self.__device_mappings[dev]['settings']["last_mode"]
        self.__current_users_mutex.acquire()
        # save a reference to the old routemanagers so they can be stopped below
        old_routemanagers = self.__routemanagers
        self.__device_mappings = device_mappings
        self.__routemanagers = routemanagers
        self.__auths = auths
        for id, worker in self.__current_users.items():
            logger.info('Stopping worker {} to apply new mappings.', id)
            worker[1].stop_worker()
        self.__current_users_mutex.release()
        for routemanager in old_routemanagers.keys():
            area = old_routemanagers.get(routemanager, None)
            if area is None:
                continue
            area["routemanager"].stop_routemanager()

    def get_reg_origins(self):
        return self.__current_users

    def get_origin_communicator(self, origin):
        if self.__current_users.get(origin, None) is not None:
            return self.__current_users[origin][1].get_communicator()
        return None

    def set_geofix_sleeptime_worker(self, origin, sleeptime):
        if self.__current_users.get(origin, None) is not None:
            return self.__current_users[origin][1].set_geofix_sleeptime(sleeptime)
        return False
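send_and_wait() above correlates an outgoing command with its reply via a numeric message id and a threading.Event per pending request. Below is a minimal, thread-only sketch of that correlation, stripped of the websocket machinery; the Correlator name and the transport callable are illustrative assumptions, not part of the server above.

from threading import Event, Lock, Thread

class Correlator:
    def __init__(self):
        self._lock = Lock()
        self._events = {}      # message id -> Event of the waiting caller
        self._responses = {}   # message id -> received payload
        self._next_id = 0

    def _new_id(self):
        with self._lock:
            self._next_id = (self._next_id % 100000) + 1  # ids 1..100000
            return self._next_id

    def send_and_wait(self, transport, payload, timeout):
        message_id = self._new_id()
        event = Event()
        with self._lock:
            self._events[message_id] = event
        transport("%s;%s" % (message_id, payload))
        try:
            if not event.wait(timeout):
                return None  # timed out; a late reply is never returned
            with self._lock:
                return self._responses.pop(message_id)
        finally:
            with self._lock:
                self._events.pop(message_id, None)

    def on_message(self, raw):
        # replies arrive as "id;payload", exactly like __on_message above
        msg_id, response = raw.split(";", 1)
        msg_id = int(msg_id)
        with self._lock:
            self._responses[msg_id] = response
            event = self._events.get(msg_id)
        if event is not None:
            event.set()

correlator = Correlator()
# loopback transport: "deliver" the reply from another thread
loopback = lambda raw: Thread(target=correlator.on_message, args=(raw,)).start()
print(correlator.send_and_wait(loopback, "pong", timeout=2.0))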
Example #44
class WorkerBase(ABC):
    def __init__(self,
                 args,
                 id,
                 last_known_state,
                 websocket_handler,
                 walker_routemanager,
                 devicesettings,
                 db_wrapper,
                 pogoWindowManager,
                 NoOcr=True,
                 walker=None):
        # self.thread_pool = ThreadPool(processes=2)
        self._walker_routemanager = walker_routemanager
        self._route_manager_last_time = None
        self._websocket_handler = websocket_handler
        self._communicator = Communicator(websocket_handler, id, self,
                                          args.websocket_command_timeout)
        self._id = id
        self._applicationArgs = args
        self._last_known_state = last_known_state
        self._work_mutex = Lock()
        self.loop = None
        self.loop_started = Event()
        self.loop_tid = None
        self._async_io_looper_thread = None
        self._location_count = 0
        self._init = self._walker_routemanager.init
        self._walker = walker
        self._walkerstart = None

        self._lastScreenshotTaken = 0
        self._stop_worker_event = Event()
        self._db_wrapper = db_wrapper
        self._redErrorCount = 0
        self._lastScreenHash = None
        self._lastScreenHashCount = 0
        self._devicesettings = devicesettings
        self._resocalc = Resocalculator
        self._screen_x = 0
        self._screen_y = 0
        self._lastStart = ""
        self._pogoWindowManager = pogoWindowManager

        self.current_location = Location(0.0, 0.0)
        self.last_location = self._devicesettings.get("last_location", None)
        if self.last_location is None:
            self.last_location = Location(0.0, 0.0)
        self.last_processed_location = Location(0.0, 0.0)

    @abstractmethod
    def _pre_work_loop(self):
        """
        Work to be done before the main while true work-loop
        Start off asyncio loops etc in here
        :return:
        """
        pass

    @abstractmethod
    def _health_check(self):
        """
        Health check before a location is grabbed. Internally, a self._start_pogo call is already executed since
        that usually includes a topmost check
        :return:
        """
        pass

    @abstractmethod
    def _pre_location_update(self):
        """
        Override to run stuff like update injections settings in MITM worker
        Runs before walk/teleport to the location previously grabbed
        :return:
        """
        pass

    @abstractmethod
    def _move_to_location(self):
        """
        The location has previously been grabbed; the overridden function will be called.
        You may teleport or walk, as you choose.
        Any post walk/teleport delays/sleeps have to be run in the derived, overriding method
        :return:
        """
        pass

    @abstractmethod
    def _post_move_location_routine(self, timestamp):
        """
        Routine called after having moved to a new location. MITM worker e.g. has to wait_for_data
        :param timestamp:
        :return:
        """

    @abstractmethod
    def _start_pogo(self):
        """
        Routine to start pogo.
        Return the state as a boolean to indicate a successful start
        :return:
        """
        pass

    @abstractmethod
    def _cleanup(self):
        """
        Cleanup any threads you started in derived classes etc
        self.stop_worker() and self.loop.stop() will be called afterwards
        :return:
        """

    @abstractmethod
    def _valid_modes(self):
        """
        Return a list of valid modes for the health checks
        :return:
        """

    def _start_asyncio_loop(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.loop_tid = current_thread()
        self.loop.call_soon(self.loop_started.set)
        self.loop.run_forever()

    def _add_task_to_loop(self, coro):
        f = functools.partial(self.loop.create_task, coro)
        if current_thread() == self.loop_tid:
            return f(
            )  # We can call directly if we're not going between threads.
        else:
            # We're in a non-event loop thread so we use a Future
            # to get the task from the event loop thread once
            # it's ready.
            return self.loop.call_soon_threadsafe(f)

    def start_worker(self):
        # async_result = self.thread_pool.apply_async(self._main_work_thread, ())
        t_main_work = Thread(target=self._main_work_thread)
        t_main_work.daemon = False
        t_main_work.start()
        # do some other stuff in the main process
        while not self._stop_worker_event.is_set():
            time.sleep(1)
        t_main_work.join()
        log.info("Worker %s stopping gracefully" % str(self._id))
        # async_result.get()
        return self._last_known_state

    def stop_worker(self):
        self._stop_worker_event.set()
        log.warning("Worker %s stop called" % str(self._id))

    def _internal_pre_work(self):
        current_thread().name = self._id

        self._work_mutex.acquire()
        try:
            self._turn_screen_on_and_start_pogo()
        except WebsocketWorkerRemovedException:
            log.error("Timeout during init of worker %s" % str(self._id))
            # no cleanup required here? TODO: signal websocket server somehow
            self._stop_worker_event.set()
            return

        # register worker in routemanager
        log.info("Try to register %s in Routemanager %s" %
                 (str(self._id), str(self._walker_routemanager.name)))
        self._walker_routemanager.register_worker(self._id)

        self._work_mutex.release()

        self._async_io_looper_thread = Thread(name='%s_asyncio' % str(self._id),
                                              target=self._start_asyncio_loop)
        self._async_io_looper_thread.daemon = False
        self._async_io_looper_thread.start()

        self.loop_started.wait()
        self._pre_work_loop()

    def _internal_health_check(self):
        # check if pogo is topmost and start if necessary
        log.debug(
            "_internal_health_check: Calling _start_pogo routine to check if pogo is topmost"
        )
        self._work_mutex.acquire()
        log.debug("_internal_health_check: worker lock acquired")
        log.debug("Checking if we need to restart pogo")
        # Restart pogo every now and then...
        if self._devicesettings.get("restart_pogo", 80) > 0:
            # log.debug("main: Current time - lastPogoRestart: %s" % str(curTime - lastPogoRestart))
            # if curTime - lastPogoRestart >= (args.restart_pogo * 60):
            if self._location_count > self._devicesettings.get(
                    "restart_pogo", 80):
                log.error("scanned " +
                          str(self._devicesettings.get("restart_pogo", 80)) +
                          " locations, restarting pogo")
                pogo_started = self._restart_pogo()
                self._location_count = 0
            else:
                pogo_started = self._start_pogo()
        else:
            pogo_started = self._start_pogo()
        self._work_mutex.release()
        log.debug("_internal_health_check: worker lock released")
        return pogo_started

    def _internal_cleanup(self):
        # set the event just to make sure - in case of exceptions for example
        self._stop_worker_event.set()
        log.info("Internal cleanup of %s started" % str(self._id))
        self._cleanup()
        log.info("Internal cleanup of %s signalling end to websocketserver" %
                 str(self._id))
        self._walker_routemanager.unregister_worker(self._id)

        log.info("Stopping Route")
        # self.stop_worker()
        if self._async_io_looper_thread is not None:
            log.info("Stopping worker's asyncio loop")
            self.loop.call_soon_threadsafe(self.loop.stop)
            self._async_io_looper_thread.join()

        self._communicator.cleanup_websocket()
        log.info("Internal cleanup of %s finished" % str(self._id))

    def _main_work_thread(self):
        # TODO: signal websocketserver the removal
        try:
            self._internal_pre_work()
        except (InternalStopWorkerException, WebsocketWorkerRemovedException, WebsocketWorkerTimeoutException) \
                as e:
            log.error(
                "Failed initializing worker %s, connection terminated exceptionally"
                % str(self._id))
            self._internal_cleanup()
            return

        if not check_max_walkers_reached(self._walker,
                                         self._walker_routemanager):
            log.warning('Max. walkers reached in area %s - closing connection' %
                        str(self._walker_routemanager.name))
            self._devicesettings['finished'] = True
            self._internal_cleanup()
            return

        while not self._stop_worker_event.is_set():
            try:
                # TODO: consider getting results of health checks and aborting the entire worker?
                walkercheck = self.check_walker()
                if not walkercheck:
                    self._devicesettings['finished'] = True
                    break
            except (InternalStopWorkerException, WebsocketWorkerRemovedException, WebsocketWorkerTimeoutException) \
                    as e:
                log.warning("Worker %s killed by walker settings" %
                            str(self._id))
                break

            try:
                # TODO: consider getting results of health checks and aborting the entire worker?
                self._internal_health_check()
                self._health_check()
            except (InternalStopWorkerException, WebsocketWorkerRemovedException, WebsocketWorkerTimeoutException) \
                    as e:
                log.error(
                    "Websocket connection to %s lost while running healthchecks, "
                    "connection terminated exceptionally" % str(self._id))
                break

            try:
                settings = self._internal_grab_next_location()
                if settings is None:
                    continue
            except (InternalStopWorkerException, WebsocketWorkerRemovedException, WebsocketWorkerTimeoutException) \
                    as e:
                log.warning(
                    "Worker of %s does not support mode that's to be run, "
                    "connection terminated exceptionally" % str(self._id))
                break

            try:
                log.debug('Checking if new location is valid')
                valid = self._check_location_is_valid()
                if not valid:
                    break
            except (InternalStopWorkerException, WebsocketWorkerRemovedException, WebsocketWorkerTimeoutException) \
                    as e:
                log.warning("Worker %s get non valid coords!" % str(self._id))
                break

            try:
                self._pre_location_update()
            except (InternalStopWorkerException, WebsocketWorkerRemovedException, WebsocketWorkerTimeoutException) \
                    as e:
                log.warning(
                    "Worker of %s stopping because of stop signal in pre_location_update, "
                    "connection terminated exceptionally" % str(self._id))
                break

            try:
                log.debug(
                    'main worker %s: LastLat: %s, LastLng: %s, CurLat: %s, CurLng: %s'
                    %
                    (str(self._id), self._devicesettings["last_location"].lat,
                     self._devicesettings["last_location"].lng,
                     self.current_location.lat, self.current_location.lng))
                time_snapshot, process_location = self._move_to_location()
            except (InternalStopWorkerException, WebsocketWorkerRemovedException, WebsocketWorkerTimeoutException) \
                    as e:
                log.warning(
                    "Worker %s failed moving to new location, stopping worker, "
                    "connection terminated exceptionally" % str(self._id))
                break

            if process_location:
                self._add_task_to_loop(self._update_position_file())
                self._location_count += 1
                if self._applicationArgs.last_scanned:
                    log.info('main: Set new scannedlocation in Database')
                    # self.update_scanned_location(currentLocation.lat, currentLocation.lng, curTime)
                    self._add_task_to_loop(
                        self.update_scanned_location(self.current_location.lat,
                                                     self.current_location.lng,
                                                     time_snapshot))

                try:
                    self._post_move_location_routine(time_snapshot)
                except (InternalStopWorkerException, WebsocketWorkerRemovedException, WebsocketWorkerTimeoutException) \
                    as e:
                    log.warning(
                        "Worker %s failed running post_move_location_routine, stopping worker"
                        % str(self._id))
                    break
                log.info("Worker %s finished iteration, continuing work" %
                         str(self._id))

        self._internal_cleanup()

    async def _update_position_file(self):
        log.debug("Updating .position file")
        if self.current_location is not None:
            with open(self._id + '.position', 'w') as outfile:
                outfile.write(
                    str(self.current_location.lat) + ", " +
                    str(self.current_location.lng))

    async def update_scanned_location(self, latitude, longitude, timestamp):
        try:
            self._db_wrapper.set_scanned_location(str(latitude),
                                                  str(longitude),
                                                  str(timestamp))
        except Exception as e:
            log.error("Failed updating scanned location: %s" % str(e))
            return

    def check_walker(self):
        mode = self._walker['walkertype']
        if mode == "countdown":
            log.info("Checking Walker Mode Countdown")
            countdown = self._walker['walkervalue']
            if not countdown:
                log.error(
                    "No value for walker mode - check your settings! Killing worker")
                return False
            if self._walkerstart is None:
                self._walkerstart = math.floor(time.time())
            else:
                if math.floor(time.time()) >= int(
                        self._walkerstart) + int(countdown):
                    return False
            return True
        elif mode == "timer":
            log.info("Checking Walker Mode Timer")
            exittime = self._walker['walkervalue']
            if not exittime or ':' not in exittime:
                log.error(
                    "No or wrong value for walker mode - check your settings! Killing worker"
                )
                return False
            return check_walker_value_type(exittime)
        elif mode == "round":
            log.info("Checking Walker Mode Round")
            rounds = self._walker['walkervalue']
            if len(rounds) == 0:
                log.error(
                    "No value for walker mode - check your settings! Killing worker")
                return False
            processed_rounds = self._walker_routemanager.get_rounds(self._id)
            if int(processed_rounds) >= int(rounds):
                return False
            return True
        elif mode == "period":
            log.info("Checking Walker Mode Period")
            period = self._walker['walkervalue']
            if len(period) == 0:
                log.error(
                    "No value for walker mode - check your settings! Killing worker")
                return False
            return check_walker_value_type(period)
        elif mode == "coords":
            exittime = self._walker['walkervalue']
            if len(exittime) > 0:
                return check_walker_value_type(exittime)
            return True
        elif mode == "idle":
            log.info("Checking Walker Mode Idle")
            if len(self._walker['walkervalue']) == 0:
                log.error(
                    "Wrong value for walker mode - check your settings! Killing worker"
                )
                return False
            sleeptime = self._walker['walkervalue']
            log.info('%s going to sleep' % str(self._id))
            killpogo = False
            if check_walker_value_type(sleeptime):
                self._stop_pogo()
                killpogo = True
            while (not self._stop_worker_event.is_set()
                   and check_walker_value_type(sleeptime)):
                time.sleep(1)
            log.info('%s just woke up' % str(self._id))
            if killpogo:
                self._start_pogo()
            return False
        else:
            log.error("Unknown walker mode - killing worker")
            return False

    def _internal_grab_next_location(self):
        # TODO: consider adding runWarningThreadEvent.set()
        self._last_known_state["last_location"] = self.last_location

        log.debug("Requesting next location from routemanager")
        # requesting a location is blocking (iv_mitm will wait for a prioQ item), we really need to clean
        # the workers up...
        routemanager = self._walker_routemanager
        self.current_location = routemanager.get_next_location()
        return routemanager.settings

    def _init_routine(self):
        if self._applicationArgs.initial_restart is False:
            self._turn_screen_on_and_start_pogo()
        else:
            if not self._start_pogo():
                while not self._restart_pogo():
                    log.warning("failed starting pogo")
                    # TODO: stop after X attempts

    def _check_location_is_valid(self):
        if self.current_location is None:
            # there are no more coords - so the worker has finished successfully
            self._devicesettings['finished'] = True
            return False
        log.debug('Coords are valid')
        return True

    def _turn_screen_on_and_start_pogo(self):
        if not self._communicator.isScreenOn():
            self._communicator.startApp("de.grennith.rgc.remotegpscontroller")
            log.warning("Turning screen on")
            self._communicator.turnScreenOn()
            time.sleep(self._devicesettings.get("post_turn_screen_on_delay",
                                                2))
        # check if pogo is running and start it if necessary
        log.info("turnScreenOnAndStartPogo: (Re-)Starting Pogo")
        self._start_pogo()

    def _check_screen_on(self):
        if not self._communicator.isScreenOn():
            self._communicator.startApp("de.grennith.rgc.remotegpscontroller")
            log.warning("Turning screen on")
            self._communicator.turnScreenOn()
            time.sleep(self._devicesettings.get("post_turn_screen_on_delay",
                                                2))

    def _stop_pogo(self):
        attempts = 0
        stop_result = self._communicator.stopApp("com.nianticlabs.pokemongo")
        pogoTopmost = self._communicator.isPogoTopmost()
        while pogoTopmost:
            attempts += 1
            if attempts > 10:
                return False
            stop_result = self._communicator.stopApp(
                "com.nianticlabs.pokemongo")
            time.sleep(1)
            pogoTopmost = self._communicator.isPogoTopmost()
        return stop_result

    def _reboot(self):
        try:
            start_result = self._communicator.reboot()
        except WebsocketWorkerRemovedException as e:
            log.error(
                "Could not reboot due to client already having disconnected")
            start_result = False
        time.sleep(5)
        self._db_wrapper.save_last_reboot(self._id)
        self.stop_worker()
        return start_result

    def _start_pogodroid(self):
        start_result = self._communicator.startApp("com.mad.pogodroid")
        time.sleep(5)
        return start_result

    def _stopPogoDroid(self):
        stopResult = self._communicator.stopApp("com.mad.pogodroid")
        return stopResult

    def _restart_pogo(self, clear_cache=True):
        successful_stop = self._stop_pogo()
        self._db_wrapper.save_last_restart(self._id)
        log.debug("restartPogo: stop pogo resulted in %s" %
                  str(successful_stop))
        if successful_stop:
            if clear_cache:
                self._communicator.clearAppCache("com.nianticlabs.pokemongo")
            time.sleep(1)
            return self._start_pogo()
        else:
            return False

    def _restartPogoDroid(self):
        successfulStop = self._stopPogoDroid()
        time.sleep(1)
        log.debug("restartPogoDroid: stop pogodriud resulted in %s" %
                  str(successfulStop))
        if successfulStop:
            return self._start_pogodroid()
        else:
            return False

    def _reopenRaidTab(self):
        log.debug("_reopenRaidTab: Taking screenshot...")
        log.info(
            "reopenRaidTab: Attempting to retrieve screenshot before checking raidtab"
        )
        if not self._takeScreenshot():
            log.debug("_reopenRaidTab: Failed getting screenshot...")
            log.error(
                "reopenRaidTab: Failed retrieving screenshot before checking for closebutton"
            )
            return
        log.debug("_reopenRaidTab: Checking close except nearby...")
        pathToPass = os.path.join(self._applicationArgs.temp_path,
                                  'screenshot%s.png' % str(self._id))
        log.debug("Path: %s" % str(pathToPass))
        self._pogoWindowManager.checkCloseExceptNearbyButton(
            pathToPass, self._id, self._communicator, 'True')
        log.debug("_reopenRaidTab: Getting to raidscreen...")
        self._getToRaidscreen(3)
        time.sleep(1)

    def _takeScreenshot(self, delayAfter=0.0, delayBefore=0.0):
        log.debug("Taking screenshot...")
        time.sleep(delayBefore)
        compareToTime = time.time() - self._lastScreenshotTaken
        log.debug("Last screenshot taken: %s" % str(self._lastScreenshotTaken))

        # skip taking a new screenshot if the last one is recent enough
        if self._lastScreenshotTaken and compareToTime < 0.5:
            log.debug(
                "takeScreenshot: screenshot taken recently, returning immediately"
            )
            return True

        # TODO: screenshot.png needs identifier in name
        if self._applicationArgs.use_media_projection:
            take_screenshot = self._communicator.getScreenshot(
                os.path.join(self._applicationArgs.temp_path,
                             'screenshot%s.png' % str(self._id)))
        else:
            take_screenshot = self._communicator.get_screenshot_single(
                os.path.join(self._applicationArgs.temp_path,
                             'screenshot%s.png' % str(self._id)))

        if not take_screenshot:
            log.error("takeScreenshot: Failed retrieving screenshot")
            return False
        else:
            log.debug("Success retrieving screenshot")
            self._lastScreenshotTaken = time.time()
            time.sleep(delayAfter)
            return True

    def _checkPogoFreeze(self):
        log.debug("Checking if pogo froze")
        if not self._takeScreenshot():
            log.debug("_checkPogoFreeze: failed retrieving screenshot")
            return
        from utils.image_utils import getImageHash
        screenHash = getImageHash(
            os.path.join(self._applicationArgs.temp_path,
                         'screenshot%s.png' % str(self._id)))
        log.debug("checkPogoFreeze: Old Hash: " + str(self._lastScreenHash))
        log.debug("checkPogoFreeze: New Hash: " + str(screenHash))
        if hamming_dist(str(self._lastScreenHash),
                        str(screenHash)) < 4 and str(
                            self._lastScreenHash) != '0':
            log.debug(
                "checkPogoFreeze: new and old screenshots are the same - no processing"
            )
            self._lastScreenHashCount += 1
            log.debug("checkPogoFreeze: Same Screen Count: " +
                      str(self._lastScreenHashCount))
            if self._lastScreenHashCount >= 100:
                self._lastScreenHashCount = 0
                self._restart_pogo()
        else:
            self._lastScreenHash = screenHash
            self._lastScreenHashCount = 0

            log.debug("_checkPogoFreeze: done")

    def _check_pogo_main_screen(self, maxAttempts, again=False):
        log.debug(
            "_check_pogo_main_screen: Trying to get to the Mainscreen with %s max attempts..."
            % str(maxAttempts))
        pogoTopmost = self._communicator.isPogoTopmost()
        if not pogoTopmost:
            return False

        if not self._takeScreenshot(delayBefore=self._devicesettings.get(
                "post_screenshot_delay", 1)):
            if again:
                log.error(
                    "_check_pogo_main_screen: failed getting a screenshot again"
                )
                return False
        attempts = 0

        if os.path.isdir(
                os.path.join(self._applicationArgs.temp_path,
                             'screenshot%s.png' % str(self._id))):
            log.error(
                "_check_pogo_main_screen: screenshot.png is not a file/corrupted"
            )
            return False

        log.info("_check_pogo_main_screen: checking mainscreen")
        buttoncheck = self._pogoWindowManager.lookForButton(
            os.path.join(self._applicationArgs.temp_path,
                         'screenshot%s.png' % str(self._id)), 2.20, 3.01,
            self._communicator)
        if buttoncheck:
            log.info('Found button on screen')
            self._takeScreenshot(delayBefore=self._devicesettings.get(
                "post_screenshot_delay", 1))
        while not self._pogoWindowManager.checkpogomainscreen(
                os.path.join(self._applicationArgs.temp_path,
                             'screenshot%s.png' % str(self._id)), self._id):
            log.error("_check_pogo_main_screen: not on Mainscreen...")
            if attempts > maxAttempts:
                # could not reach the main screen within the given maxAttempts
                log.error(
                    "_check_pogo_main_screen: Could not get to Mainscreen within %s attempts"
                    % str(maxAttempts))
                return False

            # not using continue since we need to get a screen before the next round...
            found = self._pogoWindowManager.lookForButton(
                os.path.join(self._applicationArgs.temp_path,
                             'screenshot%s.png' % str(self._id)), 2.20, 3.01,
                self._communicator)
            if found:
                log.info("_check_pogo_main_screen: Found button (small)")

            if not found and self._pogoWindowManager.checkCloseExceptNearbyButton(
                    os.path.join(self._applicationArgs.temp_path,
                                 'screenshot%s.png' % str(self._id)),
                    self._id,
                    self._communicator,
                    closeraid=True):
                log.info(
                    "_check_pogo_main_screen: Found (X) button (except nearby)"
                )
                found = True

            if not found and self._pogoWindowManager.lookForButton(
                    os.path.join(self._applicationArgs.temp_path,
                                 'screenshot%s.png' % str(self._id)), 1.05,
                    2.20, self._communicator):
                log.info("_check_pogo_main_screen: Found button (big)")
                found = True

            log.info(
                "_check_pogo_main_screen: Previous checks found popups: %s" %
                str(found))

            self._takeScreenshot(delayBefore=self._devicesettings.get(
                "post_screenshot_delay", 1))

            attempts += 1
        log.info("_check_pogo_main_screen: done")
        return True

    def _checkPogoButton(self):
        log.debug("checkPogoButton: Trying to find buttons")
        pogoTopmost = self._communicator.isPogoTopmost()
        if not pogoTopmost:
            return False

        if not self._takeScreenshot(delayBefore=self._devicesettings.get(
                "post_screenshot_delay", 1)):
            # TODO: again?
            # if again:
            #     log.error("checkPogoButton: failed getting a screenshot again")
            #     return False
            # TODO: throw?
            log.debug("checkPogoButton: Failed getting screenshot")
            return False

        if os.path.isdir(
                os.path.join(self._applicationArgs.temp_path,
                             'screenshot%s.png' % str(self._id))):
            log.error(
                "checkPogoButton: screenshot path is a directory, not a valid file")
            return False

        log.info("checkPogoButton: checking for buttons")
        found = self._pogoWindowManager.lookForButton(
            os.path.join(self._applicationArgs.temp_path,
                         'screenshot%s.png' % str(self._id)), 2.20, 3.01,
            self._communicator)
        if found:
            time.sleep(1)
            log.info("checkPogoButton: Found button (small)")
            log.info("checkPogoButton: done")
            return True
        log.info("checkPogoButton: done")
        return False

    def _checkPogoClose(self):
        log.debug("checkPogoClose: Trying to find closeX")
        pogoTopmost = self._communicator.isPogoTopmost()
        if not pogoTopmost:
            return False

        if not self._takeScreenshot(delayBefore=self._devicesettings.get(
                "post_screenshot_delay", 1)):
            # TODO: go again?
            # if again:
            #     log.error("checkPogoClose: failed getting a screenshot again")
            #     return False
            # TODO: consider throwing?
            log.debug("checkPogoClose: Could not get screenshot")
            return False

        if os.path.isdir(
                os.path.join(self._applicationArgs.temp_path,
                             'screenshot%s.png' % str(self._id))):
            log.error("checkPogoClose: screenshot.png is not a file/corrupted")
            return False

        log.info("checkPogoClose: checking for CloseX")
        found = self._pogoWindowManager.checkCloseExceptNearbyButton(
            os.path.join(self._applicationArgs.temp_path,
                         'screenshot%s.png' % str(self._id)), self._id,
            self._communicator)
        if found:
            time.sleep(1)
            log.info("checkPogoClose: Found (X) button (except nearby)")
            log.info("checkPogoClose: done")
            return True
        log.info("checkPogoClose: done")
        return False

    def _getToRaidscreen(self, maxAttempts, again=False):
        # check for any popups (including post login OK)
        log.debug(
            "getToRaidscreen: Trying to get to the raidscreen with %s max attempts..."
            % str(maxAttempts))
        pogoTopmost = self._communicator.isPogoTopmost()
        if not pogoTopmost:
            return False

        self._checkPogoFreeze()
        if not self._takeScreenshot(delayBefore=self._devicesettings.get(
                "post_screenshot_delay", 1)):
            if again:
                log.error("getToRaidscreen: failed getting a screenshot again")
                return False
            # retry exactly once and propagate the retry's result
            return self._getToRaidscreen(maxAttempts, True)
        log.debug("getToRaidscreen: Got screenshot, checking GPS")
        attempts = 0

        if os.path.isdir(
                os.path.join(self._applicationArgs.temp_path,
                             'screenshot%s.png' % str(self._id))):
            log.error(
                "getToRaidscreen: screenshot path is a directory, not a valid file")
            return False

        # TODO: replace self._id with device ID
        while self._pogoWindowManager.isGpsSignalLost(
                os.path.join(self._applicationArgs.temp_path,
                             'screenshot%s.png' % str(self._id)), self._id):
            log.debug("getToRaidscreen: GPS signal lost")
            time.sleep(1)
            self._takeScreenshot()
            log.warning("getToRaidscreen: GPS signal error")
            self._redErrorCount += 1
            if self._redErrorCount > 3:
                log.error(
                    "getToRaidscreen: Red error multiple times in a row, restarting"
                )
                self._redErrorCount = 0
                self._restart_pogo()
                return False
        self._redErrorCount = 0
        log.debug("getToRaidscreen: checking raidscreen")
        while not self._pogoWindowManager.checkRaidscreen(
                os.path.join(self._applicationArgs.temp_path,
                             'screenshot%s.png' % str(self._id)), self._id,
                self._communicator):
            log.debug("getToRaidscreen: not on raidscreen...")
            if attempts > maxAttempts:
                # could not reach raidtab in given maxAttempts
                log.error(
                    "getToRaidscreen: Could not get to raidtab within %s attempts"
                    % str(maxAttempts))
                return False
            self._checkPogoFreeze()
            # not using continue since we need to get a screen before the next round...
            found = self._pogoWindowManager.lookForButton(
                os.path.join(self._applicationArgs.temp_path,
                             'screenshot%s.png' % str(self._id)), 2.20, 3.01,
                self._communicator)
            if found:
                log.info("getToRaidscreen: Found button (small)")

            if not found and self._pogoWindowManager.checkCloseExceptNearbyButton(
                    os.path.join(self._applicationArgs.temp_path,
                                 'screenshot%s.png' % str(self._id)), self._id,
                    self._communicator):
                log.info("getToRaidscreen: Found (X) button (except nearby)")
                found = True

            if not found and self._pogoWindowManager.lookForButton(
                    os.path.join(self._applicationArgs.temp_path,
                                 'screenshot%s.png' % str(self._id)), 1.05,
                    2.20, self._communicator):
                log.info("getToRaidscreen: Found button (big)")
                found = True

            log.info("getToRaidscreen: Previous checks found popups: %s" %
                     str(found))
            if not found:
                log.info(
                    "getToRaidscreen: Previous checks found nothing. Checking nearby open"
                )
                if self._pogoWindowManager.checkNearby(
                        os.path.join(self._applicationArgs.temp_path,
                                     'screenshot%s.png' % str(self._id)),
                        self._id, self._communicator):
                    return self._takeScreenshot(
                        delayBefore=self._devicesettings.get(
                            "post_screenshot_delay", 1))

            if not self._takeScreenshot(delayBefore=self._devicesettings.get(
                    "post_screenshot_delay", 1)):
                return False

            attempts += 1
        log.debug("getToRaidscreen: done")
        return True

    def _get_screen_size(self):
        screen = self._communicator.getscreensize().split(' ')
        self._screen_x = screen[0]
        self._screen_y = screen[1]
        x_offset = self._devicesettings.get("screenshot_x_offset", 0)
        y_offset = self._devicesettings.get("screenshot_y_offset", 0)
        log.debug(
            'Get Screensize of %s: X: %s, Y: %s, X-Offset: %s, Y-Offset: %s' %
            (str(self._id), str(self._screen_x), str(
                self._screen_y), str(x_offset), str(y_offset)))
        self._resocalc.get_x_y_ratio(self, self._screen_x, self._screen_y,
                                     x_offset, y_offset)
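
Every method in this example rebuilds the same screenshot path inline. A small helper would remove that repetition; below is a minimal standalone sketch, assuming only the temp_path and device id already visible above (the helper name itself is hypothetical):

import os

def screenshot_path(temp_path, device_id):
    # single source of truth for a device's screenshot location
    return os.path.join(temp_path, 'screenshot%s.png' % str(device_id))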
Example #45
0
class Collection(object):
    """
    Base class for any serializable list of things.
    """
    def __init__(self, collection_mgr):
        """
        Constructor.
        """
        self.collection_mgr = collection_mgr
        self.listing = {}
        self.api = self.collection_mgr.api
        self.lite_sync = None
        self.lock = Lock()

    def __iter__(self):
        """
        Iterator for the collection.  Allows list comprehensions, etc.
        """
        for a in list(self.listing.values()):
            yield a

    def __len__(self):
        """
        Returns size of the collection.
        """
        return len(list(self.listing.values()))

    def factory_produce(self, collection_mgr, seed_data):
        """
        Must be overridden in a subclass.  factory_produce returns an Item
        object built from a dict.
        """
        raise NotImplementedException()

    def remove(self,
               name,
               with_delete=True,
               with_sync=True,
               with_triggers=True,
               recursive=False,
               logger=None):
        """
        Remove an item from the collection. This method must be overridden in any subclass.

        @param: str name (item name)
        @param: bool with_delete (sync and run triggers)
        @param: bool with_sync (sync to server file system)
        @param: bool with_triggers (run "on delete" triggers)
        @param: bool recursive (recursively delete children)
        @param: clogger logger (logger object)
        @raises: NotImplementedException
        """
        raise NotImplementedException()

    def get(self, name):
        """
        Return object with name in the collection
        """
        return self.listing.get(name.lower(), None)

    def find(self, name=None, return_list=False, no_errors=False, **kargs):
        """
        Return the first object in the collection that matches all of the
        item='value' pairs passed, or None if no object can be found.
        When return_list is set, a list of all matches is returned instead;
        an empty list is returned in place of None in that case.
        """
        matches = []

        # support the old style invocation without kwargs
        if name is not None:
            kargs["name"] = name

        kargs = self.__rekey(kargs)

        # no arguments is an error, so we don't return a false match
        if len(kargs) == 0:
            raise CX(_("calling find with no arguments"))

        # performance: if the only key is name we can skip the whole loop
        if len(kargs) == 1 and "name" in kargs and not return_list:
            try:
                return self.listing.get(kargs["name"].lower(), None)
            except AttributeError:
                return self.listing.get(kargs["name"], None)

        self.lock.acquire()
        try:
            for (name, obj) in list(self.listing.items()):
                if obj.find_match(kargs, no_errors=no_errors):
                    matches.append(obj)
        finally:
            self.lock.release()

        if not return_list:
            if len(matches) == 0:
                return None
            return matches[0]
        else:
            return matches

    SEARCH_REKEY = {
        'kopts': 'kernel_options',
        'kopts_post': 'kernel_options_post',
        'inherit': 'parent',
        'ip': 'ip_address',
        'mac': 'mac_address',
        'virt-auto-boot': 'virt_auto_boot',
        'virt-file-size': 'virt_file_size',
        'virt-disk-driver': 'virt_disk_driver',
        'virt-ram': 'virt_ram',
        'virt-path': 'virt_path',
        'virt-type': 'virt_type',
        'virt-bridge': 'virt_bridge',
        'virt-cpus': 'virt_cpus',
        'virt-host': 'virt_host',
        'virt-group': 'virt_group',
        'dhcp-tag': 'dhcp_tag',
        'netboot-enabled': 'netboot_enabled',
    }

    def __rekey(self, _dict):
        """
        Find calls from the command line ("cobbler system find")
        don't always use the same keys as the data structures; this
        remapping makes them line up without breaking compatibility
        with either.  Thankfully we don't have a LOT to remap.
        """
        new_dict = {}
        for x in list(_dict.keys()):
            if x in self.SEARCH_REKEY:
                newkey = self.SEARCH_REKEY[x]
                new_dict[newkey] = _dict[x]
            else:
                new_dict[x] = _dict[x]
        return new_dict

    def to_list(self):
        """
        Serialize the collection
        """
        _list = [x.to_dict() for x in list(self.listing.values())]
        return _list

    def from_list(self, _list):
        if _list is None:
            return
        for item_dict in _list:
            item = self.factory_produce(self.collection_mgr, item_dict)
            self.add(item)

    def copy(self, ref, newname, logger=None):
        ref = ref.make_clone()
        ref.uid = self.collection_mgr.generate_uid()
        ref.ctime = 0
        ref.set_name(newname)
        if ref.COLLECTION_TYPE == "system":
            # this should only happen for systems
            for iname in list(ref.interfaces.keys()):
                # clear all these out to avoid DHCP/DNS conflicts
                ref.set_dns_name("", iname)
                ref.set_mac_address("", iname)
                ref.set_ip_address("", iname)

        self.add(ref,
                 save=True,
                 with_copy=True,
                 with_triggers=True,
                 with_sync=True,
                 check_for_duplicate_names=True,
                 check_for_duplicate_netinfo=False)

    def rename(self,
               ref,
               newname,
               with_sync=True,
               with_triggers=True,
               logger=None):
        """
        Allows an object "ref" to be given a newname without affecting the rest
        of the object tree.
        """
        # Nothing to do when it is the same name
        if newname == ref.name:
            return

        # make a copy of the object, but give it a new name.
        oldname = ref.name
        newref = ref.make_clone()
        newref.set_name(newname)

        self.add(newref, with_triggers=with_triggers, save=True)

        # for mgmt classes, update all objects that use it
        if ref.COLLECTION_TYPE == "mgmtclass":
            for what in ["distro", "profile", "system"]:
                items = self.api.find_items(what, {"mgmt_classes": oldname})
                for item in items:
                    for i in range(0, len(item.mgmt_classes)):
                        if item.mgmt_classes[i] == oldname:
                            item.mgmt_classes[i] = newname
                    self.api.add_item(what, item, save=True)

        # for a repo, rename the mirror directory
        if ref.COLLECTION_TYPE == "repo":
            path = "/var/www/cobbler/repo_mirror/%s" % ref.name
            if os.path.exists(path):
                newpath = "/var/www/cobbler/repo_mirror/%s" % newref.name
                os.renames(path, newpath)

        # for a distro, rename the mirror and references to it
        if ref.COLLECTION_TYPE == 'distro':
            path = utils.find_distro_path(self.api.settings(), ref)

            # create a symlink for the new distro name
            utils.link_distro(self.api.settings(), newref)

            # test to see if the distro path is based directly
            # on the name of the distro. If it is, things need
            # to updated accordingly
            if os.path.exists(
                    path
            ) and path == "/var/www/cobbler/distro_mirror/%s" % ref.name:
                newpath = "/var/www/cobbler/distro_mirror/%s" % newref.name
                os.renames(path, newpath)

                # update any reference to this path ...
                distros = self.api.distros()
                for d in distros:
                    if d.kernel.find(path) == 0:
                        d.set_kernel(d.kernel.replace(path, newpath))
                        d.set_initrd(d.initrd.replace(path, newpath))
                        self.collection_mgr.serialize_item(self, d)

        # now descend to any direct ancestors and point them at the new object allowing
        # the original object to be removed without orphanage.  Direct ancestors
        # will either be profiles or systems.  Note that we do have to care as
        # set_parent is only really meaningful for subprofiles. We ideally want a more
        # generic set_parent.
        kids = ref.get_children()
        for k in kids:
            if k.COLLECTION_TYPE == "distro":
                raise CX(
                    _("internal error, not expected to have distro child objects"
                      ))
            elif k.COLLECTION_TYPE == "profile":
                if k.parent != "":
                    k.set_parent(newname)
                else:
                    k.set_distro(newname)
                self.api.profiles().add(k,
                                        save=True,
                                        with_sync=with_sync,
                                        with_triggers=with_triggers)
            elif k.COLLECTION_TYPE == "system":
                k.set_profile(newname)
                self.api.systems().add(k,
                                       save=True,
                                       with_sync=with_sync,
                                       with_triggers=with_triggers)
            elif k.COLLECTION_TYPE == "repo":
                raise CX(
                    _("internal error, not expected to have repo child objects"
                      ))
            else:
                raise CX(
                    _("internal error, unknown child type (%s), cannot finish rename"
                      % k.COLLECTION_TYPE))

        # now delete the old version
        self.remove(oldname, with_delete=True, with_triggers=with_triggers)
        return

    def add(self,
            ref,
            save=False,
            with_copy=False,
            with_triggers=True,
            with_sync=True,
            quick_pxe_update=False,
            check_for_duplicate_names=False,
            check_for_duplicate_netinfo=False,
            logger=None):
        """
        Add an object to the collection

        with_copy is a bit of a misnomer, but lots of internal add operations
        can run with "with_copy" as False. True means a real final commit, as if
        entered from the command line (or basically, by a user).

        With with_copy as False, the particular add call might just be being run
        during deserialization, in which case extra semantics around the add don't really apply.
        So, in that case, don't run any triggers and don't deal with any actual files.
        """
        item_base.Item.remove_from_cache(ref)
        if ref is None:
            raise CX("Unable to add a None object")
        if ref.name is None:
            raise CX("Unable to add an object without a name")

        ref.check_if_valid()

        if ref.uid == '':
            ref.uid = self.collection_mgr.generate_uid()

        if save is True:
            now = time.time()
            if ref.ctime == 0:
                ref.ctime = now
            ref.mtime = now

        if self.lite_sync is None:
            self.lite_sync = litesync.CobblerLiteSync(self.collection_mgr,
                                                      logger=logger)

        # migration path for old API parameter that I've renamed.
        if with_copy and not save:
            save = with_copy

        if not save:
            # for people that aren't quite aware of the API
            # if not saving the object, you can't run these features
            with_triggers = False
            with_sync = False

        # Avoid adding objects to the collection
        # if an object with the same name, IP, or MAC already exists.
        self.__duplication_checks(ref, check_for_duplicate_names,
                                  check_for_duplicate_netinfo)

        if ref.COLLECTION_TYPE != self.collection_type():
            raise CX(_("API error: storing wrong data type in collection"))

        # failure of a pre trigger will prevent the object from being added
        if save and with_triggers:
            utils.run_triggers(
                self.api, ref, "/var/lib/cobbler/triggers/add/%s/pre/*" %
                self.collection_type())

        self.lock.acquire()
        try:
            self.listing[ref.name.lower()] = ref
        finally:
            self.lock.release()

        # perform filesystem operations
        if save:
            # save just this item if possible, if not, save
            # the whole collection
            self.collection_mgr.serialize_item(self, ref)

            if with_sync:
                if isinstance(ref, system.System):
                    # we don't need openvz containers to be network bootable
                    if ref.virt_type == "openvz":
                        ref.netboot_enabled = False
                    self.lite_sync.add_single_system(ref.name)
                elif isinstance(ref, profile.Profile):
                    # we don't need openvz containers to be network bootable
                    if ref.virt_type == "openvz":
                        ref.enable_menu = 0
                    self.lite_sync.add_single_profile(ref.name)
                elif isinstance(ref, distro.Distro):
                    self.lite_sync.add_single_distro(ref.name)
                elif isinstance(ref, image.Image):
                    self.lite_sync.add_single_image(ref.name)
                elif isinstance(ref, repo.Repo):
                    pass
                elif isinstance(ref, mgmtclass.Mgmtclass):
                    pass
                elif isinstance(ref, package.Package):
                    pass
                elif isinstance(ref, file.File):
                    pass
                else:
                    print(
                        _("Internal error. Object type not recognized: %s") %
                        type(ref))
            if not with_sync and quick_pxe_update:
                if isinstance(ref, system.System):
                    self.lite_sync.update_system_netboot_status(ref.name)

            # save the tree, so if necessary, scripts can examine it.
            if with_triggers:
                utils.run_triggers(self.api, ref,
                                   "/var/lib/cobbler/triggers/change/*", [],
                                   logger)
                utils.run_triggers(
                    self.api, ref, "/var/lib/cobbler/triggers/add/%s/post/*" %
                    self.collection_type(), [], logger)

        # update children cache in parent object
        parent = ref.get_parent()
        if parent is not None:
            parent.children[ref.name] = ref

    def __duplication_checks(self, ref, check_for_duplicate_names,
                             check_for_duplicate_netinfo):
        """
        Prevents adding objects with the same name.
        Prevents an add or edit from introducing a duplicate IP or MAC.
        Enforcement depends on whether the API caller requests it.
        """
        # always protect against duplicate names
        if check_for_duplicate_names:
            match = None
            if isinstance(ref, system.System):
                match = self.api.find_system(ref.name)
            elif isinstance(ref, profile.Profile):
                match = self.api.find_profile(ref.name)
            elif isinstance(ref, distro.Distro):
                match = self.api.find_distro(ref.name)
            elif isinstance(ref, repo.Repo):
                match = self.api.find_repo(ref.name)
            elif isinstance(ref, image.Image):
                match = self.api.find_image(ref.name)
            elif isinstance(ref, mgmtclass.Mgmtclass):
                match = self.api.find_mgmtclass(ref.name)
            elif isinstance(ref, package.Package):
                match = self.api.find_package(ref.name)
            elif isinstance(ref, file.File):
                match = self.api.find_file(ref.name)
            else:
                raise CX("internal error, unknown object type")

            if match:
                raise CX(
                    _("An object already exists with that name.  Try 'edit'?"))

        # the duplicate mac/ip checks can be disabled.
        if not check_for_duplicate_netinfo:
            return

        if isinstance(ref, system.System):
            for (name, intf) in list(ref.interfaces.items()):
                match_ip = []
                match_mac = []
                match_hosts = []
                input_mac = intf["mac_address"]
                input_ip = intf["ip_address"]
                input_dns = intf["dns_name"]
                if not self.api.settings(
                ).allow_duplicate_macs and input_mac is not None and input_mac != "":
                    match_mac = self.api.find_system(mac_address=input_mac,
                                                     return_list=True)
                if not self.api.settings(
                ).allow_duplicate_ips and input_ip is not None and input_ip != "":
                    match_ip = self.api.find_system(ip_address=input_ip,
                                                    return_list=True)
                # it's ok to conflict with your own net info.

                if not self.api.settings(
                ).allow_duplicate_hostnames and input_dns is not None and input_dns != "":
                    match_hosts = self.api.find_system(dns_name=input_dns,
                                                       return_list=True)

                for x in match_mac:
                    if x.name != ref.name:
                        raise CX(
                            _("Can't save system %s. The MAC address (%s) is already used by system %s (%s)"
                              ) %
                            (ref.name, intf["mac_address"], x.name, name))
                for x in match_ip:
                    if x.name != ref.name:
                        raise CX(
                            _("Can't save system %s. The IP address (%s) is already used by system %s (%s)"
                              ) % (ref.name, intf["ip_address"], x.name, name))
                for x in match_hosts:
                    if x.name != ref.name:
                        raise CX(
                            _("Can't save system %s.  The dns name (%s) is already used by system %s (%s)"
                              ) % (ref.name, intf["dns_name"], x.name, name))

    def to_string(self):
        """
        Creates a printable representation of the collection suitable
        for reading by humans or parsing from scripts.  Actually scripts
        would be better off reading the JSON in the cobbler_collections files
        directly.
        """
        values = list(self.listing.values())[:]  # copy the values
        values.sort()  # sort the copy (2.3 fix)
        results = []
        for i, v in enumerate(values):
            results.append(v.to_string())
        if len(values) > 0:
            return "\n\n".join(results)
        else:
            return _("No objects found")

    def collection_type(self):
        """
        Returns the string key for the name of the collection (for use in messages for humans)
        """
        raise NotImplementedException()
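
A brief usage sketch of find() and the SEARCH_REKEY mapping above. The api handle, the systems() accessor, and the item names are hypothetical; only the find() semantics come from the class itself:

systems = api.systems()                     # assumed Collection subclass
one = systems.find(name="web01")            # fast path: name-only lookup
many = systems.find(return_list=True,       # list of every match, [] if none
                    kopts="console=ttyS0")  # __rekey maps kopts -> kernel_options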
Example #46
0
class DaemonPantsRunner(RawFdRunner):
    """A RawFdRunner (callable) that will be called for each client request to Pantsd."""
    def __init__(self, core: PantsDaemonCore) -> None:
        super().__init__()
        self._core = core
        self._run_lock = Lock()

    @staticmethod
    def _send_stderr(stderr_fd: int, msg: str) -> None:
        """Used to send stderr on a raw filehandle _before_ stdio replacement.

        After stdio replacement has happened via `stdio_as` (which mutates sys.std*, and thus cannot
        happen until the request lock has been acquired), sys.std* should be used directly.
        """
        with os.fdopen(stderr_fd, mode="w", closefd=False) as stderr:
            print(msg, file=stderr, flush=True)

    @contextmanager
    def _one_run_at_a_time(self, stderr_fd: int, timeout: float):
        """Acquires exclusive access within the daemon.

        Periodically prints a message on the given stderr_fd while exclusive access cannot be
        acquired.
        """

        should_poll_forever = timeout <= 0
        start = time.time()
        deadline = None if should_poll_forever else start + timeout

        def should_keep_polling(now):
            return not deadline or deadline > now

        acquired = self._run_lock.acquire(blocking=False)
        if not acquired:
            # If we don't acquire immediately, send an explanation.
            length = "forever" if should_poll_forever else "up to {} seconds".format(
                timeout)
            self._send_stderr(
                stderr_fd,
                f"Another pants invocation is running. Will wait {length} for it to finish before giving up.\n"
                "If you don't want to wait for the first run to finish, please press Ctrl-C and run "
                "this command with PANTS_CONCURRENT=True in the environment.\n",
            )
        while True:
            now = time.time()
            if acquired:
                try:
                    yield
                    break
                finally:
                    self._run_lock.release()
            elif should_keep_polling(now):
                self._send_stderr(
                    stderr_fd,
                    f"Waiting for invocation to finish (waited for {int(now - start)}s so far)...\n",
                )
                acquired = self._run_lock.acquire(blocking=True, timeout=5)
            else:
                raise ExclusiveRequestTimeout(
                    "Timed out while waiting for another pants invocation to finish."
                )

    @contextmanager
    def _stderr_logging(self, global_bootstrap_options):
        """Temporarily replaces existing handlers (ie, the pantsd handler) with a stderr handler.

        In the context of pantsd, there will be an existing handler for the pantsd log, which we
        temporarily replace. Making them additive would cause per-run logs to go to pantsd, which
        we don't want.

        TODO: It would be good to handle logging destinations entirely via the threadlocal state
        rather than via handler mutations.
        """
        handlers = get_logging_handlers()
        try:
            clear_logging_handlers()
            Native().override_thread_logging_destination_to_just_stderr()
            setup_logging(global_bootstrap_options)
            yield
        finally:
            Native().override_thread_logging_destination_to_just_pantsd()
            set_logging_handlers(handlers)

    def _run(self, working_dir: str) -> ExitCode:
        """Run a single daemonized run of Pants.

        All aspects of the `sys` global should already have been replaced in `__call__`, so this
        method should not need any special handling for the fact that it's running in a proxied
        environment.
        """

        # Capture the client's start time, which we propagate here in order to get an accurate
        # view of total time.
        env_start_time = os.environ.get("PANTSD_RUNTRACKER_CLIENT_START_TIME",
                                        None)
        start_time = float(env_start_time) if env_start_time else time.time()

        # Clear global mutable state before entering `LocalPantsRunner`. Note that we use
        # `sys.argv` and `os.environ`, since they have been mutated to maintain the illusion
        # of a local run: once we allow for concurrent runs, this information should be
        # propagated down from the caller.
        #   see https://github.com/pantsbuild/pants/issues/7654
        clean_global_runtime_state(reset_subsystem=True)
        options_bootstrapper = OptionsBootstrapper.create(env=os.environ,
                                                          args=sys.argv)
        bootstrap_options = options_bootstrapper.bootstrap_options
        global_bootstrap_options = bootstrap_options.for_global_scope()

        # Run using the pre-warmed Session.
        with self._stderr_logging(global_bootstrap_options):
            try:
                scheduler = self._core.prepare_scheduler(options_bootstrapper)
                runner = LocalPantsRunner.create(os.environ,
                                                 options_bootstrapper,
                                                 scheduler=scheduler)
                return runner.run(start_time)
            except Exception as e:
                logger.exception(e)
                return PANTS_FAILED_EXIT_CODE
            except KeyboardInterrupt:
                print("Interrupted by user.\n", file=sys.stderr)
                return PANTS_FAILED_EXIT_CODE

    def __call__(
        self,
        command: str,
        args: Tuple[str, ...],
        env: Dict[str, str],
        working_directory: bytes,
        stdin_fd: int,
        stdout_fd: int,
        stderr_fd: int,
    ) -> ExitCode:
        request_timeout = float(env.get("PANTSD_REQUEST_TIMEOUT_LIMIT", -1))
        # NB: Order matters: we acquire a lock before mutating either `sys.std*`, `os.environ`, etc.
        with self._one_run_at_a_time(
                stderr_fd, timeout=request_timeout), stdio_as(
                    stdin_fd=stdin_fd,
                    stdout_fd=stdout_fd,
                    stderr_fd=stderr_fd), hermetic_environment_as(
                        **env), argv_as((command, ) + args):
            # NB: Run implements exception handling, so only the most primitive errors will escape
            # this function, where they will be logged to the pantsd.log by the server.
            logger.info(f"handling request: `{' '.join(args)}`")
            try:
                return self._run(working_directory.decode())
            finally:
                logger.info(f"request completed: `{' '.join(args)}`")
Example #47
0

def measureTime(title):
    global beginTime
    global endTime
    # Measure time
    endTime = time.process_time()
    timeLast = endTime - beginTime
    print(title + ". Time: " + str(timeLast))
    beginTime = endTime


while (True):

    print("Start processing a new frame")
    mutex.acquire()
    frame = globalFrame
    mutex.release()

    # Display the origin frame
    cv2.imshow('origin', frame)
    cv2.waitKey(10)

    # Get key from keyboard
    key = cv2.waitKey(10)

    if key == 13:
        try:

            transformMatrices = board_detector.detect(frame)
            result = board_detector.getCropImage(frame, transformMatrices)
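
The loop above consumes globalFrame under mutex, but the producer side is not shown in this excerpt. A minimal sketch of the assumed capture thread, under the assumption that a cv2.VideoCapture feeds the shared frame (the names mirror the snippet above):

import threading
import cv2

mutex = threading.Lock()
globalFrame = None

def capture_loop(device=0):
    global globalFrame
    cap = cv2.VideoCapture(device)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        with mutex:  # same lock the display loop acquires
            globalFrame = frame

threading.Thread(target=capture_loop, daemon=True).start()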
Example #48
0
from threading import Lock
lock = Lock()


def do_something_dangerous():
    lock.acquire()
    raise Exception('oops I forgot this code could raise exceptions')
    lock.release()


try:
    do_something_dangerous()
except:
    print('Got an exception')
lock.acquire()
print('Got here')
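
The snippet above deadlocks on the final acquire() because the exception skips release(). Acquiring the lock through a context manager (or try/finally) guarantees the release even when the body raises; a corrected sketch:

from threading import Lock
lock = Lock()

def do_something_dangerous():
    with lock:  # released automatically, even when the body raises
        raise Exception('oops I forgot this code could raise exceptions')

try:
    do_something_dangerous()
except Exception:
    print('Got an exception')
lock.acquire()       # no longer deadlocks
print('Got here')    # now reachable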
Example #49
0
class CCIK(object):
    def __init__(self):
        #Load robot from parameter server
        self.robot = URDF.from_parameter_server()

        #Subscribe to current joint state of the robot
        rospy.Subscriber('/joint_states', JointState, self.get_joint_state)

        #This will load information about the joints of the robot
        self.num_joints = 0
        self.joint_names = []
        self.q_current = []
        self.joint_axes = []
        self.get_joint_info()

        #This is a mutex
        self.mutex = Lock()

        #Subscribers and publishers for for cartesian control
        rospy.Subscriber('/cartesian_command', CartesianCommand,
                         self.get_cartesian_command)
        self.velocity_pub = rospy.Publisher('/joint_velocities',
                                            JointState,
                                            queue_size=10)
        self.joint_velocity_msg = JointState()

        #Subscribers and publishers for numerical IK
        rospy.Subscriber('/ik_command', Transform, self.get_ik_command)
        self.joint_command_pub = rospy.Publisher('/joint_command',
                                                 JointState,
                                                 queue_size=10)
        self.joint_command_msg = JointState()

    '''This is a function which will collect information about the robot which
	   has been loaded from the parameter server. It will populate the variables
	   self.num_joints (the number of joints), self.joint_names and
	   self.joint_axes (the axes around which the joints rotate)'''

    def get_joint_info(self):
        link = self.robot.get_root()
        while True:
            if link not in self.robot.child_map: break
            (joint_name, next_link) = self.robot.child_map[link][0]
            current_joint = self.robot.joint_map[joint_name]
            if current_joint.type != 'fixed':
                self.num_joints = self.num_joints + 1
                self.joint_names.append(current_joint.name)
                self.joint_axes.append(current_joint.axis)
            link = next_link

    '''This is the callback which will be executed when the cartesian control
	   receives a new command. The command will contain information about the
	   secondary objective and the target q0. At the end of this callback, 
	   you should publish to the /joint_velocities topic.'''

    def get_cartesian_command(self, command):
        self.mutex.acquire()
        #--------------------------------------------------------------------------
        #FILL IN YOUR PART OF THE CODE FOR CARTESIAN CONTROL HERE

        joint_transforms, b_T_ee_curr = self.forward_kinematics(self.q_current)
        b_T_ee_des_T = tf.transformations.translation_matrix(
            (command.x_target.translation.x, command.x_target.translation.y,
             command.x_target.translation.z))
        # print("desired T in base coordinate",b_T_ee_des_T)
        b_T_ee_des_R = tf.transformations.quaternion_matrix([
            command.x_target.rotation.x, command.x_target.rotation.y,
            command.x_target.rotation.z, command.x_target.rotation.w
        ])
        # print("desired R in base coordinate ",b_T_ee_des_R)
        b_T_ee_des = numpy.dot(b_T_ee_des_T, b_T_ee_des_R)
        # print("desired T&R in base coordinate ",b_T_ee_des)
        ee_curr_T_b = numpy.linalg.inv(b_T_ee_curr)
        curr_T_des = ee_curr_T_b.dot(b_T_ee_des)
        # print("desired T in ee coordinate ",curr_T_des)
        translation_ee = tf.transformations.translation_from_matrix(curr_T_des)
        rotation_ee = self.rotation_from_matrix(curr_T_des)
        rotation_ee = rotation_ee[0] * rotation_ee[1]
        # print(translation_ee,"translation in ee")
        # print("start here",rotation_ee,"rotation in ee")
        # print("Vee",curr_T_des)
        t_max = numpy.abs(translation_ee).max(0)
        r_max = numpy.abs(rotation_ee).max(0)
        if t_max > 0.1:
            translation_ee = (0.1 / t_max) * translation_ee
        if r_max > 2:
            rotation_ee = (1 / r_max) * rotation_ee
        Vee = numpy.array([translation_ee, rotation_ee]).reshape(6)

        J = self.get_jacobian(b_T_ee_curr, joint_transforms)
        J_plus = numpy.linalg.pinv(J)
        q_desired = J_plus.dot(Vee)
        if command.secondary_objective:
            ps = 5
            q_sec = numpy.zeros(self.num_joints)
            q_sec[0] = ps * (command.q0_target - self.q_current[0])
            J_plus_dot_J = J_plus.dot(J)
            q_null = (numpy.identity(J_plus_dot_J.shape[1]) - J_plus_dot_J)
            q_null = q_null.dot(q_sec)
            q_desired += q_null
        #q_desired = q_desired / (numpy.linalg.norm(q_desired))
        q_max = numpy.abs(q_desired).max(0)
        gain = 1
        if q_max > gain:
            q_desired = (gain / q_max) * q_desired
        velocity = JointState()
        velocity.name = self.joint_names
        velocity.velocity = q_desired

        # print(q_desired)
        self.velocity_pub.publish(velocity)
        #--------------------------------------------------------------------------
        self.mutex.release()

    # returns the 3x3 skew-symmetric matrix of a vector
    def skew(self, x):
        return numpy.array([[0, -x[2], x[1]], [x[2], 0, -x[0]],
                            [-x[1], x[0], 0]])

    def get_jacobian(self, b_T_ee_curr, joint_transforms):
        J = numpy.zeros((6, self.num_joints))
        #--------------------------------------------------------------------------
        #FILL IN YOUR PART OF THE CODE FOR ASSEMBLING THE CURRENT JACOBIAN HERE
        #for each joint
        i = 0
        for each in joint_transforms:
            # print(self.joint_names[i])
            b_T_J = each
            J_T_ee = numpy.linalg.inv(each).dot(b_T_ee_curr)
            ee_T_J = numpy.linalg.inv(J_T_ee)
            B_R_A = ee_T_J[0:3, 0:3]
            A_t_B = J_T_ee[0:3, 3:4]
            axis = numpy.array([0.0, 0.0, 0.0])
            axis = numpy.append(axis, self.joint_axes[i])
            axis = numpy.expand_dims(axis.reshape(6), 1)
            V_J = numpy.zeros((6, 6))
            V_J[0:3, 0:3], V_J[3:6, 3:6] = B_R_A, B_R_A
            V_J[0:3, 3:6] = -1 * B_R_A.dot(self.skew(A_t_B))
            J[:, i] = V_J.dot(axis).flatten()

            i += 1
        #--------------------------------------------------------------------------
        return J

    '''This is the callback which will be executed when the inverse kinematics
	   receives a new command. The command will contain information about the desired
	   end effector pose relative to the root of your robot. At the end of this
	   callback, you should publish to the /joint_command topic. This should not
	   search for a solution indefinitely - there should be a time limit. When
	   searching for two matrices which are the same, we expect numerical
	   precision of 10e-3.
	'''

    def get_ik_command(self, command):
        self.mutex.acquire()
        #start timer
        # print("start")
        command_cc = CartesianCommand()
        command_cc.x_target.translation = command.translation
        command_cc.x_target.rotation = command.rotation
        b_T_ee_des_T = tf.transformations.translation_matrix(
            (command.translation.x, command.translation.y,
             command.translation.z))
        # print("desired T in base coordinate",b_T_ee_des_T)
        b_T_ee_des_R = tf.transformations.quaternion_matrix([
            command.rotation.x, command.rotation.y, command.rotation.z,
            command.rotation.w
        ])
        # print("desired R in base coordinate ",b_T_ee_des_R)
        b_T_ee_des = numpy.dot(b_T_ee_des_T, b_T_ee_des_R)
        p = 1
        for i in range(3):
            #initial random q
            time_out = 10  #10 sec
            time_start = time.time()
            self.q_current = random.sample(numpy.arange(0.0, 2 * 3.14, 0.1),
                                           self.num_joints)
            joint_transforms, b_T_ee_curr = self.forward_kinematics(
                self.q_current)
            error = 1
            #(time.time() - time_start < time_out) and
            while (time.time() - time_start < time_out) and (error > 0.01):
                # print("while loop start")
                joint_transforms, b_T_ee_curr = self.forward_kinematics(
                    self.q_current)
                # print("start cartisain command")
                ee_curr_T_b = numpy.linalg.inv(b_T_ee_curr)
                curr_T_des = ee_curr_T_b.dot(b_T_ee_des)
                # print("desired T in ee coordinate ",curr_T_des)
                translation_ee = tf.transformations.translation_from_matrix(
                    curr_T_des)
                rotation_ee = self.rotation_from_matrix(curr_T_des)
                rotation_ee = rotation_ee[0] * rotation_ee[1]
                Vee = numpy.array([translation_ee, rotation_ee]).reshape(6)
                J = self.get_jacobian(b_T_ee_curr, joint_transforms)
                J_plus = numpy.linalg.pinv(J)
                q_desired = J_plus.dot(Vee)
                velocity = JointState()
                velocity.name = self.joint_names
                velocity.velocity = q_desired
                # print("caculate q_curent intergration")
                self.q_current += p * q_desired
                error = numpy.max(numpy.abs(b_T_ee_curr - b_T_ee_des))
                print(i, error, (time.time() - time_start))
            if error < 0.01:
                q = JointState()
                q.name = self.joint_names
                q.position = self.q_current
                self.joint_command_pub.publish(q)
                # print("published")
                break
        # print("quit")
        self.mutex.release()

    '''This function will return the angle-axis representation of the rotation
	   contained in the input matrix. Use like this: 
	   angle, axis = rotation_from_matrix(R)'''

    def rotation_from_matrix(self, matrix):
        # print(self.joint_names)
        R = numpy.array(matrix, dtype=numpy.float64, copy=False)
        R33 = R[:3, :3]
        # axis: unit eigenvector of R33 corresponding to eigenvalue of 1
        l, W = numpy.linalg.eig(R33.T)
        i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
        if not len(i):
            raise ValueError(
                "no unit eigenvector corresponding to eigenvalue 1")
        axis = numpy.real(W[:, i[-1]]).squeeze()
        # point: unit eigenvector of R33 corresponding to eigenvalue of 1
        l, Q = numpy.linalg.eig(R)
        i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
        if not len(i):
            raise ValueError(
                "no unit eigenvector corresponding to eigenvalue 1")
        # rotation angle depending on axis
        cosa = (numpy.trace(R33) - 1.0) / 2.0
        if abs(axis[2]) > 1e-8:
            sina = (R[1, 0] + (cosa - 1.0) * axis[0] * axis[1]) / axis[2]
        elif abs(axis[1]) > 1e-8:
            sina = (R[0, 2] + (cosa - 1.0) * axis[0] * axis[2]) / axis[1]
        else:
            sina = (R[2, 1] + (cosa - 1.0) * axis[1] * axis[2]) / axis[0]
        angle = math.atan2(sina, cosa)
        return angle, axis

    '''This is the function which will perform forward kinematics for your 
	   cartesian control and inverse kinematics functions. It takes as input
	   joint values for the robot and will return an array of 4x4 transforms
	   from the base to each joint of the robot, as well as the transform from
	   the base to the end effector.
	   Usage: joint_transforms, b_T_ee = self.forward_kinematics(joint_values)'''


    def forward_kinematics(self, joint_values):
        joint_transforms = []

        link = self.robot.get_root()
        T = tf.transformations.identity_matrix()

        while True:
            if link not in self.robot.child_map:
                break

            (joint_name, next_link) = self.robot.child_map[link][0]
            joint = self.robot.joint_map[joint_name]

            T_l = numpy.dot(
                tf.transformations.translation_matrix(joint.origin.xyz),
                tf.transformations.euler_matrix(joint.origin.rpy[0],
                                                joint.origin.rpy[1],
                                                joint.origin.rpy[2]))
            T = numpy.dot(T, T_l)

            if joint.type != "fixed":
                joint_transforms.append(T)
                q_index = self.joint_names.index(joint_name)
                T_j = tf.transformations.rotation_matrix(
                    joint_values[q_index], numpy.asarray(joint.axis))
                T = numpy.dot(T, T_j)

            link = next_link
        return joint_transforms, T  #where T = b_T_ee

    '''This is the callback which will receive and store the current robot
	   joint states.'''

    def get_joint_state(self, msg):
        self.mutex.acquire()
        self.q_current = []
        for name in self.joint_names:
            self.q_current.append(msg.position[msg.name.index(name)])
        self.mutex.release()
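
The heart of get_cartesian_command is the Jacobian pseudoinverse mapping from a desired end-effector twist to joint velocities, with the secondary objective pushed through the nullspace projector. A standalone numpy sketch of just that step, assuming J (6 x n) and Vee (length 6) as inputs:

import numpy

def cartesian_to_joint_velocities(J, Vee, q_secondary=None):
    J_plus = numpy.linalg.pinv(J)
    q_dot = J_plus.dot(Vee)                             # primary task
    if q_secondary is not None:
        N = numpy.identity(J.shape[1]) - J_plus.dot(J)  # nullspace projector
        q_dot += N.dot(q_secondary)                     # leaves the primary task untouched
    return q_dot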
Example #50
0
class AffinityManager:
    """
    Manages session cookies by host / port. Safe for use across threads
    or forks in the server. Assumes that at most one session will be
    required per host:port combination at a time.
    """
    def __init__(self, logger):
        self.logger = logger
        self.queue_lock = Lock()
        self.session_lock = Lock()
        self.pending_sessions = {}
        self.sessions = {}

    def session_key(self, host):
        """
        Returns a key for the given session
        """
        return "%s" % (host)

    def queue_session(self, host, handler):
        """
        Adds an entry to the structure indicating that the next request
        should result in an affinity sessions cookie
        """
        self.queue_lock.acquire()
        try:
            # Get the session key
            key = self.session_key(host)

            # If there is an active or queued session, remove them
            if key in self.pending_sessions:
                handler.log_debug(
                    "Start affinity session request already exists - removing")
                del self.pending_sessions[key]
            self.session_lock.acquire()
            try:
                if key in self.sessions:
                    handler.log_debug(
                        "Active affinity session already exists - removing")
                    del self.sessions[key]
            finally:
                self.session_lock.release()

            # Add the session key to the queued list
            self.pending_sessions[key] = True
            handler.log_debug("Affinity session request queued for %s", key)
        finally:
            self.queue_lock.release()

    def start_session(self, host, headers, handler):
        """
        Initialises a proxy affinity session for a given host and port
        """
        self.session_lock.acquire()
        self.queue_lock.acquire()
        try:
            # Get the session key
            key = self.session_key(host)

            # Check if there is a pending session
            if key in self.pending_sessions:
                # Remove from the queue
                del self.pending_sessions[key]

                # Register the new session
                cookie = headers.get('set-cookie')
                if cookie:
                    self.sessions[key] = cookie
                    handler.log_debug("Affinity session started")
                else:
                    handler.log_message("Affinity session could not start")
        finally:
            self.queue_lock.release()
            self.session_lock.release()

    def end_session(self, host, handler):
        """
        Ends a proxy affinity session for a given host and port
        """
        # Get the session key
        key = self.session_key(host)

        # Remove the session key from current list
        self.session_lock.acquire()
        try:
            if key in self.sessions:
                del self.sessions[key]
                handler.log_debug("Affinity session ended")
            else:
                handler.log_message("No affinity session to end")
        finally:
            self.session_lock.release()

        # Remove the session key from queued list
        self.queue_lock.acquire()
        try:
            if key in self.pending_sessions:
                del self.pending_sessions[key]
                handler.log_debug("Queued affinity request removed")
        finally:
            self.queue_lock.release()

    def get_session(self, host, handler):
        """
        Either returns the cookie associated with the given host / port,
        or None if no session is active
        """
        ret = None
        self.session_lock.acquire()
        try:
            # Get the session key
            key = self.session_key(host)

            # Return the key if present
            if key in self.sessions:
                ret = self.sessions[key]
                handler.log_debug("Got affinity session cookie")
        finally:
            self.session_lock.release()

        return ret
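
A hypothetical usage sketch of the session lifecycle above; logger, handler (anything providing log_debug/log_message), and response_headers are assumed inputs:

mgr = AffinityManager(logger)
mgr.queue_session("backend:8080", handler)         # next response should set a cookie
mgr.start_session("backend:8080", response_headers, handler)
cookie = mgr.get_session("backend:8080", handler)  # None if no session is active
mgr.end_session("backend:8080", handler)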
Example #51
0
class DebugNode( object ):
   """ Wraps the cread debug node plugin for easier automated testing of the Crea Network"""

   def __init__( self, cread, data_dir, args='', plugins=[], apis=[], cread_out=None, cread_err=None ):
      """ Creates a cread debug node.

      It can be run by using 'with debug_node:'
      While in the context of 'with' the debug node will continue to run.
      Upon exit of 'with' the debug node will exit and clean up temporary files.
      This class also contains methods to allow basic manipulation of the blockchain.
      For all other requests, the python-crea library should be used.

      args:
         cread -- The string path to the location of the cread binary
         data_dir -- The string path to an existing cread data directory which will be used to pull blocks from.
         args -- Other string args to pass to cread.
         plugins -- Any additional plugins to start with the debug node. Modify plugins with DebugNode.plugins
         apis -- Any additional APIs to have available. APIs will retain this order for accessibility starting at id 3.
            database_api is 0, login_api is 1, and debug_node_api is 2. Modify apis with DebugNode.apis
         cread_out -- A stream for cread's stdout. Default is to pipe to /dev/null
         cread_err -- A stream for cread's stderr. Default is to pipe to /dev/null
      """
      self._data_dir = None
      self._debug_key = None
      self._FNULL = None
      self._rpc = None
      self._cread_bin = None
      self._cread_lock = None
      self._cread_process = None
      self._temp_data_dir = None

      self._cread_bin = Path( cread )
      if( not self._cread_bin.exists() ):
         raise ValueError( 'cread does not exist' )
      if( not self._cread_bin.is_file() ):
         raise ValueError( 'cread is not a file' )

      self._data_dir = Path( data_dir )
      if( not self._data_dir.exists() ):
         raise ValueError( 'data_dir either does not exist or is not a properly constructed crea data directory' )
      if( not self._data_dir.is_dir() ):
         raise ValueError( 'data_dir is not a directory' )

      self.plugins = plugins
      self.apis = apis

      if( args != '' ):
         self._args = args.split()
      else:
         self._args = list()

      self._FNULL = open( devnull, 'w' )
      if( cread_out != None ):
         self.cread_out = cread_out
      else:
         self.cread_out = self._FNULL

      if( cread_err != None ):
         self.cread_err = cread_err
      else:
         self.cread_err = self._FNULL

      self._debug_key = '5JHNbFNDg834SFj8CMArV6YW7td4zrPzXveqTfaShmYVuYNeK69'
      self._cread_lock = Lock()


   def __enter__( self ):
      self._cread_lock.acquire()

      # Setup temp directory to use as the data directory for this
      self._temp_data_dir = TemporaryDirectory()

      for child in self._data_dir.iterdir():
         if( child.is_dir() ):
            copytree( str( child ), str( self._temp_data_dir.name ) + '/' + child.name )

      db_version = self._data_dir / 'db_version'
      if( db_version.exists() and not db_version.is_dir() ):
         copy2( str( db_version ), str( self._temp_data_dir.name ) + '/db_version' )

      config = Path( self._temp_data_dir.name ) / 'config.ini'
      config.touch()
      config.write_text( self._get_config() )

      cread = [ str( self._cread_bin ), '--data-dir=' + str( self._temp_data_dir.name ) ]
      cread.extend( self._args )

      self._cread_process = Popen( cread, stdout=self.cread_out, stderr=self.cread_err )
      self._cread_process.poll()
      sleep( 5 )
      if( not self._cread_process.returncode ):
         self._rpc = CreaNodeRPC( 'ws://127.0.0.1:8095', '', '' )
      else:
         raise Exception( "cread did not start properly..." )

   def __exit__( self, exc, value, tb ):
      self._rpc = None

      if( self._cread_process != None ):
         self._cread_process.poll()

         if( not self._cread_process.returncode ):
            self._cread_process.send_signal( SIGINT )

            sleep( 7 )
            self._cread_process.poll()

            if( not self._cread_process.returncode ):
               self._cread_process.send_signal( SIGTERM )

               sleep( 5 )
               self._cread_process.poll()

               if( self._cread_process.returncode ):
                   logging.error( 'cread did not properly shut down after SIGINT and SIGTERM. User intervention may be required.' )

      self._cread_process = None
      self._temp_data_dir.cleanup()
      self._temp_data_dir = None
      self._cread_lock.release()


   def _get_config( self ):
      return "# no seed-node in config file or command line\n" \
          + "p2p-endpoint = 127.0.0.1:1776       # bind to localhost to prevent remote p2p nodes from connecting to us\n" \
          + "rpc-endpoint = 127.0.0.1:8095       # bind to localhost to secure RPC API access\n" \
          + "enable-plugin = witness debug_node " + " ".join( self.plugins ) + "\n" \
          + "public-api = database_api login_api debug_node_api " + " ".join( self.apis ) + "\n"


   def debug_generate_blocks( self, count ):
      """
      Generate blocks on the current chain. Pending transactions will be applied, otherwise the
      blocks will be empty.

      The debug node plugin requires a WIF key to sign blocks with. This class uses the key
      5JHNbFNDg834SFj8CMArV6YW7td4zrPzXveqTfaShmYVuYNeK69 which was generated from
      `get_dev_key crea debug`. Do not use this key on the live chain for any reason.

      args:
         count -- The number of new blocks to generate.

      returns:
         int: The number of blocks actually pushed.
      """
      if( count <= 0 ):
         raise ValueError( "count must be a positive non-zero number" )
      return self._rpc.rpcexec( json.loads( '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_generate_blocks",["' + self._debug_key + '",' + str( count ) + ']], "id": 1}' ) )


   def debug_generate_blocks_until( self, timestamp, generate_sparsely=True ):
      """
      Generate blocks up until a head block time rather than a specific number of blocks. As with
      `debug_generate_blocks` all blocks will be empty unless there were pending transactions.

      The debug node plugin requires a WIF key to sign blocks with. This class uses the key
      5JHNbFNDg834SFj8CMArV6YW7td4zrPzXveqTfaShmYVuYNeK69 which was generated from
      `get_dev_key crea debug`. Do not use this key on the live chain for any reason.

      args:
         timestamp -- The desired new head block time. This is a POSIX timestamp.
         generate_sparsely -- True if you wish to skip all intermediate blocks between the current
            head block time and the desired head block time. This is useful to trigger events, such
            as payouts and bandwidth updates, without generating blocks. However, many automatic chain
            updates (such as block inflation) will not continue at their normal rate as they are only
            calculated when a block is produced.

      returns:
         (time, int): A tuple including the new head block time and the number of blocks that were
            generated.
      """
      if( not isinstance( timestamp, int ) ):
         raise ValueError( "Time must be a int" )
      generate_sparsely_str = "true"
      if( not generate_sparsely ):
         generate_sparsely_str = "false"

      iso_string = datetime.fromtimestamp( timestamp, timezone.utc ).isoformat().split( '+' )[0].split( '-' )
      if( len( iso_string ) == 4 ):
         iso_string = iso_string[:-1]
      iso_string = '-'.join( iso_string )

      print( iso_string )
      return self._rpc.rpcexec( json.loads( '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_generate_blocks_until",["' + self._debug_key + '","' + iso_string + '","' + generate_sparsely_str + '"]], "id": 1}' ) )


   def debug_set_hardfork( self, hardfork_id ):
      """
      Schedules a hardfork to happen on the next block. call `debug_generate_blocks( 1 )` to trigger
      the hardfork. All hardforks with id less than or equal to hardfork_id will be scheduled and
      triggered.

      args:
         hardfork_id: The id of the hardfork to set. Hardfork IDs start at 1 (0 is genesis) and increment
            by one for each hardfork. The maximum value is CREA_NUM_HARDFORKS in chain/hardfork.d/0-preamble.hf
      """
      if( hardfork_id < 0 ):
         raise ValueError( "hardfork_id cannot be negative" )

      self._rpc.rpcexec( json.loads( '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_set_hardfork",[' + str( hardfork_id ) + ']], "id":1}' ) )


   def debug_has_hardfork( self, hardfork_id ):
      return self._rpc.rpcexec( json.loads( '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_has_hardfork",[' + str( hardfork_id ) + ']], "id":1}' ) )


   def debug_get_witness_schedule( self ):
      return self._rpc.rpcexec( json.loads( '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_get_witness_schedule",[]], "id":1}' ) )


   def debug_get_hardfork_property_object( self ):
      return self._rpc.rpcexec( json.loads( '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_get_hardfork_property_object",[]], "id":1}' ) )
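A minimal usage sketch for the class above; the cread binary path and data
directory are placeholders, and the context manager starts and stops the node
as described in the docstring.

node = DebugNode( '/usr/local/bin/cread', '/home/user/cread_data' )  # placeholder paths
with node:
   # schedule hardfork 5, then apply it by generating one block
   node.debug_set_hardfork( 5 )
   node.debug_generate_blocks( 1 )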
Example #52
0
class ChatClient(lib.ChatClientBase):
    def __init__(self, configFile, LogFile, mode):
        with open(configFile, 'r') as f:
            js = json.load(f)
        super(ChatClient,
              self).__init__(lib.ChatClientInfo(js['name'], js['password']),
                             js['aes_key'], LogFile)
        self.mode = mode
        self.consoleOutput = mode != Mode.MCD
        self.server_addr = (js['server_hostname'], js['server_port'])
        self.log('Client Info: name = ' + self.info.name + ', password = ' + self.info.password)
        self.log('Mode = ' + mode)
        self.log('AESKey = ' + self.AESKey)
        self.log('Server address = ' + utils.addressToString(self.server_addr))
        self.minecraftServer = None
        self.start_lock = Lock()

    def start(self, minecraftServer=None):
        acq = self.start_lock.acquire(False)
        if not acq:
            return
        try:
            self.minecraftServer = minecraftServer
            if not self.isOnline():
                self.log('Trying to start the client, connecting to ' +
                         utils.addressToString(self.server_addr))
                self.sock = socket.socket()
                # Send client information
                try:
                    self.sock.settimeout(5)
                    self.sock.connect(self.server_addr)
                    self.send_login(self.info.name, self.info.password)
                except socket.error:
                    self.log('Failed to connect to the server')
                    return
                # Receive the login result
                try:
                    data = self.recieveData(timeout=5)
                    result = json.loads(data)['result']
                except socket.error:
                    self.log('Failed to receive login result')
                    return
                except ValueError:
                    self.log('Failed to read login result')
                    return
                self.log(utils.stringAdd('Result: ', result))
                if result == 'login success':
                    super(ChatClient, self).start()
            else:
                self.log('Client has already been started')
        finally:
            self.start_lock.release()

    def on_recieve_message(self, data):
        messages = utils.messageData_to_strings(data)
        for msg in messages:
            self.log(msg)
            if self.mode == Mode.MCD:
                self.minecraftServer.execute('tellraw @a {}'.format(
                    json.dumps({
                        'text': msg,
                        'color': 'gray'
                    })))

    def on_recieve_command(self, data):
        ret = copy.deepcopy(data)
        command = data['command']  # type: str
        result = {'responded': True}
        if command.startswith('!!stats '):
            stats = None
            if self.mode == Mode.MCD:
                stats = self.minecraftServer.get_plugin_instance(
                    'stats_helper')  # MCDR 1.0+
            if stats is not None:
                trimmed_command = command.replace('-bot',
                                                  '').replace('-all', '')
                try:
                    prefix, typ, cls, target = trimmed_command.split()
                    assert typ == 'rank' and type(target) is str
                except:
                    res_raw = None
                else:
                    res_raw = stats.show_rank(None, None, cls, target, '-bot'
                                              in command, False, '-all'
                                              in command, True)
                if res_raw is not None:
                    lines = res_raw.splitlines()
                    stats_name = lines[0]
                    res = '\n'.join(lines[1:])
                    result['type'] = 0
                    result['stats_name'] = stats_name
                    result['result'] = res
                else:
                    result['type'] = 1
            else:
                result['type'] = 2
        elif command == '!!online':  # MCDR -> bungeecord rcon
            if self.minecraftServer is not None and hasattr(
                    self.minecraftServer,
                    'MCDR') and self.minecraftServer.is_rcon_running():
                res = self.minecraftServer.rcon_query('glist')
                if res is not None:
                    result['type'] = 0
                    result['result'] = res
                else:
                    result['type'] = 1
            else:
                result['type'] = 2
        ret['result'] = result
        ret_str = json.dumps(ret)
        self.log('Command received, responding {}'.format(ret_str))
        self.sendData(ret_str)

    def sendChatMessage(self, player, message):
        self.log('Sending chat message "' + str((player, message)) +
                 '" to the server')
        self.send_message(self.info.name, player, message)

    def sendMessage(self, message):
        self.log('Sending message "' + message + '" to the server')
        self.send_message(self.info.name, '', message)
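A usage sketch under stated assumptions: the config file carries the keys read
in __init__, and Mode.MCD is the only mode value visible in the source, so the
server object wiring shown here is illustrative.

client = ChatClient('config.json', 'chat.log', Mode.MCD)  # paths are placeholders
client.start(minecraftServer=my_mcd_server)  # my_mcd_server is hypothetical
client.sendMessage('hello from the bridge')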
Example #53
0
class Blockchain:
    """
        Blockchain class, implements Blockchain based on a MySQL server

        Attributes
        ----------
        host : str
            ip address of the MySQL server (default "localhost")
        user : str
            username for the MySQL server (default "root")
        password : str
            password for the MySQL server (default "root")
        db : MySQL connector
            connector to the MySQL database
        cursor : MySQL cursor
            cursor to point to the MySQL database

        Methods
        -------
        __init__(host="localhost", user="root", password="root")
            initializes the Blockchain database (if not initialized), the MySQl connector and the MySQL cursor
        __getitem__(block_number, prev_hash="")
            return the block(s) at the requested number
        __len__()
            calculates the length of the Blockchain's consensus chain
        __sizeof__()
            calculates the size of the blockchain's database (amount of rows)
        append(block_number, timestamp, difficulty, nonce, previous_hash, merkle_root_hash, transactions, self_hash)
            appends new block to the blockchain database
        delete(block_hash)
            deletes block with the given hash from the sql database
        get_block_by_hash(block_hash)
            get method for blocks with certain hash
        get_block_consensus_chain(block_number)
            get method for blocks on the consensus (longest) chain

        Static Methods
        --------------
        datetime_string_posix(datetime_string)
            converts sql datetime string to posix time
        """
    def __init__(self, host="localhost", user="root", password="root"):
        """
        initiator for Blockchain objects
        :param host: host address of MySQL server, default "localhost"
        :type host: str
        :param user: MySQL server username
        :type user: str
        :param password: MySQL server password
        :type password: str
        """
        if not isinstance(host, str):
            raise TypeError(
                "Blockchain.__init__: expected host to be of type str")
        if not isinstance(user, str):
            raise TypeError(
                "Blockchain.__init__: expected user to be of type str")
        if not isinstance(password, str):
            raise TypeError(
                "Blockchain.__init__: expected password to be of type str")

        self.lock = Lock()

        # connect to MySQL server
        self.db = connector.connect(host=host,
                                    user=user,
                                    passwd=password,
                                    auth_plugin='mysql_native_password')

        # initiate database cursor
        self.cursor = self.db.cursor()

        # create Blockchain's database if it doesn't exist yet
        self.cursor.execute("CREATE DATABASE if not EXISTS Blockchain")

        # set cursor's database to Blockchain
        self.db.database = "Blockchain"

        # create Block table in Blockchain database if it doesn't exist yet
        self.cursor.execute(
            "CREATE TABLE if not EXISTS Blocks (id int UNSIGNED PRIMARY KEY AUTO_INCREMENT, "
            "block_number INT UNSIGNED, timestamp BIT(32),"
            "difficulty SMALLINT, nonce VARCHAR(64) NOT NULL, prev_hash VARCHAR(64) NOT NULL,"
            "merkle_root_hash VARCHAR(64), transactions LONGBLOB, self_hash VARCHAR(64))"
        )

        if self.__sizeof__() == 0:
            self.append(0, 0, 0, 0, "", "", [], "")

    def __getitem__(self, block_number, prev_hash=""):
        """
        return the block(s) at the requested number
        :param block_number: number of the block(s) to return
        :type block_number: int
        :return: requested block(s)
        :rtype: tuple
        :raises: IndexError: block number is not within range
        :raises: TypeError: expected block number to be of type int
        """
        if not isinstance(block_number, int):
            raise TypeError(
                "Blockchain.__getitem__: expected block_number to be of type int"
            )
        if not isinstance(prev_hash, str):
            raise TypeError(
                "Blockchain.__getitem__: expected prev_hash to be of type str")
        if block_number < 1 or block_number > self.__len__():
            raise IndexError("Blockchain.__getitem__: index out of range")

        self.lock.acquire()

        self.cursor.execute(
            "SELECT * FROM Blocks WHERE block_number={}".format(block_number))

        results = self.cursor.fetchall()

        self.lock.release()

        for x in range(len(results)):
            results[x] = Block(results[x])

        if results and not prev_hash:
            return results

        elif results:
            for result in results:
                if result[3] == prev_hash:
                    return [result]

        return None

    def __len__(self):
        """
        calculates the length of the Blockchain's consensus chain
        :return: length of the blockchain's consensus chain
        :rtype: int
        """
        self.lock.acquire()

        self.cursor.execute(
            "SELECT * FROM Blocks ORDER BY block_number DESC LIMIT 1")

        block = self.cursor.fetchall()

        self.lock.release()

        if block:
            return block[0][1]
        else:
            return 0

    def __sizeof__(self):
        """
        calculates the size of the blockchain's database (amount of rows)
        :return: size of the blockchain's database
        :rtype: int
        """
        self.lock.acquire()
        self.cursor.execute("SELECT * FROM Blocks")

        size = len(self.cursor.fetchall())
        self.lock.release()

        return size

    def append(self, block_number, timestamp, difficulty, nonce, previous_hash,
               merkle_root_hash, transactions, self_hash):
        """
        appends new block to the blockchain database
        :param block_number: number of block (distance from genesis block)
        :type block_number: int
        :param timestamp: time block was created (posix time)
        :type timestamp: int
        :param difficulty: difficulty of block (length of hash zero prefix)
        :type difficulty: int
        :param nonce: block nonce used to achieve targeted difficulty
        :type nonce: int
        :param previous_hash:
        :type previous_hash:
        :param merkle_root_hash: root of transactions merkle tree
        :type merkle_root_hash: str
        :param transactions: list of transactions to be included in the block
        :type transactions: list
        :param self_hash: hash of the block
        :type self_hash: str
        """
        if not isinstance(block_number, int):
            raise TypeError(
                "Blockchain.append: expected block_number to be of type int")
        if not isinstance(timestamp, int):
            raise TypeError(
                "Blockchain.append: expected timestamp to be of type int")
        if not isinstance(difficulty, int):
            raise TypeError(
                "Blockchain.append: expected difficulty to be of type int")
        if not isinstance(nonce, int):
            raise TypeError(
                "Blockchain.append: expected nonce to be of type int")
        if not isinstance(previous_hash, str):
            raise TypeError(
                "Blockchain.append: expected previous_hash to be of type str")
        if not isinstance(merkle_root_hash, str):
            raise TypeError(
                "Blockchain.append: expected merkle_root_hash to be of type str"
            )
        if not isinstance(transactions, list):
            raise TypeError(
                "Blockchain.append: expected transactions to be of type list")
        if not isinstance(self_hash, str):
            raise TypeError(
                "Blockchain.append: expected self_hash to be of type str")

        for x in range(len(transactions)):
            transactions[x] = transactions[x].network_format()
        t = ""
        for x in transactions:
            t += "{},".format(x)
        t = t[:-1]

        self.lock.acquire()
        self.cursor.execute(
            "INSERT INTO Blocks (block_number, timestamp, difficulty, nonce, prev_hash,"
            " merkle_root_hash, transactions, self_hash) VALUES ({}, {}, {}, \"{}\", \"{}\","
            "\"{}\", \"{}\", \"{}\")".format(block_number, timestamp,
                                             difficulty, nonce, previous_hash,
                                             merkle_root_hash, t, self_hash))
        self.db.commit()

        self.lock.release()

    def delete(self, block_hash):
        """
        deletes block from sql database
        :param block_hash: hash of block to delete
        :type block_hash: str
        """
        if not isinstance(block_hash, str):
            raise TypeError(
                "Blockchain.delete: expected block_hash to be of type str")

        self.lock.acquire()

        self.cursor.execute(
            "DELETE FROM Blocks WHERE self_hash=\"{}\"".format(block_hash))
        self.db.commit()

        self.lock.release()

    def get_block_by_hash(self, block_hash):
        """
        get method for block with certain hash
        :param block_hash: block hash
        :type block_hash: str
        :return: block with hash block_hash
        :rtype: Block
        """
        if not isinstance(block_hash, str):
            raise TypeError(
                "Blockchain.get_block_by_hash: expected block_hash to be of type str"
            )

        self.lock.acquire()

        self.cursor.execute(
            "SELECT * FROM Blocks WHERE self_hash=\"{}\"".format(block_hash))
        result = self.cursor.fetchall()

        self.lock.release()

        if result:
            return Block(result[0])
        else:
            return None

    def get_block_consensus_chain(self, block_number):
        """
        get method for blocks on the consensus (longest) chain
        :param block_number: block number of requested block
        :type block_number: int
        :return: requested block
        :rtype: Block
        :raises: IndexError: block number is not within range
        :raises: TypeError: expected block number to be of type int
        """
        if not isinstance(block_number, int):
            raise TypeError(
                "Blockchain.get_block_consensus_chain: expected block_number to be of type int"
            )

        if block_number < 1 or block_number > self.__len__():
            raise IndexError(
                "Blockchain.get_block_consensus_chain: block number not within range"
            )

        if block_number < self.__len__() - 1:
            return self.__getitem__(block_number)[0]

        self.lock.acquire()
        self.cursor.execute(
            "SELECT * FROM Blocks WHERE block_number={}".format(block_number))

        results = self.cursor.fetchall()

        self.lock.release()

        for x in range(len(results)):
            results[x] = Block(results[x])

        if len(results) == 1:
            return results[0]
        else:
            minimum_posix = results[0]
            for result in results:
                if result.timestamp < minimum_posix.timestamp:
                    minimum_posix = result
            if block_number == self.__len__():
                return minimum_posix
            else:
                return self.get_block_by_hash(minimum_posix.prev_hash)
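A minimal sketch, assuming a local MySQL server reachable with the default
credentials; __init__ creates the database and table on first use.

chain = Blockchain(host="localhost", user="root", password="root")
print(len(chain))  # height of the consensus chain
if len(chain) >= 1:
    block = chain.get_block_consensus_chain(1)  # earliest non-genesis block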
Example #54
0
class Thermocycler:
    def __init__(self, _port):

        # initialize arrays to log time and temperature throughout script protocol
        self.timeLog = []
        self.tempLog = []
        self.setpointLog = []
        self.outCurrLog = []
        self.setpt = 'undefined'
        self.currentTemp = None
        self.tc3625Lock = Lock()

        #open tc3625 object, coded at caltech to talk to the device.
        try:
            self.ctlr = tc3625.TC3625(port=_port, max_attempt=1)
            self.ctlr.open()
        except (IOError):
            raise IOError("Could not connect to " + _port + ".")

        #set control Temp Type to computer controlled set point
        self.ctlr.set_setpt_type('computer')

        #Set temp high range to 105 C
        self.ctlr.set_high_external_set_range(105)

        #Set temp low range to 0 C
        self.ctlr.set_low_external_set_range(0)

        #Set control type to PID control
        self.ctlr.set_control_type('PID')

        # Set Control mode to WP2 + and WP1 -
        self.ctlr.set_output_polarity('heat wp1+ wp2-')

        # Set alarm type to fixed value alarms
        self.ctlr.set_alarm_type('fixed')

        # Disable main output power shutdown on alarm
        self.ctlr.set_shutdown_if_alarm('off')

        # Set high alarm setting to 105 C. If the temperature of the plate surpasses this, the system will shut off.
        self.ctlr.set_high_alarm(105)

        # Set low alarm setting to 0 C. If the temperature of the plate gets below this, the system will shut off.
        self.ctlr.set_low_alarm(0)

        # Set alarm deadband to 5 C
        self.ctlr.set_alarm_deadband(5)

        # Set alarm latch to alarm latch on
        self.ctlr.set_alarm_latch('on')

        # Set sensor type to TS-67, TS132 15K
        #self.ctlr.set_sensor_type('TS67 TS136 15K') #Default thermistor with orange wires
        self.ctlr.set_sensor_type(
            'TS103 50K'
        )  #This is the MP-3022 for use with Nick's water cooled thermocycler

        # Set Sensor for alarm to CONTROL SENSOR
        self.ctlr.set_alarm_sensor('input1')

        # Set temperature scale to Celsius
        self.ctlr.set_working_units('C')

        # Set overcurrent level to 30 A
        self.ctlr.set_over_current_compare(30)

        # Set overcurrent level restart attempts to continuous
        self.ctlr.set_over_current_restart_type('continuous')

        # PID settings: proportional bandwidth = 3, integral gain = 1, derivative gain = 0
        self.ctlr.set_proportional_bandwidth(3)
        self.ctlr.set_integral_gain(1)
        self.ctlr.set_derivative_gain(0)

        #PID control settings
        self.ctlr.set_heat_multiplier(1)
        self.ctlr.set_cool_multiplier(1)

    def setPowerOn(self):
        self.tc3625Lock.acquire()
        self.ctlr.set_power_state('on')
        self.tc3625Lock.release()

    def setPowerOff(self):
        self.tc3625Lock.acquire()
        self.ctlr.set_power_state('off')
        self.tc3625Lock.release()

    def getTemp(self):
        self.tc3625Lock.acquire()
        self.currentTemp = self.ctlr.get_input1()
        self.tc3625Lock.release()
        return self.currentTemp

    def getOutCurr(self):
        self.tc3625Lock.acquire()
        curr = self.ctlr.get_output_current()
        self.tc3625Lock.release()
        return curr

    def setPoint(self, temp):
        if type(temp) != int:
            raise TypeError('set point must be an integer')
        if temp < 0 or temp > 100:
            raise ValueError(
                'This thermocycler operates between 0 C and 100C. Please enter a set point in that range.'
            )
        print("Set point to " + str(temp))
        self.tc3625Lock.acquire()
        self.ctlr.set_setpt(temp)
        self.tc3625Lock.release()
        self.setpt = temp
        self.log()

    #We will say the PID controller is equilibrated if the last 20 temp logs have been within 0.1 C of each other.
    def checkEquil(self):
        # guard: need at least 20 samples before judging equilibrium
        if len(self.tempLog) < 20:
            return False
        for i in self.tempLog[-20:]:
            # any reading more than 0.1 C from the latest temp, or more than
            # 0.3 C from the set point, means we are not yet equilibrated
            if abs(i - self.tempLog[-1]) > 0.1 or abs(i - self.setpt) > 0.3:
                return False
        # otherwise,
        return True

    def waitEquil(self):
        equil = False
        while not equil:
            time.sleep(1)
            self.log()
            equil = self.checkEquil()

    def setIntegralGain(self, gain):
        if (type(gain) != int) and (type(gain) != float):
            raise TypeError(
                "The integral gain must be either an integer or a float.")
        self.tc3625Lock.acquire()
        self.ctlr.set_integral_gain(gain)
        self.tc3625Lock.release()

    def setDerivativeGain(self, gain):
        if (type(gain) != int) and (type(gain) != float):
            raise TypeError(
                "The derivative gain must be either an integer or a float.")
        self.tc3625Lock.acquire()
        self.ctlr.set_derivative_gain(gain)
        self.tc3625Lock.release()

    #specify the number of seconds to pause.
    def pause(self, pause):
        if type(pause) != int and type(pause) != float:
            raise TypeError("Error: Pause time must be a float or an int.")
        if pause < 0:
            raise ValueError("Error: Pause time must be positive.")

        time.sleep(pause)

    def log(self):
        self.tempLog.append(float(self.getTemp()))
        #self.timeLog.append(float(time.clock()))
        #self.setpointLog.append(self.setpt)
        #self.outCurrLog.append(self.getOutCurr())

    def destroy(self):
        print("Shutting Down...")
        self.setPowerOff()
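A hedged usage sketch; 'COM5' is a placeholder serial port, and the set point
stays inside the 0-100 C range enforced by setPoint.

tc = Thermocycler('COM5')
tc.setPowerOn()
tc.setPoint(37)   # hold the plate at 37 C
tc.waitEquil()    # block until the recent readings are stable
tc.pause(60)      # dwell for one minute
tc.destroy()      # powers the controller off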
Example #55
0
class CachedLoaderMixin(object):
    """
    Mixin this class to implement simple memory and disk caching. The
    memcaching just uses a dict in the loader so if you have a global
    environment or at least a global loader this can speed things up.

    If the memcaching is enabled you can use (with Jinja 1.1 onwards)
    the `clear_memcache` function to clear the cache.

    For memcached support check the `MemcachedLoaderMixin`.
    """

    def __init__(self, use_memcache, cache_size, cache_folder, auto_reload,
                 cache_salt=None):
        if use_memcache:
            self.__memcache = CacheDict(cache_size)
        else:
            self.__memcache = None
        self.__cache_folder = cache_folder
        if not hasattr(self, 'check_source_changed'):
            self.__auto_reload = False
        else:
            self.__auto_reload = auto_reload
        self.__salt = cache_salt
        self.__times = {}
        self.__lock = Lock()

    def clear_memcache(self):
        """
        Clears the memcache.
        """
        if self.__memcache is not None:
            self.__memcache.clear()

    def load(self, environment, name, translator):
        """
        Load and translate a template. First we check if there is a
        cached version of this template in the memory cache. If this is
        not the case we check for a compiled template in the disk cache
        folder. And if none of this is the case we translate the template,
        cache and return it.
        """
        self.__lock.acquire()
        try:
            # caching is only possible for the python translator. skip
            # all other translators
            if translator is not PythonTranslator:
                return super(CachedLoaderMixin, self).load(
                             environment, name, translator)

            tmpl = None
            save_to_disk = False
            push_to_memory = False

            # auto reload enabled? check for the last change of
            # the template
            if self.__auto_reload:
                last_change = self.check_source_changed(environment, name)
            else:
                last_change = None

            # check if we have something in the memory cache and the
            # memory cache is enabled.
            if self.__memcache is not None:
                if name in self.__memcache:
                    tmpl = self.__memcache[name]
                    # if auto reload is enabled check if the template changed
                    if last_change and last_change > self.__times[name]:
                        tmpl = None
                        push_to_memory = True
                else:
                    push_to_memory = True

            # mem cache disabled or not cached by now
            # try to load if from the disk cache
            if tmpl is None and self.__cache_folder is not None:
                cache_fn = get_cachename(self.__cache_folder, name, self.__salt)
                if last_change is not None:
                    try:
                        cache_time = path.getmtime(cache_fn)
                    except OSError:
                        cache_time = 0
                if last_change is None or (cache_time and
                   last_change <= cache_time):
                    try:
                        f = file(cache_fn, 'rb')
                    except IOError:
                        tmpl = None
                        save_to_disk = True
                    else:
                        try:
                            tmpl = Template.load(environment, f)
                        finally:
                            f.close()
                else:
                    save_to_disk = True

            # if we still have no template we load, parse and translate it.
            if tmpl is None:
                tmpl = super(CachedLoaderMixin, self).load(
                             environment, name, translator)

            # save the compiled template on the disk if enabled
            if save_to_disk:
                f = file(cache_fn, 'wb')
                try:
                    tmpl.dump(f)
                finally:
                    f.close()

            # if memcaching is enabled and the template not loaded
            # we add that there.
            if push_to_memory:
                self.__times[name] = time.time()
                self.__memcache[name] = tmpl
            return tmpl
        finally:
            self.__lock.release()
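A sketch of how the mixin is meant to be composed (Jinja 1 era API); the
FileSystemLoader base is assumed to provide load and check_source_changed.

class CachingFileSystemLoader(CachedLoaderMixin, FileSystemLoader):
    def __init__(self, searchpath):
        FileSystemLoader.__init__(self, searchpath)
        CachedLoaderMixin.__init__(self, use_memcache=True, cache_size=40,
                                   cache_folder='/tmp/jinja_cache',
                                   auto_reload=True)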
Example #56
0
class EventManager:
    """The event manager is an object to control the events.
    It provides two basic functions, send and wait and has
    a dictionary of events, which the keys are event keys and
    the values are event objects.

    :param timeout: the time to wait before we stop waiting.
    """
    def __init__(self, timeout=None):
        self.timeout = timeout
        #: Dictionary of events waiting to execute (event waiting list).
        self.event_dict = {}
        self.condition_dict = {}
        self.lock = Lock()

    def send(self, key, value=None):
        """This function should be called when an event has
        happened and we know that some other function may be
        or will be waiting for that event. So it fires the event
        in event dictionary.

        :param key: key of the event in event dictionary.
        """
        #: Locks the event dictionary so no other changes can happen
        #: to it during this change.
        self.lock.acquire()

        #: Read under the lock to avoid a check-then-act race.
        already_exists = key in self.event_dict

        #: Make an event in the dictionary if it is not available.
        #: It happens when no one is waiting for the event right
        #: now, but may be waiting for that in the future.
        if not already_exists:
            e = Event()
            self.event_dict[key] = e
        #: Runs if someone is already waiting for the event to happen.
        else:
            e = self.event_dict[key]

        #: Unlock dictionary.
        self.lock.release()
        if value is None:
            e.set()
        else:
            # no condition registered for this key means any value is accepted
            condition = self.condition_dict.get(key, lambda v: True)
            if already_exists and condition(value):
                e.set()
            else:
                print('still waiting')

    def wait(self, key, condition=None, timeout=None):
        """When you want to wait for a value to be set, you
        can call this function and it waits the program until
        the send function for the same key is called.

        :param key: key of the event in dictionary.
        """
        timeout = self.timeout if timeout is None else timeout
        #: Locks the event dictionary so no other changes can happen
        #: to it, during this change.
        self.lock.acquire()

        #: Makes the event if it is not already available in the dictionary.
        if key not in self.event_dict:
            e = Event()
            self.event_dict[key] = e
            if condition is not None:
                self.condition_dict[key] = condition
        #: Runs if event is already available in dictionary. In case the
        #: event has been set before the wait or it has been called with
        #: wait function before this call.
        else:
            e = self.event_dict[key]

        #: Unlock dictionary.
        self.lock.release()
        e.wait(timeout)
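A short usage sketch: one thread blocks in wait() while another fires the same
key with send(); the key string is arbitrary.

from threading import Thread

manager = EventManager(timeout=5)

def consumer():
    manager.wait('data_ready')
    print('event received')

t = Thread(target=consumer)
t.start()
manager.send('data_ready')
t.join()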
Example #57
0
class SerialData:
    def __init__(self):

        self.isRun = False
        # self.isReceiving = False
        self.thread = None
        self.callbackfunction = collections.deque()
        self.callback_list_mutex = Lock()
        self.serial_read_write_mutex = Lock()
        self.port = None
        self.baud = None
        self.serialConnection = None
        
        self.defined_data_mode = True
        self.dataNumBytes = -1
        self.dataFormat = "<"
        self.rawData = None

    def openPort (self, serialPort='COM5', serialBaud=9600):
        
        if self.isRun:
            self.close()
        
        self.port = serialPort
        self.baud = serialBaud

        print('Trying to connect to: ' + str(serialPort) + ' at ' + str(serialBaud) + ' BAUD.')
        try:
            self.serialConnection = serial.Serial(serialPort, serialBaud)
            if not self.serialConnection.isOpen():
                self.serialConnection.open()
                
            print('Connected to ' + str(serialPort) + ' at ' + str(serialBaud) + ' BAUD.')
            self.readSerialStart()
        except:
            print("Failed to connect with " + str(serialPort) + ' at ' + str(serialBaud) + ' BAUD.')

    def isConnected(self):
        return self.isRun 

    def readSerialStart(self):
        if not self.isRun:
            self.thread = Thread(target=self.backgroundThread)
            self.isRun = True
            self.thread.start()

    def parseData(self):
        try:
            value = struct.unpack(self.dataFormat, self.rawData)
        except:
            return
        
        data = []
        
        '''for i in range(len(value)):         
            if self.dataFormat[i+1-rep_ind] == 'c':
                data.append(value[i].decode('ascii'))
            elif self.dataFormat[i+1-rep_ind] == 'b' or self.dataFormat[i] == 'h':
                data.append(int(value[i]))
            else:
                data.append(value[i])'''
        rep = 1
        ind = 0
        for fmt in self.dataFormat:
            if fmt == '<':
                continue
            
            if rep == 1:
                try:
                    rep = int(str(fmt))
                    continue
                except:
                    rep = 1

            for i in range(rep):
                if fmt == 'c':
                    if rep == 1 or i == 0:
                        data.append(value[ind].decode('ascii'))
                    else:
                        data[-1] += value[ind].decode('ascii')
                        
                elif fmt == 's':
                    data.append(value[ind].decode('ascii'))
                    ind += 1
                    break
                elif fmt == 'b' or fmt == 'h':
                    data.append(int(value[ind]))
                else:
                    data.append(value[ind])
                    
                ind += 1
            rep = 1

                
        self.callback_list_mutex.acquire()
        try:
            for function in self.callbackfunction:
                function(data)
        finally:
            self.callback_list_mutex.release()

    def setDataFormat(self, new_format):
        if new_format != "Dynamic":
            try:
                self.defined_data_mode = True
                self.dataFormat = "<"+new_format
                self.dataNumBytes = struct.calcsize(self.dataFormat)
                self.rawData = bytearray(self.dataNumBytes)
            except:
                print("Invalid Format: " + new_format)
                return False
        else:
            self.defined_data_mode = False
            self.dataFormat = "<"
            self.dataNumBytes = -1
            self.rawData = None
        
        return True

    def backgroundThread(self):  # retrieve data
        self.serialConnection.reset_input_buffer()
        print('Serial Monitoring Thread Started\n')
        
        self.rawData = bytearray(0)
        
        while self.isRun:
            try:
                if self.defined_data_mode and self.serialConnection.in_waiting >= self.dataNumBytes and self.dataNumBytes > 0:
                    self.rawData = bytearray(self.dataNumBytes)
                    self.serial_read_write_mutex.acquire()
                    try:
                        self.serialConnection.readinto(self.rawData)
                    finally:
                        self.serial_read_write_mutex.release()
                    self.parseData()
                
                elif (not self.defined_data_mode) and self.serialConnection.in_waiting and  self.dataNumBytes == -1:
                    self.dataNumBytes = struct.unpack('b',self.serialConnection.read(1))[0]
                
                elif (not self.defined_data_mode) and self.serialConnection.in_waiting >= self.dataNumBytes and self.dataNumBytes > 0 :
                    self.serial_read_write_mutex.acquire()
                    try:
                        tmp = self.serialConnection.read(1)
                    finally:
                        self.serial_read_write_mutex.release()
                    
                    self.dataNumBytes -= 1
                    tmp_uchar = struct.unpack('b',tmp)[0]
                    
                    if tmp_uchar != 0:
                        self.dataFormat = self.dataFormat + struct.unpack('c',tmp)[0].decode('ascii')
                        try:
                            try:
                                i = int(self.dataFormat[-1])
                            except:
                                i = None
                                
                            if i is None:
                                struct.calcsize(self.dataFormat) # check if its a valid format skip ones that end in a number
                        except:
                            print("num bytes: " + str(self.dataNumBytes) + " attempt fmt: " + self.dataFormat)
                            self.dataNumBytes = -1
                            self.dataFormat = "<"
                    else:
                        if struct.calcsize(self.dataFormat) == self.dataNumBytes:
                            # all is as expected
                            self.rawData = bytearray(self.dataNumBytes)
                            self.serial_read_write_mutex.acquire()
                            try:
                                self.serialConnection.readinto(self.rawData)
                            finally:
                                self.serial_read_write_mutex.release()
                            self.parseData()
                        self.dataNumBytes = -1
                        self.dataFormat = "<"
                
                elif self.dataNumBytes == 0:
                    self.dataNumBytes = -1
                
                else:
                    time.sleep(0.001) # recheck serial every 1 ms
                    
                        
            
            except:
                self.isRun = False
                self.thread = None
                self.serialConnection.close()
                print('Connection Lost\n')
                
    def write(self, data, data_format):
        try:
            index = 0
            for d in data:
                if data_format[index] == 'c':
                    data[index] = d.encode()
                elif data_format[index] == 'f':
                    data[index] = float(d)
                else:
                    data[index] = int(d)
                index += 1
                
        except:
            return (False, 'Format/Entry Mismatch')
        
        data_format_str = ""
        for e in data_format:
            data_format_str += e


        try:
            if len(data) == 1:
                msg = struct.pack("<"+data_format_str,data[0])
            elif len(data) == 2:
                msg = struct.pack("<"+data_format_str,data[0],data[1])
            elif len(data) == 3:
                msg = struct.pack("<"+data_format_str,data[0],data[1],data[2])
            elif len(data) == 4:
                msg = struct.pack("<"+data_format_str,data[0],data[1],data[2],data[3])
            else:
                return (False, "Data Length Unsupported")
        except:
            return (False, "Format/Entry Mismatch" )
            
        if self.isConnected():    
            if self.serialConnection:
                self.serial_read_write_mutex.acquire()
                try:
                    self.serialConnection.write(msg)
                finally:
                    self.serial_read_write_mutex.release()
                return True, None
            else:
                return (False, 'Port Not Writeable')
        else:
            return (False, 'Not Connected')

    def close(self, on_shutdown=False):
        if self.isConnected():
            self.isRun = False
            self.thread.join()
            self.thread = None
            self.serialConnection.close()
            if not on_shutdown:
                print('Serial Port ' + self.port + ' Disconnected.\n')

    def registerCallback(self, function):
        self.callback_list_mutex.acquire()
        try:
            self.callbackfunction.append(function)
        finally:
            self.callback_list_mutex.release()
            

    def removeCallback(self, function):
        self.callback_list_mutex.acquire()
        try:
            self.callbackfunction.remove(function)
        finally:
            self.callback_list_mutex.release()
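Typical use, as a hedged sketch; the port name, baud rate and packet format are
placeholders.

ser = SerialData()
ser.setDataFormat('hh')                         # two little-endian shorts per packet
ser.registerCallback(lambda data: print(data))  # called for every parsed packet
ser.openPort('COM5', 115200)                    # starts the background read thread
# ... later ...
ser.close()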
Example #58
0
class MemcachedLoaderMixin(object):
    """
    Uses a memcached server to cache the templates.

    Requires the memcache library from `tummy`_ or the cmemcache library
    from `Gijsbert de Haan`_.

    With Jinja 1.2 onwards you can also provide a `client` keyword argument
    that takes an already instantiated memcache client or memcache client
    like object.

    .. _tummy: http://www.tummy.com/Community/software/python-memcached/
    .. _Gijsbert de Haan: http://gijsbert.org/cmemcache/
    """

    def __init__(self, use_memcache, memcache_time=60 * 60 * 24 * 7,
                 memcache_host=None, item_prefix='template/', client=None):
        if memcache_host is None:
            memcache_host = ['127.0.0.1:11211']
        if use_memcache:
            if client is None:
                try:
                    try:
                        from cmemcache import Client
                    except ImportError:
                        from memcache import Client
                except ImportError:
                    raise RuntimeError('the %r loader requires an installed '
                                       'memcache module' %
                                       self.__class__.__name__)
                client = Client(list(memcache_host))
            self.__memcache = client
            self.__memcache_time = memcache_time
        else:
            self.__memcache = None
        self.__item_prefix = item_prefix
        self.__lock = Lock()

    def load(self, environment, name, translator):
        """
        Load and translate a template. First we check if there is a
        cached version of this template in the memory cache. If this is
        not the case we check for a compiled template in the disk cache
        folder. And if none of this is the case we translate the template,
        cache and return it.
        """
        self.__lock.acquire()
        try:
            # caching is only possible for the python translator. skip
            # all other translators
            if translator is not PythonTranslator:
                return super(MemcachedLoaderMixin, self).load(
                             environment, name, translator)
            tmpl = None
            push_to_memory = False

            # check if we have something in the memory cache and the
            # memory cache is enabled.
            if self.__memcache is not None:
                bytecode = self.__memcache.get(self.__item_prefix + name)
                if bytecode:
                    tmpl = Template.load(environment, bytecode)
                else:
                    push_to_memory = True

            # if we still have no template we load, parse and translate it.
            if tmpl is None:
                tmpl = super(MemcachedLoaderMixin, self).load(
                             environment, name, translator)

            # if memcaching is enabled and the template not loaded
            # we add that there.
            if push_to_memory:
                self.__memcache.set(self.__item_prefix + name, tmpl.dump(),
                                    self.__memcache_time)
            return tmpl
        finally:
            self.__lock.release()
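Composition mirrors the disk-caching mixin above; a hedged sketch assuming a
FileSystemLoader base and a memcached server on the default 127.0.0.1:11211.

class MemcachedFileSystemLoader(MemcachedLoaderMixin, FileSystemLoader):
    def __init__(self, searchpath):
        FileSystemLoader.__init__(self, searchpath)
        MemcachedLoaderMixin.__init__(self, use_memcache=True)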
Example #59
0
class RealTimePlot():
    def __init__(self, plotLength=500, refreshTime=10):
               
        self.gui_main = None       
        self.window = None
        self.plotMaxLength = plotLength
        
        self.data  = collections.deque( maxlen=plotLength)
        self.times = collections.deque( maxlen=plotLength)
        self.plotTimer = 0
        self.previousTimer = 0
        self.valueLast = None
        self.p = None
        self.fig = None
        
        self.t_start = time.perf_counter()
        self.values_queue = collections.deque(maxlen=plotLength)
        self.times_queue  = collections.deque(maxlen=plotLength)
        
        self.timeText = None
        
        self.input_index = 0
        
        self.pltInterval = refreshTime  # Refresh period [ms]
        
        self.data_mutex = Lock()

    def updatePlotData(self,args=None): #, frame, lines, lineValueText, lineLabel, timeText):
#        while self.isRunning:
        
        if len(self.times_queue):
            currentTimer = time.perf_counter()
            self.plotTimer = int((currentTimer - self.previousTimer) * 1000)
            if self.plotTimer > 1:
                self.previousTimer = currentTimer
                self.timeText.set_text('Plot Interval = ' + str(self.plotTimer) + 'ms')
        else:
            return
       
        valueLast = []

        self.data_mutex.acquire()

        while len(self.times_queue):
            try:
                valueLast = self.values_queue[-1][self.input_index]
                time_val = self.times_queue[-1]
                valueLast = float(valueLast) # make sure its a number
                self.data.append(valueLast)  # latest data point and append it to array
                self.times.append(time_val)
                self.values_queue.clear()
                self.times_queue.clear()
            except:
                break
        

        self.data_mutex.release()
        
        self.lines.set_data(self.times, self.data)
        if len(self.data):
            self.lineValueText.set_text('[' + self.lineLabel + " IND: " +str(self.input_index) + '] = ' + str(round(self.data[-1],3)))
        
        if len(self.times) > 5:
            #self.fig.canvas.restore_region(self.background)
            self.ax.set_xlim(self.times[0],self.times[-1])
            
            min_ylim = min(self.data)
            max_ylim = max(self.data)
            
            if min_ylim == max_ylim:
                if min_ylim == 0:
                    min_ylim = -1
                    max_ylim = 1
                else:
                    min_ylim = min_ylim*.2
                    max_ylim = max_ylim*1.2
            
            
            self.ax.set_ylim(min_ylim - (max_ylim-min_ylim)/10, max_ylim + (max_ylim-min_ylim)/10)

    def addValue(self, value):
        self.data_mutex.acquire()
        self.values_queue.append(value)
        self.times_queue.append(time.perf_counter()-self.t_start)
        self.data_mutex.release()
        
    def changePlotIndex(self, index):
        self.input_index = index
        self.times.clear()
        self.data.clear()

    def setupPlot(self):  # retrieve data
        xmin = 0
        xmax = self.plotMaxLength
        ymin = -1
        ymax = 1050
        self.fig = plt.figure()
        self.ax = plt.axes( autoscale_on=True)#xlim=(xmin, xmax), ylim=(float(ymin - (ymax - ymin) / 10), float(ymax + (ymax - ymin) / 10)))
        self.ax.set_title('Arduino Analog Read')
        self.ax.set_xlabel("time")
        self.ax.set_ylabel("AnalogRead Value")
        
        self.canvas = FigureCanvasTkAgg(self.fig, master=self.window)
        self.canvas.draw()
        self.canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
        
        toolbar = NavigationToolbar2Tk(self.canvas,self.window)
        toolbar.update()
        self.canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)

        self.lineLabel = 'Sensor Value'
        self.timeText = self.ax.text(0.50, 0.95, '', transform=self.ax.transAxes)
        self.lines = self.ax.plot([], [], label=self.lineLabel)[0]
        self.lineValueText = self.ax.text(0.50, 0.90, '', transform=self.ax.transAxes)
  
        # START THE PLOT ANIMATION
        self.anim = animation.FuncAnimation(self.fig, self.updatePlotData, interval=self.pltInterval)
        
    
    def isOk(self):
        return self.anim is not None

    def close(self):
        if self.anim is not None:
            self.anim.event_source.stop()
        self.anim = None
 
        self.window.withdraw()
        
        '''if self.window is not None:
            self.window.quit() # stops main loop
            self.window.destroy() # Destroys window and all child widgets
        '''

    def Start(self, main=None):
        if self.window is None:
            self.window = Toplevel(main)
            self.window.title("Real Time Plot")
            self.window.geometry("800x600")
            self.window.protocol("WM_DELETE_WINDOW", self.close)
            self.gui_main = main
        self.setupPlot()
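A hedged embedding sketch: the plot expects a Tk parent window, and addValue is
typically wired to a data source callback (for example SerialData.registerCallback
above); the sample below feeds one value by hand.

import tkinter

root = tkinter.Tk()
plot = RealTimePlot(plotLength=500, refreshTime=10)
plot.Start(root)
plot.addValue([512])  # one-element sample; input_index 0 selects it
root.mainloop()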
Example #60
0
    def __provide_state(self, port_file, at_time):
        '''
        Provide the internal state of the module. This is typically used by another
        module such as '.cortix.modulib.pyplot'; this XML file type is now deprecated.
        However, to have a dynamic plotting option, create this history file in the
        time unit of Cortix; that is, undo the time scaling.
        '''
        import datetime
        import xml.etree.ElementTree as ElementTree
        from threading import Lock

        n_digits_precision = 8

        # write header
        if at_time == self.__start_time:

            assert os.path.isfile(
                port_file) is False, 'port_file %r exists; stop.' % port_file

            tree = ElementTree.ElementTree()
            root_node = tree.getroot()

            a = ElementTree.Element('time-sequence')
            a.set('name', 'droplet_' + str(self.__slot_id) + '-state')

            b = ElementTree.SubElement(a, 'comment')
            b.set('author', 'cortix.examples.modulib.droplet')
            b.set('version', '0.1')

            b = ElementTree.SubElement(a, 'comment')
            today = datetime.datetime.today()
            b.set('today', str(today))

            b = ElementTree.SubElement(a, 'time')
            b.set('unit', self.__cortix_time_unit)

            # setup the headers
            for specie in self.__liquid_phase.species:
                b = ElementTree.SubElement(a, 'var')
                formula_name = specie.formulaName
                b.set('name', formula_name)
                unit = specie.massCCUnit
                b.set('unit', unit)
                b.set('legend', 'Droplet_' + str(self.__slot_id) + '-state')
                b.set('scale', self.__pyplot_scale)

            for quant in self.__liquid_phase.quantities:
                b = ElementTree.SubElement(a, 'var')
                formal_name = quant.formalName
                b.set('name', formal_name)
                unit = quant.unit
                b.set('unit', unit)
                b.set('legend', 'Droplet_' + str(self.__slot_id) + '-state')
                b.set('scale', self.__pyplot_scale)

            # write values for all variables
            b = ElementTree.SubElement(a, 'timeStamp')
            b.set(
                'value',
                str(round(at_time / self.__time_unit_scale,
                          n_digits_precision)))

            values = list()

            for specie in self.__liquid_phase.species:
                val = self.__liquid_phase.GetValue(specie.name, at_time)
                values.append(val)

            for quant in self.__liquid_phase.quantities:
                val = self.__liquid_phase.GetValue(quant.name, at_time)
                values.append(val)

            # flush out data
            text = str()
            for value in values:
                text += str(round(value, n_digits_precision)) + ','

            text = text[:-1]

            b.text = text

            tree = ElementTree.ElementTree(a)

            tree.write(port_file,
                       xml_declaration=True,
                       encoding="unicode",
                       method="xml")

    #-------------------------------------------------------------------------------
    # if not the first time step then parse the existing history file and append
        else:

            mutex = Lock()
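            # note: this Lock object is created anew on every call, so it does
            # not actually serialize concurrent writers to port_file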
            mutex.acquire()

            tree = ElementTree.parse(port_file)
            root_node = tree.getroot()

            a = ElementTree.Element('timeStamp')
            a.set(
                'value',
                str(round(at_time / self.__time_unit_scale,
                          n_digits_precision)))

            # all variables values
            values = list()

            for specie in self.__liquid_phase.species:
                val = self.__liquid_phase.GetValue(specie.name, at_time)
                values.append(val)
            for quant in self.__liquid_phase.quantities:
                val = self.__liquid_phase.GetValue(quant.name, at_time)
                values.append(val)

            # flush out data
            text = str()
            for value in values:
                text += str(round(value, n_digits_precision)) + ','

            text = text[:-1]

            a.text = text

            root_node.append(a)

            tree.write(port_file,
                       xml_declaration=True,
                       encoding="unicode",
                       method="xml")

            mutex.release()

        return
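For reference, a small sketch that reads back the time-sequence file the method
writes; the filename is a placeholder.

import xml.etree.ElementTree as ElementTree

tree = ElementTree.parse('droplet_0-state.xml')
for stamp in tree.getroot().iter('timeStamp'):
    print(stamp.get('value'), stamp.text)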