Example #1
def run(conn):
    """Function to handle running implosion generation in separate :py:class:`multithreading.Process`

    :param conn: A connection, i.e. one end of a `Pipe()`
    """
    # Need duck-checking instead of real type-checking...
    assert hasattr(conn, 'send') and hasattr(conn, 'recv')

    # Get the implosion object from the pipe:
    imp = conn.recv()
    assert isinstance(imp, Implosion)

    connLock = Lock()

    # Run in a separate thread in this process:
    def impRun():
        nonlocal imp, conn
        try:
            imp.generate()
        except Exception as e:
            connLock.acquire()
            conn.send(e)
            connLock.release()

    t = Thread(target=impRun)
    t.start()

    while t.is_alive():
        connLock.acquire()
        conn.send(imp.progress())
        connLock.release()
        time.sleep(0.01)

    # When the thread is done, send the Implosion object back:
    conn.send(imp)
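
A minimal sketch of the parent-process side, assuming the run() function above and an Implosion instance are importable (names taken from the snippet, not verified against the original project):

from multiprocessing import Pipe, Process

parent_conn, child_conn = Pipe()
proc = Process(target=run, args=(child_conn,))
proc.start()
parent_conn.send(imp)  # imp is an existing Implosion instance (assumed)
msg = parent_conn.recv()
while not isinstance(msg, (Implosion, Exception)):
    print('progress:', msg)  # progress values arrive until generation finishes
    msg = parent_conn.recv()
proc.join()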
Example #2
class PriorityQueue:
    """ a non blocking priority queue """

    def __init__(self):
        self.queue = []
        self.lock = Lock()

    def __iter__(self):
        return iter(self.queue)

    def __delitem__(self, key):
        del self.queue[key]

    def put(self, element):
        self.lock.acquire()
        heappush(self.queue, element)
        self.lock.release()

    def get(self):
        """ return the smallest element, or None if the queue is empty """
        self.lock.acquire()
        try:
            return heappop(self.queue)
        except IndexError:
            return None
        finally:
            self.lock.release()
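
A quick usage sketch, assuming heappush/heappop come from heapq and Lock from threading:

pq = PriorityQueue()
pq.put((2, 'low'))
pq.put((1, 'high'))
assert pq.get() == (1, 'high')  # smallest element first
assert pq.get() == (2, 'low')
assert pq.get() is None         # empty queue yields None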
Example #3
class Reader(Thread):

    def __init__(self):
        super(Reader, self).__init__()
        self.buffer = []
        self.lock = Lock()

    def run(self):
        while True:
            self.lock.acquire()
            self.buffer.append(getch())
            self.lock.release()
            sleep(0)

    def read(self):
        self.start()
        while True:
            if self.buffer:
                # The reader may be in the middle of receiving multi-byte
                # input; wait long enough for this to finish.
                sleep(0.1)
                self.lock.acquire()
                chars, self.buffer = self.buffer, []
                self.lock.release()
                yield chars
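
A usage sketch, assuming getch() and sleep() are provided by the surrounding module (e.g. a console helper):

reader = Reader()
for chars in reader.read():  # read() starts the thread and yields buffered keystrokes
    print(''.join(chars))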
Example #4
class DataWindow(Thread):
    def __init__(self, data_adapter):
        Thread.__init__(self)
        self.win = N.zeros((100, 3))
        self.winlock = Lock()
        self.data_adapter = data_adapter
        self.running = False  # set up front so stop() is safe to call even before run() starts
    
    def run(self):
        self.data_adapter.start()
        self.running = True
        while self.running:
            self.winlock.acquire()
            try:
                while 1:
                    newdata = self.data_adapter.q.get(block=False)
                    self.win[:-1,:] = self.win[1:,:]
                    self.win[-1,:] = newdata[1:]
            except Queue.Empty:
                pass
            finally:
                self.winlock.release()
        self.data_adapter.stop()
    
    def stop(self):
        self.running = False
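
A small helper one might add to read the window safely from another thread (a sketch, not part of the original class):

def snapshot(window):
    window.winlock.acquire()
    try:
        return window.win.copy()  # copy under the lock so callers see a consistent window
    finally:
        window.winlock.release()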
Example #5
class WorkerTask(object):
    """
    This class is used to represent a page-loading task.
    """
    def __init__(self, args, routine, user_data = None):
        self._lock_complete = Lock()
        self.__complete = False
        self.gathered = False
        self.args = args
        if callable(routine):
            self.routine = routine
        else:
            raise AttributeError('<routine> argument should be a callable')
        self.user_data = user_data
        self.result = []
        self.thread = None

    def _get_complete(self):
        """complete property getter"""
        self._lock_complete.acquire()
        complete = self.__complete
        self._lock_complete.release()
        return complete

    def _set_complete(self, state):
        """complete property setter"""
        if not isinstance(state, bool):
            raise TypeError('state should be boolean')
        self._lock_complete.acquire()
        self.__complete = state
        self._lock_complete.release()

    complete = property(_get_complete, _set_complete)
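
Usage sketch with a hypothetical routine:

task = WorkerTask(args=(), routine=lambda: None)
task.complete = True   # the setter validates the type and takes the lock
print(task.complete)   # the getter reads under the lock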
Example #6
class FutureTask(Task):
    def __init__(self, function, *args, **kwargs):
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.lock = Lock()
        self.e = None
        self._result = None
        self.executing = False

    def _execute(self):
        try:
            self.lock.acquire()
            self.executing = True
            # store under a private name so it does not shadow the result() method
            self._result = self.function(*self.args, **self.kwargs)
        except Exception as e:
            self.e = e
        finally:
            self.lock.release()

    def result(self):
        while True:
            if self.executing:  # check the flag first to avoid deadlock
                self.lock.acquire()
                try:
                    if self.e is not None:
                        raise self.e
                    return self._result
                finally:
                    self.lock.release()
Example #7
class _CoreScheduleThread(Thread):
    def __init__(self, threadpool):
        self.scheduletasks = []
        self.tasklock = Lock()
        self.condition = Condition(Lock())
        self.threadpool = threadpool
        Thread.__init__(self)

    def run(self):
        while True:
            self.condition.acquire()
            if len(self.scheduletasks) == 0:
                self.condition.wait()
            else:
                task = self.scheduletasks.pop(0)
                if dates.current_timestamps() >= task.nexttime:
                    self.threadpool.execute(task.function, *task.args, **task.kwargs)
                    task.nexttime = dates.current_timestamps() + task.period
                else:
                    self.condition.wait(task.nexttime - dates.current_timestamps())
                self.addtask(task)
            self.condition.release()

    def addtask(self, task):  # copy on write
        self.tasklock.acquire()
        tasks = [t for t in self.scheduletasks]
        tasks.append(task)
        tasks.sort(key=lambda task: task.nexttime)
        self.scheduletasks = tasks
        self.tasklock.release()
Example #8
class ProgressBarLogger:
    def __init__(self, msg, total):
        self.msg = msg
        self.total = total
        self.status = 0
        self.lock = Lock()

    def log(self, *_):
        self.lock.acquire()
        self.status += 1
        self._print_progress_bar(self.status, self.total, prefix=self.msg, bar_length=50)
        self.lock.release()

    # from here http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
    # Print iterations progress
    @staticmethod
    def _print_progress_bar(iteration, total, prefix='', suffix='', decimals=2, bar_length=100):
        """
        Call in a loop to create terminal progress bar
        @params:
            iteration   - Required  : current iteration (Int)
            total       - Required  : total iterations (Int)
            prefix      - Optional  : prefix string (Str)
            suffix      - Optional  : suffix string (Str)
            decimals    - Optional  : number of decimals in percent complete (Int)
            bar_length   - Optional  : character length of bar (Int)
        """
        filled_length = int(round(bar_length * iteration / float(total)))
        percents = round(100.00 * (iteration / float(total)), decimals)
        bar_char = '#' * filled_length + '-' * (bar_length - filled_length)
        sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar_char, percents, '%', suffix))
        sys.stdout.flush()
        if iteration == total:
            sys.stdout.write('\n')
            sys.stdout.flush()
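
Usage sketch: a single shared logger advanced from worker threads (assumes sys and Lock are imported as in the snippet):

bar = ProgressBarLogger('Processing', total=100)
for _ in range(100):
    bar.log()  # each call increments the count and redraws the bar under the lock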
Example #9
	def handle(self):
		data, socket = self.request

		# Note: this lock is local to the call, so it does not synchronize anything across threads
		lock = Lock()
		lock.acquire()

		DataOffset    = struct.unpack('<H', data[139:141])[0]
		BrowserPacket = data[82+DataOffset:]
		ReqType       = RequestType(BrowserPacket[0])

		Domain = Decode_Name(data[49:81])
		Name   = Decode_Name(data[15:47])
		Role1  = NBT_NS_Role(data[45:48])
		Role2  = NBT_NS_Role(data[79:82])
		Fprint = WorkstationFingerPrint(data[190:192])
		Roles  = ParseRoles(data[192:196])

		print text("[BROWSER] Request Type : %s" % ReqType)
		print text("[BROWSER] Address      : %s" % self.client_address[0])
		print text("[BROWSER] Domain       : %s" % Domain)
		print text("[BROWSER] Name         : %s" % Name)
		print text("[BROWSER] Main Role    : %s" % Role1)
		print text("[BROWSER] 2nd Role     : %s" % Role2)
		print text("[BROWSER] Fingerprint  : %s" % Fprint)
		print text("[BROWSER] Role List    : %s" % Roles)

		RAPThisDomain(self.client_address[0], Domain)

		lock.release()
Example #10
class Skeleton(object):
    def __init__(self, config_file):
        self.logger = logging.getLogger(self.__class__.__name__)
        config = Config(config_file)
        config.get_configs()
        self.config = config
        self.lock = Lock()
        self.queues = dict()

    def produce_task(self, tasker_name, tasker):
        while True:
            try:
                queue = self.queues.get(tasker_name)
                self.logger.info("old {0} queue size: {1}".format(tasker_name, queue.qsize()))
                queue.put(tasker.size)
                self.logger.info("new {0} queue size: {1}".format(tasker_name, queue.qsize()))
            except Exception as error:
                self.logger.exception("{0} {1}".format(tasker_name, error))
            finally:
                time.sleep(self.config.scan_task_interval)
        return

    def consume_task(self, n, tasker_name, tasker):
        while True:
            handler = Handler()
            queue = self.queues.get(tasker_name)
            if queue.empty():
                time.sleep(self.config.wait_time)
                continue
            try:
                while not queue.empty():
                    size = queue.get()
                    self.lock.acquire()
                    try:
                        handler.human_readable(size)
                    finally:
                        self.lock.release()  # release even if human_readable raises
            except Exception as error:
                self.logger.exception('Thread-{0}: error {1}'.format(n, error))
            finally:
                del(handler)

    def do_work(self):
        for tasker_name, tasker in self.config.taskers.items():
            self.queues[tasker_name] = Queue()

            # Spawn produce_task thread
            t = Thread(target=self.produce_task, args=(tasker_name, tasker))
            t.setDaemon(True)
            t.start()

            # Spawn consume_task thread
            for n in range(tasker.max_workers):
                t = Thread(target=self.consume_task, args=(n, tasker_name, tasker))
                t.setDaemon(True)
                t.start()

        while True:
            signal.signal(signal.SIGTERM, sigterm_handler)
            # Round robin and Sleep some seconds.
            time.sleep(self.config.scan_task_interval)
        return
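
The main loop installs sigterm_handler, which is not defined in this snippet; a minimal stand-in (an assumption, not the original) could be:

import sys

def sigterm_handler(signum, frame):
    sys.exit(0)  # daemon worker threads exit together with the main thread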
Example #11
class WaitCursor(Thread):
    """A waiting cursor for long operation that
    catch output and flush it after waiting"""
    def __init__(self):
        self.state = "WAIT"
        self.lock = Lock()           # Lock used to synchronise IO and cursor stop
        Thread.__init__(self)

    def run(self):
        """Method executed when the thread object start() method is called"""

        realStdout = sys.stdout # Backup stdout
        tmpStdout = StringIO()  # Store here all data output during waiting state
        sys.stdout = tmpStdout  # Capture stdout
        cursorState = ("-", "\\", "|", "/")
        i = 0
        self.lock.acquire()
        while self.state == "WAIT":
            realStdout.write(cursorState[i % 4])
            realStdout.flush()
            sleep(0.1)
            realStdout.write("\b")
            i += 1

        # Restore standard output and print temp data
        sys.stdout = realStdout
        sys.stdout.write(tmpStdout.getvalue())
        sys.stdout.flush()
        self.lock.release()

    def stop(self):
        self.state = "STOP"
        self.lock.acquire() # Wait end of IO flush before returning
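
Usage sketch wrapping a long operation (do_long_operation is hypothetical):

cursor = WaitCursor()
cursor.start()
do_long_operation()  # its prints are buffered while the cursor spins
cursor.stop()        # blocks until the cursor thread flushes the captured output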
Example #12
class Tips(object):
    """ Manage Tips Events. """

    def __init__(self, enable):
        self.enable = enable
        self._tips = {}
        self._new_tips = set()
        self.lock = Lock()
        if self.enable:
            self.fetcher = Fetcher(self._tips, self.lock, self._new_tips)
            self.cleaner = Cleaner(self._tips, self.lock, self._new_tips)
            self.fetcher.start()
            self.cleaner.start()

    def tips(self):
        return self._tips.values()

    def new_tips(self):
        if self._new_tips:
            wait_free_acquire(self.lock)
            res = [self._tips[x] for x in self._new_tips]
            self._new_tips.clear()
            self.lock.release()
            return res
        else:
            return []

    def stop(self):
        if self.enable:
            self.fetcher.finnish()
            self.cleaner.finnish()
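
wait_free_acquire is not defined in this snippet; one plausible reading, offered here purely as an assumption, is a non-blocking polling acquire:

import time

def wait_free_acquire(lock):
    while not lock.acquire(False):  # poll rather than block
        time.sleep(0.001)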
Example #13
class InMemoryItemValue(object):
  _lock = None
  """:type _lock Lock"""

  def __init__(self, value=None, expire_in=None):
    self._lock = Lock()
    self._value = value
    self._expire_in = None
    self._expire_in_time = None

    self.update_expire_time(expire_in)

  @property
  def value(self):
    return self._value

  @value.setter
  def value(self, val):
    self._lock.acquire()
    self._value = val
    self._expire_in = (datetime.now() + timedelta(seconds=float(self._expire_in_time))
                       if self._expire_in_time else None)
    self._lock.release()

  def update_expire_time(self, t):
    self._expire_in_time = t

  @property
  def is_expired(self):
    return (self._expire_in - datetime.now()).days < 0 if self._expire_in else False
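
Usage sketch:

item = InMemoryItemValue(expire_in=5)
item.value = 'payload'   # the setter stamps the expiry under the lock
print(item.is_expired)   # False until roughly 5 seconds have passed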
Example #14
class Queue:
	"""Command queue class
	"""
	def __init__(self):
		self.lock = Lock()
		self.locks = {}
	
	def queue(self, command, *args):
		check = inspect.getargspec(command)
		cmdname = command.__name__ if command.__name__ else "unknown_cmd"
		if len(check[0]) != len(args):
			logging.warn("Queue command '%s' expected %u args, got %u!" % (cmdname, len(check[0]), len(args)))
			
		# If we have enough args, try running the command
		if len(args) >= len(check[0]):
			args = args[:len(check[0])] # Resize arg list if needed
			ret = None
			server = args[0]
			self.lock.acquire()
			if not server in self.locks:
				self.locks[server] = Lock()
			self.lock.release()
			
			self.locks[server].acquire()
			# Run in an enclosure, so as to be able to release lock if it fails
			try:
				ret = command(*args)
			except Exception as err:
				logging.warn("Queue command returned error: %s" % err)
			self.locks[server].release()
			
			if ret:
				return ret
		return None
Example #15
    def start_manager(self):

        exit_flags[self.tab_id] = 0

        log.info('START | Layers Download Manager')

        thread_list = ['Alpha', 'Bravo', 'Charlie', 'Delta', 'Echo', 'Foxtrot', 'Golf', 'Hotel', 'India', 'Juliet']
        queue_lock = Lock()
        work_queue = Queue.Queue(len(self.file_paths_and_sizes))
        threads = []

        for thread_name in thread_list:
            key = str(uuid.uuid4())
            thread = LayerDownloadThread(self.source, thread_name, work_queue, queue_lock, key, self.target_dir, self.tab_id)
            thread.start()
            if not threads_map_key in thread_manager_processes:
                thread_manager_processes[threads_map_key] = {}
            thread_manager_processes[threads_map_key][key] = thread
            threads.append(thread)

        queue_lock.acquire()
        for word in self.file_paths_and_sizes:
            work_queue.put(word)
        queue_lock.release()

        while not work_queue.empty():
            pass  # busy-wait until the workers drain the queue

        exit_flags[self.tab_id] = 1

        for t in threads:
            t.join()

        log.info('DONE | Layers Download Manager')
Example #16
class Store:
    def __init__(self, item_number, person_capacity):
        self.items_remaining_lock = Lock()
        self.space_available_lock = Lock()
        self.item_number = item_number
        self.person_capacity = person_capacity
        self.items_remaining = self.item_number
        self.space_available = self.person_capacity

    def enter(self):
        while True:
            self.space_available_lock.acquire()
            if self.space_available > 0:
                self.space_available -= 1
                self.space_available_lock.release()
                break
            self.space_available_lock.release()

    def buy(self):
        self.items_remaining_lock.acquire()
        if self.items_remaining <= 0:
            self.items_remaining_lock.release()  # release before the early return to avoid a deadlock
            return False
        self.items_remaining_lock.release()
        time.sleep(random.randint(5, 10))
        self.items_remaining_lock.acquire()
        if self.items_remaining <= 0:
            self.items_remaining_lock.release()
            return False
        self.items_remaining -= 1
        self.items_remaining_lock.release()
        self.space_available_lock.acquire()
        self.space_available += 1
        self.space_available_lock.release()
        return True
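
Usage sketch with shopper threads (assumes Thread, time and random are imported as in the snippet):

store = Store(item_number=10, person_capacity=3)

def shopper():
    store.enter()  # spins until space is available
    store.buy()

threads = [Thread(target=shopper) for _ in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()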
Example #17
class Manager:
  def __init__(self):
    self.networks = set()
    self.logs = []
    self.next_log_id = 0
    self.log_lock = Lock()

  def notifyNetUp(self, pno, net_name):
    self.networks.add(net_name)
    sys.stdout.write(">> network: %s is up (%d)\n" % (net_name, len(self.networks)))

  def getNetworks(self):
    return self.networks

  def getLogs(self, since=-1):
    if since >= 0:
      return filter(lambda l: l['id'] > since, self.logs)
    else:
      return self.logs

  def putLog(self, host, log):
    self.log_lock.acquire()
    l = {'id': self.next_log_id, 'host': host, 'log': log}
    self.next_log_id = self.next_log_id + 1
    sys.stdout.write(">> log: %s\n" % json.dumps(l))
    self.logs.append(l)
    self.log_lock.release()
Example #18
class shm:
    def __init__(self, data=None):
        self._data = data
        self._lock = Lock()

    def set(self, data, lock=True):
        if lock:
            with self._lock:
                self._data = data
        else:
            self._data = data

    def get(self, lock=True):
        if lock:
            with self._lock:
                data = self._data
                return data
        else:
            return self._data

    def acquire(self):
        self._lock.acquire()

    def release(self):
        self._lock.release()

    data = property(get, set)
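
Usage sketch:

box = shm()
box.data = 42    # the property setter locks by default
print(box.data)  # locked read
box.acquire()    # manual locking for a compound update
box.set(box.get(lock=False) + 1, lock=False)
box.release()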
Example #19
class DPMClient():   
    def __init__(self, uid=None, key=None):
        self._lock = Lock()
        self._uid = uid
        self._key = None
        if key:
            self._key = rsa.PublicKey.load_pkcs1(key)
      
    def request(self, addr, port, buf):
        self._lock.acquire()
        try:
            return self._request(addr, port, buf)
        finally:
            self._lock.release()
    
    def _request(self, addr, port, buf):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((addr, port))
        try:
            if self._key:
                stream = Stream(sock, uid=self._uid, key=self._key)
            else:
                stream = Stream(sock)
            stream.write(buf)
            if self._key:
                stream = Stream(sock)
            _, _, res = stream.readall()
            return res
        finally:
            sock.close()
Example #20
class Syncer(object):

    def __init__(self, slave):
        self.slave = slave
        self.lock = Lock()
        self.pb = PostBox()
        for i in range(int(gconf.sync_jobs)):
            t = Thread(target=self.syncjob)
            t.start()

    def syncjob(self):
        while True:
            pb = None
            while True:
                self.lock.acquire()
                if self.pb:
                    pb, self.pb = self.pb, PostBox()
                self.lock.release()
                if pb:
                    break
                time.sleep(0.5)
            pb.close()
            pb.wakeup(self.slave.rsync(pb))

    def add(self, e):
        while True:
            try:
                self.pb.append(e)
                return self.pb
            except BoxClosedErr:
                pass
Example #21
class BlaLock(object):
    """
    Simple wrapper class for the thread.lock class which only raises an
    exception when trying to release an unlocked lock if it's initialized with
    strict=True.
    """

    def __init__(self, strict=False, blocking=True):
        self.__strict = strict
        self.__blocking = blocking
        self.__lock = Lock()

    def acquire(self):
        self.__lock.acquire(self.__blocking)

    def release(self):
        try:
            self.__lock.release()
        except ThreadError:
            if self.__strict:
                raise

    def locked(self):
        return self.__lock.locked()

    def __enter__(self, *args):
        self.acquire()

    def __exit__(self, *args):
        self.release()
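
Usage sketch:

lock = BlaLock()
with lock:
    pass       # acquired and released via the context manager
lock.release()  # silently ignored; with strict=True this raises ThreadError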
Example #22
class ProcessThread(Thread):
	# Initialize this thread
	def __init__(self):
		Thread.__init__(self)
		self.stopped = Event() # Cancel Event
		self.mutex = Lock()
		self.data = None

	# Threaded code
	def run(self):
		while not self.stopped.isSet():
			# Check if status data is available and process it
			data = None
			self.mutex.acquire()
			if self.data:
				data = self.data
				self.data = None
			self.mutex.release()

			if data:
				global outputfile
				try:
					fp = file(outputfile, 'w')
					fp.write(data.encode('utf-8'))
					fp.close()
				except Exception, e:
					console.log(LOG_PYTHON, "Couldn't write status to '%s': %s.\n" % (outputfile, str(e)))

			self.stopped.wait(0.5)
Example #23
    def _setup_to_do_n_cycles(self, number_of_cycles: int, updates_each_cycle: UpdateCollection=None):
        """
        Sets up the test so that the retriever will only do n cycles.
        :param number_of_cycles: the number of cycles to do
        """
        if updates_each_cycle is None:
            updates_each_cycle = UpdateCollection([])

        semaphore = Semaphore(0)
        lock_until_counted = Lock()
        lock_until_counted.acquire()

        def increase_counter(*args) -> UpdateCollection:
            semaphore.release()
            lock_until_counted.acquire()
            return updates_each_cycle

        self.retrieval_manager.update_mapper.get_all_since.side_effect = increase_counter
        self.retrieval_manager.start()

        run_counter = 0
        while run_counter < number_of_cycles:
            semaphore.acquire()
            run_counter += 1
            lock_until_counted.release()
            if run_counter == number_of_cycles:
                self.retrieval_manager.stop()

        self.retrieval_manager.update_mapper.get_all_since.side_effect = None
Example #24
class Promise:
  def __init__(self):
    self.value = None
    self.mutex = Lock()
    self.callbacks = []

  def updateValue(self, new_value):
    if self.mutex.acquire(blocking=False):
      try:
        if self.value is not None:
          raise RuntimeError("cannot set the value of an already resolved promise")
        self.value = new_value
        for cb in self.callbacks:
          cb(new_value)
      finally:
        self.mutex.release()  # release even on the error path so addCallback cannot deadlock
    else:
      raise RuntimeError("cannot set the value of an already resolved promise")

  def addCallback(self, cb):
    self.mutex.acquire(blocking = True)
    self.callbacks.append(cb)
    self.mutex.release()
    return self

  def map(self, f):
    fp = Promise()
    def chain(v):
      fp.updateValue(f(v))
    self.addCallback(chain)
    return fp

  def flatMap(self, f):
    fp = Promise()
    def chain(v):
      f(v).addCallback(fp.updateValue)
    self.addCallback(chain)
    return fp
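
Usage sketch:

p = Promise()
doubled = p.map(lambda v: v * 2)
doubled.addCallback(print)
p.updateValue(21)  # resolves both promises; prints 42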
Example #25
class TUNERS:
    def __init__(self, spec):
        from threading import Lock

        tuners = "".join(spec.split())  # remove whitespace
        tuners = tuners.split(',')
        tuners = [tuple(x.split(':')[0:2]) for x in tuners]
        # Add priority
        self.tuner_list = [(i, v[0], v[1]) for i, v in enumerate(tuners)]
        heapq.heapify(self.tuner_list)
        self.lock = Lock()

    def get_tuner(self):
        self.lock.acquire()
        try:
            tuner = heapq.heappop(self.tuner_list)
        except IndexError:
            tuner = None
        finally:
            self.lock.release()
        return tuner

    def put_tuner(self, tuner):
        self.lock.acquire()
        heapq.heappush(self.tuner_list, tuner)
        self.lock.release()
Example #26
class TfBroadcasterThread(Thread):
    def __init__(self,child_frame,parent_frame,tf_br=None):
        Thread.__init__(self)
        rospy.loginfo("Initializing tf broadcaster with child frame "+child_frame+" and parent frame "+parent_frame)
        if tf_br is None:
            self.tf_br = tf.TransformBroadcaster()
        else:
            self.tf_br = tf_br
        self.translation = None
        self.quaternion = None
        self.child_frame = child_frame
        self.parent_frame = parent_frame
        self.has_transformation=False
        self.lock=Lock()
            
    def set_transformation(self,translation,quaternion):
        self.lock.acquire()
        self.translation = translation
        self.quaternion = quaternion
        self.lock.release()
        self.has_transformation =True

    def run(self):
        while not rospy.is_shutdown():
            try:
                if self.has_transformation:
                    self.lock.acquire()
                    try:
                        self.tf_br.sendTransform(self.translation, self.quaternion, rospy.Time.now(), self.child_frame, self.parent_frame)
                    finally:
                        self.lock.release()  # release even if sendTransform raises
            except Exception, e:
                print 'TfBroadcasterThread:', e
Example #27
class PandoraPool(object):
	def __init__(self, poolSize, proxy=None, expireTime=3600):
		self.size = poolSize
		self.proxy = proxy
		self.expire = expireTime
		self.pool = [self.createPandoraAgent() for i in xrange(self.size)]
		self.mutex = Lock()

	def createPandoraAgent(self):
		return PandoraAgent(datetime.now() + timedelta(0, self.expire), self.proxy)

	def refreshPandoraAgent(self, agent):
		if agent.isExpired():
			agent.authenticate_connection()
			agent.setExpireDate(datetime.now() + timedelta(0, self.expire))
		return agent

	def getAgent(self):
		try:
			return self.refreshPandoraAgent(self.pool.pop())
		except IndexError:
			return self.createPandoraAgent()

	def hasAvailableConnections(self):
		return len(self.pool) > 0

	def releaseAgent(self, agent):
		self.mutex.acquire()
		if len(self.pool) < self.size:
			self.pool.append(agent)
		self.mutex.release()
Example #28
class IndependentWorker(Thread):
    def __init__(self, id, manager, workerArgs = None):
        '''A worker needs a task stack(queue), a name (id) and a boss (manager)'''
        self.id = id
        self.manager = manager
        self.completedTasksLock = Lock()
        self.completedTasks = 0
        Thread.__init__(self)
    def run(self):
        subject = self.manager.getTask()
        while subject != None:
            if self._task(subject):
                self.completedTasksLock.acquire()
                self.completedTasks += 1
                self.completedTasksLock.release()
            subject = self.manager.getTask()
        self._close()
        sys.exit()
    def getAndResetCompletedTasks(self):
        self.completedTasksLock.acquire()
        completedTasks = self.completedTasks  # read under the lock to avoid a race
        self.completedTasks = 0
        self.completedTasksLock.release()
        return completedTasks
    def _task(self, subject):
        print 'You should override Worker._task(self, subject)'
        sys.exit()
    def _close(self):
        pass
Example #29
class DebuggingLock:
    def __init__(self, name):
        self.lock = Lock()
        self.name = name

    def acquire(self, blocking = 1):
        self.print_tb("Acquire lock")
        self.lock.acquire(blocking)
        self.logmsg("===== %s: Thread %s acquired lock\n"%
            (self.name, currentThread().getName()))

    def release(self):
        self.print_tb("Release lock")
        self.lock.release()

    def logmsg(self, msg):
        loglock.acquire()
        logfile.write(msg + "\n")
        logfile.flush()
        loglock.release()

    def print_tb(self, msg):
        self.logmsg(".... %s: Thread %s attempting to %s\n"% \
                    (self.name, currentThread().getName(), msg) + \
                    "\n".join(traceback.format_list(traceback.extract_stack())))
Example #30
class Serial:
  def __init__(self, port='COM5', rate=9600, timeout=10):
      self._serial = serial.Serial(port, rate, timeout=timeout)
      self._mutex = Lock()
      self._mutex.acquire()
      response = self._serial.readline().strip()
      print response
      if response != 'OK':
          raise Exception("Failed to communicate with the serial device!")
      self._mutex.release()

  def _shortCommand(self, command):
      self._serial.write(command)
      response = self._serial.readline()
      return response.strip()

  def _longCommand(self, command):
      response = self._shortCommand('RCV ' + str(len(command)) + "\n")
      if response != 'RDY':
          return None
      for i in range(int(math.ceil(len(command) / 128.0))):
          c = command[128*i:128*(i+1)]
          response = self._shortCommand(c)
      return self._serial.readline().strip()

  def command(self, command):
      self._mutex.acquire()
      if len(command) < 128:
          response = self._shortCommand(command + "\n")
      else:
          response = self._longCommand(command)
      self._mutex.release()
      return response
Example #31
class PublisherConsistencyListener(SubscribeListener):
    """ This class is used to solve the problem that sometimes we create a
    publisher and then immediately publish a message, before the subscribers
    have set up their connections.

    Call attach() to attach the listener to a publisher.  It sets up a buffer
    of outgoing messages, then when a new connection occurs, sends the messages
    in the buffer.

    Call detach() to detach the listener from the publisher and restore the
    original publish methods.

    After some particular timeout (default to 1 second), the listener stops
    buffering messages as it is assumed by this point all subscribers will have
    successfully set up their connections."""

    timeout = 1  # Timeout in seconds to wait for new subscribers
    attached = False

    def attach(self, publisher):
        """ Overrides the publisher's publish method, and attaches a subscribe
        listener to the publisher, effectively routing incoming connections
        and outgoing publish requests through this class instance """
        # Do the attaching
        self.publisher = publisher
        publisher.impl.add_subscriber_listener(self)
        self.publish = publisher.publish
        publisher.publish = self.publish_override

        # Set state variables
        self.lock = Lock()
        self.established_time = time()
        self.msg_buffer = []
        self.attached = True

    def detach(self):
        """ Restores the publisher's original publish method and unhooks the
        subscribe listeners, effectively finishing with this object """
        self.publisher.publish = self.publish
        if self in self.publisher.impl.subscriber_listeners:
            self.publisher.impl.subscriber_listeners.remove(self)
        self.attached = False

    def peer_subscribe(self, topic_name, topic_publish, peer_publish):
        """ Called whenever there's a new subscription.

        If we're still inside the subscription setup window, then we publish
        any buffered messages to the peer.

        We also check if we're timed out, but if we are we don't detach (due
        to threading complications), we just don't propagate buffered messages
        """
        if not self.timed_out():
            self.lock.acquire()
            msgs = copy(self.msg_buffer)
            self.lock.release()
            for msg in msgs:
                peer_publish(msg)

    def timed_out(self):
        """ Checks to see how much time has elapsed since the publisher was
        created """
        return time() - self.established_time > self.timeout

    def publish_override(self, message):
        """ The publisher's publish method is replaced with this publish method
        which checks for timeout and if we haven't timed out, buffers outgoing
        messages in preparation for new subscriptions """
        if not self.timed_out():
            self.lock.acquire()
            self.msg_buffer.append(message)
            self.lock.release()
        self.publish(message)
Example #32
class ReSampler:
    '''
    Initializes the resampler
    particles: The particles to sample from
    weights: The weights of each particle
    state_lock: Controls access to particles and weights
  '''
    def __init__(self, particles, weights, state_lock=None):
        self.particles = particles
        self.weights = weights

        # For speed purposes, you may wish to add additional member variable(s) that
        # cache computations that will be reused in the re-sampling functions
        # YOUR CODE HERE?

        if state_lock is None:
            self.state_lock = Lock()
        else:
            self.state_lock = state_lock

    '''
    Performs independently, identically distributed in-place sampling of particles
  '''

    def resample_naiive(self):

        self.state_lock.acquire()

        self.weights /= np.sum(self.weights)
        k = len(self.weights)
        #resampled_particles= np.zeros((self.particles.shape[0]), dtype= np.float)
        resampled_particles_indices = np.arange(0, k, 1)

        resampled_particles_indices = np.random.choice(
            resampled_particles_indices, size=k, replace=True, p=self.weights)
        resampled_particles_naiive = self.particles[
            resampled_particles_indices]
        self.particles[:] = resampled_particles_naiive[:]
        # YOUR CODE HERE
        self.state_lock.release()

    '''
    Performs in-place, lower variance sampling of particles
    (As discussed on pg 110 of Probabilistic Robotics)
  '''

    def resample_low_variance(self):
        self.state_lock.acquire()

        # YOUR CODE HERE
        self.weights /= np.sum(self.weights)
        M = len(self.weights)
        r = np.random.rand(1) * 1.0 / M
        c = self.weights[0]
        U_array = np.zeros(M, dtype=float)
        last_element = r + ((M - 1.0) / M)
        U_array = np.linspace(r, last_element,
                              M)  #, endpoint=True, retstep= False)
        particles_resampled = np.zeros((self.particles.shape), dtype=np.float)
        weights_cumsum = np.cumsum(self.weights, dtype=np.float)
        particle_bins = np.digitize(U_array, weights_cumsum, right=False)
        print particle_bins
        particles_resampled = self.particles[particle_bins]
        self.particles[:] = particles_resampled[:]
        '''
    reampled_particles= np.zeros((self.particles.shape), dtype= np.float)
    #print resampled_particles
    M= len(self.weights)
    r= np.random.rand(1) * 1.0/M
    c= self.weights[0]
    i=0
    for m in xrange(M):
      U= r+ (m* 1.0/M)
      while U>c:
        i+=1
        c=c + self.weights[i]
      resampled_particles[m,:]= self.particles[i,:]  
      #print resampled_particles[m,:]
    self.particles[:] = resampled_particles[:]
    #print resampled_particles
    #print self.particles 
    '''
        self.state_lock.release()
Example #33
class spider():
    def __init__(self):
        self.Binary_Search = BinarySearch()  # instantiate a binary-search de-duplication filter
        self.Queue = Queue(100000)  # instantiate a queue
        self.thread_count = 10  # number of threads
        self.lock = Lock()  # thread lock
        self.event = Event()

    def requstGET(self, url):
        '''
        Request helper.
        :param url: url
        :return: text
        '''
        try:
            response = requests.get(url,
                                    headers=headers,
                                    timeout=3,
                                    verify=False)
            response.encoding = response.apparent_encoding
            if response.status_code == 200:
                return response.text
        except Exception as e:
            return None

    def parser(self, url):
        '''
        Parse a page and save the URLs it contains to the queue.
        :param url:
        :return:
        '''
        url_hash = self.Binary_Search.hash(url)
        self.lock.acquire()  # take the lock
        if self.Binary_Search.exists(self.Binary_Search.container,
                                     url_hash):  # check whether the url is already in the filter
            self.lock.release()  # release the lock
            return None  # already in the filter: stop here
        self.Binary_Search.container.append(url_hash)  # not in the filter yet: add it
        self.Binary_Search.container.sort()
        self.lock.release()  # release the lock
        print(url)
        text = self.requstGET(url)  # fetch the url
        if text:
            soup = BeautifulSoup(text, 'html.parser')  # parse the page
            for link in soup.find_all('a'):
                self.Queue.put_nowait(link.get('href'))  # save newly found urls to the queue
        self.event.wait()

    def thread_pool(self):
        '''
        Thread pool: pop urls off the queue and spawn worker threads in a loop.
        :return:
        '''

        while True:
            task_list = []
            for i in range(0, self.thread_count):  # start thread_count threads per batch
                try:
                    task_list.append(
                        Thread(target=self.parser,
                               args=(self.Queue.get_nowait(), )))  # create a worker thread task
                except Exception as e:
                    print(e)
            for task in task_list:  # start this batch of threads
                time.sleep(0.5)
                task.start()
            # [task.join() for task in task_list]  # wait for the whole batch to finish
            time.sleep(5)
            self.event.set()
            print(self.Queue.qsize())
Example #34
class FireflyLedController():
    class FireflyPattern():
        def __init__(self,
                     board_id,
                     red,
                     green,
                     blue,
                     speed,
                     pattern_str,
                     pattern_name=None):
            self.board_id = board_id
            self.red = red
            self.blue = blue
            self.green = green
            self.speed = speed
            self.pattern_str = pattern_str
            self.pattern_name = pattern_name
            if self.board_id is not None:
                self.packet = create_firefly_packet(self.board_id,
                                                    [red, green, blue],
                                                    self.speed,
                                                    self.pattern_str)
            else:
                self.packet = []

        def get_minimal(self):
            return {
                'board_id': self.board_id,
                'color': [self.red, self.green, self.blue],
                'speed': self.speed,
                'pattern': self.pattern_str,
                'pattern_name': self.pattern_name
            }

    def __init__(self):
        self.createBroadcastSender()
        # swarms are 1-indexed, because swarm ID 0 means broadcast
        self.patterns = {
            board_id : FireflyLedController.FireflyPattern(
                board_id,
                0.5,
                0.7,
                0.5,
                2,
                "01100110000",
                "Default") \
            for board_id in range(1, NUM_SWARMS + 1)
        }

        self.patternLock = Lock()
        self.is_running = True
        self.broadcastThread = Thread(target=self.broadcastFireflyPatterns)
        self.broadcastThread.start()
        #self.update_firmware()

    def update_firmware(self):
        if os.path.exists(FIRMWARE_PATH) and os.path.isfile(FIRMWARE_PATH):
            with open(FIRMWARE_PATH, 'rb') as fw:
                data = fw.read()
                self.fw_hash = hashlib.md5(data).hexdigest()
                self.have_firmware = True
                self.firmware_packet = create_firmware_packet(
                    0, "http://" + self.ipaddr + ":" + str(self.port) +
                    "/firefly_leds/firmware/" + self.fw_hash, self.fw_hash)
        else:
            self.have_firmware = False

    def get_firmware_hash(self):
        return self.fw_hash if self.have_firmware else None

    def set_led_pattern(self,
                        board_id,
                        red,
                        green,
                        blue,
                        speed,
                        pattern_str,
                        pattern_name=None):
        self.patternLock.acquire()
        self.patterns[board_id] = FireflyLedController.FireflyPattern(
            board_id,
            red,
            green,
            blue,
            speed,
            pattern_str,
            pattern_name,
        )
        self.patternLock.release()

    def get_led_patterns(self):
        self.patternLock.acquire()
        pattern_copy = [
            pattern.get_minimal() for pattern in set(self.patterns.values())
        ]
        self.patternLock.release()
        return pattern_copy

    def set_service_addr(self, ipaddr, port):
        self.ipaddr = ipaddr
        self.port = port
        self.update_firmware()  # yeah yeah, unexpected side effects.

    def broadcastFireflyPatterns(self):
        idx = 0
        while self.is_running:
            print("BROADCAST")
            #print(f"{self.patterns}")
            #print(f"{len(self.patterns)}")
            self.patternLock.acquire()
            try:
                for pattern in self.patterns.values():
                    #print(f"Sending {pattern.packet}")
                    self.sender_socket.sendto(pattern.packet,
                                              (MULTICAST_GROUP, LED_CMD_PORT))
                    time.sleep(0.01)
            except Exception as e:
                print(f"Exception {e}")
                pass  # I'm concerned about transient networking issues...
                # and I really don't know what to do if they happen
            self.patternLock.release()
            time.sleep(1.0)

            # and every 10 seconds, also broadcast the current firmware hash
            if False:
                #if idx % 10 == 0:
                if self.have_firmware:
                    print("FW UPDATE PACKET")
                    self.sender_socket.sendto(self.firmware_packet,
                                              (MULTICAST_GROUP, LED_CMD_PORT))
            idx += 1

    def createBroadcastSender(self, ttl=4):
        self.sender_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sender_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
                                      1)
        #       self.sender_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.sender_socket.setsockopt(socket.IPPROTO_IP,
                                      socket.IP_MULTICAST_TTL, ttl)
Example #35
class A3CAgent:
    # Actor-Critic Main Optimization Algorithm
    def __init__(self, env_name):
        # Initialization
        # Environment and PPO parameters
        self.env_name = env_name
        self.env = gym.make(env_name)
        self.action_size = self.env.action_space.n
        self.EPISODES, self.episode, self.max_average = 20000, 0, -21.0  # specific for pong
        self.lock = Lock()
        self.lr = 0.000025

        self.ROWS = 80
        self.COLS = 80
        self.REM_STEP = 4

        # Instantiate plot memory
        self.scores, self.episodes, self.average = [], [], []

        self.Save_Path = 'Models'
        self.state_size = (self.REM_STEP, self.ROWS, self.COLS)

        if not os.path.exists(self.Save_Path): os.makedirs(self.Save_Path)
        self.path = '{}_A3C_{}'.format(self.env_name, self.lr)
        self.Model_name = os.path.join(self.Save_Path, self.path)

        # Create Actor-Critic network model
        self.Actor, self.Critic = OurModel(input_shape=self.state_size,
                                           action_space=self.action_size,
                                           lr=self.lr)

        # make the predict functions usable across threads
        self.Actor._make_predict_function()
        self.Critic._make_predict_function()

        global graph
        graph = tf.get_default_graph()

    def act(self, state):
        # Use the network to predict the next action to take, using the model
        prediction = self.Actor.predict(state)[0]
        action = np.random.choice(self.action_size, p=prediction)
        return action

    def discount_rewards(self, reward):
        # Compute the gamma-discounted rewards over an episode
        gamma = 0.99  # discount rate
        running_add = 0
        discounted_r = np.zeros_like(reward)
        for i in reversed(range(0, len(reward))):
            if reward[i] != 0:  # reset the sum, since this was a game boundary (pong specific!)
                running_add = 0
            running_add = running_add * gamma + reward[i]
            discounted_r[i] = running_add

        discounted_r -= np.mean(discounted_r)  # normalizing the result
        discounted_r /= np.std(discounted_r)  # divide by standard deviation
        return discounted_r

    def replay(self, states, actions, rewards):
        # reshape memory to appropriate shape for training
        states = np.vstack(states)
        actions = np.vstack(actions)

        # Compute discounted rewards
        discounted_r = self.discount_rewards(rewards)

        # Get Critic network predictions
        value = self.Critic.predict(states)[:, 0]
        # Compute advantages
        advantages = discounted_r - value
        # training Actor and Critic networks
        self.Actor.fit(states,
                       actions,
                       sample_weight=advantages,
                       epochs=1,
                       verbose=0)
        self.Critic.fit(states, discounted_r, epochs=1, verbose=0)

    def load(self, Actor_name, Critic_name):
        self.Actor = load_model(Actor_name, compile=False)
        #self.Critic = load_model(Critic_name, compile=False)

    def save(self):
        self.Actor.save(self.Model_name + '_Actor.h5')
        #self.Critic.save(self.Model_name + '_Critic.h5')

    pylab.figure(figsize=(18, 9))

    def PlotModel(self, score, episode):
        self.scores.append(score)
        self.episodes.append(episode)
        self.average.append(sum(self.scores[-50:]) / len(self.scores[-50:]))
        if str(episode)[-2:] == "00":  # much faster than episode % 100
            pylab.plot(self.episodes, self.scores, 'b')
            pylab.plot(self.episodes, self.average, 'r')
            pylab.ylabel('Score', fontsize=18)
            pylab.xlabel('Steps', fontsize=18)
            try:
                pylab.savefig(self.path + ".png")
            except OSError:
                pass

        return self.average[-1]

    def imshow(self, image, rem_step=0):
        cv2.imshow(self.Model_name + str(rem_step), image[rem_step, ...])
        if cv2.waitKey(25) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            return

    def GetImage(self, frame, image_memory):
        if image_memory.shape == (1, *self.state_size):
            image_memory = np.squeeze(image_memory)

        # cropping frame to 80x80
        frame_cropped = frame[35:195:2, ::2, :]
        if frame_cropped.shape[0] != self.COLS or frame_cropped.shape[1] != self.ROWS:
            # OpenCV resize function
            frame_cropped = cv2.resize(frame, (self.COLS, self.ROWS),
                                       interpolation=cv2.INTER_CUBIC)

        # converting to grayscale via luma weights (numpy way)
        frame_rgb = (0.299 * frame_cropped[:, :, 0]
                     + 0.587 * frame_cropped[:, :, 1]
                     + 0.114 * frame_cropped[:, :, 2])

        # convert everything to black and white (agent will train faster)
        frame_rgb[frame_rgb < 100] = 0
        frame_rgb[frame_rgb >= 100] = 255
        # converting to grayscale (OpenCV way)
        #frame_rgb = cv2.cvtColor(frame_cropped, cv2.COLOR_RGB2GRAY)

        # dividing by 255 expresses the values in the 0-1 range
        new_frame = np.array(frame_rgb).astype(np.float32) / 255.0

        # shift the stored frames by 1, similar to how a deque works
        image_memory = np.roll(image_memory, 1, axis=0)

        # inserting new frame to free space
        image_memory[0, :, :] = new_frame

        # show image frame
        #self.imshow(image_memory,0)
        #self.imshow(image_memory,1)
        #self.imshow(image_memory,2)
        #self.imshow(image_memory,3)

        return np.expand_dims(image_memory, axis=0)

    def reset(self, env):
        image_memory = np.zeros(self.state_size)
        frame = env.reset()
        for i in range(self.REM_STEP):
            state = self.GetImage(frame, image_memory)
        return state

    def step(self, action, env, image_memory):
        next_state, reward, done, info = env.step(action)
        next_state = self.GetImage(next_state, image_memory)
        return next_state, reward, done, info

    def run(self):
        for e in range(self.EPISODES):
            state = self.reset(self.env)
            done, score, SAVING = False, 0, ''
            # Instantiate or reset games memory
            states, actions, rewards = [], [], []
            while not done:
                #self.env.render()
                # Actor picks an action
                action = self.act(state)
                # Retrieve new state, reward, and whether the state is terminal
                next_state, reward, done, _ = self.step(
                    action, self.env, state)
                # Memorize (state, action, reward) for training
                states.append(state)
                action_onehot = np.zeros([self.action_size])
                action_onehot[action] = 1
                actions.append(action_onehot)
                rewards.append(reward)
                # Update current state
                state = next_state
                score += reward
                if done:
                    average = self.PlotModel(score, e)
                    # saving best models
                    if average >= self.max_average:
                        self.max_average = average
                        self.save()
                        SAVING = "SAVING"
                    else:
                        SAVING = ""
                    print(
                        "episode: {}/{}, score: {}, average: {:.2f} {}".format(
                            e, self.EPISODES, score, average, SAVING))

                    self.replay(states, actions, rewards)
        # close the environment when training finishes
        self.env.close()

    def train(self, n_threads):
        self.env.close()
        # Instantiate one environment per thread
        envs = [gym.make(self.env_name) for i in range(n_threads)]

        # Create threads
        threads = [
            threading.Thread(target=self.train_threading,
                             daemon=True,
                             args=(self, envs[i], i)) for i in range(n_threads)
        ]

        for t in threads:
            time.sleep(2)
            t.start()

        for t in threads:
            time.sleep(10)
            t.join()

    def train_threading(self, agent, env, thread):
        global graph
        with graph.as_default():
            while self.episode < self.EPISODES:
                # Reset episode
                score, done, SAVING = 0, False, ''
                state = self.reset(env)
                # Instantiate or reset games memory
                states, actions, rewards = [], [], []
                while not done:
                    action = agent.act(state)
                    next_state, reward, done, _ = self.step(action, env, state)

                    states.append(state)
                    action_onehot = np.zeros([self.action_size])
                    action_onehot[action] = 1
                    actions.append(action_onehot)
                    rewards.append(reward)

                    score += reward
                    state = next_state

                self.lock.acquire()
                self.replay(states, actions, rewards)
                self.lock.release()

                # Update episode count
                with self.lock:
                    average = self.PlotModel(score, self.episode)
                    # saving best models
                    if average >= self.max_average:
                        self.max_average = average
                        self.save()
                        SAVING = "SAVING"
                    else:
                        SAVING = ""
                    print(
                        "episode: {}/{}, thread: {}, score: {}, average: {:.2f} {}"
                        .format(self.episode, self.EPISODES, thread, score,
                                average, SAVING))
                    if (self.episode < self.EPISODES):
                        self.episode += 1
            env.close()

    def test(self, Actor_name, Critic_name):
        self.load(Actor_name, Critic_name)
        for e in range(100):
            state = self.reset(self.env)
            done = False
            score = 0
            while not done:
                self.env.render()
                action = np.argmax(self.Actor.predict(state))
                state, reward, done, _ = self.step(action, self.env, state)
                score += reward
                if done:
                    print("episode: {}/{}, score: {}".format(
                        e, self.EPISODES, score))
                    break
        self.env.close()
Example #36
class TTIR(eg.IrDecoderPlugin):
    def __init__(self):
        eg.IrDecoderPlugin.__init__(self, 1)

    def __close__(self):
        self.irDecoder.Close()

    def __start__(self):
        self.dll = None
        self.hOpen = None
        pluginDir = os.path.abspath(os.path.dirname(__file__))
        dll = CDLL(os.path.join(pluginDir, "TTUSBIR.dll"))
        self.cCallback = IRCALLBACKFUNC(self.IrCallback)
        self.hOpen = dll.irOpen(0, USBIR_MODE_DIV, self.cCallback, 0)
        if self.hOpen == -1:
            raise self.Exceptions.DeviceNotFound
#        self.irGetUniqueCode = dll.ir_GetUniqueCode
#        self.irGetUniqueCode.restype  = DWORD
        self.dll = dll
        self.data = []
        self.timer = eg.ResettableTimer(self.OnTimeout)
        self.dataLock = Lock()
        self.lastTime = clock()
        self.startByte = 1
        self.dll.irSetPowerLED(self.hOpen, 0)
        self.ledTimer = eg.ResettableTimer(
            partial(self.dll.irSetPowerLED, self.hOpen, 0))

    def OnComputerSuspend(self, dummySuspendType):
        self.dll.irClose(self.hOpen)

    def OnComputerResume(self, dummySuspendType):
        self.hOpen = self.dll.irOpen(0, USBIR_MODE_DIV, self.cCallback, 0)
        if self.hOpen == -1:
            raise self.Exceptions.DeviceNotFound

    def __stop__(self):
        if self.dll is not None:
            self.dll.irClose(self.hOpen)
            self.dll = None
            self.hOpen = None
            self.cCallback = None
        self.timer.Stop()
        self.ledTimer.Stop()

    def IrCallback(self, context, buf, length, irMode, hOpen, devIdx):
        if irMode == USBIR_MODE_DIV:
            self.dll.irSetPowerLED(self.hOpen, 1)
            self.ledTimer.Reset(1)
            self.timer.Reset(80)
            #self.lastTime = clock()
            self.dataLock.acquire()
            #print "---", length / 4, clock() - self.lastTime
            append = self.data.append
            for i in xrange(self.startByte, min(length / 4, 500)):
                value = buf[i] & 0x00ffffff
                append(value)
                if value > 12000:
                    self.irDecoder.Decode(self.data, len(self.data))
                    self.data = []
                    append = self.data.append
            self.startByte = 0
            self.dataLock.release()

    def OnTimeout(self):
        if self.dataLock.acquire(0):
            data = self.data
            self.data = []
            self.startByte = 1
            self.dataLock.release()
            data.append(10000)
            self.irDecoder.Decode(data, len(data))
Example #37
class HuskyMapper:
    def __init__(self, num_rows, num_cols, meters_per_cell):
        rospy.init_node('occupancy_grid_mapper', anonymous=True)
        self.tf_listener = tf.TransformListener()

        self.odometry_position_noise_std_dev = rospy.get_param(
            "~odometry_position_noise_std_dev")
        self.odometry_orientation_noise_std_dev = rospy.get_param(
            "~odometry_orientation_noise_std_dev")

        og_origin_in_map_frame = np.array([-20, -10, 0])
        self.init_log_odds_ratio = 0  #log(0.5/0.5)
        self.ogm = OccupancyGridMap(num_rows, num_cols, meters_per_cell,
                                    og_origin_in_map_frame,
                                    self.init_log_odds_ratio)

        self.max_laser_range = None
        self.min_laser_range = None
        self.max_laser_angle = None
        self.min_laser_angle = None

        self.odometry = None

        # baselink is a frame on the husky robot that corresponds roughly to its body center
        # baselaser is a frame on the husky robot that corresponds to the base of the LiDAR sensor (the scanned points will be in this coordinate frame)
        # map is the frame corresponding to the global frame of reference

        # You need to convert points from the baselaser frame to the map frame for this assignment

        self.q_map_baselink = None  # 4x1 quaternion from husky_1/baselink to map frame
        self.R_map_baselink = None  # 3x3 rotation matrix from husky_1/baselink to map frame
        self.p_map_baselink = None  # 3x1 position of husky_1/baselink in map frame

        self.q_map_baselaser = None  # 4x1 quaternion from husky_1/baselaser to map frame
        self.R_map_baselaser = None  # 3x3 rotation matrix from husky_1/baselaser to map frame
        self.p_map_baselaser = None  # 3x1 position of husky_1/baselaser in map frame

        self.q_baselink_baselaser = np.array([1.0, 0, 0, 0])
        self.R_baselink_baselaser = tr.quaternion_matrix(
            self.q_baselink_baselaser)[0:3, 0:3]
        self.p_baselink_baselaser = np.array([0.337, 0.0, 0.308])

        self.mutex = Lock()

        self.occupancy_grid_pub = rospy.Publisher('/husky_1/occupancy_grid',
                                                  OccupancyGrid,
                                                  queue_size=1)
        self.laser_points_marker_pub = rospy.Publisher(
            '/husky_1/debug/laser_points', Marker, queue_size=1)
        self.robot_pose_pub = rospy.Publisher('/husky_1/debug/robot_pose',
                                              PoseStamped,
                                              queue_size=1)

        self.laser_sub = rospy.Subscriber('/husky_1/scan',
                                          LaserScan,
                                          self.laser_scan_callback,
                                          queue_size=1)
        self.odometry_sub = rospy.Subscriber('/husky_1/odometry/ground_truth',
                                             Odometry,
                                             self.odometry_callback,
                                             queue_size=1)

    def odometry_callback(self, msg):
        self.mutex.acquire()
        self.odometry = msg

        # Adds noise to the odometry position measurement according to the standard deviations specified as parameters in the launch file
        # We should have used noisy measurements from the Gazebo simulator, but it is more complicated to configure
        # so, we add random noise here ourselves, assuming perfect odometry from the simulator.
        self.odometry.pose.pose.position.x += random.gauss(
            0, self.odometry_position_noise_std_dev)
        self.odometry.pose.pose.position.y += random.gauss(
            0, self.odometry_position_noise_std_dev)

        #
        # TODO: populate the quaternion from the husky_1/base_link frame to the map frame
        #       based on the current odometry message. In order to know more about where
        #       these frames are located on the robot, run: rosrun rviz rviz and look at the TF
        #       widget. Pay attention to the following frames: husky_1/base_link which is at the center
        #       of the robot's body, husky_1/base_laser, which is the frame of the laser sensor,
        #       and map, which is where odometry messages are expressed in. In fact, odometry
        #       messages from the Husky are transformations from husky_1/base_link to map
        #
        #self.q_map_baselink = np.array([x, y, z, w])

        # Corrupting the quaternion with noise in yaw, because we have configured the simulator
        # to return noiseless orientation measurements.
        yaw_noise = random.gauss(
            0, self.odometry_orientation_noise_std_dev) * pi / 180.0
        q_truebaselink_noisybaselink = np.array(
            [0, 0, np.sin(yaw_noise),
             np.cos(yaw_noise)])
        self.q_map_baselink = tr.quaternion_multiply(
            self.q_map_baselink, q_truebaselink_noisybaselink)

        # Computes the rotation matrix from husky_1/base_link to map
        self.R_map_baselink = tr.quaternion_matrix(self.q_map_baselink)[0:3,
                                                                        0:3]

        #
        # TODO: populate the position of the husky_1/base_link frame in map frame
        #       coordinates based on the current odometry message
        #
        #
        #self.p_map_baselink = np.array([x, y, z])

        #
        # TODO: populate the quaternion from the frame husky_1/base_laser to the map frame
        #       note: you have access to the static quaternion from husky_1/base_laser to
        #       husky_1/base_link
        #self.q_map_baselaser = tr.quaternion_multiply(? , ?)

        #
        # TODO: populate the rotation matrix from the frame husky_1/base_laser to the map frame
        #       note: you have access to the static rotation matrix from husky_1/base_laser to
        #       husky_1/base_link
        #       also note: np.dot(A,B) multiplies numpy matrices A and B, whereas A*B is element-wise
        #       multiplication, which is not usually what you want
        #
        #self.R_map_baselaser = ?

        #
        # TODO: populate the origin of the frame husky_1/base_laser in coordinates of the map frame
        #       note: you have access to the static rotation matrix from husky_1/base_laser to
        #       husky_1/base_link and also to the origin of the husky_1/base_laser frame in coordinates of
        #       frame husky_1/base_link
        #       also note: np.dot(A,B) multiplies numpy matrices A and B, whereas A*B is element-wise
        #       multiplication, which is not usually what you want
        #
        #self.p_map_baselaser = ?

        self.mutex.release()

    def from_laser_to_map_coordinates(self, points_in_baselaser_frame):
        #
        # The robot's odometry is with respect to the map frame, but the points measured from
        # the laser are given with respect to the frame husky_1/base_laser. This function converts
        # the measured points in the laser scan from husky_1/base_laser to the map frame.
        #
        points_in_map_frame = [
            np.dot(self.R_map_baselaser, xyz_baselaser) + self.p_map_baselaser
            for xyz_baselaser in points_in_baselaser_frame
        ]
        return points_in_map_frame

    def is_in_field_of_view(self, robot_row, robot_col, robot_theta, row, col):
        # Returns true iff the cell (row, col) in the grid is in the field of view of the 2D laser of the
        # robot located at cell (robot_row, robot_col) and having yaw robot_theta in the map frame.
        # Useful things to know:
        # 1) self.ogm.meters_per_cell converts cell distances to metric distances
        # 2) atan2(y,x) gives the angle of the vector (x,y)
        # 3) atan2(sin(theta_1 - theta_2), cos(theta_1 - theta_2)) gives the angle difference between theta_1 and theta_2 in [-pi, pi]
        # 4) self.max_laser_range and self.max_laser_angle specify some of the limits of the laser sensor
        #
        # TODO: fill this
        #
        return False

    def inverse_measurement_model(self, row, col, robot_row, robot_col,
                                  robot_theta_in_map, beam_ranges,
                                  beam_angles):
        alpha = 0.1
        beta = 10 * pi / 180.0
        p_occupied = 0.999

        #
        # TODO: Find the range r and angle diff_angle of the beam (robot_row, robot_col) ------> (row, col)
        # r should be in meters and diff_angle should be in [-pi, pi]. Useful things to know are same as above.
        #
        #r = ?
        #diff_angle = ?

        # Index of the beam whose angle is closest to diff_angle
        min_angle_diff, closest_beam_idx = min(
            (abs(diff_angle - ba), idx) for idx, ba in enumerate(beam_angles))
        r_cb = beam_ranges[closest_beam_idx]
        theta_cb = beam_angles[closest_beam_idx]

        if r > min(self.max_laser_range, r_cb +
                   alpha / 2.0) or abs(diff_angle - theta_cb) > beta / 2.0:
            return self.init_log_odds_ratio

        if r_cb < self.max_laser_range and abs(r - r_cb) < alpha / 2.0:
            return log(p_occupied / (1 - p_occupied))

        if r <= r_cb:
            return log((1 - p_occupied) / p_occupied)

        return 0.0

    def laser_scan_callback(self, msg):
        self.mutex.acquire()

        self.min_laser_angle = msg.angle_min
        self.max_laser_angle = msg.angle_max
        self.min_laser_range = msg.range_min
        self.max_laser_range = msg.range_max

        if self.odometry is None:
            # ignore the laser message if no odometry has been received
            self.mutex.release()
            return

        N = len(msg.ranges)

        ranges_in_baselaser_frame = msg.ranges
        angles_in_baselaser_frame = [
            (msg.angle_max - msg.angle_min) * float(i) / N + msg.angle_min
            for i in xrange(len(msg.ranges))
        ]
        angles_in_baselink_frame = angles_in_baselaser_frame[::-1]
        # This is because the z-axis of husky_1/base_laser is pointing downwards, while for husky_1/base_link and the map frame
        # the z-axis points upwards

        points_xyz_in_baselaser_frame = [
            np.array([r * cos(theta), r * sin(theta), 0])
            for (r, theta
                 ) in zip(ranges_in_baselaser_frame, angles_in_baselaser_frame)
            if r < self.max_laser_range and r > self.min_laser_range
        ]

        points_xyz_in_map_frame = self.from_laser_to_map_coordinates(
            points_xyz_in_baselaser_frame)

        baselaser_x_in_map = self.p_map_baselaser[0]
        baselaser_y_in_map = self.p_map_baselaser[1]
        baselaser_row, baselaser_col = self.ogm.cartesian_to_grid_coords(
            baselaser_x_in_map, baselaser_y_in_map)
        _, _, yaw_map_baselaser = tr.euler_from_quaternion(
            self.q_map_baselaser)
        _, _, yaw_map_baselink = tr.euler_from_quaternion(self.q_map_baselink)

        # Publishing the pose of the robot as a red arrow in rviz to help you debug
        ps = self._get_pose_marker(msg.header.stamp, 'map',
                                   self.p_map_baselaser, self.q_map_baselaser)

        # Publishing the points of the laser transformed into the map frame, as green points in rviz, to help you debug
        pts_marker = self._get_2d_laser_points_marker(msg.header.stamp, 'map',
                                                      points_xyz_in_map_frame)
        self.mutex.release()

        self.robot_pose_pub.publish(ps)
        self.laser_points_marker_pub.publish(pts_marker)

        #
        # This is the main loop in occupancy grid mapping
        #
        max_laser_range_in_cells = int(
            self.max_laser_range / self.ogm.meters_per_cell) + 1
        for delta_row in xrange(-max_laser_range_in_cells,
                                max_laser_range_in_cells):
            for delta_col in xrange(-max_laser_range_in_cells,
                                    max_laser_range_in_cells):
                row = baselaser_row + delta_row
                col = baselaser_col + delta_col

                if row < 0 or row >= self.ogm.num_rows or col < 0 or col >= self.ogm.num_cols:
                    continue

                if self.is_in_field_of_view(baselaser_row, baselaser_col,
                                            yaw_map_baselink, row, col):
                    delta_log_odds = self.inverse_measurement_model(
                        row, col, baselaser_row, baselaser_col,
                        yaw_map_baselaser, ranges_in_baselaser_frame,
                        angles_in_baselink_frame) - self.init_log_odds_ratio

                    self.ogm.update_log_odds_ratio_in_grid_coords(
                        row, col, delta_log_odds)

        self.occupancy_grid_pub.publish(
            self.ogm.get_map_as_ros_msg(msg.header.stamp, 'map'))

    def _get_2d_laser_points_marker(self, timestamp, frame_id, pts_in_map):
        msg = Marker()
        msg.header.stamp = timestamp
        msg.header.frame_id = frame_id
        msg.ns = 'laser_points'
        msg.id = 0
        msg.type = 6
        msg.action = 0
        msg.points = [Point(pt[0], pt[1], pt[2]) for pt in pts_in_map]
        msg.colors = [ColorRGBA(0, 1.0, 0, 1.0) for pt in pts_in_map]

        for pt in pts_in_map:
            assert ((not np.isnan(pt).any()) and np.isfinite(pt).all())

        msg.scale.x = 0.1
        msg.scale.y = 0.1
        msg.scale.z = 0.1
        return msg

    def _get_pose_marker(self, timestamp, frame_id, p, q):
        ps = PoseStamped()
        ps.pose.position.x = p[0]
        ps.pose.position.y = p[1]
        ps.pose.position.z = p[2]
        ps.pose.orientation.x = q[0]
        ps.pose.orientation.y = q[1]
        ps.pose.orientation.z = q[2]
        ps.pose.orientation.w = q[3]
        ps.header.stamp = timestamp
        ps.header.frame_id = frame_id
        return ps

    def run(self):
        rate = rospy.Rate(200)
        while not rospy.is_shutdown():
            rate.sleep()
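For reference, one possible sketch of the is_in_field_of_view check left as a TODO above. This is not the assignment's reference solution; it assumes grid rows map to y and columns to x, and that the laser's field of view is symmetric up to self.max_laser_angle:

from math import atan2, sin, cos, sqrt

def is_in_field_of_view(self, robot_row, robot_col, robot_theta, row, col):
    # Metric offset from the robot's cell to the queried cell (hint 1)
    dx = (col - robot_col) * self.ogm.meters_per_cell
    dy = (row - robot_row) * self.ogm.meters_per_cell
    r = sqrt(dx * dx + dy * dy)
    if r > self.max_laser_range:
        return False
    # Bearing of the cell relative to the robot's heading,
    # wrapped into [-pi, pi] as in hints 2) and 3)
    diff = atan2(dy, dx) - robot_theta
    diff = atan2(sin(diff), cos(diff))
    return abs(diff) <= self.max_laser_angle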
Example #38
class BundleRegistry(GObject.GObject):
    """Tracks the available activity bundles"""

    __gsignals__ = {
        'bundle-added':
        (GObject.SignalFlags.RUN_FIRST, None, ([GObject.TYPE_PYOBJECT])),
        'bundle-removed':
        (GObject.SignalFlags.RUN_FIRST, None, ([GObject.TYPE_PYOBJECT])),
        'bundle-changed':
        (GObject.SignalFlags.RUN_FIRST, None, ([GObject.TYPE_PYOBJECT])),
    }

    def __init__(self):
        logging.debug('STARTUP: Loading the bundle registry')
        GObject.GObject.__init__(self)

        self._mime_defaults = self._load_mime_defaults()

        # Queue of bundles to be installed/upgraded
        self._install_queue = _InstallQueue(self)

        # Bundle installation happens in a separate thread, which needs
        # access to _bundles. Protect all _bundles access with a lock.
        self._lock = Lock()
        self._bundles = []

        # hold a reference to the monitors so they don't get disposed
        self._gio_monitors = []

        dirs = [env.get_user_activities_path(), env.get_user_library_path()]

        for data_dir in GLib.get_system_data_dirs():
            dirs.append(os.path.join(data_dir, "sugar", "activities"))

        for activity_dir in dirs:
            self._scan_directory(activity_dir)
            directory = Gio.File.new_for_path(activity_dir)
            monitor = directory.monitor_directory(
                flags=Gio.FileMonitorFlags.NONE, cancellable=None)
            monitor.connect('changed', self.__file_monitor_changed_cb)
            self._gio_monitors.append(monitor)

        self._favorite_bundles = []
        for i in range(desktop.get_number_of_views()):
            self._favorite_bundles.append({})

        settings = Gio.Settings('org.sugarlabs')
        self._protected_activities = settings.get_strv('protected-activities')

        try:
            self._load_favorites()
        except Exception:
            logging.exception('Error while loading favorite_activities.')

        self._hidden_activities = []
        self._load_hidden_activities()

        self._convert_old_favorites()
        self._scan_new_favorites()

        self._desktop_model = desktop.get_model()
        self._desktop_model.connect('desktop-view-icons-changed',
                                    self.__desktop_view_icons_changed_cb)

    def __desktop_view_icons_changed_cb(self, model):
        number_of_views = desktop.get_number_of_views()
        if len(self._favorite_bundles) < number_of_views:
            for i in range(number_of_views - len(self._favorite_bundles)):
                self._favorite_bundles.append({})
        try:
            self._load_favorites()
        except Exception:
            logging.exception('Error while loading favorite_activities.')

    def __file_monitor_changed_cb(self, monitor, one_file, other_file,
                                  event_type):
        if event_type == Gio.FileMonitorEvent.CREATED or \
           event_type == Gio.FileMonitorEvent.ATTRIBUTE_CHANGED:
            self.add_bundle(one_file.get_path(), set_favorite=True)
        elif event_type == Gio.FileMonitorEvent.DELETED:
            self.remove_bundle(one_file.get_path())
            for root in GLib.get_system_data_dirs():
                root = os.path.join(root, 'sugar', 'activities')

                try:
                    os.listdir(root)
                except OSError:
                    logging.debug('Can not find GLib system dir %s', root)
                    continue
                activity_dir = os.path.basename(one_file.get_path())
                try:
                    bundle = bundle_from_dir(os.path.join(root, activity_dir))
                except MalformedBundleException:
                    continue

                if bundle is not None:
                    path = bundle.get_path()
                    if path is not None:
                        self.add_bundle(path)

    def _load_mime_defaults(self):
        defaults = {}

        with open(os.environ["SUGAR_MIME_DEFAULTS"], 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    mime = line[:line.find(' ')]
                    handler = line[line.rfind(' ') + 1:]
                    defaults[mime] = handler

        return defaults

    def _get_favorite_key(self, bundle_id, version):
        """We use a string as a composite key for the favorites dictionary
        because JSON doesn't support tuples and python won't accept a list
        as a dictionary key.
        """
        if ' ' in bundle_id:
            raise ValueError('bundle_id cannot contain spaces')
        return '%s %s' % (bundle_id, version)

    def _load_favorites(self):
        for i in range(desktop.get_number_of_views()):
            # Special-case 0 for backward compatibility
            if i == 0:
                favorites_path = env.get_profile_path('favorite_activities')
            else:
                favorites_path = env.get_profile_path(
                    'favorite_activities_%d' % (i))
            if os.path.exists(favorites_path):
                with open(favorites_path) as f:
                    favorites_data = json.load(f)

                favorite_bundles = favorites_data['favorites']
                if not isinstance(favorite_bundles, dict):
                    raise ValueError('Invalid format in %s.' % favorites_path)
                if favorite_bundles:
                    first_key = list(favorite_bundles.keys())[0]
                    if not isinstance(first_key, str):
                        raise ValueError('Invalid format in %s.' %
                                         favorites_path)

                    first_value = list(favorite_bundles.values())[0]
                    if first_value is not None and \
                       not isinstance(first_value, dict):
                        raise ValueError('Invalid format in %s.' %
                                         favorites_path)

                self._favorite_bundles[i] = favorite_bundles

    def _load_hidden_activities(self):
        path = os.environ.get('SUGAR_ACTIVITIES_HIDDEN', None)
        try:
            with open(path) as file:
                for line in file.readlines():
                    bundle_id = line.strip()
                    if bundle_id:
                        self._hidden_activities.append(bundle_id)
        except IOError:
            logging.error('Error when loading hidden activities %s', path)

    def _convert_old_favorites(self):
        for i in range(desktop.get_number_of_views()):
            for key in list(self._favorite_bundles[i].keys()):
                data = self._favorite_bundles[i][key]
                if data is None:
                    data = {}
                if 'favorite' not in data:
                    data['favorite'] = True
                self._favorite_bundles[i][key] = data
            self._write_favorites_file(i)

    def _scan_new_favorites(self):
        for bundle in self:
            bundle_id = bundle.get_bundle_id()
            key = self._get_favorite_key(bundle_id,
                                         bundle.get_activity_version())
            if key not in self._favorite_bundles[_DEFAULT_VIEW]:
                self._favorite_bundles[_DEFAULT_VIEW][key] = \
                    {'favorite': bundle_id not in self._hidden_activities}
        self._write_favorites_file(_DEFAULT_VIEW)

    def get_bundle(self, bundle_id):
        """Returns an bundle given his service name"""
        with self._lock:
            for bundle in self._bundles:
                if bundle.get_bundle_id() == bundle_id:
                    return bundle
        return None

    def __iter__(self):
        with self._lock:
            copy = list(self._bundles)
        return copy.__iter__()

    def __len__(self):
        with self._lock:
            return len(self._bundles)

    def _scan_directory(self, path):
        if not os.path.isdir(path):
            return

        # Sort by mtime to ensure a stable activity order
        bundles = {}
        for f in os.listdir(path):
            try:
                bundle_dir = os.path.join(path, f)
                if os.path.isdir(bundle_dir):
                    bundles[bundle_dir] = os.stat(bundle_dir).st_mtime
            except Exception:
                logging.exception(
                    'Error while processing installed activity'
                    ' bundle %s:', bundle_dir)

        bundle_dirs = list(bundles.keys())
        bundle_dirs.sort(key=lambda x: bundles[x])
        for folder in bundle_dirs:
            try:
                self.add_bundle(folder, emit_signals=False)
            except:
                # pylint: disable=W0702
                logging.exception(
                    'Error while processing installed activity'
                    ' bundle %s:', folder)

    def add_bundle(self,
                   bundle_path,
                   set_favorite=False,
                   emit_signals=True,
                   force_downgrade=False):
        """
        Add a bundle to the registry.
        If the bundle is a duplicate with one already in the registry,
        the existing one from the registry is returned.
        Otherwise, the newly added bundle is returned on success, or None on
        failure.
        """
        try:
            bundle = bundle_from_dir(bundle_path)
        except MalformedBundleException:
            logging.exception('Error loading bundle %r', bundle_path)
            return None

        # None is a valid return value from bundle_from_dir helper.
        if bundle is None:
            logging.error('No bundle in %r', bundle_path)
            return None

        bundle_id = bundle.get_bundle_id()
        logging.debug('STARTUP: Adding bundle %s', bundle_id)
        installed = self.get_bundle(bundle_id)

        if installed is not None:
            if NormalizedVersion(installed.get_activity_version()) == \
                    NormalizedVersion(bundle.get_activity_version()):
                logging.debug("Bundle already known")
                return installed
            if not force_downgrade and \
                    NormalizedVersion(installed.get_activity_version()) >= \
                    NormalizedVersion(bundle.get_activity_version()):
                logging.debug('Skip old version for %s', bundle_id)
                return None
            else:
                logging.debug('Upgrade %s', bundle_id)
                self.remove_bundle(installed.get_path(), emit_signals)

        if set_favorite:
            favorite = not self.is_bundle_hidden(bundle.get_bundle_id(),
                                                 bundle.get_activity_version())
            self._set_bundle_favorite(bundle.get_bundle_id(),
                                      bundle.get_activity_version(), favorite)

        with self._lock:
            self._bundles.append(bundle)
        if emit_signals:
            self.emit('bundle-added', bundle)
        return bundle

    def remove_bundle(self, bundle_path, emit_signals=True):
        removed = None
        self._lock.acquire()
        for bundle in self._bundles:
            if bundle.get_path() == bundle_path:
                self._bundles.remove(bundle)
                removed = bundle
                break
        self._lock.release()

        if emit_signals and removed is not None:
            self.emit('bundle-removed', removed)
        return removed is not None

    def get_activities_for_type(self, mime_type):
        result = []

        mime = mimeregistry.get_registry()
        default_bundle_id = mime.get_default_activity(mime_type)
        default_bundle = None

        for bundle in self:
            if not isinstance(bundle, ActivityBundle):
                continue
            if mime_type in (bundle.get_mime_types() or []):
                if bundle.get_bundle_id() == default_bundle_id:
                    default_bundle = bundle
                elif self.get_default_for_type(mime_type) == \
                        bundle.get_bundle_id():
                    result.insert(0, bundle)
                else:
                    result.append(bundle)

        if default_bundle is not None:
            result.insert(0, default_bundle)

        return result

    def get_default_for_type(self, mime_type):
        return self._mime_defaults.get(mime_type)

    def _find_bundle(self, bundle_id, version):
        with self._lock:
            for bundle in self._bundles:
                if bundle.get_bundle_id() == bundle_id and \
                        bundle.get_activity_version() == version:
                    return bundle
        raise ValueError('No bundle %r with version %r exists.' %
                         (bundle_id, version))

    def set_bundle_favorite(self,
                            bundle_id,
                            version,
                            favorite,
                            favorite_view=0):
        changed = self._set_bundle_favorite(bundle_id, version, favorite,
                                            favorite_view)
        if changed:
            bundle = self._find_bundle(bundle_id, version)
            self.emit('bundle-changed', bundle)

    def _set_bundle_favorite(self,
                             bundle_id,
                             version,
                             favorite,
                             favorite_view=0):
        key = self._get_favorite_key(bundle_id, version)
        if key not in self._favorite_bundles[favorite_view]:
            self._favorite_bundles[favorite_view][key] = {}
        elif favorite == \
                self._favorite_bundles[favorite_view][key]['favorite']:
            return False
        self._favorite_bundles[favorite_view][key]['favorite'] = favorite
        self._write_favorites_file(favorite_view)
        return True

    def is_bundle_favorite(self, bundle_id, version, favorite_view=0):
        key = self._get_favorite_key(bundle_id, version)
        if key not in self._favorite_bundles[favorite_view]:
            return False
        return self._favorite_bundles[favorite_view][key]['favorite']

    def is_bundle_hidden(self, bundle_id, version):
        key = self._get_favorite_key(bundle_id, version)
        if key in self._favorite_bundles[_DEFAULT_VIEW]:
            data = self._favorite_bundles[_DEFAULT_VIEW][key]
            return data['favorite'] is False
        else:
            return bundle_id in self._hidden_activities

    def is_activity_protected(self, bundle_id):
        return bundle_id in self._protected_activities

    def set_bundle_position(self, bundle_id, version, x, y, favorite_view=0):
        key = self._get_favorite_key(bundle_id, version)
        if key not in self._favorite_bundles[favorite_view]:
            raise ValueError('Bundle %s %s not favorite' %
                             (bundle_id, version))

        if 'position' not in self._favorite_bundles[favorite_view][key] or \
                [x, y] != \
                self._favorite_bundles[favorite_view][key]['position']:
            self._favorite_bundles[favorite_view][key]['position'] = [x, y]
        else:
            return

        self._write_favorites_file(favorite_view)
        bundle = self._find_bundle(bundle_id, version)
        self.emit('bundle-changed', bundle)

    def get_bundle_position(self, bundle_id, version, favorite_view=0):
        """Get the coordinates where the user wants the representation of this
        bundle to be displayed. Coordinates are relative to a 1000x1000 area.
        """
        key = self._get_favorite_key(bundle_id, version)
        if key not in self._favorite_bundles[favorite_view] or \
                'position' not in self._favorite_bundles[favorite_view][key]:
            return (-1, -1)
        else:
            return \
                tuple(self._favorite_bundles[favorite_view][key]['position'])

    def _write_favorites_file(self, favorite_view):
        if favorite_view == 0:
            path = env.get_profile_path('favorite_activities')
        else:
            path = env.get_profile_path('favorite_activities_%d' %
                                        (favorite_view))
        favorites_data = {'favorites': self._favorite_bundles[favorite_view]}
        with open(path, 'w') as f:
            json.dump(favorites_data, f, indent=1)

    def is_installed(self, bundle):
        for installed_bundle in self:
            if bundle.get_bundle_id() == installed_bundle.get_bundle_id() and \
                    NormalizedVersion(bundle.get_activity_version()) == \
                    NormalizedVersion(installed_bundle.get_activity_version()):
                return True
        return False

    def install(self, bundle, force_downgrade=False):
        """
        Install a bundle, upgrading or optionally downgrading any existing
        version.

        If the same version of the bundle is already installed, this function
        returns False without doing anything. If the installation succeeded,
        True is returned.

        By default, downgrades will be refused (AlreadyInstalledException will
        be raised) but the force_downgrade flag can override that behaviour
        and cause the downgrade to happen.

        The bundle is installed in the user activity directory.
        System-installed activities cannot be upgraded/downgraded; in such
        case, the bundle will be installed as a duplicate in the user
        activity directory.

        RegistrationException is raised if the bundle cannot be registered
        after it is installed.
        """
        result = [None]
        self.install_async(bundle, self._sync_install_cb, result,
                           force_downgrade)
        while result[0] is None:
            Gtk.main_iteration()

        if isinstance(result[0], Exception):
            raise result[0]
        return result[0]

    def _sync_install_cb(self, bundle, result, user_data):
        # Async callback for install()
        user_data[0] = result

    def install_async(self,
                      bundle,
                      callback,
                      user_data,
                      force_downgrade=False):
        """
        Asynchronous version of install().
        The result of the installation is presented to a user-defined callback
        with the following parameters:
          1. The bundle that passed to this method
          2. The result of the operation (True, False, or an Exception -
             see the install() docs)
          3. The user_data passed to this method

        The callback is always invoked from main-loop context.
        """
        self._install_queue.enqueue(bundle, force_downgrade,
                                    self._bundle_installed_cb,
                                    [callback, user_data])

    def _bundle_installed_cb(self, bundle, result, data):
        """
        Completion handler for the bundle Install thread.
        Called in main loop context. Finishes registration and invokes user
        callback as necessary.
        """
        callback = data[0]
        user_data = data[1]

        # Installation didn't happen, or error?
        if isinstance(result, Exception) or result is False:
            callback(bundle, result, user_data)
            return

        if bundle.get_bundle_id() is not None:
            registered = self.add_bundle(result,
                                         set_favorite=True,
                                         force_downgrade=True)
            if registered is None:
                callback(bundle, RegistrationException(), user_data)
                return

        callback(bundle, True, user_data)

    def uninstall(self, bundle, force=False, delete_profile=False):
        """
        Uninstall a bundle.

        If a different version of bundle is found in the activity registry,
        this function does nothing unless force is True.

        If the bundle is not found in the activity registry at all,
        this function simply returns.
        """
        act = self.get_bundle(bundle.get_bundle_id())
        if not act:
            logging.debug("Bundle is not installed")
            return

        if not force and \
                act.get_activity_version() != bundle.get_activity_version():
            logging.warning('Not uninstalling, different bundle present')
            return

        if not act.is_user_activity():
            logging.debug('Do not uninstall system activity')
            return

        install_path = act.get_path()
        bundle.uninstall(force, delete_profile)
        self.remove_bundle(install_path)

        alt_bundles = self.get_system_bundles(act.get_bundle_id())
        if alt_bundles:
            alt_bundles.sort(
                key=lambda b: NormalizedVersion(b.get_activity_version()))
            alt_bundles.reverse()
            new_bundle = alt_bundles[0]
            self.add_bundle(new_bundle.get_path())

    def get_system_bundles(self, bundle_id):
        """
        Searches for system bundles (e.g. those in /usr/share/sugar/activities)
        with a given bundle id.

        Params:
            * bundle_id (string):  the bundle id to look for

        Returns a list of ActivityBundle or ContentBundle objects, or an empty
        list if there are none found.
        """
        bundles = []
        for root in GLib.get_system_data_dirs():
            root = os.path.join(root, 'sugar', 'activities')

            try:
                dir_list = os.listdir(root)
            except OSError:
                logging.debug('Can not find GLib system dir %s', root)
                continue

            for activity_dir in dir_list:
                try:
                    bundle = bundle_from_dir(os.path.join(root, activity_dir))
                except MalformedBundleException:
                    continue

                if bundle.get_bundle_id() == bundle_id:
                    bundles.append(bundle)
        return bundles
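The __iter__ and __len__ methods above show a common snapshot pattern: copy the shared list while holding the lock, then iterate the copy outside the critical section so slow consumers never block the installer thread. A condensed standalone sketch:

from threading import Lock

class SnapshotList(object):
    def __init__(self):
        self._lock = Lock()
        self._items = []

    def add(self, item):
        with self._lock:
            self._items.append(item)

    def __iter__(self):
        # Copy under the lock; iterate the copy without holding it.
        with self._lock:
            snapshot = list(self._items)
        return iter(snapshot)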
Example #39
            if self.stop_event.is_set():
                continue

            while data != "":
                ((message_id, ), data) = unpack_helper('<B', data)

                if message_id == MSG_LEAVE:
                    print("[CHAT] %s has left the chat" % self.name)
                    peers_lock.acquire()

                    try:
                        del peers[self.guid]

                    finally:
                        peers_lock.release()

                elif message_id == MSG_CHAT_MESSAGE:
                    # Read all data for this message
                    ((chat_len, ), data) = unpack_helper('<I', data)
                    ((message, ),
                     data) = unpack_helper('<%ds' % chat_len, data)
                    print("[CHAT] %s says: %s" % (self.name, message))

                elif message_id == MSG_JOIN_NETWORK:
                    ((guid_len, ), data) = unpack_helper('<I', data)
                    ((guid, ), data) = unpack_helper('<%ds' % guid_len, data)
                    ((hostname_len, ), data) = unpack_helper('<I', data)
                    ((hostname, ),
                     data) = unpack_helper('<%ds' % hostname_len, data)
                    ((name_len, ), data) = unpack_helper('<I', data)
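The MSG_LEAVE branch above releases peers_lock in a try/finally so an unexpected KeyError cannot leave the lock held; an equivalent, slightly safer form uses the lock as a context manager (a sketch assuming the same peers/peers_lock globals):

with peers_lock:
    peers.pop(self.guid, None)  # pop() with a default also tolerates a repeated leave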
Example #40
class _InstallQueue(object):
    """
    A class to represent a queue of bundles to be installed, and to handle
    execution of each task in the queue. Only for internal bundleregistry use.

    The use of a queue means that we serialize all bundle upgrade processing.
    This is necessary to avoid many difficult corner-cases like: what happens
    if two users try to asynchronously and simultaneously install different
    versions of the same bundle?

    We maintain at maximum one thread to do the actual bundle install. When
    done, the thread enqueues a callback in the main thread (via the GLib
    main loop).
    """
    def __init__(self, registry):
        self._lock = Lock()
        self._queue = []
        self._thread_running = False
        self._registry = registry

    def enqueue(self, bundle, force_downgrade, callback, user_data):
        task = _InstallTask(bundle, force_downgrade, callback, user_data)
        self._lock.acquire()
        self._queue.append(task)
        if not self._thread_running:
            self._thread_running = True
            Thread(target=self._thread_func).start()
        self._lock.release()

    def _thread_func(self):
        while True:
            self._lock.acquire()
            if len(self._queue) == 0:
                self._thread_running = False
                self._lock.release()
                return

            task = self._queue.pop()  # note: pop() takes the most recently queued task (LIFO)
            self._lock.release()

            self._do_work(task)

    def _do_work(self, task):
        bundle = task.bundle
        bundle_id = bundle.get_bundle_id()
        act = self._registry.get_bundle(bundle_id)
        logging.debug("InstallQueue task %s installed %r", bundle_id, act)

        if act:
            # Same version already installed?
            if act.get_activity_version() == bundle.get_activity_version():
                logging.debug('No upgrade needed, same version already '
                              'installed.')
                task.queue_callback(False)
                return

            # Would this new installation be a downgrade?
            if NormalizedVersion(bundle.get_activity_version()) <= \
                    NormalizedVersion(act.get_activity_version()) \
                    and not task.force_downgrade:
                task.queue_callback(AlreadyInstalledException())
                return

            # Uninstall the previous version, if we can
            if act.is_user_activity():
                try:
                    act.uninstall()
                except:
                    logging.exception('Uninstall failed, still trying to '
                                      'install newer bundle')
            else:
                logging.warning('Unable to uninstall system activity, '
                                'installing upgraded version in user '
                                'activities')

        try:
            task.queue_callback(bundle.install())
        except Exception as e:
            logging.debug("InstallThread install failed: %r", e)
            task.queue_callback(e)
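The core of _InstallQueue is a lazily started, at-most-one worker thread: enqueue() spawns the thread only if none is running, and the worker clears the running flag while still holding the lock before exiting, so a concurrent enqueue() either sees the flag cleared and starts a new worker, or gets its task picked up by the one about to exit. A minimal standalone sketch of that pattern:

from threading import Lock, Thread

class SingleWorkerQueue(object):
    def __init__(self):
        self._lock = Lock()
        self._queue = []
        self._running = False

    def enqueue(self, task):
        with self._lock:
            self._queue.append(task)
            if not self._running:
                self._running = True
                Thread(target=self._worker).start()

    def _worker(self):
        while True:
            with self._lock:
                if not self._queue:
                    # Clearing the flag under the lock closes the race
                    # with a concurrent enqueue().
                    self._running = False
                    return
                task = self._queue.pop(0)
            task()  # run the task outside the lock

Running each task outside the lock keeps a long install from blocking enqueue() on other threads.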
Example #41
class LeagueMgrAPIs(object):
    def __init__(self, league_mgr_addr):
        self._zmq_context = zmq.Context()
        self._zmq_context.setsockopt(zmq.TCP_KEEPALIVE, 1)
        self._zmq_context.setsockopt(zmq.TCP_KEEPALIVE_CNT, 60)
        self._zmq_context.setsockopt(zmq.TCP_KEEPALIVE_IDLE, 60)
        self._zmq_context.setsockopt(zmq.TCP_KEEPALIVE_INTVL, 60)
        self._socket = self._zmq_context.socket(zmq.REQ)
        ip, port = league_mgr_addr.split(':')
        self._socket.connect("tcp://%s:%s" % (ip, port))
        self._req_lock = Lock()

    def request_actor_task(self, actor_id, learner_id):
        self._req_lock.acquire()
        while True:
            self._socket.send_string("request_actor_task", zmq.SNDMORE)
            self._socket.send_pyobj((actor_id, learner_id))
            task = self._socket.recv_pyobj()
            if not isinstance(task, LeagueMgrErroMsg):
                break
            time.sleep(10)
        self._req_lock.release()
        return task

    def request_learner_task(self, learner_id):
        self._req_lock.acquire()
        while True:
            self._socket.send_string("request_learner_task", zmq.SNDMORE)
            self._socket.send_pyobj(learner_id)
            task = self._socket.recv_pyobj()
            if not isinstance(task, LeagueMgrErroMsg):
                break
            time.sleep(1)
        self._req_lock.release()
        return task

    def query_learner_task(self, learner_id):
        self._req_lock.acquire()
        self._socket.send_string("query_learner_task", zmq.SNDMORE)
        self._socket.send_pyobj(learner_id)
        task = self._socket.recv_pyobj()
        self._req_lock.release()
        if isinstance(task, LeagueMgrErroMsg):
            return None
        else:
            return task

    def notify_actor_task_begin(self, actor_id):
        self._req_lock.acquire()
        self._socket.send_string("notify_actor_task_begin", zmq.SNDMORE)
        self._socket.send_pyobj(actor_id)
        assert self._socket.recv_string() == "ok"
        self._req_lock.release()

    def notify_actor_task_end(self, actor_id, match_result):
        self._req_lock.acquire()
        self._socket.send_string("notify_actor_task_end", zmq.SNDMORE)
        self._socket.send_pyobj((actor_id, match_result))
        assert self._socket.recv_string() == "ok"
        self._req_lock.release()

    def notify_learner_task_begin(self, learner_id, learner_task):
        self._req_lock.acquire()
        self._socket.send_string("notify_learner_task_begin", zmq.SNDMORE)
        self._socket.send_pyobj((learner_id, learner_task))
        assert self._socket.recv_string() == "ok"
        self._req_lock.release()

    def notify_learner_task_end(self, learner_id):
        self._req_lock.acquire()
        self._socket.send_string("notify_learner_task_end", zmq.SNDMORE)
        self._socket.send_pyobj(learner_id)
        assert self._socket.recv_string() == "ok"
        self._req_lock.release()

    def request_add_model(self, model):
        self._req_lock.acquire()
        self._socket.send_string("request_add_model", zmq.SNDMORE)
        self._socket.send_pyobj(model)
        assert self._socket.recv_string() == "ok"
        self._req_lock.release()
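zmq sockets are not thread-safe, and a REQ socket additionally enforces a strict send/recv alternation, which is why every call above wraps the whole round-trip in _req_lock. A condensed sketch of the same idea (class and method names are illustrative):

from threading import Lock
import zmq

class LockedReqClient(object):
    def __init__(self, addr):
        self._ctx = zmq.Context()
        self._socket = self._ctx.socket(zmq.REQ)
        self._socket.connect(addr)
        self._lock = Lock()

    def request(self, command, payload):
        # One lock around the full send/recv pair keeps the REQ socket's
        # strict request/reply state machine consistent across threads.
        with self._lock:
            self._socket.send_string(command, zmq.SNDMORE)
            self._socket.send_pyobj(payload)
            return self._socket.recv_pyobj()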
Example #42
class Downloader(object):
    @property
    def state(self):
        return self._state

    @property
    def connections(self):
        return len([
            c for c in self._download_info["parts"] if c["status"] in
            [self.states.downloading, self.states.connecting]
        ]), self._max_connections

    @property
    def downloaded(self):
        return self.__change_units__(
            sum([
                c["current"] - c["start"] for c in self._download_info["parts"]
            ]))

    @property
    def average_speed(self):
        return self.__change_units__(self._average_speed)

    @property
    def speed(self):
        return self.__change_units__(self._speed)

    @property
    def remaining_time(self):
        if self.speed[0] and self._file_size:
            t = old_div((self.size[0] - self.downloaded[0]), self.speed[0])
        else:
            t = 0

        return time.strftime("%H:%M:%S", time.gmtime(t))

    @property
    def download_url(self):
        return self.url

    @property
    def size(self):
        return self.__change_units__(self._file_size)

    @property
    def progress(self):
        if self._file_size:
            return float(self.downloaded[0]) * 100 / float(self._file_size)
        elif self._state == self.states.completed:
            return 100
        else:
            return 0

    @property
    def filename(self):
        return self._filename

    @property
    def fullpath(self):
        return os.path.abspath(filetools.join(self._path, self._filename))

    # Functions
    def start_dialog(self, title=config.get_localized_string(60200)):
        from platformcode import platformtools
        progreso = platformtools.dialog_progress_bg(
            title, config.get_localized_string(60201))
        try:
            self.start()
            while self.state == self.states.downloading:
                time.sleep(0.2)
                line1 = "%s" % (self.filename)
                line2 = config.get_localized_string(59983) % (
                    self.downloaded[1], self.downloaded[2], self.size[1],
                    self.size[2], self.speed[1], self.speed[2],
                    self.connections[0], self.connections[1])
                line3 = config.get_localized_string(60202) % (
                    self.remaining_time)

                progreso.update(int(self.progress), line1, line2 + line3)
                self.__update_json()
        finally:
            progreso.close()

    def start(self):
        self.__update_json(started=False)
        if self._state == self.states.error: return
        conns = []
        for x in range(self._max_connections):
            try:
                conns.append(self.__open_connection__("0", ""))
            except:
                self._max_connections = x
                self._threads = [
                    Thread(target=self.__start_part__,
                           name="Downloader %s/%s" %
                           (x + 1, self._max_connections))
                    for x in range(self._max_connections)
                ]
                break
        del conns
        self._start_time = time.time() - 1
        self._state = self.states.downloading
        self._speed_thread.start()
        self._save_thread.start()

        for t in self._threads:
            t.start()

    def stop(self, erase=False):
        if self._state == self.states.downloading:
            # Stop the download
            self._state = self.states.stopped
            for t in self._threads:
                if t.is_alive(): t.join()

            if self._save_thread.is_alive(): self._save_thread.join()

            if self._seekable:
                # Save the download info at the end of the file
                self.file.seek(0, 2)
                try:
                    offset = self.file.tell()
                except:
                    offset = self.file.seek(0, 1)
                if not PY3:
                    self.file.write(str(self._download_info))
                    self.file.write("%0.16d" % offset)
                else:
                    download_info_dump = jsontools.dump(
                        self._download_info).encode('utf-8')
                    self.file.write(download_info_dump)
                    self.file.write(b"%0.16d" % offset)

        self.file.close()

        if erase: os.remove(filetools.join(self._path, self._filename))

    def __speed_metter__(self):
        self._speed = 0
        self._average_speed = 0

        downloaded = self._start_downloaded
        downloaded2 = self._start_downloaded
        t = time.time()
        t2 = time.time()
        time.sleep(1)

        while self.state == self.states.downloading:
            self._average_speed = old_div(
                (self.downloaded[0] - self._start_downloaded),
                (time.time() - self._start_time))
            # Instantaneous speed, measured over the sliding window updated below
            self._speed = old_div((self.downloaded[0] - downloaded),
                                  (time.time() - t))

            if time.time() - t > 5:
                t = t2
                downloaded = downloaded2
                t2 = time.time()
                downloaded2 = self.downloaded[0]

            time.sleep(0.5)

    # Internal functions
    def __init__(self,
                 url,
                 path,
                 filename=None,
                 headers=[],
                 resume=True,
                 max_connections=10,
                 block_size=2**17,
                 part_size=2**24,
                 max_buffer=10,
                 json_path=None):
        # Parameters
        self._resume = resume
        self._path = path
        self._filename = filename
        self._max_connections = max_connections
        self._block_size = block_size
        self._part_size = part_size
        self._max_buffer = max_buffer
        self._json_path = json_path

        try:
            import xbmc
            self.tmp_path = xbmc.translatePath("special://temp/")
        except:
            self.tmp_path = os.getenv("TEMP") or os.getenv("TMP") or os.getenv(
                "TMPDIR")

        self.states = type(
            'states', (), {
                "stopped": 0,
                "connecting": 1,
                "downloading": 2,
                "completed": 3,
                "error": 4,
                "saving": 5
            })

        self._state = self.states.stopped
        self._download_lock = Lock()
        self._headers = {
            "User-Agent":
            "Kodi/15.2 (Windows NT 10.0; WOW64) App_Bitness/32 Version/15.2-Git:20151019-02e7013"
        }
        self._speed = 0
        self._buffer = {}
        self._seekable = True

        self._threads = [
            Thread(target=self.__start_part__,
                   name="Downloader %s/%s" % (x + 1, self._max_connections))
            for x in range(self._max_connections)
        ]
        self._speed_thread = Thread(target=self.__speed_metter__,
                                    name="Speed Meter")
        self._save_thread = Thread(target=self.__save_file__,
                                   name="File Writer")

        # Update the headers
        self._headers.update(dict(headers))

        # Split the additional headers out of the url
        self.__url_to_headers__(url)

        # Get the download info from the server
        self.__get_download_headers__()

        self._file_size = int(self.response_headers.get("content-length", "0"))

        if not self.response_headers.get(
                "accept-ranges") == "bytes" or self._file_size == 0:
            self._max_connections = 1
            self._part_size = 0
            self._resume = False

        # Get the file name
        self.__get_download_filename__()

        # Open in "a+" mode so the file is created if it does not exist, then in "r+b" mode to be able to seek()
        self.file = filetools.file_open(filetools.join(self._path,
                                                       self._filename),
                                        "a+",
                                        vfs=VFS)
        if self.file: self.file.close()
        self.file = filetools.file_open(filetools.join(self._path,
                                                       self._filename),
                                        "r+b",
                                        vfs=VFS)
        if not self.file:
            return

        if self._file_size >= 2**31 or not self._file_size:
            try:
                self.file.seek(2**31, 0)
            except OverflowError:
                self._seekable = False
                logger.info(
                    "Cannot do seek() or tell() in files larger than 2GB")

        self.__get_download_info__()

        try:
            logger.info("Download initialized: Parts: %s | Path: %s | File: %s | Size: %s" % \
                    (str(len(self._download_info["parts"])), self._path.encode('utf-8'), \
                    self._filename.encode('utf-8'), str(self._download_info["size"])))
        except:
            pass

    def __url_to_headers__(self, url):
        # Split the url from the additional headers
        self.url = url.split("|")[0]

        # additional headers
        if "|" in url:
            self._headers.update(
                dict([[
                    header.split("=")[0],
                    urllib.parse.unquote_plus(header.split("=")[1])
                ] for header in url.split("|")[1].split("&")]))

    def __get_download_headers__(self):
        if self.url.startswith("https"):
            try:
                conn = urllib.request.urlopen(
                    urllib.request.Request(self.url.replace("https", "http"),
                                           headers=self._headers))
                conn.fp._sock.close()
                self.url = self.url.replace("https", "http")
            except:
                pass

        for x in range(3):
            try:
                if not sys.hexversion > 0x0204FFFF:
                    conn = urllib.request.urlopen(
                        urllib.request.Request(self.url,
                                               headers=self._headers))
                    conn.fp._sock.close()
                else:
                    conn = urllib.request.urlopen(urllib.request.Request(
                        self.url, headers=self._headers),
                                                  timeout=5)

            except:
                self.response_headers = dict()
                self._state = self.states.error
            else:
                self.response_headers = conn.headers
                self._state = self.states.stopped
                break

    def __get_download_filename__(self):
        # Get the file name and extension
        if "filename" in self.response_headers.get(
                "content-disposition",
                "") and "attachment" in self.response_headers.get(
                    "content-disposition", ""):
            cd_filename, cd_ext = os.path.splitext(
                urllib.parse.unquote_plus(
                    re.compile(
                        "attachment; filename ?= ?[\"|']?([^\"']+)[\"|']?").
                    match(self.response_headers.get(
                        "content-disposition")).group(1)))
        elif "filename" in self.response_headers.get(
                "content-disposition",
                "") and "inline" in self.response_headers.get(
                    "content-disposition", ""):
            cd_filename, cd_ext = os.path.splitext(
                urllib.parse.unquote_plus(
                    re.compile(
                        "inline; filename ?= ?[\"|']?([^\"']+)[\"|']?").match(
                            self.response_headers.get(
                                "content-disposition")).group(1)))
        else:
            cd_filename, cd_ext = "", ""

        url_filename, url_ext = os.path.splitext(
            urllib.parse.unquote_plus(
                filetools.basename(urllib.parse.urlparse(self.url)[2])))
        if self.response_headers.get(
                "content-type",
                "application/octet-stream") != "application/octet-stream":
            mime_ext = mimetypes.guess_extension(
                self.response_headers.get("content-type"))
        else:
            mime_ext = ""

        # Pick the most suitable name
        if cd_filename:
            self.remote_filename = cd_filename
            if not self._filename:
                self._filename = cd_filename

        elif url_filename:
            self.remote_filename = url_filename
            if not self._filename:
                self._filename = url_filename

        # Pick the most suitable extension
        if cd_ext:
            if not cd_ext in self._filename: self._filename += cd_ext
            if self.remote_filename: self.remote_filename += cd_ext
        elif mime_ext:
            if not mime_ext in self._filename: self._filename += mime_ext
            if self.remote_filename: self.remote_filename += mime_ext
        elif url_ext:
            if not url_ext in self._filename: self._filename += url_ext
            if self.remote_filename: self.remote_filename += url_ext

    def __change_units__(self, value):
        import math
        units = ["B", "KB", "MB", "GB"]
        if value <= 0:
            return 0, 0, units[0]
        exp = int(math.log(value, 1024))
        return value, old_div(value, 1024.0 ** exp), units[exp]

    def __get_download_info__(self):
        # Resume a download that stores its info block at the end of the file
        self._download_info = {}

        try:
            if not self._resume:
                raise Exception()
            self.file.seek(-16, 2)
            offset = int(self.file.read())
            self.file.seek(offset, 0)
            data = self.file.read()[:-16]
            self._download_info = eval(data)
            if not self._download_info["size"] == self._file_size:
                raise Exception()
            self.file.seek(offset, 0)
            try:
                self.file.truncate()
            except:
                pass

            if not self._seekable:
                for part in self._download_info["parts"]:
                    # Offsets beyond 2**31 cannot be seeked to here; mark those
                    # parts to be downloaded again
                    if part["start"] >= 2**31 and part["status"] == self.states.completed:
                        part["status"] = self.states.stopped
                        part["current"] = part["start"]

            self._start_downloaded = sum([
                c["current"] - c["start"] for c in self._download_info["parts"]
            ])
            self.pending_parts = set([
                x for x, a in enumerate(self._download_info["parts"])
                if not a["status"] == self.states.completed
            ])
            self.completed_parts = set([
                x for x, a in enumerate(self._download_info["parts"])
                if a["status"] == self.states.completed
            ])
            self.save_parts = set()
            self.download_parts = set()

        # The info is missing or invalid, start from scratch
        except:
            self._download_info["parts"] = []
            if self._file_size and self._part_size:
                for x in range(0, self._file_size, self._part_size):
                    end = min(x + self._part_size - 1, self._file_size - 1)
                    self._download_info["parts"].append({
                        "start": x,
                        "end": end,
                        "current": x,
                        "status": self.states.stopped
                    })
            else:
                self._download_info["parts"].append({
                    "start": 0,
                    "end": self._file_size - 1,
                    "current": 0,
                    "status": self.states.stopped
                })

            self._download_info["size"] = self._file_size
            self._start_downloaded = 0
            self.pending_parts = set(range(len(self._download_info["parts"])))
            self.completed_parts = set()
            self.save_parts = set()
            self.download_parts = set()

            self.file.seek(0, 0)
            try:
                self.file.truncate()
            except:
                pass

    def __open_connection__(self, start, end):
        headers = self._headers.copy()
        if not end: end = ""
        headers.update({"Range": "bytes=%s-%s" % (start, end)})
        if not sys.hexversion > 0x0204FFFF:
            conn = urllib.request.urlopen(
                urllib.request.Request(self.url, headers=headers))
        else:
            conn = urllib.request.urlopen(urllib.request.Request(
                self.url, headers=headers),
                                          timeout=5)
        return conn

    def __check_consecutive__(self, id):
        return id == 0 or (len(self.completed_parts) >= id
                           and sorted(self.completed_parts)[id - 1] == id - 1)

    def __save_file__(self):
        logger.info("Thread started: %s" % threading.current_thread().name)

        while self._state == self.states.downloading:
            if not self.pending_parts and not self.download_parts and not self.save_parts:  # Descarga finalizada
                self._state = self.states.completed
                self.file.close()
                continue

            elif not self.save_parts:
                continue

            save_id = min(self.save_parts)

            if not self._seekable and self._download_info["parts"][save_id][
                    "start"] >= 2**31 and not self.__check_consecutive__(
                        save_id):
                continue

            if self._seekable or self._download_info["parts"][save_id][
                    "start"] < 2**31:
                self.file.seek(self._download_info["parts"][save_id]["start"],
                               0)

            try:
                # file = open(os.path.join(self.tmp_path, self._filename + ".part%s" % save_id), "rb")
                # self.file.write(file.read())
                # file.close()
                # os.remove(os.path.join(self.tmp_path, self._filename + ".part%s" % save_id))
                for a in self._buffer.pop(save_id):
                    self.file.write(a)
                self.save_parts.remove(save_id)
                self.completed_parts.add(save_id)
                self._download_info["parts"][save_id][
                    "status"] = self.states.completed
            except:
                import traceback
                logger.error(traceback.format_exc())
                self._state = self.states.error

        if self.save_parts:
            for s in self.save_parts:
                self._download_info["parts"][s]["status"] = self.states.stopped
                self._download_info["parts"][s][
                    "current"] = self._download_info["parts"][s]["start"]

        logger.info("Thread stopped: %s" % threading.current_thread().name)

    def __get_part_id__(self):
        self._download_lock.acquire()
        try:
            if self.pending_parts:
                id = min(self.pending_parts)
                self.pending_parts.remove(id)
                self.download_parts.add(id)
                return id
            return None
        finally:
            # The lock is released on both paths
            self._download_lock.release()

    def __set_part_connecting__(self, id):
        logger.info("ID: %s Establishing connection" % id)
        self._download_info["parts"][id]["status"] = self.states.connecting

    def __set_part_error__(self, id):
        logger.info("ID: %s Download failed" % id)
        self._download_info["parts"][id]["status"] = self.states.error
        self.pending_parts.add(id)
        self.download_parts.remove(id)

    def __set_part_downloading__(self, id):
        logger.info("ID: %s Downloading data ..." % id)
        self._download_info["parts"][id]["status"] = self.states.downloading

    def __set_part_completed__(self, id):
        logger.info("ID: %s Download finished!" % id)
        self._download_info["parts"][id]["status"] = self.states.saving
        self.download_parts.remove(id)
        self.save_parts.add(id)
        while self._state == self.states.downloading and len(
                self._buffer) > self._max_connections + self._max_buffer:
            time.sleep(0.1)

    def __set_part_stopped__(self, id):
        if self._download_info["parts"][id][
                "status"] == self.states.downloading:
            self._download_info["parts"][id]["status"] = self.states.stopped
            self.download_parts.remove(id)
            self.pending_parts.add(id)

    def __open_part_file__(self, id):
        #file = open(os.path.join(self.tmp_path, self._filename + ".part%s" % id), "a+")
        #file = open(os.path.join(self.tmp_path, self._filename + ".part%s" % id), "r+b")
        self.file = filetools.file_open(filetools.join(
            self.tmp_path, self._filename + ".part%s" % id),
                                        "a+",
                                        vfs=VFS)
        self.file.close()
        self.file = filetools.file_open(filetools.join(
            self.tmp_path, self._filename + ".part%s" % id),
                                        "r+b",
                                        vfs=VFS)
        self.file.seek(
            self._download_info["parts"][id]["current"] -
            self._download_info["parts"][id]["start"], 0)
        return self.file

    def __start_part__(self):
        logger.info("Thread Started: %s" % threading.current_thread().name)
        while self._state == self.states.downloading:
            id = self.__get_part_id__()
            if id is None: break

            self.__set_part_connecting__(id)

            try:
                connection = self.__open_connection__(
                    self._download_info["parts"][id]["current"],
                    self._download_info["parts"][id]["end"])
            except:
                self.__set_part_error__(id)
                time.sleep(5)
                continue

            self.__set_part_downloading__(id)
            # file = self.__open_part_file__(id)

            if id not in self._buffer:
                self._buffer[id] = []
            speed = []

            while self._state == self.states.downloading:
                try:
                    start = time.time()
                    buffer = connection.read(self._block_size)
                    speed.append(
                        old_div(len(buffer), ((time.time() - start) or 0.001)))
                except:
                    logger.info("ID: %s Error downloading data" % id)
                    self._download_info["parts"][id][
                        "status"] = self.states.error
                    self.pending_parts.add(id)
                    self.download_parts.remove(id)
                    break
                else:
                    if len(buffer) and self._download_info["parts"][id][
                            "current"] < self._download_info["parts"][id][
                                "end"]:
                        # file.write(buffer)
                        self._buffer[id].append(buffer)
                        self._download_info["parts"][id]["current"] += len(
                            buffer)
                        if len(speed) > 10:
                            velocidad_minima = old_div(
                                old_div(sum(speed), len(speed)), 3)
                            velocidad = speed[-1]
                            vm = self.__change_units__(velocidad_minima)
                            v = self.__change_units__(velocidad)

                            if velocidad_minima > speed[-1] and velocidad_minima > speed[-2] and \
                                            self._download_info["parts"][id]["current"] < \
                                            self._download_info["parts"][id]["end"]:
                                if connection.fp: connection.fp._sock.close()
                                logger.info(
                                    "ID: %s Restarting connection! | Minimum Speed: %.2f %s/s | Speed: %.2f %s/s" % \
                                    (id, vm[1], vm[2], v[1], v[2]))
                                # file.close()
                                break
                    else:
                        self.__set_part_completed__(id)
                        if connection.fp: connection.fp._sock.close()
                        # file.close()
                        break

            self.__set_part_stopped__(id)
        logger.info("Thread stopped: %s" % threading.current_thread().name)

    def __update_json(self, started=True):
        item = Item().fromjson(filetools.read(self._json_path))
        progress = int(self.progress)
        if started and item.downloadStatus == 0:  # stopped
            logger.info('Download paused')
            self.stop()
        elif item.downloadProgress != progress or not started:
            params = {
                "downloadStatus": 4,
                "downloadComplete": 0,
                "downloadProgress": progress
            }
            item.__dict__.update(params)
            filetools.write(self._json_path, item.tojson())
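
# A minimal standalone sketch of the HTTP Range pattern that
# __open_connection__ above relies on: each part of the download asks the
# server for one byte slice of the file. The URL and offsets below are
# illustrative placeholders, not values from the class above.
import urllib.request

def fetch_range(url, start, end=""):
    # "Range: bytes=<start>-<end>"; an empty end means "until EOF"
    req = urllib.request.Request(url, headers={"Range": "bytes=%s-%s" % (start, end)})
    with urllib.request.urlopen(req, timeout=5) as conn:
        # A server that honors ranges answers 206 Partial Content
        return conn.read()

# first_kib = fetch_range("http://example.com/file.bin", 0, 1023)
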
Example #43
0
class MySqlPool(object):
    __pool = None

    def __init__(self, db, host, port, user, passwd):

        # Constructor: take a connection from the pool and create a cursor
        try:
            self.lock = Lock()
            self._conn = PooledDB(creator=pymysql,
                                  mincached=1,
                                  maxcached=20,
                                  host=host,
                                  port=port,
                                  user=user,
                                  passwd=passwd,
                                  db=db,
                                  use_unicode=True,
                                  charset='utf8mb4',
                                  cursorclass=DictCursor).connection()
            self._cursor = self._conn.cursor()
        except Exception as e:
            print(e)

    def __query(self, sql, param=None):
        if param is None:
            count = self._cursor.execute(sql)
        else:
            count = self._cursor.execute(sql, param)
        return count

    def __execute(self, sql, param=None):
        conn = self._conn
        cursor = conn.cursor()
        self.lock.acquire()
        try:
            cursor.execute(sql, param)
            conn.commit()
        except pymysql.Error as e:
            conn.rollback()
            print(e)
        finally:
            # Release the lock even when the statement fails
            self.lock.release()

    def execute(self, sql):
        self.__execute(sql)

    def __execute_many(self, sql, param=None):
        conn = self._conn
        cursor = conn.cursor()
        self.lock.acquire()
        try:
            cursor.executemany(sql, param)
            conn.commit()
        except pymysql.Error as e:
            conn.rollback()
            print(e)
        finally:
            # Release the lock even when the statement fails
            self.lock.release()

    def update(self, sql, param=None):
        """
        pool = MySqlPool()
        pool.update("update user_info set name = %s where user_id = %s", param=('Lucy', '001'))
        """
        self.__execute(sql, param)

    def delete(self, sql, param=None):
        """
        pool = MySqlPool()
        pool.delete('delete from user_info where name is null')
        """
        self.__execute(sql, param)

    def insert_one(self, sql, param=None):
        """
        pool = MySqlPool()
        pool.insert_one("INSERT INTO `user_info` (`user_id`,`name`) VALUES (%s,%s)", ('001', 'Jim'))
        """
        self.__execute(sql, param)

    def insert_many(self, sql, param=None):
        """
        pool = MySqlPool()
        pool.insert_many("INSERT INTO `user_info` (`user_id`,`name`) VALUES (%s,%s)", [('001', 'Jim'), ('002', 'Lucius')])
        """
        self.__execute_many(sql, param)

    def get_all(self, sql, param=None):
        conn = self._conn
        cursor = conn.cursor()
        count = cursor.execute(sql, param)
        if count > 0:
            result = cursor.fetchall()
        else:
            result = False
        return result

    def get_one(self, sql, param=None):
        conn = self._conn
        cursor = conn.cursor()
        count = cursor.execute(sql, param)
        if count > 0:
            result = cursor.fetchone()
        else:
            result = False
        return result

    def get_many(self, sql, num, param=None):
        conn = self._conn
        cursor = conn.cursor()
        count = cursor.execute(sql, param)
        if count > 0:
            result = cursor.fetchmany(num)
        else:
            result = False
        return result

    def dispose(self):
        self._conn.close()
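
# A hypothetical usage sketch for the MySqlPool above; the connection
# parameters are placeholders and the user_info table from the docstrings
# is assumed to exist.
# pool = MySqlPool(db="test", host="127.0.0.1", port=3306, user="root", passwd="secret")
# pool.insert_one("INSERT INTO `user_info` (`user_id`,`name`) VALUES (%s,%s)", ("003", "Ann"))
# rows = pool.get_all("SELECT * FROM `user_info` WHERE `name` = %s", ("Ann",))
# pool.dispose()
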
Example #44
0
class DXLCommunicator(Communicator):
    """ Communicator class for DXL MX-64 devices.

    This class implements Dynamixel MX-64 series specific packet communicator (uses DXL Protocol 1.0).
    Only goal torque actuator commands are currently allowed.
    """
    def __init__(
        self,
        idn,
        baudrate,
        timeout_connection=0.5,
        sensor_dt=0.006,
        device_path='None',
        use_ctypes_driver=True,
    ):
        """ Inits DXLCommunicator class with device and task-specific parameters.

        Establishes a serial connection with a DXL device. If device_path is 'None', the function searches
        for a serial port address and connects to the first device found.

        Args:
            idn: An integer representing the DXL ID number
            baudrate: An integer representing a baudrate to connect at
            timeout_connection: A float representing connection timeout parameter in seconds
            sensor_dt: A float representing the cycle time for polling sensory information and
                writing torque commands.
            device_path: A string containing the serial port address (e.g., /dev/ttyACM0 or /dev/ttyUSB0 on linux)
            use_ctypes_driver: A bool. Use this flag to choose between ctypes based DynamixelSDK or
                a custom pyserial DXL driver. By default, the ctypes driver is used.
        """
        self.idn = idn
        self.baudrate = baudrate
        self.timeout = timeout_connection
        self.sensor_dt = sensor_dt
        self.device_path = device_path
        self.dt_tol = 0.0001
        self.read_wait_time = 0.0001

        self.use_ctypes_driver = use_ctypes_driver
        self.read_block = dxl_mx64.MX64.subblock(
            'version_0', 'goal_acceleration', ret_dxl_type=use_ctypes_driver)

        self.write_block = dxl_mx64.MX64.subblock(
            'goal_torque', 'goal_torque', ret_dxl_type=use_ctypes_driver)

        sensor_args = {
            'buffer_len': SharedBuffer.DEFAULT_BUFFER_LEN,
            'array_len': self.read_block.len(),
            'array_type': 'd',
            'np_array_type': 'd'
        }

        actuator_args = {
            'buffer_len': SharedBuffer.DEFAULT_BUFFER_LEN,
            'array_len': 1,
            'array_type': 'd',
            'np_array_type': 'd'
        }

        super(DXLCommunicator, self).__init__(use_sensor=True,
                                              use_actuator=True,
                                              sensor_args=sensor_args,
                                              actuator_args=actuator_args)

        self.just_read = 0
        self.just_read_lock = Lock()
        self.port_lock = Lock()
        self.read_start_time = self.read_end_time = time.time()
        self.read_time = 0
        self.last_actuation_updated = time.time()
        self.max_actuation_time = 1
        self.torque = 0

    def run(self):
        """ Override base class run method to setup dxl driver.

        The shared object file should be loaded within the process that
        sends commands and receives information from the DXL. Hence the dxl_driver
        is initialized here. This is an issue only when using the ctypes driver.
        Regular imports work for the pyserial driver.
        """
        if self.use_ctypes_driver:
            from senseact.devices.dxl import dxl_driver_v1 as dxl_commv1
        else:
            from senseact.devices.dxl import dxl_commv1
        self.dxl_driver = dxl_commv1

        # Make connection
        self.port = dxl_commv1.make_connection(self.baudrate, self.timeout,
                                               self.device_path)

        # Prepare for torque control
        self.write_torque_mode_enable(dxl_commv1,
                                      self.port,
                                      self.idn,
                                      1,
                                      use_ctypes_driver=self.use_ctypes_driver)

        vals = self.dxl_driver.read_a_block(self.port, self.idn,
                                            self.read_block,
                                            self.read_wait_time)

        # Set Return Delay time = 0
        if not vals["rtd"] == 0:
            self.write_return_delay_time(
                dxl_commv1,
                self.port,
                self.idn,
                0,
                use_ctypes_driver=self.use_ctypes_driver)

        # Set the dynamixel to wheel mode (infinite rotations; mx-64AT encoder positions in [0, 4095])
        if not (vals["angle_limit_cw"] == -pi
                and vals["angle_limit_ccw"] == -pi):
            self.set_wheel_mode()

        # Overwrite torque command written in the preceding expt in case of unsafe exit
        self.write_torque(0)

        # catching SIGINT (Ctrl+C) so that we can close dxl safely
        signal.signal(signal.SIGINT, self._close)

        super(DXLCommunicator, self).run()

    def _sensor_handler(self):
        """ Receives and stores sensory packets from DXL.

        Waits for packets to arrive from DXL through `dxl_driver.read_a_block_vals` call,
        communicator sensor cycle time, and stores the packet in `sensor_buffer`.
        """

        self._time_and_wait()
        self.port_lock.acquire()
        vals = self.dxl_driver.read_a_block_vals(self.port, self.idn,
                                                 self.read_block,
                                                 self.read_wait_time)
        self.port_lock.release()
        self.just_read_lock.acquire()
        self.just_read = 1
        self.just_read_lock.release()
        self.sensor_buffer.write(vals)

    def _actuator_handler(self):
        """ Sends actuation commands to DXL.

        Only torque control allowed now. We send zero torque
        (i.e., stopping movements) if there is a delay in actuation.
        """
        if time.time() - self.last_actuation_updated > self.max_actuation_time:
            self.torque = 0
        self.just_read_lock.acquire()
        if self.just_read == 1:
            self.just_read = 0
            self.just_read_lock.release()
        else:
            self.just_read_lock.release()
            time.sleep(0.0001)
            return
        if self.actuator_buffer.updated():
            self.last_actuation_updated = time.time()
            recent_actuation, _, _ = self.actuator_buffer.read_update()
            self.torque = recent_actuation[0]
        self.write_torque(self.torque)

    def write_torque(self, torque):
        """ Writes goal torque commands to the DXL control table.

        Args:
            torque: A float in [-1024, 1024]
        """
        packet = self.dxl_driver.packet_write_buffer(
            self.idn, self.write_block,
            self.write_block.data_from_vals([torque]))
        self.port_lock.acquire()
        self.dxl_driver.loop_until_written(self.port, self.idn, packet)
        self.port_lock.release()

    def _time_and_wait(self):
        """ Maintains communicator cycle time.

        If sending a command takes 'x'ms, wait (sensor_dt - x)ms to maintain cycle time.
        """
        self.read_end_time = time.time()
        self.read_time = self.read_end_time - self.read_start_time
        if self.read_time > self.sensor_dt:
            print("Warning: Iteration time exceeded sensor_dt {}ms by {}ms".
                  format(self.sensor_dt * 1000,
                         (self.read_time - self.sensor_dt) * 1000))
        else:
            time.sleep(max(0, self.sensor_dt - self.read_time - self.dt_tol))
        self.read_start_time = time.time()

    def _close(self, *args, **kwargs):
        """ Safely closes the DXL device. Disables torque before shutting down the script.

        NOTE: This method currently gets called 3 times due to interactions with gym.core.env
              the following two lines can be used for debugging if this needs to get fixed

        Args:
            *args: Additional args
            **kwargs: Additional keyword args
        """
        print("Closing Gripper communicator")

        # Wait for the sensor and actuator threads to close before setting the torque to 0
        # This is to ensure there isn't contention for sending messages on the port
        super(DXLCommunicator, self)._close()
        self.write_torque_mode_enable(self.dxl_driver,
                                      self.port,
                                      self.idn,
                                      0,
                                      use_ctypes_driver=self.use_ctypes_driver)
        self.write_torque(0)
        self.dxl_driver.clear_port(self.port)
        self.dxl_driver.close(self.port)

    @staticmethod
    def write_torque_mode_enable(dxl_driver,
                                 port,
                                 idn,
                                 enable,
                                 use_ctypes_driver=True):
        """ Enables/Disables torque control mode in DXL MX-64 devices.

        0 - Disabled - Position control mode
        1 - Enabled - Torque control mode

        Args:
            dxl_driver: A variable that points to the DXL driver
            port: Dynamixel portHandler object
            idn: An integer representing the DXL ID number
            enable: 0 or 1
            use_ctypes_driver: A bool to choose between Ctypes and pyserial driver
        """
        block = dxl_mx64.MX64.subblock('torque_control_mode_enable',
                                       'torque_control_mode_enable',
                                       ret_dxl_type=use_ctypes_driver)
        packet = dxl_driver.packet_write_buffer(idn, block,
                                                block.data_from_vals([enable]))
        dxl_driver.loop_until_written(port, idn, packet)

    @staticmethod
    def write_return_delay_time(dxl_driver,
                                port,
                                idn,
                                rtd,
                                use_ctypes_driver=True):
        """ Writes the return delay time to the DXL control table

        Return Delay Time is the delay time per data value that takes from the transmission of
        Instruction Packet until the return of Status Packet.

        Args:
            dxl_driver: A variable that points to the DXL driver
            port: Dynamixel portHandler object
            idn: An integer representing the DXL ID number
            rtd: A float representing delay time between packets in milliseconds.
            use_ctypes_driver: A bool to choose between Ctypes and pyserial driver
        """
        block = dxl_mx64.MX64.subblock('rtd',
                                       'rtd',
                                       ret_dxl_type=use_ctypes_driver)
        packet = dxl_driver.packet_write_buffer(idn, block,
                                                block.data_from_vals([rtd]))
        dxl_driver.loop_until_written(port, idn, packet)

    @staticmethod
    def write_to_register(dxl_driver,
                          port,
                          idn,
                          reg_name,
                          val,
                          use_ctypes_driver=True):
        """ Writes a value to a given register of the DXL device

        General function to write a value to any given register

        Args:
            dxl_driver: A variable that points to the DXL driver
            port: Dynamixel portHandler object
            idn: An integer representing the DXL ID number
            reg_name: A string containing the name of the register to write to
            val: An int or a float depending on the register
            use_ctypes_driver: A bool to choose between Ctypes and pyserial driver
        """
        block = dxl_mx64.MX64.subblock(reg_name,
                                       reg_name,
                                       ret_dxl_type=use_ctypes_driver)
        packet = dxl_driver.packet_write_buffer(idn, block,
                                                block.data_from_vals([val]))
        dxl_driver.loop_until_written(port, idn, packet)

    def set_wheel_mode(self):
        """ Sets the DXL to wheel mode (i.e., infinite turns)

        This is done by setting angle_limit_cw and
        angle_limit_ccw registers to zero in the control table
        """
        self.write_to_register(self.dxl_driver,
                               self.port,
                               self.idn,
                               'angle_limit_cw',
                               -pi,
                               use_ctypes_driver=self.use_ctypes_driver)
        self.write_to_register(self.dxl_driver,
                               self.port,
                               self.idn,
                               'angle_limit_ccw',
                               -pi,
                               use_ctypes_driver=self.use_ctypes_driver)
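
# A hypothetical usage sketch for DXLCommunicator, assuming an MX-64 with
# ID 1 reachable on /dev/ttyUSB0 at 1 Mbps; start() is assumed to come from
# the process-based Communicator base class, and read_update() from the
# SharedBuffer API used above.
# comm = DXLCommunicator(idn=1, baudrate=1000000, device_path='/dev/ttyUSB0')
# comm.start()                                     # spawns the process that calls run()
# state, _, _ = comm.sensor_buffer.read_update()   # latest sensor packet
# comm.actuator_buffer.write([0.0])                # command zero goal torque
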
Example #45
0
class BTCtl:
    DATA_IN = 0x82
    DATA_OUT = 0x05

    def __init__(self, usb):
        self._usb = usb
        self._rx_thread = None
        self._con = False
        self._con_lock = Lock()
        self._rx_stopped = Event()
        self._rx_stopped.clear()
        self._msg_handler = None

    @staticmethod
    def find():
        context = usb1.USBContext()
        handle = context.openByVendorIDAndProductID(UBERTOOTH_VENDOR_ID,
                                                    UBERTOOTH_PRODUCT_ID,
                                                    skip_on_error=True)
        if handle is None:
            raise Exception("Ubertooth device not found")
        return BTCtl(handle)

    def register_msg_handler(self, h):
        assert (self._msg_handler is None)
        self._msg_handler = h

    def unregister_msg_handler(self, h):
        assert (self._msg_handler == h)
        self._msg_handler = None

    def _print_console(self, msg):
        msg = "\x1b[32;1m%s\x1b[0m" % msg.decode()
        eprint(msg)

    def _print_debug(self, msg):
        msg = "\x1b[31;1m%s\x1b[0m" % msg.decode()
        eprint(msg)

    def _handle_default(self, t, msg):
        if t == BTCTL_STATE_RESP:
            print_state(*msg)
        else:
            log.info("No cmd for message %d" % t)

    def _handle_msg(self, data):
        t, msg = data[0], data[4:]
        if t == BTCTL_DEBUG:
            self._print_console(msg)
        else:
            if t == BTCTL_RX_PKT:
                msg = BTCtlRxPkt.unpack(msg)
            if self._msg_handler is not None:
                self._msg_handler(t, msg)
            else:
                self._handle_default(t, msg)

    def _rx_thread_main(self):
        msg = None
        msg_size = 0
        log.info("Rx thread started")
        while not self._rx_stopped.is_set():
            try:
                data = self._usb.bulkRead(self.DATA_IN, 64, 100)
            except usb1.USBErrorTimeout:
                continue
            t = data[:1]
            #log.info ("got data (l:%d, t:%s): %s"%(len(data),repr(t),repr(data)))
            if t == b'P':
                self._print_debug(data[1:].strip(b"\x00"))
            else:
                if msg is None:
                    assert (t == b'S')
                    msg_size, = unpack("<H", data[2:4])
                    msg = data[4:]
                    assert (len(data) == min(64, 4 + msg_size))
                else:
                    assert (t == b'C')
                    assert (len(data) == min(64, 1 + msg_size - len(msg)))
                    msg += data[1:]
                if len(msg) == msg_size:
                    self._handle_msg(msg)
                    msg = None
        log.debug("Rx thread stopped")

    def connect(self):
        if self.connected():
            log.warning("Already connected")
            return
        self._usb.claimInterface(0)
        self._rx_thread = Thread(target=self._rx_thread_main)
        self._rx_thread.start()
        log.info("USB connected")
        self._con = True
        self.send_idle_cmd()
        sleep(1)

    def connected(self):
        return self._con

    def close(self):
        if not self.connected():
            log.warning("Not connected")
            return
        self.send_idle_cmd()
        self._rx_stopped.set()
        self._rx_thread.join()
        self._usb.close()
        self._con = False

    def _send_usb_bulk(self, bulk):
        assert (len(bulk) <= 64)
        self._usb.bulkWrite(self.DATA_OUT, bulk)

    def _send_usb(self, data):
        self._con_lock.acquire()
        usb_hdr = b"S\x00" + pack("<H", len(data))
        chunk, data = data[:60], data[60:]
        self._send_usb_bulk(usb_hdr + chunk)
        while data:
            chunk, data = data[:63], data[63:]
            self._send_usb_bulk(b'C' + chunk)
        self._con_lock.release()

    def _send_cmd(self, t, data=b''):
        hdr = pack("<I", t)
        self._send_usb(hdr + data)

    def send_debug_cmd(self, msg):
        msg = msg[:256]
        self._send_cmd(BTCTL_DEBUG, msg)

    def send_idle_cmd(self):
        log.info("Send idle")
        self._send_cmd(BTCTL_IDLE_REQ)

    def send_set_freq_off_cmd(self, off):
        self._send_cmd(BTCTL_SET_FREQ_OFF_REQ, p16(off))

    def send_set_max_ac_errors_cmd(self, max_ac_errors):
        self._send_cmd(BTCTL_SET_MAX_AC_ERRORS_REQ, p16(max_ac_errors))

    def send_set_bdaddr_cmd(self, bdaddr):
        self._send_cmd(BTCTL_SET_BDADDR_REQ, pack("<Q", bdaddr))

    def send_inquiry_cmd(self):
        self._send_cmd(BTCTL_INQUIRY_REQ)

    def send_inquiry_scan_cmd(self):
        self._send_cmd(BTCTL_INQUIRY_SCAN_REQ)

    def send_paging_cmd(self, bdaddr):
        self._send_cmd(BTCTL_PAGING_REQ, pack("<Q", bdaddr))

    def send_monitor_cmd(self, bdaddr):
        self._send_cmd(BTCTL_MONITOR_REQ, pack("<Q", bdaddr))

    def send_page_scan_cmd(self):
        self._send_cmd(BTCTL_PAGE_SCAN_REQ)

    def send_acl_cmd(self,
                     llid,
                     data,
                     flow=1,
                     lt_addr=1,
                     flags=0,
                     bt_type=None):
        acl = BTCtlACLPkt(llid, data, flow, bt_type)
        bb_hdr = BBHdr(lt_addr, acl.bt_type, flags)
        payload = bb_hdr.pack() + acl.pack()
        log.debug("send bb %s, acl: %s" % (bb_hdr, acl))
        self._send_cmd(BTCTL_TX_ACL_REQ, payload)

    def send_set_eir_cmd(self, eir_data):
        acl = BTCtlACLPkt(LLID_L2CAP_START, eir_data, 1)
        bb_hdr = BBHdr(0, acl.bt_type)  # send on lt_addr 0
        payload = bb_hdr.pack() + acl.pack()
        log.debug("Set eir %s, acl: %s" % (bb_hdr, acl))
        self._send_cmd(BTCTL_SET_EIR_REQ, payload)

    def send_set_afh_cmd(self, instant, mode, afh_map):
        cmd = pack("<IB", instant, mode) + afh_map
        log.debug("send set afh: %s" % hexlify(cmd))
        self._send_cmd(BTCTL_SET_AFH_REQ, cmd)
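
# A hypothetical usage sketch for BTCtl; it assumes an Ubertooth is attached
# and that the BTCTL_* constants and packet classes used above are in scope.
# bt = BTCtl.find()
# bt.connect()
# bt.send_inquiry_cmd()   # replies are delivered through the rx thread
# sleep(10)
# bt.close()
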
Example #46
0
class RobotManager(object):
    _instance = None
    
    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(RobotManager, cls).__new__(cls)
            cls.__initialized = False
        return cls._instance

    def init(self):
        if self.__initialized:
            return
        self.__initialized = True

        log.i('robot manager init')

        if LAUNCH_NAVIGATION:
            self.launch_nav = roslauncher.roslauncher(NAVIGATION_LAUNCH_CMD)
            self.launch_nav.launch()

        self.robot_speed = Twist()  # temporarily stores robot speed
        self.robot_pose = Pose()    # temporarily stores robot pose

        self.rs = RobotStatus()
        ### Robot Status Lock
        # - Used to protect all the data in robot_status
        # - Always acquire/release this lock when doing anything with robot_status.
        self.robot_status_lock = Lock()

        # ros
        rospy.init_node('robot_manager', anonymous=False)
        rospy.Subscriber('/odom', Odometry, self.odom_cb)
        self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
        self.init_pose_pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size=10)

        if ROBOT_POSE_TYPE == ABSOLUTE:
            self.tf_listener = tf.TransformListener()
            try:
                self.tf_listener.waitForTransform('/map', '/base_link', rospy.Time(0), rospy.Duration(30.0))
            except (tf.Exception, tf.ConnectivityException, tf.LookupException) as e:
                log.e('wait for tf transform between /map and /base_link timeout %s'%e)
                return False

        # robot status update thread
        self.rs_update_routine = Thread(target=self.rs_update, name='robot_status_update')
        self.rs_update_routine.daemon = True
        self.rs_update_routine.start()
        
        if ROBOT_STATUS_DEBUG:
            self.rs_debug = Thread(target=self.rs_debug_print, name='robot_status_debug')
            self.rs_debug.daemon = True
            self.rs_debug.start()
        return True

    def spin(self):
        rospy.spin()

    def odom_cb(self, odom):
        self.robot_speed.linear.x = odom.twist.twist.linear.x
        self.robot_speed.linear.y = odom.twist.twist.linear.y
        self.robot_speed.angular.z = odom.twist.twist.angular.z

        if ROBOT_POSE_TYPE == RELATIVE:
            self.robot_pose = odom.pose.pose

    def lookup_tf(self):
        try:
            (trans, rot) = self.tf_listener.lookupTransform('/map', '/base_link', rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            if DEBUG:
                log.e('fail to lookup tf transformation. tf error')
            return 

        self.robot_pose.position.x = trans[0]
        self.robot_pose.position.y = trans[1]
        self.robot_pose.orientation.x = rot[0]
        self.robot_pose.orientation.y = rot[1]
        self.robot_pose.orientation.z = rot[2]
        self.robot_pose.orientation.w = rot[3]

    def rs_debug_print(self):
        now = time()
        while True:
            if time()-now > 1.0:
                if self.robot_status_lock.acquire():
                    log.d('speed - vx:',self.rs.vx, 'vy:', self.rs.vy, 'vw:', self.rs.vw)
                    log.d('pose - x:', self.rs.x, 'y:', self.rs.y, 'yaw:', self.rs.yaw)
                    self.robot_status_lock.release()
                now = time()

    def rs_update(self):
        '''
            Update velocities and pose to robot status
        '''
        interval = 1.0/ROBOT_STATUS_UPDATE_FREQUENCY
        now = time()

        while True:
            if (time()-now)>interval:
                if self.robot_status_lock.acquire():
                    # update velocities
                    self.rs.vx = self.robot_speed.linear.x
                    self.rs.vy = self.robot_speed.linear.y
                    self.rs.vw = self.robot_speed.angular.z

                    # update pose
                    if ROBOT_POSE_TYPE == ABSOLUTE:
                        self.lookup_tf()
                    if ROBOT_POSE_TYPE == RELATIVE:
                        pass
                    self.rs.x = self.robot_pose.position.x
                    self.rs.y = self.robot_pose.position.y
                    orientation = self.robot_pose.orientation
                    quat = [orientation.x, orientation.y, orientation.z, orientation.w]
                    self.rs.yaw = tf.transformations.euler_from_quaternion(quat)[-1]
                    self.robot_status_lock.release()
                    
                    log.t("v=[%f, %f, %f] pos=[%f, %f, %f]" %(self.rs.vx, self.rs.vy, self.rs.vw, self.rs.x, self.rs.y, self.rs.yaw))
                now = time()

    def get_speed(self):
        '''Get the current robot speeds.
        '''
        self.robot_status_lock.acquire()
        vx = self.rs.vx
        vy = self.rs.vy
        vw = self.rs.vw
        self.robot_status_lock.release()
        return (vx, vy, vw)

    def get_pose(self):
        '''Get the current robot pose.
        '''
        self.robot_status_lock.acquire()
        x = self.rs.x
        y = self.rs.y
        yaw = self.rs.yaw
        self.robot_status_lock.release()
        return (x, y, yaw)

    def set_vel(self, vx, vy, vw):
        '''Control the robot's speeds by publishing command to /cmd_vel topic.'''
        cmd = Twist()
        cmd.linear.x = vx
        cmd.linear.y = vy
        cmd.angular.z = vw
        self.cmd_vel_pub.publish(cmd)
    
    def set_pose(self, x, y, yaw):
        '''Set the robot pose'''
        pose = PoseWithCovarianceStamped()
        pose.header.stamp = rospy.Time.now()
        pose.header.frame_id = 'map'
        pose.pose.covariance = [0.25, 0.0, 0.0, 0.0, 0.0, 0.0, 
                                0.0, 0.25, 0.0, 0.0, 0.0, 0.0, 
                                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
                                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
                                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
                                0.0, 0.0, 0.0, 0.0, 0.0, 0.06853891945200942]
        pose.pose.pose.position.x = x
        pose.pose.pose.position.y = y
        quat = tf.transformations.quaternion_from_euler(0, 0, yaw)
        pose.pose.pose.orientation.x = quat[0]
        pose.pose.pose.orientation.y = quat[1]
        pose.pose.pose.orientation.z = quat[2]
        pose.pose.pose.orientation.w = quat[3]
        self.init_pose_pub.publish(pose)
        # clear costmap
        rospy.wait_for_service('/move_base/clear_costmaps')
        clear = rospy.ServiceProxy('/move_base/clear_costmaps', Empty)
        try:
            clear()
        except rospy.ServiceException as e:
            print(e)
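
# A hypothetical usage sketch for the RobotManager singleton above, assuming
# a running ROS master and the /odom, /cmd_vel and /initialpose topics:
# rm = RobotManager()
# if rm.init():
#     rm.set_pose(0.0, 0.0, 0.0)   # seed localization with an initial pose
#     rm.set_vel(0.2, 0.0, 0.0)    # drive forward at 0.2 m/s
#     print(rm.get_pose())
#     rm.spin()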
            
Example #47
0
class WebsocketServerBase(ABC):
    def __init__(self, args, listen_address, listen_port, mitm_mapper,
                 db_wrapper, routemanagers, device_mappings, auths):
        self.__current_users = {}
        self.__listen_address = listen_address
        self.__listen_port = listen_port
        self._send_queue = queue.Queue()

        self.__received = {}  # map with received IDs
        self.__receivedMutex = Lock()

        self.__requests = {}  # map with ID, event mapping
        self.__requestsMutex = Lock()

        self.__nextId = 0
        self.__idMutex = Lock()
        self.args = args
        self.db_wrapper = db_wrapper
        self.device_mappings = device_mappings
        self.routemanagers = routemanagers
        self.auths = auths
        self._mitm_mapper = mitm_mapper

    def start_server(self):
        log.info("Starting websocket server...")
        loop = asyncio.new_event_loop()
        # build list of origin IDs
        allowed_origins = []
        for device in self.device_mappings.keys():
            allowed_origins.append(device)

        log.info("Device mappings: %s" % str(self.device_mappings))
        log.info("Allowed origins derived: %s" % str(allowed_origins))
        asyncio.set_event_loop(loop)
        asyncio.get_event_loop().run_until_complete(
            websockets.serve(self.handler,
                             self.__listen_address,
                             self.__listen_port,
                             max_size=2**25,
                             origins=allowed_origins,
                             ping_timeout=60,
                             ping_interval=60))
        asyncio.get_event_loop().run_forever()

    async def __unregister(self, websocket):
        id = str(websocket.request_headers.get_all("Origin")[0])
        worker = self.__current_users.get(id, None)
        if worker is None:
            return
        else:
            # worker[1].stop_worker()
            self.__current_users.pop(id)

    async def __register(self, websocket):
        # await websocket.recv()
        log.info("Client registering....")
        try:
            id = str(websocket.request_headers.get_all("Origin")[0])
        except IndexError:
            log.warning(
                "Client from %s tried to connect without Origin header" %
                str(websocket))  # TODO: list IP or whatever...
            return False
        if self.auths:
            try:
                authBase64 = str(
                    websocket.request_headers.get_all("Authorization")[0])
            except IndexError:
                log.warning(
                    "Client from %s tried to connect without auth header" %
                    str(websocket))
                return False
        if self.__current_users.get(id, None) is not None:
            log.warning("Worker for %s is already running" % str(id))
            return False
        elif self.auths and authBase64 and not check_auth(
                authBase64, self.args, self.auths):
            return False

        lastKnownState = {}
        client_mapping = self.device_mappings[id]
        daytime_routemanager = self.routemanagers[
            client_mapping["daytime_area"]].get("routemanager")
        if client_mapping.get("nighttime_area", None) is not None:
            nightime_routemanager = self.routemanagers[
                client_mapping["nighttime_area"]].get("routemanager", None)
        else:
            nightime_routemanager = None
        devicesettings = client_mapping["settings"]

        started = False
        if MadGlobals.sleep is True:
            # start the appropriate nighttime manager if set
            if nightime_routemanager is None:
                pass
            elif nightime_routemanager.mode in [
                    "raids_mitm", "mon_mitm", "iv_mitm"
            ]:
                Worker = WorkerMITM(self.args,
                                    id,
                                    lastKnownState,
                                    self,
                                    daytime_routemanager,
                                    nightime_routemanager,
                                    self._mitm_mapper,
                                    devicesettings,
                                    db_wrapper=self.db_wrapper)
                started = True
            elif nightime_routemanager.mode in ["raids_ocr"]:
                from worker.WorkerOcr import WorkerOcr
                Worker = WorkerOcr(self.args,
                                   id,
                                   lastKnownState,
                                   self,
                                   daytime_routemanager,
                                   nightime_routemanager,
                                   devicesettings,
                                   db_wrapper=self.db_wrapper)
                started = True
            elif nightime_routemanager.mode in ["pokestops"]:
                Worker = WorkerQuests(self.args,
                                      id,
                                      lastKnownState,
                                      self,
                                      daytime_routemanager,
                                      nightime_routemanager,
                                      self._mitm_mapper,
                                      devicesettings,
                                      db_wrapper=self.db_wrapper)
                started = True
            else:
                log.fatal("Mode not implemented")
                sys.exit(1)
        if not MadGlobals.sleep or not started:
            # we either gotta run daytime mode OR nighttime routemanager not set
            if daytime_routemanager.mode in [
                    "raids_mitm", "mon_mitm", "iv_mitm"
            ]:
                Worker = WorkerMITM(self.args,
                                    id,
                                    lastKnownState,
                                    self,
                                    daytime_routemanager,
                                    nightime_routemanager,
                                    self._mitm_mapper,
                                    devicesettings,
                                    db_wrapper=self.db_wrapper)
            elif daytime_routemanager.mode in ["raids_ocr"]:
                from worker.WorkerOcr import WorkerOcr
                Worker = WorkerOcr(self.args,
                                   id,
                                   lastKnownState,
                                   self,
                                   daytime_routemanager,
                                   nightime_routemanager,
                                   devicesettings,
                                   db_wrapper=self.db_wrapper)
            elif daytime_routemanager.mode in ["pokestops"]:
                Worker = WorkerQuests(self.args,
                                      id,
                                      lastKnownState,
                                      self,
                                      daytime_routemanager,
                                      nightime_routemanager,
                                      self._mitm_mapper,
                                      devicesettings,
                                      db_wrapper=self.db_wrapper)
            else:
                log.fatal("Mode not implemented")
                sys.exit(1)

        newWorkerThread = Thread(name='worker_%s' % id,
                                 target=Worker.start_worker)
        self.__current_users[id] = [newWorkerThread, Worker, websocket]
        newWorkerThread.daemon = False
        newWorkerThread.start()

        return True

    def __send(self, id, message):
        nextMessage = OutgoingMessage(id, message)
        self._send_queue.put(nextMessage)

    async def _retrieve_next_send(self):
        found = None
        while found is None:
            try:
                found = self._send_queue.get_nowait()
            except queue.Empty:
                await asyncio.sleep(0.02)
        return found
        # return self._send_queue.get(True)

    async def _producer_handler(self, websocket, path):
        while True:
            # retrieve next message from queue to be sent, block if empty
            next = None
            while next is None:
                next = await self._retrieve_next_send()
            # TODO: next consists of pair <id, message>, split that up and send message to the user with the ID
            await self.__send_specific(next.id, next.message)
            # message = await websocket.recv()
            # log.debug("Recv: %s" % str("Done"))
            # await asyncio.wait([value[1].send(next.message) for key, value in self.__current_users if key == next.id])
            # await websocket.send()

    async def __send_specific(self, id, message):
        for key, value in self.__current_users.items():
            if key == id and value[2].open:
                await value[2].send(message)

        # [value[1].send(next.message) for key, value in self.__current_users if key == next.id]

    async def _consumer_handler(self, websocket, path):
        while True:
            message = None
            id = str(websocket.request_headers.get_all("Origin")[0])
            try:
                message = await asyncio.wait_for(websocket.recv(), timeout=0.01)
            except asyncio.TimeoutError:
                log.debug('timeout!')
                await asyncio.sleep(0.02)
            except websockets.exceptions.ConnectionClosed:
                log.debug("Connection closed while receiving data")
                log.debug("Closed connection to %s" % str(id))
                worker = self.__current_users.get(id, None)
                return
                # TODO: cleanup, stop worker...
            if message is not None:
                self.__onMessage(message)

    async def handler(self, websocket, path):
        log.debug("Waiting for connections")
        continueWork = await self.__register(websocket)
        # newWorkerThread = Thread(name='worker%s' % id, target=self._consumer_handler, args=(websocket, path,))
        # newWorkerThread.daemon = False
        # newWorkerThread.start()
        if not continueWork:
            return
        consumer_task = asyncio.ensure_future(
            self._consumer_handler(websocket, path))
        producer_task = asyncio.ensure_future(
            self._producer_handler(websocket, path))
        done, pending = await asyncio.wait(
            [producer_task, consumer_task],
            return_when=asyncio.FIRST_COMPLETED,
        )
        for task in pending:
            task.cancel()
        log.info("All done with %s" %
                 str(websocket.request_headers.get_all("Origin")[0]))
        await self.__unregister(websocket)

    def __onMessage(self, message):
        id = -1
        response = None
        if isinstance(message, str):
            log.debug("Receiving message: %s" % str(message))
            splitup = message.split(";")
            id = int(splitup[0])
            response = splitup[1]
        else:
            log.debug("Received binary values.")
            id = int.from_bytes(message[:4], byteorder='big', signed=False)
            response = message[4:]
        self.__setResponse(id, response)
        if not self.__setEvent(id):
            # remove the response again - though that is kinda stupid
            self.__popResponse(id)

    def __getNewMessageId(self):
        self.__idMutex.acquire()
        self.__nextId += 1
        self.__nextId = int(math.fmod(self.__nextId, 100000))
        if self.__nextId == 0:
            # fmod wrapped around; skip 0 so IDs stay positive
            self.__nextId = 1
        toBeReturned = self.__nextId
        self.__idMutex.release()
        return toBeReturned

    def __setRequest(self, id, event):
        self.__requestsMutex.acquire()
        self.__requests[id] = event
        self.__requestsMutex.release()

    def __setEvent(self, id):
        self.__requestsMutex.acquire()
        result = False
        if id in self.__requests:
            self.__requests[id].set()
            result = True
        else:
            # the request has already been deleted due to a timeout...
            result = False
        self.__requestsMutex.release()
        return result

    def __removeRequest(self, id):
        self.__requestsMutex.acquire()
        self.__requests.pop(id)
        self.__requestsMutex.release()

    def __setResponse(self, id, message):
        self.__receivedMutex.acquire()
        self.__received[id] = message
        self.__receivedMutex.release()

    def __popResponse(self, id):
        self.__receivedMutex.acquire()
        message = self.__received.pop(id)
        self.__receivedMutex.release()
        return message

    def sendAndWait(self, id, message, timeout):
        log.debug("Sending command: %s" % message)
        if self.__current_users.get(id, None) is None:
            raise WebsocketWorkerRemovedException
        messageId = self.__getNewMessageId()
        messageEvent = Event()
        messageEvent.clear()

        self.__setRequest(messageId, messageEvent)

        toBeSent = u"%s;%s" % (str(messageId), message)
        log.debug("Sending:")
        log.debug("To be sent: %s" % toBeSent)

        self.__send(id, toBeSent)

        result = None
        log.debug("Timeout: " + str(timeout))
        if messageEvent.wait(timeout):
            log.debug("Received anser, popping response")
            log.debug("Received an answer")
            # okay, we can get the response..
            result = self.__popResponse(messageId)
            log.debug("Answer: %s" % result)
        else:
            # timeout reached
            log.warning("Timeout reached while waiting for a response...")
            if self.__current_users.get(id, None) is None:
                raise WebsocketWorkerRemovedException

        log.debug("Received response: %s" % str(result))
        self.__removeRequest(messageId)
        log.debug("Returning response to worker.")
        return result
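
# A minimal standalone sketch of the request/response correlation pattern that
# sendAndWait() implements above: each outgoing message gets a fresh ID, the
# sender blocks on a per-ID Event, and the receiver sets that Event when the
# matching reply arrives. All names below are illustrative.
from threading import Event, Lock, Thread
import time

_requests, _responses, _state_lock = {}, {}, Lock()

def deliver_reply(msg_id, payload, delay):
    time.sleep(delay)                 # simulate the reply arriving later
    with _state_lock:
        _responses[msg_id] = payload
        if msg_id in _requests:
            _requests[msg_id].set()   # wake the waiting sender

def send_and_wait(msg_id, timeout):
    ev = Event()
    with _state_lock:
        _requests[msg_id] = ev
    ok = ev.wait(timeout)             # block until reply or timeout
    with _state_lock:
        _requests.pop(msg_id)
        return _responses.pop(msg_id) if ok else None

Thread(target=deliver_reply, args=(1, "pong", 0.1)).start()
print(send_and_wait(1, timeout=2.0))  # -> pong
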
Example #48
0
class SoundSink(SoundPipeline):

    __gsignals__ = SoundPipeline.__generic_signals__.copy()
    __gsignals__.update({
        "eos": one_arg_signal,
    })

    def __init__(self,
                 sink_type=None,
                 sink_options={},
                 codecs=get_decoders(),
                 codec_options={},
                 volume=1.0):
        if not sink_type:
            sink_type = get_default_sink()
        if sink_type not in get_sink_plugins():
            raise InitExit(1, "invalid sink: %s" % sink_type)
        matching = [
            x for x in CODEC_ORDER if (x in codecs and x in get_decoders())
        ]
        log("SoundSink(..) found matching codecs %s", matching)
        if not matching:
            raise InitExit(
                1,
                "no matching codecs between arguments '%s' and supported list '%s'"
                % (csv(codecs), csv(get_decoders().keys())))
        codec = matching[0]
        decoder, parser, stream_compressor = get_decoder_elements(codec)
        SoundPipeline.__init__(self, codec)
        self.container_format = (parser
                                 or "").replace("demux",
                                                "").replace("depay", "")
        self.sink_type = sink_type
        self.stream_compressor = stream_compressor
        log("container format=%s, stream_compressor=%s, sink type=%s",
            self.container_format, self.stream_compressor, self.sink_type)
        self.levels = deque(maxlen=100)
        self.volume = None
        self.src = None
        self.queue = None
        self.normal_volume = volume
        self.target_volume = volume
        self.volume_timer = 0
        self.overruns = 0
        self.underruns = 0
        self.overrun_events = deque(maxlen=100)
        self.queue_state = "starting"
        self.last_data = None
        self.last_underrun = 0
        self.last_overrun = 0
        self.refill = True
        self.last_max_update = monotonic_time()
        self.last_min_update = monotonic_time()
        self.level_lock = Lock()
        pipeline_els = []
        appsrc_el = [
            "appsrc",
            #"do-timestamp=1",
            "name=src",
            "emit-signals=0",
            "block=0",
            "is-live=0",
            "stream-type=%s" % STREAM_TYPE,
            "format=%s" % BUFFER_FORMAT
        ]
        pipeline_els.append(" ".join(appsrc_el))
        if parser:
            pipeline_els.append(parser)
        if decoder:
            decoder_str = plugin_str(decoder, codec_options)
            pipeline_els.append(decoder_str)
        pipeline_els.append("audioconvert")
        pipeline_els.append("audioresample")
        if QUEUE_TIME > 0:
            pipeline_els.append(" ".join([
                "queue", "name=queue", "min-threshold-time=0",
                "max-size-buffers=0", "max-size-bytes=0",
                "max-size-time=%s" % QUEUE_TIME,
                "leaky=%s" % QUEUE_LEAK
            ]))
        pipeline_els.append("volume name=volume volume=0")
        sink_attributes = SINK_SHARED_DEFAULT_ATTRIBUTES.copy()
        #anything older than this may cause problems (ie: centos 6.x)
        #because the attributes may not exist
        sink_attributes.update(SINK_DEFAULT_ATTRIBUTES.get(sink_type, {}))
        get_options_cb = DEFAULT_SINK_PLUGIN_OPTIONS.get(
            sink_type.replace("sink", ""))
        if get_options_cb:
            v = get_options_cb()
            log("%s()=%s", get_options_cb, v)
            sink_attributes.update(v)
        sink_attributes.update(sink_options)
        sink_str = plugin_str(sink_type, sink_attributes)
        pipeline_els.append(sink_str)
        if not self.setup_pipeline_and_bus(pipeline_els):
            return
        self.volume = self.pipeline.get_by_name("volume")
        self.src = self.pipeline.get_by_name("src")
        self.queue = self.pipeline.get_by_name("queue")
        if self.queue:
            if QUEUE_SILENT:
                self.queue.set_property("silent", False)
            else:
                self.queue.connect("overrun", self.queue_overrun)
                self.queue.connect("underrun", self.queue_underrun)
                self.queue.connect("running", self.queue_running)
                self.queue.connect("pushing", self.queue_pushing)

    def __repr__(self):
        return "SoundSink('%s' - %s)" % (self.pipeline_str, self.state)

    def cleanup(self):
        SoundPipeline.cleanup(self)
        self.cancel_volume_timer()
        self.sink_type = ""
        self.src = None

    def start(self):
        SoundPipeline.start(self)
        self.timeout_add(UNMUTE_DELAY, self.start_adjust_volume)

    def start_adjust_volume(self, interval=100):
        if self.volume_timer != 0:
            glib.source_remove(self.volume_timer)
        self.volume_timer = self.timeout_add(interval, self.adjust_volume)
        return False

    def cancel_volume_timer(self):
        if self.volume_timer != 0:
            glib.source_remove(self.volume_timer)
            self.volume_timer = 0

    def adjust_volume(self):
        if not self.volume:
            self.volume_timer = 0
            return False
        cv = self.volume.get_property("volume")
        delta = self.target_volume - cv
        from math import sqrt, copysign
        change = copysign(sqrt(abs(delta)), delta) / 15.0
        gstlog("adjust_volume current volume=%.2f, change=%.2f", cv, change)
        self.volume.set_property("volume", max(0, cv + change))
        if abs(delta) < 0.01:
            self.volume_timer = 0
            return False
        return True

    def queue_pushing(self, *_args):
        gstlog("queue_pushing")
        self.queue_state = "pushing"
        self.emit_info()
        return True

    def queue_running(self, *_args):
        gstlog("queue_running")
        self.queue_state = "running"
        self.emit_info()
        return True

    def queue_underrun(self, *_args):
        now = monotonic_time()
        if self.queue_state == "starting" or 1000 * (
                now - self.start_time) < GRACE_PERIOD:
            gstlog("ignoring underrun during startup")
            return 1
        self.underruns += 1
        gstlog("queue_underrun")
        self.queue_state = "underrun"
        if now - self.last_underrun > 5:
            #only count underruns when we're back to no min time:
            qmin = self.queue.get_property("min-threshold-time") // MS_TO_NS
            clt = self.queue.get_property("current-level-time") // MS_TO_NS
            gstlog("queue_underrun level=%3i, min=%3i", clt, qmin)
            if qmin == 0 and clt < 10:
                self.last_underrun = now
                self.refill = True
                self.set_max_level()
                self.set_min_level()
        self.emit_info()
        return 1

    def get_level_range(self, mintime=2, maxtime=10):
        now = monotonic_time()
        filtered = [
            v for t, v in tuple(self.levels)
            if (now - t) >= mintime and (now - t) <= maxtime
        ]
        if len(filtered) >= 10:
            maxl = max(filtered)
            minl = min(filtered)
            #range of the levels recorded:
            return maxl - minl
        return 0

    def queue_overrun(self, *_args):
        now = monotonic_time()
        if self.queue_state == "starting" or 1000 * (
                now - self.start_time) < GRACE_PERIOD:
            gstlog("ignoring overrun during startup")
            return 1
        clt = self.queue.get_property("current-level-time") // MS_TO_NS
        log("queue_overrun level=%ims", clt)
        now = monotonic_time()
        #grace period of recording overruns:
        #(because when we record an overrun, we lower the max-time,
        # which causes more overruns!)
        if now - self.last_overrun > 2:
            self.last_overrun = now
            self.set_max_level()
            self.overrun_events.append(now)
        self.overruns += 1
        return 1

    def set_min_level(self):
        if not self.queue:
            return
        now = monotonic_time()
        elapsed = now - self.last_min_update
        lrange = self.get_level_range()
        log("set_min_level() lrange=%i, elapsed=%i", lrange, elapsed)
        if elapsed < 1:
            #not more than once a second
            return
        if self.refill:
            #need to have a gap between min and max,
            #so we cannot go higher than mst-50:
            mst = self.queue.get_property("max-size-time") // MS_TO_NS
            mrange = max(lrange + 100, UNDERRUN_MIN_LEVEL)
            mtt = min(mst - 50, mrange)
            gstlog(
                "set_min_level mtt=%3i, max-size-time=%3i, lrange=%s, mrange=%s (UNDERRUN_MIN_LEVEL=%s)",
                mtt, mst, lrange, mrange, UNDERRUN_MIN_LEVEL)
        else:
            mtt = 0
        cmtt = self.queue.get_property("min-threshold-time") // MS_TO_NS
        if cmtt == mtt:
            return
        if not self.level_lock.acquire(False):
            gstlog("cannot get level lock for setting min-threshold-time")
            return
        try:
            self.queue.set_property("min-threshold-time", mtt * MS_TO_NS)
            gstlog("set_min_level min-threshold-time=%s", mtt)
            self.last_min_update = now
        finally:
            self.level_lock.release()

    def set_max_level(self):
        if not self.queue:
            return
        now = monotonic_time()
        elapsed = now - self.last_max_update
        if elapsed < 1:
            #not more than once a second
            return
        lrange = self.get_level_range(mintime=0)
        log("set_max_level lrange=%3i, elapsed=%is", lrange, int(elapsed))
        cmst = self.queue.get_property("max-size-time") // MS_TO_NS
        #overruns in the last minute:
        olm = len([x for x in tuple(self.overrun_events) if now - x < 60])
        #increase target if we have more than 5 overruns in the last minute:
        target_mst = lrange * (100 + MARGIN + min(100, olm * 20)) // 100
        #from 100% down to 0% in 2 seconds after underrun:
        pct = max(0, int((self.last_overrun + 2 - now) * 50))
        #use this last_overrun percentage value to temporarily decrease the target
        #(causes overruns that drop packets and lower the buffer level)
        target_mst = max(50, int(target_mst - pct * lrange // 100))
        mst = (cmst + target_mst) // 2
        if self.refill:
            #temporarily raise max level during underruns,
            #so set_min_level has more room for manoeuvre:
            mst += UNDERRUN_MIN_LEVEL
        #cap it at 1 second:
        mst = min(mst, 1000)
        log(
            "set_max_level overrun count=%-2i, margin=%3i, pct=%2i, cmst=%3i, target=%3i, mst=%3i",
            olm, MARGIN, pct, cmst, target_mst, mst)
        if abs(cmst - mst) <= max(50, lrange // 2):
            #not enough difference
            return
        if not self.level_lock.acquire(False):
            gstlog("cannot get level lock for setting max-size-time")
            return
        try:
            self.queue.set_property("max-size-time", mst * MS_TO_NS)
            log("set_max_level max-size-time=%s", mst)
            self.last_max_update = now
        finally:
            self.level_lock.release()

    def eos(self):
        gstlog("eos()")
        if self.src:
            self.src.emit('end-of-stream')
        self.cleanup()
        return 0

    def get_info(self):
        info = SoundPipeline.get_info(self)
        if QUEUE_TIME > 0 and self.queue:
            clt = self.queue.get_property("current-level-time")
            qmax = self.queue.get_property("max-size-time")
            qmin = self.queue.get_property("min-threshold-time")
            info["queue"] = {
                "min": qmin // MS_TO_NS,
                "max": qmax // MS_TO_NS,
                "cur": clt // MS_TO_NS,
                "pct": min(QUEUE_TIME, clt) * 100 // qmax,
                "overruns": self.overruns,
                "underruns": self.underruns,
                "state": self.queue_state,
            }
        return info

    def can_push_buffer(self):
        if not self.src:
            log("no source, dropping buffer")
            return False
        if self.state in ("stopped", "error"):
            log("pipeline is %s, dropping buffer", self.state)
            return False
        return True

    def uncompress_data(self, data, metadata):
        if not data or not metadata:
            return data
        compress = metadata.get("compress")
        if not compress:
            return data
        assert compress in ("lz4", "lzo")
        v = decompress_by_name(data, compress)
        #log("decompressed %s data: %i bytes into %i bytes", compress, len(data), len(v))
        return v

    def add_data(self, data, metadata=None, packet_metadata=()):
        if not self.can_push_buffer():
            return
        data = self.uncompress_data(data, metadata)
        for x in packet_metadata:
            self.do_add_data(x)
        if self.do_add_data(data, metadata):
            self.rec_queue_level(data)
            self.set_max_level()
            self.set_min_level()
            #drop back down quickly if the level has reached min:
            if self.refill:
                clt = self.queue.get_property("current-level-time") // MS_TO_NS
                qmin = self.queue.get_property(
                    "min-threshold-time") // MS_TO_NS
                gstlog("add_data: refill=%s, level=%i, min=%i", self.refill,
                       clt, qmin)
                if qmin > 0 and clt > qmin:
                    self.refill = False
        self.emit_info()

    def do_add_data(self, data, metadata=None):
        #having a timestamp causes problems with the queue and overruns:
        log("do_add_data(%s bytes, %s) queue_state=%s", len(data), metadata,
            self.queue_state)
        buf = gst.Buffer.new_allocate(None, len(data), None)
        buf.fill(0, data)
        if metadata:
            #having a timestamp causes problems with the queue and overruns:
            #ts = metadata.get("timestamp")
            #if ts is not None:
            #    buf.timestamp = normv(ts)
            #    log.info("timestamp=%s", ts)
            d = metadata.get("duration")
            if d is not None:
                d = normv(d)
                if d > 0:
                    buf.duration = normv(d)
        if self.push_buffer(buf):
            self.inc_buffer_count()
            self.inc_byte_count(len(data))
            return True
        return False

    def rec_queue_level(self, data):
        q = self.queue
        if not q:
            return
        clt = q.get_property("current-level-time") // MS_TO_NS
        log("pushed %5i bytes, new buffer level: %3ims, queue state=%s",
            len(data), clt, self.queue_state)
        now = monotonic_time()
        self.levels.append((now, clt))

    def push_buffer(self, buf):
        #buf.size = size
        #buf.timestamp = timestamp
        #buf.duration = duration
        #buf.offset = offset
        #buf.offset_end = offset_end
        #buf.set_caps(gst.caps_from_string(caps))
        r = self.src.emit("push-buffer", buf)
        if r != gst.FlowReturn.OK:
            if self.queue_state != "error":
                log.error("Error pushing buffer: %s", r)
                self.update_state("error")
                self.emit('error', "push-buffer error: %s" % r)
            return 0
        return 1
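Note how set_min_level and set_max_level in the class above call level_lock.acquire(False): the non-blocking acquire means a concurrent adjustment is simply skipped instead of waited for. A minimal sketch of that idiom, with a plain dict standing in for the GStreamer queue element:

from threading import Lock

level_lock = Lock()

def update_threshold(queue, value):
    # acquire(False) returns immediately; False means another thread is
    # mid-update, so we drop this update rather than block the caller
    if not level_lock.acquire(False):
        return False
    try:
        queue["max-size-time"] = value
        return True
    finally:
        level_lock.release()

queue = {"max-size-time": 0}
assert update_threshold(queue, 450)

This trades an occasional missed update for never stalling the data path, which is reasonable here since the levels are recomputed on every pushed buffer anyway.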
Example #49
0
class Crazyflie():
    """The Crazyflie class"""

    # Called on disconnect, no matter the reason
    disconnected = Caller()
    # Called on unintentional disconnect only
    connection_lost = Caller()
    # Called when the first packet in a new link is received
    link_established = Caller()
    # Called when the user requests a connection
    connection_requested = Caller()
    # Called when the link is established and the TOCs (that are not cached)
    # have been downloaded
    connected = Caller()
    # Called if establishing of the link fails (i.e times out)
    connection_failed = Caller()
    # Called for every packet received
    packet_received = Caller()
    # Called for every packet sent
    packet_sent = Caller()
    # Called when the link driver updates the link quality measurement
    link_quality_updated = Caller()

    state = State.DISCONNECTED

    def __init__(self, link=None, ro_cache=None, rw_cache=None):
        """
        Create the objects from this module and register callbacks.

        ro_cache -- Path to read-only cache (string)
        rw_cache -- Path to read-write cache (string)
        """
        self.link = link
        self._toc_cache = TocCache(ro_cache=ro_cache, rw_cache=rw_cache)

        self.incoming = _IncomingPacketHandler(self)
        self.incoming.daemon = True
        self.incoming.start()

        self.commander = Commander(self)
        self.log = Log(self)
        self.console = Console(self)
        self.param = Param(self)

        self.link_uri = ""

        # Used for retry when no reply was sent back
        self.packet_received.add_callback(self._check_for_initial_packet_cb)
        self.packet_received.add_callback(self._check_for_answers)

        self._answer_patterns = {}

        self._send_lock = Lock()

        self.connected_ts = None

        # Connect callbacks to logger
        self.disconnected.add_callback(
            lambda uri: logger.info("Callback->Disconnected from [%s]", uri))
        self.disconnected.add_callback(self._disconnected)
        self.link_established.add_callback(
            lambda uri: logger.info("Callback->Connected to [%s]", uri))
        self.connection_lost.add_callback(lambda uri, errmsg: logger.info(
            "Callback->Connection lost to"
            " [%s]: %s", uri, errmsg))
        self.connection_failed.add_callback(lambda uri, errmsg: logger.info(
            "Callback->Connected failed to"
            " [%s]: %s", uri, errmsg))
        self.connection_requested.add_callback(lambda uri: logger.info(
            "Callback->Connection initialized[%s]", uri))
        self.connected.add_callback(lambda uri: logger.info(
            "Callback->Connection setup finished [%s]", uri))

    def _disconnected(self, link_uri):
        """ Callback when disconnected."""
        self.connected_ts = None

    def _start_connection_setup(self):
        """Start the connection setup by refreshing the TOCs"""
        logger.info("We are connected[%s], request connection setup",
                    self.link_uri)
        self.log.refresh_toc(self._log_toc_updated_cb, self._toc_cache)

    def _param_toc_updated_cb(self):
        """Called when the param TOC has been fully updated"""
        logger.info("Param TOC finished updating")
        self.connected_ts = datetime.datetime.now()
        self.connected.call(self.link_uri)

    def _log_toc_updated_cb(self):
        """Called when the log TOC has been fully updated"""
        logger.info("Log TOC finished updating")
        self.param.refresh_toc(self._param_toc_updated_cb, self._toc_cache)

    def _link_error_cb(self, errmsg):
        """Called from the link driver when there's an error"""
        logger.warning("Got link error callback [%s] in state [%s]", errmsg,
                       self.state)
        if (self.link is not None):
            self.link.close()
        self.link = None
        if (self.state == State.INITIALIZED):
            self.connection_failed.call(self.link_uri, errmsg)
        if (self.state == State.CONNECTED
                or self.state == State.SETUP_FINISHED):
            self.disconnected.call(self.link_uri)
            self.connection_lost.call(self.link_uri, errmsg)
        self.state = State.DISCONNECTED

    def _link_quality_cb(self, percentage):
        """Called from link driver to report link quality"""
        self.link_quality_updated.call(percentage)

    def _check_for_initial_packet_cb(self, data):
        """
        Called when first packet arrives from Crazyflie.

        This is used to determine if we are connected to something that is
        answering.
        """
        self.state = State.CONNECTED
        self.link_established.call(self.link_uri)
        self.packet_received.remove_callback(self._check_for_initial_packet_cb)

    def open_link(self, link_uri):
        """
        Open the communication link to a copter at the given URI and setup the
        connection (download log/parameter TOC).
        """
        self.connection_requested.call(link_uri)
        self.state = State.INITIALIZED
        self.link_uri = link_uri
        try:
            self.link = cflib.crtp.get_link_driver(link_uri,
                                                   self._link_quality_cb,
                                                   self._link_error_cb)

            if not self.link:
                message = "No driver found or malformed URI: {}"\
                    .format(link_uri)
                logger.warning(message)
                self.connection_failed.call(link_uri, message)
            else:
                # Add a callback so we can check that any data is coming
                # back from the copter
                self.packet_received.add_callback(
                    self._check_for_initial_packet_cb)

                self._start_connection_setup()
        except Exception as ex:  # pylint: disable=W0703
            # We want to catch every possible exception here and show
            # it in the user interface
            import traceback
            logger.error("Couldn't load link driver: %s\n\n%s", ex,
                         traceback.format_exc())
            exception_text = "Couldn't load link driver: %s\n\n%s" % (
                ex, traceback.format_exc())
            if self.link:
                self.link.close()
                self.link = None
            self.connection_failed.call(link_uri, exception_text)

    def close_link(self):
        """Close the communication link."""
        logger.info("Closing link")
        if (self.link is not None):
            self.commander.send_setpoint(0, 0, 0, 0)
        if (self.link is not None):
            self.link.close()
            self.link = None
        self._answer_patterns = {}
        self.disconnected.call(self.link_uri)

    def add_port_callback(self, port, cb):
        """Add a callback to cb on port"""
        self.incoming.add_port_callback(port, cb)

    def remove_port_callback(self, port, cb):
        """Remove the callback cb on port"""
        self.incoming.remove_port_callback(port, cb)

    def _no_answer_do_retry(self, pk, pattern):
        """Resend packets that we have not gotten answers to"""
        logger.debug("Resending for pattern %s", pattern)
        # Set the timer to None before trying to send again
        self.send_packet(pk, expected_reply=pattern, resend=True)

    def _check_for_answers(self, pk):
        """
        Callback called for every packet received to check if we are
        waiting for an answer on this port. If so, then cancel the retry
        timer.
        """
        self._send_lock.acquire()
        longest_match = ()
        if len(self._answer_patterns) > 0:
            data = (pk.header, ) + pk.datat
            for p in self._answer_patterns.keys():
                logger.debug("Looking for pattern match on %s vs %s", p, data)
                if len(p) <= len(data):
                    if p == data[0:len(p)]:
                        match = data[0:len(p)]
                        if len(match) >= len(longest_match):
                            logger.debug("Found new longest match %s", match)
                            longest_match = match
        if len(longest_match) > 0:
            del self._answer_patterns[longest_match]
        self._send_lock.release()

    def send_packet(self, pk, expected_reply=(), resend=False):
        """
        Send a packet through the link interface.

        pk -- Packet to send
        expected_reply -- Pattern of data expected to be sent back; if
                          non-empty, a retry timer resends the packet until
                          a matching answer is received
        resend -- True if this call is a retry of an earlier packet

        """
        self._send_lock.acquire()
        if (self.link is not None):
            self.link.send_packet(pk)
            self.packet_sent.call(pk)
            if len(expected_reply) > 0 and not resend:
                pattern = (pk.header, ) + expected_reply
                logger.debug(
                    "Sending packet and expecting the %s pattern back",
                    pattern)
                new_timer = Timer(
                    0.2, lambda: self._no_answer_do_retry(pk, pattern))
                self._answer_patterns[pattern] = new_timer
                new_timer.start()
            elif resend:
                # Check if we have gotten an answer, if not try again
                pattern = expected_reply
                if pattern in self._answer_patterns:
                    logger.debug("We want to resend and the pattern is there")
                    if self._answer_patterns[pattern]:
                        new_timer = Timer(
                            0.2, lambda: self._no_answer_do_retry(pk, pattern))
                        self._answer_patterns[pattern] = new_timer
                        new_timer.start()
                else:
                    logger.debug("Resend requested, but no pattern found: %s",
                                 self._answer_patterns)
        self._send_lock.release()
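The resend machinery in send_packet above keys one-shot threading.Timer objects by the expected-reply pattern; _check_for_answers removes the pattern when a matching packet arrives, which is what stops the retries. A reduced sketch of that timer-per-pattern idiom (transmit is a stand-in for the link driver, and the 0.2 s interval matches the code above):

from threading import Lock, Timer

pending = {}   # expected-reply pattern -> active retry Timer
lock = Lock()

def transmit(pk):
    pass       # stand-in for self.link.send_packet(pk)

def send_expecting(pk, pattern):
    with lock:
        transmit(pk)
        timer = Timer(0.2, lambda: retry(pk, pattern))
        pending[pattern] = timer
        timer.start()

def retry(pk, pattern):
    with lock:
        if pattern not in pending:   # an answer arrived in the meantime
            return
        transmit(pk)
        timer = Timer(0.2, lambda: retry(pk, pattern))
        pending[pattern] = timer
        timer.start()

def on_reply(pattern):
    with lock:
        timer = pending.pop(pattern, None)
        if timer:
            timer.cancel()

Like the original, this sketch retries forever until an answer arrives; a production version would likely cap the number of retries.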
Example #50
0
class SentinelHubDownloadClient(DownloadClient):
    """ Download client specifically configured for download from Sentinel Hub service
    """
    _CACHED_SESSIONS = {}

    def __init__(self, *, session=None, **kwargs):
        """
        :param session: An OAuth2 session with Sentinel Hub service
        :type session: SentinelHubSession or None
        :param kwargs: Optional parameters from DownloadClient
        """
        super().__init__(**kwargs)

        if session is not None and not isinstance(session, SentinelHubSession):
            raise ValueError(f'A session parameter has to be an instance of {SentinelHubSession.__name__} or None, but '
                             f'{session} was given')
        self.session = session

        self.rate_limit = SentinelHubRateLimit(num_processes=self.config.number_of_download_processes)
        self.lock = Lock()

    @retry_temporal_errors
    @fail_user_errors
    def _execute_download(self, request):
        """ Executes the download with a single thread and uses a rate limit object, which is shared between all threads
        """
        thread_name = currentThread().getName()

        while True:
            sleep_time = self._execute_with_lock(self.rate_limit.register_next)

            if sleep_time == 0:
                response = self._do_download(request)

                self._execute_with_lock(self.rate_limit.update, response.headers)

                if response.status_code != requests.status_codes.codes.TOO_MANY_REQUESTS:
                    response.raise_for_status()

                    LOGGER.debug('%s: Successful download from %s', thread_name, request.url)
                    return response.content
            else:
                LOGGER.debug('%s: Sleeping for %0.2f', thread_name, sleep_time)
                time.sleep(sleep_time)

    def _execute_with_lock(self, thread_unsafe_function, *args, **kwargs):
        """ Executes a function inside a thread lock and handles potential errors
        """
        self.lock.acquire()
        try:
            return thread_unsafe_function(*args, **kwargs)
        finally:
            self.lock.release()

    def _do_download(self, request):
        """ Runs the download
        """
        return requests.request(
            request.request_type.value,
            url=request.url,
            json=request.post_values,
            headers=self._prepare_headers(request),
            timeout=self.config.download_timeout_seconds
        )

    def _prepare_headers(self, request):
        """ Prepares final headers by potentially joining them with session headers
        """
        if not request.use_session:
            return request.headers

        if self.session is None:
            self.session = self._execute_with_lock(self._get_session)

        return {
            **self.session.session_headers,
            **request.headers
        }

    def _get_session(self):
        """ Provides a session object either from cache or it creates a new one
        """
        cache_key = self.config.sh_client_id, self.config.sh_client_secret, self.config.get_sh_oauth_url()
        if cache_key in SentinelHubDownloadClient._CACHED_SESSIONS:
            return SentinelHubDownloadClient._CACHED_SESSIONS[cache_key]

        session = SentinelHubSession(config=self.config)
        SentinelHubDownloadClient._CACHED_SESSIONS[cache_key] = session
        return session
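_execute_with_lock above spells out acquire/try/finally; the equivalent and more idiomatic form uses the lock as a context manager, which releases it even when the wrapped function raises. A minimal sketch:

from threading import Lock

lock = Lock()

def execute_with_lock(thread_unsafe_function, *args, **kwargs):
    # the with-statement is equivalent to acquire() + try/finally release()
    with lock:
        return thread_unsafe_function(*args, **kwargs)

print(execute_with_lock(sum, [1, 2, 3]))   # -> 6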
Example #51
0
class NucleoUSART(AvatarPeripheral, Thread):
    def read_status_register(self, offset, size):
        self.lock.acquire(True)
        ret = self.status_register
        self.lock.release()
        return ret

    def read_data_register(self, offset, size):
        self.lock.acquire(True)
        ret = self.data_buf[0]
        self.data_buf = self.data_buf[1:]
        if len(self.data_buf) == 0:
            self.status_register &= ~SR_RXNE
        self.lock.release()
        return ret

    def write_data_register(self, offset, size, value):
        if self.connected:
            self.conn.send(bytes([value & 0xff]))  # transmit the raw byte

        return True

    def nop_read(self, offset, size):
        return 0x00

    def nop_write(self, offset, size, value):
        return True

    def __init__(self, name, address, size, nucleo_usart_port=5656, **kwargs):
        Thread.__init__(self)
        AvatarPeripheral.__init__(self, name, address, size)
        self.port = nucleo_usart_port

        self.data_buf = bytearray()
        self.status_register = SR_TXE | SR_TC

        self.read_handler[0:3] = self.read_status_register
        self.read_handler[4:7] = self.read_data_register
        self.write_handler[4:7] = self.write_data_register

        self.read_handler[8:size] = self.nop_read
        self.write_handler[8:size] = self.nop_write

        self.connected = False

        self.lock = Lock()
        self._close = Event()
        self.sock = None
        self.conn = None
        self.daemon = True
        self.start()

    def shutdown(self):
        self._close.set()

        if self.conn:
            self.conn.close()

        if self.sock:
            self.sock.close()

    def run(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.setblocking(0)
        self.sock.bind(('127.0.0.1', self.port))
        self.sock.settimeout(0.1)

        while not self._close.is_set():
            self.sock.listen(1)

            try:
                self.conn, addr = self.sock.accept()
                self.conn.settimeout(0.1)
                self.connected = True
            except socket.timeout:
                continue
            except OSError as e:
                if e.errno == 9:
                    # Bad file descriptor: this only happens when close()
                    # has been called on the socket, in which case continuing
                    # the loop lets it terminate, which is the desired
                    # behaviour
                    continue
                else:
                    # Something terrible happened
                    raise

            while not self._close.is_set():
                try:
                    data = self.conn.recv(1)
                except socket.timeout:
                    continue
                if not data:
                    break
                self.lock.acquire(True)
                self.data_buf += data
                self.status_register |= SR_RXNE
                self.lock.release()
            self.connected = False
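Because the peripheral above listens on a localhost TCP socket, a test can inject UART input with a plain socket and then poll the emulated registers. A hedged usage sketch: the SR_RXNE value follows the STM32 USART_SR bit layout and the constructor arguments are illustrative, so adjust both to the surrounding avatar2 setup.

import socket
import time

SR_RXNE = 1 << 5   # assumed bit position (STM32 USART_SR convention)

usart = NucleoUSART("usart", 0x40011000, 0x100)   # address/size illustrative

# Inject a byte as if a terminal were attached to the virtual UART:
client = socket.create_connection(("127.0.0.1", usart.port))
client.send(b"A")
time.sleep(0.2)   # give the reader thread a chance to run

assert usart.read_status_register(0, 4) & SR_RXNE   # data pending
assert usart.read_data_register(4, 4) == ord("A")   # and it is our byte
client.close()
usart.shutdown()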
Example #52
0
class DesignState(object):
    def __init__(self):
        self.designs = {}
        self.designs_lock = Lock()

        self.builds = []
        self.builds_lock = Lock()

        self.tasks = []
        self.tasks_lock = Lock()

        return

    # TODO Need to lock a design base or change once implementation
    # has started
    def get_design(self, design_id):
        if design_id not in self.designs:
            raise DesignError("Design ID %s not found" % (design_id))

        return objects.SiteDesign.obj_from_primitive(self.designs[design_id])

    def post_design(self, site_design):
        if site_design is not None:
            my_lock = self.designs_lock.acquire(blocking=True, timeout=10)
            if my_lock:
                design_id = site_design.id
                if design_id not in self.designs.keys():
                    self.designs[design_id] = site_design.obj_to_primitive()
                else:
                    self.designs_lock.release()
                    raise StateError("Design ID %s already exists" % design_id)
                self.designs_lock.release()
                return True
            raise StateError("Could not acquire lock")
        else:
            raise DesignError("Design change must be a SiteDesign instance")

    def put_design(self, site_design):
        if site_design is not None:
            my_lock = self.designs_lock.acquire(blocking=True, timeout=10)
            if my_lock:
                design_id = site_design.id
                if design_id not in self.designs.keys():
                    self.designs_lock.release()
                    raise StateError("Design ID %s does not exist" % design_id)
                else:
                    self.designs[design_id] = site_design.obj_to_primitive()
                    self.designs_lock.release()
                    return True
            raise StateError("Could not acquire lock")
        else:
            raise DesignError("Design base must be a SiteDesign instance")

    def get_current_build(self):
        latest_stamp = 0
        current_build = None

        for b in self.builds:
            if b.build_id > latest_stamp:
                latest_stamp = b.build_id
                current_build = b

        return deepcopy(current_build)

    def get_build(self, build_id):
        for b in self.builds:
            if b.build_id == build_id:
                return b

        return None

    def post_build(self, site_build):
        if site_build is not None and isinstance(site_build, SiteBuild):
            my_lock = self.builds_lock.acquire(blocking=True, timeout=10)
            if my_lock:
                exists = [
                    b for b in self.builds if b.build_id == site_build.build_id
                ]

                if len(exists) > 0:
                    self.builds_lock.release()
                    raise DesignError("Already a site build with ID %s" %
                                      (str(site_build.build_id)))
                self.builds.append(deepcopy(site_build))
                self.builds_lock.release()
                return True
            raise StateError("Could not acquire lock")
        else:
            raise DesignError("Design change must be a SiteDesign instance")

    def put_build(self, site_build):
        if site_build is not None and isinstance(site_build, SiteBuild):
            my_lock = self.builds_lock.acquire(blocking=True, timeout=10)
            if my_lock:
                build_id = site_build.build_id
                for b in self.builds:
                    if b.build_id == build_id:
                        b.merge_updates(site_build)
                        self.builds_lock.release()
                        return True
                self.builds_lock.release()
                return False
            raise StateError("Could not acquire lock")
        else:
            raise DesignError("Design change must be a SiteDesign instance")

    def get_task(self, task_id):
        for t in self.tasks:
            if t.get_id() == task_id or str(t.get_id()) == task_id:
                return deepcopy(t)
        return None

    def post_task(self, task):
        if task is not None and isinstance(task, tasks.Task):
            my_lock = self.tasks_lock.acquire(blocking=True, timeout=10)
            if my_lock:
                task_id = task.get_id()
                matching_tasks = [
                    t for t in self.tasks if t.get_id() == task_id
                ]
                if len(matching_tasks) > 0:
                    self.tasks_lock.release()
                    raise StateError("Task %s already created" % task_id)

                self.tasks.append(deepcopy(task))
                self.tasks_lock.release()
                return True
            else:
                raise StateError("Could not acquire lock")
        else:
            raise StateError("Task is not the correct type")

    def put_task(self, task, lock_id=None):
        if task is not None and isinstance(task, tasks.Task):
            my_lock = self.tasks_lock.acquire(blocking=True, timeout=10)
            if my_lock:
                task_id = task.get_id()
                t = self.get_task(task_id)
                if t is None:
                    self.tasks_lock.release()
                    raise StateError("Task %s does not exist" % task_id)
                if t.lock_id is not None and t.lock_id != lock_id:
                    self.tasks_lock.release()
                    raise StateError("Task locked for updates")

                task.lock_id = lock_id
                self.tasks = [
                    i if i.get_id() != task_id else deepcopy(task)
                    for i in self.tasks
                ]

                self.tasks_lock.release()
                return True
            else:
                raise StateError("Could not acquire lock")
        else:
            raise StateError("Task is not the correct type")

    def lock_task(self, task_id):
        my_lock = self.tasks_lock.acquire(blocking=True, timeout=10)
        if my_lock:
            lock_id = uuid.uuid4()
            for t in self.tasks:
                if t.get_id() == task_id and t.lock_id is None:
                    t.lock_id = lock_id
                    self.tasks_lock.release()
                    return lock_id
            self.tasks_lock.release()
            return None
        else:
            raise StateError("Could not acquire lock")

    def unlock_task(self, task_id, lock_id):
        my_lock = self.tasks_lock.acquire(blocking=True, timeout=10)
        if my_lock:
            for t in self.tasks:
                if t.get_id() == task_id and t.lock_id == lock_id:
                    t.lock_id = None
                    self.tasks_lock.release()
                    return True
            self.tasks_lock.release()
            return False
        else:
            raise StateError("Could not acquire lock")
Example #53
0
class Highlight(object):
    def __init__(self, bot):
        self.bot = bot
        self.lock = Lock()
        self.highlights = dataIO.load_json("data/highlight/words.json")

    def _update_highlights(self, new_obj):
        self.lock.acquire()
        try:
            dataIO.save_json("data/highlight/words.json", new_obj)
            self.highlights = dataIO.load_json("data/highlight/words.json")
        finally:
            self.lock.release()

    async def _sleep_then_delete(self, msg, time):
        await asyncio.sleep(time)
        await self.bot.delete_message(msg)

    def _get_guild_ids(self):
        guilds = [list(x) for x in self.highlights['guilds']]
        return list(itertools.chain.from_iterable(guilds))  # flatten list

    def _check_guilds(self, guild_id):
        """returns guild pos in list"""
        guilds_ids = self._get_guild_ids()

        if guild_id not in guilds_ids:
            new_guild = {}
            users = {}
            users['users'] = []
            new_guild[guild_id] = users
            self.highlights['guilds'].append(new_guild)
            self._update_highlights(self.highlights)

        return next(x for (x, d) in enumerate(self.highlights['guilds'])
                    if guild_id in d)

    def _is_registered(self, guild_idx, guild_id, user_id):
        users = self.highlights['guilds'][guild_idx][guild_id]['users']

        for user in users:
            if user_id == user['id']:
                return [
                    next(index for (index, d) in enumerate(users)
                         if d["id"] == user['id']), user
                ]
        return None

    @commands.group(name="highlight", pass_context=True, no_pm=True)
    async def highlight(self, ctx):
        """Slack-like feature to be notified based on specific words outside of at-mentions"""
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx)

    @highlight.command(name="add", pass_context=True, no_pm=True)
    async def add_highlight(self, ctx, word: str):
        """Add a word to be highlighted in the current guild"""
        guild_id = ctx.message.server.id
        user_id = ctx.message.author.id
        user_name = ctx.message.author.name

        guild_idx = self._check_guilds(guild_id)
        user = self._is_registered(guild_idx, guild_id, user_id)

        if user is not None:
            user_idx = user[0]
            user_add = user[1]
            if len(user_add['words']) <= 4 and word not in user_add[
                    'words']:  # user can only have max of 5 words
                user_add['words'].append(word)
                self.highlights['guilds'][guild_idx][guild_id]['users'][
                    user_idx] = user_add
                self._update_highlights(self.highlights)
                t_msg = await self.bot.say(
                    "Highlight word added, {}".format(user_name))
                await self._sleep_then_delete(t_msg, 2)
            else:
                msg = "Sorry {}, you already have 5 words highlighted"
                msg += ", or you are trying to add a duplicate word"
                t_msg = await self.bot.say(msg.format(user_name))
                await self._sleep_then_delete(t_msg, 5)
        else:
            new_user = {}
            new_user['id'] = ctx.message.author.id
            new_user['words'] = [word]
            self.highlights['guilds'][guild_idx][guild_id]['users'].append(
                new_user)
            self._update_highlights(self.highlights)
            t_msg = await self.bot.say(
                "Registered and highlight word added, {}".format(user_name))
            await self._sleep_then_delete(t_msg, 2)

        await self.bot.delete_message(ctx.message)

    @highlight.command(name="remove", pass_context=True, no_pm=True)
    async def remove_highlight(self, ctx, word: str):
        """Remove a highlighted word in the current guild"""
        guild_id = ctx.message.server.id
        user_id = ctx.message.author.id
        user_name = ctx.message.author.name

        guild_idx = self._check_guilds(guild_id)
        user = self._is_registered(guild_idx, guild_id, user_id)

        if user is not None:
            user_idx = user[0]
            user_rm = user[1]
            if word in user_rm['words']:
                user_rm['words'].remove(word)
                self.highlights['guilds'][guild_idx][guild_id]['users'][
                    user_idx] = user_rm
                self._update_highlights(self.highlights)
                t_msg = await self.bot.say(
                    "Highlight word removed, {}".format(user_name))
                await self._sleep_then_delete(t_msg, 2)
            else:
                t_msg = await self.bot.say(
                    "Sorry {}, you don't have this word highlighted".format(
                        user_name))
                await self._sleep_then_delete(t_msg, 5)
        else:
            msg = "Sorry {}, you aren't currently registered for highlights."
            msg += " Add a word to become registered"
            t_msg = await self.bot.say(msg.format(user_name))
            await self._sleep_then_delete(t_msg, 5)

        await self.bot.delete_message(ctx.message)

    @highlight.command(name="list", pass_context=True, no_pm=True)
    async def list_highlight(self, ctx):
        """List your highighted words for the current guild"""
        guild_id = ctx.message.server.id
        user_id = ctx.message.author.id
        user_name = ctx.message.author.name

        guild_idx = self._check_guilds(guild_id)
        user = self._is_registered(guild_idx, guild_id, user_id)

        if user is not None:
            user_list = user[1]
            if len(user_list['words']) > 0:
                msg = ""
                for word in user_list['words']:
                    msg += word
                    msg += "\n"

                embed = discord.Embed(description=msg,
                                      colour=discord.Colour.red())
                embed.set_author(name=ctx.message.author.name,
                                 icon_url=ctx.message.author.avatar_url)
                t_msg = await self.bot.say(embed=embed)
                await self._sleep_then_delete(t_msg, 5)
            else:
                t_msg = await self.bot.say(
                    "Sorry {}, you have no highlighted words currently".format(
                        user_name))
                await self._sleep_then_delete(t_msg, 5)
        else:
            msg = "Sorry {}, you aren't currently registered for highlights."
            msg += " Add a word to become registered"
            t_msg = await self.bot.say(msg.format(user_name))
            await self._sleep_then_delete(t_msg, 5)

    @highlight.command(name="import", pass_context=True, no_pm=False)
    async def import_highlight(self, ctx, from_server: str):
        """Transfer highlights from a different guild to the current guild, OVERWRITING any words in the current guild"""
        guild_id = ctx.message.server.id
        user_id = ctx.message.author.id
        user_name = ctx.message.author.name

        guild_idx = self._check_guilds(guild_id)
        user = self._is_registered(guild_idx, guild_id, user_id)

        guild = discord.utils.get(self.bot.servers, name=from_server)

        # This is kind of ugly and I don't really like it, but it has to be
        # done this way given how the highlight data is structured; will
        # definitely be revisiting this
        if guild is not None and user is not None:
            user_idx = user[0]
            if guild.id in self._get_guild_ids():
                impt_guild_idx = self._check_guilds(guild.id)
                impt_user = self._is_registered(impt_guild_idx, guild.id,
                                                user_id)

                if impt_user is not None:
                    impt_user_idx = impt_user[0]
                    impt = self.highlights['guilds'][impt_guild_idx][
                        guild.id]['users'][impt_user_idx]
                    self.highlights['guilds'][guild_idx][guild_id]['users'][
                        user_idx] = impt
                    self._update_highlights(self.highlights)
                    t_msg = await self.bot.say(
                        "Highlight words imported from {0} for {1}".format(
                            from_server, user_name))
                    await self._sleep_then_delete(t_msg, 3)
            else:
                msg = "Sorry {}, the guild you want to import from"
                msg += " is not registered for highlights, or you are not registered in that guild"
                t_msg = await self.bot.say(msg.format(user_name))
                await self._sleep_then_delete(t_msg, 5)
        else:
            msg = "Sorry {}, this bot is not in the guild you want to import from,"
            msg += " or you are not registered in this guild"
            t_msg = await self.bot.say(msg.format(user_name))
            await self._sleep_then_delete(t_msg, 5)

    async def check_highlights(self, msg):
        if isinstance(msg.channel, discord.PrivateChannel):
            return

        guild_id = msg.server.id
        user_id = msg.author.id
        user_name = msg.author.name
        user_obj = msg.author

        guild_idx = self._check_guilds(guild_id)

        # iterate through every user's words on the server and notify all highlights
        for user in self.highlights['guilds'][guild_idx][guild_id]['users']:
            for word in user['words']:
                active = await self._is_active(user['id'], msg.channel, msg)
                match = self._is_word_match(word, msg.content)
                if match and not active and user_id != user['id']:
                    hilite_user = msg.server.get_member(user['id'])
                    if hilite_user is None:
                        # Handle case where user is no longer in the server of interest.
                        continue
                    perms = msg.channel.permissions_for(hilite_user)
                    if not perms.read_messages:
                        break
                    await self._notify_user(hilite_user, msg, word)

    async def _notify_user(self, user, message, word):
        # wait so we possibly pick up messages after the trigger that will
        # help with the context
        await asyncio.sleep(3)
        msgs = []
        async for msg in self.bot.logs_from(message.channel,
                                            limit=6,
                                            around=message):
            msgs.append(msg)
        msg_ctx = sorted(msgs, key=lambda r: r.timestamp)
        notify_msg = "In {1.channel.mention}, you were mentioned with highlight word **{0}**:\n".format(
            word, message)
        embed_msg = ""
        for msg in msg_ctx:
            time = msg.timestamp
            time = time.replace(tzinfo=timezone.utc).astimezone(
                tz=None).strftime('%H:%M:%S %Z')
            embed_msg += "[{0}] {1.author.name}#{1.author.discriminator}: {1.content}\n".format(
                time, msg)
        embed = discord.Embed(title=user.name,
                              description=embed_msg,
                              colour=discord.Colour.red())
        time = message.timestamp.replace(tzinfo=timezone.utc).astimezone(
            tz=None)
        footer = "Triggered at | {}".format(
            time.strftime('%a, %d %b %Y %I:%M%p %Z'))
        embed.set_footer(text=footer)
        await self.bot.send_message(user, content=notify_msg, embed=embed)

    def _is_word_match(self, word, string):
        regex = r'\b{}\b'.format(word.lower())
        return bool(re.search(regex, string.lower()))

    async def _is_active(self, user_id, channel, message):
        is_active = False

        async for msg in self.bot.logs_from(channel, limit=50, before=message):
            delta_since_msg = message.timestamp - msg.timestamp
            if msg.author.id == user_id and delta_since_msg <= timedelta(
                    seconds=20):
                is_active = True
                break
        return is_active
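_is_word_match above anchors the word with \b, so "cat" matches "my cat!" but not "concatenate". Note that the original does not escape the word, so regex metacharacters in a highlight word would be interpreted; the sketch below adds re.escape, which is a deliberate change from the code above:

import re

def is_word_match(word, string):
    regex = r'\b{}\b'.format(re.escape(word.lower()))   # escape added here
    return bool(re.search(regex, string.lower()))

assert is_word_match("cat", "My CAT sleeps")     # case-insensitive hit
assert not is_word_match("cat", "concatenate")   # \b blocks substrings
assert not is_word_match("a.c", "the abc case")  # '.' no longer wildcards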
Example #54
0
class Connection(object):
    """The RPyC *connection* (AKA *protocol*).

    :param root: the :class:`~rpyc.core.service.Service` object to expose
    :param channel: the :class:`~rpyc.core.channel.Channel` over which messages are passed
    :param config: the connection's configuration dict (overriding parameters
                   from the :data:`default configuration <DEFAULT_CONFIG>`)
    """
    def __init__(self, root, channel, config={}):
        self._closed = True
        self._config = DEFAULT_CONFIG.copy()
        self._config.update(config)
        if self._config["connid"] is None:
            self._config["connid"] = "conn%d" % (
                next(_connection_id_generator), )

        self._HANDLERS = self._request_handlers()
        self._channel = channel
        self._seqcounter = itertools.count()
        self._recvlock = Lock()
        self._sendlock = Lock()
        self._recv_event = Condition()
        self._request_callbacks = {}
        self._local_objects = RefCountingColl()
        self._last_traceback = None
        self._proxy_cache = WeakValueDict()
        self._netref_classes_cache = {}
        self._remote_root = None
        self._send_queue = []
        self._local_root = root
        self._closed = False

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()

    def __repr__(self):
        a, b = object.__repr__(self).split(" object ")
        return "%s %r object %s" % (a, self._config["connid"], b)

    def _cleanup(self, _anyway=True):  # IO
        if self._closed and not _anyway:
            return
        self._closed = True
        self._channel.close()
        self._local_root.on_disconnect(self)
        self._request_callbacks.clear()
        self._local_objects.clear()
        self._proxy_cache.clear()
        self._netref_classes_cache.clear()
        self._last_traceback = None
        self._remote_root = None
        self._local_root = None
        # self._seqcounter = None
        # self._config.clear()
        del self._HANDLERS

    def close(self, _catchall=True):  # IO
        """closes the connection, releasing all held resources"""
        if self._closed:
            return
        self._closed = True
        try:
            self._async_request(consts.HANDLE_CLOSE)
        except EOFError:
            pass
        except Exception:
            if not _catchall:
                raise
        finally:
            self._cleanup(_anyway=True)

    @property
    def closed(self):  # IO
        """Indicates whether the connection has been closed or not"""
        return self._closed

    def fileno(self):  # IO
        """Returns the connectin's underlying file descriptor"""
        return self._channel.fileno()

    def ping(self, data=None, timeout=3):  # IO
        """Asserts that the other party is functioning properly, by making sure
        the *data* is echoed back before the *timeout* expires

        :param data: the data to send (leave ``None`` for the default buffer)
        :param timeout: the maximal time to wait for echo

        :raises: :class:`PingError` if the echoed data does not match
        :raises: :class:`EOFError` if the remote host closes the connection
        """
        if data is None:
            data = "abcdefghijklmnopqrstuvwxyz" * 20
        res = self.async_request(consts.HANDLE_PING, data, timeout=timeout)
        if res.value != data:
            raise PingError("echo mismatches sent data")

    def _get_seq_id(self):  # IO
        return next(self._seqcounter)

    def _send(self, msg, seq, args):  # IO
        data = brine.dump((msg, seq, args))
        # GC might run while sending data
        # if so, a BaseNetref.__del__ might be called
        # BaseNetref.__del__ must call asyncreq,
        # which will cause a deadlock
        # Solution:
        # Add the current request to a queue and let the thread that currently
        # holds the sendlock send it when it's done with its current job.
        # NOTE: Atomic list operations should be thread safe,
        # please call me out if they are not on all implementations!
        self._send_queue.append(data)
        # It is crucial to check the queue each time AFTER releasing the lock:
        while self._send_queue:
            if not self._sendlock.acquire(False):
                # Another thread holds the lock. It will send the data after
                # it's done with its current job. We can safely return.
                return
            try:
                # Can happen if another consumer was scheduled in between
                # `while` and `acquire`:
                if not self._send_queue:
                    # Must `continue` to ensure that `send_queue` is checked
                    # after releasing the lock! (in case another producer is
                    # scheduled before `release`)
                    continue
                data = self._send_queue.pop(0)
                self._channel.send(data)
            finally:
                self._sendlock.release()

    def _box(self, obj):  # boxing
        """store a local object in such a way that it could be recreated on
        the remote party either by-value or by-reference"""
        if brine.dumpable(obj):
            return consts.LABEL_VALUE, obj
        if type(obj) is tuple:
            return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj)
        elif isinstance(obj, netref.BaseNetref) and obj.____conn__ is self:
            return consts.LABEL_LOCAL_REF, obj.____id_pack__
        else:
            id_pack = get_id_pack(obj)
            self._local_objects.add(id_pack, obj)
            try:
                cls = obj.__class__
            except Exception:
                # see issue #16
                cls = type(obj)
            if not isinstance(cls, type):
                cls = type(obj)
            return consts.LABEL_REMOTE_REF, id_pack

    def _unbox(self, package):  # boxing
        """recreate a local object representation of the remote object: if the
        object is passed by value, just return it; if the object is passed by
        reference, create a netref to it"""
        label, value = package
        if label == consts.LABEL_VALUE:
            return value
        if label == consts.LABEL_TUPLE:
            return tuple(self._unbox(item) for item in value)
        if label == consts.LABEL_LOCAL_REF:
            return self._local_objects[value]
        if label == consts.LABEL_REMOTE_REF:
            id_pack = (str(value[0]), value[1], value[2])  # value is an id_pack
            if id_pack in self._proxy_cache:
                proxy = self._proxy_cache[id_pack]
                proxy.____refcount__ += 1  # if cached then remote incremented refcount, so sync refcount
            else:
                proxy = self._netref_factory(id_pack)
                self._proxy_cache[id_pack] = proxy
            return proxy
        raise ValueError("invalid label %r" % (label, ))

    def _netref_factory(self, id_pack):  # boxing
        """id_pack is for remote, so when class id fails to directly match """
        if id_pack[0] in netref.builtin_classes_cache:
            cls = netref.builtin_classes_cache[id_pack[0]]
        elif id_pack[1] in self._netref_classes_cache:
            cls = self._netref_classes_cache[id_pack[1]]
        else:
            # in the future, this could first try a sys.modules cache/lookup
            cls_methods = self.sync_request(consts.HANDLE_INSPECT, id_pack)
            cls = netref.class_factory(id_pack, cls_methods)
            self._netref_classes_cache[id_pack[1]] = cls
        return cls(self, id_pack)

    def _dispatch_request(self, seq, raw_args):  # dispatch
        try:
            handler, args = raw_args
            args = self._unbox(args)
            res = self._HANDLERS[handler](self, *args)
        except Exception:
            # need to catch old style exceptions too
            t, v, tb = sys.exc_info()
            self._last_traceback = tb
            logger = self._config["logger"]
            if logger and t is not StopIteration:
                logger.debug("Exception caught", exc_info=True)
            if t is SystemExit and self._config["propagate_SystemExit_locally"]:
                raise
            if t is KeyboardInterrupt and self._config[
                    "propagate_KeyboardInterrupt_locally"]:
                raise
            self._send(consts.MSG_EXCEPTION, seq, self._box_exc(t, v, tb))
        else:
            self._send(consts.MSG_REPLY, seq, self._box(res))

    def _box_exc(self, typ, val, tb):  # dispatch?
        return vinegar.dump(
            typ,
            val,
            tb,
            include_local_traceback=self._config["include_local_traceback"],
            include_local_version=self._config["include_local_version"])

    def _unbox_exc(self, raw):  # dispatch?
        return vinegar.load(
            raw,
            import_custom_exceptions=self._config["import_custom_exceptions"],
            instantiate_custom_exceptions=self._config["instantiate_custom_exceptions"],
            instantiate_oldstyle_exceptions=self._config["instantiate_oldstyle_exceptions"])

    def _dispatch(self, data):  # serving---dispatch?
        msg, seq, args = brine.load(data)
        if msg == consts.MSG_REQUEST:
            self._dispatch_request(seq, args)
        elif msg == consts.MSG_REPLY:
            obj = self._unbox(args)
            self._request_callbacks.pop(seq)(False, obj)
        elif msg == consts.MSG_EXCEPTION:
            obj = self._unbox_exc(args)
            self._request_callbacks.pop(seq)(True, obj)
        else:
            raise ValueError("invalid message type: %r" % (msg, ))

    def serve(self, timeout=1, wait_for_lock=True):  # serving
        """Serves a single request or reply that arrives within the given
        time frame (default is 1 sec). Note that the dispatching of a request
        might trigger multiple (nested) requests, thus this function may be
        reentrant.

        :returns: ``True`` if a request or reply were received, ``False``
                  otherwise.
        """
        timeout = Timeout(timeout)
        with self._recv_event:
            if not self._recvlock.acquire(False):
                return wait_for_lock and self._recv_event.wait(
                    timeout.timeleft())
        try:
            data = self._channel.poll(timeout) and self._channel.recv()
            if not data:
                return False
        except EOFError:
            self.close()
            raise
        finally:
            self._recvlock.release()
            with self._recv_event:
                self._recv_event.notify_all()
        self._dispatch(data)
        return True

    def poll(self, timeout=0):  # serving
        """Serves a single transaction, should one arrives in the given
        interval. Note that handling a request/reply may trigger nested
        requests, which are all part of a single transaction.

        :returns: ``True`` if a transaction was served, ``False`` otherwise"""
        return self.serve(timeout, False)

    def serve_all(self):  # serving
        """Serves all requests and replies for as long as the connection is
        alive."""
        try:
            while not self.closed:
                self.serve(None)
        except (socket.error, select_error, IOError):
            if not self.closed:
                raise
        except EOFError:
            pass
        finally:
            self.close()

    def serve_threaded(self, thread_count=10):  # serving
        """Serves all requests and replies for as long as the connection is
        alive."""
        def _thread_target():
            try:
                while True:
                    self.serve(None)
            except (socket.error, select_error, IOError):
                if not self.closed:
                    raise
            except EOFError:
                pass

        try:
            threads = [spawn(_thread_target) for _ in range(thread_count)]

            for thread in threads:
                thread.join()
        finally:
            self.close()

    def poll_all(self, timeout=0):  # serving
        """Serves all requests and replies that arrive within the given interval.

        :returns: ``True`` if at least a single transaction was served, ``False`` otherwise
        """
        at_least_once = False
        timeout = Timeout(timeout)
        try:
            while True:
                if self.poll(timeout):
                    at_least_once = True
                if timeout.expired():
                    break
        except EOFError:
            pass
        return at_least_once

    def sync_request(self, handler, *args):  # serving
        """requests, sends a synchronous request (waits for the reply to arrive)

        :raises: any exception that the requets may be generated
        :returns: the result of the request
        """
        timeout = self._config["sync_request_timeout"]
        return self.async_request(handler, *args, timeout=timeout).value

    def _async_request(self, handler, args=(),
                       callback=(lambda a, b: None)):  # serving
        seq = self._get_seq_id()
        self._request_callbacks[seq] = callback
        try:
            self._send(consts.MSG_REQUEST, seq, (handler, self._box(args)))
        except Exception:
            self._request_callbacks.pop(seq, None)
            raise

    def async_request(self, handler, *args, **kwargs):  # serving
        """Send an asynchronous request (does not wait for it to finish)

        :returns: an :class:`rpyc.core.async_.AsyncResult` object, which will
                  eventually hold the result (or exception)
        """
        timeout = kwargs.pop("timeout", None)
        if kwargs:
            raise TypeError("got unexpected keyword argument(s) %s" %
                            (list(kwargs.keys()), ))
        res = AsyncResult(self)
        self._async_request(handler, args, res)
        if timeout is not None:
            res.set_expiry(timeout)
        return res

    @property
    def root(self):  # serving
        """Fetches the root object (service) of the other party"""
        if self._remote_root is None:
            self._remote_root = self.sync_request(consts.HANDLE_GETROOT)
        return self._remote_root

    def _check_attr(self, obj, name, perm):  # attribute access
        config = self._config
        if not config[perm]:
            raise AttributeError("cannot access %r" % (name, ))
        prefix = config["allow_exposed_attrs"] and config["exposed_prefix"]
        plain = config["allow_all_attrs"]
        plain |= config["allow_exposed_attrs"] and name.startswith(prefix)
        plain |= config["allow_safe_attrs"] and name in config["safe_attrs"]
        plain |= config["allow_public_attrs"] and not name.startswith("_")
        has_exposed = prefix and hasattr(obj, prefix + name)
        if plain and (not has_exposed or hasattr(obj, name)):
            return name
        if has_exposed:
            return prefix + name
        if plain:
            return name  # chance for better traceback
        raise AttributeError("cannot access %r" % (name, ))

    def _access_attr(self, obj, name, args, overrider, param,
                     default):  # attribute access
        if is_py3k:
            if type(name) is bytes:
                name = str(name, "utf8")
            elif type(name) is not str:
                raise TypeError("name must be a string")
        else:
            if type(name) not in (str, unicode):  # noqa
                raise TypeError("name must be a string")
            name = str(name)  # IronPython issue #10 + py3k issue
        accessor = getattr(type(obj), overrider, None)
        if accessor is None:
            accessor = default
            name = self._check_attr(obj, name, param)
        return accessor(obj, name, *args)

    @classmethod
    def _request_handlers(cls):  # request handlers
        return {
            consts.HANDLE_PING: cls._handle_ping,
            consts.HANDLE_CLOSE: cls._handle_close,
            consts.HANDLE_GETROOT: cls._handle_getroot,
            consts.HANDLE_GETATTR: cls._handle_getattr,
            consts.HANDLE_DELATTR: cls._handle_delattr,
            consts.HANDLE_SETATTR: cls._handle_setattr,
            consts.HANDLE_CALL: cls._handle_call,
            consts.HANDLE_CALLATTR: cls._handle_callattr,
            consts.HANDLE_REPR: cls._handle_repr,
            consts.HANDLE_STR: cls._handle_str,
            consts.HANDLE_CMP: cls._handle_cmp,
            consts.HANDLE_HASH: cls._handle_hash,
            consts.HANDLE_INSTANCECHECK: cls._handle_instancecheck,
            consts.HANDLE_DIR: cls._handle_dir,
            consts.HANDLE_PICKLE: cls._handle_pickle,
            consts.HANDLE_DEL: cls._handle_del,
            consts.HANDLE_INSPECT: cls._handle_inspect,
            consts.HANDLE_BUFFITER: cls._handle_buffiter,
            consts.HANDLE_OLDSLICING: cls._handle_oldslicing,
            consts.HANDLE_CTXEXIT: cls._handle_ctxexit,
        }

    def _handle_ping(self, data):  # request handler
        return data

    def _handle_close(self):  # request handler
        self._cleanup()

    def _handle_getroot(self):  # request handler
        return self._local_root

    def _handle_del(self, obj, count=1):  # request handler
        self._local_objects.decref(get_id_pack(obj), count)

    def _handle_repr(self, obj):  # request handler
        return repr(obj)

    def _handle_str(self, obj):  # request handler
        return str(obj)

    def _handle_cmp(self, obj, other, op='__cmp__'):  # request handler
        # cmp() might enter recursive resonance... yet another workaround
        # return cmp(obj, other)
        try:
            return getattr(type(obj), op)(obj, other)
        except (AttributeError, TypeError):
            return NotImplemented

    def _handle_hash(self, obj):  # request handler
        return hash(obj)

    def _handle_call(self, obj, args, kwargs=()):  # request handler
        return obj(*args, **dict(kwargs))

    def _handle_dir(self, obj):  # request handler
        return tuple(dir(obj))

    def _handle_inspect(self, id_pack):  # request handler
        return tuple(
            get_methods(netref.LOCAL_ATTRS, self._local_objects[id_pack]))

    def _handle_getattr(self, obj, name):  # request handler
        return self._access_attr(obj, name, (), "_rpyc_getattr",
                                 "allow_getattr", getattr)

    def _handle_delattr(self, obj, name):  # request handler
        return self._access_attr(obj, name, (), "_rpyc_delattr",
                                 "allow_delattr", delattr)

    def _handle_setattr(self, obj, name, value):  # request handler
        return self._access_attr(obj, name, (value, ), "_rpyc_setattr",
                                 "allow_setattr", setattr)

    def _handle_callattr(self, obj, name, args, kwargs=()):  # request handler
        obj = self._handle_getattr(obj, name)
        return self._handle_call(obj, args, kwargs)

    def _handle_ctxexit(self, obj, exc):  # request handler
        if exc:
            try:
                raise exc
            except Exception:
                exc, typ, tb = sys.exc_info()
        else:
            typ = tb = None
        return self._handle_getattr(obj, "__exit__")(exc, typ, tb)

    def _handle_instancecheck(self, obj, other_id_pack):
        # Build the id_pack as it would appear in the local cache (instance
        # id zeroed) and check for a hit
        other_id_pack2 = (other_id_pack[0], other_id_pack[1], 0)
        if other_id_pack2 in self._netref_classes_cache:
            cls = self._netref_classes_cache[other_id_pack2]
            other = cls(self, other_id_pack2)
            return isinstance(other, obj)
        else:  # might just have missed cache, FIX ME
            return False

    def _handle_pickle(self, obj, proto):  # request handler
        if not self._config["allow_pickle"]:
            raise ValueError("pickling is disabled")
        return bytes(pickle.dumps(obj, proto))

    def _handle_buffiter(self, obj, count):  # request handler
        return tuple(itertools.islice(obj, count))

    def _handle_oldslicing(self, obj, attempt, fallback, start, stop,
                           args):  # request handler
        try:
            # first try __xxxitem__
            getitem = self._handle_getattr(obj, attempt)
            return getitem(slice(start, stop), *args)
        except Exception:
            # fallback to __xxxslice__. see issue #41
            if stop is None:
                stop = maxint
            getslice = self._handle_getattr(obj, fallback)
            return getslice(start, stop, *args)
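
# --- Illustrative sketch (not rpyc source): the send-queue pattern used in
# _send above, with hypothetical names (wire, send_queue, send_lock). Any
# thread may append; whichever thread wins the non-blocking acquire drains
# the queue, and the queue is re-checked after every release so no item is
# ever stranded.
import threading

send_lock = threading.Lock()
send_queue = []

def send(wire, data):
    send_queue.append(data)        # list.append is atomic in CPython
    while send_queue:
        if not send_lock.acquire(False):
            return                 # the current lock holder will drain our item
        try:
            if not send_queue:     # drained between the check and the acquire
                continue           # finally releases; the loop re-checks
            wire.send(send_queue.pop(0))
        finally:
            send_lock.release()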
Example #55
0
class AeromeScentController(object):
    def __init__(self, compatibility_dummy):
        self.log = None
        self.open_valves = None
        self.state_change_lock = Lock()
        self.status_changed_callback = None

    def initialize_controller(self, status_changed_callback):
        self.log = logging.getLogger("aeromeScentController")
        self.log.error("Init GPIO controller")
        GPIO.setmode(GPIO.BOARD)
        for pin in SCENT_ID_TO_PIN_MAPPING.values():
            GPIO.setup(pin, GPIO.OUT)
        GPIO.setup(FLUSH_VALVE_PIN, GPIO.OUT)
        self.status_changed_callback = status_changed_callback
        self.close_all_valves()

    @staticmethod
    def get_state():
        ret = {}
        for pin_id, pin in SCENT_ID_TO_PIN_MAPPING.iteritems():
            ret[pin_id] = GPIO.input(pin) == GPIO.HIGH
        return ret

    def open_valve(self, valve_id):
        self._set_valve_state(valve_id, GPIO.HIGH)

    def close_valve(self, valve_id):
        self._set_valve_state(valve_id, GPIO.LOW)

    def close(self):
        self.close_all_valves()
        GPIO.cleanup()

    def close_all_valves(self):
        self.state_change_lock.acquire()
        try:
            self.log.error("Closing all")
            self._set_all_pins_low()
        finally:
            self.state_change_lock.release()

    def _set_valve_state(self, valve_id, state):
        valve_key = str(valve_id)

        if valve_key not in SCENT_ID_TO_PIN_MAPPING:
            self.log.error("Unknown valve %s" % valve_key)
            return

        valve_pin = SCENT_ID_TO_PIN_MAPPING[valve_key]

        self.state_change_lock.acquire()
        try:
            self._set_pin_to_state(valve_pin, state)
        finally:
            self.state_change_lock.release()

    def _set_pin_to_state(self, pin, state):
        if GPIO.input(pin) != state:
            self.log.error("Setting " + str(pin) + " to " + str(state))
            GPIO.output(pin, state)
            if state == GPIO.HIGH:
                GPIO.output(FLUSH_VALVE_PIN, state)
                self.open_valves += 1
            else:
                self.open_valves -= 1
                if self.open_valves < 1:
                    self._set_all_pins_low()
            self.status_changed_callback()
        else:
            self.log.error("Pin " + str(pin) + " is already " + str(state))

    def _set_all_pins_low(self):
        GPIO.output(SCENT_ID_TO_PIN_MAPPING.values() + [FLUSH_VALVE_PIN],
                    GPIO.LOW)
        self.open_valves = 0
        self.status_changed_callback()
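
# --- Illustrative sketch (not part of the controller above): each
# acquire()/try/finally block is equivalent to the lock's context-manager
# form, which cannot forget the release. Names (_valves, set_valve) are
# hypothetical.
import threading

_state_lock = threading.Lock()
_valves = {"1": False, "2": False}  # valve id -> open?

def set_valve(valve_id, is_open):
    with _state_lock:               # released automatically, even on error
        if valve_id not in _valves:
            raise KeyError(valve_id)
        _valves[valve_id] = is_open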
Example #56
0
class PastlyLogger:
    # error, warn, etc. are file names to open for logging.
    # If a log level doesn't have a file name given for it, messages destined
    # for that level cascade down to the next noisiest level.
    # Example 1: warn=foo.txt, debug=bar.txt
    #   error and warn messages go to foo.txt, all other messages to bar.txt
    # Example 2: notice=baz.txt
    #   error, warn, and notice messages go to baz.txt, all others are lost
    #
    # overwrite is a list of log levels that should overwrite their log file
    # when opening instead of appending.
    # Example: notice=a.txt, info=b.txt, overwrite=['info']
    #   error, warn, and notice messages are appended to a.txt;
    #   b.txt is overwritten and info messages are appended to it;
    #   all debug messages are lost
    #
    # log_threads tells the logger whether or not to log thread names
    #
    # default tells the logger what level to log at when called with
    # log('foobar') instead of log.info('foobar')
    def __init__(self,
                 error=None,
                 warn=None,
                 notice=None,
                 info=None,
                 debug=None,
                 overwrite=[],
                 log_threads=False,
                 default='notice'):

        self.log_threads = log_threads
        assert default in ['debug', 'info', 'notice', 'warn', 'error']
        self.default_level = default

        # buffering=1 means line-based buffering
        if error:
            self.error_fd = open(error,
                                 'w' if 'error' in overwrite else 'a',
                                 buffering=1)
            self.error_fd_mutex = Lock()
        else:
            self.error_fd = None
            self.error_fd_mutex = None
        if warn:
            self.warn_fd = open(warn,
                                'w' if 'warn' in overwrite else 'a',
                                buffering=1)
            self.warn_fd_mutex = Lock()
        else:
            self.warn_fd = None
            self.warn_fd_mutex = None
        if notice:
            self.notice_fd = open(notice,
                                  'w' if 'notice' in overwrite else 'a',
                                  buffering=1)
            self.notice_fd_mutex = Lock()
        else:
            self.notice_fd = None
            self.notice_fd_mutex = None
        if info:
            self.info_fd = open(info,
                                'w' if 'info' in overwrite else 'a',
                                buffering=1)
            self.info_fd_mutex = Lock()
        else:
            self.info_fd = None
            self.info_fd_mutex = None
        if debug:
            self.debug_fd = open(debug,
                                 'w' if 'debug' in overwrite else 'a',
                                 buffering=1)
            self.debug_fd_mutex = Lock()
        else:
            self.debug_fd = None
            self.debug_fd_mutex = None

        self.debug('Creating PastlyLogger instance')

    def __call__(self, *s):
        if self.default_level == 'debug': return self.debug(*s)
        elif self.default_level == 'info': return self.info(*s)
        elif self.default_level == 'notice': return self.notice(*s)
        elif self.default_level == 'warn': return self.warn(*s)
        elif self.default_level == 'error': return self.error(*s)

    def __del__(self):
        self.debug('Deleting PastlyLogger instance')
        self.flush()
        if self.error_fd: self.error_fd.close()
        if self.warn_fd: self.warn_fd.close()
        if self.notice_fd: self.notice_fd.close()
        if self.info_fd: self.info_fd.close()
        if self.debug_fd: self.debug_fd.close()
        self.error_fd, self.warn_fd = None, None
        self.notice_fd, self.info_fd, self.debug_fd = None, None, None
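        # Best-effort teardown: a mutex that another thread still holds is
        # force-released so interpreter shutdown cannot block on it.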
        if self.error_fd_mutex:
            if not self.error_fd_mutex.acquire(blocking=False):
                self.error_fd_mutex.release()
        if self.warn_fd_mutex:
            if not self.warn_fd_mutex.acquire(blocking=False):
                self.warn_fd_mutex.release()
        if self.notice_fd_mutex:
            if not self.notice_fd_mutex.acquire(blocking=False):
                self.notice_fd_mutex.release()
        if self.info_fd_mutex:
            if not self.info_fd_mutex.acquire(blocking=False):
                self.info_fd_mutex.release()
        if self.debug_fd_mutex:
            if not self.debug_fd_mutex.acquire(blocking=False):
                self.debug_fd_mutex.release()

    @staticmethod
    def _log_file(fd, lock, log_threads, level, *s):
        assert fd
        lock.acquire()
        ts = datetime.now()
        if log_threads:
            fd.write('[{}] [{}] [{}] {}\n'.format(
                ts, level,
                current_thread().name, ' '.join([str(_) for _ in s])))
        else:
            fd.write('[{}] [{}] {}\n'.format(ts, level,
                                             ' '.join([str(_) for _ in s])))
        lock.release()

    def flush(self):
        if self.error_fd: self.error_fd.flush()
        if self.warn_fd: self.warn_fd.flush()
        if self.notice_fd: self.notice_fd.flush()
        if self.info_fd: self.info_fd.flush()
        if self.debug_fd: self.debug_fd.flush()

    def debug(self, *s, level='debug'):
        if self.debug_fd:
            return PastlyLogger._log_file(self.debug_fd, self.debug_fd_mutex,
                                          self.log_threads, level, *s)
        return None

    def info(self, *s, level='info'):
        if self.info_fd:
            return PastlyLogger._log_file(self.info_fd, self.info_fd_mutex,
                                          self.log_threads, level, *s)
        else:
            return self.debug(*s, level=level)

    def notice(self, *s, level='notice'):
        if self.notice_fd:
            return PastlyLogger._log_file(self.notice_fd, self.notice_fd_mutex,
                                          self.log_threads, level, *s)
        else:
            return self.info(*s, level=level)

    def warn(self, *s, level='warn'):
        if self.warn_fd:
            return PastlyLogger._log_file(self.warn_fd, self.warn_fd_mutex,
                                          self.log_threads, level, *s)
        else:
            return self.notice(*s, level=level)

    def error(self, *s, level='error'):
        if self.error_fd:
            return PastlyLogger._log_file(self.error_fd, self.error_fd_mutex,
                                          self.log_threads, level, *s)
        else:
            return self.warn(*s, level=level)
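
# --- Illustrative sketch (not part of PastlyLogger): the cascade described
# in the comments above, as a standalone lookup. A message aimed at a level
# falls through to the next noisier level that has an open file; `fds` is a
# hypothetical dict of level name -> file object (or None).
LEVELS = ['error', 'warn', 'notice', 'info', 'debug']

def resolve_fd(fds, level):
    """Return the file a message at `level` lands in, or None if it is lost."""
    for lvl in LEVELS[LEVELS.index(level):]:
        if fds.get(lvl) is not None:
            return fds[lvl]
    return None

# e.g. with warn=foo and debug=bar open, resolve_fd(fds, 'error') -> foo
# and resolve_fd(fds, 'notice') -> bar, matching Example 1 above.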
Example #57
0
class _Selection(DOMWidget):
    """Base class for Selection widgets
    
    ``options`` can be specified as a list or dict. If given as a list,
    it will be transformed to a dict of the form ``{str(value):value}``.

    When programmatically setting the value, a reverse lookup is performed
    among the options to set the value of ``selected_label`` accordingly. The
    reverse lookup uses the equality operator by default, but another
    predicate may be provided via the ``equals`` argument. For example, when
    dealing with numpy arrays, one may set ``equals=np.array_equal``.
    """

    value = Any(help="Selected value")
    selected_label = Unicode(help="The label of the selected value", sync=True)
    options = Any(
        help="""List of (key, value) tuples or dict of values that the
        user can select.
    
    The keys of this list are the strings that will be displayed in the UI,
    representing the actual Python choices.
    
    The keys of this list are also available as _options_labels.
    """)

    _options_dict = Dict()
    _options_labels = Tuple(sync=True)
    _options_values = Tuple()

    disabled = Bool(False, help="Enable or disable user changes", sync=True)
    description = Unicode(
        help="Description of the value this widget represents", sync=True)

    def __init__(self, *args, **kwargs):
        self.value_lock = Lock()
        self.options_lock = Lock()
        self.equals = kwargs.pop('equals', lambda x, y: x == y)
        self.on_trait_change(self._options_readonly_changed, [
            '_options_dict', '_options_labels', '_options_values', '_options'
        ])
        if 'options' in kwargs:
            self.options = kwargs.pop('options')
        DOMWidget.__init__(self, *args, **kwargs)
        self._value_in_options()

    def _make_options(self, x):
        # If x is a dict, convert it to list format.
        if isinstance(x, (OrderedDict, dict)):
            return [(k, v) for k, v in x.items()]

        # Make sure x is a list or tuple.
        if not isinstance(x, (list, tuple)):
            raise ValueError('options must be a dict, list, or tuple')

        # If x is an ordinary list, use the option values as names.
        for y in x:
            if not isinstance(y, (list, tuple)) or len(y) < 2:
                return [(i, i) for i in x]

        # Value is already in the correct format.
        return x

    def _options_changed(self, name, old, new):
        """Handles when the options tuple has been changed.

        Setting options implies setting option labels from the keys of the dict.
        """
        if self.options_lock.acquire(False):
            try:
                self.options = new

                options = self._make_options(new)
                self._options_dict = {i[0]: i[1] for i in options}
                self._options_labels = [i[0] for i in options]
                self._options_values = [i[1] for i in options]
                self._value_in_options()
            finally:
                self.options_lock.release()

    def _value_in_options(self):
        # ensure that the chosen value is one of the choices

        if self._options_values:
            if self.value not in self._options_values:
                self.value = next(iter(self._options_values))

    def _options_readonly_changed(self, name, old, new):
        if not self.options_lock.locked():
            raise TraitError(
                "`.%s` is a read-only trait. Use the `.options` tuple instead."
                % name)

    def _value_changed(self, name, old, new):
        """Called when value has been changed"""
        if self.value_lock.acquire(False):
            try:
                # Reverse dictionary lookup for the value name
                for k, v in self._options_dict.items():
                    if self.equals(new, v):
                        # set the selected value name
                        self.selected_label = k
                        return
                # undo the change, and raise KeyError
                self.value = old
                raise KeyError(new)
            finally:
                self.value_lock.release()

    def _selected_label_changed(self, name, old, new):
        """Called when the value name has been changed (typically by the frontend)."""
        if self.value_lock.acquire(False):
            try:
                self.value = self._options_dict[new]
            finally:
                self.value_lock.release()
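
# --- Illustrative sketch (not ipywidgets code): the acquire(False) calls
# above are a reentrancy guard. Setting either property synchronously fires
# the other property's handler; the lock suppresses that second-order echo
# so the pair cannot ping-pong forever. All names here are hypothetical.
import threading

class EchoBreaker:
    def __init__(self, options):
        self._lock = threading.Lock()
        self._options = options            # label -> value
        self._value = None
        self._label = None

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, new):
        self._value = new
        self._on_value_changed(new)        # synchronous notification

    @property
    def label(self):
        return self._label

    @label.setter
    def label(self, new):
        self._label = new
        self._on_label_changed(new)        # synchronous notification

    def _on_value_changed(self, new):
        if self._lock.acquire(False):      # skip if we *are* the echo
            try:
                for lbl, val in self._options.items():  # reverse lookup
                    if val == new:
                        self.label = lbl   # fires _on_label_changed: suppressed
                        return
                raise KeyError(new)
            finally:
                self._lock.release()

    def _on_label_changed(self, new):
        if self._lock.acquire(False):
            try:
                self.value = self._options[new]  # echo back: suppressed
            finally:
                self._lock.release()

# w = EchoBreaker({'one': 1, 'two': 2}); w.value = 2  ->  w.label == 'two'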
Example #58
0
class Communicator:
    UPDATE_INTERVAL = 0.4

    def __init__(self, websocket_handler, worker_id: str, worker_instance_ref,
                 command_timeout: float):
        # Throws ValueError if unable to connect!
        # catch in code using this class
        self.worker_id: str = worker_id
        self.worker_instance_ref = worker_instance_ref
        self.websocket_handler = websocket_handler
        self.__command_timeout: float = command_timeout
        self.__sendMutex = Lock()

    def cleanup_websocket(self):
        logger.info(
            "Communicator of {} acquiring lock to cleanup worker in websocket",
            str(self.worker_id))
        self.__sendMutex.acquire()
        try:
            logger.info("Communicator of {} calling cleanup",
                        str(self.worker_id))
            self.websocket_handler.clean_up_user(self.worker_id,
                                                 self.worker_instance_ref)
        finally:
            self.__sendMutex.release()

    def __runAndOk(self, command, timeout) -> bool:
        return self.__run_and_ok_bytes(command, timeout)

    def __run_and_ok_bytes(self,
                           message,
                           timeout: float,
                           byte_command: int = None) -> bool:
        self.__sendMutex.acquire()
        try:
            result = self.websocket_handler.send_and_wait(
                self.worker_id,
                self.worker_instance_ref,
                message,
                timeout,
                byte_command=byte_command)
            return result is not None and "OK" == result.strip()
        finally:
            self.__sendMutex.release()

    def install_apk(self, filepath: str, timeout: float) -> bool:
        # TODO: check if file exists...
        with open(filepath, "rb") as file:  # opening for [r]eading as [b]inary
            data = file.read(
            )  # if you only wanted to read 512 bytes, do .read(512)
        return self.__run_and_ok_bytes(message=data,
                                       timeout=timeout,
                                       byte_command=1)

    def startApp(self, package_name):
        return self.__runAndOk("more start {}\r\n".format(package_name),
                               self.__command_timeout)

    def stopApp(self, package_name):
        if not self.__runAndOk("more stop {}\r\n".format(package_name),
                               self.__command_timeout):
            logger.error(
                "Failed stopping {}, please check if SU has been granted",
                package_name)
            return False
        else:
            return True

    def passthrough(self, command):
        response = self.websocket_handler.send_and_wait(
            self.worker_id, self.worker_instance_ref,
            "passthrough {}".format(command), self.__command_timeout)
        return response

    def reboot(self) -> bool:
        return self.__runAndOk("more reboot now\r\n", self.__command_timeout)

    def restartApp(self, package_name) -> bool:
        return self.__runAndOk("more restart {}\r\n".format(package_name),
                               self.__command_timeout)

    def resetAppdata(self, package_name) -> bool:
        return self.__runAndOk("more reset {}\r\n".format(package_name),
                               self.__command_timeout)

    def clearAppCache(self, package_name) -> bool:
        return self.__runAndOk("more cache {}\r\n".format(package_name),
                               self.__command_timeout)

    def magisk_off(self, package_name) -> bool:
        return self.passthrough(
            "su -c magiskhide --rm {}".format(package_name))

    def magisk_on(self, package_name) -> bool:
        return self.passthrough(
            "su -c magiskhide --add {}".format(package_name))

    def turnScreenOn(self) -> bool:
        return self.__runAndOk("more screen on\r\n", self.__command_timeout)

    def click(self, x, y) -> bool:
        return self.__runAndOk(
            "screen click {} {}\r\n".format(str(int(round(x))),
                                            str(int(round(y)))),
            self.__command_timeout)

    def swipe(self, x1, y1, x2, y2):
        return self.websocket_handler.send_and_wait(
            self.worker_id, self.worker_instance_ref,
            "touch swipe {} {} {} {}\r\n".format(str(int(round(x1))),
                                                 str(int(round(y1))),
                                                 str(int(round(x2))),
                                                 str(int(round(y2)))),
            self.__command_timeout)

    def touchandhold(self, x1, y1, x2, y2, time: int = 3000) -> bool:
        return self.__runAndOk(
            "touch swipe {} {} {} {} {}".format(str(int(round(x1))),
                                                str(int(round(y1))),
                                                str(int(round(x2))),
                                                str(int(round(y2))),
                                                str(int(time))),
            self.__command_timeout)

    def getscreensize(self) -> str:
        response = self.websocket_handler.send_and_wait(
            self.worker_id, self.worker_instance_ref, "screen size",
            self.__command_timeout)
        return response

    def uiautomator(self) -> str:
        response = self.websocket_handler.send_and_wait(
            self.worker_id, self.worker_instance_ref, "more uiautomator",
            self.__command_timeout)
        return response

    def get_screenshot(
            self,
            path,
            quality: int = 70,
            screenshot_type: ScreenshotType = ScreenshotType.JPEG) -> bool:
        if quality < 10 or quality > 100:
            logger.error("Invalid quality value passed for screenshots")
            return False

        screenshot_type_str: str = "jpeg"
        if screenshot_type == ScreenshotType.PNG:
            screenshot_type_str = "png"

        self.__sendMutex.acquire()
        try:
            encoded = self.websocket_handler.send_and_wait(
                self.worker_id, self.worker_instance_ref,
                "screen capture {} {}\r\n".format(screenshot_type_str,
                                                  quality),
                self.__command_timeout)
        finally:
            self.__sendMutex.release()
        if encoded is None:
            return False
        elif isinstance(encoded, str):
            logger.debug("Screenshot response not binary")
            if "KO: " in encoded:
                logger.error(
                    "get_screenshot: Could not retrieve screenshot. Make sure your RGC is updated."
                )
                return False
            elif "OK:" not in encoded:
                logger.error("get_screenshot: response not OK")
                return False
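            # a textual response never carries image bytes, so fail either way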
            return False
        else:
            logger.debug("Storing screenshot...")

            with open(path, "wb") as fh:
                fh.write(encoded)
            logger.debug("Done storing, returning")
            return True

    def backButton(self) -> bool:
        return self.__runAndOk("screen back\r\n", self.__command_timeout)

    def homeButton(self) -> bool:
        return self.__runAndOk("touch keyevent 3", self.__command_timeout)

    def enterButton(self) -> bool:
        return self.__runAndOk("touch keyevent 61", self.__command_timeout)

    def sendText(self, text):
        return self.__runAndOk("touch text " + str(text),
                               self.__command_timeout)

    def isScreenOn(self) -> bool:
        self.__sendMutex.acquire()
        try:
            state = self.websocket_handler.send_and_wait(
                self.worker_id, self.worker_instance_ref,
                "more state screen\r\n", self.__command_timeout)
            if state is None:
                return False
            return "on" in state
        finally:
            self.__sendMutex.release()

    def isPogoTopmost(self) -> bool:
        self.__sendMutex.acquire()
        try:
            topmost = self.websocket_handler.send_and_wait(
                self.worker_id, self.worker_instance_ref,
                "more topmost app\r\n", self.__command_timeout)
            if topmost is None:
                return False
            return "com.nianticlabs.pokemongo" in topmost
        finally:
            self.__sendMutex.release()

    def topmostApp(self) -> str:
        self.__sendMutex.acquire()
        try:
            topmost = self.websocket_handler.send_and_wait(
                self.worker_id, self.worker_instance_ref,
                "more topmost app\r\n", self.__command_timeout)
            if topmost is None:
                return ""  # keep the declared str return type; still falsy
            return topmost
        finally:
            self.__sendMutex.release()

    def setLocation(self, lat, lng, alt):
        self.__sendMutex.acquire()
        try:
            response = self.websocket_handler.send_and_wait(
                self.worker_id, self.worker_instance_ref,
                "geo fix {} {} {}\r\n".format(lat, lng,
                                              alt), self.__command_timeout)
            return response
        finally:
            self.__sendMutex.release()

    def terminate_connection(self):
        self.__sendMutex.acquire()
        try:
            response = self.websocket_handler.send_and_wait(
                self.worker_id, self.worker_instance_ref, "exit\r\n", 5)
            return response
        finally:
            self.__sendMutex.release()

    # coords need to be float values
    # speed is an integer in km/h
    #######
    # This blocks!
    #######
    def walkFromTo(self, startLat, startLng, destLat, destLng, speed):
        with self.__sendMutex:
            # calculate the time it will take to walk and add it to the timeout!
            distance = get_distance_of_two_points_in_meters(
                startLat, startLng, destLat, destLng)
            # speed is in kmph, distance in m
            # we want m/s -> speed / 3.6
            speed_meters = speed / 3.6
            seconds_traveltime = distance / speed_meters
            response = self.websocket_handler.send_and_wait(
                self.worker_id, self.worker_instance_ref,
                "geo walk {} {} {} {} {}\r\n".format(startLat, startLng,
                                                     destLat, destLng, speed),
                self.__command_timeout + seconds_traveltime)
            return response
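
# --- Illustrative sketch (hypothetical names): the timeout arithmetic used
# by walkFromTo above. Speed arrives in km/h, distance in metres, and the
# expected travel time is added to the base command timeout.
def walk_timeout(distance_m, speed_kmh, base_timeout):
    speed_ms = speed_kmh / 3.6             # km/h -> m/s
    return base_timeout + distance_m / speed_ms

# e.g. walk_timeout(180.0, 4.5, 30) == 30 + 180 / 1.25 == 174.0 seconds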
Example #59
0
class RDMTestThread(Thread):
    """The RDMResponder tests are closely coupled to the Wrapper (yuck!). So we
     need to run this all in a separate thread. This is all a bit of a hack and
     you'll get into trouble if multiple things are running at once...
  """
    RUNNING, COMPLETED, ERROR = range(3)
    TESTS, COLLECTOR = range(2)

    def __init__(self, pid_store, logs_directory):
        super(RDMTestThread, self).__init__()
        self._pid_store = pid_store
        self._logs_directory = logs_directory
        self._terminate = False
        self._request = None
        # guards _terminate and _request
        self._cv = Condition()
        self._wrapper = None
        self._test_state_lock = Lock()  # guards _test_state
        self._test_state = {}

    def Stop(self):
        self._cv.acquire()
        self._terminate = True
        self._cv.notify()
        self._cv.release()

    def ScheduleTests(self, universe, uid, test_filter, broadcast_write_delay,
                      inter_test_delay, dmx_frame_rate, slot_count):
        """Schedule the tests to be run. Callable from any thread. Callbable by any
       thread.

    Returns:
      An error message, or None if the tests were scheduled.
    """
        if not self._CheckIfConnected():
            return 'Lost connection to OLAD'

        self._cv.acquire()
        if self._request is not None:
            self._cv.release()
            return 'Existing request pending'

        self._request = lambda: self._RunTests(
            universe, uid, test_filter, broadcast_write_delay,
            inter_test_delay, dmx_frame_rate, slot_count)
        self._cv.notify()
        self._cv.release()
        return None

    def ScheduleCollector(self, universe, skip_queued_messages):
        """Schedule the collector to run on a universe. Callable by any thread.

    Returns:
      An error message, or None if the collection was scheduled.
    """
        if not self._CheckIfConnected():
            return 'Lost connection to OLAD'

        self._cv.acquire()
        if self._request is not None:
            self._cv.release()
            return 'Existing request pending'

        self._request = lambda: self._RunCollector(universe,
                                                   skip_queued_messages)
        self._cv.notify()
        self._cv.release()
        return None

    def Stat(self):
        """Check the state of the tests. Callable by any thread.

    Returns:
      The status of the tests.
    """
        self._test_state_lock.acquire()
        state = dict(self._test_state)
        self._test_state_lock.release()
        return state

    def run(self):
        self._wrapper = ClientWrapper()
        self._collector = ModelCollector(self._wrapper, self._pid_store)
        while True:
            self._cv.acquire()
            if self._terminate:
                logging.info('quitting test thread')
                self._cv.release()
                return

            if self._request is not None:
                request = self._request
                self._request = None
                self._cv.release()
                request()
                continue
            # nothing to do, go into the wait
            self._cv.wait()
            self._cv.release()

    def _UpdateStats(self, tests_completed, total_tests):
        self._test_state_lock.acquire()
        self._test_state['tests_completed'] = tests_completed
        self._test_state['total_tests'] = total_tests
        self._test_state_lock.release()

    def _RunTests(self, universe, uid, test_filter, broadcast_write_delay,
                  inter_test_delay, dmx_frame_rate, slot_count):
        self._test_state_lock.acquire()
        self._test_state = {
            'action': self.TESTS,
            'tests_completed': 0,
            'total_tests': None,
            'state': self.RUNNING,
            'duration': 0,
        }
        start_time = datetime.now()
        self._test_state_lock.release()

        runner = TestRunner.TestRunner(universe, uid, broadcast_write_delay,
                                       inter_test_delay, self._pid_store,
                                       self._wrapper)

        for test in TestRunner.GetTestClasses(TestDefinitions):
            runner.RegisterTest(test)

        dmx_sender = None
        if dmx_frame_rate > 0 and slot_count > 0:
            logging.info(
                'Starting DMXSender with slot count %d and FPS of %d' %
                (slot_count, dmx_frame_rate))
            dmx_sender = DMXSender(self._wrapper, universe, dmx_frame_rate,
                                   slot_count)

        try:
            tests, device = runner.RunTests(test_filter, False,
                                            self._UpdateStats)
        except Exception as e:
            self._test_state_lock.acquire()
            self._test_state['state'] = self.ERROR
            self._test_state['exception'] = str(e)
            self._test_state['traceback'] = traceback.format_exc()
            self._test_state_lock.release()
            return
        finally:
            if dmx_sender is not None:
                dmx_sender.Stop()

        timestamp = int(time())
        end_time = datetime.now()
        test_parameters = {
            'broadcast_write_delay': broadcast_write_delay,
            'inter_test_delay': inter_test_delay,
            'dmx_frame_rate': dmx_frame_rate,
            'dmx_slot_count': slot_count,
        }
        log_saver = TestLogger.TestLogger(self._logs_directory)
        logs_saved = True
        try:
            log_saver.SaveLog(uid, timestamp, end_time, tests, device,
                              test_parameters)
        except TestLogger.TestLoggerException:
            logs_saved = False

        self._test_state_lock.acquire()
        # we can't use total_seconds() since it requires Python 2.7
        time_delta = end_time - start_time
        self._test_state['duration'] = (time_delta.seconds +
                                        time_delta.days * 24 * 3600)
        self._test_state['state'] = self.COMPLETED
        self._test_state['tests'] = tests
        self._test_state['logs_saved'] = logs_saved
        self._test_state['timestamp'] = timestamp
        self._test_state['uid'] = uid
        self._test_state_lock.release()

    def _RunCollector(self, universe, skip_queued_messages):
        """Run the device model collector for a universe."""
        logging.info('Collecting for %d' % universe)
        self._test_state_lock.acquire()
        self._test_state = {
            'action': self.COLLECTOR,
            'state': self.RUNNING,
        }
        self._test_state_lock.release()

        try:
            output = self._collector.Run(universe, skip_queued_messages)
        except Exception as e:
            self._test_state_lock.acquire()
            self._test_state['state'] = self.ERROR
            self._test_state['exception'] = str(e)
            self._test_state['traceback'] = traceback.format_exc()
            self._test_state_lock.release()
            return

        self._test_state_lock.acquire()
        self._test_state['state'] = self.COMPLETED
        self._test_state['output'] = output
        self._test_state_lock.release()

    def _CheckIfConnected(self):
        """Check if the client is connected to olad.

    Returns:
      True if connected, False otherwise.
    """
        # TODO(simon): add this check, remember it needs locking.
        return True
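
# --- Illustrative sketch (not OLA code): the Condition-based hand-off used
# by run()/ScheduleTests above, reduced to its core. Producers set a single
# pending request under the lock and notify; the worker runs it outside the
# lock so new requests can be rejected quickly while one is in flight.
import threading

class OneShotWorker(threading.Thread):
    def __init__(self):
        super().__init__()
        self._cv = threading.Condition()   # guards _request and _terminate
        self._request = None
        self._terminate = False

    def schedule(self, fn):
        with self._cv:
            if self._request is not None:
                return 'Existing request pending'
            self._request = fn
            self._cv.notify()
        return None

    def stop(self):
        with self._cv:
            self._terminate = True
            self._cv.notify()

    def run(self):
        while True:
            with self._cv:
                while self._request is None and not self._terminate:
                    self._cv.wait()
                if self._terminate:
                    return
                request, self._request = self._request, None
            request()                      # executed outside the lock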
Example #60
0
class Log(object):
    __metaclass__ = Singleton

    def __init__(self):
        super(Log, self).__init__()
        self.thread_lock = Lock()
        self.print_require_level = 0
        self.file_require_level = 0

    def log(self, level, msg, args):
        try:
            self.thread_lock.acquire()

            if args and isinstance(args, tuple):
                msg = msg % tuple(
                    (arg.encode("utf8") if isinstance(arg, unicode) else arg
                     for arg in args))

            if level >= self.print_require_level:
                try:
                    print msg
                except:
                    pass

            if level >= self.file_require_level:
                file_name = path.join(
                    BASE_DIR, 'log', ''.join([
                        'log_',
                        strftime('%Y-%m-%d', localtime()), '_',
                        str(getpid()), '.txt'
                    ]))
                f = open(file_name, 'a')
                f.write(get_string(msg))
                f.write('\n')
                f.close()
        except Exception as e:
            print '(' * 6
            print msg
            print args
            print format_exc()
            print ')' * 6
        finally:
            self.thread_lock.release()

    def debug(self, msg, *args):
        self.log(1,
                 ''.join([strftime('[%H:%M:%S]debug   :', localtime()) + msg]),
                 args)

    def info(self, msg, *args):
        self.log(2,
                 ''.join([strftime('[%H:%M:%S]info    :', localtime()) + msg]),
                 args)

    def trace(self, msg, *args):
        self.log(3,
                 ''.join([strftime('[%H:%M:%S]trace   :', localtime()) + msg]),
                 args)

    def warn(self, msg, *args):
        self.log(4,
                 ''.join([strftime('[%H:%M:%S]warn    :', localtime()) + msg]),
                 args)

    def error(self, msg, *args):
        self.log(5,
                 ''.join([strftime('[%H:%M:%S]error   :', localtime()) + msg]),
                 args)

    def critical(self, msg, *args):
        self.log(6,
                 ''.join([strftime('[%H:%M:%S]critical:', localtime()) + msg]),
                 args)
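
# --- Illustrative sketch (not part of the Log class above): the same
# serialize-the-write idea in Python 3 form, reduced to one function.
import threading
from time import localtime, strftime

_write_lock = threading.Lock()

def log_line(fd, level, msg):
    line = '[{}]{:8}: {}\n'.format(strftime('%H:%M:%S', localtime()),
                                   level, msg)
    with _write_lock:                  # one writer at a time, like Log.log
        fd.write(line)
        fd.flush()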