Example #1
class ThreadPool(object):
    """
    Non-blocking pool of threads consuming tasks from a queue
    """
    def __init__(self, numThreads):
        self.numThreads = numThreads
        self.workers = []

        self.tasks = Queue(numThreads)
        for _ in range(numThreads):
            self.workers.append(Worker(self.tasks))

    def addTask(self, func, *args, **kargs):
        """
        Add a task to the queue
        if Queue is full already when adding task, return false
        """
        try:
            self.tasks.put_nowait((func, args, kargs))
            return True
        except Full:
            LOG.error("Thread pool of size %s is full" % self.numThreads)
            return False

    def getWorkerRunningTime(self):
        """ get info for all threads """
        runningTime = []
        for _, worker in enumerate(self.workers):
            runningTime.append("%.2fms" % (1000 * worker.getRunningTime()))
        return runningTime

    def waitCompletion(self):
        """Wait for completion of all the tasks in the queue"""
        self.tasks.join()
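The Worker class the pool instantiates is not shown here. Below is a minimal sketch of a compatible worker, assuming each task is the (func, args, kargs) tuple queued by addTask and that task_done() is what lets waitCompletion() return; the start-time bookkeeping for getRunningTime() is illustrative:

import time
from threading import Thread

class Worker(Thread):
    """Consume tasks from the pool's queue until the process exits."""
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.startTime = None
        self.daemon = True              # don't block interpreter shutdown
        self.start()

    def run(self):
        while True:
            func, args, kargs = self.tasks.get()
            self.startTime = time.time()
            try:
                func(*args, **kargs)
            finally:
                self.startTime = None
                self.tasks.task_done()  # lets ThreadPool.waitCompletion() return

    def getRunningTime(self):
        """Seconds the current task has been running; 0 if idle."""
        return time.time() - self.startTime if self.startTime else 0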
Example #2
def main():
    f = init_bloom_filter()
    print "bloom filter's count:%s" % f.count

    user_accessed_set = init_user_access()
    print "user accessed set:%s" % user_accessed_set

    from zhihu_thread import MyThread
    threads = []
    queue = Queue()

    url = USER_URL.format("jie-28")
    user_seeds = User(url).generate_user_seeds(int(math.ceil(THREAD_COUNT)), user_accessed_set)

    for user_seed in user_seeds:
        queue.put_nowait(user_seed)
    print "Start, user seeds:%s " % user_seeds

    import threading
    lock = threading.Lock()
    bf_lock = threading.Lock()
    for i in range(THREAD_COUNT):
        t = MyThread(consume, (lock, bf_lock, f, user_accessed_set, queue, i, GRAPH_DEEP_LEVEL),
                     consume.__name__)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    print "End, queue's size:%s" % queue.qsize()

    print "All Done"
Example #3
    def bfs(self, src):
        self.colors = []
        self.distances = []
        self.predecessors = []

        for u in xrange(self.graphlen):
            self.colors.append(WHITE)
            self.distances.append(0)
            self.predecessors.append(None)

        self.colors[src] = GRAY
        Q = Queue()
        Q.put_nowait(src)

        while not Q.empty():
            u = Q.get_nowait()
            if u not in self.graph:
                continue
            for v in self.graph[u]:
                if self.colors[v] == WHITE:
                    self.colors[v] = GRAY
                    self.distances[v] = self.distances[u]+1
                    self.predecessors[v] = u
                    Q.put_nowait(v)
            self.colors[u] = BLACK
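The method assumes WHITE/GRAY/BLACK colour constants, an adjacency dict in self.graph, and a vertex count in self.graphlen. A small harness with a hypothetical Graph wrapper shows how it could be driven (the bfs method above would be pasted into the class body):

from Queue import Queue  # Python 2; "from queue import Queue" on Python 3

WHITE, GRAY, BLACK = 0, 1, 2

class Graph(object):
    def __init__(self, adjacency):
        self.graph = adjacency          # {vertex: [neighbour, ...]}
        self.graphlen = len(adjacency)  # vertices numbered 0..n-1

    # bfs() from the example above goes here

g = Graph({0: [1, 2], 1: [3], 2: [3], 3: []})
g.bfs(0)
print(g.distances)      # [0, 1, 1, 2]
print(g.predecessors)   # [None, 0, 0, 1]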
Example #4
class Carrier(object):
    """ スレッド間のデータ受け渡しとイベント通知を行う """

    def __init__(self, name):
        self.name = name
        self.queue = Queue()
        self.event = Event()

    def handover(self, packet):
        """ データをキューに入れる. 配送先のイベントを起こす """
        self.queue.put_nowait(packet)
        self.event.set()
        self.event.clear()

    def pickup(self):
        """ 配達したデータを受け取ってもらう """
        return self.queue.get_nowait()

    def wake(self):
        """ イベントを起こす """
        self.event.set()

    def clear(self):
        """ キューを空っぽにする """
        while not self.empty():
            self.pickup()

    def empty(self):
        """ キューが空かどうかを返す """
        return self.queue.empty()

    def sleep(self):
        """ イベントを眠らせる """
        self.event.wait()
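A minimal producer/consumer pairing shows the intended flow; the names are illustrative. Note that the set()-then-clear() pattern in handover() can lose a wakeup if the consumer is not already waiting, so the consumer re-checks the queue before sleeping:

import threading

def consumer(carrier):
    while True:
        if carrier.empty():
            carrier.sleep()         # block until handover() fires the event
        packet = carrier.pickup()
        if packet is None:          # illustrative shutdown sentinel
            return
        print(packet)

carrier = Carrier('worker-1')
t = threading.Thread(target=consumer, args=(carrier,))
t.start()
carrier.handover('hello')
carrier.handover(None)              # stop the consumer
t.join()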
Example #5
def main(argv):
    global input_file
    global tenant
    global number

    try:
        opts, args = getopt.getopt(argv, "hn:i:t:", ["help", "number=", "input=", "tenant="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    if len(opts) == 0:
        logger_main.info("Will use default parameter: python {file_name} -i {input_file} -t {tenant} -n {number}".format(
            file_name=sys.argv[0], input_file=input_file, tenant=tenant, number=number))
    for opt, arg in opts:
        if opt in ("-n", "--number"):
            number = int(arg)
        elif opt in ("-i", "--input"):
            input_file = arg
        elif opt in ("-t", "--tenant"):
            tenant = arg
        elif opt in ("-h", "--help"):
            usage()
            exit()
        else:
            usage()
            exit(1)

    with open(input_file) as f:
        lines = f.readlines()
    option_set = ';'.join(lines).replace('\n', '').split(';')

    from threading import Thread
    from Queue import Queue

    #initialize a queue
    sql_queue = Queue()

    #initialize a thread to process DB insertion
    db_thread = Thread(target=do_insertion, args=(sql_queue,))
    db_thread.start()

    i = 0
    sql_segment_buffer = ''
    while i < number:
        i += 1
        one_row = generate_one_row(option_set, tenant)

        if i % 1000 == 0:
            sql_segment_buffer += one_row + ';'
            sql_queue.put_nowait(sql_segment_buffer)
            sql_segment_buffer = ''
            logger_main.info("Generate the {index}th insert SQL".format(index=i))
        else:
            sql_segment_buffer += one_row + ','

    logger_main.info("Finish SQL generation")
    sql_queue.put_nowait(POISON)
    db_thread.join()
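do_insertion and POISON are referenced but not defined in this excerpt. A plausible sketch of the consumer side, where execute_sql is a hypothetical stand-in for the actual DB call:

POISON = object()  # sentinel telling the DB thread to exit

def do_insertion(sql_queue):
    while True:
        sql_segment = sql_queue.get()  # blocks until a batch is available
        if sql_segment is POISON:
            break
        execute_sql(sql_segment)       # hypothetical: run the INSERT batch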
Example #6
class HttpPool(object):

    def __init__(
        self,
        threads_count,
        fail_op,
        log,
        ):

        self._tasks = Queue()
        self._results = Queue()

        for i in xrange(threads_count):
            thread.start_new_thread(get_remote_data, (self._tasks,
                                    self._results, fail_op, log))

    def add_task(self, tid, url):

        task = {'id': tid, 'url': url}
        try:
            self._tasks.put_nowait(task)
        except Full:
            # put_nowait can only raise Full if _tasks is created with a maxsize
            return False

        return True

    def get_results(self):
        results = []
        while True:
            try:
                res = self._results.get_nowait()
            except Empty:
                break
            results.append(res)
        return results
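get_remote_data, the function each worker thread runs, is not shown. A sketch of the loop it presumably implements; fetch is a hypothetical HTTP helper, and recording fail_op as the result of a failed task is an assumption:

def get_remote_data(tasks, results, fail_op, log):
    while True:
        task = tasks.get()              # blocks until a task arrives
        try:
            data = fetch(task['url'])   # hypothetical HTTP fetch
        except Exception as e:
            log.error('task %s failed: %s' % (task['id'], e))
            data = fail_op
        results.put_nowait((task['id'], data))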
Example #7
class MongoDBSubscriber:
    def __init__(self, collection, topics, timestamp):
        self.messages = Queue()
        self.collection = collection
        self.topics = list(topics)
        self.timestamp = timestamp

    def getNext(self):
        self.cursor = self.collection.find({'topic': {'$in': self.topics}, 'timestamp': {'$gte': self.timestamp}})
        for data in self.cursor:
            self.messages.put_nowait(data['message'])
        self.timestamp = datetime.now()
        if self.messages.qsize() == 0:
            return None
        else:
            return self.messages.get_nowait()

    def getAll(self):
        self.cursor = self.collection.find({'topic': {'$in': self.topics}, 'timestamp': {'$gte': self.timestamp}})
        for data in self.cursor:
            self.messages.put_nowait(data['message'])
        self.timestamp = datetime.now()
        items = []
        maxItemsToRetrieve = self.messages.qsize()
        for _ in range(maxItemsToRetrieve):
            try:
                items.append(self.messages.get_nowait())
            except Empty:
                break
        return items
Example #8
class Publisher(object):
    def __init__(self, port):
        super(Publisher, self).__init__()
        self.address = 'tcp://*:%s' % port
        self.context = zmq.Context()
        self.messages = Queue()

    def publish(self, command, *args):
        message = {
            'command': command,
            'args': args
        }
        self.messages.put_nowait(message)

    def _deliver(self):
        self.socket = self.context.socket(zmq.PUB)
        self.socket.bind(self.address)
        time.sleep(0.5)
        while True:
            while not self.messages.empty():
                self.socket.send_pyobj(self.messages.get_nowait())
            time.sleep(0.05)

    def run(self):
        t = threading.Thread(target=self._deliver, name='pub_server')
        t.daemon = True
        t.start()

    def unbind(self):
        self.socket.unbind(self.address)
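A matching subscriber might look as follows, using standard pyzmq calls; the host and port are illustrative:

import zmq

context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('tcp://localhost:5556')  # must match the Publisher's port
socket.setsockopt(zmq.SUBSCRIBE, b'')   # subscribe to all messages

while True:
    message = socket.recv_pyobj()       # counterpart of send_pyobj above
    print(message)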
Example #9
class TaskProcessor(object):
    """
    Collect tasks and run with multiprocessing.Process
    """

    def __init__(self, processes, process_timeout, result_queue_handler):
        """
        :param processes: number of worker processes
        :type processes: int
        :param process_timeout: number of seconds for process timeout
        :type process_timeout: int, float
        :param result_queue_handler: handler for result queue
        """
        self._processes = processes if processes > 0 else cpu_count()
        self._current = []
        self._queue = TaskQueue()
        self._process_timeout = process_timeout
        self._result_queue_handler = result_queue_handler

    def _is_release(self):
        if len(self._current) < self._processes:
            return True

        for process in self._current:
            if not process.is_alive():
                process.terminate()
                process.join()
                self._current.remove(process)
                return True

        return False

    def add_task(self, target, args=None, kwargs=None):
        self._queue.put_nowait((target, args or tuple(), kwargs or dict()))

    def serve(self):
        while not self._queue.empty():
            try:
                waiting_for(self._is_release, timeout=self._process_timeout, sleep=0.01)
            except TimeoutException:
                self.destroy()
                raise

            target, args, kwargs = self._queue.get_nowait()
            process = Process(target=target, args=args, kwargs=kwargs)
            process.start()
            self._current.append(process)

        self._result_queue_handler.handle()

    def destroy(self):
        """
        Kill all processes
        """
        for process in self._current:
            process.terminate()

    def close(self):
        for process in self._current:
            process.join()
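waiting_for and TimeoutException come from elsewhere in the project. A minimal stand-in consistent with how serve() calls them, polling a predicate until it returns truthy or the timeout elapses:

import time

class TimeoutException(Exception):
    pass

def waiting_for(predicate, timeout=None, sleep=0.01):
    deadline = None if timeout is None else time.time() + timeout
    while not predicate():
        if deadline is not None and time.time() > deadline:
            raise TimeoutException('condition not met within %s seconds' % timeout)
        time.sleep(sleep)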
Example #10
class ParserThread(Thread):
    """Parse the http results in parallel with getting them from the server
    """

    def __init__(self, parser):
        self.parser = parser
        self.q = Queue()
        self.ptime = 0
        self.rawlength = 0
        Thread.__init__(self)

    def run(self):
        self.result = []
        while True:
            task = self.q.get(True)
            if task is None:
                break
            (url, item) = task
            # print "parsing result for", url
            item.seek(0)
            tic = time.time()
            val = self.parser(item.read())
            self.ptime += time.time() - tic
            self.rawlength += item.tell()

            self.result.append((url, val))

    def add(self, url, pval):
        self.q.put_nowait((url, pval))

    def finish(self):
        self.q.put_nowait(None)
        self.join()
        return self.result
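Typical usage interleaves fetching and parsing. A runnable sketch using StringIO objects as stand-ins for HTTP response bodies (the thread only needs a callable parser and file-like items):

import json
try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO        # Python 3

pt = ParserThread(json.loads)      # any text -> value callable will do
pt.start()
for url in ['http://example.com/a', 'http://example.com/b']:  # illustrative
    body = StringIO(u'{"source": "%s"}' % url)  # stand-in for a response body
    pt.add(url, body)
results = pt.finish()              # sentinel + join, then (url, value) pairs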
Example #11
class Lock(object):
    def __init__(self, io_loop=None):
        self._lock = TLock()
        self._queue = Queue()
        self.io_loop = io_loop
        
    @gen.engine
    def run(self, callback, *args, **kwargs):
        yield gen.Task(self.acquire)
        callback(*args, **kwargs)
        self.release()
    
    @gen.engine
    def run_async(self, callback, *args, **kwargs):
        yield gen.Task(self.acquire)
        yield gen.Task(callback, *args, **kwargs)
        self.release()
    
    def acquire(self, callback, *args, **kwargs):
        if self._lock.acquire(False):
            callback(*args, **kwargs)
        else:
            self._queue.put_nowait((callback, args, kwargs))
            
    def release(self):
        self._lock.release()
        try:
            callback, args, kwargs = self._queue.get_nowait()
            if self.io_loop:
                self.io_loop.add_callback(lambda: self.acquire(callback, *args, **kwargs))
            else:
                self.acquire(callback, *args, **kwargs)
        except Empty:
            pass
Example #12
class Transceiver(object):
    def __init__(self):
        from Queue import Queue
        print 'Initialising queue.'
        self.queue = Queue(1)
    def _get_name(self):
        from threading import currentThread
        return currentThread().getName()
    def enqueue(self, msg):
        from Queue import Full
        if not isinstance(msg, Message):
            raise Exception('%s: Das Fak !' % self._get_name())
        try:
            #self.queue.put(msg,block=True,timeout=1.0)
            self.queue.put_nowait(msg)
            print '%s: Enqueue %s.'  % (self._get_name(),msg)
            return True
        except Full:
            print '%s: Queue busy. Dropping %s.' % (self._get_name(),msg)
            return False
    def _process(self, msg):
        raise Exception('Not implemented.')
    def __call__(self, *args, **kwargs):
        from Queue import Empty
        while True:
            try:
                # a plain blocking get() waits forever, so Empty is only
                # reachable if a timeout is passed to get()
                msg = self.queue.get(block=True)
            except Empty:
                continue
            self._process(msg)
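_process is left abstract and the Message class is referenced but not shown. A minimal concrete subclass with a stub Message, purely for illustration:

class Message(object):             # stub; the real Message class is not shown
    def __init__(self, payload):
        self.payload = payload

class Echo(Transceiver):
    def _process(self, msg):
        print('%s: Processed %s.' % (self._get_name(), msg.payload))

echo = Echo()
echo.enqueue(Message('ping'))      # the queue holds a single item (maxsize=1)
# echo() would now loop forever, processing messages as they arrive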
Example #13
class TransmitterDevice(i2cMock.I2CDevice):
    MAX_CONTENT_SIZE = 235
    BUFFER_SIZE = 40

    def __init__(self):
        super(TransmitterDevice, self).__init__(0x62)
        self.log = logging.getLogger("Transmitter")
        # renamed from _reset/_hwreset so the events don't shadow the
        # command methods of the same name below
        self._reset_event = Event()
        self._hwreset_event = Event()
        self._buffer = Queue(TransmitterDevice.BUFFER_SIZE)
    
    @i2cMock.command([0xAA])
    def _reset(self):
        self.log.info("Reset")
        self._reset_event.set()

    @i2cMock.command([0xAB])
    def _hwreset(self):
        self.log.info("Hardware reset")
        self._hwreset_event.set()

    @i2cMock.command([0x10])
    def _send_frame(self, *data):
        self.log.info("Send frame %s", data)

        self._buffer.put_nowait(data)

        return [TransmitterDevice.BUFFER_SIZE - self._buffer.qsize()]

    def wait_for_reset(self, timeout=None):
        return self._reset_event.wait(timeout)

    def get_message_from_buffer(self, timeout=None):
        return self._buffer.get(timeout=timeout)
Example #14
class DeviceConnectionObject(Foundation.NSObject):
    def initWithHost_port_(self, host, port):
        self = self.init()
        if self is None: return None
        
        self.client = DeviceClient(host, port)
        self.commandQueue = Queue()

        return self

    def queueSize(self):
        return self.commandQueue.qsize()

    def appendCommand_(self, command):
        self.willChangeValueForKey_('queueSize')
        self.commandQueue.put_nowait([str(item) for item in command])
        self.didChangeValueForKey_('queueSize')

    def tickLoop(self):
        asyncore.loop(timeout=0, count=1)
        self.fillSendBuffer()

    def disconnect(self):
        self.client.close()
        self.client = None

    def fillSendBuffer(self):
        if self.client.buffer or self.commandQueue.empty(): return

        self.willChangeValueForKey_('queueSize')
        command = self.commandQueue.get_nowait()
        self.client.buffer = msgpack.packb(command)
        self.didChangeValueForKey_('queueSize')
Example #15
class HttpPool(object):   
    def __init__(self, threads_count, fail_op, log):   
        self._tasks = Queue()   
        self._results = Queue()   
           
        for i in xrange(threads_count):
            thread.start_new_thread(get_remote_data,
                                    (self._tasks, self._results, fail_op, log))
               
    def add_task(self, tid, host, url, params, headers=None, method='GET', timeout=None):
        task = {
            'id': tid,
            'conn_args': {'host': host} if timeout is None else {'host': host, 'timeout': timeout},
            'headers': headers if headers is not None else {},
            'url': url,
            'params': params,
            'method': method,
            }
        try:
            self._tasks.put_nowait(task)
        except Full:
            # put_nowait can only raise Full if _tasks is created with a maxsize
            return False
        return True  
           
    def get_results(self):   
        results = []   
        while True:   
            try:   
                res = self._results.get_nowait()   
            except Empty:   
                break  
            results.append(res)   
        return results   
Example #16
class Pool(object):
    
    def __init__(self):
        self.__queue_ = Queue()
        self.__thread_ = Thread(group=None, target = self.__thread_func_)
        self.__started = False
        
    def invoke(self, d, func, args, kwargs):
        self.__queue_.put_nowait((d, func, args, kwargs))
        
    def start(self):
        if not self.__started:
            self.__started = True
            self.__thread_.start()
        return defer.succeed(True)
    
    def stop(self):
        # the worker blocks in queue.get(), so it only notices this flag
        # after the next task arrives; queueing a sentinel would stop it promptly
        self.__started = False
        
    def __thread_func_(self):
        while self.__started:
            (d, func, args, kwargs) = self.__queue_.get(True)
            try:
                result = func(*args, **kwargs)
                reactor.callFromThread(d.callback, *(result,))
            except Exception as e:
                reactor.callFromThread(d.errback, Failure(e, Exception))
Example #17
class Link:
    def __init__(self, socket_obj, connection):
        self._connection = connection
        self._socket = socket_obj
        self._read_buffer = bytearray(BUFFER_SIZE)  # recv_into needs a pre-sized buffer
        self._write_queue = Queue()

    def read(self):
        result = bytearray()
        while True:
            read_count = self._connection.recv_into(self._read_buffer, BUFFER_SIZE)
            if read_count > 0:
                result.extend(self._read_buffer[:read_count])
            else:
                break
        return result

    def send(self, data):
        self._write_queue.put_nowait(data)
        self._socket.notify_send_data(self._connection.fileno())

    def write(self):
        while not self._write_queue.empty():
            data = self._write_queue.get_nowait()
            self._connection.sendall(data)

    def close(self):
        self._connection.close()
Example #18
class IWRCBot():
    def __init__(self, site, safe = True):
        self.other_ns = re.compile(u'14\[\[07(' + u'|'.join(site.namespaces()) + u')')
        interwiki.globalvar.autonomous = True
        self.site = site
        self.queue = Queue()
        self.processed = []
        self.safe = safe
        # Start 20 threads
        for i in range(20):
            t = threading.Thread(target=self.worker)
            t.setDaemon(True)
            t.start()

    def worker(self):
        bot = interwiki.InterwikiBot()
        while True:
            # Will wait until one page is available
            bot.add(self.queue.get())
            bot.queryStep()
            self.queue.task_done()

    def addQueue(self, name):
        if self.other_ns.match(name):
            return
        if self.safe:
            if name in self.processed:
                return
            self.processed.append(name)
        page = pywikibot.Page(self.site, name)
        # the Queue has for now an unlimited size,
        # it is a simple atomic append(), no need to acquire a semaphore
        self.queue.put_nowait(page)
Example #19
class MyStore(object):
    def __init__(self):
        """initializes a Store"""
        self.store = Queue(maxsize=100)
        self.size = (24, 80)
        self.typesOfFractals = ['julia', 'mandelbrot', 'test']
    def put(self, item):
        """puts an item into the queue"""
        try:
            self.store.put_nowait(item)
        except Full:
            return False
        return True
    def get(self):
        """gets an item out of the store"""
        try:
            return self.store.get_nowait()
        except Empty:
            return False
    def set_size(self, height, width):
        """sets a size for the console"""
        self.size = (int(height), int(width))
        return True
    def get_size(self):
        """Gets the console size"""
        return self.size
    def pick_type(self):
        """pick the type of fractal to generate"""
        return random.choice(self.typesOfFractals)
Example #20
class ConnectionPool(object):

    def __init__(self, source_connection, min=2, max=None, preload=True):
        self.source_connection = source_connection
        self.min = min
        self.max = max
        self.preload = preload
        self.source_connection.pool = self

        self._connections = Queue()
        self._dirty = deque()

        self._connections.put(self.source_connection)
        for i in range(min - 1):
            self._connections.put_nowait(self._new_connection())

    def acquire(self, block=False, timeout=None, connect_timeout=None):
        try:
            conn = self._connections.get(block=block, timeout=timeout)
        except QueueEmpty:
            conn = self._new_connection()
        self._dirty.append(conn)
        if connect_timeout is not None:
            conn.connect_timeout = connect_timeout
        return conn

    def release(self, connection):
        self._dirty.remove(connection)
        self._connections.put_nowait(connection)

    def _new_connection(self):
        if self.max is not None and len(self._dirty) >= self.max:
            raise ConnectionLimitExceeded(self.max)
        return copy(self.source_connection)
Example #21
    def batch_futures(self, session, statement_generator):
        concurrency = 10
        futures = Queue(maxsize=concurrency)
        number_of_timeouts = 0
        for i, statement in enumerate(statement_generator):
            if i > 0 and i % (concurrency - 1) == 0:
                # clear the existing queue
                while True:
                    try:
                        futures.get_nowait().result()
                    except (OperationTimedOut, WriteTimeout):
                        ex_type, ex, tb = sys.exc_info()
                        number_of_timeouts += 1
                        log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
                        del tb
                        time.sleep(1)
                    except Empty:
                        break

            future = session.execute_async(statement)
            futures.put_nowait(future)

        while True:
            try:
                futures.get_nowait().result()
            except (OperationTimedOut, WriteTimeout):
                ex_type, ex, tb = sys.exc_info()
                number_of_timeouts += 1
                log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
                del tb
                time.sleep(1)
            except Empty:
                break
        return number_of_timeouts
Example #22
class Protocol(RPCProtocol):
    noisy = True

    def __init__(self, *args, **kwargs):
        self.queue = Queue(maxsize=10000)
        self.thread_stop = False
        self.thread = threading.Thread(target=self._thread_loop)
        self.thread.start()
        RPCProtocol.__init__(self, *args, **kwargs)

    def _thread_loop(self):
        while not self.thread_stop:
            time.sleep(0.002)  # dont hog cpu
            for entry in storjnode.util.empty_queue(self.queue):
                self.call(entry["sender"], entry["value"] + 1)

    def stop(self):
        self.thread_stop = True
        self.thread.join()

    def rpc_call(self, sender, value):
        try:
            print("Got {0} from {1}".format(value, sender))
            self.queue.put_nowait({"sender": sender, "value": value})
            return True
        except Full:
            print("Queue full, f**k!")
            return False
Example #23
File: fm.py Project: haoliangx/PyFM
class FM_Controller(object):

    def __init__(self):
        self.agent = FM_API()
        self.player = Player()
        qsize = 10
        self.queue = Queue(qsize)

    def load(self, url):
        obj = pydl(url, progress_bar = False)
        obj.start(blocking = True)
        return obj.get_dest()

    def refresh_list(self, sid = None):
        if not sid:
            songs = self.agent.get_playlist()
        else:
            songs = self.agent.update_playlist(sid)
        for song in songs:
            try:
                self.queue.put_nowait(song)
            except Full:
                break

    def load_d(self):
        while True:
            song = self.queue.get()
            song['url'] = self.load(song['url'])
            self.player.add(song)
Example #24
class ThreadedWrapperHandler(WrapperHandler):
    """This handled uses a single background thread to dispatch log records
    to a specific other handler using an internal queue.  The idea is that if
    you are using a handler that requires some time to hand off the log records
    (such as the mail handler) and would block your request, you can let
    Logbook do that in a background thread.

    The threaded wrapper handler will automatically adopt the methods and
    properties of the wrapped handler.  All the values will be reflected:

    >>> twh = ThreadedWrapperHandler(TestHandler())
    >>> from logbook import WARNING
    >>> twh.level_name = 'WARNING'
    >>> twh.handler.level_name
    'WARNING'
    """
    _direct_attrs = frozenset(['handler', 'queue', 'controller'])

    def __init__(self, handler, maxsize=0):
        WrapperHandler.__init__(self, handler)
        self.queue = ThreadQueue(maxsize)
        self.controller = TWHThreadController(self)
        self.controller.start()

    def close(self):
        self.controller.stop()
        self.handler.close()

    def emit(self, record):
        try:
            self.queue.put_nowait(record)
        except Full:
            # silently drop
            pass
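TWHThreadController is the Logbook helper that drains the queue on a background thread. Roughly, and simplified rather than Logbook's exact code, it looks like this:

import threading

class TWHThreadController(object):
    """Drain the wrapper's queue and emit on the real handler (sketch)."""
    _sentinel = object()

    def __init__(self, wrapper_handler):
        self.wrapper_handler = wrapper_handler
        self.thread = None

    def start(self):
        self.thread = threading.Thread(target=self._target)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        if self.thread is not None:
            self.wrapper_handler.queue.put(self._sentinel)
            self.thread.join()
            self.thread = None

    def _target(self):
        while True:
            record = self.wrapper_handler.queue.get()
            if record is self._sentinel:
                break
            self.wrapper_handler.handler.handle(record)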
Example #25
class Source:
    def __init__(self, task_list, results, timeout, verbose = False):
        self.tasks = Queue()
        for task in task_list:
            self.tasks.put_nowait(task)

        self.results = results
        self.timeout = timeout
        self.verbose = verbose

    def start(self, worker_count):
        t0 = datetime.now()

        sink = Sink(self.results)
        self.workers = [ Worker(_+1, self.tasks, sink, self.timeout, self.verbose) for _ in range(worker_count) ]
        if self.verbose:
            print('[P] Starting workers.')
        for w in self.workers:
            w.t0 = t0
            w.start()
        ans = self.join_workers()
        if self.verbose:
            print('[P] Finished.')
        return ans

    def join_workers(self):
        try:
            for w in self.workers:
                w.join(20000)
            return True
        except KeyboardInterrupt:
            for w in self.workers:
                w.stop = True
            return False
Example #26
def generate_items():
	global items, symbols, goto_table

	item_queue = Queue()

	# Add the closure of the first production [S -> S'] to items
	start_production = productions[0]
	start_item = Item(0)
	start_item.insert_item_line(
		ItemLine(start_production.pid, 0, Symbol('$', 1)))
	start_item = closure(start_item)
	items.append(start_item)
	item_queue.put_nowait(start_item)

	while not item_queue.empty():
		current_item = item_queue.get_nowait()  # pop one Item
		for s in symbols:
			next_item = goto(current_item, s)
			# Check whether next_item is empty (i.e. has no entries) and whether it already exists in items; a custom check is needed here
			if next_item.item_lines:
				tmp = is_item_exist(next_item)
				if not tmp:
					items.append(next_item)
					item_queue.put_nowait(next_item)
				else:
					next_item = tmp
				
				# add to the goto table
				s_id = current_item.item_id

				if s_id not in goto_table:
					goto_table[s_id] = {}

				goto_table[s_id][s.value] = next_item.item_id  # create the edge
Example #27
class ProtonClient(object):
    def __init__(self, reactor, connection, connector, session):
        self.connector = connector
        self.connection = connection
        self.session = session
        self.sender = None
        self.links = []
        self._inbox = None
        self._outbox = Queue()
        self._reactor = reactor

    def _pushIncomingMessage(self, msg):
        try:
            self._inbox.put_nowait((self, msg))
        except AttributeError:
            # Inbox not set
            pass

    def _popPendingMessage(self):
        return self._outbox.get_nowait()

    def setInbox(self, queue):
        self._inbox = queue

    def send(self, msg):
        self._outbox.put_nowait(msg)
        self._reactor._activate(self.connector,
                                proton.PN_CONNECTOR_WRITABLE)

    def close(self):
        #TODO
        pass
Example #28
class AsyncPublisher(object):

    def __init__(self, callback):
        self.queue = Queue()
        self.callback = callback
        self._stop_sentinel = object()  # unique object so a queued None cannot stop the thread

    def start(self):
        self.thread = threading.Thread(target=self._process_queue)
        self.thread.setDaemon(True)
        self.thread.start()

    def on_entity(self, entity):
        self.queue.put(entity)

    def stop(self):
        self.queue.put_nowait(self._stop_sentinel)
        self.thread.join()

    def _process_queue(self):
        while True:
            try:
                entity = self.queue.get()
                if entity is self._stop_sentinel:
                    return
                self.callback(entity)
            except KeyError:
                pass
Example #29
class Pipe(object):
    """Define a thread-safe pipe object to transfer data between filters."""
    def __init__(self):
        """Create a Queue and an event to synchronize the filters."""
        self.queue = Queue()
        self.register = threading.Event()

    def open_register(self):
        """Declare that the pipe is open. Simply sets the register event."""
        self.register.set()

    def close_register(self):
        """Declare that the pipe is closed. Simply clears the register event."""
        self.register.clear()

    def push(self, data_packet):
        """Insert data into the pipe."""
        self.queue.put_nowait(data_packet)

    def pull(self):
        """Return data from the pipe."""
        return self.queue.get_nowait()

    def has_flow(self):
        """Return True if there are data items in the pipe."""
        return not self.queue.empty()

    def is_open(self):
        """Return True if the register event is set."""
        return self.register.is_set()
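Two filters connected by a Pipe might be wired up like this; the names are illustrative, and the consumer polls has_flow() because the register event only marks the pipe open or closed:

import threading
import time

pipe = Pipe()
pipe.open_register()

def source():
    for i in range(5):
        pipe.push(i)
    pipe.close_register()

def sink():
    while pipe.is_open() or pipe.has_flow():
        if pipe.has_flow():
            print(pipe.pull())
        else:
            time.sleep(0.01)

t = threading.Thread(target=sink)
t.start()
source()
t.join()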
Example #30
File: utils.py Project: numan/nydus
class ThreadPool(object):
    def __init__(self, workers=10):
        self.queue = Queue()
        self.workers = []
        self.tasks = []
        for worker in xrange(workers):
            self.workers.append(Worker(self.queue))

    def add(self, ident, func, args=None, kwargs=None):
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        task = (ident, func, args, kwargs)
        self.tasks.append(ident)
        self.queue.put_nowait(task)

    def join(self):
        for worker in self.workers:
            worker.start()

        results = defaultdict(list)
        for worker in self.workers:
            worker.join()
            for k, v in worker.results.iteritems():
                results[k].extend(v)
        return results
Example #31
class UIAHandler(COMObject):
	_com_interfaces_=[IUIAutomationEventHandler,IUIAutomationFocusChangedEventHandler,IUIAutomationPropertyChangedEventHandler,IUIAutomationNotificationEventHandler]

	def __init__(self):
		super(UIAHandler,self).__init__()
		self.MTAThreadInitEvent=threading.Event()
		self.MTAThreadInitException=None
		self.MTAThreadQueue=Queue()
		self.MTAThread=threading.Thread(target=self.MTAThreadFunc)
		self.MTAThread.daemon=True
		self.MTAThread.start()
		self.MTAThreadInitEvent.wait(2)
		if self.MTAThreadInitException:
			raise self.MTAThreadInitException

	def terminate(self):
		MTAThreadHandle=HANDLE(windll.kernel32.OpenThread(winKernel.SYNCHRONIZE,False,self.MTAThread.ident))
		self.MTAThreadQueue.put_nowait(None)
		#Wait for the MTA thread to die (while still message pumping)
		if windll.user32.MsgWaitForMultipleObjects(1,byref(MTAThreadHandle),False,200,0)!=0:
			log.debugWarning("Timeout or error while waiting for UIAHandler MTA thread")
		windll.kernel32.CloseHandle(MTAThreadHandle)
		del self.MTAThread

	def MTAThreadFunc(self):
		try:
			oledll.ole32.CoInitializeEx(None,comtypes.COINIT_MULTITHREADED) 
			isUIA8=False
			try:
				self.clientObject=CoCreateInstance(CUIAutomation8._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER)
				isUIA8=True
			except (COMError,WindowsError,NameError):
				self.clientObject=CoCreateInstance(CUIAutomation._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER)
			if isUIA8:
				# #8009: use appropriate interface based on highest supported interface.
				# #8338: made easier by traversing interfaces supported on Windows 8 and later in reverse.
				for interface in reversed(CUIAutomation8._com_interfaces_):
					try:
						self.clientObject=self.clientObject.QueryInterface(interface)
						break
					except COMError:
						pass
				# Windows 10 RS5 provides new performance features for UI Automation including event coalescing and connection recovery. 
				# Enable all of these where available.
				if isinstance(self.clientObject,IUIAutomation6):
					self.clientObject.CoalesceEvents=CoalesceEventsOptions_Enabled
					self.clientObject.ConnectionRecoveryBehavior=ConnectionRecoveryBehaviorOptions_Enabled
			log.info("UIAutomation: %s"%self.clientObject.__class__.__mro__[1].__name__)
			self.windowTreeWalker=self.clientObject.createTreeWalker(self.clientObject.CreateNotCondition(self.clientObject.CreatePropertyCondition(UIA_NativeWindowHandlePropertyId,0)))
			self.windowCacheRequest=self.clientObject.CreateCacheRequest()
			self.windowCacheRequest.AddProperty(UIA_NativeWindowHandlePropertyId)
			self.UIAWindowHandleCache={}
			self.baseTreeWalker=self.clientObject.RawViewWalker
			self.baseCacheRequest=self.windowCacheRequest.Clone()
			import UIAHandler
			self.ItemIndex_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemIndex_Property_GUID),u"ItemIndex",1)
			self.ItemCount_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemCount_Property_GUID),u"ItemCount",1)
			for propertyId in (UIA_FrameworkIdPropertyId,UIA_AutomationIdPropertyId,UIA_ClassNamePropertyId,UIA_ControlTypePropertyId,UIA_ProviderDescriptionPropertyId,UIA_ProcessIdPropertyId,UIA_IsTextPatternAvailablePropertyId,UIA_IsContentElementPropertyId,UIA_IsControlElementPropertyId):
				self.baseCacheRequest.addProperty(propertyId)
			self.baseCacheRequest.addPattern(UIA_TextPatternId)
			self.rootElement=self.clientObject.getRootElementBuildCache(self.baseCacheRequest)
			self.reservedNotSupportedValue=self.clientObject.ReservedNotSupportedValue
			self.ReservedMixedAttributeValue=self.clientObject.ReservedMixedAttributeValue
			self.pendingForegroundUIAElement=None
			self.currentForegroundUIAElement=None
			self.clientObject.AddFocusChangedEventHandler(self.baseCacheRequest,self)
			#self.clientObject.AddPropertyChangedEventHandler(self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self,UIAPropertyIdsToNVDAEventNames.keys())
			for x in UIAEventIdsToNVDAEventNames.iterkeys():  
				self.clientObject.addAutomationEventHandler(x,self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self)
			# #7984: add support for notification event (IUIAutomation5, part of Windows 10 build 16299 and later).
			if isinstance(self.clientObject, IUIAutomation5):
				self.clientObject.AddNotificationEventHandler(self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self)
		except Exception as e:
			self.MTAThreadInitException=e
		finally:
			self.MTAThreadInitEvent.set()
		while True:
			func=self.MTAThreadQueue.get()
			if func:
				try:
					func()
				except:
					log.error("Exception in function queued to UIA MTA thread",exc_info=True)
			else:
				break
		self.clientObject.RemoveAllEventHandlers()

	def _onForegroundChange(self):
		pendingForegroundUIAElement=self.pendingForegroundUIAElement
		if pendingForegroundUIAElement==self.currentForegroundUIAElement:
			return
		if self.currentForegroundUIAElement:
			try:
				self.clientObject.removePropertyChangedEventHandler(self.currentForegroundUIAElement,self)
			except COMError:
				# The old UIAElement died as the window was closed.
				# The system should forget the old event registration itself.
				pass
		try:
			self.clientObject.AddPropertyChangedEventHandler(pendingForegroundUIAElement,TreeScope_Subtree,self.baseCacheRequest,self,UIAPropertyIdsToNVDAEventNames.keys())
		except COMError:
			log.error("Could not register for UIA property change events for new foreground")
			self.currentForegroundUIAElement=None
		else:
			self.currentForegroundUIAElement=pendingForegroundUIAElement

	def onForegroundChange(self,hwnd):
		try:
			self.pendingForegroundUIAElement=self.clientObject.ElementFromHandle(hwnd)
		except COMError:
			log.debugWarning("Could not get a UIAElement from new foreground window")
			return
		# Event registration/unregistration must be always done from the MTA thread, otherwise deadlocks can occur with our UI.
		self.MTAThreadQueue.put_nowait(self._onForegroundChange)

	def IUIAutomationEventHandler_HandleAutomationEvent(self,sender,eventID):
		if not self.MTAThreadInitEvent.isSet():
			# UIAHandler hasn't finished initialising yet, so just ignore this event.
			return
		if eventID==UIA_MenuOpenedEventId and eventHandler.isPendingEvents("gainFocus"):
			# We don't need the menuOpened event if focus has been fired,
			# as focus should be more correct.
			return
		NVDAEventName=UIAEventIdsToNVDAEventNames.get(eventID,None)
		if not NVDAEventName:
			return
		if not self.isNativeUIAElement(sender):
			return
		window=self.getNearestWindowHandle(sender)
		if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window):
			return
		import NVDAObjects.UIA
		obj=NVDAObjects.UIA.UIA(UIAElement=sender)
		if (
			not obj
			or (NVDAEventName=="gainFocus" and not obj.shouldAllowUIAFocusEvent)
			or (NVDAEventName=="liveRegionChange" and not obj._shouldAllowUIALiveRegionChangeEvent)
		):
			return
		focus=api.getFocusObject()
		if obj==focus:
			obj=focus
		eventHandler.queueEvent(NVDAEventName,obj)

	def IUIAutomationFocusChangedEventHandler_HandleFocusChangedEvent(self,sender):
		if not self.MTAThreadInitEvent.isSet():
			# UIAHandler hasn't finished initialising yet, so just ignore this event.
			return
		if not self.isNativeUIAElement(sender):
			return
		import NVDAObjects.UIA
		if isinstance(eventHandler.lastQueuedFocusObject,NVDAObjects.UIA.UIA):
			lastFocus=eventHandler.lastQueuedFocusObject.UIAElement
			# Ignore duplicate focus events.
			# It seems that it is possible for compareElements to return True, even though the objects are different.
			# Therefore, don't ignore the event if the last focus object has lost its hasKeyboardFocus state.
			if self.clientObject.compareElements(sender,lastFocus) and lastFocus.currentHasKeyboardFocus:
				return
		window=self.getNearestWindowHandle(sender)
		if window and not eventHandler.shouldAcceptEvent("gainFocus",windowHandle=window):
			return
		obj=NVDAObjects.UIA.UIA(UIAElement=sender)
		if not obj or not obj.shouldAllowUIAFocusEvent:
			return
		eventHandler.queueEvent("gainFocus",obj)

	def IUIAutomationPropertyChangedEventHandler_HandlePropertyChangedEvent(self,sender,propertyId,newValue):
		# #3867: For now manually force this VARIANT type to empty to get around a nasty double free in comtypes/ctypes.
		# We also don't use the value in this callback.
		newValue.vt=VT_EMPTY
		if not self.MTAThreadInitEvent.isSet():
			# UIAHandler hasn't finished initialising yet, so just ignore this event.
			return
		NVDAEventName=UIAPropertyIdsToNVDAEventNames.get(propertyId,None)
		if not NVDAEventName:
			return
		if not self.isNativeUIAElement(sender):
			return
		window=self.getNearestWindowHandle(sender)
		if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window):
			return
		import NVDAObjects.UIA
		obj=NVDAObjects.UIA.UIA(UIAElement=sender)
		if not obj:
			return
		focus=api.getFocusObject()
		if obj==focus:
			obj=focus
		eventHandler.queueEvent(NVDAEventName,obj)

	def IUIAutomationNotificationEventHandler_HandleNotificationEvent(self,sender,NotificationKind,NotificationProcessing,displayString,activityId):
		if not self.MTAThreadInitEvent.isSet():
			# UIAHandler hasn't finished initialising yet, so just ignore this event.
			return
		import NVDAObjects.UIA
		obj=NVDAObjects.UIA.UIA(UIAElement=sender)
		if not obj:
			# Sometimes notification events can be fired on a UIAElement that has no windowHandle and does not connect through parents back to the desktop.
			# There is nothing we can do with these.
			return
		eventHandler.queueEvent("UIA_notification",obj, notificationKind=NotificationKind, notificationProcessing=NotificationProcessing, displayString=displayString, activityId=activityId)

	def _isUIAWindowHelper(self,hwnd):
		# UIA in NVDA's process freezes in Windows 7 and below
		processID=winUser.getWindowThreadProcessID(hwnd)[0]
		if windll.kernel32.GetCurrentProcessId()==processID:
			return False
		import NVDAObjects.window
		windowClass=NVDAObjects.window.Window.normalizeWindowClassName(winUser.getClassName(hwnd))
		# For certain window classes, we always want to use UIA.
		if windowClass in goodUIAWindowClassNames:
			return True
		# allow the appModule for the window to also choose if this window is good
		# An appModule should be able to override bad UIA class names as prescribed by core
		appModule=appModuleHandler.getAppModuleFromProcessID(processID)
		if appModule and appModule.isGoodUIAWindow(hwnd):
			return True
		# There are certain window classes that just had bad UIA implementations
		if windowClass in badUIAWindowClassNames:
			return False
		if windowClass=="NetUIHWND":
			parentHwnd=winUser.getAncestor(hwnd,winUser.GA_ROOT)
			# #2816: Outlook 2010 auto complete does not fire enough UIA events, IAccessible is better.
			# #4056: Combo boxes in Office 2010 Options dialogs don't expose a name via UIA, but do via MSAA.
			if winUser.getClassName(parentHwnd) in {"Net UI Tool Window","NUIDialog"}:
				return False
		# allow the appModule for the window to also choose if this window is bad
		if appModule and appModule.isBadUIAWindow(hwnd):
			return False
		# Ask the window if it supports UIA natively
		res=windll.UIAutomationCore.UiaHasServerSideProvider(hwnd)
		if res:
			# the window does support UIA natively, but
			# Microsoft Word should not use UIA unless we can't inject or the user explicitly chose to use UIA with Microsoft word
			if windowClass=="_WwG" and not (config.conf['UIA']['useInMSWordWhenAvailable'] or not appModule.helperLocalBindingHandle):
				return False
		return bool(res)

	def isUIAWindow(self,hwnd):
		now=time.time()
		v=self.UIAWindowHandleCache.get(hwnd,None)
		if not v or (now-v[1])>0.5:
			v=self._isUIAWindowHelper(hwnd),now
			self.UIAWindowHandleCache[hwnd]=v
		return v[0]

	def getNearestWindowHandle(self,UIAElement):
		if hasattr(UIAElement,"_nearestWindowHandle"):
			# Called previously. Use cached result.
			return UIAElement._nearestWindowHandle
		try:
			processID=UIAElement.cachedProcessID
		except COMError:
			return None
		appModule=appModuleHandler.getAppModuleFromProcessID(processID)
		# WDAG (Windows Defender application Guard) UIA elements should be treated as being from a remote machine, and therefore their window handles are completely invalid on this machine.
		# Therefore, jump all the way up to the root of the WDAG process and use that window handle as it is local to this machine.
		if appModule.appName==WDAG_PROCESS_NAME:
			condition=UIAUtils.createUIAMultiPropertyCondition({UIA_ClassNamePropertyId:[u'ApplicationFrameWindow',u'CabinetWClass']})
			walker=self.clientObject.createTreeWalker(condition)
		else:
			# Not WDAG, just walk up to the nearest valid windowHandle
			walker=self.windowTreeWalker
		try:
			new=walker.NormalizeElementBuildCache(UIAElement,self.windowCacheRequest)
		except COMError:
			return None
		try:
			window=new.cachedNativeWindowHandle
		except COMError:
			window=None
		# Cache for future use to improve performance.
		UIAElement._nearestWindowHandle=window
		return window

	def isNativeUIAElement(self,UIAElement):
		#Due to issues dealing with UIA elements coming from the same process, we do not class these UIA elements as usable.
		#It seems to be safe enough to retrieve the cached processID, but using tree walkers or fetching other properties causes a freeze.
		try:
			processID=UIAElement.cachedProcessId
		except COMError:
			return False
		if processID==windll.kernel32.GetCurrentProcessId():
			return False
		# Whether this is a native element depends on whether its window natively supports UIA.
		windowHandle=self.getNearestWindowHandle(UIAElement)
		if windowHandle:
			if self.isUIAWindow(windowHandle):
				return True
			if winUser.getClassName(windowHandle)=="DirectUIHWND" and "IEFRAME.dll" in UIAElement.cachedProviderDescription and UIAElement.currentClassName in ("DownloadBox", "accessiblebutton", "DUIToolbarButton", "PushButton"):
				# This is the IE 9 downloads list.
				# #3354: UiaHasServerSideProvider returns false for the IE 9 downloads list window,
				# so we'd normally use MSAA for this control.
				# However, its MSAA implementation is broken (fires invalid events) if UIA is initialised,
				# whereas its UIA implementation works correctly.
				# Therefore, we must use UIA here.
				return True
		return False
Example #32
    def create():
        """
        Creates a new RbacRoleProfile.
        
        A role profile is a cache which speeds up child/parent lookups for
        roles.
        """
        logger.debug("Creating RbacRoleProfile")
        if settings.USE_TZ:
            currentTime = datetime.utcnow().replace(tzinfo=utc)
        else:
            currentTime = datetime.now()

        adj_list = {}
        bulk_list = []
        pairs = set()

        with transaction.atomic():
            #create an adjacency list of the role hierarchy
            for parent, child in RbacRole.objects.exclude(
                    children=None).values_list('id', 'children'):
                if parent in adj_list:
                    adj_list[parent].append(child)
                else:
                    adj_list[parent] = [
                        child,
                    ]

            #Search for all child nodes which can be reached from parent through
            # a breadth-first-search.
            #Instead of coloring we're using a dict which keeps track of the
            # discovered nodes (@see: BFS)
            for parent in adj_list:
                child_queue = Queue()
                found = {}

                for child in adj_list[parent]:
                    child_queue.put_nowait(child)
                    found[child] = True

                while not child_queue.empty():
                    node = child_queue.get_nowait()
                    pair = (parent, node)
                    if pair not in pairs:
                        pairs.add(pair)
                    else:
                        continue

                    bulk_list.append(
                        RbacRoleProfile(parent_id=parent,
                                        child_id=node,
                                        touch_date=currentTime,
                                        create_date=currentTime))
                    if node in adj_list:
                        for child in adj_list[node]:
                            if child not in found:
                                child_queue.put_nowait(child)

            #clear previous cache
            RbacRoleProfile.objects.all().delete()
            RbacRoleProfile.objects.bulk_create(bulk_list)

        logger.debug("Finished creating RbacRoleProfile")
Example #33
class HabitatUploader(object):
    ''' 
    Queued Habitat Telemetry Uploader class
    This performs uploads to the Habitat servers, and also handles generation of flight documents.

    Incoming telemetry packets are fed into a queue, which is checked regularly.
    If a new callsign is sighted, a payload document is created in the Habitat DB.
    The telemetry data is then converted into a UKHAS-compatible format, before being added to a queue to be
    uploaded as network speed permits.

    If an upload attempt times out, the packet is discarded.
    If the queue fills up (probably indicating no network connection, and a fast packet downlink rate),
    it is immediately emptied, to avoid upload of out-of-date packets.

    Note that this uploader object is intended to handle telemetry from multiple sondes
    '''

    # We require the following fields to be present in the incoming telemetry dictionary data
    REQUIRED_FIELDS = [
        'frame', 'id', 'datetime', 'lat', 'lon', 'alt', 'temp', 'type', 'freq',
        'freq_float', 'datetime_dt'
    ]

    def __init__(self,
                 user_callsign='N0CALL',
                 user_position=None,
                 user_antenna="",
                 payload_callsign_override=None,
                 synchronous_upload_time=30,
                 callsign_validity_threshold=5,
                 upload_queue_size=16,
                 upload_timeout=10,
                 upload_retries=5,
                 upload_retry_interval=0.25,
                 user_position_update_rate=6,
                 inhibit=False):
        """ Initialise a Habitat Uploader object.

        Args:
            user_callsign (str): Callsign of the uploader.
            user_position (tuple): Optional - a tuple consisting of (lat, lon, alt), which if populated,
                is used to plot the listener's position on the Habitat map, both when this class is initialised, and
                when a new sonde ID is observed.

            payload_callsign_override (str): Override the payload callsign in the uploaded sentence with this value.
                WARNING: This will horribly break the tracker map if multiple sondes are uploaded under the same callsign.
                USE WITH CAUTION!!!

            synchronous_upload_time (int): Upload the most recent telemetry when time.time()%synchronous_upload_time == 0
                This is done in an attempt to get multiple stations uploading the same telemetry sentence simultaneously,
                and also acts as decimation on the number of sentences uploaded to Habitat.
            callsign_validity_threshold (int): Only upload telemetry data if the callsign has been observed more than N times. Default = 5

            upload_queue_size (int): Maximum number of sentences to keep in the upload queue. If the queue is filled,
                it will be emptied (discarding the queue contents).
            upload_timeout (int): Timeout (Seconds) when performing uploads to Habitat. Default: 10 seconds.
            upload_retries (int): Retry an upload up to this many times. Default: 5
            upload_retry_interval (int): Time interval between upload retries. Default: 0.25 seconds.

            user_position_update_rate (int): Time interval between automatic station position updates, hours.
                Set to 6 hours by default, updating any more often than this is not really useful.

            inhibit (bool): Inhibit all uploads. Mainly intended for debugging.

        """

        self.user_callsign = user_callsign
        self.user_position = user_position
        self.user_antenna = user_antenna
        self.payload_callsign_override = payload_callsign_override
        self.upload_timeout = upload_timeout
        self.upload_retries = upload_retries
        self.upload_retry_interval = upload_retry_interval
        self.upload_queue_size = upload_queue_size
        self.synchronous_upload_time = synchronous_upload_time
        self.callsign_validity_threshold = callsign_validity_threshold
        self.inhibit = inhibit
        self.user_position_update_rate = user_position_update_rate

        # Our two Queues - one to hold sentences to be uploaded, the other to temporarily hold
        # input telemetry dictionaries before they are converted and processed.
        self.habitat_upload_queue = Queue(upload_queue_size)
        self.input_queue = Queue()

        # Dictionary where we store sorted telemetry data for upload when required.
        # Elements will be named after payload IDs, and will contain:
        #   'count' (int): Number of times this callsign has been observed. Uploads will only occur when
        #       this number rises above callsign_validity_threshold.
        #   'data' (Queue): A queue of telemetry sentences to be uploaded. When the upload timer fires,
        #       this queue will be dumped, and the most recent telemetry uploaded.
        #   'habitat_document' (bool): Indicates if a habitat document has been created for this payload ID.
        #   'listener_updated' (bool): Indicates if the listener position has been updated for the start of this ID's flight.
        self.observed_payloads = {}

        # Record of when we last uploaded a user station position to Habitat.
        self.last_user_position_upload = 0

        # Lock for dealing with telemetry uploads.
        self.upload_lock = Lock()

        # Start the uploader thread.
        self.upload_thread_running = True
        self.upload_thread = Thread(target=self.habitat_upload_thread)
        self.upload_thread.start()

        # Start the input queue processing thread.
        self.input_processing_running = True
        self.input_thread = Thread(target=self.process_queue)
        self.input_thread.start()

        self.timer_thread_running = True
        self.timer_thread = Thread(target=self.upload_timer)
        self.timer_thread.start()

    def user_position_upload(self):
        """ Upload the the station position to Habitat. """
        if self.user_position is not None:
            _success = uploadListenerPosition(self.user_callsign,
                                              self.user_position[0],
                                              self.user_position[1],
                                              version=auto_rx_version,
                                              antenna=self.user_antenna)
            self.last_user_position_upload = time.time()
            return _success
        else:
            return False

    def habitat_upload(self, sentence):
        ''' Upload a UKHAS-standard telemetry sentence to Habitat

        Args:
            sentence (str): The UKHAS-standard telemetry sentence to upload.
        '''

        if self.inhibit:
            self.log_info("Upload inhibited.")
            return

        # Generate payload to be uploaded
        _sentence_b64 = b64encode(sentence.encode(
            'ascii'))  # Encode to ASCII to be able to perform B64 encoding...
        _date = datetime.datetime.utcnow().isoformat("T") + "Z"
        _user_call = self.user_callsign

        _data = {
            "type": "payload_telemetry",
            "data": {
                "_raw": _sentence_b64.decode(
                    'ascii'
                )  # ... but decode back to a string to enable JSON serialisation.
            },
            "receivers": {
                _user_call: {
                    "time_created": _date,
                    "time_uploaded": _date,
                },
            },
        }

        # The URL to upload to.
        _url = HABITAT_URL + "habitat/_design/payload_telemetry/_update/add_listener/%s" % sha256(
            _sentence_b64).hexdigest()

        # Delay for a random amount of time between 0 and upload_retry_interval*2 seconds.
        time.sleep(random.random() * self.upload_retry_interval * 2.0)

        _retries = 0

        # When uploading, we have three possible outcomes:
        # - Can't connect. No point immediately re-trying in this situation.
        # - The packet is uploaded successfully (201 / 403)
        # - There is an upload conflict on the Habitat DB end (409). We can retry and it might work.
        while _retries < self.upload_retries:
            # Run the request.
            try:
                headers = {"User-Agent": "autorx-" + auto_rx_version}
                _req = requests.put(_url,
                                    data=json.dumps(_data),
                                    timeout=self.upload_timeout,
                                    headers=headers)
            except Exception as e:
                self.log_error("Upload Failed: %s" % str(e))
                break

            if _req.status_code == 201 or _req.status_code == 403:
                # 201 = Success, 403 = Success, sentence has already been seen by others.
                self.log_info("Uploaded sentence to Habitat successfully: %s" %
                              sentence.strip())
                _upload_success = True
                break
            elif _req.status_code == 409:
                # 409 = Upload conflict (server busy). Sleep for a moment, then retry.
                self.log_debug("Upload conflict.. retrying.")
                time.sleep(random.random() * self.upload_retry_interval)
                _retries += 1
            else:
                self.log_error(
                    "Error uploading to Habitat. Status Code: %d %s." %
                    (_req.status_code, _req.text))
                break

        if _retries == self.upload_retries:
            self.log_error("Upload conflict not resolved with %d retries." %
                           self.upload_retries)

        return

    def habitat_upload_thread(self):
        ''' Handle uploading of packets to Habitat '''

        self.log_debug("Started Habitat Uploader Thread.")

        while self.upload_thread_running:

            if self.habitat_upload_queue.qsize() > 0:
                # If the queue is completely full, jump to the most recent telemetry sentence.
                if self.habitat_upload_queue.qsize() == self.upload_queue_size:
                    while not self.habitat_upload_queue.empty():
                        sentence = self.habitat_upload_queue.get()

                    self.log_warning(
                        "Uploader queue was full - possible connectivity issue."
                    )
                else:
                    # Otherwise, get the first item in the queue.
                    sentence = self.habitat_upload_queue.get()

                # Attempt to upload it.
                self.habitat_upload(sentence)

            else:
                # Wait for a short time before checking the queue again.
                time.sleep(0.1)

        self.log_debug("Stopped Habitat Uploader Thread.")

    def handle_telem_dict(self, telem, immediate=False):
        """ Convert a telemetry dict into a UKHAS sentence and upload it,
        creating a Habitat payload document for the payload if required. """
        # Try and convert it to a UKHAS sentence
        try:
            _sentence = sonde_telemetry_to_sentence(telem)
        except Exception as e:
            self.log_error("Error converting telemetry to sentence - %s" %
                           str(e))
            return

        _callsign = "RS_" + telem['id']

        # Wait for the upload_lock to be available, to ensure we don't end up with
        # race conditions resulting in multiple payload docs being created.
        self.upload_lock.acquire()

        # Create a habitat document if one does not already exist:
        if not self.observed_payloads[telem['id']]['habitat_document']:
            # Check if there has already been telemetry from this ID observed on Habhub
            _document_exists = check_callsign(_callsign)
            # If so, we don't need to create a new document
            if _document_exists:
                self.observed_payloads[telem['id']]['habitat_document'] = True
            else:
                # Otherwise, we attempt to create a new document.
                if self.inhibit:
                    # If we have an upload inhibit, don't create a payload doc.
                    _created = True
                else:
                    _created = initPayloadDoc(
                        _callsign,
                        description="Meteorology Radiosonde",
                        frequency=telem['freq_float'])

                if _created:
                    self.observed_payloads[
                        telem['id']]['habitat_document'] = True
                else:
                    self.log_error("Error creating payload document!")
                    self.upload_lock.release()
                    return

        if immediate:
            self.log_info(
                "Performing immediate upload for first telemetry sentence of %s."
                % telem['id'])
            self.habitat_upload(_sentence)

        else:
            # Attempt to add it to the habitat uploader queue.
            try:
                self.habitat_upload_queue.put_nowait(_sentence)
            except Exception as e:
                self.log_error("Error adding sentence to queue: %s" % str(e))

        self.upload_lock.release()

    def upload_timer(self):
        """ Add packets to the habitat upload queue if it is time for us to upload. """

        while self.timer_thread_running:
            if int(time.time()) % self.synchronous_upload_time == 0:
                # Time to upload!
                for _id in self.observed_payloads.keys():
                    # If no data, continue...
                    if self.observed_payloads[_id]['data'].empty():
                        continue
                    else:
                        # Otherwise, dump the queue and keep the latest telemetry.
                        while not self.observed_payloads[_id]['data'].empty():
                            _telem = self.observed_payloads[_id]['data'].get()

                        self.handle_telem_dict(_telem)

                # Sleep a second so we don't hit the synchronous upload time again.
                time.sleep(1)
            else:
                # Not yet time to upload, wait for a bit.
                time.sleep(0.1)

    def process_queue(self):
        """ Process packets from the input queue.

        This thread handles packets from the input queue (provided by the decoders).
        Packets are sorted by ID, and a dictionary entry is created for each new payload.

        """

        while self.input_processing_running:
            # Process everything in the queue.
            while self.input_queue.qsize() > 0:
                # Grab latest telem dictionary.
                _telem = self.input_queue.get_nowait()

                _id = _telem['id']

                if _id not in self.observed_payloads:
                    # We haven't seen this ID before, so create a new dictionary entry for it.
                    self.observed_payloads[_id] = {
                        'count': 1,
                        'data': Queue(),
                        'habitat_document': False,
                        'listener_updated': False
                    }
                    self.log_debug(
                        "New Payload %s. Not observed enough to allow upload."
                        % _id)
                    # However, we don't yet add anything to the queue for this payload...
                else:
                    # We have seen this payload before!
                    # Increment the 'seen' counter.
                    self.observed_payloads[_id]['count'] += 1

                    # If we have seen this particular ID enough times, add the data to the ID's queue.
                    if self.observed_payloads[_id][
                            'count'] >= self.callsign_validity_threshold:

                        # If this is the first time we have observed this payload, update the listener position.
                        if (not self.observed_payloads[_id]['listener_updated']) \
                                and (self.user_position is not None):
                            self.observed_payloads[_id]['listener_updated'] = \
                                self.user_position_upload()
                            # Because receiving balloon telemetry appears to be a competition, immediately upload the
                            # first valid position received.
                            self.handle_telem_dict(_telem, immediate=True)
                        else:
                            # Otherwise, add the telemetry to the upload queue
                            self.observed_payloads[_id]['data'].put(_telem)

                    else:
                        self.log_debug(
                            "Payload ID %s not observed enough to allow upload."
                            % _id)

            # If we haven't uploaded our station position recently, re-upload it.
            if (time.time() - self.last_user_position_upload
                ) > self.user_position_update_rate * 3600:
                self.user_position_upload()

            time.sleep(0.1)

    def add(self, telemetry):
        """ Add a dictionary of telemetry to the input queue. 

        Args:
            telemetry (dict): Telemetry dictionary to add to the input queue.

        """

        # Check the telemetry dictionary contains the required fields.
        for _field in self.REQUIRED_FIELDS:
            if _field not in telemetry:
                self.log_error("JSON object missing required field %s" %
                               _field)
                return

        # Add it to the queue if we are running.
        if self.input_processing_running:
            self.input_queue.put(telemetry)
        else:
            self.log_error("Processing not running, discarding.")

    def close(self):
        ''' Shutdown uploader and processing threads. '''
        self.log_debug("Waiting for threads to close...")
        self.input_processing_running = False
        self.timer_thread_running = False
        self.upload_thread_running = False

        # Wait for all threads to close.
        if self.upload_thread is not None:
            self.upload_thread.join()

        if self.timer_thread is not None:
            self.timer_thread.join()

        if self.input_thread is not None:
            self.input_thread.join()

    def log_debug(self, line):
        """ Helper function to log a debug message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.debug("Habitat - %s" % line)

    def log_info(self, line):
        """ Helper function to log an informational message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.info("Habitat - %s" % line)

    def log_error(self, line):
        """ Helper function to log an error message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.error("Habitat - %s" % line)

    def log_warning(self, line):
        """ Helper function to log a warning message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.warning("Habitat - %s" % line)
class LaborThread(Thread):
    """Worker thread that consumes tasks from its own single-slot task queue."""

    #----------------------------------------------------------------------
    def __init__(self, result_queue, master, clean_mod=True, *args, **kargs):
        """Constructor"""
        Thread.__init__(self,
                        name='ThreadPool-Labor-' + uuid1().hex,
                        *args,
                        **kargs)

        self._master = master

        self._clean_mod = clean_mod

        self._result_queue = result_queue

        self._startworkingflag_ = True

        self._task_queue = Queue(1)

        self._count_lock = Lock()

    #----------------------------------------------------------------------
    def get_result_queue(self):
        """Return the shared result queue."""
        return self._result_queue

    #----------------------------------------------------------------------
    def get_task_queue(self):
        """Return this worker's task queue."""
        return self._task_queue

    #----------------------------------------------------------------------
    def feed(self, function, *vargs, **kwargs):
        """Queue a (function, args, kwargs) task; return False if the worker is busy."""
        try:
            self._task_queue.put_nowait(tuple([function, vargs, kwargs]))
            return True
        except Full:
            #format_exc()
            return False

    #----------------------------------------------------------------------
    def run(self):
        """Main loop: pull a task, execute it, and push a result dict."""
        while self._startworkingflag_:
            #pprint('Running')
            try:
                _task = self._task_queue.get(timeout=3)
                result = {}
                result['from'] = self.name
                result['state'] = False
                result['result'] = None
                result['current_task'] = str(_task)
                result['exception'] = tuple()
                try:
                    ret = self._process_task(_task)
                    result['state'] = True
                    result['result'] = ret
                    #self._result_queue.put(result)
                except Exception as e:
                    result['state'] = False
                    result['result'] = None
                    exception_i = (str(type(e)), str(e))
                    result['exception'] = exception_i
                finally:
                    if self._clean_mod:
                        _result = {}
                        _result['state'] = result['state']
                        _result['result'] = result['result']
                        result = _result
                    self._result_queue.put(result)

                self._count_lock.acquire()
                self._master._executed_task_count = \
                    self._master._executed_task_count + 1
                self._count_lock.release()
            except Empty:
                pass

    #----------------------------------------------------------------------
    def _process_task(self, task):
        """Execute a queued (function, args, kwargs) tuple and return its result."""
        return task[0](*task[1], **task[2])

    #----------------------------------------------------------------------
    def stop(self):
        """Ask the worker loop to exit after the current task."""
        self._startworkingflag_ = False

    #----------------------------------------------------------------------
    def __del__(self):
        """"""
        self.stop()

    #----------------------------------------------------------------------
    def _exception_process(self):
        """Placeholder for subclass-specific exception handling."""
Ejemplo n.º 35
0
class VelbusDev:
    """
    Velbus domogik plugin
    """
    def __init__(self, log, cb_send_xpl, cb_send_trig, stop):
        """ Init object
            @param log : log instance
            @param cb_send_xpl : callback
            @param cb_send_trig : callback
            @param stop : threading.Event used to stop the plugin threads
        """
        self._log = log
        self._callback = cb_send_xpl
        self._cb_send_trig = cb_send_trig
        self._stop = stop
        self._dev = None
        self._devtype = 'serial'
        self._nodes = {}

        # Queue for writing packets to the Velbus device
        self.write_rfx = Queue()

        # Thread to process queue
        write_process = threading.Thread(None, self.write_daemon,
                                         "write_packets_process", (), {})
        write_process.start()

    def open(self, device, devicetype):
        """ Open the device (it is only opened once)
            @param device : the device string to open
            @param devicetype : 'serial' or 'socket'
        """
        self._devtype = devicetype
        try:
            self._log.info("Try to open VELBUS: %s" % device)
            if devicetype == 'socket':
                addr = device.split(':')
                addr = (addr[0], int(addr[1]))
                self._dev = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self._dev.connect(addr)
            else:
                self._dev = serial.Serial(device, 38400, timeout=0)
            self._log.info("VELBUS opened")
        except:
            error = "Error while opening Velbus : %s. Check if it is the good device or if you have the good permissions on it." % device
            raise VelbusException(error)

    def close(self):
        """ Close the open device
        """
        self._log.info("Close VELBUS")
        try:
            self._dev.close()
        except:
            error = "Error while closing device"
            raise VelbusException(error)

    def scan(self):
        """ Scan the bus by sending a module type request to every address
        """
        self._log.info("Starting the bus scan")
        for add in range(0, 255):
            self.send_moduletyperequest(add)
        self._log.info("Bus scan finished")

    def send_shutterup(self, address, channel):
        """ Send shutter up message
        """
        data = chr(0x05) + self._blinchannel_to_byte(channel) + chr(
            0x00) + chr(0x00) + chr(0x00)
        self.write_packet(address, data)

    def send_shutterdown(self, address, channel):
        """ Send shutter down message
        """
        data = chr(0x06) + self._blinchannel_to_byte(channel) + chr(
            0x00) + chr(0x00) + chr(0x00)
        self.write_packet(address, data)

    def send_level(self, address, channel, level):
        """ Set the level for a device
            if relay => level can only be 0 or 255
            if dimmer => level can be anything from 0 to 100
        """
        address = int(address)
        self._log.debug("received set_level for {0}".format(address))
        if address in self._nodes.keys():
            mtype = self._nodes[address]
        else:
            self._log.error(
                "Request to set a level on a device, but the device is not known. address {0}"
                .format(address))
            return
        try:
            ltype = MODULE_TYPES[mtype]["subtype"]
        except KeyError:
            self._log.error(
                "Request to set a level on a device, but the subtype is not known. mtype {0}"
                .format(mtype))
            return
        if ltype == "DIMMER":
            """ Send dimemr value
            - speed = 1 second
            """
            level = (255 / 100) * level
            data = chr(0x07) + self._channels_to_byte(channel) + chr(
                int(level)) + chr(0x00) + chr(0x01)
            self.write_packet(address, data)
        elif int(level) == 255 and ltype == "RELAY":
            # Send relay on message
            data = chr(0x02) + self._channels_to_byte(channel)
            self.write_packet(address, data)
        elif int(level) == 0 and ltype == "RELAY":
            # Send relay off message
            data = chr(0x01) + self._channels_to_byte(channel)
            self.write_packet(address, data)
        else:
            self._log.error(
                "This method should only be called for dimmers or relays, with a level from 0 to 255"
            )
        return

    def send_moduletyperequest(self, address):
        """ Request module type
        """
        self.write_packet(address, None)

    def write_packet(self, address, data):
        """ Put a packet in the write queue
        """
        self._log.debug("put packet for {0} in send queue ({1})".format(
            address, data))
        self.write_rfx.put_nowait({"address": address, "data": data})

    def write_daemon(self):
        """ Handle the write queue
        """
        self._log.info("write daemon")
        while not self._stop.isSet():
            res = self.write_rfx.get(block=True)
            self._log.debug("start sending packet to {0}".format(
                hex(int(res["address"]))))
            # start (8bit)
            packet = chr(0x0F)
            # priority (8bit, F8=high, FB=low)
            if res["data"] == None:
                packet += chr(0xFB)
            else:
                packet += chr(0xF8)
            # address (8bit)
            packet += chr(int(res["address"]))
            if res["data"] == None:
                # module type request
                packet += chr(0x40)
            else:
                packet += chr(len(res["data"]))
                # data
                packet += res["data"]
            # checksum (8bit)
            packet += self._checksum(packet)
            # end byte (8bit)
            packet += chr(0x04)
            self._log.debug(packet.encode('hex'))
            # send
            if self._devtype == 'socket':
                self._dev.send(packet)
            else:
                self._dev.write(packet)
            # sleep for 60ms
            self._stop.wait(0.06)

    def listen(self, stop):
        """ Listen thread for incomming VELBUS messages
        """
        self._log.info("Start listening VELBUS")
        # infinite
        try:
            while not stop.isSet():
                self.read()
        except:
            error = "Error while reading velbus device (disconnected ?) : %s" % traceback.format_exc(
            )
            print(error)
            self._log.error(error)
            return

    def read(self):
        """ Read data from the velbus line
        """
        if self._devtype == 'socket':
            data = self._dev.recv(9999)
        else:
            data = self._dev.read(9999)
        if len(data) >= 6:
            if ord(data[0]) == 0x0f:
                size = ord(data[3]) & 0x0F
                self._parser(data[0:6 + size])

    def _checksum(self, data):
        """
           Calculate the velbus checksum
        """
        assert isinstance(data, str)
        __checksum = 0
        for data_byte in data:
            __checksum += ord(data_byte)
        __checksum = -(__checksum % 256) + 256
        try:
            __checksum = chr(__checksum)
        except ValueError:
            __checksum = chr(0)
        return __checksum

    def _parser(self, data):
        """
           parse the velbus packet
        """
        assert isinstance(data, str)
        assert len(data) > 0
        assert len(data) >= 6
        assert ord(data[0]) == 0x0f
        self._log.debug("starting parser: %s" % data.encode('hex'))
        if len(data) > 14:
            self._log.warning(
                "Velbus message: maximum %s bytes, this one is %s",
                str(14), str(len(data)))
            return
        if ord(data[-1]) != 0x04:
            self._log.warning("Velbus message: end byte not correct")
            return
        if ord(data[1]) != 0xfb and ord(data[1]) != 0xf8:
            self._log.warning("Velbus message: unrecognized priority")
            return
        data_size = ord(data[3]) & 0x0F
        if data_size + 6 != len(data):
            self._log.warning(
                "data size field does not match actual length of message")
            return
        if not self._checksum(data[:-2]) == data[-2]:
            self._log.warning("Packet has no valid checksum")
            return
        if data_size > 0:
            if ord(data[4]) in MSG_TYPES:
                # lookup the module type
                try:
                    if ord(data[2]) in self._nodes.keys():
                        mtype = self._nodes[ord(data[2])]
                    else:
                        mtype = None
                except KeyError:
                    mtype = None
                if mtype:
                    self._log.debug(
                        "Received message with type: '%s' address: %s module: %s(%s)"
                        % (MSG_TYPES[ord(data[4])], ord(
                            data[2]), MODULE_TYPES[mtype]['id'], mtype))
                else:
                    self._log.debug(
                        "Received message with type: '%s' address: %s module: UNKNOWN"
                        % (MSG_TYPES[ord(data[4])], ord(data[2])))
                # first try the module-specific parser
                parsed = False
                if mtype:
                    try:
                        methodcall = getattr(
                            self,
                            "_process_{0}_{1}".format(ord(data[4]), mtype))
                        methodcall(data)
                        parsed = True
                    except AttributeError:
                        self._log.debug(
                            "Module-specific parser for this message type not implemented"
                        )
                if not parsed:
                    try:
                        methodcall = getattr(
                            self, "_process_{0}".format(ord(data[4])))
                        methodcall(data)
                    except AttributeError:
                        self._log.debug("Messagetype unimplemented {0}".format(
                            ord(data[4])))
            else:
                self._log.warning(
                    "Received message with unknown type {0}".format(
                        ord(data[4])))
        else:
            if (ord(data[3]) & 0x40 == 0x40):
                self._log.debug("Received module type request")
            else:
                self._log.warning(
                    "zero sized message received without rtr set")

# Process the received Velbus messages
# The naming format is:
#   _process_<messageId> => general parser for this message type
#   _process_<messageId>_<moduleType> => parser specifically for this module type

    def _process_255(self, data):
        """
           Process a 255 Message
           Node type => send out as answer on a module_type_request
        """
        naddress = ord(data[2])
        ntype = ord(data[5])
        self._log.info(
            "Found node with address {0} and module_type {1}".format(
                str(naddress), MODULE_TYPES[ntype]['id']))
        self._nodes[naddress] = ntype

    def _process_251_8(self, data):
        """
           Process a 251 Message
           Specifically for VMB4RY
           Switch status => send out when a relay is switched
        """
        naddress = ord(data[2])
        #chan = self._blinchannel_to_byte(data[5])
        status = ord(data[7])

        #self._callback("lighting.device",
        #           {"device" : device,
        #            "level" : level})

    def _process_251(self, data):
        """
           Process a 251 Message
           Switch status => send out when a relay is switched
        """
        for channel in self._byte_to_channels(data[5]):
            address = str(ord(data[2]))
            channel = str(channel)
            level = -1
            if (ord(data[7]) & 0x03) == 0:
                level = 0
            if (ord(data[7]) & 0x03) == 1:
                level = 255
            if level != -1:
                self._callback(
                    "lighting.device", {
                        "device": str(ord(data[2])),
                        "channel": str(channel),
                        "level": level
                    })

    def _process_238(self, data):
        """
           Process a 238 Message
           Dimmer status => send out when the dimmer status is changed
        """
        # Scale 0-255 to 0-100; multiply first so Python 2 integer
        # division does not truncate the factor (100 / 255) to 0.
        level = ord(data[7]) * 100 / 255
        if level != -1:
            self._callback(
                "lighting.device", {
                    "device": str(ord(data[2])),
                    "channel": str(ord(data[5]) - 1),
                    "level": level
                })

    def _process_190(self, data):
        device = ord(data[2])
        channel = (ord(data[5]) & 3) + 1
        pulses = (ord(data[5]) >> 2) * 100
        counter = (ord(data[6]) << 24) + (ord(data[7]) << 16) + (
            ord(data[8]) << 8) + ord(data[9])
        kwh = float(float(counter) / pulses)
        delay = (ord(data[10]) << 8) + ord(data[11])
        watt = float((1000 * 1000 * 3600) / (delay * pulses))
        # transmit kwh
        self._callback(
            "sensor.basic", {
                "device": device,
                "channel": channel,
                "type": "energy",
                "units": "kWh",
                "current": str(kwh)
            })
        # transmit watt
        self._callback(
            "sensor.basic", {
                "device": device,
                "channel": channel,
                "type": "power",
                "units": "W",
                "current": str(watt)
            })

    def _process_184(self, data):
        """
           Process a 184 Message
           Dimmer channel status => send out when the dimmer status is changed
        """
        for channel in self._byte_to_channels(data[5]):
            # Scale 0-255 to 0-100 (multiply first to avoid Python 2
            # integer division truncating the factor to 0).
            level = ord(data[7]) * 100 / 255
            if level != -1:
                self._callback(
                    "lighting.device", {
                        "device": str(ord(data[2])),
                        "channel": str(channel),
                        "level": level
                    })

    def _process_0(self, data):
        """
           Process a 0 Message
           switch status => send out when an input (switch changed)
           HIGH = just pressed
           LOW = just released
           LONG = long pressed
        """
        device = str(ord(data[2]))
        chanpres = self._byte_to_channels(data[5])
        chanrel = self._byte_to_channels(data[6])
        chanlpres = self._byte_to_channels(data[7])
        for chan in chanpres:
            self._callback(
                "sensor.basic", {
                    "device": str(device),
                    "channel": str(chan),
                    "type": "input",
                    "current": "HIGH"
                })
        for chan in chanlpres:
            self._callback(
                "sensor.basic", {
                    "device": str(device),
                    "channel": str(chan),
                    "type": "input",
                    "current": "LONG"
                })
        for chan in chanrel:
            self._callback(
                "sensor.basic", {
                    "device": str(device),
                    "channel": str(chan),
                    "type": "input",
                    "current": "LOW"
                })

    def _process_236(self, data):
        """
           Process a 236 Message
           blind channel status => send out when the blind status changes
           chan <X> <status>
           chan: 00000011=1, 00001100=2
           status: 0=off, 1=chan 1 up, 2=chan 1 down, 4=chan 2 up, 8=chan 2 down
        """
        chan = self._byte_to_blindchannel(data[5])
        device = str(ord(data[2]))
        status = self._byte_to_channels(data[7])
        command = ""
        if chan == 1:
            if 1 in status:
                command = "up"
            if 2 in status:
                command = "down"
        elif chan == 2:
            if 4 in status:
                command = "up"
            if 8 in status:
                command = "down"
        else:
            command = "off"
        if command != "":
            self._callback("shutter.device", {
                "device": device + "-" + str(chan),
                "command": command
            })

    def _process_230(self, data):
        """
           Process a 230 Message: Temperature Sensor Temperature
           Databyte 2 => High byte of the current sensor temperature
           Databyte 3 => Low byte of the current sensor temperature, in two's complement format
           Resolution: 0.0625 degrees Celsius
        """
        device = str(ord(data[2]))
        cur = ord(data[5]) << 8
        cur = ((cur | ord(data[6])) / 32) * 0.0625
        low = ord(data[7]) << 8
        low = ((low | ord(data[8])) / 32) * 0.0625
        high = ord(data[9]) << 8
        high = ((high | ord(data[10])) / 32) * 0.0625
        self._callback(
            "sensor.basic", {
                "device": device,
                "type": "temp",
                "units": "c",
                "current": str(cur),
                "lowest": str(low),
                "highest": str(high)
            })

# Some convert procs

    def _channels_to_byte(self, chan):
        """
           Convert a channel to a byte
           only works for one channel at a time
        """
        return chr((1 << (int(chan) - 1)))

    def _byte_to_channels(self, byte):
        """
           Convert a byte to a channel list
        """
        assert isinstance(byte, str)
        assert len(byte) == 1
        byte = ord(byte)
        result = []
        for offset in range(0, 8):
            if byte & (1 << offset):
                result.append(offset + 1)
        return result

    def _blinchannel_to_byte(self, channel):
        """
           Convert a channel 1 or 2 to its correct byte
        """
        assert isinstance(channel, int)
        if channel == 1:
            return chr(0x03)
        else:
            return chr(0x0C)

    def _byte_to_blindchannel(self, byte):
        """
           Convert a byte to its channel
        """
        if byte == chr(0x03):
            return 1
        else:
            return 2
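

# The Velbus frame checksum used above is the two's complement of the 8-bit
# sum of all preceding frame bytes. A standalone sketch of the same
# arithmetic (illustrative only; like the class method, it operates on
# Python 2 style one-character byte strings):
def velbus_checksum(frame_bytes):
    """Return the two's-complement checksum byte for a Velbus frame."""
    total = sum(ord(b) for b in frame_bytes) % 256
    return chr((256 - total) % 256)


# start byte + low priority + address 0x01 + module type request byte
_frame = chr(0x0F) + chr(0xFB) + chr(0x01) + chr(0x40)
print(hex(ord(velbus_checksum(_frame))))  # -> 0xb5, sent just before 0x04
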
Ejemplo n.º 36
0
class JsonRpcServer(object):
    """
    Creates a new JsonRpcServer from a bridge, a timeout in seconds
    (defining how often connection stats are logged) and a thread
    factory.
    """
    log = logging.getLogger("jsonrpc.JsonRpcServer")
    def __init__(self, bridge, timeout, threadFactory=None):
        self._bridge = bridge
        self._workQueue = Queue()
        self._threadFactory = threadFactory
        self._timeout = timeout
        self._next_report = monotonic_time() + self._timeout
        self._counter = 0

    def queueRequest(self, req):
        self._workQueue.put_nowait(req)

    """
    Aggregates number of requests received by vdsm. Each request from
    a batch is added separately. After time defined by timeout we log
    number of requests.
    """

    def _attempt_log_stats(self):
        self._counter += 1
        if monotonic_time() > self._next_report:
            self.log.info('%s requests processed during %s seconds',
                          self._counter, self._timeout)
            self._next_report += self._timeout
            self._counter = 0

    def _serveRequest(self, ctx, req):
        self._attempt_log_stats()
        mangledMethod = req.method.replace(".", "_")
        logLevel = logging.DEBUG
        if mangledMethod in ('Host_getVMList', 'Host_getAllVmStats',
                             'Host_getStats', 'StorageDomain_getStats',
                             'VM_getStats', 'Host_fenceNode'):
            logLevel = logging.TRACE
        self.log.log(logLevel, "Calling '%s' in bridge with %s", req.method,
                     req.params)
        try:
            method = getattr(self._bridge, mangledMethod)
        except AttributeError:
            if req.isNotification():
                return

            ctx.requestDone(
                JsonRpcResponse(None, JsonRpcMethodNotFoundError(), req.id))
            return

        try:
            params = req.params
            self._bridge.register_server_address(ctx.address)
            if isinstance(req.params, list):
                res = method(*params)
            else:
                res = method(**params)
            self._bridge.unregister_server_address()
        except JsonRpcError as e:
            ctx.requestDone(JsonRpcResponse(None, e, req.id))
        except Exception as e:
            self.log.exception("Internal server error")
            ctx.requestDone(
                JsonRpcResponse(None, JsonRpcInternalError(str(e)), req.id))
        else:
            res = True if res is None else res
            self.log.log(logLevel, "Return '%s' in bridge with %s", req.method,
                         res)
            ctx.requestDone(JsonRpcResponse(res, None, req.id))

    @traceback(on=log.name)
    def serve_requests(self):
        while True:
            obj = self._workQueue.get()
            if obj is None:
                break

            client, addr, msg = obj
            self._parseMessage(client, addr, msg)

    def _parseMessage(self, client, addr, msg):
        ctx = _JsonRpcServeRequestContext(client, addr)

        try:
            rawRequests = json.loads(msg)
        except:
            ctx.addResponse(JsonRpcResponse(None, JsonRpcParseError(), None))
            ctx.sendReply()
            return

        if isinstance(rawRequests, list):
            # Empty batch request
            if len(rawRequests) == 0:
                ctx.addResponse(
                    JsonRpcResponse(
                        None,
                        JsonRpcInvalidRequestError('request batch is empty',
                                                   rawRequests), None))
                ctx.sendReply()
                return
        else:
            # From this point on we know it's always a list
            rawRequests = [rawRequests]

        # JSON parsed; handle each request
        requests = []
        for rawRequest in rawRequests:
            try:
                req = JsonRpcRequest.fromRawObject(rawRequest)
                requests.append(req)
            except JsonRpcError as err:
                ctx.addResponse(JsonRpcResponse(None, err, None))
            except:
                ctx.addResponse(
                    JsonRpcResponse(None, JsonRpcInternalError(), None))

        ctx.setRequests(requests)

        # No request was built successfully, or the batch contains only notifications
        if ctx.counter == 0:
            ctx.sendReply()

        for request in requests:
            self._runRequest(ctx, request)

    def _runRequest(self, ctx, request):
        if self._threadFactory is None:
            self._serveRequest(ctx, request)
        else:
            try:
                self._threadFactory(partial(self._serveRequest, ctx, request))
            except Exception as e:
                self.log.exception("could not allocate request thread")
                ctx.requestDone(
                    JsonRpcResponse(None, JsonRpcInternalError(str(e)),
                                    request.id))

    def stop(self):
        self.log.info("Stopping JsonRPC Server")
        self._workQueue.put_nowait(None)
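

# serve_requests() above blocks on the work queue and exits when it sees the
# None sentinel that stop() enqueues. A minimal, self-contained sketch of
# that sentinel-shutdown pattern (the names below are illustrative):
try:
    from queue import Queue
except ImportError:
    from Queue import Queue
from threading import Thread


def _serve(work_queue):
    while True:
        obj = work_queue.get()
        if obj is None:  # sentinel received: shut down
            break
        print("handling %r" % (obj,))


_wq = Queue()
_st = Thread(target=_serve, args=(_wq,))
_st.start()
_wq.put_nowait("request-1")
_wq.put_nowait(None)  # the equivalent of JsonRpcServer.stop()
_st.join()
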
Ejemplo n.º 37
0
class DBPool(object):
    '''A database connection pool'''
    
    def initPool(self, maxActive=5, maxWait=None, init_size=1, db_type="mysql", **config):
        '''Initialize the database connection pool
        '''
        log.msg("__init__ Pool..")
        self.__freeConns = Queue(maxActive)
        self.maxWait = maxWait
        self.db_type = db_type
        self.config = config
        if init_size > maxActive:
            init_size = maxActive
        for i in range(init_size):
            self.free(self._create_conn())
        self.nowconn = None
            
    def __del__(self):
        log.msg("__del__ Pool..")
        self.release()
        
    def release(self):
        '''Release resources: close all connections in the pool'''
        log.msg("release Pool..")
        while self.__freeConns and not self.__freeConns.empty():
            con = self.get()
            con.release()
        self.__freeConns = None
            
    def _create_conn(self):
        '''Create a new database connection'''
        if self.db_type in DBCS:
            return MySQLdb.connect(**self.config)
            
    def get(self, timeout=None):
        '''Get a connection from the pool
        @param timeout: timeout in seconds
        '''
        if timeout is None:
            timeout = self.maxWait
        conn = None
        if self.__freeConns.empty():  # if the pool is empty, create a new connection
            conn = self._create_conn()
        else:
            conn = self.__freeConns.get(timeout=timeout)
            conn.pool = self
        return conn
    
    def cursor(self, cursorclass=None):
        '''Generic cursor interface'''
        conn = self.get()
        self.nowconn = conn
        ucur = UCursor(conn, cursorclass)
        return ucur
    
    def commit(self):
        '''Commit the current transaction'''
        try:
            self.nowconn.commit()
        except Exception as e:
            log.err(e.message)

    def rollback(self):
        '''Roll back the current transaction
        '''
        try:
            self.nowconn.rollback()
        except Exception as e:
            log.err(e.message)
    
    def free(self, conn):
        '''Return a connection to the pool
        @param conn: connection object
        '''
        conn.pool = None
        if self.__freeConns.full():  # if the pool is already full, close the connection
            conn.release()
            return
        return self.__freeConns.put_nowait(conn)
    
    def execSql(self, sqlstr):
        '''Execute a database write operation (insert, update or delete)
        @param sqlstr: str  the SQL statement to execute
        '''
        try:
            conn = self.get(5)
            cursor = conn.cursor()
            count = cursor.execute(sqlstr)
            conn.commit()
            cursor.close()
            conn.close()
            if count > 0:
                return True
            return False
        except Exception as err:
            log.err(err)
            conn.close()
            return None  # returning None raises an exception on the remote caller's side
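

# A minimal usage sketch for DBPool, assuming a reachable MySQL server, the
# MySQLdb driver, and that "mysql" appears in the module's DBCS constant.
# The connection parameters below are placeholders:
pool = DBPool()
pool.initPool(maxActive=5, init_size=2, db_type="mysql",
              host="127.0.0.1", user="root", passwd="secret", db="test")
if pool.execSql("UPDATE t SET n = n + 1 WHERE id = 1"):
    log.msg("row updated")
pool.release()
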
class SerialAnalyzerThread():

    """
    SerialAnalyzerThread: Thread to analyze every lines that are read from
    Serial port.
    The mechanism is similar to LogCatAnalyzerThread
    """

    def __init__(self, logger):
        # Analyzer thread stop condition
        self.__start_analyze = False

        # Messages to trigger
        self.__messages_to_trigger = {}

        # Message to receive
        self.__message_to_receive = None
        self.__message_received = None
        self.__is_message_received = False

        # Lock object
        self.__lock_message_triggered = threading.RLock()
        self.__lock_message_received = threading.RLock()

        # Internal buffer
        self.__queue = Queue()

        self.__analyzer_thread = None

        # Logger
        self._logger = logger

    def stop(self):
        """
        Stop the Thread
        """
        self.__start_analyze = False

        if self.__analyzer_thread is not None:
            try:
                self.__analyzer_thread.join(5)
            except Exception:  # pylint: disable=W0703
                pass
            finally:
                del self.__analyzer_thread
                self.__analyzer_thread = None

        return

    def start(self):
        """
        Start the thread
        """
        self.__analyzer_thread = threading.Thread(target=self.__run)
        self.__analyzer_thread.name = "SerialAnalyzerThread"
        self.__analyzer_thread.daemon = True
        self.__analyzer_thread.start()

    def push(self, line):
        """
        Method to receive the lines that are read from the serial port.
        This method is used by the SerialReaderThread

        :type  line: String
        :param line: Line read from the serial port
        """
        self.__queue.put_nowait(line)

    def __run(self):
        """
        Overloaded method that contains the instructions to run
        when the thread is started
        """
        self.__start_analyze = True
        while self.__start_analyze:
            while not self.__queue.empty():
                try:
                    line = self.__queue.get_nowait()
                    if len(line) > 0:
                        self.__analyse_line(line.rstrip('\r\n'))
                except Empty:
                    pass
            time.sleep(1)

    def __analyse_line(self, line):
        """
        Sub method to analyse every line read by the SerialReaderThread
        and store them if they match one of the trigger message

        :type  line: String
        :param line: Line read from the serial port
        """

        # Check all messages to be triggered
        self.__lock_message_triggered.acquire()
        for trig_message in self.__messages_to_trigger:
            if line.find(trig_message) != -1:
                # Message received, store log line
                self.__messages_to_trigger[trig_message].append(line)

        self.__lock_message_triggered.release()

        # Check message to be received
        self.__lock_message_received.acquire()
        if self.__message_to_receive is not None:
            if line.find(self.__message_to_receive) != -1:
                self.__message_received.append(line)
                self.__is_message_received = True
        self.__lock_message_received.release()

    def add_trigger_messages(self, messages):
        """ Trigger a list of messages

        :type  messages: Array
        :param messages: messages to be triggered
        """
        for message in messages:
            self.add_trigger_message(message)

    def add_trigger_message(self, message):
        """ Trigger a message

        :type  message: string
        :param message: message to be triggered
        """
        self.__lock_message_triggered.acquire()
        self.__messages_to_trigger[message] = list()
        self.__lock_message_triggered.release()

    def remove_trigger_message(self, message):
        """ Remove a triggered message

        :type  message: string
        :param message: message to be removed
        """
        if message in self.__messages_to_trigger:
            self.__lock_message_triggered.acquire()
            del self.__messages_to_trigger[message]
            self.__lock_message_triggered.release()

    def is_message_received(self, message, timeout):
        """ Check if a message is received

        :type  message: string
        :param message: message that we look for
        :type  timeout: int
        :param timeout: time limit where we expect to receive the message

        :return: Array of message received, empty array if nothing
        :rtype: list
        """
        self.__lock_message_received.acquire()
        self.__is_message_received = False
        self.__message_received = list()
        self.__message_to_receive = message
        self.__lock_message_received.release()

        time_count = 0
        while (not self.__is_message_received) and (time_count <= timeout):
            time.sleep(1)
            time_count += 1

        self.__is_message_received = False
        self.__message_to_receive = None
        return self.__message_received

    def get_message_triggered_status(self, message):
        """ Get the status of a message triggered

        :type  message: string
        :param message: message triggered

        :rtype: list of string
        :return: Array of message received, empty array if nothing
        """
        if message in self.__messages_to_trigger:
            return self.__messages_to_trigger[message]
        else:
            return None

    def reset_trigger_message(self, message):
        """ Reset triggered message

        :type  message: string
        :param message: message to be reset
        """
        if message in self.__messages_to_trigger:
            self.__lock_message_received.acquire()
            self.__messages_to_trigger[message] = list()
            self.__lock_message_received.release()
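

# A minimal usage sketch for SerialAnalyzerThread. A plain logging.Logger
# stands in for the framework logger, and push() plays the role of the
# SerialReaderThread feeding lines in:
import logging
import time

_analyzer = SerialAnalyzerThread(logging.getLogger("serial"))
_analyzer.start()
_analyzer.add_trigger_message("BOOT OK")
_analyzer.push("BOOT OK: kernel 4.19\r\n")
time.sleep(2)  # give the 1 s analyzer loop a cycle to consume the queue
print(_analyzer.get_message_triggered_status("BOOT OK"))
_analyzer.stop()
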
Ejemplo n.º 39
0
class ThreadPool(object):
    def __init__(self,
                 parse_inst,
                 save_inst=None,
                 proxieser=None,
                 fetch_inst=None,
                 max_count_parsave=100,
                 max_count_proxies=100):

        self._number_dict = {
            TPEnum.TASKS_RUNNING: 0,    # the count of tasks which are running
            TPEnum.URL_FETCH_NOT: 0,    # the count of urls which haven't been fetched
            TPEnum.URL_FETCH_SUCC: 0,   # the count of urls which have been fetched successfully
            TPEnum.URL_FETCH_FAIL: 0,   # the count of urls whose fetch has failed
            TPEnum.URL_FETCH_COUNT: 0,  # the count of urls which appeared in self._queue_fetch
            TPEnum.HTM_PARSE_NOT: 0,    # the count of urls which haven't been parsed
            TPEnum.HTM_PARSE_SUCC: 0,   # the count of urls which have been parsed successfully
            TPEnum.HTM_PARSE_FAIL: 0,   # the count of urls whose parse has failed
            TPEnum.ITEM_SAVE_NOT: 0,    # the count of items which haven't been saved
            TPEnum.ITEM_SAVE_SUCC: 0,   # the count of items which have been saved successfully
            TPEnum.ITEM_SAVE_FAIL: 0,   # the count of items whose save has failed
            TPEnum.PROXIES_LEFT: 0,     # the count of proxies which are available
            TPEnum.PROXIES_FAIL: 0,     # the count of proxies banned by the website
        }

        self._lock = threading.Lock()  # the lock which self._number_dict needs
        self._url_filter = UrlFilter()  # URL de-duplication filter

        self._thread_stop_flag = False  # default: False, stop flag of threads
        self._fetcher_number = 0  # default: 0, fetcher number in thread pool
        self._max_count_parsave = max_count_parsave  # maximum count of items which in parse queue or save queue
        self._max_count_proxies = max_count_proxies  # maximum count of items which in proxies queue

        self._queue_fetch = Queue()
        self._queue_parse = Queue()
        self._queue_saver = Queue()
        # {"http": "http://auth@ip:port", "https": "https://auth@ip:port"}
        self._queue_proxies = Queue()

        self._inst_fetcher = fetch_inst if fetch_inst else Fetcher()
        self._inst_parse = parse_inst
        self._inst_saver = save_inst if save_inst else Saver()
        self._inst_proxieser = proxieser if proxieser else Proxieser()

        self._thread_fetch_list = []
        self._thread_parse = None
        self._thread_save = None
        self._thread_proxieser = None

        self._thread_moniter = MoniterThread("MoniterThread", self)

        return

    ## ===================================================================================================================

    def start_working(self, root_url, fetcher_num):

        self._fetcher_number = fetcher_num
        self._thread_stop_flag = False

        self.add_a_task(TPEnum.URL_FETCH, (root_url, None, 0))
        logging.info(
            "ThreadPool starts working: urls_count=%s, fetcher_num=%s",
            self.get_number_dict(TPEnum.URL_FETCH_NOT), fetcher_num)
        self._thread_fetch_list = [
            FetchThread("FetchThread %d" % i, self._inst_fetcher, self)
            for i in xrange(fetcher_num)
        ]
        self._thread_parse = ParseThread("ParseThread", self._inst_parse, self)
        self._thread_save = SaveThread("SaveThread", self._inst_saver, self)
        self._thread_proxieser = ProxiesThread("ProxiesThread",
                                               self._inst_proxieser, self)

        if self._thread_moniter:
            self._thread_moniter.setDaemon(True)
            self._thread_moniter.start()

        for thread in self._thread_fetch_list:
            thread.setDaemon(True)
            thread.start()

        if self._thread_parse:
            self._thread_parse.setDaemon(True)
            self._thread_parse.start()

        if self._thread_save:
            self._thread_save.setDaemon(True)
            self._thread_save.start()
        logging.info("ThreadPool starts working: success")
        return

    def wait_for_finish(self):

        self._thread_stop_flag = True

        for thread in filter(lambda x: x.is_alive(), self._thread_fetch_list):
            thread.join()

        if self._thread_parse and self._thread_parse.is_alive():
            self._thread_parse.join()

        if self._thread_save and self._thread_save.is_alive():
            self._thread_save.join()

        if self._thread_proxieser and self._thread_proxieser.is_alive():
            self._thread_proxieser.join()

        if self._thread_moniter and self._thread_moniter.is_alive():
            self._thread_moniter.join()

        logging.info("ThreadPool has finished")

        return None

    ## ===================================================================================================================

    def get_thread_stop_flag(self):
        return self._thread_stop_flag

    def get_fetcher_number(self):
        return self._fetcher_number

    def get_number_dict(self, key=None):
        return self._number_dict[key] if key else self._number_dict

    def update_number_dict(self, key, value):
        self._lock.acquire()
        self._number_dict[key] += value
        self._lock.release()
        return None

    def is_all_tasks_done(self):
        return False if self._number_dict[TPEnum.TASKS_RUNNING] or self._number_dict[TPEnum.URL_FETCH_NOT] or \
                        self._number_dict[TPEnum.HTM_PARSE_NOT] or self._number_dict[TPEnum.ITEM_SAVE_NOT] else True

    ## ===================================================================================================================

    def add_a_task(self, task_name, task):

        if task_name == TPEnum.URL_FETCH and (
            (not self._url_filter) or self._url_filter.check_and_add(task[0])):
            self._queue_fetch.put_nowait(task)
            self.update_number_dict(TPEnum.URL_FETCH_NOT, +1)
            self.update_number_dict(TPEnum.URL_FETCH_COUNT, +1)
        elif task_name == TPEnum.HTM_PARSE and self._thread_parse:
            self._queue_parse.put_nowait(task)
            self.update_number_dict(TPEnum.HTM_PARSE_NOT, +1)
        elif task_name == TPEnum.ITEM_SAVE and self._thread_save:
            self._queue_saver.put_nowait(task)
            self.update_number_dict(TPEnum.ITEM_SAVE_NOT, +1)
        elif (task_name == TPEnum.PROXIES) and self._thread_proxieser:
            self._queue_proxies.put_nowait(task)
            self.update_number_dict(TPEnum.PROXIES_LEFT, +1)
        return None

    def get_a_task(self, task_name):

        task = None
        if task_name == TPEnum.PROXIES:
            task = self._queue_proxies.get(block=True, timeout=5)
            self.update_number_dict(TPEnum.PROXIES_LEFT, -1)
            return task
        if task_name == TPEnum.URL_FETCH:
            task = self._queue_fetch.get(block=True, timeout=5)
            self.update_number_dict(TPEnum.URL_FETCH_NOT, -1)
        elif task_name == TPEnum.HTM_PARSE:
            task = self._queue_parse.get(block=True, timeout=5)
            self.update_number_dict(TPEnum.HTM_PARSE_NOT, -1)
        elif task_name == TPEnum.ITEM_SAVE:
            task = self._queue_saver.get(block=True, timeout=10)
            self.update_number_dict(TPEnum.ITEM_SAVE_NOT, -1)
        self.update_number_dict(TPEnum.TASKS_RUNNING, +1)
        return task

    def finish_a_task(self, task_name):

        if task_name == TPEnum.PROXIES:
            self._queue_proxies.task_done()
            return
        if task_name == TPEnum.URL_FETCH:
            self._queue_fetch.task_done()
        elif task_name == TPEnum.HTM_PARSE:
            self._queue_parse.task_done()
        elif task_name == TPEnum.ITEM_SAVE:
            self._queue_saver.task_done()
        self.update_number_dict(TPEnum.TASKS_RUNNING, -1)
        return None
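

# A minimal usage sketch for this ThreadPool, assuming the surrounding module
# provides the Fetcher/Parser/Saver worker classes and TPEnum constants used
# above. MyParser is a hypothetical Parser subclass implementing the parsing
# callback:
pool = ThreadPool(parse_inst=MyParser())  # MyParser: hypothetical
pool.start_working("https://www.example.com/", fetcher_num=4)
pool.wait_for_finish()
print(pool.get_number_dict())  # final task/URL counters
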
Ejemplo n.º 40
0
class SMSd(object):
    '''
    Very basic SMS relay
    Receive, acknowledge and forward SMS-RP messages
    '''
    #
    # verbosity level: list of log types to display when calling
    # self._log(logtype, msg)
    DEBUG = ('ERR', 'WNG', 'INF', 'DBG')
    #
    TRACK_PDU = True
    #
    # time resolution for consuming the queue for TP msg
    QUEUE_TO = 0.1
    #
    # SMS relay phone number
    RP_OA = {'Type': 1, 'NumberingPlan': 1, 'Num': '1234'}
    #
    # TP settings for sending handcrafted SMS DELIVER to UEs
    TP_OA = {'Type': 1, 'NumberingPlan': 1, 'Num': '12341234'}
    TP_PID = {'Format': 0, 'Telematic': {'Telematic': 0, 'Protocol': 0}}
    TP_DCS = {'Group': 0, 'Charset': 0, 'Class': 0}
    #
    # timezone for TP_SCTS information (float)
    TIMEZONE = 0.0
    #
    # CorenetServer reference, for checking UE MSISDN and sending MT-SMS
    Server = None

    def __init__(self):
        self._pdu = []
        # dict with dicts of ongoing RP transactions indexed by RP ref and
        #                    ongoing TP transactions indexed by TP msg ref
        # indexed by UE msisdn
        self.Proc = {}
        # dict with lists of RP-DATA and TP procedures in error, indexed by UE msisdn
        self.Err = {}
        #
        # set 2 queues to process / forward or inject TP messages within a background thread
        self._forward_q = Queue()
        self._inject_q = Queue()
        self._forwarding = True
        self._forward_t = threadit(self.forward)
        self._log('INF', 'SMS relay started')

    def _log(self, logtype='DBG', msg=''):
        # logtype: 'ERR', 'WNG', 'INF', 'DBG'
        if logtype in self.DEBUG:
            log('[%s] [SMSd] %s' % (logtype, msg))

    def stop(self):
        if self._forwarding:
            self._forwarding = False
            self._forward_t.join()

    def forward(self):
        # consume the queue
        while self._forwarding:
            try:
                tp_msg, num = self._forward_q.get_nowait()
            except Empty:
                try:
                    tp_msg, num = self._inject_q.get_nowait()
                except Empty:
                    sleep(self.QUEUE_TO)
                else:
                    self.send_tp(tp_msg, num)
            else:
                self.process_tp(tp_msg, num)

    def init_ue(self, num):
        self.Proc[num] = {
            'RP': {},  # dict of ongoing RP procedures at the RP layer
            'TP': {}  # dict of ongoing TP procedures at the TP layer
        }
        self.Err[num] = {
            'RP': [],  # list of RP procedures in error
            'TP': []  # list of TP procedures in error
        }

    def process_rp(self, rp_msg, num):
        """process an RP message `rp_msg' sent by a UE with a given MSISDN `num',
        
        returns an RP ACK or ERROR if rp_msg is DATA or SMMA
                None if rp_msg is ACK or ERROR
        """
        if not isinstance(rp_msg, NAS.SMS_RP):
            self._log('WNG', 'process_rp: invalid rp_msg')
            return None
        #
        if self.TRACK_PDU:
            self._pdu.append((time(), 'UL', rp_msg))
        #
        if num not in self.Proc:
            self.init_ue(num)
        #
        if rp_msg._name == 'RP_DATA_MO':
            # this will return an RP_ACK or RP_ERR
            ret = self._process_rp_data(rp_msg, num)
        elif rp_msg._name == 'RP_SMMA':
            # this will return an RP_ACK or RP_ERR
            ret = self._process_rp_smma(rp_msg, num)
        elif rp_msg._name in ('RP_ACK_MO', 'RP_ERROR_MO'):
            # check the ref together with num
            ret = self._process_rp_ack_err(rp_msg, num)
        else:
            self._log('WNG', 'process_rp: invalid message %r' % rp_msg)
            ret = None
        #
        if ret and self.TRACK_PDU:
            self._pdu.append((time(), 'DL', ret))
        return ret

    def _process_rp_data(self, rp_msg, num):
        ref = rp_msg[2].get_val()
        rp_procs = self.Proc[num]['RP']
        rp_procs[ref] = (rp_msg, None)
        #
        # check RP orig / dest address
        if rp_msg[3][0].get_val() > 0:
            rp_orig = rp_msg[3][1]
            self._log(
                'WNG',
                'process_rp_data: non-empty originator address, %r' % rp_orig)
            # invalid mandatory information
            del rp_procs[ref]
            return NAS.RP_ERROR_MT(val={
                'Ref': ref,
                'RPCause': {
                    'Value': 96
                }
            })
        #
        if rp_msg[4][0].get_val() > 0:
            rp_dest = rp_msg[4][1]
            if rp_dest['Num'].decode() != self.RP_OA['Num']:
                self._log('INF',
                          'process_rp_data: destination address, %r' % rp_dest)
        else:
            self._log('WNG', 'process_rp_data: empty destination address')
            # invalid mandatory information
            del rp_procs[ref]
            return NAS.RP_ERROR_MT(val={
                'Ref': ref,
                'RPCause': {
                    'Value': 96
                }
            })
        #
        if not isinstance(rp_msg[5][1], NAS.SMS_TP):
            self._log('WNG',
                      'process_rp_data: invalid TP data, %r' % rp_msg[5])
            # invalid mandatory information
            del rp_procs[ref]
            return NAS.RP_ERROR_MT(val={
                'Ref': ref,
                'RPCause': {
                    'Value': 96
                }
            })
        #
        # process TP in the background thread
        self._insert_tp(rp_msg[5][1], num)
        # acknowledge RP
        rp_ack = NAS.RP_ACK_MT(val={'Ref': ref})
        del rp_procs[ref]
        return rp_ack

    def _process_rp_smma(self, rp_msg, num):
        ref = rp_msg[2].get_val()
        self._log('INF',
                  'process_rp_smma: procedure ref (%s, %i)' % (num, ref))
        return NAS.RP_ACK_MT(val={'Ref': ref})

    def _process_rp_ack_err(self, rp_msg, num):
        rp_msg_name = rp_msg._name[:-3].replace('_', '-')
        ref = rp_msg[2].get_val()
        rp_procs = self.Proc[num]['RP']
        if ref in rp_procs:
            rp_req, tp_ref = rp_procs[ref]
            rp_ud = rp_msg['RPUserData']
            if not rp_ud.get_trans() and isinstance(rp_ud[2], NAS.SMS_TP):
                # SMS_DELIVER_REPORT_RP_ACK/ERROR provided
                if rp_msg._name == 'RP_ACK_MO':
                    # TP status 0: Short message transaction completed - Short message received by the SME
                    stat = 0
                else:
                    # TP status 64: Permanent error, SC is not making any more transfer attempts - Remote procedure error
                    stat = 64
                self._report_status(rp_req, tp_ref, stat)
                # TODO: check if it requires an RP-ACK back
            # delete the RP procedure
            del rp_procs[ref]
            if rp_msg_name == 'RP-ACK':
                self._log(
                    'DBG',
                    'process_rp_ack_err: procedure ref (%s, %i) completed' %
                    (num, ref))
            else:
                self.Err[num]['RP'].append(rp_req)
                self._log('INF', 'process_rp_ack_err: procedure ref (%s, %i) in error with cause %r'\
                          % (num, ref, rp_msg[3][1]))
        else:
            self._log(
                'INF', 'process_rp_ack_err: procedure ref (%s, %i) unknown' %
                (num, ref))
        return None

    def _report_status(self, rp_req, tp_ref, stat=64):
        # when a downlink RP-DATA fails within CorenetServer (-> discard_rp()),
        # or when an RP-ACK/ERROR-MO with TP data (SMS-DELIVER-REPORT-RP-ACK/ERROR) is received,
        # we need to send an SMS-STATUS-REPORT toward the original sender
        # 1) reassociate to the SMS SUBMIT of the initial sender
        try:
            tp_oa = rp_req[5][1]['TP_OA']['Num'].decode()
        except:
            self._log(
                'WNG',
                'report_status: unable to retrieve the TP originating address')
        else:
            if tp_oa in self.Proc:
                tp_procs = self.Proc[tp_oa]['TP']
                if tp_ref in tp_procs:
                    tp_req, atime = self.Proc[tp_oa]['TP'][tp_ref]
                    # 2) send a status report to the initial sender and delete the TP transaction
                    del self.Proc[tp_oa]['TP'][tp_ref]
                    tp_stat = self._create_tp_stat_rep(tp_req, tp_oa, atime,
                                                       stat)
                    self._inject_tp(tp_stat, tp_oa)
                    self._log(
                        'DBG', 'report_status: delete TP procedure (%s, %i)' %
                        (tp_oa, tp_ref))
                    return
            # no status report was requested, hence nothing more to do
            self._log(
                'DBG',
                'report_status: no SMS SUBMIT requiring status report for %s' %
                tp_oa)

    def _insert_tp(self, tp_msg, num):
        """put the tp_msg within the forwarding queue,
        and let the forwarding thread take care of it
        """
        try:
            self._forward_q.put_nowait((tp_msg, num))
        except Full as err:
            self._log('ERR', 'insert_tp: TP forwarding queue is full (%i), deleting it, %s'\
                      % (self._forward_q.qsize(), err))
            self._forward_q = Queue()

    def process_tp(self, tp_msg, num):
        """process a TP message `tp_msg' sent by a UE with a given MSISDN `num'
        """
        if tp_msg._name == 'SMS_SUBMIT':
            # should forward TP user data in an SMS DELIVER to the TP dest
            self._process_tp_submit(tp_msg, num)
        elif tp_msg._name == 'SMS_COMMAND':
            # correspond to an MS invoking an operation within the SMS-Center
            self._process_tp_cmd(tp_msg, num)
        else:
            # SMS_DELIVER_REPORT_RP_ACK and SMS_DELIVER_REPORT_RP_ERROR
            # are processed within _process_rp_ack_err()
            self._log('WNG', 'process_tp: invalid message %r' % tp_msg)
            return None

    def _process_tp_submit(self, tp_msg, num):
        atime = localtime()
        if tp_msg[0].get_val():
            # the sender UE requests a status report as a result of the SMS DELIVER process
            tp_ref = tp_msg[6].get_val()
        else:
            tp_ref = None
        #
        # check TP dest addr
        num_dest = tp_msg[7]['Num'].decode()
        if num_dest in self.Server.MSISDN:
            imsi = self.Server.MSISDN[num_dest]
        else:
            # unknown msisdn
            # status 65: incompatible dest
            self._log('INF',
                      'process_tp_submit: destination unknown, %s' % num_dest)
            if tp_ref:
                tp_stat = self._create_tp_stat_rep(tp_msg, num, atime, stat=65)
                self.send_tp(tp_stat, num)
            return
        #
        if imsi in self.Server.UE:
            ued = self.Server.UE[imsi]
        else:
            # UE never attached
            # status 34: no response from SME
            self._log('INF',
                      'process_tp_submit: destination offline, %s' % num_dest)
            if tp_ref:
                tp_stat = self._create_tp_stat_rep(tp_msg, num, atime, stat=34)
                self.send_tp(tp_stat, num)
            return
        #
        # build tp_deliver
        if tp_ref is not None:
            # keep track of the SMS SUBMIT for further status report
            self.Proc[num]['TP'][tp_ref] = (tp_msg, atime)
        tp_del = self._create_tp_deliver(tp_msg, num, atime)
        self.send_tp(tp_del, num_dest, tp_ref=tp_ref)

    def _process_tp_cmd(self, tp_msg, num):
        self._log('INF', 'process_tp_cmd: CDL %i, CD 0x%s'\
                  % (tp_msg['TP_CDL'].get_val(),
                     hexlify(tp_msg['TP_CD'].get_val()).decode('ascii')))
        atime = localtime()
        if tp_msg[0].get_val():
            # the sender UE requests a status report of the result of the SMS COMMAND process
            tp_stat = self._create_tp_stat_rep(tp_msg, num, atime, stat=0)
            self.send_tp(tp_stat, num)

    def _create_tp_stat_rep(self, tp_msg, num, atime, stat=64):
        tp_srq = 1 if isinstance(tp_msg, NAS.SMS_COMMAND) else 0
        tp_mr = tp_msg[6].get_val()
        tp_ra = {'Type': 1, 'NumberingPlan': 1, 'Num': num}
        tp_scts = (atime, self.TIMEZONE)
        if 0 <= stat <= 255:
            tp_st = stat
        else:
            tp_st = 64
        #
        tp_stat = NAS.SMS_STATUS_REPORT(
            val={
                'TP_SRQ': tp_srq,
                'TP_MR': tp_mr,
                'TP_RA': tp_ra,
                'TP_SCTS': tp_scts,
                'TP_ST': tp_st
            })
        tp_stat['TP_PI'].set_trans(True)
        self._set_tp_scts(tp_stat['TP_DT'])
        return tp_stat

    def _create_tp_deliver(self, tp_msg, num, atime):
        tp_sri = tp_msg[0].get_val()
        tp_udhi = tp_msg[1].get_val()
        tp_oa = {'Type': 1, 'NumberingPlan': 1, 'Num': num}
        tp_pid = tp_msg[8].get_val()
        tp_dcs = tp_msg[9].get_val()
        tp_msg_ud = tp_msg['TP_UD']
        if tp_udhi:
            tp_udh = tp_msg_ud[1][1].get_val()
        else:
            tp_udh = None
        tp_ud = tp_msg_ud[2].get_val()
        #
        tp_del = NAS.SMS_DELIVER(
            val={
                'TP_SRI': tp_sri,
                'TP_UDHI': tp_udhi,
                'TP_OA': tp_oa,
                'TP_PID': tp_pid,
                'TP_DCS': tp_dcs,
                'TP_UD': {
                    'UDH': {
                        'UDH': tp_udh
                    },
                    'UD': tp_ud
                }
            })
        self._set_tp_scts(tp_del['TP_SCTS'])
        return tp_del

    def _set_tp_scts(self, tp_scts):
        if tp_scts.get_len() == 7:
            tp_scts.encode(localtime(), tz=self.TIMEZONE)
        else:
            self._log('WNG', 'set_tp_scts: custom timestamping unhandled')

    def _inject_tp(self, tp_msg, num):
        """put the tp_msg within the injection queue,
        and let the forwarding thread take care of it
        """
        try:
            self._inject_q.put_nowait((tp_msg, num))
        except Full as err:
            self._log('ERR', 'inject_tp: TP injection queue is full (%i), deleting it, %s'\
                      % (self._inject_q.qsize(), err))
            self._inject_q = Queue()

    def _get_new_rp_ref(self, num):
        if num not in self.Proc:
            self.init_ue(num)
            return 0
        else:
            for i in range(0, 256):
                if i not in self.Proc[num]['RP']:
                    return i
            # no RP ref available: reset all procedures for this UE
            self._log('ERR', 'get_new_rp_ref: no RP ref available, clearing all procedures for %s'\
                      % num)
            self.Proc[num]['RP'].clear()
            self.Proc[num]['TP'].clear()
            return 0

    def send_tp(self, tp_msg, num, tp_ref=None):
        """send the SMS TP message `tp_msg' to UE msisdn `num'
        associate the TP transaction ref `tp_ref' to the RP transaction
        """
        # wrap the TP msg into an RP DATA msg
        ref = self._get_new_rp_ref(num)
        rp_msg = NAS.RP_DATA_MT(val={
            'Ref': ref,
            'RPOriginatorAddress': self.RP_OA
        })
        rp_msg.set_tpdu(tp_msg)
        self.Proc[num]['RP'][ref] = (rp_msg, tp_ref)
        self._log('DBG', 'sending TP msg with RP ref %i' % ref)
        self.send_rp(rp_msg, num)

    def send_rp(self, rp_msg, num):
        if self.TRACK_PDU:
            self._pdu.append((time(), 'DL', rp_msg))
        self.Server.send_smsrp(num, rp_msg)

    def discard_rp(self, rp_msg, num):
        """discard an RP message `rp_msg' sent to UE with msisdn `num'
        """
        if num not in self.Proc:
            return
        rp_procs = self.Proc[num]['RP']
        ref = rp_msg[2].get_val()
        if ref not in rp_procs:
            return
        rp_req, tp_ref = rp_procs[ref]
        if tp_ref is not None:
            # downlink RP-DATA failed within corenet, status report required
            # TP status 97 : Temporary error, SC is not making any more transfer attempts - SME busy
            self._report_status(rp_req, tp_ref, 97)
        # delete the RP transaction
        del rp_procs[ref]
        self._log('INF',
                  'discard_rp: delete RP procedure (%s, %i)' % (num, ref))

    #--------------------------------------------------------------------------#
    # custom methods to send TP messages from the SMSd to UEs
    #--------------------------------------------------------------------------#

    def send_text(self, text, num):
        """sends a given text (ascii string, that will be converted to SMS 7bit)
        to a given phone number
        """
        tp_dcs = self.TP_DCS
        self.TP_DCS = {'Group': 0, 'Charset': 0, 'Class': 0}  # GSM 7bit
        self.send_tpud(text, num=num)
        self.TP_DCS = tp_dcs

    def send_tpud(self, ud, num):
        """sends a given user-data (directly the data buffer, or a tuple with 
        options and the data buffer) to a given phone number
        
        each option must be a 2-tuple (Tag, Value) where Tag is a uint8 and Value
        is a buffer
        """
        # TODO: implement SMS UD fragmentation into several tp_msg
        try:
            tp_msg = NAS.SMS_DELIVER(
                val={
                    'TP_MMS': 1,  # no more messages
                    'TP_OA': self.TP_OA,
                    'TP_PID': self.TP_PID,
                    'TP_DCS': self.TP_DCS
                })
            self._set_tp_scts(tp_msg['TP_SCTS'])
            if isinstance(ud, (list, tuple)):
                if len(ud) > 1:
                    # UD header IEs
                    tp_msg['TP_UDHI'].set_val(1)
                    tp_msg['TP_UD']['UDH']['UDH'].set_val([{
                        'T': udh[0],
                        'V': udh[1]
                    } for udh in ud[:-1]])
                data = ud[-1]
            else:
                data = ud
            tp_msg['TP_UD']['UD'].set_val(data)
        except:
            self._log('WNG', 'invalid TP UD')
        else:
            self._inject_tp(tp_msg, num)
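
forward() above gives the forwarding queue strict priority over the injection queue: each pass drains one message from _forward_q if available, otherwise one from _inject_q, and only sleeps QUEUE_TO when both are empty, so the loop never spins. The same dual-queue poll in isolation (a sketch; the function and callback names are hypothetical):

from queue import Queue, Empty  # 'Queue' on Python 2
from time import sleep

def poll_two_queues(primary, secondary, on_primary, on_secondary, timeout=0.1):
    # take one item from `primary` if possible, else one from `secondary`,
    # else sleep for `timeout` before the caller loops again
    try:
        item = primary.get_nowait()
    except Empty:
        try:
            item = secondary.get_nowait()
        except Empty:
            sleep(timeout)
        else:
            on_secondary(item)
    else:
        on_primary(item)
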
Ejemplo n.º 41
0
class Connection(object):

    in_buffer_size = 4096
    out_buffer_size = 4096

    cql_version = None
    protocol_version = 2

    keyspace = None
    compression = True
    compressor = None
    decompressor = None

    ssl_options = None
    last_error = None
    in_flight = 0
    is_defunct = False
    is_closed = False
    lock = None

    def __init__(self,
                 host='127.0.0.1',
                 port=9042,
                 credentials=None,
                 ssl_options=None,
                 sockopts=None,
                 compression=True,
                 cql_version=None,
                 protocol_version=2):
        self.host = host
        self.port = port
        self.credentials = credentials
        self.ssl_options = ssl_options
        self.sockopts = sockopts
        self.compression = compression
        self.cql_version = cql_version
        self.protocol_version = protocol_version

        self._id_queue = Queue(MAX_STREAM_PER_CONNECTION)
        for i in range(MAX_STREAM_PER_CONNECTION):
            self._id_queue.put_nowait(i)

        self.lock = RLock()

    def close(self):
        raise NotImplementedError()

    def defunct(self, exc):
        raise NotImplementedError()

    def send_msg(self, msg, cb):
        raise NotImplementedError()

    def wait_for_response(self, msg, **kwargs):
        raise NotImplementedError()

    def wait_for_responses(self, *msgs, **kwargs):
        raise NotImplementedError()

    def register_watcher(self, event_type, callback):
        raise NotImplementedError()

    def register_watchers(self, type_callback_dict):
        raise NotImplementedError()

    @defunct_on_error
    def process_msg(self, msg, body_len):
        version, flags, stream_id, opcode = map(int8_unpack, msg[:4])
        if stream_id < 0:
            callback = None
        else:
            callback = self._callbacks.pop(stream_id, None)
            self._id_queue.put_nowait(stream_id)

        body = None
        try:
            # check that the protocol version is supported
            given_version = version & PROTOCOL_VERSION_MASK
            if given_version != self.protocol_version:
                msg = "Server protocol version (%d) does not match the specified driver protocol version (%d). " +\
                      "Consider setting Cluster.protocol_version to %d."
                raise ProtocolError(
                    msg %
                    (given_version, self.protocol_version, given_version))

            # check that the header direction is correct
            if version & HEADER_DIRECTION_MASK != HEADER_DIRECTION_TO_CLIENT:
                raise ProtocolError(
                    "Header direction in response is incorrect; opcode %04x, stream id %r"
                    % (opcode, stream_id))

            if body_len > 0:
                body = msg[8:]
            elif body_len == 0:
                body = ""
            else:
                raise ProtocolError("Got negative body length: %r" % body_len)

            response = decode_response(stream_id, flags, opcode, body,
                                       self.decompressor)
        except Exception as exc:
            log.exception(
                "Error decoding response from Cassandra. "
                "opcode: %04x; message contents: %r", opcode, body)
            if callback is not None:
                callback(exc)
            self.defunct(exc)
            return

        try:
            if stream_id < 0:
                self.handle_pushed(response)
            elif callback is not None:
                callback(response)
        except Exception:
            log.exception("Callback handler errored, ignoring:")

    @defunct_on_error
    def _send_options_message(self):
        if self.cql_version is None and (not self.compression or
                                         not locally_supported_compressions):
            log.debug(
                "Not sending options message for new connection(%s) to %s "
                "because compression is disabled and a cql version was not "
                "specified", id(self), self.host)
            self._compressor = None
            self.cql_version = DEFAULT_CQL_VERSION
            self._send_startup_message()
        else:
            log.debug(
                "Sending initial options message for new connection (%s) to %s",
                id(self), self.host)
            self.send_msg(OptionsMessage(), self._handle_options_response)

    @defunct_on_error
    def _handle_options_response(self, options_response):
        if self.is_defunct:
            return

        if not isinstance(options_response, SupportedMessage):
            if isinstance(options_response, ConnectionException):
                raise options_response
            else:
                log.error("Did not get expected SupportedMessage response; " \
                          "instead, got: %s", options_response)
                raise ConnectionException("Did not get expected SupportedMessage " \
                                          "response; instead, got: %s" \
                                          % (options_response,))

        log.debug("Received options response on new connection (%s) from %s",
                  id(self), self.host)
        supported_cql_versions = options_response.cql_versions
        remote_supported_compressions = options_response.options['COMPRESSION']

        if self.cql_version:
            if self.cql_version not in supported_cql_versions:
                raise ProtocolError(
                    "cql_version %r is not supported by remote (w/ native "
                    "protocol). Supported versions: %r" %
                    (self.cql_version, supported_cql_versions))
        else:
            self.cql_version = supported_cql_versions[0]

        self._compressor = None
        compression_type = None
        if self.compression:
            overlap = (set(locally_supported_compressions.keys())
                       & set(remote_supported_compressions))
            if len(overlap) == 0:
                log.debug(
                    "No available compression types supported on both ends."
                    " locally supported: %r. remotely supported: %r",
                    locally_supported_compressions.keys(),
                    remote_supported_compressions)
            else:
                compression_type = next(iter(overlap))  # choose any
                # set the decompressor here, but set the compressor only after
                # a successful Ready message
                self._compressor, self.decompressor = \
                    locally_supported_compressions[compression_type]

        self._send_startup_message(compression_type)

    @defunct_on_error
    def _send_startup_message(self, compression=None):
        opts = {}
        if compression:
            opts['COMPRESSION'] = compression
        sm = StartupMessage(cqlversion=self.cql_version, options=opts)
        self.send_msg(sm, cb=self._handle_startup_response)

    @defunct_on_error
    def _handle_startup_response(self,
                                 startup_response,
                                 did_authenticate=False):
        if self.is_defunct:
            return
        if isinstance(startup_response, ReadyMessage):
            log.debug("Got ReadyMessage on new connection (%s) from %s",
                      id(self), self.host)
            if self._compressor:
                self.compressor = self._compressor
            self.connected_event.set()
        elif isinstance(startup_response, AuthenticateMessage):
            log.debug("Got AuthenticateMessage on new connection (%s) from %s",
                      id(self), self.host)

            if self.credentials is None:
                raise AuthenticationFailed(
                    'Remote end requires authentication.')

            self.authenticator = startup_response.authenticator
            cm = CredentialsMessage(creds=self.credentials)
            callback = partial(self._handle_startup_response,
                               did_authenticate=True)
            self.send_msg(cm, cb=callback)
        elif isinstance(startup_response, ErrorMessage):
            log.debug(
                "Received ErrorMessage on new connection (%s) from %s: %s",
                id(self), self.host, startup_response.summary_msg())
            if did_authenticate:
                raise AuthenticationFailed(
                    "Failed to authenticate to %s: %s" %
                    (self.host, startup_response.summary_msg()))
            else:
                raise ConnectionException(
                    "Failed to initialize new connection to %s: %s" %
                    (self.host, startup_response.summary_msg()))
        else:
            msg = "Unexpected response during Connection setup: %r"
            log.error(msg, startup_response)
            raise ProtocolError(msg % (startup_response, ))

    def set_keyspace_blocking(self, keyspace):
        if not keyspace or keyspace == self.keyspace:
            return

        query = QueryMessage(query='USE "%s"' % (keyspace, ),
                             consistency_level=ConsistencyLevel.ONE)
        try:
            result = self.wait_for_response(query)
        except InvalidRequestException as ire:
            # the keyspace probably doesn't exist
            raise ire.to_exception()
        except Exception as exc:
            conn_exc = ConnectionException(
                "Problem while setting keyspace: %r" % (exc, ), self.host)
            self.defunct(conn_exc)
            raise conn_exc

        if isinstance(result, ResultMessage):
            self.keyspace = keyspace
        else:
            conn_exc = ConnectionException(
                "Problem while setting keyspace: %r" % (result, ), self.host)
            self.defunct(conn_exc)
            raise conn_exc

    def set_keyspace_async(self, keyspace, callback):
        """
        Use this in order to avoid deadlocking the event loop thread.
        When the operation completes, `callback` will be called with
        two arguments: this connection and an Exception if an error
        occurred, otherwise :const:`None`.
        """
        if not keyspace or keyspace == self.keyspace:
            callback(self, None)
            return

        query = QueryMessage(query='USE "%s"' % (keyspace, ),
                             consistency_level=ConsistencyLevel.ONE)

        def process_result(result):
            if isinstance(result, ResultMessage):
                self.keyspace = keyspace
                callback(self, None)
            elif isinstance(result, InvalidRequestException):
                callback(self, result.to_exception())
            else:
                callback(
                    self,
                    self.defunct(
                        ConnectionException(
                            "Problem while setting keyspace: %r" % (result, ),
                            self.host)))

        self.send_msg(query, process_result, wait_for_id=True)

    def __str__(self):
        status = ""
        if self.is_defunct:
            status = " (defunct)"
        elif self.is_closed:
            status = " (closed)"

        return "<%s(%r) %s:%d%s>" % (self.__class__.__name__, id(self),
                                     self.host, self.port, status)

    __repr__ = __str__
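
The _id_queue above is a pre-filled bounded Queue serving as a stream-id pool: __init__ puts every id in once, a sender checks one out before writing a request, and process_msg() returns it with put_nowait() when the matching response arrives. The same pool in isolation (a sketch; the class and method names are illustrative):

from queue import Queue  # 'Queue' on Python 2

class StreamIdPool(object):
    def __init__(self, size):
        self._ids = Queue(size)
        for i in range(size):
            self._ids.put_nowait(i)

    def acquire(self):
        # blocks until some in-flight request completes and releases its id
        return self._ids.get()

    def release(self, stream_id):
        # put_nowait() cannot raise Full as long as every release
        # matches a prior acquire
        self._ids.put_nowait(stream_id)
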
Ejemplo n.º 42
0
class APRSUploader(object):
    """ 
    Queued APRS Telemetry Uploader class
    This performs uploads to an APRS-IS server.

    Incoming telemetry packets are fed into queue, which is checked regularly.
    At a regular interval, the most recent telemetry packet is extracted, and converted to an
    APRS object format, and then uploaded into APRS-IS.

    If an upload attempt times out, the packet is discarded.
    If the queue fills up (probably indicating no network connection, and a fast packet downlink rate),
    it is immediately emptied, to avoid upload of out-of-date packets.

    Note that this uploader object is intended to handle telemetry from multiple sondes
    """

    # We require the following fields to be present in the incoming telemetry dictionary data
    REQUIRED_FIELDS = [
        "frame",
        "id",
        "datetime",
        "lat",
        "lon",
        "alt",
        "temp",
        "type",
        "freq",
        "freq_float",
        "datetime_dt",
    ]

    def __init__(
        self,
        aprs_callsign="N0CALL",
        aprs_passcode="00000",
        object_name_override=None,
        object_comment="RadioSonde",
        position_report=False,
        aprsis_host="rotate.aprs2.net",
        aprsis_port=14580,
        aprsis_reconnect=300,
        station_beacon=False,
        station_beacon_rate=30,
        station_beacon_position=(0.0, 0.0, 0.0),
        station_beacon_comment="radiosonde_auto_rx SondeGate v<version>",
        station_beacon_icon="/r",
        synchronous_upload_time=30,
        callsign_validity_threshold=5,
        upload_queue_size=16,
        upload_timeout=5,
        inhibit=False,
    ):
        """ Initialise an APRS Uploader object.

        Args:
            aprs_callsign (str): Callsign of the uploader, used when logging into APRS-IS.
            aprs_passcode (str): APRS-IS passcode corresponding to the uploader callsign.

            object_name_override (str): Override the object name in the uploaded sentence with this value.
                WARNING: This will horribly break the aprs.fi map if multiple sondes are uploaded simultaneously under the same callsign.
                USE WITH CAUTION!!!
            object_comment (str): A comment to go with the object. Various fields will be replaced with telemetry data.

            position_report (bool): If True, upload positions as APRS position reports, otherwise, upload as an Object.

            aprsis_host (str): APRS-IS Server to upload packets to.
            aprsis_port (int): APRS-IS TCP port number.
            aprsis_reconnect (int): Reconnect to the APRS-IS server at least every X minutes. Reconnections will occur when telemetry needs to be sent.

            station_beacon (bool): Enable beaconing of station position.
            station_beacon_rate (int): Time delay between beacon uploads (minutes)
            station_beacon_position (tuple): (lat, lon, alt), in decimal degrees, of the station position.
            station_beacon_comment (str): Comment field for the station beacon. <version> will be replaced with the current auto_rx version.
            station_beacon_icon (str): The APRS icon to be used, as the two characters (symbol table, symbol index), as per http://www.aprs.org/symbols.html

            synchronous_upload_time (int): Upload the most recent telemetry when time.time()%synchronous_upload_time == 0
                This is done in an attempt to get multiple stations uploading the same telemetry sentence simultaneously,
                and also acts as decimation on the number of sentences uploaded to APRS-IS.

            callsign_validity_threshold (int): Only upload telemetry data if the callsign has been observed more than N times. Default = 5

            upload_queue_size (int): Maximum number of sentences to keep in the upload queue. If the queue is filled,
                it will be emptied (discarding the queue contents).
            upload_timeout (int): Timeout (seconds) when performing uploads to APRS-IS. Default: 5 seconds.

            inhibit (bool): Inhibit all uploads. Mainly intended for debugging.

        """

        self.aprs_callsign = aprs_callsign
        self.aprs_passcode = aprs_passcode
        self.object_comment = object_comment
        self.position_report = position_report
        self.aprsis_host = aprsis_host
        self.aprsis_port = aprsis_port
        self.aprsis_reconnect = aprsis_reconnect
        self.upload_timeout = upload_timeout
        self.upload_queue_size = upload_queue_size
        self.synchronous_upload_time = synchronous_upload_time
        self.callsign_validity_threshold = callsign_validity_threshold
        self.inhibit = inhibit

        self.station_beacon = {
            "enabled": station_beacon,
            "position": station_beacon_position,
            "rate": station_beacon_rate,
            "comment": station_beacon_comment,
            "icon": station_beacon_icon,
        }

        if object_name_override is None:
            self.object_name_override = "<id>"
        else:
            self.object_name_override = object_name_override
            self.log_info("Using APRS Object Name Override: %s" %
                          self.object_name_override)

        # Our two Queues - one to hold sentences to be uploaded, the other to temporarily hold
        # input telemetry dictionaries before they are converted and processed.
        self.aprs_upload_queue = Queue(upload_queue_size)
        self.input_queue = Queue()

        # Dictionary where we store sorted telemetry data for upload when required.
        # Elements will be named after payload IDs, and will contain:
        #   'count' (int): Number of times this callsign has been observed. Uploads will only occur when
        #       this number rises above callsign_validity_threshold.
        #   'data' (Queue): A queue of telemetry sentences to be uploaded. When the upload timer fires,
        #       this queue will be dumped, and the most recent telemetry uploaded.
        self.observed_payloads = {}

        # Record of when we last uploaded a station position beacon.
        self.last_user_position_upload = 0

        # APRS-IS Socket Object
        self.aprsis_socket = None
        self.aprsis_lastconnect = 0
        self.aprsis_upload_lock = Lock()
        # Attempt to connect to the APRS-IS server.
        # If this fails, we will attempt to re-connect when a packet needs to be uploaded.
        self.connect()

        # Start the uploader thread.
        self.upload_thread_running = True
        self.upload_thread = Thread(target=self.aprs_upload_thread)
        self.upload_thread.start()

        # Start the input queue processing thread.
        self.input_processing_running = True
        self.input_thread = Thread(target=self.process_queue)
        self.input_thread.start()

        self.timer_thread_running = True
        self.timer_thread = Thread(target=self.upload_timer)
        self.timer_thread.start()

        self.log_info("APRS Uploader Started.")

    def connect(self):
        """ Connect to an APRS-IS Server """
        # create socket & connect to server
        self.aprsis_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.aprsis_socket.settimeout(self.upload_timeout)
        try:
            self.aprsis_socket.connect((self.aprsis_host, self.aprsis_port))
            # Send logon string
            # _logon = 'user %s pass %s vers VK5QI-AutoRX filter b/%s \r\n' % (self.aprs_callsign, self.aprs_passcode, self.aprs_callsign)
            _logon = "user %s pass %s vers VK5QI-AutoRX\r\n" % (
                self.aprs_callsign,
                self.aprs_passcode,
            )
            self.log_debug("Logging in: %s" % _logon)
            self.aprsis_socket.sendall(_logon.encode("ascii"))

            # Set packet filters to limit inbound bandwidth.
            _filter = "#filter p/ZZ\r\n"
            self.log_debug("Setting Filter: %s" % _filter)
            self.aprsis_socket.sendall(_filter.encode("ascii"))
            _filter = "#filter -t/po\r\n"
            self.log_debug("Setting Filter: %s" % _filter)
            self.aprsis_socket.sendall(_filter.encode("ascii"))

            # Wait for login to complete.
            time.sleep(1)

            # Check response
            _resp = self.aprsis_socket.recv(1024)

            try:
                _resp = _resp.decode("ascii").strip()
            except:
                print(_resp)

            if _resp[0] != "#":
                raise IOError("Invalid response from APRS-IS Server: %s" %
                              _resp)
            else:
                self.log_debug("Server Logon Response: %s" % str(_resp))

            self.log_info("Connected to APRS-IS server %s:%d" %
                          (self.aprsis_host, self.aprsis_port))
            self.aprsis_lastconnect = time.time()
            return True

        except Exception as e:
            self.log_error("Connection to APRS-IS Failed - %s" % str(e))
            self.aprsis_socket = None
            return False

    def flush_rx(self):
        """ Flush the APRS-IS RX buffer """
        try:
            _data = self.aprsis_socket.recv(32768)
            self.log_debug("Incoming data from APRS-IS: %s" % (_data.decode()))
        except:
            # Ignore any exceptions from attempting to read the buffer.
            pass

    def aprsis_upload(self, source, packet, igate=False, retries=5):
        """ Upload a packet to APRS-IS

        Args:
            source (str): Callsign of the packet source.
            packet (str): APRS packet to upload.
            igate (boolean): If True, iGate the packet into APRS-IS
                (i.e. use the original source call, but add SONDEGATE and our callsign to the path.)
            retries (int): Number of times to retry uploading.

        """
        # If we are inhibited, just return immediately.
        if self.inhibit:
            self.log_info("Upload Inhibited: %s" % packet)
            return True

        self.aprsis_upload_lock.acquire()

        # If we have not connected in a long time, reset the APRS-IS connection.
        if (time.time() - self.aprsis_lastconnect) > (self.aprsis_reconnect *
                                                      60):
            self.disconnect()
            time.sleep(1)
            self.connect()

        # Generate APRS packet
        if igate:
            # If we are emulating an IGATE, then we need to add in a path, a q-construct, and our own callsign.
            # We have the TOCALL field 'APRARX' allocated by Bob WB4APR, so we can now use this to indicate
            # that these packets have arrived via radiosonde_auto_rx!
            _packet = "%s>APRARX,SONDEGATE,TCPIP,qAR,%s:%s\r\n" % (
                source,
                self.aprs_callsign,
                packet,
            )
        else:
            # Otherwise, we are probably just placing an object, usually sourced by our own callsign
            _packet = "%s>APRS:%s\r\n" % (source, packet)

        _attempts = 1
        while _attempts <= retries:
            try:
                # Immediately throw exception if we're not connected.
                # This will trigger a reconnect.
                if self.aprsis_socket is None:
                    raise IOError("Socket not connected.")

                # Attempt to send the packet.
                # This will timeout if the socket is locked up.
                self.aprsis_socket.sendall(_packet.encode("ascii"))

                # If OK, return.
                self.log_info("Uploaded to APRS-IS: %s" % str(_packet).strip())
                self.aprsis_upload_lock.release()
                return True

            except Exception as e:
                # If something broke, forcibly shutdown the socket, then reconnect.
                self.log_error("Upload Error: %s" % str(e))

                self.log_info("Attempting to reconnect...")
                self.disconnect()
                time.sleep(1)
                self.connect()

                _attempts += 1

        # If we end up here, something has really broken.
        self.aprsis_upload_lock.release()
        return False

    def disconnect(self):
        """ Close APRS-IS connection """
        try:
            self.aprsis_socket.shutdown(0)
        except Exception as e:
            self.log_debug("Socket shutdown failed - %s" % str(e))

        try:
            self.aprsis_socket.close()
        except Exception as e:
            self.log_debug("Socket close failed - %s" % str(e))

    def beacon_station_position(self):
        """ Send a station position beacon into APRS-IS """
        if self.station_beacon["enabled"]:
            if (self.station_beacon["position"][0]
                    == 0.0) and (self.station_beacon["position"][1] == 0.0):
                self.log_error(
                    "Station position is 0,0, not uploading position beacon.")
                self.last_user_position_upload = time.time()
                return

            # Generate the station position packet
            # Note - this is now generated as an APRS position report, for radiosondy.info compatibility.
            _packet = generate_station_object(
                self.aprs_callsign,
                self.station_beacon["position"][0],
                self.station_beacon["position"][1],
                self.station_beacon["comment"],
                self.station_beacon["icon"],
                position_report=True,
            )

            # Send the packet as an iGated packet.
            self.aprsis_upload(self.aprs_callsign, _packet, igate=True)
            self.last_user_position_upload = time.time()

    def update_station_position(self, lat, lon, alt):
        """ Update the internal station position record. Used when determining the station position by GPSD """
        self.station_beacon["position"] = (lat, lon, alt)

    def aprs_upload_thread(self):
        """ Handle uploading of packets to APRS """

        self.log_debug("Started APRS Uploader Thread.")

        while self.upload_thread_running:

            if self.aprs_upload_queue.qsize() > 0:
                # If the queue is completely full, jump to the most recent telemetry sentence.
                if self.aprs_upload_queue.qsize() == self.upload_queue_size:
                    while not self.aprs_upload_queue.empty():
                        _telem = self.aprs_upload_queue.get()

                    self.log_warning(
                        "Uploader queue was full - possible connectivity issue."
                    )
                else:
                    # Otherwise, get the first item in the queue.
                    _telem = self.aprs_upload_queue.get()

                # Convert to a packet.
                try:
                    (_packet, _call) = telemetry_to_aprs_position(
                        _telem,
                        object_name=self.object_name_override,
                        aprs_comment=self.object_comment,
                        position_report=self.position_report,
                    )
                except Exception as e:
                    self.log_error(
                        "Error converting telemetry to APRS packet - %s" %
                        str(e))
                    _packet = None

                # Attempt to upload it.
                if _packet is not None:

                    # If we are uploading position reports, the source call is the generated callsign
                    # usually based on the sonde serial number, and we iGate the position report.
                    # Otherwise, we upload APRS Objects, sourced by our own callsign, but still iGated via us.
                    if self.position_report:
                        self.aprsis_upload(_call, _packet, igate=True)
                    else:
                        self.aprsis_upload(self.aprs_callsign,
                                           _packet,
                                           igate=True)

            else:
                # Wait for a short time before checking the queue again.
                time.sleep(0.1)

        self.log_debug("Stopped APRS Uploader Thread.")

    def upload_timer(self):
        """ Add packets to the aprs upload queue if it is time for us to upload. """

        while self.timer_thread_running:
            if int(time.time()) % self.synchronous_upload_time == 0:
                # Time to upload!
                for _id in self.observed_payloads.keys():
                    # If no data, continue...
                    if self.observed_payloads[_id]["data"].empty():
                        continue
                    else:
                        # Otherwise, dump the queue and keep the latest telemetry.
                        while not self.observed_payloads[_id]["data"].empty():
                            _telem = self.observed_payloads[_id]["data"].get()

                        # Attempt to add it to the APRS upload queue.
                        try:
                            self.aprs_upload_queue.put_nowait(_telem)
                        except Exception as e:
                            self.log_error(
                                "Error adding sentence to queue: %s" % str(e))

                # Sleep a second so we don't hit the synchronous upload time again.
                time.sleep(1)

                # Flush APRS-IS RX buffer
                self.flush_rx()
            else:
                # Not yet time to upload, wait for a bit.
                time.sleep(0.1)

    def process_queue(self):
        """ Process packets from the input queue.

        This thread handles packets from the input queue (provided by the decoders)
        Packets are sorted by ID, and a dictionary entry is created. 

        """

        while self.input_processing_running:
            # Process everything in the queue.
            while self.input_queue.qsize() > 0:
                # Grab latest telem dictionary.
                _telem = self.input_queue.get_nowait()

                _id = _telem["id"]

                if _id not in self.observed_payloads:
                    # We haven't seen this ID before, so create a new dictionary entry for it.
                    self.observed_payloads[_id] = {"count": 1, "data": Queue()}
                    self.log_debug(
                        "New Payload %s. Not observed enough to allow upload."
                        % _id)
                    # However, we don't yet add anything to the queue for this payload...
                else:
                    # We have seen this payload before!
                    # Increment the 'seen' counter.
                    self.observed_payloads[_id]["count"] += 1

                    # If we have seen this particular ID enough times, add the data to the ID's queue.
                    if (self.observed_payloads[_id]["count"] >=
                            self.callsign_validity_threshold):
                        # Add the telemetry to the queue
                        self.observed_payloads[_id]["data"].put(_telem)

                    else:
                        self.log_debug(
                            "Payload ID %s not observed enough to allow upload."
                            % _id)

            if (time.time() - self.last_user_position_upload
                ) > self.station_beacon["rate"] * 60:
                if self.aprsis_socket is not None:
                    self.beacon_station_position()

            time.sleep(0.1)

    def add(self, telemetry):
        """ Add a dictionary of telemetry to the input queue. 

        Args:
            telemetry (dict): Telemetry dictionary to add to the input queue.

        """

        # Discard any telemetry which is indicated to be encrypted.
        if telemetry.get("encrypted", False):
            return

        # Check the telemetry dictionary contains the required fields.
        for _field in self.REQUIRED_FIELDS:
            if _field not in telemetry:
                self.log_error("JSON object missing required field %s" %
                               _field)
                return

        # Add it to the queue if we are running.
        if self.input_processing_running:
            self.input_queue.put(telemetry)
        else:
            self.log_error("Processing not running, discarding.")

    def close(self):
        """ Shutdown uploader and processing threads. """
        self.log_debug("Waiting for threads to close...")
        self.input_processing_running = False
        self.timer_thread_running = False
        self.upload_thread_running = False

        self.disconnect()

        # Wait for all threads to close.
        if self.upload_thread is not None:
            self.upload_thread.join()

        if self.timer_thread is not None:
            self.timer_thread.join()

        if self.input_thread is not None:
            self.input_thread.join()

    def log_debug(self, line):
        """ Helper function to log a debug message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.debug("APRS-IS - %s" % line)

    def log_info(self, line):
        """ Helper function to log an informational message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.info("APRS-IS - %s" % line)

    def log_error(self, line):
        """ Helper function to log an error message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.error("APRS-IS - %s" % line)

    def log_warning(self, line):
        """ Helper function to log a warning message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.warning("APRS-IS - %s" % line)
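
Both aprs_upload_thread() and upload_timer() above rely on the same decimation trick: when the queue has filled (or when the upload timer fires), the queue is drained completely and only the most recent item is kept, so stale telemetry is dropped instead of being uploaded late. As a standalone helper (a sketch, not part of the class above):

from queue import Queue, Empty  # 'Queue' on Python 2

def keep_latest(q):
    """Drain `q` and return its most recent item, or None if it was empty."""
    item = None
    while True:
        try:
            item = q.get_nowait()
        except Empty:
            return item
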
Ejemplo n.º 43
0
class printcore():
    def __init__(self, port = None, baud = None):
        """Initializes a printcore instance. Pass the port and baud rate to
           connect immediately"""
        self.baud = None
        self.port = None
        self.analyzer = GCodeAnalyzer()
        self.printer = None  # Serial instance connected to the printer,
                             # should be None when disconnected
        self.clear = 0  # clear to send, enabled after responses
        self.online = False  # The printer has responded to the initial command
                             # and is active
        self.printing = False  # is a print currently running, true if printing
                               # , false if paused
        self.mainqueue = None
        self.priqueue = Queue(0)
        self.queueindex = 0
        self.lineno = 0
        self.resendfrom = -1
        self.paused = False
        self.sentlines = {}
        self.log = deque(maxlen = 10000)
        self.sent = []
        self.writefailures = 0
        self.tempcb = None  # impl (wholeline)
        self.recvcb = None  # impl (wholeline)
        self.sendcb = None  # impl (wholeline)
        self.preprintsendcb = None  # impl (wholeline)
        self.printsendcb = None  # impl (wholeline)
        self.layerchangecb = None  # impl (wholeline)
        self.errorcb = None  # impl (wholeline)
        self.startcb = None  # impl ()
        self.endcb = None  # impl ()
        self.onlinecb = None  # impl ()
        self.loud = False  # emit sent and received lines to terminal
        self.greetings = ['start', 'Grbl ']
        self.wait = 0  # default wait period for send(), send_now()
        self.read_thread = None
        self.stop_read_thread = False
        self.send_thread = None
        self.stop_send_thread = False
        self.print_thread = None
        if port is not None and baud is not None:
            self.connect(port, baud)
        self.xy_feedrate = None
        self.z_feedrate = None
        self.pronterface = None

    @locked
    def disconnect(self):
        """Disconnects from printer and pauses the print
        """
        if self.printer:
            if self.read_thread:
                self.stop_read_thread = True
                self.read_thread.join()
                self.read_thread = None
            if self.print_thread:
                self.printing = False
                self.print_thread.join()
            self._stop_sender()
            try:
                self.printer.close()
            except socket.error:
                pass
            except OSError:
                pass
        self.printer = None
        self.online = False
        self.printing = False

    @locked
    def connect(self, port = None, baud = None):
        """Set port and baudrate if given, then connect to printer
        """
        if self.printer:
            self.disconnect()
        if port is not None:
            self.port = port
        if baud is not None:
            self.baud = baud
        if self.port is not None and self.baud is not None:
            # Connect to socket if "port" is an IP, device if not
            host_regexp = re.compile("^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$|^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$")
            is_serial = True
            if ":" in port:
                bits = port.split(":")
                if len(bits) == 2:
                    hostname = bits[0]
                    try:
                        port = int(bits[1])
                        if host_regexp.match(hostname) and 1 <= port <= 65535:
                            is_serial = False
                    except:
                        pass
            self.writefailures = 0
            if not is_serial:
                self.printer_tcp = socket.socket(socket.AF_INET,
                                                 socket.SOCK_STREAM)
                self.timeout = 0.25
                self.printer_tcp.settimeout(1.0)
                try:
                    self.printer_tcp.connect((hostname, port))
                    self.printer_tcp.settimeout(self.timeout)
                    self.printer = self.printer_tcp.makefile()
                except socket.error as e:
                    print _("Could not connect to %s:%s:") % (hostname, port)
                    self.printer = None
                    self.printer_tcp = None
                    print _("Socket error %s:") % e.errno,
                    print e.strerror
                    return
            else:
                disable_hup(self.port)
                self.printer_tcp = None
                try:
                    self.printer = Serial(port = self.port,
                                          baudrate = self.baud,
                                          timeout = 0.25)
                except SerialException as e:
                    print _("Could not connect to %s at baudrate %s:") % (self.port, self.baud)
                    self.printer = None
                    print _("Serial error: %s") % e
                    return
            self.stop_read_thread = False
            self.read_thread = Thread(target = self._listen)
            self.read_thread.start()
            self._start_sender()

    def reset(self):
        """Reset the printer
        """
        if self.printer and not self.printer_tcp:
            self.printer.setDTR(1)
            time.sleep(0.2)
            self.printer.setDTR(0)

    def _readline(self):
        try:
            try:
                line = self.printer.readline()
                if self.printer_tcp and not line:
                    raise OSError(-1, "Read EOF from socket")
            except socket.timeout:
                return ""

            if len(line) > 1:
                self.log.append(line)
                if self.recvcb:
                    try: self.recvcb(line)
                    except: pass
                if self.loud: print "RECV:", line.rstrip()
            return line
        except SelectError as e:
            if 'Bad file descriptor' in e.args[1]:
                print _("Can't read from printer (disconnected?) (SelectError {0}): {1}").format(e.errno, e.strerror)
                return None
            else:
                print _("SelectError ({0}): {1}").format(e.errno, e.strerror)
                raise
        except SerialException as e:
            print _("Can't read from printer (disconnected?) (SerialException): {0}").format(e)
            return None
        except socket.error as e:
            print _("Can't read from printer (disconnected?) (Socket error {0}): {1}").format(e.errno, e.strerror)
            return None
        except OSError as e:
            if e.errno == errno.EAGAIN:  # Not a real error, no data was available
                return ""
            print _("Can't read from printer (disconnected?) (OS Error {0}): {1}").format(e.errno, e.strerror)
            return None

    def _listen_can_continue(self):
        if self.printer_tcp:
            return not self.stop_read_thread and self.printer
        return (not self.stop_read_thread
                and self.printer
                and self.printer.isOpen())

    def _listen_until_online(self):
        while not self.online and self._listen_can_continue():
            self._send("M105")
            empty_lines = 0
            while self._listen_can_continue():
                line = self._readline()
                if line is None: break  # connection problem
                # Workaround for cases where M105 was sent before the printer
                # serial port was online: an empty line means the read timed
                # out with no data received, so we count consecutive empty
                # lines and, after 15 in a row, break to send a fresh M105.
                # 15 was chosen because it gives the Gen7 bootloader enough
                # time to time out, and since missed M105s should be rare we
                # can afford a long wait before resending.
                if not line:
                    empty_lines += 1
                    if empty_lines == 15: break
                else: empty_lines = 0
                if line.startswith(tuple(self.greetings)) \
                   or line.startswith('ok') or "T:" in line:
                    if self.onlinecb:
                        try: self.onlinecb()
                        except: pass
                    self.online = True
                    return

    def _listen(self):
        """This function acts on messages from the firmware
        """
        self.clear = True
        if not self.printing:
            self._listen_until_online()
        while self._listen_can_continue():
            line = self._readline()
            if line is None:
                break
            if line.startswith('DEBUG_'):
                continue
            if line.startswith(tuple(self.greetings)) or line.startswith('ok'):
                self.clear = True
            if line.startswith('ok') and "T:" in line and self.tempcb:
                #callback for temp, status, whatever
                try: self.tempcb(line)
                except: pass
            elif line.startswith('Error'):
                if self.errorcb:
                    #callback for errors
                    try: self.errorcb(line)
                    except: pass
            # Teststrings for resend parsing       # Firmware     exp. result
            # line="rs N2 Expected checksum 67"    # Teacup       2
            if line.lower().startswith("resend") or line.startswith("rs"):
                for haystack in ["N:", "N", ":"]:
                    line = line.replace(haystack, " ")
                linewords = line.split()
                while len(linewords) != 0:
                    try:
                        toresend = int(linewords.pop(0))
                        self.resendfrom = toresend
                        break
                    except ValueError:
                        pass
                self.clear = True
        self.clear = True

    def _start_sender(self):
        self.stop_send_thread = False
        self.send_thread = Thread(target = self._sender)
        self.send_thread.start()

    def _stop_sender(self):
        if self.send_thread:
            self.stop_send_thread = True
            self.send_thread.join()
            self.send_thread = None

    def _sender(self):
        while not self.stop_send_thread:
            try:
                command = self.priqueue.get(True, 0.1)
            except QueueEmpty:
                continue
            # wait for the firmware ack ("clear") before sending
            while self.printer and self.printing and not self.clear:
                time.sleep(0.001)
            self._send(command)
            # and again afterwards, so priority sends don't race the printer
            while self.printer and self.printing and not self.clear:
                time.sleep(0.001)

    def _checksum(self, command):
        # RepRap-style G-code checksum: XOR of every byte in the line
        return reduce(lambda x, y: x ^ y, map(ord, command))

    def startprint(self, gcode, startindex = 0):
        """Start a print, gcode is an array of gcode commands.
        returns True on success, False if already printing.
        The print queue will be replaced with the contents of the data array,
        the next line will be set to 0 and the firmware notified. Printing
        will then start in a parallel thread.
        """
        if self.printing or not self.online or not self.printer:
            return False
        self.printing = True
        self.mainqueue = gcode
        self.lineno = 0
        self.queueindex = startindex
        self.resendfrom = -1
        self._send("M110", -1, True)
        if not gcode.lines:
            return True
        self.clear = False
        resuming = (startindex != 0)
        self.print_thread = Thread(target = self._print,
                                   kwargs = {"resuming": resuming})
        self.print_thread.start()
        return True

    # run a simple script if it exists, no multithreading
    def runSmallScript(self, filename):
        if filename is None: return
        try:
            with open(filename) as f:
                for i in f:
                    l = i.replace("\n", "")
                    l = l.split(";")[0]  # strip comments (find() would drop the last char when no ";" exists)
                    self.send_now(l)
        except IOError:
            pass

    def pause(self):
        """Pauses the print, saving the current position.
        """
        if not self.printing: return False
        self.paused = True
        self.printing = False

        # try joining the print thread: enclose it in try/except because we
        # might be calling it from the thread itself
        try:
            self.print_thread.join()
        except:
            pass

        self.print_thread = None

        # saves the status
        self.pauseX = self.analyzer.x - self.analyzer.xOffset
        self.pauseY = self.analyzer.y - self.analyzer.yOffset
        self.pauseZ = self.analyzer.z - self.analyzer.zOffset
        self.pauseE = self.analyzer.e - self.analyzer.eOffset
        self.pauseF = self.analyzer.f
        self.pauseRelative = self.analyzer.relative

    def resume(self):
        """Resumes a paused print.
        """
        if not self.paused: return False
        # restores the status
        self.send_now("G90")  # go to absolute coordinates

        xyFeedString = ""
        zFeedString = ""
        if self.xy_feedrate is not None:
            xyFeedString = " F" + str(self.xy_feedrate)
        if self.z_feedrate is not None:
            zFeedString = " F" + str(self.z_feedrate)

        self.send_now("G1 X%s Y%s%s" % (self.pauseX, self.pauseY,
                                        xyFeedString))
        self.send_now("G1 Z" + str(self.pauseZ) + zFeedString)
        self.send_now("G92 E" + str(self.pauseE))

        # go back to relative if needed
        if self.pauseRelative: self.send_now("G91")
        # reset old feed rate
        self.send_now("G1 F" + str(self.pauseF))

        self.paused = False
        self.printing = True
        self.print_thread = Thread(target = self._print,
                                   kwargs = {"resuming": True})
        self.print_thread.start()

    def send(self, command, wait = 0):
        """Adds a command to the checksummed main command queue if printing, or
        sends the command immediately if not printing"""

        if self.online:
            if self.printing:
                self.mainqueue.append(command)
            else:
                self.priqueue.put_nowait(command)
        else:
            print "Not connected to printer."

    def send_now(self, command, wait = 0):
        """Sends a command to the printer ahead of the command queue, without a
        checksum"""
        if self.online:
            self.priqueue.put_nowait(command)
        else:
            print "Not connected to printer."

    def _print(self, resuming = False):
        self._stop_sender()
        try:
            if self.startcb:
                #callback for printing started
                try: self.startcb(resuming)
                except:
                    print "Print start callback failed with:"
                    traceback.print_exc(file = sys.stdout)
            while self.printing and self.printer and self.online:
                self._sendnext()
            self.sentlines = {}
            self.log.clear()
            self.sent = []
            if self.endcb:
                #callback for printing done
                try: self.endcb()
                except:
                    print "Print end callback failed with:"
                    traceback.print_exc(file = sys.stdout)
        except:
            print "Print thread died due to the following error:"
            traceback.print_exc(file = sys.stdout)
        finally:
            self.print_thread = None
            self._start_sender()

    #now only "pause" is implemented as host command
    def processHostCommand(self, command):
        command = command.lstrip()
        if command.startswith(";@pause"):
            if self.pronterface is not None:
                self.pronterface.pause(None)
            else:
                self.pause()

    def _sendnext(self):
        if not self.printer:
            return
        while self.printer and self.printing and not self.clear:
            time.sleep(0.001)
        self.clear = False
        if not (self.printing and self.printer and self.online):
            self.clear = True
            return
        if self.resendfrom < self.lineno and self.resendfrom > -1:
            self._send(self.sentlines[self.resendfrom], self.resendfrom, False)
            self.resendfrom += 1
            return
        self.resendfrom = -1
        if not self.priqueue.empty():
            self._send(self.priqueue.get_nowait())
            self.priqueue.task_done()
            return
        if self.printing and self.queueindex < len(self.mainqueue):
            (layer, line) = self.mainqueue.idxs(self.queueindex)
            gline = self.mainqueue.all_layers[layer][line]
            if self.layerchangecb and self.queueindex > 0:
                (prev_layer, prev_line) = self.mainqueue.idxs(self.queueindex - 1)
                if prev_layer != layer:
                    try: self.layerchangecb(layer)
                    except: traceback.print_exc()
            if self.preprintsendcb:
                if self.queueindex + 1 < len(self.mainqueue):
                    (next_layer, next_line) = self.mainqueue.idxs(self.queueindex + 1)
                    next_gline = self.mainqueue.all_layers[next_layer][next_line]
                else:
                    next_gline = None
                gline = self.preprintsendcb(gline, next_gline)
            if gline is None:
                self.queueindex += 1
                self.clear = True
                return
            tline = gline.raw
            if tline.lstrip().startswith(";@"):  # check for host command
                self.processHostCommand(tline)
                self.queueindex += 1
                self.clear = True
                return

            tline = tline.split(";")[0]
            if len(tline) > 0:
                self._send(tline, self.lineno, True)
                self.lineno += 1
                if self.printsendcb:
                    try: self.printsendcb(gline)
                    except: traceback.print_exc()
            else:
                self.clear = True
            self.queueindex += 1
        else:
            self.printing = False
            self.clear = True
            if not self.paused:
                self.queueindex = 0
                self.lineno = 0
                self._send("M110", -1, True)

    def _send(self, command, lineno = 0, calcchecksum = False):
        if calcchecksum:
            prefix = "N" + str(lineno) + " " + command
            command = prefix + "*" + str(self._checksum(prefix))
            if "M110" not in command:
                self.sentlines[lineno] = command
        if self.printer:
            self.sent.append(command)
            # run the command through the analyzer
            try: self.analyzer.Analyze(command)
            except:
                print "Warning: could not analyze command %s:" % command
                traceback.print_exc(file = sys.stdout)
            if self.loud:
                print "SENT:", command
            if self.sendcb:
                try: self.sendcb(command)
                except: pass
            try:
                self.printer.write(str(command + "\n"))
                if self.printer_tcp: self.printer.flush()
                self.writefailures = 0
            except socket.error as e:
                print _("Can't write to printer (disconnected?) (Socket error {0}): {1}").format(e.errno, e.strerror)
                self.writefailures += 1
            except SerialException as e:
                print _("Can't write to printer (disconnected?) (SerialException): {0}").format(e)
                self.writefailures += 1
            except RuntimeError as e:
                # RuntimeError has no errno/strerror, so just print it
                print _("Socket connection broken, disconnected: {0}").format(e)
                self.writefailures += 1
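
# Usage sketch (hedged): drives the sender API defined above. The connect()
# call and the gcoder-style GCode container are assumptions taken from
# context; only send_now(), startprint(), pause() and resume() appear in
# this excerpt.
import time

def demo_print(p, gcode):
    # p: an already-connected, online instance of the class above
    p.send_now("M105")       # jumps ahead of the main queue, no checksum
    if p.startprint(gcode):  # checksummed main queue, printed in a thread
        time.sleep(5)
        p.pause()            # saves X/Y/Z/E/F and relative-mode state
        p.resume()           # restores the saved state and keeps printing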
Ejemplo n.º 44
0
class ZUsageStatisticsService(IZService, IZDataStoreListener, IZMediaStorageServiceListener, IZAccountStoreListener, IZRunnable):

    def __init__(self):
        self.serializer = ZUsagePacketSerializer()
        self.logger = None
        self.running = False
        self.version = ZVersion()
        self.profileGuid = getApplicationModel().getUserProfile().getGuid()
        self.usageDir = None
    # end __init__()

    def start(self, applicationModel):
        self.queue = Queue(0)
        userProfile = applicationModel.getUserProfile()
        self.usageDir = userProfile.getDirectory(u"usage") #$NON-NLS-1$

        engine = applicationModel.getEngine()
        self.logger = engine.getService(IZBlogAppServiceIDs.LOGGER_SERVICE_ID)
        self.logger.debug(u"Anonymous Usage Statistics Service started.") #$NON-NLS-1$

        accountStore = engine.getService(IZBlogAppServiceIDs.ACCOUNT_STORE_SERVICE_ID)
        dataStore = engine.getService(IZBlogAppServiceIDs.DATA_STORE_SERVICE_ID)
        mediaStoreService = engine.getService(IZBlogAppServiceIDs.MEDIA_STORAGE_SERVICE_ID)

        accountStore.addListener(self)
        dataStore.addListener(self)
        mediaStoreService.addListener(self)

        self.done = False
        self.running = True
        thread = ZThread(self, u"ZUsageStatisticsService", True) #$NON-NLS-1$
        thread.start()
    # end start()

    def stop(self):
        self.logger = None
        packet = self._createPacket(IZUsagePacketTypes.EXIT_SERVICE)
        self.queue.put_nowait(packet)
        # busy-wait until run() consumes the EXIT packet and clears the flag
        while self.running:
            pass
    # end stop()

    def run(self):
        done = False
        while not done:
            packet = self.queue.get()
            if packet.getType() == IZUsagePacketTypes.EXIT_SERVICE:
                done = True
            else:
                self._savePacket(packet)
        self.running = False
    # end run()

    def _savePacket(self, packet):
        packetDom = self.serializer.serialize(packet)
        fname = os.path.join(self.usageDir, packet.getId() + u".xml") #$NON-NLS-1$
        packetDom.save(fname, True)
    # end _savePacket()
#
#    def onDocumentAdded(self, document):
#        self.logger.debug(u"Document added.")
#    # end onDocumentAdded()
#
#    def onDocumentChanged(self, document, metaDataOnly):
#        self.logger.debug(u"Document changed.")
#    # end onDocumentChange()
#
#    def onDocumentDeleted(self, document):
#        self.logger.debug(u"Document deleted.")
#    # end onDocumentDeleted()

    def onMediaStorageAdded(self, mediaStore):
        packet = self._createPacket(IZUsagePacketTypes.NEW_MEDIA_STORE)
        packet.addAttribute(u"mediastorage.site-id", mediaStore.getMediaSiteId()) #$NON-NLS-1$
        packet.addAttribute(u"mediastorage.id-hash", hash(mediaStore.getId())) #$NON-NLS-1$
        self.queue.put_nowait(packet)
    # end onMediaStorageAdded()

    def onMediaStorageRemoved(self, mediaStore):
        packet = self._createPacket(IZUsagePacketTypes.DELETE_MEDIA_STORE)
        packet.addAttribute(u"mediastorage.site-id", mediaStore.getMediaSiteId()) #$NON-NLS-1$
        packet.addAttribute(u"mediastorage.id-hash", hash(mediaStore.getId())) #$NON-NLS-1$
        self.queue.put_nowait(packet)
    # end onMediaStorageRemoved()

    def onAccountAdded(self, account):
        packet = self._createPacket(IZUsagePacketTypes.NEW_ACCOUNT)
        if isinstance(account, IZBlogAccount):
            apiInfo = account.getAPIInfo()
            packet.addAttribute(u"account.type", apiInfo.getType()) #$NON-NLS-1$
            packet.addAttribute(u"account.id-hash", unicode(hash(account.getId()))) #$NON-NLS-1$
        self.queue.put_nowait(packet)
    # end onAccountAdded()

    def onAccountChanged(self, account): #@UnusedVariable
        pass
    # end onAccountChange()

    def onAccountDeleted(self, account):
        # FIXME: reuses NEW_ACCOUNT; a dedicated delete-account packet type
        #   seems intended here.
        packet = self._createPacket(IZUsagePacketTypes.NEW_ACCOUNT)
        if isinstance(account, IZBlogAccount):
            apiInfo = account.getAPIInfo()
            packet.addAttribute(u"account.type", apiInfo.getType()) #$NON-NLS-1$
            packet.addAttribute(u"account.id-hash", unicode(hash(account.getId()))) #$NON-NLS-1$
        self.queue.put_nowait(packet)
    # end onAccountDeleted()

    def _createPacket(self, type):
        packet = ZUsagePacket(type)
        packet.addAttribute(u"global.app-version", self.version.getFullVersionString()) #$NON-NLS-1$
        packet.addAttribute(u"global.profile-guid", self.profileGuid) #$NON-NLS-1$
        return packet
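
# Usage sketch: the service above implements the classic sentinel-shutdown
# pattern -- a worker blocks on queue.get() until a special EXIT packet
# arrives. A framework-free rendition of the same pattern, for illustration
# only (EXIT stands in for IZUsagePacketTypes.EXIT_SERVICE):
from Queue import Queue
from threading import Thread

EXIT = object()  # sentinel value

def worker(q):
    while True:
        item = q.get()
        if item is EXIT:
            break
        print "processing", item  # stands in for _savePacket()

q = Queue(0)
t = Thread(target=worker, args=(q,))
t.start()
q.put_nowait("packet-1")
q.put_nowait(EXIT)
t.join()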
Ejemplo n.º 45
0
class ExecThread(threading.Thread):
    """Thread that does the execution. It can accept options with an execution, and queues execs if necessary."""
    def __init__(self):
        threading.Thread.__init__(self)
        self.daemon = True
        self.queue = Queue()

    def run(self):
        from flexget.ui.webui import manager
        while True:
            kwargs = self.queue.get() or {}
            opts = kwargs.pop('options', None)
            parsed_options = kwargs.pop('parsed_options', None)
            output = kwargs.pop('output', None)
            if opts:
                # make copy of original options and apply options from opts
                old_opts = copy(manager.options)
                self._apply_options(manager.options, opts)
            if parsed_options:
                old_opts = manager.options
                manager.options = parsed_options
            if output:
                old_stdout = sys.stdout
                old_stderr = sys.stderr
                sys.stdout = output
                sys.stderr = output
                # TODO: Use a filter to capture only the logging for this execution
                streamhandler = logging.StreamHandler(output)
                streamhandler.setFormatter(FlexGetFormatter())
                logging.getLogger().addHandler(streamhandler)
            try:
                manager.execute(**kwargs)
            finally:
                # Inform queue we are done processing this item.
                self.queue.task_done()
                # Restore manager's previous options and stdout
                if opts or parsed_options:
                    manager.options = old_opts
                if output:
                    # Write EOF to the output, so that a listener knows when the output is over
                    output.write('EOF')
                    sys.stdout = old_stdout
                    sys.stderr = old_stderr
                    logging.getLogger().removeHandler(streamhandler)

    def _apply_options(self, parser, options):
        """Applies dict :options: to ArgParse parser results"""

        for name, value in options.iteritems():
            if hasattr(parser, name):
                log.debug('setting options %s to %s' % (name, value))
                setattr(parser, name, value)
            else:
                log.error('Option %s does not exist, ignoring it' % name)

    def execute(self, **kwargs):
        """
        Adds an execution to the queue.

        keyword arguments:

        options: Dict containing option, value pairs for this execution
        parsed_options: Parsed OptionParser to be used for this execution
        output: a BufferQueue object that will be filled with output from the execution.

        all other keyword arguments will be passed to manager.execute
        kwargs options and parsed_options are mutually exclusive
        """

        if 'options' in kwargs and not isinstance(kwargs['options'], dict):
            raise TypeError('options should be a dict, got %s' %
                            type(kwargs['options']))

        if 'tasks' in kwargs and not hasattr(kwargs['tasks'], '__iter__'):
            raise TypeError('tasks should be iterable, got %s' %
                            type(kwargs['tasks']))

        if 'options' in kwargs and 'parsed_options' in kwargs:
            raise ValueError(
                'options and parsed_options are mutually exclusive')

        if kwargs.get('output') and self.queue.unfinished_tasks:
            kwargs['output'].write(
                'There is already an execution running. ' +
                'This execution will start when the previous completes.')
        self.queue.put_nowait(kwargs)
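
# Usage sketch (hedged): how the queueing executor above might be driven
# from FlexGet's web UI code. The BufferQueue output object and the manager
# import are part of FlexGet and assumed from context, not defined in this
# excerpt.
exec_thread = ExecThread()
exec_thread.start()

# queue two executions; the second starts when the first completes
exec_thread.execute(options={'learn': True})
exec_thread.execute(tasks=['my-task'])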
Ejemplo n.º 46
0
class DragonflyWrapper(object):
    def __init__(self):
        engine = Sapi5InProcEngine()
        engine.connect()

        engine.speak('Speak recognition active!')
        logger.info('Speak recognition active!')

        self._results = Queue()

        self._grammar = None

    def process_recognition_failure(self):
        logger.info('Grammar:process_recognition_failure')

    def set_grammar(self, spec, choices_values):
        spec_truncated = (spec[:75] + '...') if len(spec) > 75 else spec

        logger.info('Set Grammar: %s %s', spec_truncated, choices_values)

        # TODO: cache the rule
        assert self._results.empty()

        if self._grammar:
            # remove the old rule
            self._grammar.unload()

        self._grammar = Grammar("G")
        # attach failure callback
        self._grammar.process_recognition_failure = self.process_recognition_failure

        rule = self._make_rule(spec, choices_values, self._result_callback)
        self._grammar.add_rule(rule)

        self._grammar.load()
        winsound.PlaySound(data_path + "/grammar_loaded.wav",
                           winsound.SND_ASYNC)

        logger.info("Grammar loaded")

    def spin_once(self):
        pythoncom.PumpWaitingMessages()

    @property
    def results(self):
        return self._results

    @staticmethod
    def _make_rule(spec, choices_values, callback):
        # build the dragonfly request
        extras = []
        for name, choices in choices_values.iteritems():
            extras.append(Choice(name, dict((c, c) for c in choices)))

        class CustomRule(CompoundRule):
            def _process_recognition(self, node, extras):
                logger.info('_process_recognition callback: %s', str(node))
                callback(node, extras)

            def _process_begin(self):
                logger.debug('Rule:__process_begin')

        rule = CustomRule(spec=spec, extras=extras)
        return rule

    def _result_callback(self, node, extras):
        self._results.put_nowait(Result(node=node, extras=extras))
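
# Usage sketch (hedged): a typical pump loop for the wrapper above. The
# queue drained here is filled by _result_callback; Result, Grammar and the
# dragonfly SAPI5 engine are assumed from the surrounding module.
from Queue import Empty

def recognition_loop(wrapper):
    wrapper.set_grammar("open <target>", {'target': ['file', 'folder']})
    while True:
        wrapper.spin_once()          # pump pending COM messages for SAPI5
        try:
            result = wrapper.results.get_nowait()
            print "recognized:", result
        except Empty:
            pass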
Ejemplo n.º 47
0
class MetricQueue(object):
    """ Object to which various metrics are written, for distribution to metrics collection
      system(s) such as Prometheus.
  """
    def __init__(self, prom):
        # Define the various exported metrics.
        self.resp_time = prom.create_histogram(
            'response_time',
            'HTTP response time in seconds',
            labelnames=['endpoint'],
            buckets=API_RESPONSE_TIME_BUCKETS)
        self.resp_code = prom.create_counter('response_code',
                                             'HTTP response code',
                                             labelnames=['endpoint', 'code'])
        self.non_200 = prom.create_counter('response_non200',
                                           'Non-200 HTTP response codes',
                                           labelnames=['endpoint'])
        self.error_500 = prom.create_counter('response_500',
                                             '5XX HTTP response codes',
                                             labelnames=['endpoint'])
        self.multipart_upload_start = prom.create_counter(
            'multipart_upload_start', 'Multipart upload started')
        self.multipart_upload_end = prom.create_counter(
            'multipart_upload_end',
            'Multipart upload ends.',
            labelnames=['type'])
        self.build_capacity_shortage = prom.create_gauge(
            'build_capacity_shortage', 'Build capacity shortage.')
        self.builder_time_to_start = prom.create_histogram(
            'builder_tts',
            'Time from triggering to starting a builder.',
            labelnames=['builder_type'],
            buckets=BUILDER_START_TIME_BUCKETS)
        self.builder_time_to_build = prom.create_histogram(
            'builder_ttb',
            'Time from triggering to actually starting a build',
            labelnames=['builder_type'],
            buckets=BUILDER_START_TIME_BUCKETS)
        self.build_time = prom.create_histogram('build_time',
                                                'Time spent building',
                                                labelnames=['builder_type'])
        self.builder_fallback = prom.create_counter(
            'builder_fallback', 'Builder fell back to secondary executor')
        self.build_start_success = prom.create_counter(
            'build_start_success',
            'Executor succeeded in starting a build',
            labelnames=['builder_type'])
        self.build_start_failure = prom.create_counter(
            'build_start_failure',
            'Executor failed to start a build',
            labelnames=['builder_type'])
        self.percent_building = prom.create_gauge('build_percent_building',
                                                  'Percent building.')
        self.build_counter = prom.create_counter('builds',
                                                 'Number of builds',
                                                 labelnames=['name'])
        self.ephemeral_build_workers = prom.create_counter(
            'ephemeral_build_workers',
            'Number of started ephemeral build workers')
        self.ephemeral_build_worker_failure = prom.create_counter(
            'ephemeral_build_worker_failure',
            'Number of failed-to-start ephemeral build workers')

        self.work_queue_running = prom.create_gauge('work_queue_running',
                                                    'Running items in a queue',
                                                    labelnames=['queue_name'])
        self.work_queue_available = prom.create_gauge(
            'work_queue_available',
            'Available items in a queue',
            labelnames=['queue_name'])

        self.work_queue_available_not_running = prom.create_gauge(
            'work_queue_available_not_running',
            'Available items that are not yet running',
            labelnames=['queue_name'])

        self.repository_pull = prom.create_counter(
            'repository_pull',
            'Repository Pull Count',
            labelnames=['namespace', 'repo_name', 'protocol', 'status'])

        self.repository_push = prom.create_counter(
            'repository_push',
            'Repository Push Count',
            labelnames=['namespace', 'repo_name', 'protocol', 'status'])

        self.repository_build_queued = prom.create_counter(
            'repository_build_queued',
            'Repository Build Queued Count',
            labelnames=['namespace', 'repo_name'])

        self.repository_build_completed = prom.create_counter(
            'repository_build_completed',
            'Repository Build Complete Count',
            labelnames=['namespace', 'repo_name', 'status', 'executor'])

        self.chunk_size = prom.create_histogram('chunk_size',
                                                'Registry blob chunk size',
                                                labelnames=['storage_region'])

        self.chunk_upload_time = prom.create_histogram(
            'chunk_upload_time',
            'Registry blob chunk upload time',
            labelnames=['storage_region'])

        self.authentication_count = prom.create_counter(
            'authentication_count',
            'Authentication count',
            labelnames=['kind', 'status'])

        self.repository_count = prom.create_gauge('repository_count',
                                                  'Number of repositories')
        self.user_count = prom.create_gauge('user_count', 'Number of users')
        self.org_count = prom.create_gauge('org_count',
                                           'Number of Organizations')
        self.robot_count = prom.create_gauge('robot_count',
                                             'Number of robot accounts')

        self.instance_key_renewal_success = prom.create_counter(
            'instance_key_renewal_success',
            'Instance Key Renewal Success Count',
            labelnames=['key_id'])

        self.instance_key_renewal_failure = prom.create_counter(
            'instance_key_renewal_failure',
            'Instance Key Renewal Failure Count',
            labelnames=['key_id'])

        self.invalid_instance_key_count = prom.create_counter(
            'invalid_registry_instance_key_count',
            'Invalid registry instance key count',
            labelnames=['key_id'])

        self.verb_action_passes = prom.create_counter(
            'verb_action_passes',
            'Verb Pass Count',
            labelnames=['kind', 'pass_count'])

        self.push_byte_count = prom.create_counter(
            'registry_push_byte_count',
            'Number of bytes pushed to the registry')

        self.pull_byte_count = prom.create_counter(
            'estimated_registry_pull_byte_count',
            'Number of (estimated) bytes pulled from the registry',
            labelnames=['protocol_version'])

        # Deprecated: Define an in-memory queue for reporting metrics to CloudWatch or another
        # provider.
        self._queue = None

    def enable_deprecated(self, maxsize=10000):
        self._queue = Queue(maxsize)

    def put_deprecated(self, name, value, **kwargs):
        if self._queue is None:
            logger.debug('No metric queue %s %s %s', name, value, kwargs)
            return

        try:
            kwargs.setdefault('timestamp', datetime.datetime.now())
            kwargs.setdefault('dimensions', {})
            self._queue.put_nowait((name, value, kwargs))
        except Full:
            logger.error('Metric queue full')

    def get_deprecated(self):
        return self._queue.get()

    def get_nowait_deprecated(self):
        return self._queue.get_nowait()
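
# Usage sketch: the deprecated in-memory queue is a plain producer/consumer
# channel. The `prom` factory passed to the constructor is assumed from the
# surrounding module; only the queue half is exercised here.
def drain_deprecated(metric_queue):
    metric_queue.enable_deprecated(maxsize=100)
    metric_queue.put_deprecated('request_count', 1, dimensions={'region': 'us'})
    name, value, kwargs = metric_queue.get_nowait_deprecated()
    print name, value, kwargs['dimensions']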
Ejemplo n.º 48
0
class paka(object):
    # Pass the constructor parameters straight through to the underlying
    # pika connection.
    def __init__(self, *args, **argv):
        self.parameters = paka_param(*args, **argv)
        self.routing_key = ''
        self._typ = ''
        self.queue = ''
        self.exchange = ''
        self.connection = None
        self.channel = None
        self.running = False
        self._queue = Queue()

    def blocking_error_out(self, *args, **argv):
        if self.connection:
            self.connection.socket.close()
            self.connection._on_connection_closed(None, True)
            self.connection = None

        func = object.__getattribute__(self, 'connect')
        routing_key = object.__getattribute__(self, 'routing_key')
        _typ = object.__getattribute__(self, '_typ')
        exchange = object.__getattribute__(self, 'exchange')
        func(exchange, routing_key, _typ)

    def basic_publish(self,
                      exchange,
                      routing_key,
                      body,
                      properties=None,
                      mandatory=False,
                      immediate=False):
        if not self.running:
            self._queue.put_nowait({
                'exchange': exchange,
                'routing_key': routing_key,
                'body': body,
                'properties': properties,
                'mandatory': mandatory,
                'immediate': immediate
            })
            return  # buffered; connect() replays the queue once reconnected
        try:
            # publish on the channel: in pika, basic_publish lives on the
            # channel, not the connection
            return self.channel.basic_publish(exchange, routing_key, body,
                                              properties, mandatory,
                                              immediate)
        except:
            self._queue.put({
                'exchange': exchange,
                'routing_key': routing_key,
                'body': body,
                'properties': properties,
                'mandatory': mandatory,
                'immediate': immediate
            })

    def connect(self,
                exchange='',
                routing_key='',
                _typ='fanout',
                queue='OtherQueueTest'):
        self.routing_key = routing_key
        self._typ = _typ
        self.queue = queue
        self.exchange = exchange
        try:
            self.connection = pika.BlockingConnection(
                self.parameters.get_param())
            self.connection._handle_disconnect = self.blocking_error_out
            self.channel = self.connection.channel()
            self.channel.exchange_declare(exchange=exchange, type=_typ)
            self.channel.queue_bind(exchange=exchange, queue=queue)
            self.running = True
            while not self._queue.empty():
                info = self._queue.get_nowait()
                if info:
                    self.basic_publish(info['exchange'], info['routing_key'],
                                       info['body'], info['properties'],
                                       info['mandatory'], info['immediate'])
        except AMQPChannelError:
            self.running = False
            self.blocking_error_out()
        except error as e:
            self.running = False
            # the connection was interrupted; reconnect once
            self.blocking_error_out()
        except:
            self.running = False
            self.blocking_error_out()
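
# Usage sketch (hedged): publishes through the buffering wrapper above.
# The paka_param helper and the broker details are assumptions from the
# surrounding module, not shown in this excerpt.
client = paka(host='localhost')
client.connect(exchange='logs', _typ='fanout', queue='OtherQueueTest')
# messages published while disconnected are buffered in _queue and
# replayed by connect() after reconnecting
client.basic_publish('logs', '', 'hello world')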
Ejemplo n.º 49
0
class IBusHandler(object):
    def __init__(self):

        self.serial_port = serial.Serial()
        self.serial_port.baudrate = 9600
        self.serial_port.parity = serial.PARITY_EVEN
        self.serial_port.stopbits = serial.STOPBITS_ONE
        self.serial_port.timeout = 0.001
        self.rts_state = False

        self.read_buffer = []
        self.read_lock = Lock()
        self.read_error_counter = 0
        self.read_error_container = []
        self.cancel_read_thread = False
        self.read_thread = Thread(target=self._reading)
        self.read_thread.daemon = True
        self.packet_buffer = Queue()

        self.cts_counter = 0.0
        self.cts_thread = Thread(target=self._cts_watcher)
        self.cts_thread.daemon = True

        self.write_buffer = PriorityQueue()
        self.write_counter = 0

        self.cancel_write_thread = False
        self.write_thread = Thread(target=self._writing)
        self.write_thread.daemon = True

    def __enter__(self):
        self.connect()
        return self

    def __exit__(self, *_args):
        self.disconnect()

    @property
    def ntsc(self):
        return self.rts_state

    @ntsc.setter
    def ntsc(self, value):
        if self.rts_state == value:
            return
        self.serial_port.setRTS(value)
        self.rts_state = value

    @staticmethod
    def _calculate_checksum(packet):
        result = 0
        for value in packet:
            result ^= value
        return result

    def _cut_read_buffer(self, offset=1):
        with self.read_lock:
            self.read_buffer = self.read_buffer[offset:]

    def _wait_free_bus(self, waiting=17, timeout=1000):
        if waiting >= timeout:
            log('Error: Waiting Time (%sms) must be smaller than Timeout '
                'Time (%sms)' % (waiting, timeout))
            return False

        for _ in xrange(timeout):
            if self.cts_counter >= waiting:
                return True
            time.sleep(0.001)

        return False

    def _reading(self):
        while not self.cancel_read_thread:
            data = self.serial_port.read(50)
            if data:
                with self.read_lock:
                    self.read_buffer.extend(ord(x) for x in data)
            self._read_bus_packet()

        log('Read/Write Thread finished')

    def _writing(self):
        # stops on cancel_read_thread: disconnect() raises both cancel
        # flags, so reader and writer shut down together
        while not self.cancel_read_thread:
            try:
                prio, write_counter, data = self.write_buffer.get(timeout=1)
                try:
                    while self.cts_counter < 7.0:
                        time.sleep(0.001)

                    self.serial_port.write(data)
                    self.cts_counter = 0.0
                    self.serial_port.flush()
                    self.cts_counter = 0.0
                    log('\033[1;33;40mWRITE:\033[0m %s' %
                        ' '.join('%02X' % i for i in data))
                except serial.SerialException:
                    self.write_buffer.put((prio, write_counter, data))
            except Empty:
                pass

    def _cts_watcher(self):
        while not self.cancel_read_thread:
            if self.serial_port.getCTS():
                self.cts_counter += 0.1
            else:
                self.cts_counter = 0.0
            # sleep in both branches so a low CTS line does not busy-spin
            time.sleep(0.0001)

        log('CTS Thread finished')

    def set_port(self, device_path):
        self.serial_port.port = device_path

    def connect(self):
        self.serial_port.open()
        self.serial_port.setRTS(0)
        self.cts_thread.start()

        if not self._wait_free_bus(120, 3000):
            log('Error: Can not locate free Bus')
            raise RuntimeError('can not locate free bus')

        self.serial_port.flushInput()

        self.read_thread.start()
        self.write_thread.start()

    def disconnect(self):
        self.cancel_write_thread = True
        self.cancel_read_thread = True
        self.ntsc = False
        time.sleep(0.6)
        self.serial_port.close()
        log('disconnected')

    def read_bus_packet(self):
        try:
            return self.packet_buffer.get(timeout=1)
        except Empty:
            return None

    def _read_bus_packet(self):
        if self.cancel_read_thread:
            return None

        try:
            data_length = self.read_buffer[1]
        except IndexError:
            return None

        if not 3 <= data_length <= 37:
            self.read_error_container.append(self.read_buffer[0])
            self._cut_read_buffer()
            return None

        buffer_len = len(self.read_buffer)
        if buffer_len < 5 or buffer_len < data_length + 2:
            return None

        message = self.read_buffer[:data_length + 2]

        if self._calculate_checksum(message) == 0:
            if self.read_error_container:
                error_hex_string = ' '.join('%02X' % i
                                            for i in self.read_error_container)
                log('READ-ERR: %s' % error_hex_string)
                self.read_error_counter += len(self.read_error_container)
                self.read_error_container = []

            self._cut_read_buffer(data_length + 2)

            self.packet_buffer.put_nowait({
                'src': message[0],
                'len': data_length,
                'dst': message[2],
                'data': message[3:data_length + 1],
                'xor': message[-1]
            })
        else:
            self.read_error_container.append(self.read_buffer[0])
            self._cut_read_buffer()
            return None

    def write_bus_packet(self,
                         src,
                         dst,
                         data,
                         highprio=False,
                         veryhighprio=False,
                         repeat=1):
        # Accept ints directly, or hex strings which get converted below.
        try:
            packet = [src, len(data) + 2, dst]
            packet.extend(data)
        except (TypeError, ValueError):
            packet = [int(src, 16), len(data) + 2, int(dst, 16)]
            packet.extend([int(s, 16) for s in data])

        packet.append(self._calculate_checksum(packet))
        for _ in xrange(repeat):
            self.write_buffer.put_nowait(
                (0 if highprio or veryhighprio else 1,
                 0 if veryhighprio else self.write_counter, bytearray(packet)))
            self.write_counter += 1

    def write_hex_message(self, hexstring):
        hexstring_tmp = hexstring.upper().split(' ')
        src = int(hexstring_tmp[0], 16)
        dst = int(hexstring_tmp[2], 16)
        data = [int(s, 16) for s in hexstring_tmp[3:-1]]
        self.write_bus_packet(src, dst, data)
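
# Usage sketch (hedged): reading and writing I-Bus packets with the handler
# above. The device path is an assumption; the hex message layout is
# "SRC LEN DST DATA... XOR", as parsed by write_hex_message().
handler = IBusHandler()
handler.set_port('/dev/ttyUSB0')  # hypothetical serial device
with handler:                     # __enter__ connects, __exit__ disconnects
    handler.write_hex_message('50 04 68 32 11 1F')
    packet = handler.read_bus_packet()
    if packet:
        print 'from %02X to %02X: %s' % (packet['src'], packet['dst'],
                                         packet['data'])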
Ejemplo n.º 50
0
class WriteWarp10(object):
    def __init__(self, url, token, flush_interval, flush_retry_interval,
                 buffer_size, default_labels, rewrite_rules, rewrite_limit):
        self.url = url
        self.token = token
        self.flush_interval = flush_interval
        self.flush_retry_interval = flush_retry_interval
        self.buffer_size = buffer_size
        self.default_labels = default_labels
        self.rewrite_rules = rewrite_rules
        self.rewrite_limit = rewrite_limit

        self.queue = Queue(buffer_size)
        self.flush_timer = None

    @staticmethod
    def config(cfg):
        # Handle legacy config (not multiple-endpoint capable)
        if not any([n.key == 'Endpoint' for n in cfg.children]):
            # Create fake intermediary Endpoint node
            cfg.children = (collectd.Config('Endpoint', cfg, ('default', ),
                                            cfg.children), )

        endpoints = []
        for node in cfg.children:
            if node.key == 'Endpoint':
                endpoint = WriteWarp10.config_endpoint(node)
                if endpoint:
                    if any(e['name'] == endpoint['name'] for e in endpoints):
                        collectd.warning('write_warp10 plugin: Duplicate '
                                         'endpoint: %s' % endpoint['name'])
                    else:
                        endpoints.append(endpoint)
            else:
                collectd.warning('write_warp10 plugin: Unknown config key: '
                                 '%s' % node.key)

        if endpoints:
            for e in endpoints:
                ww10 = WriteWarp10(e['url'], e['token'], e['flush_interval'],
                                   e['flush_retry_interval'], e['buffer_size'],
                                   e['default_labels'], e['rewrite_rules'],
                                   e['rewrite_limit'])
                collectd.info('write_warp10 plugin: register init write and '
                              'shutdown functions')
                collectd.register_init(ww10.init,
                                       name='write_warp10/%s' % e['name'])
                collectd.register_write(ww10.write,
                                        name='write_warp10/%s' % e['name'])
                collectd.register_shutdown(ww10.shutdown,
                                           name='write_warp10/%s' % e['name'])
        else:
            collectd.warning('write_warp10 plugin: No valid endpoints found')

    @staticmethod
    def config_endpoint(cfg):
        endpoint = {
            'name': None,
            'url': None,
            'token': None,
            'flush_interval': 30.0,
            'flush_retry_interval': 10.0,
            'buffer_size': 65536,
            'default_labels': {},
            'rewrite_rules': [],
            'rewrite_limit': 10
        }
        if len(cfg.values) == 1:
            endpoint['name'] = cfg.values[0]
        for node in cfg.children:
            if node.key == 'URL':
                endpoint['url'] = node.values[0]
            elif node.key == 'Token':
                endpoint['token'] = node.values[0]
            elif node.key == 'FlushInterval':
                endpoint['flush_interval'] = float(node.values[0])
            elif node.key == 'FlushRetryInterval':
                endpoint['flush_retry_interval'] = float(node.values[0])
            elif node.key == 'BufferSize':
                endpoint['buffer_size'] = int(node.values[0])
            elif node.key == 'DefaultLabel':
                endpoint['default_labels'][node.values[0]] = node.values[1]
            elif node.key == 'RewriteLimit':
                endpoint['rewrite_limit'] = int(node.values[0])
            elif node.key == 'RewriteRule':
                if len(node.values) not in [2, 3]:
                    collectd.warning('write_warp10 plugin: Invalid '
                                     'RewriteRule declaration: '
                                     '%s' % node.values)
                    continue
                rule = re.compile(r'%s' % node.values[0])
                rewrite = r'%s' % node.values[1]
                flags = []
                if len(node.values) == 3:
                    flags = [
                        r'%s' % f.strip() for f in node.values[2].split(',')
                        if f.strip()
                    ]
                endpoint['rewrite_rules'].append([rule, rewrite, flags])
            else:
                collectd.warning('write_warp10 plugin: Unknown config key for '
                                 'Endpoint: %s' % node.key)
        if not endpoint['name'] or not endpoint['url'] \
                or not endpoint['token']:
            collectd.warning('write_warp10 plugin: Missing name, URL or Token '
                             'config for Endpoint')
            endpoint = None

        return endpoint

    def init(self):
        self.flush_timer = Timer(self.flush_interval, self._flush_timer)
        # daemonize like the timers rescheduled in _flush_timer(), so an
        # unexpected exit is not blocked by the first timer
        self.flush_timer.daemon = True
        self.flush_timer.start()

    def write(self, vl, data=None):
        datasets = collectd.get_dataset(vl.type)
        for ds, value in zip(datasets, vl.values):
            if math.isnan(value):
                continue
            ds_name, ds_type, ds_min, ds_max = ds
            classname, new_labels = self._format(vl.plugin, vl.plugin_instance,
                                                 vl.type, vl.type_instance,
                                                 ds_name, ds_type)
            if classname is None:
                # Ignore classname that are unset (it's a feature from rewrite
                # rule to destroy a point)
                continue

            labels = self.default_labels.copy()
            labels.update(vl.meta)
            labels.update(new_labels)
            # Remove empty values
            labels = {
                k: str(v).strip()
                for k, v in labels.items() if v is not None and str(v).strip()
            }

            msg = '%d// %s{%s} %f' % (
                int(1000000 * vl.time),  # Microseconds
                classname,
                urllib.urlencode(labels).replace('&', ', '),
                value)

            try:
                self.queue.put_nowait(msg)
            except Full:
                collectd.warning('write_warp10 plugin: Buffer is full (%s '
                                 'elements) for endpoint "%s". The WARP '
                                 'endpoint may encounter issues. Otherwise, '
                                 'consider increasing BufferSize or reducing '
                                 'FlushInterval' %
                                 (self.queue.qsize(), self.url))

    def _format(self, *arr):
        classname = urllib.quote('.'.join(
            [x.strip() for x in arr if x.strip()]))
        labels = {}

        for _ in xrange(self.rewrite_limit):
            last = False
            next_round = False
            for rule, rewrite, flags in self.rewrite_rules:
                last = False
                next_round = False
                matches = re.match(rule, classname)
                if matches:
                    # Replacement
                    classname = re.sub(rule, rewrite, classname)

                    # Apply flags
                    for flag in flags:
                        if flag == 'F':
                            return None, None
                        elif flag == 'L':
                            last = True
                        elif flag == 'N':
                            next_round = True
                        elif flag.startswith('T:'):
                            lbl_name, lbl_value = flag[2:].split('=', 1)
                            for ma in re.findall(r'(\\[0-9]+)', lbl_name):
                                v = matches.group(int(ma[1:]))
                                lbl_name = lbl_name.replace(ma, v)
                            for ma in re.findall(r'(\\[0-9]+)', lbl_value):
                                v = matches.group(int(ma[1:]))
                                lbl_value = lbl_value.replace(ma, v)
                            labels[lbl_name] = lbl_value
                if last or next_round:
                    break
            else:
                last = True  # Implicit last if we reach end of rules

            if last and next_round:
                raise Exception('write_warp10 plugin: Incompatible rewrite '
                                'flags in the same rule: L and N')
            elif last:
                break
            elif next_round:
                pass
        else:
            raise Exception('write_warp10 plugin: Rewrite limit exceeded')

        return classname, labels

    def shutdown(self):
        collectd.info("write_warp10 plugin: Shutdown: Start")
        self.flush_timer.cancel()
        collectd.info("write_warp10 plugin: Shutdown: Timer cancelled")
        self.flush_timer.join()
        collectd.info("write_warp10 plugin: Shutdown: Timer thread joined")
        try:
            self._flush()
        except Exception as e:
            stack_str = repr(traceback.format_exception(*sys.exc_info()))
            collectd.error('write_warp10 plugin: Failed to post data before '
                           'shutdown: %s' % stack_str)

    def _flush_timer(self):
        try:
            self._flush()
            next_interval = self.flush_interval
        except Exception:
            # flush failed; retry sooner than the normal interval
            next_interval = self.flush_retry_interval

        self.flush_timer = Timer(next_interval, self._flush_timer)
        self.flush_timer.daemon = True
        self.flush_timer.start()

    def _flush(self):
        messages = []
        try:
            while True:
                messages.append(self.queue.get_nowait())
        except Empty:
            pass

        if len(messages) > 0:
            for msg in messages:
                collectd.debug('write_warp10 plugin: Posting: %s' % msg)
            try:
                # Header X-CityzenData-Token is deprecated in favor of
                # X-Warp10-Token, keeping compatibility
                headers = {
                    'X-Warp10-Token': self.token,
                    'X-CityzenData-Token': self.token
                }
                body = "\n".join(messages)
                req = urllib2.Request(self.url, body, headers)
                resp = urllib2.urlopen(req, timeout=80)
                if resp.getcode() != 200:
                    raise Exception('%d %s' % (resp.getcode(), resp.read()))
            except Exception as e:
                stack_str = repr(traceback.format_exception(*sys.exc_info()))
                collectd.error('write_warp10 plugin: Failed to post data: '
                               '%s' % stack_str)

                try:
                    for msg in messages:
                        self.queue.put_nowait(msg)
                except Full:
                    collectd.warning('write_warp10 plugin: Buffer is full (%s '
                                     'elements) for endpoint "%s". The WARP '
                                     'endpoint may encounter issues. '
                                     'Otherwise, consider increasing '
                                     'BufferSize or reducing FlushInterval' %
                                     (self.queue.qsize(), self.url))
                raise
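
# Usage sketch (hedged): exercising the rewrite engine outside collectd.
# The constructor arguments mirror config_endpoint() defaults; the URL,
# token and rewrite rule below are illustrative assumptions, not taken
# from any original configuration.
import re

rules = [[re.compile(r'^cpu\.(\d+)\.(.*)$'), r'cpu.\2', ['T:core=\\1', 'L']]]
ww = WriteWarp10('http://warp10.example/api/v0/update', 'TOKEN',
                 30.0, 10.0, 65536, {'host': 'web1'}, rules, 10)
classname, labels = ww._format('cpu', '0', 'idle')
print classname, labels  # -> cpu.idle {'core': '0'}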
Ejemplo n.º 51
0
class Gviz(wx.Panel):

    # Mark canvas as dirty when setting showall
    _showall = 0

    def _get_showall(self):
        return self._showall

    def _set_showall(self, showall):
        if showall != self._showall:
            self.dirty = 1
            self._showall = showall

    showall = property(_get_showall, _set_showall)

    def __init__(self,
                 parent,
                 size=(200, 200),
                 build_dimensions=[200, 200, 100, 0, 0, 0],
                 grid=(10, 50),
                 extrusion_width=0.5,
                 bgcolor="#000000",
                 realparent=None):
        wx.Panel.__init__(self, parent, -1)
        self.widget = self
        size = [max(1.0, x) for x in size]
        ratio = size[0] / size[1]
        self.SetMinSize((150, 150 / ratio))
        self.parent = realparent if realparent else parent
        self.size = size
        self.build_dimensions = build_dimensions
        self.grid = grid
        self.lastpos = [0, 0, 0, 0, 0, 0, 0]
        self.hilightpos = self.lastpos[:]
        self.Bind(wx.EVT_PAINT, self.paint)
        self.Bind(wx.EVT_SIZE, self.resize)
        self.lines = {}
        self.pens = {}
        self.arcs = {}
        self.arcpens = {}
        self.layers = []
        self.layerindex = 0
        self.filament_width = extrusion_width  # set it to 0 to disable scaling lines with zoom
        self.update_basescale()
        self.scale = self.basescale
        penwidth = max(
            1.0, self.filament_width * ((self.scale[0] + self.scale[1]) / 2.0))
        self.translate = [0.0, 0.0]
        self.mainpen = wx.Pen(wx.Colour(0, 0, 0), penwidth)
        self.arcpen = wx.Pen(wx.Colour(255, 0, 0), penwidth)
        self.travelpen = wx.Pen(wx.Colour(10, 80, 80), penwidth)
        self.hlpen = wx.Pen(wx.Colour(200, 50, 50), penwidth)
        self.fades = [
            wx.Pen(
                wx.Colour(250 - 0.6**i * 100, 250 - 0.6**i * 100,
                          200 - 0.4**i * 50), penwidth) for i in xrange(6)
        ]
        self.penslist = [self.mainpen, self.travelpen, self.hlpen] + self.fades
        self.showall = 0
        self.hilight = deque()
        self.hilightarcs = deque()
        self.hilightqueue = Queue(0)
        self.hilightarcsqueue = Queue(0)
        self.dirty = 1
        self.bgcolor = wx.Colour()
        self.bgcolor.SetFromName(bgcolor)
        self.blitmap = wx.EmptyBitmap(self.GetClientSize()[0],
                                      self.GetClientSize()[1], -1)
        self.paint_overlay = None

    def inject(self):
        print "Inject code here..."
        print "Layer " + str(self.layerindex + 1) + " - Z = " + str(
            self.layers[self.layerindex]) + " mm"

    def clearhilights(self):
        self.hilight.clear()
        self.hilightarcs.clear()
        while not self.hilightqueue.empty():
            self.hilightqueue.get_nowait()
        while not self.hilightarcsqueue.empty():
            self.hilightarcsqueue.get_nowait()

    def clear(self):
        self.lastpos = [0, 0, 0, 0, 0, 0, 0]
        self.lines = {}
        self.pens = {}
        self.arcs = {}
        self.arcpens = {}
        self.layers = []
        self.clearhilights()
        self.layerindex = 0
        self.showall = 0
        self.dirty = 1
        wx.CallAfter(self.Refresh)

    def layerup(self):
        if self.layerindex + 1 < len(self.layers):
            self.layerindex += 1
            self.parent.SetStatusText(
                _("Layer %d - Going Up - Z = %.03f mm") %
                (self.layerindex + 1, self.layers[self.layerindex]), 0)
            self.dirty = 1
            self.parent.setlayercb(self.layerindex)
            wx.CallAfter(self.Refresh)

    def layerdown(self):
        if self.layerindex > 0:
            self.layerindex -= 1
            self.parent.SetStatusText(
                _("Layer %d - Going Down - Z = %.03f mm") %
                (self.layerindex + 1, self.layers[self.layerindex]), 0)
            self.dirty = 1
            self.parent.setlayercb(self.layerindex)
            wx.CallAfter(self.Refresh)

    def setlayer(self, layer):
        if layer in self.layers:
            self.layerindex = self.layers.index(layer)
            self.dirty = 1
            self.showall = 0
            wx.CallAfter(self.Refresh)

    def update_basescale(self):
        self.basescale = 2 * [
            min(
                float(self.size[0] - 1) / self.build_dimensions[0],
                float(self.size[1] - 1) / self.build_dimensions[1])
        ]

    def resize(self, event):
        old_basescale = self.basescale
        self.size = self.GetClientSizeTuple()
        self.update_basescale()
        zoomratio = float(self.basescale[0]) / old_basescale[0]
        wx.CallLater(200, self.zoom, 0, 0, zoomratio)

    def zoom(self, x, y, factor):
        if x == -1 and y == -1:
            side = min(self.size)
            x = y = side / 2
        self.scale = [s * factor for s in self.scale]

        self.translate = [
            x - (x - self.translate[0]) * factor,
            y - (y - self.translate[1]) * factor
        ]
        penwidth = max(
            1.0, self.filament_width * ((self.scale[0] + self.scale[1]) / 2.0))
        for pen in self.penslist:
            pen.SetWidth(penwidth)
        self.dirty = 1
        wx.CallAfter(self.Refresh)

    def _line_scaler(self, x):
        return (
            self.scale[0] * x[0],
            self.scale[1] * x[1],
            self.scale[0] * x[2],
            self.scale[1] * x[3],
        )

    def _arc_scaler(self, x):
        return (
            self.scale[0] * x[0],
            self.scale[1] * x[1],
            self.scale[0] * x[2],
            self.scale[1] * x[3],
            self.scale[0] * x[4],
            self.scale[1] * x[5],
        )

    def _drawlines(self, dc, lines, pens):
        scaled_lines = map(self._line_scaler, lines)
        dc.DrawLineList(scaled_lines, pens)

    def _drawarcs(self, dc, arcs, pens):
        scaled_arcs = map(self._arc_scaler, arcs)
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        for i in range(len(scaled_arcs)):
            dc.SetPen(pens[i] if type(pens) == list else pens)
            dc.DrawArc(*scaled_arcs[i])

    def repaint_everything(self):
        width = self.scale[0] * self.build_dimensions[0]
        height = self.scale[1] * self.build_dimensions[1]
        self.blitmap = wx.EmptyBitmap(width + 1, height + 1, -1)
        dc = wx.MemoryDC()
        dc.SelectObject(self.blitmap)
        dc.SetBackground(wx.Brush((250, 250, 200)))
        dc.Clear()
        dc.SetPen(wx.Pen(wx.Colour(180, 180, 150)))
        for grid_unit in self.grid:
            if grid_unit > 0:
                for x in xrange(int(self.build_dimensions[0] / grid_unit) + 1):
                    draw_x = self.scale[0] * x * grid_unit
                    dc.DrawLine(draw_x, 0, draw_x, height)
                for y in xrange(int(self.build_dimensions[1] / grid_unit) + 1):
                    draw_y = self.scale[1] * (self.build_dimensions[1] -
                                              y * grid_unit)
                    dc.DrawLine(0, draw_y, width, draw_y)
            dc.SetPen(wx.Pen(wx.Colour(0, 0, 0)))

        if not self.showall:
            # Draw layer gauge
            dc.SetBrush(wx.Brush((43, 144, 255)))
            dc.DrawRectangle(width - 15, 0, 15, height)
            dc.SetBrush(wx.Brush((0, 255, 0)))
            if self.layers:
                dc.DrawRectangle(
                    width - 14,
                    (1.0 - (1.0 * (self.layerindex + 1)) / len(self.layers)) *
                    height, 13, height - 1)

        if self.showall:
            for i in self.layers:
                self._drawlines(dc, self.lines[i], self.pens[i])
                self._drawarcs(dc, self.arcs[i], self.arcpens[i])
            return

        if self.layerindex < len(
                self.layers) and self.layers[self.layerindex] in self.lines:
            for layer_i in range(max(0, self.layerindex - 6), self.layerindex):
                self._drawlines(dc, self.lines[self.layers[layer_i]],
                                self.fades[self.layerindex - layer_i - 1])
                self._drawarcs(dc, self.arcs[self.layers[layer_i]],
                               self.fades[self.layerindex - layer_i - 1])
            self._drawlines(dc, self.lines[self.layers[self.layerindex]],
                            self.pens[self.layers[self.layerindex]])
            self._drawarcs(dc, self.arcs[self.layers[self.layerindex]],
                           self.arcpens[self.layers[self.layerindex]])

        self._drawlines(dc, self.hilight, self.hlpen)
        self._drawarcs(dc, self.hilightarcs, self.hlpen)

        self.paint_hilights(dc)

        dc.SelectObject(wx.NullBitmap)

    def paint_hilights(self, dc=None):
        if self.hilightqueue.empty() and self.hilightarcsqueue.empty():
            return
        hl = []
        if not dc:
            dc = wx.MemoryDC()
            dc.SelectObject(self.blitmap)
        while not self.hilightqueue.empty():
            hl.append(self.hilightqueue.get_nowait())
        self._drawlines(dc, hl, self.hlpen)
        hlarcs = []
        while not self.hilightarcsqueue.empty():
            hlarcs.append(self.hilightarcsqueue.get_nowait())
        self._drawarcs(dc, hlarcs, self.hlpen)

    def paint(self, event):
        if self.dirty:
            self.dirty = 0
            self.repaint_everything()
        self.paint_hilights()
        dc = wx.PaintDC(self)
        dc.SetBackground(wx.Brush(self.bgcolor))
        dc.Clear()
        dc.DrawBitmap(self.blitmap, self.translate[0], self.translate[1])
        if self.paint_overlay:
            self.paint_overlay(dc)

    def addfile(self, gcode):
        self.clear()
        self.add_parsed_gcodes(gcode)
        max_layers = len(self.layers)
        if hasattr(self.parent, "layerslider"):
            self.parent.layerslider.SetRange(0, max_layers - 1)
            self.parent.layerslider.SetValue(0)

    # FIXME: there's code duplication going on here; we should factor it out.
    # The reason addgcode is not implemented as add_parsed_gcodes([gline]) is
    # that there is no hilight when loading a file, so this lets us skip the
    # "if hilight:" checks entirely when loading a large number of lines.
    def add_parsed_gcodes(self, gcode):
        def _y(y):
            return self.build_dimensions[1] - (y - self.build_dimensions[4])

        def _x(x):
            return x - self.build_dimensions[3]

        for layer_idx, layer in enumerate(gcode.all_layers):
            has_move = False
            for gline in layer:
                if gline.is_move:
                    has_move = True
                    break
            if not has_move:
                continue
            self.lines[layer.z] = []
            self.pens[layer.z] = []
            self.arcs[layer.z] = []
            self.arcpens[layer.z] = []
            self.layers.append(layer.z)
            for gline in layer:
                if not gline.is_move:
                    continue

                target = self.lastpos[:]
                target[0] = gline.current_x
                target[1] = gline.current_y
                target[2] = gline.current_z
                target[5] = 0.0
                target[6] = 0.0
                if gline.e is not None:
                    if gline.relative_e:
                        target[3] += gline.e
                    else:
                        target[3] = gline.e
                if gline.f is not None: target[4] = gline.f
                if gline.i is not None: target[5] = gline.i
                if gline.j is not None: target[6] = gline.j

                start_pos = self.lastpos[:]

                if gline.command in ["G0", "G1"]:
                    self.lines[layer.z].append(
                        (_x(start_pos[0]), _y(start_pos[1]), _x(target[0]),
                         _y(target[1])))
                    self.pens[layer.z].append(
                        self.mainpen if target[3] != self.lastpos[3]
                        else self.travelpen)
                elif gline.command in ["G2", "G3"]:
                    # startpos, endpos, arc center
                    arc = [
                        _x(start_pos[0]),
                        _y(start_pos[1]),
                        _x(target[0]),
                        _y(target[1]),
                        _x(start_pos[0] + target[5]),
                        _y(start_pos[1] + target[6])
                    ]
                    if gline.command == "G2":  # clockwise, reverse endpoints
                        arc[0], arc[1], arc[2], arc[3] = arc[2], arc[3], arc[0], arc[1]

                    self.arcs[layer.z].append(arc)
                    self.arcpens[layer.z].append(self.arcpen)

                self.lastpos = target
        self.dirty = 1
        self.Refresh()

    def addgcode(self, gcode="M105", hilight=0):
        gcode = gcode.split("*")[0]
        gcode = gcode.split(";")[0]
        gcode = gcode.lower().strip()
        if not gcode:
            return
        gline = gcoder.Line(gcode)
        split_raw = gcoder.split(gline)
        gcoder.parse_coordinates(gline, split_raw, imperial=False)

        def _y(y):
            return self.build_dimensions[1] - (y - self.build_dimensions[4])

        def _x(x):
            return x - self.build_dimensions[3]

        if gline.command not in ["G0", "G1", "G2", "G3"]:
            return

        start_pos = self.hilightpos[:] if hilight else self.lastpos[:]

        target = start_pos[:]
        target[5] = 0.0
        target[6] = 0.0
        if gline.x is not None: target[0] = gline.x
        if gline.y is not None: target[1] = gline.y
        if gline.z is not None: target[2] = gline.z
        if gline.e is not None: target[3] = gline.e
        if gline.f is not None: target[4] = gline.f
        if gline.i is not None: target[5] = gline.i
        if gline.j is not None: target[6] = gline.j

        z = target[2]
        if not hilight and z not in self.layers:
            self.lines[z] = []
            self.pens[z] = []
            self.arcs[z] = []
            self.arcpens[z] = []
            self.layers.append(z)

        if gline.command in ["G0", "G1"]:
            line = [
                _x(start_pos[0]),
                _y(start_pos[1]),
                _x(target[0]),
                _y(target[1])
            ]
            if not hilight:
                self.lines[z].append((_x(start_pos[0]), _y(start_pos[1]),
                                      _x(target[0]), _y(target[1])))
                self.pens[z].append(
                    self.mainpen if target[3] != self.lastpos[3]
                    else self.travelpen)
            else:
                self.hilight.append(line)
                self.hilightqueue.put_nowait(line)
        elif gline.command in ["G2", "G3"]:
            # startpos, endpos, arc center
            arc = [
                _x(start_pos[0]),
                _y(start_pos[1]),
                _x(target[0]),
                _y(target[1]),
                _x(start_pos[0] + target[5]),
                _y(start_pos[1] + target[6])
            ]
            if gline.command == "G2":  # clockwise, reverse endpoints
                arc[0], arc[1], arc[2], arc[3] = arc[2], arc[3], arc[0], arc[1]

            if not hilight:
                self.arcs[z].append(arc)
                self.arcpens[z].append(self.arcpen)
            else:
                self.hilightarcs.append(arc)
                self.hilightarcsqueue.put_nowait(arc)

        if not hilight:
            self.lastpos = target
            self.dirty = 1
        else:
            self.hilightpos = target
        self.Refresh()
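
A minimal usage sketch for the panel above, assuming classic wxPython under Python 2; the demo frame, its status bar, and the setlayercb hook are hypothetical glue, not part of Gviz:

import wx

class GvizFrame(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, None, title="Gviz demo")
        self.CreateStatusBar()  # layerup()/layerdown() call SetStatusText()
        self.viz = Gviz(self, size=(400, 400), realparent=self)

    def setlayercb(self, layerindex):
        # Hook invoked by Gviz whenever the displayed layer changes.
        pass

if __name__ == "__main__":
    app = wx.App(False)
    frame = GvizFrame()
    frame.viz.addgcode("G1 X50 Y50 E1")  # draw a single extrusion move
    frame.Show()
    app.MainLoop()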
Ejemplo n.º 52
0
class LogCatAnalyzerThread():
    """ Logger analyzer that will check input messages to check if
        they validate some criteria
    """
    def __init__(self, logger):
        # Analyzer thread stop condition
        self._stop_event = threading.Event()

        # Messages to trigger
        self.__messages_to_trigger = {}

        # Lock object
        self.__lock_message_triggered = threading.RLock()

        # Internal buffer
        self.__queue = Queue()

        # Working thread
        self.__analyzer_thread = None

        # Logger to be used to output messages
        self._logger = logger

        # Delay to wait before processing new item in the queue
        self.analyzer_loop_delay = 0.1

    def stop(self):
        self._stop_event.set()

        if self.__analyzer_thread is not None:
            try:
                self.__analyzer_thread.join(5)
            except (KeyboardInterrupt, SystemExit):
                raise
            except BaseException:
                pass
            finally:
                del self.__analyzer_thread
                self.__analyzer_thread = None

    def start(self):
        self._stop_event.clear()
        self.__analyzer_thread = threading.Thread(target=self.__run)
        self.__analyzer_thread.name = "LogCatAnalyzerThread"
        self.__analyzer_thread.daemon = True
        self.__analyzer_thread.start()

    def push(self, line):
        self.__queue.put_nowait(line)

    def __run(self):
        while not self._stop_event.is_set():
            while not self.__queue.empty():
                try:
                    line = self.__queue.get_nowait()
                    self.__analyze_line(line)
                except Empty:
                    pass
            self._stop_event.wait(self.analyzer_loop_delay)

    def __analyze_line(self, line):
        if line:
            line = line.rstrip('\r\n')
            # Check all messages to be triggered
            self.__lock_message_triggered.acquire()
            for trig_message in self.__messages_to_trigger:
                if trig_message.startswith("regex:"):
                    reg_ex = trig_message.split("regex:")[1]
                    try:
                        if re.search(reg_ex, line) is not None:
                            # Message received, store log line
                            self.__messages_to_trigger[trig_message].append(
                                line)
                    except re.error as ex:
                        if self._logger is not None:
                            self._logger.error(
                                "Cannot compute regular expression \"%s\": %s"
                                % (reg_ex, ex))
                elif line.find(trig_message) != -1:
                    # Message received, store log line
                    self.__messages_to_trigger[trig_message].append(line)

            self.__lock_message_triggered.release()

    def add_trigger_messages(self, messages):
        for message in messages:
            self.add_trigger_message(message)

    def add_trigger_message(self, message):
        """ Trigger a message

        :type  message: string
        :param message: message to be triggered
        """
        self.__lock_message_triggered.acquire()
        self.__messages_to_trigger[message] = list()
        self.__lock_message_triggered.release()

    def remove_trigger_message(self, message):
        """ Remove a triggered message

        :type  message: string
        :param message: message to be removed
        """
        if message in self.__messages_to_trigger:
            self.__lock_message_triggered.acquire()
            del self.__messages_to_trigger[message]
            self.__lock_message_triggered.release()

    def is_message_received(self, message, timeout):
        """ Check if a message is received

        :type  message: string
        :param message: message that we look for
        :type  timeout: int
        :param timeout: time limit where we expect to receive the message

        :return: Array of message received, empty array if nothing
        :rtype: list
        """
        remove_trigger_message = False
        if message not in self.__messages_to_trigger:
            self.add_trigger_message(message)
            remove_trigger_message = True

        messages_received = None
        begin_time = time.time()
        end_time = begin_time + float(timeout)

        while (not messages_received) and (time.time() < end_time):
            messages_received = self.get_message_triggered_status(message)
            time.sleep(0.2)

        if messages_received:
            # Clone the list to return as remove trigger message
            # is going to delete it
            messages_received = list(messages_received)

        if remove_trigger_message:
            self.remove_trigger_message(message)

        return messages_received

    def get_message_triggered_status(self, message):
        """ Get the status of a message triggered

        :type  message: string
        :param message: message triggered
        :return: Array of message received, empty array if nothing
        :rtype: list
        """
        if message in self.__messages_to_trigger:
            return self.__messages_to_trigger[message]
        else:
            return None

    def reset_trigger_message(self, message):
        """ Reset a triggered message, clearing any lines stored for it

        :type  message: string
        :param message: message to be reset
        """
        if message in self.__messages_to_trigger:
            self.__lock_message_triggered.acquire()
            self.__messages_to_trigger[message] = list()
            self.__lock_message_triggered.release()
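
A hedged usage sketch for the analyzer above; the logcat line is invented, and the Queue/threading imports are assumed at module level, as in the class body:

import logging

analyzer = LogCatAnalyzerThread(logging.getLogger("logcat"))
analyzer.start()
analyzer.add_trigger_message("regex:BOOT_COMPLETED")

# Lines pushed here would normally come from an adb logcat reader thread.
analyzer.push("I/ActivityManager(  123): BOOT_COMPLETED broadcast sent\r\n")

hits = analyzer.is_message_received("regex:BOOT_COMPLETED", timeout=2)
print "matched lines:", hits
analyzer.stop()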
Ejemplo n.º 53
0
def create_step60(maindir, mbconnect=None, maxsongs=100, nfilesbuffer=0):
    """
    Makes sure we have the similar artists to the top 100 most familiar
    artists, and then go on with more similar artists.
    INPUT
       maindir       - root directory of the Million Song dataset
       mbconnect     - open pg connection to Musicbrainz
       maxsongs      - max number of song per search (max=100)
       nfilesbuffer  - number of files we leave unfilled in the dataset
    RETURN
       the number of songs actually created
    """
    # will contain artists TID that are done or already in the queue
    artists_done = set()
    # get all artists ids
    artist_queue = Queue()
    artists = get_most_familiar_artists(nresults=100)
    n_most_familiars = len(artists)
    npr.shuffle(artists)
    for a in artists:
        artists_done.add(a.id)
        artist_queue.put_nowait(a)
    # for each of them create all songs
    cnt_created = 0
    cnt_artists = 0
    while not artist_queue.empty():
        artist = artist_queue.get_nowait()
        cnt_artists += 1
        # CLOSED CREATION?
        if CREATION_CLOSED:
            break
        if cnt_artists % 10 == 0:
            nh5 = count_h5_files(maindir)
            print 'found', nh5, 'h5 song files in', maindir
            sys.stdout.flush()
            if nh5 > TOTALNFILES - nfilesbuffer:
                return cnt_created
        # verbose
        print 'doing artist', cnt_artists, '(pid=' + str(os.getpid()) + ')'
        sys.stdout.flush()
        # encode that artist unless it was done in step10
        #if cnt_artists > n_most_familiars:
        # we had to relaunch this function; let's not redo all the same artists over and over
        if cnt_artists > 1000:
            cnt_created += create_track_files_from_artist(maindir,
                                                          artist,
                                                          mbconnect=mbconnect,
                                                          maxsongs=maxsongs)
        # get similar artists, add to queue
        similars = get_similar_artists(artist)
        if len(similars) == 0: continue
        npr.shuffle(similars)
        similars = similars[:10]  # we keep 10 at random, the radius of artists grows faster
        # so the threads don't redo the same artists over and over
        # too bad for the artists we miss (if any...)
        for a in similars:
            if a.id in artists_done:
                continue
            artists_done.add(a.id)
            artist_queue.put_nowait(a)
    return cnt_created
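
A hypothetical driver for the step above; the dataset path is a placeholder, and the helper globals (get_most_familiar_artists, TOTALNFILES, CREATION_CLOSED, ...) come from the surrounding module:

if __name__ == "__main__":
    n_created = create_step60("/data/MillionSong",
                              mbconnect=None,  # skip the Musicbrainz enrichment
                              maxsongs=100,
                              nfilesbuffer=1000)
    print "step60 created", n_created, "songs"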
Ejemplo n.º 54
0
class SplunkHandler(logging.Handler):
    """
    A logging handler to send events to a Splunk Enterprise instance
    running the Splunk HTTP Event Collector.
    """
    instances = []  # For keeping track of running class instances

    def __init__(self,
                 host,
                 port,
                 token,
                 index,
                 hostname=None,
                 source=None,
                 sourcetype='text',
                 verify=True,
                 timeout=60,
                 flush_interval=15.0,
                 queue_size=5000):

        SplunkHandler.instances.append(self)
        logging.Handler.__init__(self)

        self.host = host
        self.port = port
        self.token = token
        self.index = index
        self.source = source
        self.sourcetype = sourcetype
        self.verify = verify
        self.timeout = timeout
        self.flush_interval = flush_interval
        self.log_payload = ""
        self.SIGTERM = False  # 'True' if application requested exit
        self.timer = None
        self.testing = False  # Used for slightly altering logic during unit testing
        # It is possible to get 'behind' and never catch up, so we limit the queue size
        self.queue = Queue(maxsize=queue_size)

        if hostname is None:
            self.hostname = socket.gethostname()
        else:
            self.hostname = hostname

        # prevent infinite recursion by silencing requests and urllib3 loggers
        logging.getLogger('requests').propagate = False

        # and do the same for ourselves
        logging.getLogger(__name__).propagate = False

        # disable all warnings from urllib3 package
        if not self.verify:
            requests.packages.urllib3.disable_warnings()

        # Start a worker thread responsible for sending logs
        self.timer = Timer(self.flush_interval, self._splunk_worker)
        self.timer.daemon = True  # Auto-kill thread if main process exits
        self.timer.start()

    def emit(self, record):
        record = self.format_record(record)
        try:
            # Put log message into queue; worker thread will pick up
            self.queue.put_nowait(record)
        except Full:
            print("Log queue full; log data will be dropped.")

    def format_record(self, record):
        if self.source is None:
            source = record.pathname
        else:
            source = self.source

        current_time = time.time()
        if self.testing:
            current_time = None

        params = {
            'time': current_time,
            'host': self.hostname,
            'index': self.index,
            'source': source,
            'sourcetype': self.sourcetype,
            'event': self.format(record),
        }

        return json.dumps(params, sort_keys=True)

    def _splunk_worker(self):
        queue_empty = True

        # Pull everything off the queue.
        while not self.queue.empty():
            try:
                item = self.queue.get(block=False)
                self.log_payload = self.log_payload + item
                self.queue.task_done()
            except Empty:
                pass

            # If the payload is getting very long, stop reading and send immediately.
            if not self.SIGTERM and len(self.log_payload) >= 524288:  # 512 KB
                queue_empty = False
                break

        if self.log_payload:
            url = 'https://%s:%s/services/collector' % (self.host, self.port)

            try:
                r = requests.post(
                    url,
                    data=self.log_payload,
                    headers={'Authorization': "Splunk %s" % self.token},
                    verify=self.verify,
                    timeout=self.timeout,
                )
                r.raise_for_status()  # Throws exception for 4xx/5xx status

            except Exception as e:
                try:
                    print(traceback.format_exc())
                    print("Exception in Splunk logging handler: %s" % str(e))
                except:
                    pass

            self.log_payload = ""

        # Restart the timer
        timer_interval = self.flush_interval
        if not self.SIGTERM:
            if not queue_empty:
                timer_interval = 1.0  # Start up again right away if queue was not cleared

            self.timer = Timer(timer_interval, self._splunk_worker)
            self.timer.daemon = True  # Auto-kill thread if main process exits
            self.timer.start()

    def shutdown(self):
        self.SIGTERM = True
        self.timer.cancel()  # Cancels the scheduled Timer, allows exit immediately

        # Send the remaining items that might be sitting in queue.
        self._splunk_worker()

    # Called when application exit imminent (main thread ended / got kill signal)
    @atexit.register
    def catch_exit():
        for instance in SplunkHandler.instances:
            try:
                instance.shutdown()
            except:
                pass
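
A minimal wiring sketch for the handler above; the host, token and index values are placeholders:

import logging

splunk = SplunkHandler(host="splunk.example.com",
                       port=8088,  # default HTTP Event Collector port
                       token="00000000-0000-0000-0000-000000000000",
                       index="main",
                       flush_interval=5.0)

log = logging.getLogger("app")
log.addHandler(splunk)
log.warning("event queued for the HTTP Event Collector")

# Flush anything still sitting in the queue before the process exits.
splunk.shutdown()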
Ejemplo n.º 55
0
class APRSUploader(object):
    '''
    Queued APRS Telemetry Uploader class
    This performs uploads to the APRS-IS network, and also handles generation of station position beacons.

    Incoming telemetry packets are fed into the input queue, which is checked regularly.
    Once a payload callsign has been observed enough times, its telemetry is converted into an
    APRS packet, before being added to the upload queue to be uploaded as network speed permits.

    If an upload attempt times out, the packet is discarded.
    If the queue fills up (probably indicating no network connection, and a fast packet downlink rate),
    it is immediately emptied, to avoid upload of out-of-date packets.

    Note that this uploader object is intended to handle telemetry from multiple sondes.
    '''

    # We require the following fields to be present in the incoming telemetry dictionary data
    REQUIRED_FIELDS = [
        'frame', 'id', 'datetime', 'lat', 'lon', 'alt', 'temp', 'type', 'freq',
        'freq_float', 'datetime_dt'
    ]

    def __init__(
            self,
            aprs_callsign='N0CALL',
            aprs_passcode="00000",
            object_name_override=None,
            object_comment="RadioSonde",
            position_report=False,
            aprsis_host='rotate.aprs2.net',
            aprsis_port=14580,
            station_beacon=False,
            station_beacon_rate=30,
            station_beacon_position=(0.0, 0.0, 0.0),
            station_beacon_comment="radiosonde_auto_rx SondeGate v<version>",
            station_beacon_icon="/r",
            synchronous_upload_time=30,
            callsign_validity_threshold=5,
            upload_queue_size=16,
            upload_timeout=10,
            inhibit=False):
        """ Initialise an APRS Uploader object.

        Args:
            aprs_callsign (str): Callsign of the uploader, used when logging into APRS-IS.
            aprs_passcode (str): APRS-IS passcode corresponding to the uploader callsign.

            object_name_override (str): Override the object name in the uploaded sentence with this value.
                WARNING: This will horribly break the aprs.fi map if multiple sondes are uploaded simultaneously under the same callsign.
                USE WITH CAUTION!!!
            object_comment (str): A comment to go with the object. Various fields will be replaced with telemetry data.

            position_report (bool): If True, upload positions as APRS position reports, otherwise, upload as an Object.

            aprsis_host (str): APRS-IS Server to upload packets to.
            aprsis_port (int): APRS-IS TCP port number.

            station_beacon (bool): Enable beaconing of station position.
            station_beacon_rate (int): Time delay between beacon uploads (minutes)
            station_beacon_position (tuple): (lat, lon, alt), in decimal degrees, of the station position.
            station_beacon_comment (str): Comment field for the station beacon. <version> will be replaced with the current auto_rx version.
            station_beacon_icon (str): The APRS icon to be used, as the two characters (symbol table, symbol index), as per http://www.aprs.org/symbols.html

            synchronous_upload_time (int): Upload the most recent telemetry when time.time()%synchronous_upload_time == 0
                This is done in an attempt to get multiple stations uploading the same telemetry sentence simultaneously,
                and also acts as decimation on the number of sentences uploaded to APRS-IS.

            callsign_validity_threshold (int): Only upload telemetry data if the callsign has been observed more than N times. Default = 5

            upload_queue_size (int): Maximum number of sentences to keep in the upload queue. If the queue is filled,
                it will be emptied (discarding the queue contents).
            upload_timeout (int): Timeout (Seconds) when performing uploads to APRS-IS. Default: 10 seconds.

            inhibit (bool): Inhibit all uploads. Mainly intended for debugging.

        """

        self.aprs_callsign = aprs_callsign
        self.aprs_passcode = aprs_passcode
        self.object_comment = object_comment
        self.position_report = position_report
        self.aprsis_host = aprsis_host
        self.aprsis_port = aprsis_port
        self.upload_timeout = upload_timeout
        self.upload_queue_size = upload_queue_size
        self.synchronous_upload_time = synchronous_upload_time
        self.callsign_validity_threshold = callsign_validity_threshold
        self.inhibit = inhibit

        self.station_beacon = {
            'enabled': station_beacon,
            'position': station_beacon_position,
            'rate': station_beacon_rate,
            'comment': station_beacon_comment,
            'icon': station_beacon_icon
        }

        if object_name_override is None:
            self.object_name_override = "<id>"
        else:
            self.object_name_override = object_name_override

        # Our two Queues - one to hold sentences to be uploaded, the other to temporarily hold
        # input telemetry dictionaries before they are converted and processed.
        self.aprs_upload_queue = Queue(upload_queue_size)
        self.input_queue = Queue()

        # Dictionary where we store sorted telemetry data for upload when required.
        # Elements will be named after payload IDs, and will contain:
        #   'count' (int): Number of times this callsign has been observed. Uploads will only occur when
        #       this number rises above callsign_validity_threshold.
        #   'data' (Queue): A queue of telemetry sentences to be uploaded. When the upload timer fires,
        #       this queue will be dumped, and the most recent telemetry uploaded.
        self.observed_payloads = {}

        # Record of when we last uploaded a user station position to Habitat.
        self.last_user_position_upload = 0

        # Start the uploader thread.
        self.upload_thread_running = True
        self.upload_thread = Thread(target=self.aprs_upload_thread)
        self.upload_thread.start()

        # Start the input queue processing thread.
        self.input_processing_running = True
        self.input_thread = Thread(target=self.process_queue)
        self.input_thread.start()

        self.timer_thread_running = True
        self.timer_thread = Thread(target=self.upload_timer)
        self.timer_thread.start()

        self.log_info("APRS Uploader Started.")

    def aprsis_upload(self, source, packet, igate=False):
        """ Upload a packet to APRS-IS

        Args:
            source (str): Callsign of the packet source.
            packet (str): APRS packet to upload.
            igate (boolean): If True, iGate the packet into APRS-IS
                (i.e. use the original source call, but add SONDEGATE and our callsign to the path.)

        """

        # If we are inhibited, just return immediately.
        if self.inhibit:
            self.log_info("Upload Inhibited: %s" % packet)
            return True

        # Generate APRS packet
        if igate:
            # If we are emulating an IGATE, then we need to add in a path, a q-construct, and our own callsign.
            # We have the TOCALL field 'APRARX' allocated by Bob WB4APR, so we can now use this to indicate
            # that these packets have arrived via radiosonde_auto_rx!
            _packet = '%s>APRARX,SONDEGATE,TCPIP,qAR,%s:%s\n' % (
                source, self.aprs_callsign, packet)
        else:
            # Otherwise, we are probably just placing an object, usually sourced by our own callsign
            _packet = '%s>APRS:%s\n' % (source, packet)

        # create socket & connect to server
        _s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        _s.settimeout(self.upload_timeout)
        try:
            _s.connect((self.aprsis_host, self.aprsis_port))
            # Send logon string
            _logon = 'user %s pass %s vers VK5QI-AutoRX \n' % (
                self.aprs_callsign, self.aprs_passcode)
            _s.send(_logon.encode('ascii'))
            # send packet
            _s.send(_packet.encode('ascii'))
            # close socket
            _s.shutdown(0)
            _s.close()
            self.log_info("Uploaded to APRS-IS: %s" % _packet)
            return True
        except Exception as e:
            self.log_error("Upload to APRS-IS Failed - %s" % str(e))
            return False

    def beacon_station_position(self):
        ''' Send a station position beacon into APRS-IS '''
        if self.station_beacon['enabled']:
            if (self.station_beacon['position'][0]
                    == 0.0) and (self.station_beacon['position'][1] == 0.0):
                self.log_error(
                    "Station position is 0,0, not uploading position beacon.")
                self.last_user_position_upload = time.time()
                return

            # Generate the station position packet
            # Note - this is now generated as an APRS position report, for radiosondy.info compatibility.
            _packet = generate_station_object(
                self.aprs_callsign,
                self.station_beacon['position'][0],
                self.station_beacon['position'][1],
                self.station_beacon['comment'],
                self.station_beacon['icon'],
                position_report=True)

            # Send the packet as an iGated packet.
            self.aprsis_upload(self.aprs_callsign, _packet, igate=True)
            self.last_user_position_upload = time.time()

    def update_station_position(self, lat, lon, alt):
        """ Update the internal station position record. Used when determining the station position by GPSD """
        self.station_beacon['position'] = (lat, lon, alt)

    def aprs_upload_thread(self):
        ''' Handle uploading of packets to APRS-IS '''

        self.log_debug("Started APRS Uploader Thread.")

        while self.upload_thread_running:

            if self.aprs_upload_queue.qsize() > 0:
                # If the queue is completely full, jump to the most recent telemetry sentence.
                if self.aprs_upload_queue.qsize() == self.upload_queue_size:
                    while not self.aprs_upload_queue.empty():
                        _telem = self.aprs_upload_queue.get()

                    self.log_warning(
                        "Uploader queue was full - possible connectivity issue."
                    )
                else:
                    # Otherwise, get the first item in the queue.
                    _telem = self.aprs_upload_queue.get()

                # Convert to a packet.
                try:
                    (_packet, _call) = telemetry_to_aprs_position(
                        _telem,
                        object_name=self.object_name_override,
                        aprs_comment=self.object_comment,
                        position_report=self.position_report)
                except Exception as e:
                    self.log_error(
                        "Error converting telemetry to APRS packet - %s" %
                        str(e))
                    _packet = None

                # Attempt to upload it.
                if _packet is not None:
                    # If we are uploading position reports, the source call is the generated callsign
                    # usually based on the sonde serial number, and we iGate the position report.
                    # Otherwise, we upload APRS Objects, sourced by our own callsign, but still iGated via us.
                    if self.position_report:
                        self.aprsis_upload(_call, _packet, igate=True)
                    else:
                        self.aprsis_upload(self.aprs_callsign,
                                           _packet,
                                           igate=True)

            else:
                # Wait for a short time before checking the queue again.
                time.sleep(0.1)

        self.log_debug("Stopped APRS Uploader Thread.")

    def upload_timer(self):
        """ Add packets to the habitat upload queue if it is time for us to upload. """

        while self.timer_thread_running:
            if int(time.time()) % self.synchronous_upload_time == 0:
                # Time to upload!
                for _id in self.observed_payloads.keys():
                    # If no data, continue...
                    if self.observed_payloads[_id]['data'].empty():
                        continue
                    else:
                        # Otherwise, dump the queue and keep the latest telemetry.
                        while not self.observed_payloads[_id]['data'].empty():
                            _telem = self.observed_payloads[_id]['data'].get()

                        # Attempt to add it to the APRS uploader queue.
                        try:
                            self.aprs_upload_queue.put_nowait(_telem)
                        except Exception as e:
                            self.log_error(
                                "Error adding sentence to queue: %s" % str(e))

                # Sleep a second so we don't hit the synchronous upload time again.
                time.sleep(1)
            else:
                # Not yet time to upload, wait for a bit.
                time.sleep(0.1)

    def process_queue(self):
        """ Process packets from the input queue.

        This thread handles packets from the input queue (provided by the decoders)
        Packets are sorted by ID, and a dictionary entry is created. 

        """

        while self.input_processing_running:
            # Process everything in the queue.
            while self.input_queue.qsize() > 0:
                # Grab latest telem dictionary.
                _telem = self.input_queue.get_nowait()

                _id = _telem['id']

                if _id not in self.observed_payloads:
                    # We haven't seen this ID before, so create a new dictionary entry for it.
                    self.observed_payloads[_id] = {'count': 1, 'data': Queue()}
                    self.log_debug(
                        "New Payload %s. Not observed enough to allow upload."
                        % _id)
                    # However, we don't yet add anything to the queue for this payload...
                else:
                    # We have seen this payload before!
                    # Increment the 'seen' counter.
                    self.observed_payloads[_id]['count'] += 1

                    # If we have seen this particular ID enough times, add the data to the ID's queue.
                    if self.observed_payloads[_id][
                            'count'] >= self.callsign_validity_threshold:
                        # Add the telemetry to the queue
                        self.observed_payloads[_id]['data'].put(_telem)

                    else:
                        self.log_debug(
                            "Payload ID %s not observed enough to allow upload."
                            % _id)

            if (time.time() - self.last_user_position_upload
                ) > self.station_beacon['rate'] * 60:
                self.beacon_station_position()

            time.sleep(0.1)

    def add(self, telemetry):
        """ Add a dictionary of telemetry to the input queue. 

        Args:
            telemetry (dict): Telemetry dictionary to add to the input queue.

        """

        # Discard any telemetry which is indicated to be encrypted.
        if 'encrypted' in telemetry:
            if telemetry['encrypted'] == True:
                return

        # Check the telemetry dictionary contains the required fields.
        for _field in self.REQUIRED_FIELDS:
            if _field not in telemetry:
                self.log_error("JSON object missing required field %s" %
                               _field)
                return

        # Add it to the queue if we are running.
        if self.input_processing_running:
            self.input_queue.put(telemetry)
        else:
            self.log_error("Processing not running, discarding.")

    def close(self):
        ''' Shutdown uploader and processing threads. '''
        self.log_debug("Waiting for threads to close...")
        self.input_processing_running = False
        self.timer_thread_running = False
        self.upload_thread_running = False

        # Wait for all threads to close.
        if self.upload_thread is not None:
            self.upload_thread.join()

        if self.timer_thread is not None:
            self.timer_thread.join()

        if self.input_thread is not None:
            self.input_thread.join()

    def log_debug(self, line):
        """ Helper function to log a debug message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.debug("APRS-IS - %s" % line)

    def log_info(self, line):
        """ Helper function to log an informational message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.info("APRS-IS - %s" % line)

    def log_error(self, line):
        """ Helper function to log an error message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.error("APRS-IS - %s" % line)

    def log_warning(self, line):
        """ Helper function to log a warning message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.warning("APRS-IS - %s" % line)
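
A hedged usage sketch; the telemetry dictionary mirrors REQUIRED_FIELDS with invented values, and inhibit=True keeps the sketch from touching the network:

import datetime
import time

uploader = APRSUploader(aprs_callsign="N0CALL",
                        aprs_passcode="00000",
                        inhibit=True)  # log packets instead of uploading them

now = datetime.datetime.utcnow()
telemetry = {
    'frame': 100, 'id': 'R1234567', 'datetime': now.isoformat() + "Z",
    'lat': -34.9, 'lon': 138.6, 'alt': 10000.0, 'temp': -42.0,
    'type': 'RS41', 'freq': '401.500 MHz', 'freq_float': 401.5,
    'datetime_dt': now,
}

uploader.add(telemetry)
time.sleep(1)  # let the processing threads pick the packet up
uploader.close()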
Ejemplo n.º 56
0
class BaseDriver(object):
    browser = None

    def __init__(self, config=None):
        self.driver = None
        self.config = config or {}
        self.alive = False

        self.ready_condition = threading.Condition()
        self.task_queue = Queue()

        # turn off verbose selenium logging
        selenium_logger = logging.getLogger(
            'selenium.webdriver.remote.remote_connection')
        selenium_logger.setLevel(logging.WARNING)

    def _initialize_driver(self, driver, config):
        # config.setdefault('window_size', {'width': 1200, 'height': 800})
        # driver.set_window_size(config['window_size']['width'], config['window_size']['height'])

        if not config.setdefault('cookies', False):
            driver.delete_all_cookies()

        self.initialize_driver(driver, config)

    def create_driver(self, config):
        pass

    def initialize_driver(self, driver, config):
        pass

    def wait_until_ready(self):
        with self.ready_condition:
            self.ready_condition.wait()

    def start(self, wait=False):
        t = threading.Thread(target=self._start)
        t.daemon = True
        t.start()

        if wait:
            self.wait_until_ready()

    def _start(self):
        self.driver = self.create_driver(self.config)
        self._initialize_driver(self.driver, self.config)
        log.info("Browser initialized")

        with self.ready_condition:
            self.ready_condition.notifyAll()

        self.alive = True
        while self.alive:
            task = self.task_queue.get()
            self.get(task)

    def get(self, url):
        try:
            # self.driver.set_page_load_timeout(50)
            self.driver.get(url)
        except TimeoutException:
            pass

    def queue_url(self, url):
        self.task_queue.put_nowait(url)

    def close(self):
        self.driver.close()

    def stop(self):
        self.driver.stop_client()

    def pid(self):
        import psutil
        gecko_pid = self.driver.service.process.pid
        return psutil.Process(gecko_pid).children()[0].pid

    def quit(self):
        self.alive = False
        self.driver.quit()
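
A sketch of a concrete subclass, assuming selenium is available; create_driver is the only hook that must be filled in, initialize_driver is optional:

from selenium import webdriver

class FirefoxDriver(BaseDriver):
    browser = "firefox"

    def create_driver(self, config):
        return webdriver.Firefox()

    def initialize_driver(self, driver, config):
        driver.set_page_load_timeout(config.get("timeout", 30))

if __name__ == "__main__":
    drv = FirefoxDriver({"cookies": True})
    drv.start(wait=True)  # blocks until the browser thread signals readiness
    drv.queue_url("https://example.com")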
Ejemplo n.º 57
0
class TipWidget(QWidget):

    TIMER_INTERVAL = 10
    TIMER_COUNTER_STEP = 30
    MAX_OPACITY_TIME = 100

    TEXT_TYPE_INFO = 1
    TEXT_TYPE_WARNING = 2
    TEXT_TYPE_ERROR = 3
    INFO_ICON_PATH = ":/tip/icons/tip/info.png"
    WARNING_ICON_PATH = ":/tip/icons/tip/warning.png"
    ERROR_ICON_PATH = ":/tip/icons/tip/error.png"

    SHORT_TIME = 2000
    LONG_TIME = 4000

    MAX_QUEUE_SIZE = 20

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.setWindowFlags(Qt.FramelessWindowHint | Qt.Dialog)
        self.ui = Ui_TipWidget()
        self.ui.setupUi(self)

        self.timer = QTimer()
        self.timer.timeout.connect(self.__onTimeout)
        self.queue = Queue(self.MAX_QUEUE_SIZE)

        self.timerCounter = 0
        self.maxOpacityCounter = 0

        self.setWindowOpacity(0)

    def __makeText(self, text, type=TEXT_TYPE_INFO, t=SHORT_TIME):
        if self.timer.isActive():
            if not self.queue.full():
                self.queue.put_nowait((text, type, t))
            return

        iconPath = self.INFO_ICON_PATH
        if type == self.TEXT_TYPE_WARNING:
            iconPath = self.WARNING_ICON_PATH
        elif type == self.TEXT_TYPE_ERROR:
            iconPath = self.ERROR_ICON_PATH

        self.ui.image_label.setPixmap(QPixmap(iconPath))
        self.ui.text_label.setText(text)
        self.timer.start(self.TIMER_INTERVAL)

        self.show()

    def makeInfoText(self, text="", t=SHORT_TIME):
        self.__makeText(text, self.TEXT_TYPE_INFO, t)

    def makeWarningText(self, text="", t=SHORT_TIME):
        self.__makeText(text, self.TEXT_TYPE_WARNING, t)

    def makeErrorText(self, text="", t=LONG_TIME):
        self.__makeText(text, self.TEXT_TYPE_ERROR, t)

    def __onTimeout(self):
        self.timerCounter += self.TIMER_COUNTER_STEP
        if self.timerCounter >= self.SHORT_TIME:
            self.__onFinished()
            return

        if self.timerCounter < self.SHORT_TIME / 2:
            self.setWindowOpacity(1.0 / (self.SHORT_TIME / 2) *
                                  self.timerCounter)
        else:
            self.maxOpacityCounter += 1
            if self.maxOpacityCounter < self.MAX_OPACITY_TIME:
                self.timerCounter -= self.TIMER_COUNTER_STEP
                return

            self.setWindowOpacity(1.0 / (self.SHORT_TIME / 2) *
                                  (self.SHORT_TIME - self.timerCounter))

    def __onFinished(self):
        self.close()
        self.timer.stop()
        self.maxOpacityCounter = 0
        self.timerCounter = 0
        if not self.queue.empty():
            args = self.queue.get()
            self.__makeText(args[0], args[1], args[2])
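
A hedged usage sketch (PyQt4-style imports assumed, matching the Python 2 code above; Ui_TipWidget comes from the generated UI module this class expects):

import sys
from PyQt4.QtGui import QApplication

app = QApplication(sys.argv)
tip = TipWidget()
tip.makeInfoText("settings saved")
tip.makeErrorText("disk full")  # queued until the first tip fades out
sys.exit(app.exec_())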
Ejemplo n.º 58
0
def ibfWorker(publisherAddr,
              sinkAddr,
              cells,
              k,
              m,
              blockSz,
              secret,
              public,
              w,
              pbSize,
              hashProdOne=True):

    workerName = mp.current_process().name
    context = zmq.Context()
    taskQ = Queue()
    endQ = Queue()
    taskLock = threading.Lock()
    sharedTimer = ExpTimer()
    threadsNumber = 1  # ALWAYS 1! Using more threads here would slow down the subscriber

    subSocket = context.socket(zmq.SUB)
    subSocket.setsockopt(zmq.SUBSCRIBE, b'work')
    subSocket.setsockopt(zmq.SUBSCRIBE, b'end')
    subSocket.connect(publisherAddr)
    hashFunc = [Hash1, Hash2, Hash3, Hash4, Hash5, Hash6]
    cells = set(cells)

    print "PreprocWorker", workerName, "initiated"
    results = {
        "worker": workerName,
        "cells": {},
        "w": {},
        "timers": None,
        "timerNames": []
    }

    for i in xrange(threadsNumber):
        taskThread = threading.Thread(
            target=ibfTask,
            args=(taskQ, endQ, results, workerName, k, m, hashFunc, blockSz,
                  secret, public, cells, i, taskLock, sharedTimer, pbSize))

        taskThread.daemon = True
        taskThread.start()

    for i in range(m):
        results["cells"][i] = Cell(0, blockSz)

    while True:
        try:
            workItem = subSocket.recv_multipart()
            if workItem[0] == 'end':
                for i in xrange(threadsNumber):
                    taskQ.put_nowait("end")

                finishedTaskThreads = set()
                while len(finishedTaskThreads) != threadsNumber:
                    tName = endQ.get(True)
                    endQ.task_done()
                    finishedTaskThreads.add(tName)

                results["timers"] = sharedTimer
                for i in range(m):
                    if results["cells"][i].count == 0:
                        del results["cells"][i]
                rpc = RpcPdrClient(context)
                inMsg = rpc.rpcAddress(sinkAddr, results)
                if inMsg == "ACK":
                    break
            else:
                taskQ.put_nowait(workItem[1])

        except zmq.ZMQError as e:
            print "ZMQ Exception", str(e)
        except (RuntimeError, TypeError, NameError) as e:
            print "Error", str(e)
Ejemplo n.º 59
0
class SerialWriterThread():
    """
    Logger based on serial utility
    """
    def __init__(self, logger):
        # Output file
        self._output_file_path = None

        # Reader thread stop condition
        self._stop_event = threading.Event()

        # Internal buffer
        self._queue = Queue()

        self.__writer_thread = None

        # Logger
        self._logger = logger

    def stop(self):
        """
        Stop the writer thread
        """
        self._stop_event.set()

        if self.__writer_thread is not None:
            try:
                self.__writer_thread.join(5)
            except (KeyboardInterrupt, SystemExit):
                raise
            except BaseException:
                pass
            finally:
                del self.__writer_thread
                self.__writer_thread = None
        return

    def start(self):
        """
        Start the write thread
        """
        self._stop_event.clear()
        self.__writer_thread = threading.Thread(target=self._run)
        self.__writer_thread.name = "SerialWriterThread"
        self.__writer_thread.daemon = True
        self.__writer_thread.start()

    def push(self, line):
        """
        Push data in the internal queue

        :type  line: string
        :param line: data to be written
        """
        self._queue.put_nowait(line)

    def set_output_path(self, output_path):
        """
        Set stdout file path

        :type  output_path: string
        :param output_path: path of the log file to be created
        """
        self._output_file_path = output_path

    def _run(self):
        """
        Runner thread method
        """
        # Create the output file if output file was specified
        if self._output_file_path is None:
            self._logger.info("No file specified for serial logger output")
            self._output_file_path = strftime(
                "_%Y-%m-%d_%Hh%M.%S") + "_serial.log"
            self._logger.info("%s will be used." % self._output_file_path)

        while not self._stop_event.is_set():
            try:
                with open(self._output_file_path, "ab") as output_stream:
                    while not self._queue.empty():
                        try:
                            line = self._queue.get_nowait()
                            if len(line) > 0:
                                output_stream.write(line)
                                output_stream.flush()
                        except Exception as e:  # pylint: disable=W0703
                            self._logger.error(str(e))
            except (IOError, OSError, TypeError) as e:
                self._logger.error(str(e))
                break
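
A short usage sketch for the writer above; the log path and pushed bytes are placeholders:

import logging

writer = SerialWriterThread(logging.getLogger("serial"))
writer.set_output_path("uart_console.log")
writer.start()

writer.push("AT\r\n")  # raw serial traffic, normally captured elsewhere
writer.push("OK\r\n")

writer.stop()  # sets the stop event and joins the thread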
Ejemplo n.º 60
0
class JsonRpcServer(object):
    log = logging.getLogger("jsonrpc.JsonRpcServer")

    def __init__(self, bridge, threadFactory=None):
        self._bridge = bridge
        self._workQueue = Queue()
        self._threadFactory = threadFactory

    def queueRequest(self, req):
        self._workQueue.put_nowait(req)

    def _serveRequest(self, ctx, req):
        mangledMethod = req.method.replace(".", "_")
        logLevel = logging.DEBUG
        if mangledMethod in ('Host_getVMList', 'Host_getAllVmStats',
                             'Host_getStats', 'StorageDomain_getStats',
                             'VM_getStats', 'Host_fenceNode'):
            logLevel = logging.TRACE
        self.log.log(logLevel, "Calling '%s' in bridge with %s", req.method,
                     req.params)
        try:
            method = getattr(self._bridge, mangledMethod)
        except AttributeError:
            if req.isNotification():
                return

            ctx.requestDone(
                JsonRpcResponse(None, JsonRpcMethodNotFoundError(), req.id))
            return

        try:
            params = req.params
            server_address = ctx.client.get_local_address()
            self._bridge.register_server_address(server_address)
            if isinstance(req.params, list):
                res = method(*params)
            else:
                res = method(**params)
            self._bridge.unregister_server_address()
        except JsonRpcError as e:
            ctx.requestDone(JsonRpcResponse(None, e, req.id))
        except Exception as e:
            self.log.exception("Internal server error")
            ctx.requestDone(
                JsonRpcResponse(None, JsonRpcInternalError(str(e)), req.id))
        else:
            res = True if res is None else res
            self.log.log(logLevel, "Return '%s' in bridge with %s", req.method,
                         res)
            ctx.requestDone(JsonRpcResponse(res, None, req.id))

    @traceback(on=log.name)
    def serve_requests(self):
        while True:
            self.log.debug("Waiting for request")
            obj = self._workQueue.get()
            if obj is None:
                break

            client, msg = obj
            self._parseMessage(client, msg)

    def _parseMessage(self, client, msg):
        ctx = _JsonRpcServeRequestContext(client)

        try:
            rawRequests = json.loads(msg)
        except:
            ctx.addResponse(JsonRpcResponse(None, JsonRpcParseError(), None))
            ctx.sendReply()
            return

        if isinstance(rawRequests, list):
            # Empty batch request
            if len(rawRequests) == 0:
                ctx.addResponse(
                    JsonRpcResponse(None, JsonRpcInvalidRequestError(), None))
                ctx.sendReply()
                return
        else:
            # From this point on we know it's always a list
            rawRequests = [rawRequests]

        # JSON Parsed handling each request
        requests = []
        for rawRequest in rawRequests:
            try:
                req = JsonRpcRequest.fromRawObject(rawRequest)
                requests.append(req)
            except JsonRpcError as err:
                ctx.addResponse(JsonRpcResponse(None, err, None))
            except:
                ctx.addResponse(
                    JsonRpcResponse(None, JsonRpcInternalError(), None))

        ctx.setRequests(requests)

        # No request was built successfully, or the batch contains only notifications
        if ctx.counter == 0:
            ctx.sendReply()

        for request in requests:
            self._runRequest(ctx, request)

    def _runRequest(self, ctx, request):
        if self._threadFactory is None:
            self._serveRequest(ctx, request)
        else:
            self._threadFactory(partial(self._serveRequest, ctx, request))

    def stop(self):
        self.log.info("Stopping JsonRPC Server")
        self._workQueue.put_nowait(None)
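
A minimal bridge sketch under stated assumptions: the EchoBridge methods are invented stand-ins for a real vdsm-style bridge, and the transport that would normally call queueRequest is omitted:

class EchoBridge(object):
    def register_server_address(self, address):
        self.address = address

    def unregister_server_address(self):
        self.address = None

    def Host_ping(self):  # dispatched for JSON-RPC method "Host.ping"
        return "pong"

server = JsonRpcServer(EchoBridge())
# A transport would call server.queueRequest((client, raw_json)) per message;
# serve_requests() blocks until stop() enqueues the None sentinel.
server.stop()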