Example #1
class CallbackMulti:
    """ Call callback whenever a result is available. """

    def __init__(self, future, cb, *args):
        super(CallbackMulti, self).__init__()
        self.future = future
        self.cb = cb
        self._results = {}
        self._result_q = Queue()
        self.job_count = 0
        self.finished_job_count = 0
        glob_executor.submit(self._wait_result, cb, *args)

    def set_result(self, args, result_type, result):
        self._results[args] = {result_type: result}
        if result is not None:
            self._result_q.put_nowait((args, self._results[args]))
        else:
            # result is None: register a pending job in _results
            self.job_count += 1

    def _wait_result(self, callback, *args):
        while True:
            arg_result = self._result_q.get()
            callback(arg_result, *args)
            self.finished_job_count += 1
            if self._all_job_done():
                self.future.finished.set()
                break

    def _all_job_done(self):
        return self.finished_job_count == self.job_count
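
A wiring sketch for the class above; glob_executor, the future object with its finished event, and the callback signature are inferred from the snippet, so every name below is illustrative rather than part of the original source.

from concurrent.futures import ThreadPoolExecutor
from queue import Queue
from threading import Event

glob_executor = ThreadPoolExecutor(max_workers=2)  # assumed global executor

class FutureStub:
    # Stand-in for the snippet's `future` argument.
    def __init__(self):
        self.finished = Event()

def on_result(arg_result, tag):
    args, result = arg_result
    print(tag, args, result)

future = FutureStub()
cm = CallbackMulti(future, on_result, 'batch-1')
cm.set_result(('job-a',), 'pending', None)  # result=None registers a job
cm.set_result(('job-a',), 'ok', 42)         # delivers the result to on_result
future.finished.wait(timeout=1)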
Example #2
class TestCommandEvents(unittest.TestCase):

    def setUp(self):
        domain = ENVIRONMENTS['streaming'][TARGET_ENV]
        settings = read_settings(CONFIG_PATH_FOR_UNIT_TESTS, TARGET_ENV)

        self.streamer = OandaEventStreamer(domain, settings['ACCESS_TOKEN'], settings['ACCOUNT_ID'], Journaler())
        self.streamer.set_events_q(Queue()).set_heartbeat_q(Queue()).set_exception_q(Queue())
        self.streamer.set_context(OANDA_CONTEXT_EVENTS)
        self.streaming_thread = Thread(target=self.streamer.stream, args=[])

        self.command_q = Queue()
        self.listener = QueueCommandListener(self.command_q, self.streamer.on_command)
        self.command_thread = self.listener.start_thread()

        self.streaming_thread.start()

    def tearDown(self):
        if self.streamer.streaming:
            self.streamer.stop()
            self.command_thread.join(timeout=5)
            self.streaming_thread.join(timeout=5)

    def test_should_be_able_to_connect_for_receiving_streaming_account_events(self):
        self.command_q.put_nowait(COMMAND_STOP)
        self.streaming_thread.join(timeout=5)
        self.command_thread.join(timeout=5)
        self.assertFalse(self.streamer.streaming, 'streaming should have stopped, but did not')
Example #3
    def test_max_connections_blocks(self):
        """Getting a connection should block for until available."""

        import time
        from copy import deepcopy
        from threading import Thread

        # We use a queue for cross thread communication within the unit test.
        try: # Python 3
            from queue import Queue
        except ImportError:
            from Queue import Queue

        q = Queue()
        q.put_nowait('Not yet got')
        pool = self.get_pool(max_connections=2, timeout=5)
        c1 = pool.get_connection('_')
        c2 = pool.get_connection('_')
        
        target = lambda: q.put_nowait(pool.get_connection('_'))
        Thread(target=target).start()

        # Blocks while none are available.
        time.sleep(0.05)
        c3 = q.get_nowait()
        self.assertEqual(c3, 'Not yet got')
        
        # Then got when available.
        pool.release(c1)
        time.sleep(0.05)
        c3 = q.get_nowait()
        self.assertEqual(c1, c3)
Example #4
def breadthFirstExplore(maze, x, y):
    '''explore a maze in a breadth-first manner'''
    qLen = len(maze[0]) * len(maze)
    pointQueue = Queue(qLen)
    pointQueue.put_nowait((x, y, qLen))
    goal = None
    maze[x][y] = qLen
    while not pointQueue.empty():
        x, y, k = pointQueue.get_nowait()
        val = getMazeValue(maze, x, y)
        if val == 2:
            goal = (x, y)
            return goal
        else:
            maze[x][y] = k
        # explore the neighborhood
        a = getMazeValue(maze, x + 1, y)
        b = getMazeValue(maze, x, y + 1)
        c = getMazeValue(maze, x - 1, y)
        d = getMazeValue(maze, x, y - 1)
        if a == 0 or a == 2:
            pointQueue.put((x + 1, y, k - 1))
        if b == 0 or b == 2:
            pointQueue.put((x, y + 1, k - 1))
        if c == 0 or c == 2:
            pointQueue.put((x - 1, y, k - 1))
        if d == 0 or d == 2:
            pointQueue.put((x, y - 1, k - 1))
    return goal
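
The snippet assumes "from queue import Queue" and a getMazeValue helper that is not shown; below is a plausible bounds-checked version of that helper plus a tiny run, offered as an illustrative sketch only.

from queue import Queue

def getMazeValue(maze, x, y):
    # Hypothetical helper: return the cell value, treating out-of-range
    # coordinates as walls (1) so the search never indexes out of bounds.
    if 0 <= x < len(maze) and 0 <= y < len(maze[0]):
        return maze[x][y]
    return 1

maze = [[0, 1, 0],
        [0, 1, 0],
        [0, 0, 2]]  # 0 = open, 1 = wall, 2 = goal
print(breadthFirstExplore(maze, 0, 0))  # -> (2, 2)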
Example #5
class MusicMetaHandler(PylaunchrHandler):

    def __init__(self, logger):
        super().__init__()
        self.queue = Queue(5)
        self.logger = logger

    def handle_event(self, event):
        self.logger.debug("Received event")
        args = event.args

        callback = args["callback"]
        callback_args = args["callback_args"]
        result = args["result"]

        callback(result, *callback_args)

    def fetch(self, albuminfo, callback, callback_args=()):
        artist = albuminfo["artist"]
        album = albuminfo["album"]

        self.logger.debug("Fetch %s - %s", artist, album)
        self.queue.put_nowait({
            "albuminfo": albuminfo,
            "callback": callback,
            "callback_args": callback_args})

    def fetch_mosaic(self, playlist, callback, callback_args=()):
        ''' Takes a sequence of set of '''
        raise NotImplementedError

    def tick(self, tick):
        return False
Example #6
File: fakers.py Project: RSDT/bot2
class FakeBot:
    def __init__(self, *args, **kwargs):
        self.next_message_id = next_message_id()
        self.next_update_id = next_message_id()
        self.user = telegram.User(1234567890, 'Unittest')
        self.updates = Queue()

    def getUpdates(self, last_update_id, *args, **kwargs):
        updates = []
        try:
            while not self.updates.empty():
                updates.append(self.updates.get_nowait())
        except Empty:
            pass
        return updates

    def sendMessage(self, chat_id, message, *args, **kwargs):
        chat = telegram.Chat(chat_id, telegram.Chat.SUPERGROUP)
        message = telegram.Message(next(self.next_message_id), self.user,
                         datetime.datetime.now(), chat)
        return message

    def sendSticker(self, chat_id, *args, **kwargs):
        pass

    def sendLocation(self, chat_id, *args, **kwargs):
        pass

    def add_update(self, chat_id, text):
        chat = telegram.Chat(chat_id, telegram.Chat.SUPERGROUP)
        user = telegram.User(1234, 'test')
        message = telegram.Message(next(self.next_message_id), user,
                                   datetime.datetime.now(), chat, text=text)
        update = telegram.Update(next(self.next_update_id), message=message)
        self.updates.put_nowait(update)
Example #7
class TestQueuedCommandListener(unittest.TestCase):

    def on_command(self, command):
        self.last_command = command

    def setUp(self):
        self.command_q = Queue()
        self.listener = QueueCommandListener(self.command_q, self.on_command)
        self.last_command = None
        self.command_thread = self.listener.start_thread()

    def tearDown(self):
        if self.listener.listening:
            self.listener.force_stop()
        self.command_thread.join(timeout=2)

    def test_should_listen_to_STOP_command(self):
        self.command_q.put_nowait(COMMAND_STOP)
        self.command_thread.join(timeout=2)
        self.assertEqual(COMMAND_STOP, self.last_command)

    def test_should_stop_listening_after_STOP_command(self):
        self.command_q.put_nowait(COMMAND_STOP)
        self.command_thread.join(timeout=2)
        self.assertFalse(self.listener.listening, 'listening should have stopped, but did not')
Example #8
class CoverFetcherHandler(ServiceHandler):

    def __init__(self, logger):
        super().__init__()
        self.queue = Queue(5)
        self.logger = logger

    def handle_event(self, event):
        self.logger.debug("Received event")
        args = event.args

        callback = args["callback"]
        callback_args = args["callback_args"]
        result = args["result"]

        callback(result, *callback_args)

    def fetch(self, albuminfo, callback, callback_args=()):
        artist = albuminfo["artist"]
        album = albuminfo["album"]

        self.logger.debug("Fetch %s - %s", artist, album)
        self.queue.put_nowait({
            "albuminfo": albuminfo,
            "callback": callback,
            "callback_args": callback_args})

    def tick(self, tick):
        return False
Example #9
    def findFirstTri(vert, lastMFace, mirrorMesh, searchData):
        #
        # Iterates through all faces in the mirror mesh and tests for
        # intersection; the first intersecting face connected to the initial
        # search face is returned.
        #

        faceQueue = Queue()
        # Tags keep track of which faces have been tested or queued
        # (False = not tested).
        taggedFaces = []

        # Start testing from the initial face!
        lastMFace.tag = True
        faceQueue.put_nowait(lastMFace)
        taggedFaces.append(lastMFace)

        while not faceQueue.empty():
            face = faceQueue.get_nowait()
            mDat = MirrorMesh.triIntersection(vert.co, face)
            if mDat is not None and mDat._intersected:
                searchData[vert.index].setMirror(mDat)
                break  # we found an intersecting tri
            # Queue connected faces
            MirrorMesh.queueConnectedFaces(face, mirrorMesh, faceQueue, taggedFaces)

        for f in taggedFaces:
            f.tag = False
Example #10
class Kernel(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.programsQueue = Queue()
        self.isFirstLoad = True
        self.shouldShutDown = False

    def initializeKernel(self, clock, programloader, scheduler):
        self.programLoader = programloader
        self.scheduler = scheduler
        self.clock = clock

    def load(self, program):
        # Sets a program that the program loader will load to the memory
        self.programsQueue.put_nowait(program)

    def run(self):
        # No need to call Thread.run(self) here; it would be a no-op.
        while not self.shouldShutDown:
            if not self.programsQueue.empty():
                program = self.programsQueue.get_nowait()
                self.isFirstLoad = len(self.programLoader.pcbTable.pcbs) == 0
                self.programLoader.load(program)
                if self.isFirstLoad:
                    self.scheduler.setNextPcbToCpu()
Example #11
class Event(Thread):

    def __init__(self):
        Thread.__init__(self)
        self.running = False
        self.starting = True
        self.tasks = Queue()

    def pause(self):
        self.running = False

    def resume(self):
        self.running = True

    def stop(self):
        self.pause()
        self.starting = False

    def add(self, item):
        self.tasks.put_nowait(item)

    def run(self):
        print("event listener started")
        while self.starting:
            if not self.running:
                sleep(0.1)
                continue
            try:
                func, fuin, suin, iseq, content = self.tasks.get()
                print("dequeued: %s %s %s %s %s" % (func, fuin, suin, iseq, content))
                func(fuin, suin, iseq, content)
            except Empty:
                pass
            sleep(0.1)
Example #12
class MockSock:
    def __init__(self, autorespond=False):
        self.sent_queue = Queue()
        self.recv_queue = Queue()
        self.closed = False
        self.timeout = 0.5
        self.autorespond = autorespond

    def gettimeout(self):
        return self.timeout 

    def close(self):
        self.closed = True

    def send(self, message):
        parsed = json.loads(message)
        self.sent_queue.put_nowait(parsed)
        if self.autorespond:
            self.recv_queue.put_nowait({'msg': RESULT, 'id': parsed['id']})

    def recv(self):
        try:
            message = self.recv_queue.get(timeout=self.timeout)
        except Empty as e:
            raise JCoreAPITimeoutException("recv timed out", e) 

        if isinstance(message, Exception):
            raise message
        return json.dumps(message)
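
A round-trip sketch for MockSock; RESULT, Empty, Queue, and JCoreAPITimeoutException come from the surrounding project, so the imports and stand-ins below are assumptions supplied for illustration.

import json
from queue import Queue, Empty

RESULT = 'result'  # assumed message-type constant

class JCoreAPITimeoutException(Exception):  # assumed stand-in
    pass

sock = MockSock(autorespond=True)
sock.send(json.dumps({'id': 1, 'method': 'ping'}))
print(sock.sent_queue.get_nowait())  # -> {'id': 1, 'method': 'ping'}
print(sock.recv())                   # -> '{"msg": "result", "id": 1}'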
Example #13
 def put_nowait(self, *args, **kwargs):
     if self.full():
         try:
             # discard the oldest item to make room
             self.get_nowait()
         except Empty:
             pass
     Queue.put_nowait(self, *args, **kwargs)
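
A self-contained version of the override above (assuming "from queue import Queue, Empty"), showing that a full queue evicts its oldest entry instead of raising Full:

from queue import Queue, Empty

class DroppingQueue(Queue):
    # Bounded queue whose put_nowait discards the oldest item when full.
    def put_nowait(self, *args, **kwargs):
        if self.full():
            try:
                self.get_nowait()  # drop the oldest item
            except Empty:
                pass
        Queue.put_nowait(self, *args, **kwargs)

q = DroppingQueue(maxsize=2)
for i in range(3):
    q.put_nowait(i)
print(q.get_nowait(), q.get_nowait())  # -> 1 2 (item 0 was evicted)

Note that the full()/get_nowait() pair is not atomic, so with several producers an eviction can still race with a put; the pattern trades strictness for simplicity.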
Example #14
def create_step60(maindir,mbconnect=None,maxsongs=100,nfilesbuffer=0):
    """
    Makes sure we have the similar artists of the top 100 most familiar
    artists, and then goes on with more similar artists.
    INPUT
       maindir       - root directory of the Million Song dataset
       mbconnect     - open pg connection to Musicbrainz
       maxsongs      - max number of song per search (max=100)
       nfilesbuffer  - number of files we leave unfilled in the dataset
    RETURN
       the number of songs actually created
    """
    # will contain artists TID that are done or already in the queue
    artists_done = set()
    # get all artists ids
    artist_queue = Queue()
    artists = get_most_familiar_artists(nresults=100)
    n_most_familiars = len(artists)
    npr.shuffle(artists)
    for a in artists:
        artists_done.add( a.id )
        artist_queue.put_nowait( a )
    # for each of them create all songs
    cnt_created = 0
    cnt_artists = 0
    while not artist_queue.empty():
        artist = artist_queue.get_nowait()
        cnt_artists += 1
        # CLOSED CREATION?
        if CREATION_CLOSED:
            break
        if cnt_artists % 10 == 0:
            nh5 = count_h5_files(maindir)
            print('found',nh5,'h5 song files in',maindir); sys.stdout.flush()
            if nh5 > TOTALNFILES - nfilesbuffer:
                return cnt_created
        # verbose
        print('doing artist',cnt_artists,'(pid='+str(os.getpid())+')'); sys.stdout.flush()
        # encode that artist unless it was done in step10
        #if cnt_artists > n_most_familiars:
        # we had to relaunch this function; let's not redo all the same artists over and over
        if cnt_artists > 1000:
            cnt_created += create_track_files_from_artist(maindir,artist,
                                                          mbconnect=mbconnect,
                                                          maxsongs=maxsongs)
        # get similar artists, add to queue
        similars = get_similar_artists(artist)
        if len(similars) == 0: continue
        npr.shuffle(similars)
        similars = similars[:10] # we keep 10 at random; the radius of artists grows faster
                                 # and the threads don't redo the same artists over and over.
                                 # Too bad for the artists we miss (if any...)
        for a in similars:
            if a.id in artists_done:
                continue
            artists_done.add(a.id)
            artist_queue.put_nowait(a)
    return cnt_created
Example #15
class SocketServer(object):

    def __init__(self):
        self.host = "0.0.0.0"
        self.port = 9998
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind((self.host, self.port))
        self.sock.listen(10)
        self.conn, addr = self.sock.accept()
        self.queue = Queue()


    def run(self):
        rest_msg = ""
        try:
            while True:
                try:
                    item = self.queue.get_nowait()
                    time.sleep(3)
                    print("stop")
                    self.conn.sendall("stop".encode())
                except Empty:
                    dados = self.conn.recv(1024)
                    if dados:
                        data_list = (rest_msg + dados.decode()).split("\n")
                        if len(data_list) > 1:
                            rest_msg = data_list.pop()
                        for data in data_list:
                            try:
                                dados = json.loads(data)
                                if dados:
                                    if dados.get("command") == "reader":
                                        _thread.start_new_thread(self.read_file, (dados["file"],))
                                    elif dados.get("command") == "write":
                                        _thread.start_new_thread(self.write_file, (dados["file"], dados["content"],))
                            except Exception:
                                print(traceback.format_exc())

        except Exception:
            print(traceback.format_exc())
            self.conn.close()

    def read_file(self, file):
        with open(file, 'r') as fh:
            for line in fh:
                self.conn.sendall(line.encode())

        self.queue.put_nowait(0)


    def write_file(self, file_path, content):
        file = open(file_path, "a")
        file.write(content + "\n")
        file.close()
Example #16
class TcpSock(object):

    RECV_BUFF_SIZE = 8192

    def __init__(self, conn_details):
        self._host = conn_details.host
        self._port = conn_details.port
        self._client = None
        self._send_queue = Queue()
        self._recv_queue = Queue()
        self._running = False

    def start(self):
        self._client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._client.connect((self._host, self._port))
        self._client.setblocking(False)
        self._running = True

        while self._running:
            send_status = self._send()
            recv_status = self._recv()

            if not send_status and not recv_status:
                time.sleep(1)

    def stop(self):
        self._running = False

    def send(self, data):
        self._send_queue.put_nowait(data)

    def recv(self):
        # Queue.not_empty is a Condition object (always truthy); test empty() instead.
        if not self._recv_queue.empty():
            return self._recv_queue.get()

    def is_running(self):
        return self._running

    def _send(self):
        counter = 0
        while not self._send_queue.empty():
            self._client.send(self._send_queue.get())
            counter += 1

        return counter > 0

    def _recv(self):
        counter = 0
        try:
            data = self._client.recv(self.RECV_BUFF_SIZE)
            while data:
                self._recv_queue.put_nowait(data)
                counter += 1
                data = self._client.recv(self.RECV_BUFF_SIZE)
        except BlockingIOError:
            # non-blocking socket: no data currently available
            pass

        return counter > 0
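
A small driver sketch for the class above, assuming a conn_details object exposing host and port and a TCP server already listening there; the namedtuple is a stand-in, not part of the original.

import threading
from collections import namedtuple

ConnDetails = namedtuple('ConnDetails', 'host port')  # assumed shape

tcp = TcpSock(ConnDetails('127.0.0.1', 9998))
t = threading.Thread(target=tcp.start, daemon=True)  # start() loops until stop()
t.start()
tcp.send(b'hello')   # queued; flushed by the start() loop
print(tcp.recv())    # drained data, or None if nothing arrived yet
tcp.stop()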
Example #17
class WatcherHandler(FileSystemEventHandler):
    def __init__(self, filename, counter, notifier, config, queue_len=100):
        self.filename = path.abspath(filename)
        self.queue = Queue(queue_len)
        self.monitor = Monitor(self.queue, counter, notifier)
        self.offset_persistance = OffsetPersistance(config)
        self.fd = None
        self.time = datetime.now()
        self.offset = self.offset_persistance.get(filename)
        if path.isfile(self.filename):
            self.fd = open(self.filename)
            self.offset = path.getsize(self.filename)

    def on_created(self, event):
        if event.src_path == self.filename and path.isfile(self.filename):
            self.offset = 0
            self.fd = open(self.filename, 'r')
            self.fd.seek(self.offset,0)

    def on_deleted(self, event):
        if event.src_path == self.filename:
            self.fd.close()

    def on_modified(self, event):
        self.fd.seek(self.offset, 0)
        for line in self.fd:
            line = line.rstrip('\n')
            try:
                self.queue.put_nowait(line)
            except Full:
                logging.error('{0} input queue is full!'.format(datetime.now()))
        self.offset = self.fd.tell()
        if (datetime.now() - self.time).seconds > 30:
            self.offset_persistance.put(self.filename, self.offset)
            self.time = datetime.now()

    def on_moved(self, event):
        if path.abspath(event.src_path) == self.filename:
            self.fd.close()
            self.offset = 0

        if path.abspath(event.dest_path) == self.filename:
            self.fd = open(self.filename,'r')
            self.offset = path.getsize(self.filename)

    def start(self):
        self.monitor.start()

    def stop(self):
        self.monitor.stop()
        if self.fd is not None and not self.fd.closed:
            self.fd.close()
        self.offset_persistance.sync()
        self.offset_persistance.close()
Example #18
 def match(self, checker):
     queue = Queue()
     self.queues[checker.name] = queue
     threading.Thread(target=self._match, args=(checker, )).start()
     while not self.events[checker.name].is_set():
         with self.__cond:
             self.__cond.wait()
             try:
                 queue.put_nowait(self.line)
             except Full:
                 logging.error("match queue for {0} full".format(checker.name))
Example #19
 def load_chain(self):
     # TODO : load chainstate from database
     get_children_of = Queue()
     get_children_of.put_nowait(self.genesis_block.get_hash())
     while not get_children_of.empty():
         next_hash = get_children_of.get_nowait()
         children = set(self.get_children(next_hash))
         for child_hash in children:
             block = self.get_block(child_hash)
             self.seek_n_build.add_block(block)  # add to seeknbuild just to get preexisting heights
             self.add_block(block, skip_db=True)  # this add_block does the real work -- allows proper loading of chain
             get_children_of.put_nowait(block.get_hash())
Example #20
 def match(self, checker, event):
     queue = Queue()
     self.queue[checker.name] = queue
     threading.Thread(target=self._match, args=(checker, event)).start()
     while not event.is_set():
         with self.__cond:
             self.__cond.wait()
             try:
                 queue.put_nowait(self.line)
             except Full:
                 logging.error("metch queue full")
         line = self.queue.get()
         if self.macthers.get(checker.name).match(line):
             self.counter.inc(checker.name)
Example #21
class ConnectionPool(object):
    """
    Usage:
        conn_pool = nmi_mysql.ConnectionPool(config)

        db = conn_pool.get_connection()
        db.query('SELECT 1', [])
        conn_pool.return_connection(db)

        conn_pool.close()
    """

    def __init__(self, conf, max_pool_size=20):
        self.conf = conf
        self.max_pool_size = max_pool_size
        self.initialize_pool()

    def initialize_pool(self):
        self.pool = Queue(maxsize=self.max_pool_size)
        for _ in range(0, self.max_pool_size):
            self.pool.put_nowait(DB(self.conf, True))

    def get_connection(self):
        # returns a db instance when one is available, else waits until one is
        db = self.pool.get(True)

        # checks if db is still connected, because a db instance automatically
        # closes when not in use
        if not self.ping(db):
            db.connect()

        return db

    def return_connection(self, db):
        return self.pool.put_nowait(db)

    def close(self):
        while not self.is_empty():
            self.pool.get().close()

    def ping(self, db):
        data = db.query("SELECT 1", [])
        return data

    def get_initialized_connection_pool(self):
        return self.pool

    def is_empty(self):
        return self.pool.empty()
Example #22
class TestPeerNodeManager(unittest.TestCase):
    """
    Tests for the PeerNodeManager class.
    """

    def setUp(self):
        self.message_queue = Queue()
        self.node_list = PeerNodeList()
        self.address = Address("127.0.0.1", 8880)
        self.timestamp = Timestamp.create_timestamp()
        self.node_name = "hello node"
        self.node1 = PeerNode(self.node_name, self.address, self.timestamp)

    def test_init(self):
        self.assertRaisesRegex(ValueError, PeerNodeManager.queue_error_string,
                               PeerNodeManager, "queue", "node_list")
        self.assertRaisesRegex(ValueError, PeerNodeManager.node_list_error_string,
                               PeerNodeManager, self.message_queue, "node_list")
        manager = PeerNodeManager(self.message_queue, self.node_list)
        self.assertEqual(self.message_queue, manager.message_queue)
        self.assertEqual(self.node_list, manager.node_list)
        self.assertTrue(manager.keep_alive)

    def test_read_message_from_queue(self):
        message = mock.create_autospec(Message)
        manager = PeerNodeManager(self.message_queue, self.node_list)
        self.message_queue.put_nowait(message)
        message_from_queue = manager.read_message_from_queue()
        manager.message_queue.task_done()
        self.assertEqual(message, message_from_queue)

    def test_update_node_list(self):
        self.assertRaises(ValueError, self.node1.update_node, "hello")
        test_name = "bye node"
        test_ip = "127.0.0.1"
        test_port = 8880
        test_address = Address(test_ip, test_port)
        test_timestamp = Timestamp.create_timestamp()
        message1 = Message(MessageType.description_response, test_name, test_address, test_timestamp)
        message2 = Message(MessageType.description_response, self.node_name, test_address, test_timestamp)

        manager = PeerNodeManager(self.message_queue, self.node_list)
        self.assertEqual(0, manager.node_list.count())
        self.assertEqual(UpdateResult.added_new_node, manager.update_node_list(message1))
        self.assertEqual(1, manager.node_list.count())
        manager.node_list.add(self.node1)
        self.assertEqual(UpdateResult.updated_existing_node, manager.update_node_list(message2))
Example #23
    def test_should_have_executed_order_in_portfolio_q(self):
        execution_q = Queue()
        portfolio_q = Queue()
        execution_loop = EventLoop(execution_q, self.executor, processed_event_q=portfolio_q)
        execution_thread = Thread(target=execution_loop.start)
        execution_thread.start()
        execution_q.put_nowait(self.buy_order)
        sleep(2*execution_loop.heartbeat)
        execution_loop.stop()
        execution_thread.join(timeout=2*execution_loop.heartbeat)

        try:
            executed_order = portfolio_q.get_nowait()
            self.assertEqual(self.buy_order, executed_order.order)
            self.assertEqual(self.executed_order, executed_order)
        except Empty:
            self.fail('should have one event in portfolio queue as response from executor')
Example #24
class KongregateData(object):
    URL = 'http://api.kongregate.com/api/user_info.json?user_ids='
    USERS_PER_REQUEST = 50
    USER_KEY = 'users'

    def __init__(self, start, end):
        self.start = start
        self.end = end

        self.urls = Queue()
        self.results = Queue()
        self.generate_urls()

    def __str__(self):
        return "[KongregateData from:{} to:{}]".format(self.start, self.end)

    def generate_urls(self):
        i = self.start

        while i <= self.end:
            user_ids = list(range(i, i + KongregateData.USERS_PER_REQUEST))
            url = '{URL}{QUERY}'.format(URL=KongregateData.URL, QUERY=','.join([str(user_id) for user_id in user_ids]))
            self.urls.put_nowait(url)

            i += KongregateData.USERS_PER_REQUEST

    def run(self, threads, callback):
        """
        Start downloading data and processing it

        :param threads:		number of threads to open
        :param callback:	callback function to be called for every user
        """
        for i in range(threads):
            t = LoaderThread(self.urls, self.results)
            t.daemon = True
            t.start()

        t = ActionThread(self.results, callback)
        t.daemon = True
        t.start()

        self.urls.join()
        self.results.join()
Example #25
class MessageProcessor(Thread):

    def __init__(self, server_pool):
        super(MessageProcessor, self).__init__()
        self.setDaemon(True)
        self.__tasks = Queue()
        self.message_filter_callback = None
        self.__server_pool = server_pool

    def queue(self, messages, server_name):
        """Add messages to the queue

        Args:
            messages (list): list of Message instance
            server_name (str): the name of the server in the config
        """
        logger.debug("Add messages to queue: %s", len(messages))
        for message in messages:
            self.__tasks.put_nowait((message, server_name))

    def run(self):
        while True:
            message, server_name = self.__tasks.get()
            logger.debug("Start processing a message")
            message.parse()
            """
            However the IMAPListener has a server connection, waits for the new emails with idle_check, but meanwhile the server
            checking there is no way to perform any command (move, mark as read, etc) in this server instance
            se need to open an other connection.
            """
            message.server = self.__server_pool.fetch(server_name)

            if self.message_filter_callback:
                logger.debug("Call filter callback for message id: %s, from: '%s'", message.id, message.sender)
                try:
                    self.message_filter_callback(message)
                except Exception as e:
                    logger.exception("Exception while running message filters: %s", e)
            else:
                logger.warning("There is no message filter callback set in MessageProcessor!")

            self.__tasks.task_done()
Example #26
    def geocode_threaded(self, addrs, parallelism=5):
        address_q = Queue(len(addrs))
        for addr in addrs: address_q.put_nowait(addr)

        # Use a list to collect results, because .append() is
        # thread-safe
        out = []

        for i in range(parallelism):
            gt = GeocoderThread(address_q, out, self)
            gt.start()

        address_q.join()

        # Out is a list of pairs of (addr, result)
        results = dict(out)

        # Pluck out the results in the same order as the input
        # addresses:
        return [results[a] for a in addrs]
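
The GeocoderThread used above is not part of the snippet; below is a plausible sketch that matches the queue/out-list/join() protocol, assuming the parent geocoder exposes a geocode(addr) method (an assumption, not the original class).

import threading
from queue import Queue, Empty

class GeocoderThread(threading.Thread):
    # Hypothetical worker: drains address_q, appends (addr, result) pairs
    # to the shared list, and calls task_done() so address_q.join() returns.
    def __init__(self, address_q, out, geocoder):
        super().__init__(daemon=True)
        self.address_q = address_q
        self.out = out
        self.geocoder = geocoder

    def run(self):
        while True:
            try:
                addr = self.address_q.get_nowait()
            except Empty:
                return
            try:
                self.out.append((addr, self.geocoder.geocode(addr)))
            finally:
                self.address_q.task_done()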
Example #27
    def test_should_not_forward_event_when_forward_q_is_full(self):
        events = Queue()
        dummy_forwarded_event = 'i was forwarded earlier'
        forward_q = Queue(maxsize=1)
        looper = EventLoop(events, DummyEventHandler(), forward_q=forward_q)

        forward_q.put_nowait(dummy_forwarded_event)
        price_thread = Thread(target=looper.start, args=[])
        price_thread.start()
        events.put('dummy event')
        sleep(2*looper.heartbeat)
        looper.stop()
        price_thread.join(timeout=2*looper.heartbeat)
        out_event = forward_q.get_nowait()
        self.assertEqual(dummy_forwarded_event, out_event)
        try:
            forward_q.get_nowait()
            self.fail('forward_q should be empty as the previous dummy message would have filled it')
        except Empty:
            pass
Example #28
class PacketDigester(ThreadMonster):
    def __init__(self, packet_generator, input_stream, swallow_trigger=500, 
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.input_stream = input_stream
        self.swallow_trigger = swallow_trigger
        self.packet_generator = packet_generator
        self._output_q = Queue()
        
        self.add_thread(task=self.parse, name='packet_digester', 
                        no_faster_than=.01)
        
    def parse(self):
        ''' Parses data in the stream forever, placing the resulting objects in
        the queue. Waits for the stream to buffer swallow_trigger bytes before
        parsing.
        '''
        if len(self.input_stream) > self.swallow_trigger:
            try:
                packet = self.packet_generator(self.input_stream)
                self._output_q.put_nowait(packet)
            # If the packet is too small, break out.
            except PacketSizeError:
                return
            # Catch bad checksums and delete the header. This
            # forces the stream to realign.
            except ChecksumMismatch:
                print(checksum_warning)
                del self.input_stream[0]
                return
            # except Full:
            
    def pop(self):
        ''' Returns and removes a packet. Threadsafe. Returns None if 
        no packet is available.
        '''
        try:
            return self._output_q.get_nowait()
        except Empty:
            return None
Example #29
class FileRecorder(ThreadMonster):
    def __init__(self, filename, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.filename = filename
        self._file_q = Queue()
        self.add_thread(task=self.dump, name='file_recorder', 
                        no_faster_than=.001)
        self._f = None
    
    def dump(self):
        ''' Appends a string to the supplied file and adds a newline.
        '''
        # If there's an item on the queue, grab it and process it; otherwise do nothing.
        try:
            obj = self._file_q.get_nowait()
        except Empty:
            return
            
        # Open, if it hasn't been opened yet.
        if not self._f:
            self._f = open(self.filename, 'a+')
        
        s = json.dumps(obj)
        self._f.write(s)
        self._f.write('\n')
        
    def __exit__(self, *args, **kwargs):
        # Close our file, then call super.
        if self._f:
            self._f.close()
        # Reset state, regardless (tiny performance hit; reliability assurance)
        self._f = None
        super().__exit__(*args, **kwargs)
            
    def schedule_object(self, obj):
        ''' Schedules an object to be recorded to the file. Threadsafe.
        '''
        self._file_q.put_nowait(obj)
Example #30
class TestDummyBuyStrategy(unittest.TestCase):
    def setUp(self):

        self.ticks_and_ack_q = Queue()
        self.signal_output_q = Queue()
        self.strategy = StrategyOrderManager(DummyBuyStrategy(), 100)
        self.strategy_loop = EventLoop(self.ticks_and_ack_q, self.strategy, processed_event_q=self.signal_output_q)
        self.strategy_thread = Thread(target=self.strategy_loop.start, args=[])

        # ticks and events for testing
        self.tick = TickEvent('EUR_GBP', get_time(), 0.87, 0.88)

    def tearDown(self):
        self.strategy_loop.stop()
        self.strategy_thread.join(MAX_TIME_TO_ALLOW_SOME_EVENTS_TO_STREAM)

    def test_should_output_signals(self):
        self.strategy_thread.start()
        self.ticks_and_ack_q.put_nowait(self.tick)

        order = await_event_receipt(self, self.signal_output_q, 'should receive one order in output q')
        self.assertEqual(self.tick.instrument, order.instrument)
        self.assertEqual(EVENT_TYPES_ORDER, order.TYPE)
        self.assertEqual(self.strategy.units, order.units)
Example #31
class AsyncProcess:
    """
    A coroutine-compatible multiprocessing.Process-alike.
    All normally blocking methods are wrapped in Tornado coroutines.
    """
    def __init__(self, loop=None, target=None, name=None, args=(), kwargs={}):
        if not callable(target):
            raise TypeError("`target` needs to be callable, not %r" %
                            (type(target), ))
        self._state = _ProcessState()
        self._loop = loop or IOLoop.current(instance=False)

        # _keep_child_alive is the write side of a pipe, which, when it is
        # closed, causes the read side of the pipe to unblock for reading. Note
        # that it is never closed directly. The write side is closed by the
        # kernel when our process exits, or possibly by the garbage collector
        # closing the file descriptor when the last reference to
        # _keep_child_alive goes away. We can take advantage of this fact to
        # monitor from the child and exit when the parent goes away unexpectedly
        # (for example due to SIGKILL). This variable is otherwise unused except
        # for the assignment here.
        parent_alive_pipe, self._keep_child_alive = mp_context.Pipe(
            duplex=False)

        self._process = mp_context.Process(
            target=self._run,
            name=name,
            args=(
                target,
                args,
                kwargs,
                parent_alive_pipe,
                self._keep_child_alive,
                dask.config.global_config,
            ),
        )
        _dangling.add(self._process)
        self._name = self._process.name
        self._watch_q = PyQueue()
        self._exit_future = Future()
        self._exit_callback = None
        self._closed = False

        self._start_threads()

    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self._name)

    def _check_closed(self):
        if self._closed:
            raise ValueError("invalid operation on closed AsyncProcess")

    def _start_threads(self):
        self._watch_message_thread = threading.Thread(
            target=self._watch_message_queue,
            name="AsyncProcess %s watch message queue" % self.name,
            args=(
                weakref.ref(self),
                self._process,
                self._loop,
                self._state,
                self._watch_q,
                self._exit_future,
            ),
        )
        self._watch_message_thread.daemon = True
        self._watch_message_thread.start()

        def stop_thread(q):
            q.put_nowait({"op": "stop"})
            # We don't join the thread here as a finalizer can be called
            # asynchronously from anywhere

        self._finalizer = weakref.finalize(self, stop_thread, q=self._watch_q)
        self._finalizer.atexit = False

    def _on_exit(self, exitcode):
        # Called from the event loop when the child process exited
        self._process = None
        if self._exit_callback is not None:
            self._exit_callback(self)
        self._exit_future.set_result(exitcode)

    @classmethod
    def _immediate_exit_when_closed(cls, parent_alive_pipe):
        """
        Immediately exit the process when parent_alive_pipe is closed.
        """
        def monitor_parent():
            try:
                # The parent_alive_pipe should be held open as long as the
                # parent is alive and wants us to stay alive. Nothing writes to
                # it, so the read will block indefinitely.
                parent_alive_pipe.recv()
            except EOFError:
                # Parent process went away unexpectedly. Exit immediately. Could
                # consider other exiting approaches here. My initial preference
                # is to unconditionally and immediately exit. If we're in this
                # state it is possible that a "clean" process exit won't work
                # anyway - if, for example, the system is getting bogged down
                # due to the running out of memory, exiting sooner rather than
                # later might be needed to restore normal system function.
                # If this is inappropriate for your use case, please file a
                # bug.
                os._exit(-1)
            else:
                # If we get here, something odd is going on. File descriptors
                # got crossed?
                raise RuntimeError("unexpected state: should be unreachable")

        t = threading.Thread(target=monitor_parent)
        t.daemon = True
        t.start()

    @staticmethod
    def reset_logger_locks():
        """ Python 2's logger's locks don't survive a fork event

        https://github.com/dask/distributed/issues/1491
        """
        for name in logging.Logger.manager.loggerDict.keys():
            for handler in logging.getLogger(name).handlers:
                handler.createLock()

    @classmethod
    def _run(cls, target, args, kwargs, parent_alive_pipe, _keep_child_alive,
             inherit_config):
        # On Python 2 with the fork method, we inherit the _keep_child_alive fd,
        # whether it is passed or not. Therefore, pass it unconditionally and
        # close it here, so that there are no other references to the pipe lying
        # around.
        cls.reset_logger_locks()

        _keep_child_alive.close()

        # Child process entry point
        cls._immediate_exit_when_closed(parent_alive_pipe)

        threading.current_thread().name = "MainThread"
        # Update the global config, giving priority to the existing global config
        dask.config.update(dask.config.global_config,
                           inherit_config,
                           priority="old")
        target(*args, **kwargs)

    @classmethod
    def _watch_message_queue(cls, selfref, process, loop, state, q,
                             exit_future):
        # As multiprocessing.Process is not thread-safe, we run all
        # blocking operations from this single loop and ship results
        # back to the caller when needed.
        r = repr(selfref())
        name = selfref().name

        def _start():
            process.start()

            thread = threading.Thread(
                target=AsyncProcess._watch_process,
                name="AsyncProcess %s watch process join" % name,
                args=(selfref, process, state, q),
            )
            thread.daemon = True
            thread.start()

            state.is_alive = True
            state.pid = process.pid
            logger.debug("[%s] created process with pid %r" % (r, state.pid))

        while True:
            msg = q.get()
            logger.debug("[%s] got message %r" % (r, msg))
            op = msg["op"]
            if op == "start":
                _call_and_set_future(loop, msg["future"], _start)
            elif op == "terminate":
                _call_and_set_future(loop, msg["future"], process.terminate)
            elif op == "stop":
                break
            else:
                assert 0, msg

    @classmethod
    def _watch_process(cls, selfref, process, state, q):
        r = repr(selfref())
        process.join()
        exitcode = process.exitcode
        assert exitcode is not None
        logger.debug("[%s] process %r exited with code %r", r, state.pid,
                     exitcode)
        state.is_alive = False
        state.exitcode = exitcode
        # Make sure the process is removed from the global list
        # (see _children in multiprocessing/process.py)
        # Then notify the Process object
        self = selfref()  # only keep self alive when required
        try:
            if self is not None:
                _loop_add_callback(self._loop, self._on_exit, exitcode)
        finally:
            self = None  # lose reference

    def start(self):
        """
        Start the child process.

        This method is a coroutine.
        """
        self._check_closed()
        fut = Future()
        self._watch_q.put_nowait({"op": "start", "future": fut})
        return fut

    def terminate(self):
        """
        Terminate the child process.

        This method is a coroutine.
        """
        self._check_closed()
        fut = Future()
        self._watch_q.put_nowait({"op": "terminate", "future": fut})
        return fut

    @gen.coroutine
    def join(self, timeout=None):
        """
        Wait for the child process to exit.

        This method is a coroutine.
        """
        self._check_closed()
        assert self._state.pid is not None, "can only join a started process"
        if self._state.exitcode is not None:
            return
        if timeout is None:
            yield self._exit_future
        else:
            try:
                yield asyncio.wait_for(self._exit_future, timeout)
            except TimeoutError:
                pass

    def close(self):
        """
        Stop helper thread and release resources.  This method returns
        immediately and does not ensure the child process has exited.
        """
        if not self._closed:
            self._finalizer()
            self._process = None
            self._closed = True

    def set_exit_callback(self, func):
        """
        Set a function to be called by the event loop when the process exits.
        The function is called with the AsyncProcess as sole argument.

        The function may be a coroutine function.
        """
        # XXX should this be a property instead?
        assert callable(func), "exit callback should be callable"
        assert (self._state.pid is
                None), "cannot set exit callback when process already started"
        self._exit_callback = func

    def is_alive(self):
        return self._state.is_alive

    @property
    def pid(self):
        return self._state.pid

    @property
    def exitcode(self):
        return self._state.exitcode

    @property
    def name(self):
        return self._name

    @property
    def daemon(self):
        return self._process.daemon

    @daemon.setter
    def daemon(self, value):
        self._process.daemon = value
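
A usage sketch built only from the methods above (start(), join(), exitcode); it assumes the module's own imports (gen, IOLoop, mp_context, dask) are in scope and must run under a Tornado IOLoop.

@gen.coroutine
def demo():
    # `target` runs in the child process; print is just a visible example.
    proc = AsyncProcess(target=print, args=('hello from child',))
    yield proc.start()  # start() returns a Future resolved by the watcher thread
    yield proc.join()
    print('exit code:', proc.exitcode)

# IOLoop.current().run_sync(demo)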
Example #32
class LiveChat:
    '''
    LiveChat object fetches chat data and stores them
    in a buffer with ThreadpoolExecutor.

    Parameter
    ---------
    video_id : str

    seektime : int
        start position of fetching chat (seconds).
        This option is valid for archived chat only.
        If negative value, chat data posted before the start of the broadcast
        will be retrieved as well.

    processor : ChatProcessor

    buffer : Buffer
        buffer of chat data fetched in the background.

    interruptable : bool
        Allows keyboard interrupts.
        Set this parameter to False if your own threading program causes
        the problem.

    callback : func
        function called periodically from _listen().

    done_callback : func
        function called when listener ends.

    direct_mode : bool
        If True, invoke specified callback function without using buffer.
        callback is required. If not, IllegalFunctionCall will be raised.

    force_replay : bool
        force to fetch archived chat data, even if specified video is live.

    topchat_only : bool
        If True, get only top chat.

    replay_continuation : str
        If this parameter is not None, the processor will attempt to get chat data from continuation.
        This parameter is only allowed in archived mode.

    Attributes
    ---------
    _executor : ThreadPoolExecutor
        This is used for _listen() loop.

    _is_alive : bool
        Flag to stop getting chat.
    '''

    _setup_finished = False

    def __init__(self, video_id,
                 seektime=-1,
                 processor=DefaultProcessor(),
                 client = httpx.Client(http2=True),
                 buffer=None,
                 interruptable=True,
                 callback=None,
                 done_callback=None,
                 direct_mode=False,
                 force_replay=False,
                 topchat_only=False,
                 logger=config.logger(__name__),
                 replay_continuation=None
                 ):
        self._client = client
        self._video_id = util.extract_video_id(video_id)
        self.seektime = seektime
        if isinstance(processor, tuple):
            self.processor = Combinator(processor)
        else:
            self.processor = processor
        self._buffer = buffer
        self._callback = callback
        self._done_callback = done_callback
        self._executor = ThreadPoolExecutor(max_workers=2)
        self._direct_mode = direct_mode
        self._is_alive = True
        self._is_replay = force_replay or (replay_continuation is not None)
        self._parser = Parser(is_replay=self._is_replay)
        self._pauser = Queue()
        self._pauser.put_nowait(None)
        self._first_fetch = replay_continuation is None
        self._fetch_url = config._sml if replay_continuation is None else config._smr
        self._topchat_only = topchat_only
        self._dat = ''
        self._last_offset_ms = 0
        self._logger = logger
        self._event = Event()
        self.continuation = replay_continuation

        self.exception = None
        if interruptable:
            signal.signal(signal.SIGINT, lambda a, b: self.terminate())
        self._setup()

    def _setup(self):
        # An exception is raised when direct mode is true and no callback is set.
        if self._direct_mode:
            if self._callback is None:
                raise exceptions.IllegalFunctionCall(
                    "When direct_mode=True, callback parameter is required.")
        else:
            # Create a default buffer if `direct_mode` is False and buffer is not set.
            if self._buffer is None:
                self._buffer = Buffer(maxsize=20)
                # Create a loop task to call callback if the `callback` param is specified.
            if self._callback is None:
                pass
            else:
                # Start a loop task calling callback function.
                self._executor.submit(self._callback_loop, self._callback)
        # Start a loop task for _listen()
        self.listen_task = self._executor.submit(self._startlisten)
        # Register add_done_callback
        if self._done_callback is None:
            self.listen_task.add_done_callback(self._finish)
        else:
            self.listen_task.add_done_callback(self._done_callback)

    def _startlisten(self):
        """Fetch the first continuation parameter,
        then create and start the _listen loop.
        """
        time.sleep(0.1)  # sleep briefly so the initial data fetch is not skipped
        if not self.continuation:
            self.continuation = liveparam.getparam(
                self._video_id,
                channel_id=util.get_channelid(self._client, self._video_id),
                past_sec=3)
        self._listen(self.continuation)

    def _listen(self, continuation):
        ''' Fetch chat data and store it in the buffer,
        get the next continuation parameter, and loop.

        Parameter
        ---------
        continuation : str
            parameter for next chat data
        '''
        try:
            with self._client as client:
                while continuation and self._is_alive:
                    continuation = self._check_pause(continuation)
                    contents = self._get_contents(continuation, client, headers)
                    metadata, chatdata = self._parser.parse(contents)
                    continuation = metadata.get('continuation')
                    if continuation:
                        self.continuation = continuation
                    timeout = metadata['timeoutMs'] / 1000
                    chat_component = {
                        "video_id": self._video_id,
                        "timeout": timeout,
                        "chatdata": chatdata
                    }
                    time_mark = time.time()
                    if self._direct_mode:
                        processed_chat = self.processor.process(
                            [chat_component])
                        if isinstance(processed_chat, tuple):
                            self._callback(*processed_chat)
                        else:
                            self._callback(processed_chat)
                    else:
                        self._buffer.put(chat_component)
                    diff_time = timeout - (time.time() - time_mark)
                    self._event.wait(diff_time if diff_time > 0 else 0)
                    self._last_offset_ms = metadata.get('last_offset_ms', 0)
        except exceptions.ChatParseException as e:
            self._logger.debug(f"[{self._video_id}]{str(e)}")
            raise
        except Exception:
            self._logger.error(f"{traceback.format_exc(limit=-1)}")
            raise

        self._logger.debug(f"[{self._video_id}] finished fetching chat.")

    def _check_pause(self, continuation):
        if self._pauser.empty():
            '''pause'''
            self._pauser.get()
            '''resume:
                prohibit from blocking by putting None into _pauser.
            '''
            self._pauser.put_nowait(None)
            if not self._is_replay:
                continuation = liveparam.getparam(
                    self._video_id, channel_id=util.get_channelid(httpx.Client(http2=True), self._video_id),
                    past_sec=3, topchat_only=self._topchat_only)

        return continuation

    def _get_contents(self, continuation, client, headers):
        '''Get 'continuationContents' from livechat json.
           If contents is None at first fetching,
           try to fetch archive chat data.

          Return:
          -------
            'continuationContents' which includes metadata & chat data.
        '''
        livechat_json = self._get_livechat_json(
            continuation, client, replay=self._is_replay, offset_ms=self._last_offset_ms)
        contents, dat = self._parser.get_contents(livechat_json)
        if self._dat == '' and dat:
            self._dat = dat
        if self._first_fetch:
            if contents is None or self._is_replay:
                '''Try to fetch archive chat data.'''
                self._parser.is_replay = True
                self._fetch_url = config._smr
                continuation = arcparam.getparam(
                    self._video_id, self.seektime, self._topchat_only, util.get_channelid(client, self._video_id))
                livechat_json = self._get_livechat_json(
                    continuation, client, replay=True, offset_ms=self.seektime * 1000)
                reload_continuation = self._parser.reload_continuation(
                    self._parser.get_contents(livechat_json)[0])
                if reload_continuation:
                    livechat_json = self._get_livechat_json(
                        reload_continuation, client, replay=True)
                contents, _ = self._parser.get_contents(livechat_json)
                self._is_replay = True
            self._first_fetch = False
        return contents

    def _get_livechat_json(self, continuation, client, replay: bool, offset_ms: int = 0):
        '''
        Get json which includes chat data.
        '''
        livechat_json = None
        if offset_ms < 0:
            offset_ms = 0
        param = util.get_param(continuation, dat=self._dat, replay=replay, offsetms=offset_ms)
        for _ in range(MAX_RETRY + 1):
            try:
                response = client.post(self._fetch_url, json=param)
                livechat_json = response.json()
                break
            except (json.JSONDecodeError, httpx.HTTPError):
                time.sleep(2)
                continue
        else:
            self._logger.error(f"[{self._video_id}]"
                               f"Exceeded retry count.")
            raise exceptions.RetryExceedMaxCount()
        return livechat_json

    def _callback_loop(self, callback):
        """ If a callback is specified in the constructor,
        it throws chat data at regular intervals to the
        function specified in the callback in the backgroun

        Parameter
        ---------
        callback : func
            function to which the processed chat data is passed.
        """
        while self.is_alive():
            items = self._buffer.get()
            processed_chat = self.processor.process(items)
            if isinstance(processed_chat, tuple):
                self._callback(*processed_chat)
            else:
                self._callback(processed_chat)

    def get(self):
        """
        Retrieves data from the buffer,
        throws it to the processor,
        and returns the processed chat data.

        Returns
             : Chat data processed by the Processor
        """
        if self._callback is None:
            if self.is_alive():
                items = self._buffer.get()
                return self.processor.process(items)
            else:
                return []
        raise exceptions.IllegalFunctionCall(
            "Callback parameter is already set, so get() cannot be performed.")

    def is_replay(self):
        return self._is_replay

    def pause(self):
        if self._callback is None:
            return
        if not self._pauser.empty():
            self._pauser.get()

    def resume(self):
        if self._callback is None:
            return
        if self._pauser.empty():
            self._pauser.put_nowait(None)

    def is_alive(self):
        return self._is_alive

    def _finish(self, sender):
        '''Called when the _listen() task finished.'''
        try:
            self._task_finished()
        except CancelledError:
            self._logger.debug(f'[{self._video_id}] cancelled:{sender}')

    def terminate(self):
        if not self.is_alive():
            return
        if self._pauser.empty():
            self._pauser.put_nowait(None)
        self._is_alive = False
        self._buffer.put({})
        self._event.set()
        self.processor.finalize()

    def _task_finished(self):
        if self.is_alive():
            self.terminate()
        try:
            self.listen_task.result()
        except Exception as e:
            self.exception = e
            if not isinstance(e, exceptions.ChatParseException):
                self._logger.error(f'Internal exception - {type(e)}{str(e)}')
        self._logger.info(f'[{self._video_id}] finished.')

    def raise_for_status(self):
        if self.exception is not None:
            raise self.exception
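The for ... else in _get_livechat_json above is the idiomatic retry shape: the else clause runs only when the loop completes without hitting break, i.e. when every attempt failed. A self-contained sketch of the same shape, with a counter standing in for the flaky request (all names here are illustrative):

import time

MAX_RETRY = 3
attempts = 0

for _ in range(MAX_RETRY + 1):
    try:
        attempts += 1
        if attempts < 3:  # stand-in for a request that fails twice
            raise ConnectionError("transient failure")
        result = "ok"
        break             # success: the else clause is skipped
    except ConnectionError:
        time.sleep(0.01)
        continue
else:                     # reached only if the loop never breaks
    raise RuntimeError("Exceeded retry count.")

print(result)             # -> ok (on the third attempt)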
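pause() and resume() above treat a one-token Queue as a latch: resume() puts a token in, pause() takes it out, and the background fetch loop presumably does a get() followed by an immediate put_nowait(), so it blocks exactly while the queue is empty. A minimal self-contained sketch of that latch (names are illustrative, not from the original):

import threading
import time
from queue import Queue

pauser = Queue()
pauser.put_nowait(None)           # start in the running state (token present)

def fetch_loop():
    for i in range(5):
        token = pauser.get()      # blocks here while paused (queue empty)
        pauser.put_nowait(token)  # return the token, then do one unit of work
        print("fetched batch", i)
        time.sleep(0.1)

worker = threading.Thread(target=fetch_loop)
worker.start()
time.sleep(0.15)
pauser.get()                      # pause: remove the token
time.sleep(0.3)
pauser.put_nowait(None)           # resume: restore the token
worker.join()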
Example #33
0
File: car.py Project: wzyplus/PiCar
import logging
import threading
from queue import Queue, Full

logger = logging.getLogger(__name__)


class FourWDCar(object):
    '''
    Four-wheel Drive Car

    Status: stopped, forward, backward, turnleft, turnright
    '''

    _STATUS = {
        'stopped': u'Stopped',
        'forward': u'Forward',
        'backward': u'Backward',
        'turnleft': u'Turn left',
        'turnright': u'Turn right',
    }

    _AVAILABLE_ACTIONS = {
        'stop', 'forward', 'backward', 'turnright', 'turnleft', 'turnstop'
    }

    def __init__(self):
        self._status = 'stopped'
        self._stashed_status = None
        self._lock = threading.RLock()

        self.when_changed = None
        self._queue = Queue(maxsize=1)
        self._consumer = threading.Thread(target=self._consume)
        self._consumer.daemon = True
        self._consumer.start()

    def _consume(self):
        while True:
            self._queue.get()
            if self.when_changed is not None:
                self.when_changed(self.info)
            self._queue.task_done()

    def _push_status(self, status):
        self._stashed_status = status

    def _pop_status(self):
        status = self._stashed_status
        self._stashed_status = None
        return status

    def _peek_status(self):
        return self._stashed_status

    @property
    def status(self):
        with self._lock:
            return self._status

    @property
    def status_text(self):
        with self._lock:
            return self._STATUS[self._status]

    @status.setter
    def status(self, value):
        if value not in self._STATUS:
            raise ValueError(value + ' is not a valid car status')
        with self._lock:
            if self._status != value:
                logger.info('car status changed, %10s --> %-10s' %
                            (self._status, value))
                self._status = value
                self._notify_change()

    def _notify_change(self):
        try:
            self._queue.put_nowait(True)
        except Full:
            pass

    @property
    def info(self):
        return {
            'status': self.status,
            'status_text': self.status_text,
            'humidity': '0.0',
            'temperature': '0.0',
        }

    def forward(self):
        self.status = 'forward'
        self._push_status('forward')

    def backward(self):
        self.status = 'backward'
        self._push_status('backward')

    def stop(self):
        self.status = 'stopped'
        self._push_status('stopped')

    def turnright(self):
        self._push_status(self.status)
        self.status = 'turnright'

    def turnleft(self):
        self._push_status(self.status)
        self.status = 'turnleft'

    def turnstop(self):
        status = self._pop_status()
        if status == 'forward':
            self.forward()
        elif status == 'backward':
            self.backward()
        elif status == 'stopped':
            self.stop()

    def do_action(self, action):
        if action in self._AVAILABLE_ACTIONS:
            getattr(self, action)()
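A brief usage sketch of the class above (the print handler is illustrative; with the imports added, this runs as-is):

car = FourWDCar()
car.when_changed = lambda info: print("car is now:", info["status_text"])

car.forward()            # stopped -> forward; the consumer thread fires the callback
car.turnleft()           # stashes 'forward', then turns
car.turnstop()           # pops the stash and restores forward motion
car.do_action("stop")    # dispatch by name via _AVAILABLE_ACTIONS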
Example #34
0
import datetime
import json
import logging
import random
import time
from base64 import b64encode
from hashlib import sha256
from queue import Queue
from threading import Thread

import requests


class HabitatUploader(object):
    """ 
    Queued Habitat Telemetry Uploader class 
    
    Packets to be uploaded to Habitat are added to a queue for uploading.
    If an upload attempt times out, the packet is discarded.
    If the queue fills up (probably indicating no network connection, and a fast packet downlink rate),
    it is immediately emptied, to avoid upload of out-of-date packets.
    """

    HABITAT_URL = "http://habitat.habhub.org/"
    HABITAT_DB = "habitat"
    HABITAT_UUIDS = HABITAT_URL + "_uuids?count=%d"
    HABITAT_DB_URL = HABITAT_URL + HABITAT_DB + "/"

    def __init__(
        self,
        user_callsign="FSK_DEMOD",
        listener_lat=0.0,
        listener_lon=0.0,
        listener_radio="",
        listener_antenna="",
        queue_size=64,
        upload_timeout=10,
        upload_retries=5,
        upload_retry_interval=0.25,
        inhibit=False,
    ):
        """ Create a Habitat Uploader object. """

        self.upload_timeout = upload_timeout
        self.upload_retries = upload_retries
        self.upload_retry_interval = upload_retry_interval
        self.queue_size = queue_size
        self.habitat_upload_queue = Queue(queue_size)
        self.inhibit = inhibit

        # Listener information
        self.user_callsign = user_callsign
        self.listener_lat = listener_lat
        self.listener_lon = listener_lon
        self.listener_radio = listener_radio
        self.listener_antenna = listener_antenna
        self.position_uploaded = False

        self.last_freq_hz = None

        self.callsign_init = False
        self.uuids = []

        # Start the uploader thread.
        self.habitat_uploader_running = True
        self.uploadthread = Thread(target=self.habitat_upload_thread)
        self.uploadthread.start()

    def habitat_upload(self, sentence):
        """ Upload a UKHAS-standard telemetry sentence to Habitat """

        # Generate payload to be uploaded
        # b64encode accepts and returns bytes objects.
        _sentence_b64 = b64encode(sentence.encode("ascii"))
        _date = datetime.datetime.utcnow().isoformat("T") + "Z"
        _user_call = self.user_callsign

        _data = {
            "type": "payload_telemetry",
            "data": {
                "_raw": _sentence_b64.decode(
                    "ascii")  # Convert back to a string to be serialisable
            },
            "receivers": {
                _user_call: {
                    "time_created": _date,
                    "time_uploaded": _date,
                },
            },
        }

        if self.last_freq_hz:
            # Add in frequency information if we have it.
            _data["receivers"][_user_call]["rig_info"] = {
                "frequency": self.last_freq_hz
            }

        # The URL to upload to.
        _url = f"{self.HABITAT_URL}{self.HABITAT_DB}/_design/payload_telemetry/_update/add_listener/{sha256(_sentence_b64).hexdigest()}"

        # Delay for a random amount of time between 0 and upload_retry_interval*2 seconds.
        time.sleep(random.random() * self.upload_retry_interval * 2.0)

        _retries = 0

        # When uploading, we have three possible outcomes:
        # - Can't connect. No point re-trying in this situation.
        # - The packet is uploaded successfully (201 / 403).
        # - There is an upload conflict on the Habitat DB end (409). We can retry and it might work.
        while _retries < self.upload_retries:
            # Run the request.
            try:
                _req = requests.put(_url,
                                    data=json.dumps(_data),
                                    timeout=(self.upload_timeout, 6.1))
            except Exception as e:
                logging.error("Habitat - Upload Failed: %s" % str(e))
                break

            if _req.status_code == 201 or _req.status_code == 403:
                # 201 = Success, 403 = Success, sentence has already been seen by others.
                logging.info(
                    f"Habitat - Uploaded sentence: {sentence.strip()}")
                _upload_success = True
                break
            elif _req.status_code == 409:
                # 409 = Upload conflict (server busy). Sleep for a moment, then retry.
                logging.debug("Habitat - Upload conflict.. retrying.")
                time.sleep(random.random() * self.upload_retry_interval)
                _retries += 1
            else:
                logging.error(
                    "Habitat - Error uploading to Habitat. Status Code: %d." %
                    _req.status_code)
                break

        if _retries == self.upload_retries:
            logging.error(
                "Habitat - Upload conflict not resolved with %d retries." %
                self.upload_retries)

        return

    def habitat_upload_thread(self):
        """ Handle uploading of packets to Habitat """

        logging.info("Started Habitat Uploader Thread.")

        while self.habitat_uploader_running:

            if self.habitat_upload_queue.qsize() > 0:
                # If the queue is completely full, jump to the most recent telemetry sentence.
                if self.habitat_upload_queue.qsize() == self.queue_size:
                    while not self.habitat_upload_queue.empty():
                        sentence = self.habitat_upload_queue.get()

                    logging.warning(
                        "Habitat uploader queue was full - possible connectivity issue."
                    )
                else:
                    # Otherwise, get the first item in the queue.
                    sentence = self.habitat_upload_queue.get()

                # Attempt to upload it.
                self.habitat_upload(sentence)

            else:
                # Wait for a short time before checking the queue again.
                time.sleep(0.5)

            if not self.position_uploaded:
                # Validate the lat/lon entries.
                try:
                    _lat = float(self.listener_lat)
                    _lon = float(self.listener_lon)

                    if (_lat != 0.0) or (_lon != 0.0):
                        _success = self.uploadListenerPosition(
                            self.user_callsign,
                            _lat,
                            _lon,
                            self.listener_radio,
                            self.listener_antenna,
                        )
                    else:
                        logging.warning(
                            "Listener position set to 0.0/0.0 - not uploading."
                        )

                except Exception as e:
                    logging.error("Error uploading listener position: %s" %
                                  str(e))

                # Set this flag regardless of whether the upload worked.
                # The user can trigger a re-upload.
                self.position_uploaded = True

        logging.info("Stopped Habitat Uploader Thread.")

    def add(self, sentence):
        """ Add a sentence to the upload queue """

        if self.inhibit:
            # We have upload inhibited. Return.
            return

        # Handling of arbitrary numbers of $$'s at the start of a sentence:
        # Extract the data part of the sentence (i.e. everything after the $$'s')
        sentence = sentence.split("$")[-1]
        # Now add the *correct* number of $$s back on.
        sentence = "$$" + sentence

        if not sentence.endswith("\n"):
            sentence += "\n"

        try:
            self.habitat_upload_queue.put_nowait(sentence)
        except Exception as e:
            logging.error("Error adding sentence to queue: %s" % str(e))

    def close(self):
        """ Shutdown uploader thread. """
        self.habitat_uploader_running = False

    def ISOStringNow(self):
        return "%sZ" % datetime.datetime.utcnow().isoformat()

    def postListenerData(self, doc, timeout=10):

        # Do we have at least one UUID? If not, go get more.
        if len(self.uuids) < 1:
            self.fetchUuids()

        # Attempt to add UUID and time data to document.
        try:
            doc["_id"] = self.uuids.pop()
        except IndexError:
            logging.error(
                "Habitat - Unable to post listener data - no UUIDs available.")
            return False

        doc["time_uploaded"] = self.ISOStringNow()

        try:
            _r = requests.post(f"{self.HABITAT_URL}{self.HABITAT_DB}/",
                               json=doc,
                               timeout=timeout)
            return True
        except Exception as e:
            logging.error("Habitat - Could not post listener data - %s" %
                          str(e))
            return False

    def fetchUuids(self, timeout=10):

        _retries = 5

        while _retries > 0:
            try:
                _r = requests.get(self.HABITAT_UUIDS % 10, timeout=timeout)
                self.uuids.extend(_r.json()["uuids"])
                logging.debug("Habitat - Got UUIDs")
                return
            except Exception as e:
                logging.error(
                    "Habitat - Unable to fetch UUIDs, retrying in 2 seconds - %s"
                    % str(e))
                time.sleep(2)
                _retries = _retries - 1
                continue

        logging.error("Habitat - Gave up trying to get UUIDs.")
        return

    def initListenerCallsign(self, callsign, radio="", antenna=""):
        doc = {
            "type": "listener_information",
            "time_created": self.ISOStringNow(),
            "data": {
                "callsign": callsign,
                "antenna": antenna,
                "radio": radio,
            },
        }

        resp = self.postListenerData(doc)

        if resp is True:
            logging.debug("Habitat - Listener Callsign Initialized.")
            return True
        else:
            logging.error("Habitat - Unable to initialize callsign.")
            return False

    def uploadListenerPosition(self, callsign, lat, lon, radio="", antenna=""):
        """ Initializer Listener Callsign, and upload Listener Position """

        # Attempt to initialize the listener's callsign
        resp = self.initListenerCallsign(callsign,
                                         radio=radio,
                                         antenna=antenna)
        # If this fails, it means we can't contact the Habitat server,
        # so there is no point continuing.
        if resp is False:
            return False

        doc = {
            "type": "listener_telemetry",
            "time_created": self.ISOStringNow(),
            "data": {
                "callsign": callsign,
                "chase": False,
                "latitude": lat,
                "longitude": lon,
                "altitude": 0,
                "speed": 0,
            },
        }

        # post position to habitat
        resp = self.postListenerData(doc)
        if resp is True:
            logging.info("Habitat - Listener information uploaded.")
            return True
        else:
            logging.error("Habitat - Unable to upload listener information.")
            return False

    def trigger_position_upload(self):
        """ Trigger a re-upload of the listener position """
        self.position_uploaded = False
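A short usage sketch (the callsign and telemetry sentence are placeholders; the class itself targets the habhub.org endpoint, which must be reachable for uploads to succeed):

uploader = HabitatUploader(user_callsign="N0CALL")
uploader.add("$$N0CALL,1,12:34:56,0.0000,0.0000,0*ABCD")  # queued; the thread uploads it
# ... on shutdown:
uploader.close()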
Example #35
0
class EvPointer(object):

	def __init__(self):

		self.xdisplay = display.Display()
		screen = self.xdisplay.screen()
		self.screen_width = screen.width_in_pixels
		self.screen_height = screen.height_in_pixels
		self.xroot = screen.root
		pointer = self.xroot.query_pointer()

		self.position = MousePos(pointer.root_x, pointer.root_y)

		devices = [InputDevice(fn) for fn in list_devices()]
		self.pointer_devs = []
		self.keyboard_devs = []
		for device in devices:
			caps = device.capabilities()
			if ecodes.EV_REL in caps:
				self.pointer_devs.append(device)
			elif ecodes.EV_ABS in caps:
				self.pointer_devs.append(device)
			elif ecodes.EV_KEY in caps:
				self.keyboard_devs.append(device)

		self.queue = Queue()
		self.mainloop = Thread(target=self._mainloop, name='EvPointer mainloop')
		self.mainloop.start()
		self.stop = False
		self.hook_callback = None
		self.hook = Thread(target=self._hook, name='EvPointer hook loop')
		self.hook.start()

		caps = {
			ecodes.EV_REL: (
				ecodes.REL_X, ecodes.REL_Y, ecodes.REL_WHEEL, ecodes.REL_HWHEEL),
			ecodes.EV_KEY: (
				ecodes.BTN_LEFT, ecodes.BTN_RIGHT, ecodes.BTN_MIDDLE,
				ecodes.BTN_SIDE, ecodes.BTN_EXTRA)}
		self.uinput = UInput(caps, name='macpy pointer')

	def close(self):

		self.uinput.close()
		self.enqueue(None)
		if self.hook and self.hook.is_alive():
			self.stop = True

	def _hook(self):

		li = LibInput(ContextType.PATH)
		for device in self.pointer_devs:
			li.add_device(device.fn)

		for event in li.events:
			if self.stop:
				break

			mods = {
				'SHIFT': False,
				'ALTGR': False,
				'CTRL': False,
				'ALT': False,
				'META': False}
			active_mods = set()
			for device in self.keyboard_devs:
				active_mods |= set(device.active_keys())
			for key in active_mods:
				key = Key.from_ec(key)
				if key in Mods.SHIFT:
					mods['SHIFT'] = True
				elif key in Mods.CTRL:
					mods['CTRL'] = True
				elif key in Mods.ALT:
					mods['ALT'] = True
				elif key in Mods.META:
					mods['META'] = True

			if event.type == EventType.POINTER_MOTION:
				dx, dy = event.delta
				x = self.position.x + round(dx)
				y = self.position.y + round(dy)
				if x < 0:
					x = 0
				elif x > (self.screen_width - 1):
					x = self.screen_width - 1
				if y < 0:
					y = 0
				elif y > (self.screen_height - 1):
					y = self.screen_height - 1
				self.position = MousePos(x, y)
				if self.hook_callback:
					self.enqueue(self.hook_callback, PointerEventMotion(
						self.position.x, self.position.y, mods))
			elif event.type == EventType.POINTER_MOTION_ABSOLUTE:
				x, y = event.transform_absolute_coords(
					self.screen_width, self.screen_height)
				self.position = MousePos(round(x), round(y))
				if self.hook_callback:
					self.enqueue(self.hook_callback, PointerEventMotion(
						self.position.x, self.position.y, mods))
			elif event.type == EventType.POINTER_BUTTON:
				button = Key.from_ec(event.button)
				state = KeyState(event.button_state.value)
				if self.hook_callback:
					self.enqueue(self.hook_callback, PointerEventButton(
						self.position.x, self.position.y, button, state, mods))
			elif event.type == EventType.POINTER_AXIS:
				if event.has_axis(LIPAxis.SCROLL_VERTICAL):
					axis = mPAxis.VERTICAL
					value = event.get_axis_value(LIPAxis.SCROLL_VERTICAL)
				else:
					axis = mPAxis.HORIZONTAL
					value = event.get_axis_value(LIPAxis.SCROLL_HORIZONTAL)
				if self.hook_callback:
					self.enqueue(self.hook_callback, PointerEventAxis(
						self.position.x, self.position.y, value, axis, mods))

	def install_pointer_hook(self, callback, grab=False):

		self.hook_callback = callback

	def uninstall_pointer_hook(self):

		self.hook_callback = None

	def _mainloop(self):

		while True:
			method, args = self.queue.get()
			if method is None:
				break
			try:
				method(*args)
			except Exception as e:
				print(
					'Error in EvPointer mainloop: \n',
					''.join(traceback.format_exception(
						type(e), e, e.__traceback__)))
			self.queue.task_done()

	def enqueue(self, method, *args):

		self.queue.put_nowait((method, args))

	def _warp(self, x, y, relative=False):

		if relative:
			dx = x
			dy = y
		else:
			dx = x - self.position.x
			dy = y - self.position.y
		self.uinput.write(ecodes.EV_REL, ecodes.REL_X, dx)
		self.uinput.write(ecodes.EV_REL, ecodes.REL_Y, dy)
		self.uinput.syn()

	def warp(self, x, y, relative=False):

		self.enqueue(self._warp, x, y, relative)

	def _scroll(self, axis, value):

		if axis is mPAxis.VERTICAL:
			self.uinput.write(ecodes.EV_REL, ecodes.REL_WHEEL, -value)
		elif axis is mPAxis.HORIZONTAL:
			self.uinput.write(ecodes.EV_REL, ecodes.REL_HWHEEL, value)
		else:
			raise TypeError('Invalid axis type')
		self.uinput.syn()

	def scroll(self, axis, value):

		self.enqueue(self._scroll, axis, value)

	def _click(self, key, state=None):

		if state is None:
			self.uinput.write(ecodes.EV_KEY, key.ec.value, 1)
			self.uinput.write(ecodes.EV_KEY, key.ec.value, 0)
		elif state is KeyState.PRESSED:
			self.uinput.write(ecodes.EV_KEY, key.ec.value, 1)
		elif state is KeyState.RELEASED:
			self.uinput.write(ecodes.EV_KEY, key.ec.value, 0)
		self.uinput.syn()

	def click(self, key, state=None):

		self.enqueue(self._click, key)

	def get_button_state(self, button):

		active_keys = set()
		for dev in self.pointer_devs:
			active_keys |= set(dev.active_keys())
		if button.ec in active_keys:
			return KeyState.PRESSED
		else:
			return KeyState.RELEASED
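enqueue()/_mainloop() above form a generic call-marshalling pattern: callers push (method, args) tuples onto a queue and a single thread executes them in order, with None as the shutdown sentinel. A self-contained sketch (names illustrative):

import threading
from queue import Queue

calls = Queue()

def mainloop():
    while True:
        method, args = calls.get()
        if method is None:        # shutdown sentinel
            break
        method(*args)
        calls.task_done()

worker = threading.Thread(target=mainloop)
worker.start()
calls.put_nowait((print, ("hello from the worker",)))
calls.put_nowait((None, ()))      # stop the loop
worker.join()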
Example #36
0
class VelbusUSBConnection(velbus.VelbusConnection):
    """
    Wrapper for SerialPort connection configuration
    """

    BAUD_RATE = 38400

    BYTE_SIZE = serial.EIGHTBITS

    PARITY = serial.PARITY_NONE

    STOPBITS = serial.STOPBITS_ONE

    XONXOFF = 0

    RTSCTS = 1

    SLEEP_TIME = 60 / 1000

    def __init__(self, device, controller=None):
        velbus.VelbusConnection.__init__(self)
        self.logger = logging.getLogger('velbus')
        self._device = device
        self.controller = controller
        try:
            self.serial = serial.Serial(port=device,
                                        baudrate=self.BAUD_RATE,
                                        bytesize=self.BYTE_SIZE,
                                        parity=self.PARITY,
                                        stopbits=self.STOPBITS,
                                        xonxoff=self.XONXOFF,
                                        rtscts=self.RTSCTS)
        except serial.serialutil.SerialException:
            self.logger.error("Could not open serial port, \
                              no messages are read or written to the bus")
            raise VelbusException("Could not open serial port")
        self._reader = serial.threaded.ReaderThread(self.serial, Protocol)
        self._reader.start()
        self._reader.protocol.parser = self.feed_parser
        self._reader.connect()
        self._write_queue = Queue()
        self._write_process = threading.Thread(None, self.write_daemon,
                                               "write_packets_process", (), {})
        self._write_process.daemon = True
        self._write_process.start()

    def stop(self):
        """Close serial port."""
        self.logger.warning("Stop executed")
        try:
            self._reader.close()
        except serial.serialutil.SerialException:
            self.logger.error("Error while closing device")
            raise VelbusException("Error while closing device")
        time.sleep(1)

    def feed_parser(self, data):
        """Parse received message."""
        assert isinstance(data, bytes)
        self.controller.feed_parser(data)

    def send(self, message, callback=None):
        """Add message to write queue."""
        assert isinstance(message, velbus.Message)
        self._write_queue.put_nowait((message, callback))

    def write_daemon(self):
        """Write thread."""
        while True:
            (message, callback) = self._write_queue.get(block=True)
            self.logger.info("Sending message on USB bus: %s", str(message))
            self.logger.debug("Sending binary message:  %s",
                              str(message.to_binary()))
            self._reader.write(message.to_binary())
            time.sleep(self.SLEEP_TIME)
            if callback:
                callback()
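send()/write_daemon() above implement a paced single-writer: every outgoing message funnels through one queue and one daemon thread, which spaces writes SLEEP_TIME apart so the bus is never flooded. A stdlib-only sketch of the same shape, with a print standing in for the serial write:

import threading
import time
from queue import Queue

SLEEP_TIME = 60 / 1000            # pacing between writes, as in the class above
write_queue = Queue()

def write_daemon():
    while True:
        message, callback = write_queue.get(block=True)
        print("wrote:", message)  # stand-in for self._reader.write(...)
        time.sleep(SLEEP_TIME)    # pace the bus
        if callback:
            callback()

threading.Thread(target=write_daemon, daemon=True).start()
write_queue.put_nowait((b"\x0f\xfb", lambda: print("sent")))
time.sleep(0.2)                   # let the daemon run before this demo exits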
Example #37
0
    def _enqueue_state_to_single_queue(self, state: Dict, state_queue: Queue):
        try:
            state_queue.put_nowait(state)
        except Full:
            pass
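The helper above drops the newest state instead of blocking when the consumer falls behind (the excerpt's Dict annotation assumes from typing import Dict). A tiny self-contained demonstration of that drop-on-full policy:

from queue import Full, Queue

state_queue = Queue(maxsize=2)
for state in ({"n": 1}, {"n": 2}, {"n": 3}):
    try:
        state_queue.put_nowait(state)
    except Full:
        print("dropped:", state)   # {'n': 3} is discarded rather than blocked on

print(state_queue.get_nowait())    # -> {'n': 1}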
Example #38
0
class DysonPureCoolLink(DysonDevice):
    """Dyson device (fan)."""
    class DysonDeviceListener:
        """Message listener."""
        def __init__(self, serial, add_device_function):
            """Create a new message listener.

            :param serial: Device serial
            :param add_device_function: Callback function
            """
            self._serial = serial
            self.add_device_function = add_device_function

        def remove_service(self, zeroconf, device_type, name):
            # pylint: disable=unused-argument,no-self-use
            """Remove listener."""
            _LOGGER.info("Service %s removed", name)

        def add_service(self, zeroconf, device_type, name):
            """Add device.

            :param zeroconf: mDNS object
            :param device_type: Service type
            :param name: Device name
            """
            device_serial = (name.split(".")[0]).split("_")[1]
            if device_serial == self._serial:
                # Find searched device
                info = zeroconf.get_service_info(device_type, name)
                address = socket.inet_ntoa(info.address)
                network_device = NetworkDevice(device_serial, address,
                                               info.port)
                self.add_device_function(network_device)
                zeroconf.close()

    def __init__(self, json_body):
        """Create a new Pure Cool Link device.

        :param json_body: JSON message returned by the HTTPS API
        """
        super().__init__(json_body)

        self._sensor_data_available = Queue()
        self._environmental_state = None
        self._request_thread = None

    @property
    def status_topic(self):
        """MQTT status topic."""
        return "{0}/{1}/status/current".format(self.product_type, self.serial)

    @staticmethod
    def on_message(client, userdata, msg):
        # pylint: disable=unused-argument, too-many-branches
        """Set function Callback when message received."""
        payload = msg.payload.decode("utf-8")
        if DysonPureCoolState.is_state_message(payload):
            if support_heating(userdata.product_type):
                device_msg = DysonPureHotCoolState(payload)
            elif support_heating_v2(userdata.product_type):
                device_msg = DysonPureHotCoolV2State(payload)
            elif is_pure_humidifycool_v2(userdata.product_type):
                device_msg = DysonPureHumidifyCoolV2State(payload)
            elif is_pure_cool_v2(userdata.product_type):
                device_msg = DysonPureCoolV2State(payload)
            else:
                device_msg = DysonPureCoolState(payload)
            if not userdata.device_available:
                userdata.state_data_available()
            userdata.state = device_msg
            for function in userdata.callback_message:
                function(device_msg)
        elif DysonEnvironmentalSensorState.is_environmental_state_message(
                payload):
            if is_pure_cool_v2(userdata.product_type):
                device_msg = DysonEnvironmentalSensorV2State(payload)
            else:
                device_msg = DysonEnvironmentalSensorState(payload)
            if not userdata.device_available:
                userdata.sensor_data_available()
            userdata.environmental_state = device_msg
            for function in userdata.callback_message:
                function(device_msg)
        else:
            _LOGGER.warning("Unknown message: %s", payload)

    def auto_connect(self, timeout=5, retry=15):
        """Try to connect to device using mDNS.

        :param timeout: Timeout
        :param retry: Max retry
        :return: True if connected, else False
        """
        for i in range(retry):
            zeroconf = Zeroconf()
            listener = self.DysonDeviceListener(self._serial,
                                                self._add_network_device)
            ServiceBrowser(zeroconf, "_dyson_mqtt._tcp.local.", listener)
            try:
                self._network_device = self._search_device_queue.get(
                    timeout=timeout)
            except Empty:
                # Unable to find device
                _LOGGER.warning("Unable to find device %s, try %s",
                                self._serial, i)
                zeroconf.close()
            else:
                break
        if self._network_device is None:
            _LOGGER.error("Unable to connect to device %s", self._serial)
            return False
        return self._mqtt_connect()

    def connect(self, device_ip, device_port=DEFAULT_PORT):
        """Connect to the device using ip address.

        :param device_ip: Device IP address
        :param device_port: Device Port (default: 1883)
        :return: True if connected, else False
        """
        self._network_device = NetworkDevice(self._name, device_ip,
                                             device_port)

        return self._mqtt_connect()

    def _mqtt_connect(self):
        """Connect to the MQTT broker."""
        self._mqtt = mqtt.Client(userdata=self)
        self._mqtt.on_message = self.on_message
        self._mqtt.on_connect = self.on_connect
        self._mqtt.username_pw_set(self._serial, self._credentials)
        self._mqtt.connect(self._network_device.address,
                           self._network_device.port)
        self._mqtt.loop_start()
        self._connected, self._connection_error_code = self._connection_queue.get(
            timeout=10)
        if self._connected:
            self.request_current_state()
            # Start Environmental thread
            self._request_thread = EnvironmentalSensorThread(
                self.request_environmental_state)
            self._request_thread.start()

            # Wait for first data
            self._state_data_available.get()
            self._sensor_data_available.get()
            self._device_available = True
        else:
            self._mqtt.loop_stop()
        return self._connected

    def sensor_data_available(self):
        """Call when first sensor data are available. Internal method."""
        _LOGGER.debug("Sensor data available for device %s", self._serial)
        self._sensor_data_available.put_nowait(True)

    def disconnect(self):
        """Disconnect from the device."""
        self._request_thread.stop()
        self._connected = False

    def request_environmental_state(self):
        """Request new state message."""
        if self._connected:
            payload = {
                "msg": "REQUEST-PRODUCT-ENVIRONMENT-CURRENT-SENSOR-DATA",
                "time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
            }
            self._mqtt.publish(
                self._product_type + "/" + self._serial + "/command",
                json.dumps(payload))
        else:
            _LOGGER.warning(
                "Unable to send commands because device %s is not connected",
                self.serial)

    def set_fan_configuration(self, data):
        # pylint: disable=too-many-arguments,too-many-locals
        """Configure Fan.

        :param data: Data to send
        """
        if self._connected:
            payload = {
                "msg": "STATE-SET",
                "time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
                "mode-reason": "LAPP",
                "data": data
            }
            self._mqtt.publish(self.command_topic, json.dumps(payload), 1)
        else:
            _LOGGER.warning("Not connected, can not set configuration: %s",
                            self.serial)

    def _parse_command_args(self, **kwargs):
        """Parse command arguments.

        :param kwargs Arguments
        :return payload dictionary
        """
        fan_mode = kwargs.get('fan_mode')
        oscillation = kwargs.get('oscillation')
        fan_speed = kwargs.get('fan_speed')
        night_mode = kwargs.get('night_mode')
        quality_target = kwargs.get('quality_target')
        standby_monitoring = kwargs.get('standby_monitoring')
        sleep_timer = kwargs.get('sleep_timer')
        reset_filter = kwargs.get('reset_filter')

        f_mode = fan_mode.value if fan_mode \
            else self._current_state.fan_mode
        f_speed = fan_speed.value if fan_speed \
            else self._current_state.speed
        f_oscillation = oscillation.value if oscillation \
            else self._current_state.oscillation
        f_night_mode = night_mode.value if night_mode \
            else self._current_state.night_mode
        f_quality_target = quality_target.value if quality_target \
            else self._current_state.quality_target
        f_standby_monitoring = standby_monitoring.value if \
            standby_monitoring else self._current_state.standby_monitoring
        f_sleep_timer = sleep_timer if sleep_timer or isinstance(
            sleep_timer, int) else "STET"
        f_reset_filter = reset_filter.value if reset_filter \
            else "STET"

        return {
            "fmod": f_mode,
            "fnsp": f_speed,
            "oson": f_oscillation,
            "sltm": f_sleep_timer,  # sleep timer
            "rhtm": f_standby_monitoring,  # monitor air quality
            # when inactive
            "rstf": f_reset_filter,  # reset filter lifecycle
            "qtar": f_quality_target,
            "nmod": f_night_mode
        }

    def set_configuration(self, **kwargs):
        """Configure fan.

        :param kwargs: Parameters
        """
        data = self._parse_command_args(**kwargs)
        self.set_fan_configuration(data)

    @property
    def environmental_state(self):
        """Environmental Device state."""
        return self._environmental_state

    @environmental_state.setter
    def environmental_state(self, value):
        """Set Environmental Device state."""
        self._environmental_state = value

    @property
    def connected(self):
        """Device connected."""
        return self._connected

    @connected.setter
    def connected(self, value):
        """Set device connected."""
        self._connected = value

    def __repr__(self):
        """Return a String representation."""
        fields = self._fields()
        return 'DysonPureCoolLink(' + ",".join(printable_fields(fields)) + ')'
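_mqtt_connect() above blocks on self._state_data_available.get() and self._sensor_data_available.get(), using queues as one-shot readiness signals so the connect call only returns once the first messages have arrived. A self-contained sketch of that handshake (the listener function stands in for the MQTT on_message callback):

import threading
import time
from queue import Queue

sensor_data_available = Queue()

def listener():
    time.sleep(0.2)                       # ... the first sensor message arrives
    sensor_data_available.put_nowait(True)

threading.Thread(target=listener).start()
print("connecting...")
sensor_data_available.get()               # block until the first data is in
print("first data received; connect can return")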
Example #39
0
class ContextManager(object):
    def __init__(self, database):
        """

        Args:
            database (database.Database subclass): the subclass/implementation
                of the Database
        """
        self._database = database
        self._first_merkle_root = None
        self._contexts = _ThreadsafeContexts()

        self._address_regex = re.compile('^[0-9a-f]{70}$')

        self._namespace_regex = re.compile('^([0-9a-f]{2}){0,35}$')

        self._address_queue = Queue()

        self._inflated_addresses = Queue()

        self._context_reader = _ContextReader(database, self._address_queue,
                                              self._inflated_addresses)
        self._context_reader.start()

        self._context_writer = _ContextWriter(self._inflated_addresses,
                                              self._contexts)
        self._context_writer.start()

    def get_first_root(self):
        if self._first_merkle_root is not None:
            return self._first_merkle_root
        self._first_merkle_root = MerkleDatabase(
            self._database).get_merkle_root()
        return self._first_merkle_root

    def address_is_valid(self, address):
        # return True
        return self._address_regex.match(address) is not None

    def namespace_is_valid(self, namespace):
        # return True
        return self._namespace_regex.match(namespace) is not None

    def create_context(self, state_hash, base_contexts, inputs, outputs):
        """Create a ExecutionContext to run a transaction against.

        Args:
            state_hash: (str): Merkle root to base state on.
            base_contexts (list of str): Context ids of contexts that will
                have their state applied to make this context.
            inputs (list of str): Addresses that can be read from.
            outputs (list of str): Addresses that can be written to.
        Returns:
            context_id (str): the unique context_id of the session
        """

        for address in inputs:
            if not self.namespace_is_valid(address):
                raise CreateContextException(
                    "Address or namespace {} listed in inputs is not "
                    "valid".format(address))
        for address in outputs:
            if not self.namespace_is_valid(address):
                raise CreateContextException(
                    "Address or namespace {} listed in outputs is not "
                    "valid".format(address))

        addresses_to_find = [add for add in inputs if len(add) == 70]

        address_values, reads = self._find_address_values_in_chain(
            base_contexts=base_contexts, addresses_to_find=addresses_to_find)

        context = ExecutionContext(state_hash=state_hash,
                                   read_list=inputs,
                                   write_list=outputs,
                                   base_context_ids=base_contexts)

        contexts_asked_not_found = [
            cid for cid in base_contexts if cid not in self._contexts
        ]
        if contexts_asked_not_found:
            raise KeyError("Basing a new context off of context ids {} "
                           "that are not in context manager".format(
                               contexts_asked_not_found))

        context.create_initial(address_values)

        self._contexts[context.session_id] = context

        if reads:
            context.create_prefetch(reads)
            self._address_queue.put_nowait(
                (context.session_id, state_hash, reads))
        return context.session_id

    def _find_address_values_in_chain(self, base_contexts, addresses_to_find):
        """Breadth first search through the chain of contexts searching for
        the bytes values at the addresses in addresses_to_find.

        Args:
            base_contexts (list of str): The context ids to start with.
            addresses_to_find (list of str): Addresses to find values in the
                chain of contexts.

        Returns:
            tuple of found address_values and still not found addresses
        """

        contexts_in_chain = deque()
        contexts_in_chain.extend(base_contexts)
        reads = list(addresses_to_find)
        address_values = []
        context_ids_already_searched = []
        context_ids_already_searched.extend(base_contexts)

        # There are two loop exit conditions, either all the addresses that
        # are being searched for have been found, or we run out of contexts
        # in the chain of contexts.

        while reads:
            try:
                current_c_id = contexts_in_chain.popleft()
            except IndexError:
                # There aren't any more contexts known about.
                break
            current_context = self._contexts[current_c_id]

            # First, check for addresses that have been deleted.
            deleted_addresses = current_context.get_if_deleted(reads)
            for address in deleted_addresses:
                if address is not None:
                    address_values.append((address, None))

            # optimized version
            s = set(deleted_addresses)
            reads = [x for x in reads if x not in s]

            # reads = list(set(reads) - set(deleted_addresses))

            # Second, check for addresses that have been set in the context,
            # and remove those addresses from being asked about again. Here
            # any value of None means the address hasn't been set.

            values = current_context.get_if_set(reads)
            addresses_not_found = []
            for address, value in zip(reads, values):
                if value is not None:
                    address_values.append((address, value))
                else:
                    addresses_not_found.append(address)
            reads = addresses_not_found

            # Next check for addresses that might be in a context
            # because they were inputs.

            addresses_in_inputs = [
                address for address in reads if address in current_context
            ]

            values = current_context.get_if_not_set(addresses_in_inputs)

            address_values.extend(list(zip(addresses_in_inputs, values)))

            for add in addresses_in_inputs:
                reads.remove(add)

            for c_id in current_context.base_contexts:
                if c_id not in context_ids_already_searched:
                    contexts_in_chain.append(c_id)
                    context_ids_already_searched.append(c_id)

        return address_values, reads

    def delete_contexts(self, context_id_list):
        """Delete contexts from the ContextManager.

        Args:
            context_id_list (list): a list of context ids

        Returns:
            None

        """
        for c_id in context_id_list:
            if c_id in self._contexts:
                del self._contexts[c_id]

    def delete(self, context_id, address_list):
        """Delete the values associated with list of addresses, for a specific
        context referenced by context_id.

        Args:
            context_id (str): the return value of create_context, referencing
                a particular context.
            address_list (list): a list of address strs

        Returns:
            (bool): True if the operation is successful, False if
                the context_id doesn't reference a known context.

        Raises:
            AuthorizationException: Raised when an address in address_list is
                not authorized either by not being in the inputs for the
                txn associated with this context, or it is under a namespace
                but the characters that are under the namespace are not valid
                address characters.
        """

        if context_id not in self._contexts:
            return False

        context = self._contexts[context_id]

        for add in address_list:
            if not self.address_is_valid(address=add):
                raise AuthorizationException(address=add)

        context.delete_direct(address_list)

        return True

    def get(self, context_id, address_list):
        """Get the values associated with list of addresses, for a specific
        context referenced by context_id.

        Args:
            context_id (str): the return value of create_context, referencing
                a particular context.
            address_list (list): a list of address strs

        Returns:
            values_list (list): a list of (address, value) tuples

        Raises:
            AuthorizationException: Raised when an address in address_list is
                not authorized either by not being in the inputs for the
                txn associated with this context, or it is under a namespace
                but the characters that are under the namespace are not valid
                address characters.
        """

        if context_id not in self._contexts:
            return []
        for add in address_list:
            if not self.address_is_valid(address=add):
                raise AuthorizationException(address=add)

        context = self._contexts[context_id]

        addresses_in_ctx = [add for add in address_list if add in context]

        # no init set
        s = set(addresses_in_ctx)
        addresses_not_in_ctx = [x for x in address_list if x not in s]

        # addresses_not_in_ctx = list(set(address_list) - set(addresses_in_ctx))

        values = context.get(addresses_in_ctx)
        values_list = list(zip(addresses_in_ctx, values))
        if addresses_not_in_ctx:
            # Validate the addresses that won't be validated by a direct get on
            # the context.
            for address in addresses_not_in_ctx:
                context.validate_read(address)
            try:
                address_values, reads = self._find_address_values_in_chain(
                    base_contexts=[context_id],
                    addresses_to_find=addresses_not_in_ctx)
            except KeyError:
                # This is in the exceptional case when a txn is in flight
                # and so the context may not exist but the tp is asking
                # about it.
                return []

            values_list.extend(address_values)

            if reads:
                tree = MerkleDatabase(self._database, context.merkle_root)
                add_values = []
                for add in reads:
                    value = None
                    try:
                        value = tree.get(add)
                    except KeyError:
                        # The address is not in the radix tree/merkle tree
                        pass
                    add_values.append((add, value))
                values_list.extend(add_values)

            values_list.sort(key=lambda x: address_list.index(x[0]))

        return values_list

    def set(self, context_id, address_value_list):
        """Within a context, sets addresses to a value.

        Args:
            context_id (str): the context id returned by create_context
            address_value_list (list): list of {address: value} dicts

        Returns:
            (bool): True if the operation is successful, False if
                the context_id doesn't reference a known context.

        Raises:
            AuthorizationException if an address is given in the
                address_value_list that was not in the original
                transaction's outputs, or was under a namespace but the
                characters after the namespace are not valid address
                characters.
        """

        if context_id not in self._contexts:
            LOGGER.warning("Context_id not in contexts, %s", context_id)
            return False

        context = self._contexts.get(context_id)

        add_value_dict = {}
        for d in address_value_list:
            for add, val in d.items():
                if not self.address_is_valid(address=add):
                    raise AuthorizationException(address=add)
                add_value_dict[add] = val
        context.set_direct(add_value_dict)
        return True

    def get_squash_handler(self):
        def _squash(state_root, context_ids, persist, clean_up):
            contexts_in_chain = deque()
            contexts_in_chain.extend(context_ids)
            context_ids_already_searched = []
            context_ids_already_searched.extend(context_ids)

            # There is only one exit condition and that is when all the
            # contexts have been accessed once.
            updates = dict()
            deletes = set()
            while contexts_in_chain:
                current_c_id = contexts_in_chain.popleft()
                current_context = self._contexts[current_c_id]
                if not current_context.is_read_only():
                    current_context.make_read_only()

                addresses_w_values = current_context.get_all_if_set()
                for add, val in addresses_w_values.items():
                    # Since we are moving backwards through the graph of
                    # contexts, only update if the address hasn't been set
                    # or deleted
                    if add not in updates and add not in deletes:
                        updates[add] = val

                addresses_w_values = current_context.get_all_if_deleted()
                for add, _ in addresses_w_values.items():
                    # Since we are moving backwards through the graph of
                    # contexts, only add to deletes if the address hasn't been
                    # previously deleted or set in the graph
                    if add not in updates and add not in deletes:
                        deletes.add(add)

                for c_id in current_context.base_contexts:
                    if c_id not in context_ids_already_searched:
                        contexts_in_chain.append(c_id)
                        context_ids_already_searched.append(c_id)

            tree = MerkleDatabase(self._database, state_root)

            # filter the delete list to just those items in the tree
            deletes = [addr for addr in deletes if addr in tree]

            if not updates and not deletes:
                state_hash = state_root
            else:
                virtual = not persist
                state_hash = tree.update(updates, deletes, virtual=virtual)

            if clean_up:
                self.delete_contexts(context_ids_already_searched)
            return state_hash

        return _squash

    def stop(self):
        self._address_queue.put_nowait(_SHUTDOWN_SENTINEL)
        self._inflated_addresses.put_nowait(_SHUTDOWN_SENTINEL)

    def add_execution_data(self, context_id, data):
        """Within a context, append data to the execution result.

        Args:
            context_id (str): the context id returned by create_context
            data (bytes): data to append

        Returns:
            (bool): True if the operation is successful, False if
                the context_id doesn't reference a known context.
        """
        if context_id not in self._contexts:
            LOGGER.warning("Context_id not in contexts, %s", context_id)
            return False

        context = self._contexts.get(context_id)
        context.add_execution_data(data)
        return True

    def add_execution_event(self, context_id, event):
        """Within a context, append data to the execution result.

        Args:
            context_id (str): the context id returned by create_context
            data_type (str): type of data to append
            data (bytes): data to append

        Returns:
            (bool): True if the operation is successful, False if
                the context_id doesn't reference a known context.
        """
        if context_id not in self._contexts:
            LOGGER.warning("Context_id not in contexts, %s", context_id)
            return False

        context = self._contexts.get(context_id)
        context.add_execution_event(event)
        return True

    def get_execution_results(self, context_id):
        context = self._contexts.get(context_id)
        return (context.get_all_if_set().copy(),
                context.get_all_if_deleted().copy(),
                context.get_execution_events().copy(),
                context.get_execution_data().copy())
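_find_address_values_in_chain() above is a breadth-first walk over the graph of contexts, resolving each address at the nearest context that set it and returning whatever is still unfound for a database read. A stripped-down, self-contained sketch of the same lookup over plain dicts (names illustrative):

from collections import deque

# Each context holds its own values plus the ids of the contexts it builds on.
contexts = {
    "c1": {"values": {"addr-a": b"1"}, "bases": []},
    "c2": {"values": {"addr-b": b"2"}, "bases": ["c1"]},
    "c3": {"values": {}, "bases": ["c2"]},
}

def find_values(base_contexts, addresses):
    chain = deque(base_contexts)
    seen = set(base_contexts)
    reads = list(addresses)
    found = []
    while reads and chain:
        ctx = contexts[chain.popleft()]
        still_missing = []
        for addr in reads:
            if addr in ctx["values"]:
                found.append((addr, ctx["values"][addr]))
            else:
                still_missing.append(addr)
        reads = still_missing
        for base in ctx["bases"]:     # enqueue unvisited parent contexts
            if base not in seen:
                chain.append(base)
                seen.add(base)
    return found, reads               # resolved pairs, and still-unfound addresses

print(find_values(["c3"], ["addr-a", "addr-b", "addr-x"]))
# -> ([('addr-b', b'2'), ('addr-a', b'1')], ['addr-x'])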
Example #40
0
class WIP_SerTransportProcess(Process):  # TODO: WIP
    """Interface for a packet transport using a process - WIP."""

    def __init__(self, loop, protocol, ser_port, extra=None):
        _LOGGER.debug("SerTransProc.__init__() *** Process version ***", ser_port)

        self._loop = loop

        self._protocol = protocol
        self._ser_port = ser_port
        self._extra = {} if extra is None else extra

        self.serial = None
        self._is_closing = None
        self._poller = None
        self._write_queue = None

        self._start()

    def _start(self):
        def _polling_loop(self):
            if DEV_MODE:
                _LOGGER.error("WinTransport._polling_loop() BEGUN")

            # asyncio.set_event_loop(self._loop)
            asyncio.get_running_loop()  # TODO: this fails

            self._protocol.connection_made(self)

            while self.serial.is_open:
                if self.serial.in_waiting:
                    self._protocol.data_received(
                        # self.serial.readline()
                        self.serial.read()
                        # self.serial.read(self.serial.in_waiting)
                    )
                    # time.sleep(0.005)
                    continue

                if self.serial.out_waiting:
                    # time.sleep(0.005)
                    continue

                if not self._write_queue.empty():
                    cmd = self._write_queue.get()
                    self.serial.write(bytearray(f"{cmd}\r\n".encode("ascii")))
                    self._write_queue.task_done()
                    # time.sleep(0.005)
                    continue

                # time.sleep(0.005)

            if DEV_MODE:
                _LOGGER.debug("SerTransProc._polling_loop() ENDED")
            self._protocol.connection_lost(exc=None)

        if DEV_MODE:
            _LOGGER.debug("SerTransProc._start() STARTING loop")
        self._write_queue = Queue(maxsize=200)

        self.serial = serial_for_url(self._ser_port[0], **self._ser_port[1])
        self.serial.timeout = 0

        self._poller = Thread(target=_polling_loop, args=(self,), daemon=True)
        self._poller.start()

        self._protocol.connection_made(self)

    def write(self, cmd):
        """Write some data bytes to the transport.

        This does not block; it buffers the data and arranges for it to be sent out
        asynchronously.
        """
        # _LOGGER.debug("SerTransProc.write(%s)", cmd)

        self._write_queue.put_nowait(cmd)
Example #41
0
class BlockLight:

    __slots__ = ['_terrain', '_levels', '_queue']

    def __init__(self, terrain):
        self._terrain = terrain
        self._levels = {BlockID.TORCH: 14, BlockID.BURNING_FURNACE: 13}
        self._queue = Queue()

    def update(self):
        if self._queue.empty():
            return set()
        func, pos = self._queue.get_nowait()
        return func(pos)

    def add(self, pos):
        self._queue.put_nowait((self._add, pos))

    def remove(self, pos):
        self._queue.put_nowait((self._remove, pos))

    def _add(self, pos):
        block = self._terrain.get_block(pos)
        light_level = self._levels.get(block.id, 0)
        if light_level == 0:
            return set()
        return self._add_light(pos, light_level)

    def _add_light(self, pos, light_level):
        cursor = BlockCursor(self._terrain, pos)
        # Add light at X=0
        light_level = self._add_column_x(cursor, light_level)
        if light_level > 1:
            # Add light for X<0 and X>0
            for direc in (-1, 1):
                cursor.move(x=direc)
                l = light_level
                while l > 1:
                    l = self._add_column_x(cursor, l - 1)
                    cursor.move(x=direc)
                cursor.reset(x=True)
        return cursor.updated_chunk

    def _add_column_x(self, cursor, light_level):
        # Add light at Z=0
        light_level = self._add_column_xz(cursor, light_level)
        if light_level > 1:
            # Add light for Z<0 and Z>0
            for direc in (-1, 1):
                cursor.move(z=direc)
                l = light_level
                while l > 1:
                    l = self._add_column_xz(cursor, l - 1)
                    cursor.move(z=direc)
                cursor.reset(z=True)
        return light_level

    def _add_column_xz(self, cursor, light_level):
        # Add light at Y=0
        light_level = self._add_point(cursor, light_level)
        if light_level > 1:
            # Add light for Y<0 and Y>0
            for direc in (-1, 1):
                cursor.move(y=direc)
                l = light_level
                while l > 1:
                    l = self._add_point(cursor, l - 1)
                    cursor.move(y=direc)
                cursor.reset(y=True)
        return light_level

    def _add_point(self, cursor, light_level):
        # Find the brightest level among the surrounding blocks
        max_light_level = light_level
        for pos, chunk in cursor.surrounding():
            l = chunk.get_block_light(*pos.in_chunk) - 1
            if l > max_light_level:
                max_light_level = l
        # Change it if brighter than the current level
        pos, chunk = cursor.current()
        x, z, y = pos.in_chunk
        l = chunk.get_block_light(x, z, y)
        if l < max_light_level:
            chunk.set_block_light(x, z, y, max_light_level)
            cursor.mark_updated(chunk.pos)
            return max_light_level
        else:
            return 0

    def _remove(self, pos):
        cursor = BlockCursor(self._terrain, pos)
        # Remove light at X=0
        if self._remove_column_x(cursor) == 0:
            # Remove light for X<0 and X>0
            for direc in (-1, 1):
                cursor.move(x=direc)
                while self._remove_column_x(cursor) == 0:
                    cursor.move(x=direc)
                cursor.reset(x=True)
        updated_chunk = cursor.updated_chunk
        # Re-add light from the points where light remains
        for pos, light_level in cursor.light_lavel:
            updated_chunk.update(self._add_light(pos, light_level))
        return updated_chunk

    def _remove_column_x(self, cursor):
        # Remove light at Z=0
        light_level = self._remove_column_xz(cursor)
        if light_level == 0:
            # Remove light for Z<0 and Z>0
            for direc in (-1, 1):
                cursor.move(z=direc)
                while self._remove_column_xz(cursor) == 0:
                    cursor.move(z=direc)
                cursor.reset(z=True)
        return light_level

    def _remove_column_xz(self, cursor):
        # Remove light at Y=0
        light_level = self._remove_point(cursor)
        if light_level == 0:
            # remove the light toward Y<0 and Y>0
            for direc in (-1, 1):
                cursor.move(y=direc)
                while self._remove_point(cursor) == 0:
                    cursor.move(y=direc)
                cursor.reset(y=True)
        return light_level

    def _remove_point(self, cursor):
        # find the brightest level among the neighbours
        max_light_level = 0
        for pos, chunk in cursor.surrounding():
            l = chunk.get_block_light(*pos.in_chunk)
            if l > max_light_level:
                max_light_level = l
        # clear the light at this point
        pos, chunk = cursor.current()
        x, z, y = pos.in_chunk
        l = chunk.get_block_light(x, z, y)
        chunk.set_block_light(x, z, y, 0)
        # if this point is darker than its surroundings, removal ends here
        if l < max_light_level:
            cursor.save_light(l)
            return l
        # if a neighbour is still lit, removal continues from here
        if max_light_level != 0:
            cursor.mark_updated(chunk.pos)
            return 0
        # if no neighbour is lit, removal is finished
        else:
            return -1
Example #42
0
class ContextManager(object):
    def __init__(self, database):
        """

        Args:
            database database.Database subclass: the subclass/implementation of
                                                the Database
        """
        self._database = database
        self._first_merkle_root = None
        self._contexts = {}

        self._address_queue = Queue()

        inflated_addresses = Queue()

        self._context_reader = _ContextReader(database, self._address_queue,
                                              inflated_addresses)
        self._context_reader.daemon = True
        self._context_reader.start()

        # the lock is shared between the ContextManager and
        # the _ContextWriter because they both access _contexts
        self._shared_lock = Lock()
        self._context_writer = _ContextWriter(self._shared_lock,
                                              inflated_addresses,
                                              self._contexts)
        self._context_writer.daemon = True
        self._context_writer.start()

    def get_first_root(self):
        if self._first_merkle_root is not None:
            return self._first_merkle_root
        self._first_merkle_root = MerkleDatabase(
            self._database).get_merkle_root()
        return self._first_merkle_root

    def create_context(self, state_hash, inputs, outputs):
        """
        Part of the interface to the Executor
        Args:
            state_hash: (str): Merkle root
            access_list: (list): list of tuples like [('read', 'address'),...

        Returns:
            context_id (str): the unique context_id of the session

        """
        context = StateContext(state_hash, inputs, outputs)
        with self._shared_lock:
            context.initialize_futures(inputs + outputs)
            self._contexts[context.session_id] = context

        self._address_queue.put_nowait(
            (context.session_id, state_hash, inputs))
        LOGGER.debug("CREATE_CONTEXT: %s", context.session_id)
        return context.session_id

    def commit_context(self, context_id_list, virtual):
        """
        Part of the interface to the Executor
        Args:
            context_id_list (list): context ids whose writes will be merged
            virtual (bool): if True, compute the new root without persisting it

        Returns:
            state_hash (str): the new state hash after the context_id_list
                              has been committed

        """

        if any([c_id not in self._contexts for c_id in context_id_list]):
            raise CommitException("Context Id not in contexts")
        first_id = context_id_list[0]

        if not all([
                self._contexts[first_id].merkle_root
                == self._contexts[c_id].merkle_root for c_id in context_id_list
        ]):
            raise CommitException(
                "MerkleRoots not all equal, yet asking to merge")

        merkle_root = self._contexts[first_id].merkle_root
        tree = MerkleDatabase(self._database, merkle_root)

        merged_updates = {}
        for c_id in context_id_list:
            with self._shared_lock:
                context = self._contexts[c_id]
                del self._contexts[c_id]
            for k in context.get_writable_address_value_dict().keys():
                if k in merged_updates:
                    raise CommitException(
                        "Duplicate address {} in context {}".format(k, c_id))
            merged_updates.update(context.get_writable_address_value_dict())

        new_root = merkle_root

        add_value_dict = {}
        for k, val_fut in merged_updates.items():
            value = val_fut.result()
            if value is not None:
                add_value_dict[k] = value

        new_root = tree.update(set_items=add_value_dict, virtual=virtual)

        return new_root

    def delete_context(self, context_id_list):
        """
        Part of the interface to the Executor.
        Throws away contexts, e.g. on InvalidTransaction.
        Args:
            context_id_list (list): a list of context ids

        Returns:
            None

        """
        for c_id in context_id_list:
            with self._shared_lock:
                if c_id in self._contexts:
                    del self._contexts[c_id]

    def get(self, context_id, address_list):
        """

        Args:
            context_id (str): the return value of create_context
            address_list (list): a list of address strs

        Returns:
            values_list (list): a list of (address, value) tuples
        """
        with self._shared_lock:
            if context_id not in self._contexts:
                return []
            context = self._contexts.get(context_id)
        return [(a, f.result())
                for a, f in context.get_from_prefetched(address_list)]

    def set(self, context_id, address_value_list):
        """
        speculatively sets addresses to a value,
        can be destroyed or committed to the merkle store
        Args:
            context_id (str): the context id returned by create_context
            address_value_list (list): list of {address: value} dicts

        Returns (bool): True on success, False if the context_id is unknown

        """
        with self._shared_lock:
            if context_id not in self._contexts:
                LOGGER.info("Context_id not in contexts, %s", context_id)
                return False
            context = self._contexts.get(context_id)
            context.can_set(address_value_list)
            add_value_dict = {}
            for d in address_value_list:
                for add, val in d.items():
                    add_value_dict[add] = val
            context.set_futures(add_value_dict)
        return True

    def get_squash_handler(self):
        def _squash(state_root, context_ids):
            tree = MerkleDatabase(self._database, state_root)
            updates = dict()
            for c_id in context_ids:
                with self._shared_lock:
                    context = self._contexts[c_id]
                for add in context.get_address_value_dict().keys():
                    if add in updates:
                        raise SquashException(
                            "Duplicate address {} in context {}".format(
                                add, c_id))

                effective_updates = {}
                for k, val_fut in context.get_address_value_dict().items():
                    value = val_fut.result()
                    if value is not None:
                        effective_updates[k] = value

                updates.update(effective_updates)

            state_hash = tree.update(updates, virtual=False)
            return state_hash

        return _squash

    def stop(self):
        self._context_writer.join(1)
        self._context_reader.join(1)
Example #43
0
class Medical:
    def __init__(self):
        self.BoQinput = Queue()
        self.BoQoutput = Queue()
        self.Pred = Queue()
        self.AlertQ = Queue()
        self.bo = 0  # blood oxygen
        self.bp = 0  # blood pressure
        self.pul = 0  # pulse

    def Input(self):
        """
        It's the input Moudle, which gets data from machine and pass data to other modules.
        :param BoQinput: It's a queue for input to communicate input data between modules.
        :return:
        """
        while True:
            # input
            Inp.rand_input(self.BoQinput)  # Generating random data as input.
            time.sleep(2)

    def middle(self):
        """
        Middle part contains several modules, including AI module and Alert module.
        :param BoQinput: It's a input queue.
        :param BoQoutput: It's a queue and it pass data to output module.
        :param Pred: A queue for prediction data.
        :param AlertQ: A queue for alert module.
        :return:
        """
        while True:
            if not self.BoQinput.empty():
                value = self.BoQinput.get_nowait()  # getting data from queue.

                self.bo = bo = value[0]  # blood oxygen
                self.bp = bp = value[1]  # blood pressure
                self.pul = pul = value[2]  # pulse
                # AI
                A = Ai()  # AI module
                A.input_check(bo, bp, pul)  # check the type of input
                predBloodOxygen, predBloodPressure, prePulse = A.predict(
                )  # prediction output
                pred_info = predBloodOxygen, predBloodPressure, prePulse
                self.Pred.put_nowait(pred_info)
                # Alert
                Alt = Alert()  # Alert Module
                boi = bo, 0  # the last number stands for the type of the data
                bpi = bp, 1
                puli = pul, 2
                Alt.Alert_for_three_categories_input(boi)  # data check
                Alt.Alert_for_three_categories_input(bpi)
                Alt.Alert_for_three_categories_input(puli)
                alert = Alt.Alert_Output()  # alert signal
                self.AlertQ.put_nowait(alert)  # send alert signal
                self.BoQoutput.put_nowait(value)  # send data
                time.sleep(2)

    def Output(self):
        """
        It's the output part, could be seen as interface module.
        :param BoQoutput: A queue for output data.
        :param Pred: Queue for prediction data.
        :param AlertQ: Queue for Alert information.
        :return:
        """
        while True:
            # Interface
            if not self.BoQoutput.empty():
                value = self.BoQoutput.get_nowait()  # get data
                pred = self.Pred.get_nowait()  # get prediction data
                bo = value[0]  # display blood oxygen
                bp = value[1]  # display blood pressure
                pul = value[2]  # display pulse
                Alert_info = self.AlertQ.get_nowait()  # get alert signal
                U = userInterface()
                U.getFromData(bo, bp, pul)  # get data
                U.sendToShow()  # display
                print("Prediction:", pred, "\n")
                print("Alert information:", Alert_info, "\n")

                time.sleep(2)

    def runprogram(self):
        """
        To start the program
        :return:  No return
        """
        t1 = threading.Thread(target=self.Input)
        t2 = threading.Thread(target=self.middle)
        t3 = threading.Thread(target=self.Output)
        t1.start()
        t2.start()
        t3.start()

    def get_bo(self):
        """
        return blood oxygen
        :return: bo
        """
        return self.bo

    def get_bp(self):
        """
        return blood pressure
        :return: bp
        """
        return self.bp

    def get_pul(self):
        """
        return pulse
        :return: pul
        """
        return self.pul
Example #44
0
import threading
import time
from queue import Queue, Empty


class BG(threading.Thread):
    def __init__(self, queue):
        super().__init__()
        self.q = queue

    def run(self):
        while True:
            try:
                # get(True, 0) raised an uncaught Empty on a race; an explicit
                # non-blocking get with an except clause is safe
                obj = self.q.get_nowait()
            except Empty:
                break
            print("got object in thread [" + str(threading.current_thread()) + "]")
            print(obj)
            print("##########")
            # echo each original item back once; thread-produced items are not
            # re-queued, so the queue drains and q.join() below can return
            if not obj.startswith("from thread"):
                self.q.put("from thread [" + str(threading.current_thread()) + "]")
            self.q.task_done()  # required for q.join() to ever return
            time.sleep(1)


if __name__ == '__main__':
    q = Queue(-1)
    q.put_nowait("1")
    q.put_nowait("2")
    t = BG(q)
    t2 = BG(q)
    q.put_nowait("3")
    t.start()
    q.put_nowait("4")
    t2.start()
    q.put_nowait("5")
    q.join()
    print(q.qsize())
    print("finished")
Example #45
0
class VelbusSocketConnection(velbus.VelbusConnection):
    """
    Wrapper for Socket connection configuration
    :author: Maikel Punie <*****@*****.**>
    """
    SLEEP_TIME = 60 / 1000

    def __init__(self, device, controller=None):
        velbus.VelbusConnection.__init__(self)
        self.logger = logging.getLogger('velbus')
        self._device = device
        self.controller = controller
        # get the address from a <host>:<port> format
        addr = device.split(':')
        addr = (addr[0], int(addr[1]))
        try:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.connect(addr)
        except Exception:
            self.logger.error("Could not open socket, "
                              "no messages are read or written to the bus")
            raise VelbusException("Could not open socket port")
        # build a read thread
        self._listen_process = threading.Thread(None, self.read_daemon,
                                                "velbus-process-reader", (),
                                                {})
        self._listen_process.daemon = True
        self._listen_process.start()

        # build a writer thread
        self._write_queue = Queue()
        self._write_process = threading.Thread(None, self.write_daemon,
                                               "velbus-connection-writer", (),
                                               {})
        self._write_process.daemon = True
        self._write_process.start()

    def stop(self):
        """Close serial port."""
        self.logger.warning("Stop executed")
        try:
            self._socket.close()
        except Exception:
            self.logger.error("Error while closing socket")
            raise VelbusException("Error while closing socket")
        time.sleep(1)

    def feed_parser(self, data):
        """Parse received message."""
        assert isinstance(data, bytes)
        self.controller.feed_parser(data)

    def send(self, message, callback=None):
        """Add message to write queue."""
        assert isinstance(message, velbus.Message)
        self._write_queue.put_nowait((message, callback))

    def read_daemon(self):
        """Read thread."""
        while True:
            data = self._socket.recv(9999)
            self.feed_parser(data)

    def write_daemon(self):
        """Write thread."""
        while True:
            (message, callback) = self._write_queue.get(block=True)
            self.logger.info("Sending message on USB bus: %s", str(message))
            self.logger.debug("Sending binary message:  %s",
                              str(message.to_binary()))
            self._socket.send(message.to_binary())
            time.sleep(self.SLEEP_TIME)
            if callback:
                callback()
Example #46
0
class ContextManager(object):
    def __init__(self, database, state_delta_store):
        """

        Args:
            database (database.Database subclass): the subclass/implementation
                of the Database
            state_delta_store (StateDeltaStore): the store for state deltas
        """
        self._database = database
        self._state_delta_store = state_delta_store
        self._first_merkle_root = None
        self._contexts = _ThreadsafeContexts()

        self._address_queue = Queue()

        self._inflated_addresses = Queue()

        self._context_reader = _ContextReader(database, self._address_queue,
                                              self._inflated_addresses)
        self._context_reader.start()

        self._context_writer = _ContextWriter(self._inflated_addresses,
                                              self._contexts)
        self._context_writer.start()

    def get_first_root(self):
        if self._first_merkle_root is not None:
            return self._first_merkle_root
        self._first_merkle_root = MerkleDatabase(
            self._database).get_merkle_root()
        return self._first_merkle_root

    def create_context(self, state_hash, base_contexts, inputs, outputs):
        """Create a StateContext to run a transaction against.

        Args:
            state_hash: (str): Merkle root to base state on.
            base_contexts (list of str): Context ids of contexts that will
                have their state applied to make this context.
            inputs (list of str): Addresses that can be read from.
            outputs (list of str): Addresses that can be written to.
        Returns:
            context_id (str): the unique context_id of the session

        """

        context = StateContext(state_hash=state_hash,
                               read_list=inputs,
                               write_list=outputs,
                               base_context_ids=base_contexts)

        contexts_asked_not_found = [
            cid for cid in base_contexts if cid not in self._contexts
        ]
        if len(contexts_asked_not_found) > 0:
            raise KeyError("Basing a new context off of context ids {} "
                           "that are not in context manager".format(
                               contexts_asked_not_found))
        # validate the base contexts before registering the new one
        self._contexts[context.session_id] = context
        base_context_list = [self._contexts[cid] for cid in base_contexts]
        # Get the state from the base contexts
        prior_state = dict()
        for base_context in base_context_list:
            prior_state.update(base_context.get_state())

        addresses_already_in_state = set(prior_state.keys())
        reads = set(inputs) - addresses_already_in_state
        others = (addresses_already_in_state | set(outputs)) - reads
        context.initialize_futures(reads=list(reads), others=list(others))
        # Read the actual values that are based on _ContextFutures before
        # setting new futures with those values.
        prior_state_results = dict()
        for k, val_fut in prior_state.items():
            value = val_fut.result()
            prior_state_results[k] = value

        context.set_futures(prior_state_results)

        if len(reads) > 0:
            self._address_queue.put_nowait(
                (context.session_id, state_hash, list(reads)))
        return context.session_id

    def delete_context(self, context_id_list):
        """Delete contexts from the ContextManager.

        Args:
            context_id_list (list): a list of context ids

        Returns:
            None

        """
        for c_id in context_id_list:
            if c_id in self._contexts:
                del self._contexts[c_id]

    def get(self, context_id, address_list):
        """Get the values associated with list of addresses, for a specific
        context referenced by context_id.

        Args:
            context_id (str): the return value of create_context, referencing
                a particular context.
            address_list (list): a list of address strs

        Returns:
            values_list (list): a list of (address, value) tuples
        """

        if context_id not in self._contexts:
            return []
        context = self._contexts.get(context_id)
        return [(a, f.result())
                for a, f in context.get_from_prefetched(address_list)]

    def set(self, context_id, address_value_list):
        """Within a context, sets addresses to a value.

        Args:
            context_id (str): the context id returned by create_context
            address_value_list (list): list of {address: value} dicts

        Returns:
            (bool): True if the operation is successful, False if
                the context_id doesn't reference a known context.

        Raises:
            AuthorizationException if an address is specified to write to
                that was not in the original transaction's outputs.
        """

        if context_id not in self._contexts:
            LOGGER.warning("Context_id not in contexts, %s", context_id)
            return False

        context = self._contexts.get(context_id)
        # Where authorization on the address level happens.
        context.can_set(address_value_list)
        add_value_dict = {}
        for d in address_value_list:
            for add, val in d.items():
                add_value_dict[add] = val
        context.set_futures(add_value_dict)
        return True

    def get_squash_handler(self):
        def _squash(state_root, context_ids, persist):
            tree = MerkleDatabase(self._database, state_root)
            updates = dict()
            for c_id in context_ids:
                context = self._contexts[c_id]
                for add in context.get_state().keys():
                    if add in updates:
                        raise SquashException(
                            "Duplicate address {} in context {}".format(
                                add, c_id))

                effective_updates = {}
                for k, val_fut in context.get_state().items():
                    value = val_fut.result()
                    if value is not None:
                        effective_updates[k] = value

                updates.update(effective_updates)

            if len(updates) == 0:
                return state_root

            virtual = not persist
            state_hash = tree.update(updates, virtual=virtual)
            if persist:
                # save the state changes to the state_delta_store
                changes = [
                    StateChange(address=addr,
                                value=value,
                                type=StateChange.SET)
                    for addr, value in updates.items()
                ]
                self._state_delta_store.save_state_deltas(state_hash, changes)

                # clean up all contexts that are involved in being squashed.
                base_c_ids = []
                for c_id in context_ids:
                    base_c_ids += self._contexts[c_id].base_context_ids
                all_context_ids = base_c_ids + context_ids
                self.delete_context(all_context_ids)
            return state_hash

        return _squash

    def stop(self):
        self._address_queue.put_nowait(_SHUTDOWN_SENTINEL)
        self._inflated_addresses.put_nowait(_SHUTDOWN_SENTINEL)
Example #47
0
class HomeCenterConnection(velbus.VelbusConnection):
    """
    Wrapper for Homecenter Socket connection configuration
    :author: David Danssaert <*****@*****.**>
    """
    SLEEP_TIME = 60 / 1000

    def __init__(self, device, username, password, controller=None):
        velbus.VelbusConnection.__init__(self)
        self.logger = logging.getLogger('velbus')
        self._device = device
        self._username = username
        self._password = password

        self.controller = controller
        # get the address from a <host>:<port> format
        addr = device.split(':')
        addr = (addr[0], int(addr[1]))
        try:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.connect(addr)
        except Exception:
            self.logger.error("Could not open socket, "
                              "no messages are read or written to the bus")
            raise VelbusException("Could not open socket port")

        self.connect()

        # build a read thread
        self._listen_process = threading.Thread(None, self.read_daemon,
                                                "velbus-process-reader", (),
                                                {})
        self._listen_process.daemon = True
        self._listen_process.start()

        # build a writer thread
        self._write_queue = Queue()
        self._write_process = threading.Thread(None, self.write_daemon,
                                               "velbus-connection-writer", (),
                                               {})
        self._write_process.daemon = True
        self._write_process.start()

    def connect(self):
        self._socket.send("VelbusLink 9.82.0.2".encode() + bytes([0x0f]))
        self._socket.recv(9999)
        login_payload = self._username.encode() + bytes(
            [0xff]) + self._password.encode()
        login_data = bytes([len(login_payload), 0, 0, 0, 0x2c, 0, 0, 0
                            ]) + login_payload
        self._socket.send(login_data)
        self._socket.recv(9999)
        self._socket.send(bytes([0, 0, 0, 0, 0x37, 0, 0, 0]))
        self._socket.recv(9999)
        self._socket.recv(9999)

    def stop(self):
        """Close serial port."""
        self.logger.warning("Stop executed")
        try:
            self._socket.close()
        except Exception:
            self.logger.error("Error while closing socket")
            raise VelbusException("Error while closing socket")
        time.sleep(1)

    def feed_parser(self, data):
        """Parse received message."""
        assert isinstance(data, bytes)
        self.controller.feed_parser(data)

    def send(self, message, callback=None):
        """Add message to write queue."""
        assert isinstance(message, velbus.Message)
        self._write_queue.put_nowait((message, callback))

    def read_daemon(self):
        """Read thread."""
        while True:
            data = self._socket.recv(9999)
            self.feed_parser(data[8:])

    def write_daemon(self):
        """Write thread."""
        while True:
            (message, callback) = self._write_queue.get(block=True)
            self.logger.info("Sending message on USB bus: %s", str(message))
            self.logger.debug("Sending binary message:  %s",
                              str(message.to_binary()))
            data = (bytes([len(message.to_binary()), 0, 0, 0, 0, 0x40, 0, 0])
                    + message.to_binary())
            self._socket.send(data)
            time.sleep(self.SLEEP_TIME)
            if callback:
                callback()
Example #48
0
    def face_detection():
        count1 = 0
        count2 = 0
        image_queue = Queue()
        dirname, filename = os.path.split(os.path.abspath(sys.argv[0]))
        cascPath = os.path.join(dirname, "haarcascade_frontalface_default.xml")
        faceCascade = cv2.CascadeClassifier(cascPath)

        video_capture = cv2.VideoCapture(1)
        # camera index 1 selects an external USB camera; use 0 for an integrated one
        text = 'Face detected!'
        font = cv2.FONT_HERSHEY_SIMPLEX
        while True:
            ret, frame = video_capture.read()
            if not ret:
                continue
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=40, minSize=(50, 50))
            for (x, y, w, h) in faces:
                cv2.putText(frame, text, (x, y - 20), font, 1, (0, 255, 0))
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.imshow('Video', frame)
            # read the key once per frame; calling cv2.waitKey() several
            # times per iteration consumes separate key events
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            elif key == ord('s'):
                if len(faces) > 0:
                    # crop the last detected face
                    x, y, w, h = faces[-1]
                    crop_frame = frame[y:y + h, x:x + w]
                    filename = f'./known_faces/{count1}.png'
                    cv2.imwrite(filename, crop_frame)
                    image_queue.put_nowait(filename)
                    print('Face saved')
                    count1 += 1

            elif key == ord('d'):
                if len(faces) == 0:
                    continue
                x, y, w, h = faces[-1]
                crop_frame = frame[y:y + h, x:x + w]
                filename = './images/example.png'
                cv2.imwrite(filename, crop_frame)
                image_queue.put_nowait(filename)
                picture = face_recognition.load_image_file(filename)
                encodings = face_recognition.face_encodings(picture)
                if len(encodings) == 0:
                    print("No faces found in the image!")
                    continue
                picture = encodings[0]
                known_files = glob.glob('./known_faces/*')
                if len(known_files) > 0:
                    know = False
                    for number in range(len(known_files)):
                        known_face = face_recognition.load_image_file(f'./known_faces/{number}.png')
                        known_encodings = face_recognition.face_encodings(known_face)
                        if len(known_encodings) == 0:
                            continue  # skip stored images with no detectable face
                        res = face_recognition.compare_faces([known_encodings[0]], picture)
                        if res[0]:
                            know = True
                    if know:
                        print('Face recognized')
                    else:
                        print('Unknown face')
                else:
                    print('database empty')


        video_capture.release()
        cv2.destroyAllWindows()
Example #49
0
File: web_1.py Project: aquadrop/py
                    return json.dumps(result, ensure_ascii=False)
            u_i_kernel = lru_kernels[u]
            r = u_i_kernel.kernel(q=q, user=u)
            result = {"question": q, "result": {"answer": r}, "user": u}
            return json.dumps(result, ensure_ascii=False)

        else:
            r = kernel.kernel(q=q)
            result = {"question": q, "result": {"answer": r}, "user": "******"}
            return json.dumps(result, ensure_ascii=False)
    except Exception:
        logging.error("C@user:{}##error_details:{}".format(u, traceback.format_exc()))
        result = {"question": q, "result": {"answer": "kernel exception"}, "user": "******"}
        return json.dumps(result, ensure_ascii=False)

if __name__ == "__main__":
    # SK = SceneKernel()
    # print(SK.kernel('你叫什么名字'))

    parser = argparse.ArgumentParser()
    parser.add_argument('--qsize', choices={'1', '20', '200'},
                        default='200', help='q_size initializes number of the starting instances...')
    args = parser.parse_args()

    QSIZE = int(args.qsize)

    for i in tqdm(range(QSIZE)):
        k = MainKernel(config)
        kernel_backups.put_nowait(k)
    print('web started...')
    app.run(host='0.0.0.0', port=21303, threaded=True)
Example #50
0
class AsyncThread:
    """
    Create a single worker thread which runs commands from a queue
    """
    def __init__(self, panda_started):
        # Attributes are configured by thread
        self.running = True
        self.panda_started = panda_started

        # Thread in which users can queue fns
        self.task_queue = Queue()
        self.athread = threading.Thread(target=self.run,
                                        args=(self.task_queue, False))
        self.athread.daemon = True  # Quit on main quit
        self.warned = False  # Did we print a warning about empty queue?
        self.ending = False  # Is main PANDA execution ending?
        self.empty_at = None  # Last time when our task queue went from full to empty
        self.last_called = None  # Name of the last function called
        self.athread.start()

        # Internal thread which only pypanda should use
        # This allows us to exit even when the main athread is blocking on some slow task
        # Unfortunately we haven't found a cleaner way to just terminate whatever
        # function is running and then add internal tasks to the main queue
        self._task_queue = Queue()
        self._athread = threading.Thread(target=self.run,
                                         args=(self._task_queue, True))
        self._athread.daemon = True  # Quit on main quit
        self._athread.start()

    def stop(self):
        self.running = False
        self.athread.join()

    def queue(self,
              func,
              internal=False
              ):  # Queue a function to be run soon. Must be @blocking
        if not func:
            raise RuntimeError("Queued up an undefined function")
        if not (hasattr(func, "__blocking__")) or not func.__blocking__:
            raise RuntimeError(
                "Refusing to queue function '{}' without @blocking decorator".
                format(func.__name__))
        if internal:
            self._task_queue.put_nowait(func)
        else:
            self.task_queue.put_nowait(func)

    def run(self, task_queue, internal=False):  # Run functions from queue
        #name = threading.get_ident()
        while self.running:  # Note setting this to false will take some time
            try:
                # try to get an item, blocking up to 1s, so we can also
                # periodically check whether we want to stop running
                func = task_queue.get(True, 1)
                if not internal:
                    self.empty_at = None
                self.last_called = func.__name__.replace(" (with async thread)", "")
            except Empty:
                # If we've been empty for 5s without shutdown, warn (just once). *Unless* we're
                # in a replay or we've never queued up a serial command (e.g., the guest is
                # actually booting instead of being driven from a snapshot). In either of
                # these cases, self.last_called will be None
                if not internal and self.last_called is not None:
                    if self.empty_at is None:
                        self.empty_at = time()
                    elif time() - self.empty_at > 5 and not self.warned and not self.ending:
                        warn(f"PANDA finished all the queued functions but emulation was left "
                             f"running. You may have forgotten to call panda.end_analysis() "
                             f"in the last queued function '{self.last_called}'")
                        self.warned = True
                continue

            # Don't interact with guest if it isn't running
            # Wait for self.panda_started, but also abort if running becomes false
            while not self.panda_started.is_set() and self.running:
                # Event.wait returns False on timeout rather than raising,
                # so just loop and re-check self.running
                self.panda_started.wait(timeout=1.0)

            if not self.running:
                break
            try:
                if debug:
                    print("Calling {}".format(func.__name__))
                # XXX: If running become false while func is running we need a way to kill it
                func()
            except Exception as e:
                print("exception {}".format(e))
                raise
            finally:
                task_queue.task_done()
                self.last_called = None
Example #51
0
class Network:
    def __init__(self, server_ip='192.168.3.10', server_port="8888"):
        self.server_ip = server_ip
        self.server_port = server_port
        self.client_id = 0
        self.server_msg = "Initial msg"
        self.reader = None
        self.writer = None
        self.speed = 2
        self.pos_send = [0, 0]
        self.pos_recv = Queue(maxsize=3)  # (x, y) coordinate tuples, one per item

    async def start(self):
        timer = 0  # simulates the time the player takes to pick a room
        choice = input(
            "Input 'c' to create a new game room, or 'j' to join one: ")
        if choice == "c":
            self.reader, self.writer = await asyncio.open_connection(
                self.server_ip, self.server_port)
            data = await self.reader.read(100)
            self.client_id = data.decode()
            print(f"This is client# {self.client_id}")
            self.writer.write("c".encode())
            await self.client()
        else:
            self.reader, self.writer = await asyncio.open_connection(
                self.server_ip, self.server_port)
            data = await self.reader.read(100)
            self.client_id = data.decode()
            print(f"This is client# {self.client_id}")
            self.writer.write("j".encode(
            ))  # input "0" to simulate the request to join a room
            while True:
                data = await self.reader.read(100)
                rooms = data.decode()
                print(f"Rooms available to join: {rooms}")
                timer += 1
                if timer < 5:  # used to simulate the waiting time for the player to input a room#
                    reply = "j"
                    self.writer.write(reply.encode())
                else:
                    reply = rooms[-1]  # simulate the room selected by the player
                    self.writer.write(reply.encode())
                    await self.client()

    async def client(self):
        while True:
            data = await self.reader.read(100)
            self.server_msg = data.decode()
            if self.server_msg == "Game Ready":
                # logging.info("Game Ready")
                break
            elif self.server_msg != "quit":
                # print(f'Received: {self.server_msg!r}')
                reply = f"{int(self.client_id)}: msg received is {self.server_msg!r}"
                self.writer.write(reply.encode())
                await self.writer.drain()
            else:
                await self.stop()
                break

        while True:
            reply = self.pos2str(self.pos_send)
            self.writer.write(reply.encode())
            # start = perf_counter()
            data = await self.reader.read(LEN)
            # print(perf_counter() - start)
            try:
                self.pos_recv.put_nowait(self.str2pos(data.decode()))
            except queue.Full:
                pass  # TODO: handle a full queue; see the sketch after this example

    async def stop(self):
        self.writer.close()
        await self.writer.wait_closed()
        print('Closing the connection')

    def pos2str(self, pos: list):
        # the returned string is like "100,100" from tuple (100, 100)
        return ','.join(map(str, pos))

    def str2pos(self, string: str):
        # string must be "100,100" or "100, 100" which will be converted to (100, 100)
        return tuple(map(int, string.split(',')))
Example #52
0
class Grating(Visual):
    """
    Moving grating
    """

    PARAMS = ['angle', 'freq', 'rate', 'phase',
              'mask', 'pos', 'size', 'duration']

    def __init__(self, angle, freq, rate, phase=0,
                 mask="gauss", pos=(0., 0.), size=(2,2),
                 duration=5000., debug=False):
        super(Grating, self).__init__(debug=debug)

        self.angle = angle
        self.freq = freq
        self.rate = rate
        self.phase = phase
        self.mask = mask
        self.pos = pos
        self.size = size
        self.duration = duration

        self.play_evt = threading.Event()
        self.stop_evt = threading.Event()
        self.stop_evt.clear()
        self.q = Queue()

        self.threadfn()

    def threadfn(self):
        self.thread = threading.Thread(target=self._thread)
        self.thread.start()

    def _thread(self):
        self.get_window()
        self.clock = core.Clock()
        self.draw_time = 0

        # init psychopy object
        self.ppo = visual.GratingStim(
            self.win,
            mask=self.mask,
            pos=self.pos,
            size=self.size,
            sf=self.freq,
            ori=self.angle,
            phase=self.phase)

        while not self.stop_evt.is_set():
            self.play_evt.wait()

            if self.debug:
                self.win.recordFrameIntervals = True

            # reset stim
            self.ppo.phase = self.phase

            start_time = self.clock.getTime()
            end_time = start_time + (self.duration / 1000.0)
            while self.clock.getTime() < end_time:
                try:
                    attrchange = self.q.get_nowait()
                except Empty:
                    attrchange = None
                if attrchange is not None:
                    if attrchange[0] == 'shift':
                        self.ppo.ori = self.ppo.ori + attrchange[1]

                self.update()
                self.ppo.draw()
                self.win.flip()

            # another flip clears the screen
            self.win.flip()
            self.play_evt.clear()

            if self.debug:
                path = os.path.join(prefs.DATADIR, 'frameintervals_'+datetime.datetime.now().isoformat()+'.csv')
                self.win.saveFrameIntervals(path)
                self.win.recordFrameIntervals = False

    def set(self, attr, value):
        """
        Set psychopy attrs

        Args:
            attr ():
            value ():

        Returns:

        """
        attr_map = {
            'mask':'mask', 'pos':'pos', 'size':'size',
            'freq':'sf', 'angle':'ori', 'phase':'phase'
        }

        if attr in ('mask', 'pos', 'size', 'freq', 'angle', 'phase'):
            self.q.put_nowait((attr_map[attr], value))

    def update(self):
        """advance the psychopy object one frame"""

        # get change since last draw, divide by rate
        dt = self.clock.getTime()-self.draw_time
        self.ppo.phase = (self.ppo.phase + self.rate*dt) % 1.0
        self.draw_time = self.clock.getTime()


    def play(self, attr, val):
        self.q.put((attr, val))
        self.play_evt.set()
Example #53
0
class process_pool(object):
    def __init__(self):
        self.reset()

    def reset(self):
        self.processes = []
        self.message_queue = Queue()
        self.activated = False

    def start(self, cmd, idx, cwd):
        p = subprocess.Popen(cmd,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             cwd=cwd,
                             encoding='utf-8')
        self.activated = True
        t = Thread(target=self.enqueue_output, args=(p.stdout, idx))
        t.daemon = True
        t.start()
        self.processes.append((idx, p, t))

    def apply(self, cmd_cwd_list):
        for idx, cmd_cwd in enumerate(cmd_cwd_list):
            cmd, cwd = cmd_cwd
            self.start(cmd, idx, cwd)
        self.daemon()

    def enqueue_output(self, out, i):
        # the Popen stream is text (encoding='utf-8'), so the EOF sentinel
        # must be '' rather than b''
        for line in iter(out.readline, ''):
            line_strip = line.strip()
            if len(line_strip) > 0:
                self.message_queue.put_nowait((i, line_strip))
        out.close()

    @custom_async
    def daemon(self):
        self.process_num = len(self.processes)
        alive_pool = [1 for _ in range(self.process_num)]
        outputs = []
        while True:
            if sum(alive_pool) == 0 and self.message_queue.empty():
                break
            try:
                i, out = self.message_queue.get_nowait()
            except Empty:
                time.sleep(0.05)  # avoid a busy-wait when no output is pending
            else:
                if self.process_num > 1:
                    sys.stdout.write(' '.join(['pid: {:d}'.format(i), out]))
                else:
                    sys.stdout.write(out)
                sys.stdout.write('\n')
            for idx, p, t in self.processes:
                if p.poll() is not None:
                    alive_pool[idx] = 0

        self.reset()

    def wait(self):
        while True:
            if not self.activated:
                break
            else:
                time.sleep(0.1)
Example #54
0
class QiubaiSpider(object):
    def __init__(self):
        self.url_pattern = 'https://www.qiushibaike.com/8hr/page/{}/'
        self.headers = {
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'
        }
        # 1. create the URL queue, the response queue and the data queue
        self.url_queue = Queue()
        self.page_queue = Queue()
        self.data_queue = Queue()
        self.pool = Pool()

    def add_url_to_queue(self):
        """把url添加到队列里"""
        for i in range(1, 14):
            url = self.url_pattern.format(i)
            self.url_queue.put_nowait(url)

    def add_page_to_queue(self):
        """从url队列中,取出url,发送请求,获取响应数据,把响应数据,放到响应队列中"""
        while True:
            url = self.url_queue.get()
            response = requests.get(url, headers=self.headers)
            if response.status_code != 200:
                # 如果请求没有成功, 再次放到URL队列
                self.url_queue.put(url)
            else:
                # 把响应数据添加响应队列中
                self.page_queue.put(response.content.decode())
            # 当URL处理完成了,就调用task_done
            self.url_queue.task_done()

    def add_data_to_queue(self):
        """Take a page off the response queue, extract the items, and queue them."""
        while True:
            page = self.page_queue.get()
            element = etree.HTML(page)
            divs = element.xpath('//*[@id="content-left"]/div')
            # xpath extraction principle: group first, then extract from each group
            data_list = []
            for div in divs:
                # dict holding one item's fields
                data = {}
                imgs = div.xpath('./div[1]/a[1]/img/@src')
                # the src is protocol-relative ("//..."), so prefix the scheme
                data['header_img'] = 'https:' + imgs[0] if len(imgs) != 0 else None

                data['name'] = self.get_first_element(
                    div.xpath('./div[1]/a[2]/h2/text()'))
                gender_class = div.xpath('./div[1]/div/@class')
                if len(gender_class) != 0:
                    data['gender'] = re.findall('articleGender (.+?)Icon',
                                                gender_class[0])[0]

                data['content'] = ''.join([
                    text.strip() for text in div.xpath('./a/div/span//text()')
                ])
                data['vote'] = self.get_first_element(
                    div.xpath('./div[2]/span[1]/i/text()'))
                data['comments'] = self.get_first_element(
                    div.xpath('./div[2]/span[2]/a/i/text()'))

                data_list.append(data)
            # put the extracted items on the data queue
            self.data_queue.put(data_list)
            # the page has been handled
            self.page_queue.task_done()

    def get_first_element(self, lis):
        return lis[0].strip() if len(lis) != 0 else None

    def save_data(self):
        """保存数据"""
        while True:
            data_list = self.data_queue.get()
            with open('糗百_协程池版.jsonlines', 'a', encoding='utf8') as f:
                for data in data_list:
                    json.dump(data, f, ensure_ascii=False)
                    f.write('\n')
            # the data batch has been handled
            self.data_queue.task_done()

    def execute_task(self, task, count):
        """
        执行线程任务
        :param task: 任务函数
        :param count: 启动线程个数
        """
        for i in range(count):
            self.pool.apply_async(task)

    def run(self):

        self.add_url_to_queue()
        self.execute_task(self.add_page_to_queue, 2)
        self.execute_task(self.add_data_to_queue, 1)
        self.execute_task(self.save_data, 2)

        # block the main thread until every queue has been drained
        self.url_queue.join()
        self.page_queue.join()
        self.data_queue.join()
Example #55
0
class Brutedomain:
    def __init__(self, args):
        self.target_domain = args.domain
        self.cdn_flag = args.cdn
        if not (self.target_domain):
            print('usage: brutedns.py -d/-f baidu.com/domains.txt -s low/medium/high -c y/n')
            sys.exit(1)
        self.level = args.level
        self.sub_dict = args.sub_file
        self.speed = args.speed
        self.next_sub_dict = args.next_sub_file
        self.other_result = args.other_file

        self.resolver = dns.resolver.Resolver()
        self.resolver.nameservers = [
            '114.114.114.114',
            '114.114.115.115',
            '223.5.5.5',
            '223.6.6.6',
            '180.76.76.76',
            '119.29.29.29',
            '182.254.116.116',
            '210.2.4.8',
            '112.124.47.27',
            '114.215.126.16',
            '101.226.4.6',
            '218.30.118.6',
            '123.125.81.6',
            '140.207.198.6',
            '8.8.8.8',
            '8.8.4.4']
        self.resolver.timeout = 10

        self.add_ulimit()

        self.queues = Queue()
        self.dict_cname = dict()
        self.dict_ip = dict()
        self.dict_ip_block = dict()
        self.ip_flag = dict()
        self.cdn_set = set()
        self.queue_sub = Queue()
        self.active_ip_dict = dict()
        self.dict_ip_count = dict()
        self.found_count = 0

        self.set_next_sub = self.load_next_sub_dict()
        self.set_cdn = self.load_cdn()

        self.load_sub_dict_to_queue()
        self.extract_next_sub_log()

        self.segment_num = self.judge_speed(args.speed)


    def add_ulimit(self):
        if (platform.system() != "Windows"):
            os.system("ulimit -n 65535")

    def load_cdn(self):
        cdn_set = set()
        with open('dict/cdn_servers.txt', 'r') as file_cdn:
            for cdn in file_cdn:
                cdn_set.add(cdn.strip())
        return cdn_set

    def load_next_sub_dict(self):
        next_sub_set = set()
        with open(self.next_sub_dict, 'r') as file_next_sub:
            for next_sub in file_next_sub:
                next_sub_set.add(next_sub)
        return next_sub_set

    def load_sub_dict_to_queue(self):
        with open(self.sub_dict, 'r') as file_sub:
            for sub in file_sub:
                domain = "{sub}.{target_domain}".format(
                    sub=sub.strip(), target_domain=self.target_domain)
                self.queues.put(domain)

    def load_result_from_other(self):
        log_type = type(self.other_result)
        other_subdomain_list = list()
        if (log_type == str):
            try:
                subdomain_log = open('{target_domain}'.format(target_domain=self.other_result), 'r')
                other_result = [subdomain.strip() for subdomain in subdomain_log]
                subdomain_log.close()
                print(other_result)
            except Exception:
                print('subdomain log does not exist')
                sys.exit(1)
        elif (log_type == list):
            other_result = self.other_result
        else:
            other_result = []

        for subdomain in other_result:
            if (('.' + str(self.target_domain)) in subdomain):
                other_subdomain_list.append(subdomain.strip())
        return other_subdomain_list

    def extract_next_sub_log(self):
        other_subdomain_list = self.load_result_from_other()
        for subdomain in other_subdomain_list:
            self.queues.put(subdomain)
            # strip the ".<target_domain>" suffix; str.strip() removes a
            # character set, not a suffix, so slice instead
            suffix = ".{domain}".format(domain=self.target_domain)
            sub = subdomain[:-len(suffix)] if subdomain.endswith(suffix) else subdomain
            sub_num = sub.split(".")
            if (len(sub_num) != 1):
                sub_num.remove(sub_num[-1])
                for sub in sub_num:
                    self.set_next_sub.add(sub.strip())

    def check_cdn(self, cname):
        for cdn in self.set_cdn:
            if (cdn in cname or 'cdn' in cname):
                return True
        # record cnames that did not match any known CDN
        self.cdn_set.add(cname)
        return False

    def judge_speed(self, speed):
        if (speed == "low"):
            segment_num = config.low_segment_num
        elif (speed == "high"):
            segment_num = config.high_segment_num
        else:
            segment_num = config.medium_segment_num
        return segment_num

    def get_type_id(self, name):
        return dns.rdatatype.from_text(name)

    def query_domain(self, domain):
        list_ip, list_cname = [], []
        try:
            record = self.resolver.query(domain)
            for A_CNAME in record.response.answer:
                for item in A_CNAME.items:
                    if item.rdtype == self.get_type_id('A'):
                        list_ip.append(str(item))
                        self.dict_ip_block[domain] = list_ip
                    elif (item.rdtype == self.get_type_id('CNAME')):
                        list_cname.append(str(item))
                        self.dict_cname[domain] = list_cname
                    elif (item.rdtype == self.get_type_id('TXT')):
                        pass
                    elif item.rdtype == self.get_type_id('MX'):
                        pass
                    elif item.rdtype == self.get_type_id('NS'):
                        pass
        except Exception:
            pass

    def get_block(self):
        domain_list = list()
        if (self.queues.qsize() > self.segment_num):
            for num in range(self.segment_num):
                domain_list.append(self.queues.get())
        else:
            for num in range(self.queues.qsize()):
                domain_list.append(self.queues.get())
        return domain_list

    def generate_sub(self):
        try:
            domain = self.queue_sub.get_nowait()
            for next_sub in self.set_next_sub:
                subdomain = "{next}.{domain}".format(
                    next=next_sub.strip(), domain=domain)
                self.queues.put_nowait(subdomain)
            return True
        except Exception:
            return False

    def set_dynamic_num(self):
        if (self.speed == "high"):
            return 350000
        elif (self.speed == "low"):
            return 150000
        else:
            return 250000

    def deweighting_subdomain(self):
        temp_list = list()
        for subdomain, ip_list in self.dict_ip_block.items():
            ip_str = str(sorted(ip_list))
            if (ip_str in self.dict_ip_count):
                if (self.dict_ip_count[ip_str] > config.ip_max_count):
                    temp_list.append(subdomain)
                else:
                    self.dict_ip_count[ip_str] = self.dict_ip_count[ip_str] + 1
            else:
                self.dict_ip_count[ip_str] = 1

            for filter_ip in config.waiting_fliter_ip:
                if (filter_ip in ip_str):
                    temp_list.append(subdomain)

        for subdomain in temp_list:
            try:
                del self.dict_ip_block[subdomain]
                del self.dict_cname[subdomain]
            except Exception:
                pass

        self.dict_ip.update(self.dict_ip_block)
        self.found_count = len(self.dict_ip)

        for subdomain, ip_list in self.dict_ip_block.items():
            if (str(subdomain).count(".") < self.level):
                self.queue_sub.put(str(subdomain))
        self.dict_ip_block.clear()

    def handle_data(self):
        for subdomain, cname_list in self.dict_cname.items():
            for cname in cname_list:
                if (self.check_cdn(cname)):
                    self.dict_cname[subdomain] = "Yes"
                else:
                    self.dict_cname[subdomain] = "No"
        for subdomain, ip_list in self.dict_ip_block.items():
            for ip in ip_list:
                if (IP(ip).iptype() == 'PRIVATE'):
                    self.dict_ip[subdomain] = "private({ip})".format(ip=ip)
                else:
                    try:
                        key_yes = self.dict_cname[subdomain]
                    except KeyError:
                        key_yes = "No"
                    if (key_yes == "No"):
                        CIP = (IP(ip).make_net("255.255.255.0"))
                        if CIP in self.ip_flag:
                            self.ip_flag[CIP] = self.ip_flag[CIP] + 1
                        else:
                            self.ip_flag[CIP] = 1

                        if CIP in self.active_ip_dict:
                            active_ip_list = self.active_ip_dict[CIP]
                            if (ip not in active_ip_list):
                                active_ip_list.append(ip)
                                self.active_ip_dict[CIP] = active_ip_list
                        else:
                            active_ip_list = []
                            active_ip_list.append(ip)
                            self.active_ip_dict[CIP] = active_ip_list

    def raw_write_disk(self):
        os.makedirs('result/{domain}'.format(domain=self.target_domain),
                    exist_ok=True)
        with open('result/{name}/{name}.csv'.format(name=self.target_domain), 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['domain', 'CDN', 'IP'])
            for subdomain, ip_list in self.dict_ip.items():
                try:
                    flag = self.dict_cname[subdomain]
                except Exception:
                    flag = "No"
                writer.writerow([subdomain, flag, ip_list])

    def deal_write_disk(self):
        os.makedirs('result/{domain}'.format(domain=self.target_domain),
                    exist_ok=True)
        ip_flags = sorted(
            self.ip_flag.items(),
            key=lambda d: d[1],
            reverse=True)
        with open('result/{name}/deal_{name}.csv'.format(name=self.target_domain), 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['IP', 'frequency', 'active'])
            for ip_frequency in ip_flags:
                writer.writerow([ip_frequency[0], ip_frequency[1],
                                 self.active_ip_dict[ip_frequency[0]]])

    def collect_cname(self):
        with open('result/cname.txt', 'a') as txt:
            for cname in self.cdn_set:
                txt.write('{cname}\r\n'.format(cname=cname))

    def cmd_print(self, wait_size, start, end, i):
        print("domain: {domain} | found: {found_count} | speed: {velocity}/s | waiting: {qsize}"
              .format(domain=self.target_domain,
                      qsize=wait_size,
                      found_count=self.found_count,
                      velocity=round(self.segment_num * i / (end - start), 2)))

    def run(self):
        start = time.time()
        print("*****************************Begin*******************************")
        i = 0
        while not self.queues.empty():
            i = i + 1
            domain_list = self.get_block()
            coroutines = [gevent.spawn(self.query_domain, l)
                          for l in domain_list]
            try:
                gevent.joinall(coroutines)
            except KeyboardInterrupt:
                print('user stop')
                sys.exit(1)
            # pool=gevent.pool.Pool(2000)
            # for l in domain_list:
            #     pool.spawn(self.query_domain,l)
            # pool.join()
            self.deweighting_subdomain()
            self.cmd_print(self.queues.qsize(), start, time.time(), i)


            if (self.queues.qsize() < 30000):
                while (self.queues.qsize() < self.set_dynamic_num()):
                    if not self.generate_sub():
                        break
        self.handle_data()
        self.raw_write_disk()
        self.deal_write_disk()
        print("*****************************Over********************************")
Example #56
0
class printcore():
    def __init__(self, port=None, baud=None, dtr=None):
        """Initializes a printcore instance. Pass the port and baud rate to
           connect immediately"""
        self.baud = None
        self.dtr = None
        self.port = None
        self.analyzer = gcoder.GCode()
        # Serial instance connected to the printer, should be None when
        # disconnected
        self.printer = None
        # clear to send, enabled after responses
        # FIXME: should probably be changed to a sliding window approach
        self.clear = 0
        # The printer has responded to the initial command and is active
        self.online = False
        # is a print currently running, true if printing, false if paused
        self.printing = False
        self.mainqueue = None
        self.priqueue = Queue(0)
        self.queueindex = 0
        self.lineno = 0
        self.resendfrom = -1
        self.paused = False
        self.sentlines = {}
        self.log = deque(maxlen=10000)
        self.sent = []
        self.writefailures = 0
        self.tempcb = None  # impl (wholeline)
        self.recvcb = None  # impl (wholeline)
        self.sendcb = None  # impl (wholeline)
        self.preprintsendcb = None  # impl (wholeline)
        self.printsendcb = None  # impl (wholeline)
        self.layerchangecb = None  # impl (wholeline)
        self.errorcb = None  # impl (wholeline)
        self.startcb = None  # impl ()
        self.endcb = None  # impl ()
        self.onlinecb = None  # impl ()
        self.loud = False  # emit sent and received lines to terminal
        self.tcp_streaming_mode = False
        self.greetings = ['start', 'Grbl ']
        self.wait = 0  # default wait period for send(), send_now()
        self.read_thread = None
        self.stop_read_thread = False
        self.send_thread = None
        self.stop_send_thread = False
        self.print_thread = None
        self.readline_buf = []
        self.selector = None
        self.event_handler = PRINTCORE_HANDLER
        # Not all platforms need to do this parity workaround, and some drivers
        # don't support it.  Limit it to platforms that actually require it
        # here to avoid doing redundant work elsewhere and potentially breaking
        # things.
        self.needs_parity_workaround = platform.system() == "Linux" \
            and os.path.exists("/etc/debian_version")
        for handler in self.event_handler:
            try:
                handler.on_init()
            except:
                logging.error(traceback.format_exc())
        if port is not None and baud is not None:
            self.connect(port, baud)
        self.xy_feedrate = None
        self.z_feedrate = None

    def addEventHandler(self, handler):
        '''
        Adds an event handler.
        
        @param handler: The handler to be added.
        '''
        self.event_handler.append(handler)

    def logError(self, error):
        for handler in self.event_handler:
            try:
                handler.on_error(error)
            except:
                logging.error(traceback.format_exc())
        if self.errorcb:
            try:
                self.errorcb(error)
            except:
                logging.error(traceback.format_exc())
        else:
            logging.error(error)

    @locked
    def disconnect(self):
        """Disconnects from printer and pauses the print
        """
        if self.printer:
            if self.read_thread:
                self.stop_read_thread = True
                if threading.current_thread() != self.read_thread:
                    self.read_thread.join()
                self.read_thread = None
            if self.print_thread:
                self.printing = False
                self.print_thread.join()
            self._stop_sender()
            try:
                if self.selector is not None:
                    self.selector.unregister(self.printer_tcp)
                    self.selector.close()
                    self.selector = None
                if self.printer_tcp is not None:
                    self.printer_tcp.close()
                    self.printer_tcp = None
                self.printer.close()
            except (socket.error, OSError):
                logging.error(traceback.format_exc())
        for handler in self.event_handler:
            try:
                handler.on_disconnect()
            except:
                logging.error(traceback.format_exc())
        self.printer = None
        self.online = False
        self.printing = False

    @locked
    def connect(self, port=None, baud=None, dtr=None):
        """Set port and baudrate if given, then connect to printer
        """
        if self.printer:
            self.disconnect()
        if port is not None:
            self.port = port
        if baud is not None:
            self.baud = baud
        if dtr is not None:
            self.dtr = dtr
        if self.port is not None and self.baud is not None:
            # Connect to socket if "port" is an IP, device if not
            host_regexp = re.compile(
                r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$|^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"
            )
            is_serial = True
            if ":" in self.port:
                bits = self.port.split(":")
                if len(bits) == 2:
                    hostname = bits[0]
                    try:
                        port_number = int(bits[1])
                        if host_regexp.match(
                                hostname) and 1 <= port_number <= 65535:
                            is_serial = False
                    except:
                        pass
            self.writefailures = 0
            if not is_serial:
                self.printer_tcp = socket.socket(socket.AF_INET,
                                                 socket.SOCK_STREAM)
                self.printer_tcp.setsockopt(socket.IPPROTO_TCP,
                                            socket.TCP_NODELAY, 1)
                self.timeout = 0.25
                self.printer_tcp.settimeout(1.0)
                try:
                    self.printer_tcp.connect((hostname, port_number))
                    #a single read timeout raises OSError for all later reads
                    #probably since python 3.5
                    #use non blocking instead
                    self.printer_tcp.settimeout(0)
                    self.printer = self.printer_tcp.makefile('rwb',
                                                             buffering=0)
                    self.selector = selectors.DefaultSelector()
                    self.selector.register(self.printer_tcp,
                                           selectors.EVENT_READ)
                except socket.error as e:
                    if (e.strerror is None): e.strerror = ""
                    self.logError(
                        _("Could not connect to %s:%s:") %
                        (hostname, port_number) + "\n" +
                        _("Socket error %s:") % e.errno + "\n" + e.strerror)
                    self.printer = None
                    self.printer_tcp = None
                    return
            else:
                disable_hup(self.port)
                self.printer_tcp = None
                try:
                    if self.needs_parity_workaround:
                        self.printer = Serial(port=self.port,
                                              baudrate=self.baud,
                                              timeout=0.25,
                                              parity=PARITY_ODD)
                        self.printer.close()
                        self.printer.parity = PARITY_NONE
                    else:
                        self.printer = Serial(baudrate=self.baud,
                                              timeout=0.25,
                                              parity=PARITY_NONE)
                        self.printer.port = self.port
                    try:  #this appears not to work on many platforms, so we're going to call it but not care if it fails
                        self.printer.dtr = dtr
                    except:
                        #self.logError(_("Could not set DTR on this platform")) #not sure whether to output an error message
                        pass
                    self.printer.open()
                except SerialException as e:
                    self.logError(
                        _("Could not connect to %s at baudrate %s:") %
                        (self.port, self.baud) + "\n" +
                        _("Serial error: %s") % e)
                    self.printer = None
                    return
                except IOError as e:
                    self.logError(
                        _("Could not connect to %s at baudrate %s:") %
                        (self.port, self.baud) + "\n" + _("IO error: %s") % e)
                    self.printer = None
                    return
            for handler in self.event_handler:
                try:
                    handler.on_connect()
                except:
                    logging.error(traceback.format_exc())
            self.stop_read_thread = False
            self.read_thread = threading.Thread(target=self._listen,
                                                name='read thread')
            self.read_thread.start()
            self._start_sender()

    def reset(self):
        """Reset the printer
        """
        if self.printer and not self.printer_tcp:
            self.printer.dtr = 1
            time.sleep(0.2)
            self.printer.dtr = 0

    def _readline_buf(self):
        "Try to readline from buffer"
        if len(self.readline_buf):
            chunk = self.readline_buf[-1]
            eol = chunk.find(b'\n')
            if eol >= 0:
                line = b''.join(self.readline_buf[:-1]) + chunk[:(eol + 1)]
                self.readline_buf = []
                if eol + 1 < len(chunk):
                    self.readline_buf.append(chunk[(eol + 1):])
                return line
        return PR_AGAIN

    def _readline_nb(self):
        "Non blocking readline. Socket based files do not support non blocking or timeouting readline"
        if self.printer_tcp:
            line = self._readline_buf()
            if line:
                return line
            chunk_size = 256
            while True:
                chunk = self.printer.read(chunk_size)
                if chunk is SYS_AGAIN and self.selector.select(self.timeout):
                    chunk = self.printer.read(chunk_size)
                #print('_readline_nb chunk', chunk, type(chunk))
                if chunk:
                    self.readline_buf.append(chunk)
                    line = self._readline_buf()
                    if line:
                        return line
                elif chunk is SYS_AGAIN:
                    return PR_AGAIN
                else:
                    #chunk == b'' means EOF
                    line = b''.join(self.readline_buf)
                    self.readline_buf = []
                    self.stop_read_thread = True
                    return line if line else PR_EOF
        else:  # serial port
            return self.printer.readline()

    def _readline(self):
        try:
            line_bytes = self._readline_nb()
            if line_bytes is PR_EOF:
                self.logError(
                    _("Can't read from printer (disconnected?). line_bytes is None"
                      ))
                return PR_EOF
            line = line_bytes.decode('utf-8')

            if len(line) > 1:
                self.log.append(line)
                for handler in self.event_handler:
                    try:
                        handler.on_recv(line)
                    except:
                        logging.error(traceback.format_exc())
                if self.recvcb:
                    try:
                        self.recvcb(line)
                    except:
                        self.logError(traceback.format_exc())
                if self.loud: logging.info("RECV: %s" % line.rstrip())
            return line
        except UnicodeDecodeError:
            self.logError(
                _("Got rubbish reply from %s at baudrate %s:") %
                (self.port, self.baud) + "\n" + _("Maybe a bad baudrate?"))
            return None
        except SelectError as e:
            if 'Bad file descriptor' in e.args[1]:
                self.logError(
                    _("Can't read from printer (disconnected?) (SelectError {0}): {1}"
                      ).format(e.errno, decode_utf8(e.strerror)))
                return None
            else:
                self.logError(
                    _("SelectError ({0}): {1}").format(e.errno,
                                                       decode_utf8(
                                                           e.strerror)))
                raise
        except SerialException as e:
            self.logError(
                _("Can't read from printer (disconnected?) (SerialException): {0}"
                  ).format(decode_utf8(str(e))))
            return None
        except socket.error as e:
            self.logError(
                _("Can't read from printer (disconnected?) (Socket error {0}): {1}"
                  ).format(e.errno, decode_utf8(e.strerror)))
            return None
        except OSError as e:
            if e.errno == errno.EAGAIN:  # Not a real error, no data was available
                return ""
            self.logError(
                _("Can't read from printer (disconnected?) (OS Error {0}): {1}"
                  ).format(e.errno, e.strerror))
            return None

    def _listen_can_continue(self):
        if self.printer_tcp:
            return not self.stop_read_thread and self.printer
        return (not self.stop_read_thread and self.printer
                and self.printer.is_open)

    def _listen_until_online(self):
        while not self.online and self._listen_can_continue():
            self._send("M105")
            if self.writefailures >= 4:
                logging.error(
                    _("Aborting connection attempt after 4 failed writes."))
                return
            empty_lines = 0
            while self._listen_can_continue():
                line = self._readline()
                if line is None: break  # connection problem
                # Workaround for cases where M105 was sent before the printer
                # serial port was online: an empty line means the read timeout
                # was reached, i.e. no data was received, so we count empty
                # lines, and once we have seen 15 in a row we break and send a
                # new M105.
                # 15 was chosen because it gives the Gen7 bootloader enough
                # time to time out, and since unanswered M105s should be rare
                # we can afford to wait a long time before resending.
                if not line:
                    empty_lines += 1
                    if empty_lines == 15: break
                else: empty_lines = 0
                if line.startswith(tuple(self.greetings)) \
                   or line.startswith('ok') or "T:" in line:
                    self.online = True
                    for handler in self.event_handler:
                        try:
                            handler.on_online()
                        except:
                            logging.error(traceback.format_exc())
                    if self.onlinecb:
                        try:
                            self.onlinecb()
                        except:
                            self.logError(traceback.format_exc())
                    return

    def _listen(self):
        """This function acts on messages from the firmware
        """
        self.clear = True
        if not self.printing:
            self._listen_until_online()
        while self._listen_can_continue():
            line = self._readline()
            if line is None:
                logging.debug('_readline() is None, exiting _listen()')
                break
            if line.startswith('DEBUG_'):
                continue
            if line.startswith(tuple(self.greetings)) or line.startswith('ok'):
                self.clear = True
            if line.startswith('ok') and "T:" in line:
                for handler in self.event_handler:
                    try:
                        handler.on_temp(line)
                    except:
                        logging.error(traceback.format_exc())
                if self.tempcb:
                    # callback for temp, status, whatever
                    try:
                        self.tempcb(line)
                    except:
                        self.logError(traceback.format_exc())
            elif line.startswith('Error'):
                self.logError(line)
            # Teststrings for resend parsing       # Firmware     exp. result
            # line="rs N2 Expected checksum 67"    # Teacup       2
            if line.lower().startswith("resend") or line.startswith("rs"):
                for haystack in ["N:", "N", ":"]:
                    line = line.replace(haystack, " ")
                linewords = line.split()
                while len(linewords) != 0:
                    try:
                        toresend = int(linewords.pop(0))
                        self.resendfrom = toresend
                        break
                    except:
                        pass
                self.clear = True
        self.clear = True
        logging.debug('Exiting read thread')

    def _start_sender(self):
        self.stop_send_thread = False
        self.send_thread = threading.Thread(target=self._sender,
                                            name='send thread')
        self.send_thread.start()

    def _stop_sender(self):
        if self.send_thread:
            self.stop_send_thread = True
            self.send_thread.join()
            self.send_thread = None

    def _sender(self):
        while not self.stop_send_thread:
            try:
                command = self.priqueue.get(True, 0.1)
            except QueueEmpty:
                continue
            while self.printer and self.printing and not self.clear:
                time.sleep(0.001)
            self._send(command)
            while self.printer and self.printing and not self.clear:
                time.sleep(0.001)

    def _checksum(self, command):
        return reduce(lambda x, y: x ^ y, map(ord, command))

    def startprint(self, gcode, startindex=0):
        """Start a print, gcode is an array of gcode commands.
        returns True on success, False if already printing.
        The print queue will be replaced with the contents of the data array,
        the next line will be set to 0 and the firmware notified. Printing
        will then start in a parallel thread.
        """
        if self.printing or not self.online or not self.printer:
            return False
        self.queueindex = startindex
        self.mainqueue = gcode
        self.printing = True
        self.lineno = 0
        self.resendfrom = -1
        self._send("M110", -1, True)
        if not gcode or not gcode.lines:
            return True
        self.clear = False
        resuming = (startindex != 0)
        self.print_thread = threading.Thread(target=self._print,
                                             name='print thread',
                                             kwargs={"resuming": resuming})
        self.print_thread.start()
        return True

    def cancelprint(self):
        self.pause()
        self.paused = False
        self.mainqueue = None
        self.clear = True

    # run a simple script if it exists, no multithreading
    def runSmallScript(self, filename):
        if filename is None: return
        try:
            with open(filename) as f:
                for line in f:
                    line = line.replace("\n", "")
                    line = line.split(";")[0]  # strip comments; keeps the whole line when there is none
                    self.send_now(line)
        except:
            pass

    def pause(self):
        """Pauses the print, saving the current position.
        """
        if not self.printing: return False
        self.paused = True
        self.printing = False

        # ';@pause' in the gcode file calls pause from the print thread
        if threading.current_thread() is not self.print_thread:
            try:
                self.print_thread.join()
            except:
                self.logError(traceback.format_exc())

        self.print_thread = None

        # saves the status
        self.pauseX = self.analyzer.abs_x
        self.pauseY = self.analyzer.abs_y
        self.pauseZ = self.analyzer.abs_z
        self.pauseE = self.analyzer.abs_e
        self.pauseF = self.analyzer.current_f
        self.pauseRelative = self.analyzer.relative
        self.pauseRelativeE = self.analyzer.relative_e

    def resume(self):
        """Resumes a paused print."""
        if not self.paused: return False
        # restores the status
        self.send_now("G90")  # go to absolute coordinates

        xyFeed = '' if self.xy_feedrate is None else ' F' + str(
            self.xy_feedrate)
        zFeed = '' if self.z_feedrate is None else ' F' + str(self.z_feedrate)

        self.send_now("G1 X%s Y%s%s" % (self.pauseX, self.pauseY, xyFeed))
        self.send_now("G1 Z" + str(self.pauseZ) + zFeed)
        self.send_now("G92 E" + str(self.pauseE))

        # go back to relative if needed
        if self.pauseRelative:
            self.send_now("G91")
        if self.pauseRelativeE:
            self.send_now('M83')
        # reset old feed rate
        self.send_now("G1 F" + str(self.pauseF))

        self.paused = False
        self.printing = True
        self.print_thread = threading.Thread(target=self._print,
                                             name='print thread',
                                             kwargs={"resuming": True})
        self.print_thread.start()

    def send(self, command, wait=0):
        """Adds a command to the checksummed main command queue if printing, or
        sends the command immediately if not printing"""

        if self.online:
            if self.printing:
                self.mainqueue.append(command)
            else:
                self.priqueue.put_nowait(command)
        else:
            self.logError(_("Not connected to printer."))

    def send_now(self, command, wait=0):
        """Sends a command to the printer ahead of the command queue, without a
        checksum"""
        if self.online:
            self.priqueue.put_nowait(command)
        else:
            self.logError(_("Not connected to printer."))

    def _print(self, resuming=False):
        self._stop_sender()
        try:
            for handler in self.event_handler:
                try:
                    handler.on_start(resuming)
                except:
                    logging.error(traceback.format_exc())
            if self.startcb:
                # callback for printing started
                try:
                    self.startcb(resuming)
                except:
                    self.logError(
                        _("Print start callback failed with:") + "\n" +
                        traceback.format_exc())
            while self.printing and self.printer and self.online:
                self._sendnext()
            self.sentlines = {}
            self.log.clear()
            self.sent = []
            for handler in self.event_handler:
                try:
                    handler.on_end()
                except:
                    logging.error(traceback.format_exc())
            if self.endcb:
                # callback for printing done
                try:
                    self.endcb()
                except:
                    self.logError(
                        _("Print end callback failed with:") + "\n" +
                        traceback.format_exc())
        except:
            self.logError(
                _("Print thread died due to the following error:") + "\n" +
                traceback.format_exc())
        finally:
            self.print_thread = None
            self._start_sender()

    def process_host_command(self, command):
        """only ;@pause command is implemented as a host command in printcore, but hosts are free to reimplement this method"""
        command = command.lstrip()
        if command.startswith(";@pause"):
            self.pause()

    def _sendnext(self):
        if not self.printer:
            return
        while self.printer and self.printing and not self.clear:
            time.sleep(0.001)
        # Only wait for oks when using serial connections or when not using tcp
        # in streaming mode
        if not self.printer_tcp or not self.tcp_streaming_mode:
            self.clear = False
        if not (self.printing and self.printer and self.online):
            self.clear = True
            return
        if self.resendfrom < self.lineno and self.resendfrom > -1:
            self._send(self.sentlines[self.resendfrom], self.resendfrom, False)
            self.resendfrom += 1
            return
        self.resendfrom = -1
        if not self.priqueue.empty():
            self._send(self.priqueue.get_nowait())
            self.priqueue.task_done()
            return
        if self.printing and self.mainqueue.has_index(self.queueindex):
            (layer, line) = self.mainqueue.idxs(self.queueindex)
            gline = self.mainqueue.all_layers[layer][line]
            if self.queueindex > 0:
                (prev_layer,
                 prev_line) = self.mainqueue.idxs(self.queueindex - 1)
                if prev_layer != layer:
                    for handler in self.event_handler:
                        try:
                            handler.on_layerchange(layer)
                        except:
                            logging.error(traceback.format_exc())
            if self.layerchangecb and self.queueindex > 0:
                (prev_layer,
                 prev_line) = self.mainqueue.idxs(self.queueindex - 1)
                if prev_layer != layer:
                    try:
                        self.layerchangecb(layer)
                    except:
                        self.logError(traceback.format_exc())
            for handler in self.event_handler:
                try:
                    handler.on_preprintsend(gline, self.queueindex,
                                            self.mainqueue)
                except:
                    logging.error(traceback.format_exc())
            if self.preprintsendcb:
                if self.mainqueue.has_index(self.queueindex + 1):
                    (next_layer,
                     next_line) = self.mainqueue.idxs(self.queueindex + 1)
                    next_gline = self.mainqueue.all_layers[next_layer][
                        next_line]
                else:
                    next_gline = None
                gline = self.preprintsendcb(gline, next_gline)
            if gline is None:
                self.queueindex += 1
                self.clear = True
                return
            tline = gline.raw
            if tline.lstrip().startswith(";@"):  # check for host command
                self.process_host_command(tline)
                self.queueindex += 1
                self.clear = True
                return

            # Strip comments
            tline = gcoder.gcode_strip_comment_exp.sub("", tline).strip()
            if tline:
                self._send(tline, self.lineno, True)
                self.lineno += 1
                for handler in self.event_handler:
                    try:
                        handler.on_printsend(gline)
                    except:
                        logging.error(traceback.format_exc())
                if self.printsendcb:
                    try:
                        self.printsendcb(gline)
                    except:
                        self.logError(traceback.format_exc())
            else:
                self.clear = True
            self.queueindex += 1
        else:
            self.printing = False
            self.clear = True
            if not self.paused:
                self.queueindex = 0
                self.lineno = 0
                self._send("M110", -1, True)

    def _send(self, command, lineno=0, calcchecksum=False):
        # Only add checksums if over serial (tcp does the flow control itself)
        if calcchecksum and not self.printer_tcp:
            prefix = "N" + str(lineno) + " " + command
            command = prefix + "*" + str(self._checksum(prefix))
            if "M110" not in command:
                self.sentlines[lineno] = command
        if self.printer:
            self.sent.append(command)
            # run the command through the analyzer
            gline = None
            try:
                gline = self.analyzer.append(command, store=False)
            except:
                logging.warning(
                    _("Could not analyze command %s:") % command + "\n" +
                    traceback.format_exc())
            if self.loud:
                logging.info("SENT: %s" % command)

            for handler in self.event_handler:
                try:
                    handler.on_send(command, gline)
                except:
                    logging.error(traceback.format_exc())
            if self.sendcb:
                try:
                    self.sendcb(command, gline)
                except:
                    self.logError(traceback.format_exc())
            try:
                self.printer.write((command + "\n").encode('ascii'))
                if self.printer_tcp:
                    try:
                        self.printer.flush()
                    except socket.timeout:
                        pass
                self.writefailures = 0
            except socket.error as e:
                if e.errno is None:
                    self.logError(
                        _("Can't write to printer (disconnected ?):") + "\n" +
                        traceback.format_exc())
                else:
                    self.logError(
                        _("Can't write to printer (disconnected?) (Socket error {0}): {1}"
                          ).format(e.errno, decode_utf8(e.strerror)))
                self.writefailures += 1
            except SerialException as e:
                self.logError(
                    _("Can't write to printer (disconnected?) (SerialException): {0}"
                      ).format(decode_utf8(str(e))))
                self.writefailures += 1
            except RuntimeError as e:
                # RuntimeError has no errno/strerror attributes; use its message
                self.logError(
                    _("Socket connection broken, disconnected: {0}").format(
                        decode_utf8(str(e))))
                self.writefailures += 1
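
A standalone illustration of the checksummed line framing built by _send() above: the command gets an "N<lineno> " prefix and a "*<checksum>" suffix, where the checksum is the XOR fold of every preceding byte, matching printcore._checksum.

from functools import reduce

def checksum(command):
    # same XOR fold as printcore._checksum
    return reduce(lambda x, y: x ^ y, map(ord, command))

def frame(command, lineno):
    prefix = f"N{lineno} {command}"
    return f"{prefix}*{checksum(prefix)}"

print(frame("M105", 0))  # -> N0 M105*39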
Example #57
0
from collections import defaultdict
from queue import Queue

s = open("day9.txt").read()

l = [int(x) for x in s.strip().split("\n")]

preamble_length = 25  # 5 for the small sample input

q = Queue()

x_counts = defaultdict(int)

target = None

for i, x in enumerate(l):
    if i < preamble_length:
        x_counts[x] += 1
        q.put_nowait(x)
        continue
    found = False
    for x2 in q.queue:
        # Handle the case where the x complement is equal to x2
        if x_counts[x - x2] > 0 and (x2 != x - x2 or x_counts[x - x2] > 1):
            found = True
            break
    if not found:
        target = x
        break
    # Update
    x_counts[x] += 1
    q.put_nowait(x)
    x_counts[q.get_nowait()] -= 1
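
The same sliding-window validity check, minus the Queue/defaultdict bookkeeping, run against the small worked example from the puzzle statement (preamble of 5), where the first invalid number is 127. This brute-forces pairs with combinations() instead of the O(1) complement lookup above.

from itertools import combinations

example = [35, 20, 15, 25, 47, 40, 62, 55, 65, 95,
           102, 117, 150, 182, 127, 219, 299, 277, 309, 576]
PRE = 5

for i in range(PRE, len(example)):
    window = example[i - PRE:i]
    if not any(a + b == example[i] for a, b in combinations(window, 2)):
        print("first invalid:", example[i])  # -> first invalid: 127
        break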
Example #58
0
class NakedSwordSearchIE(NakedSwordBaseIE):
    IE_NAME = "nakedsword:searchresult:playlist"
    _VALID_URL = r'https?://(?:www\.)?nakedsword.com/search\?(?P<query>.+)'
    _SEARCH_SITE_URL = 'https://vod.nakedsword.com'

    _CATEGORIES = {
        'alt': '687',
        'amateur': '501',
        'anal': '582',
        'asian': '502',
        'auto-f******o': '621',
        'bareback': '567',
        'bdsm': '511',
        'bear': '561',
        'big-dick': '515',
        'bisexual': '516',
        'black': '503',
        'b*****b': '510',
        'blue-collar': '683',
        'body-builders': '574',
        'boyfriends': '648',
        'brazilian': '651',
        'british': '693',
        'bubble-butt': '653',
        'chubs': '571',
        'classic': '556',
        'compilation': '594',
        'cops': '581',
        'cowboy': '563',
        'cream-pies': '672',
        'creator-content': '710',
        'cumshot': '512',
        'czech': '647',
        'daddy': '681',
        'd***o': '584',
        'double-penetration': '673',
        'euro': '521',
        'exclusive': '709',
        'extreme-penetration': '611',
        'feature': '523',
        'fetish': '505',
        'foot': '524',
        'fratboys': '579',
        'french-speaking': '695',
        'g******g': '506',
        'gender-queer': '674',
        'gloryhole': '624',
        'hardcore': '596',
        'hidden-camera': '592',
        'high-definition': '685',
        'instructional': '554',
        'international': '559',
        'interracial': '528',
        'japanese-unmosaic': '664',
        'jocks': '562',
        'latin': '514',
        'leather': '555',
        'massage': '686',
        'masturbation': '532',
        'mature': '536',
        'military': '593',
        'model': '646',
        'muscles': '557',
        'new-release': '513',
        'orgies': '537',
        'outdoors': '580',
        'parody': '684',
        'pigs': '649',
        'pissing': '540',
        'pre-condom': '661',
        'prison-sex': '688',
        'punishment': '620',
        'russian': '583',
        'safe-sex': '657',
        'sale-downloads': '698',
        'sale-rentals': '700',
        'sale-streaming': '703',
        'shaving': '542',
        'softcore': '585',
        'spanish-speaking': '544',
        'spanking': '545',
        'str8-bait': '606',
        'straight-for-gay': '659',
        'taboo': '702',
        'threeway': '644',
        'twink': '566',
        'ultra-high-definition': '707',
        'uncut': '604',
        'uniform': '558',
        'vintage': '569',
        'voyeur': '551',
        'vr-3d-360': '706',
        'white-collar': '682',
        'wrestling': '608'
    }
    _SETTINGS = {
        'abandonedbuilding': '48146',
        'airplane': '48001',
        'alley': '48002',
        'altar': '48003',
        'arcade': '48098',
        'arena': '48004',
        'artgallery': '48121',
        'athleticcourt': '48125',
        'attic': '48148',
        'balcony': '48005',
        'balletstudio': '48006',
        'bar': '48007',
        'barbershop-salon': '48135',
        'barn': '48008',
        'barracks': '48122',
        'basement': '48009',
        'bathroom': '48010',
        'bathtub': '48011',
        'beach': '48012',
        'bedroom': '48013',
        'boat': '48014',
        'bowlingalley': '48015',
        'boxingring': '48016',
        'bus': '48017',
        'business': '48144',
        'cabin': '48018',
        'cafeteria': '48147',
        'cage': '48019',
        'casino': '48021',
        'cave': '48139',
        'church': '48022',
        'circus': '48100',
        'classroom': '48023',
        'closet': '48024',
        'compilation': '48132',
        'conferenceroom': '48094',
        'constructionsite': '48112',
        'conventioncenter': '48123',
        'couch': '48110',
        'courtroom': '48025',
        'courtyard': '48145',
        'crypt': '48026',
        'diningroom': '48027',
        'doctoroffice': '48028',
        'dojo': '48029',
        'dormroom': '48105',
        'dressingroom': '48030',
        'dungeon': '48031',
        'elevator': '48032',
        'fantasy': '48113',
        'farm': '48114',
        'firestation': '48033',
        'forest': '48108',
        'funeralhome': '48142',
        'gameroom': '48128',
        'garage': '48034',
        'gazebo': '48107',
        'genericroom': '48035',
        'gloryhole': '48136',
        'graveyard': '48117',
        'gym': '48036',
        'hairsalon': '48037',
        'hallway': '48038',
        'hangar': '48131',
        'hardwarestore': '48099',
        'helicopter': '48039',
        'hospitalroom': '48040',
        'hotelroom': '48041',
        'icecreamparlor': '48109',
        'invehicle': '48020',
        'interrogationroom': '48134',
        'jacuzzi': '48042',
        'jailcell': '48043',
        'junkyard': '48111',
        'kitchen': '48044',
        'laboratory': '48045',
        'lake': '48140',
        'laundryroom': '48046',
        'library': '48106',
        'limousine': '48047',
        'liquorstore': '48091',
        'livingroom': '48048',
        'lobby': '48049',
        'lockerroom': '48050',
        'lounge': '48051',
        'massageparlor': '48052',
        'militarybase': '48129',
        'motorhome': '48053',
        'movieset': '48054',
        'nightclub': '48141',
        'office': '48055',
        'onvehicle': '48126',
        'outdoors': '48056',
        'paddedcell': '48057',
        'parkinglot': '48095',
        'patio': '48127',
        'photostudio': '48058',
        'playground': '48104',
        'pool': '48130',
        'poolhall': '48059',
        'pooltable': '48138',
        'poolside': '48060',
        'porch': '48103',
        'pornshop': '48101',
        'publicplace': '48061',
        'radiostudio': '48062',
        'restaurant': '48063',
        'rooftop': '48064',
        'sauna-steamroom': '48065',
        'school': '48066',
        'securityoffice': '48124',
        'sewer': '48096',
        'sexclub': '48067',
        'sexswing': '48115',
        'shed': '48068',
        'shed-shack': '48133',
        'shipcabin': '48069',
        'shootingrange': '48137',
        'shower': '48070',
        'spaceship': '48071',
        'stable': '48072',
        'stage': '48073',
        'staircase': '48102',
        'stairs': '48074',
        'store': '48075',
        'stripclub': '48076',
        'swimmingpool': '48077',
        'tattooparlor': '48078',
        'televisionstudio': '48119',
        'tenniscourt': '48079',
        'tent': '48080',
        'theater': '48081',
        'trailer': '48082',
        'train': '48083',
        'trainstation': '48084',
        'underwater': '48085',
        'van': '48116',
        'waitingroom': '48120',
        'warehouse': '48086',
        'waterfall': '48087',
        'whorehouse': '48088',
        'winecellar': '48089',
        'woods-jungle': '48090',
        'workshop': '48118'
    }
    _SEX_ACTS = {
        '3-way': '32001',
        'analdaisychain': '32002',
        'analsex': '32005',
        'asstomouth': '32006',
        'asstoothermouth': '32007',
        'b*****b': '32010',
        'bondage': '32012',
        'bootlicking': '32025',
        'breastplay': '32125',
        'bukkake': '32015',
        'casting': '32153',
        'choking': '32017',
        'circlejerk': '32140',
        'clubbing': '32018',
        'c**k&ballstorture': '32064',
        'collar&lead-leash': '32026',
        'creampie': '32019',
        'cumswallowing': '32021',
        'cumswap': '32023',
        'deepthroating': '32024',
        'docking': '32102',
        'domination': '32112',
        'doublepenetration': '32028',
        'enema': '32107',
        'exhibitionism': '32108',
        'extremepenetration': '32158',
        'faceslapping': '32034',
        'facesitting': '32035',
        'f******g': '32037',
        'fetish': '32138',
        'fingercuffing-anal': '32038',
        'fingercuffing-dp': '32039',
        'fingercuffing-vaginal': '32040',
        'fishhooking': '32101',
        'fisting': '32110',
        'fondling': '32145',
        'foodplay': '32120',
        'footjob': '32044',
        'footplay': '32041',
        'gagging': '32045',
        'g******g': '32047',
        'gapes': '32048',
        'girlongirlaction': '32049',
        'grinding': '32050',
        'grooming': '32131',
        'hairpulling': '32051',
        'handjob': '32052',
        'humiliation': '32123',
        'jousting': '32054',
        'lactation': '32111',
        'maleonmaleaction': '32149',
        'massage': '32104',
        'masturbation': '32055',
        'modeling': '32105',
        'multiplepops': '32056',
        'nippleplay': '32156',
        'oralsex': '32011',
        'orgy': '32063',
        'pissing': '32066',
        'ponyplay': '32124',
        'punishment': '32067',
        'reversegangbang': '32069',
        'rimjob': '32070',
        'rustytrombone': '32071',
        'self-bondage': '32113',
        'self-f*****g': '32143',
        'self-torture': '32154',
        'shaving': '32072',
        'showering-bathing': '32116',
        'sloppyseconds,anal': '32073',
        'smoking': '32118',
        'snowballing': '32075',
        'spanking-paddling': '32076',
        'spitting': '32078',
        'squirting': '32079',
        'straight-to-anal': '32080',
        'strap-on': '32081',
        'stripping': '32126',
        'teabagging': '32083',
        'throatfucking': '32139',
        'tickling': '32084',
        'tittiefucking': '32086',
        'toesucking': '32087',
        'torture': '32115',
        'toyplay-anal': '32089',
        'toyplay-cockandball': '32144',
        'toyplay-doubleanal': '32090',
        'toyplay-doublepenetration': '32091',
        'toyplay-doublevaginal': '32092',
        'toyplay-oral': '32088',
        'toyplay-vaginal': '32093',
        'trampling': '32122',
        'urethraplay': '32142',
        'vaginalsex': '32097',
        'vomiting': '32098',
        'voyeurism': '32109',
        'wet-messy': '32132',
        'whipping': '32099',
        'worship': '32114',
        'wrestling': '32100'
    }
    _SORTBY = {
        'scenes': ['Popularity', 'Trending', 'Newest'],
        'movies': ['MostWatched', 'Trending', 'Newest', 'Released']
    }
    _CONTENTS = ['movies', 'scenes']
    _PARAMS = {
        'movies': [
            'content', 'pages', 'tag', 'star', 'studio', 'videoquality',
            'director', 'releasedate'
        ],
        'scenes': [
            'content', 'pages', 'tag', 'star', 'studio', 'videoquality',
            'setting', 'sexact', 'position'
        ]
    }
    _STUDIOS = {}
    _STARS = {}

    @staticmethod
    def get_info_conf():
        with open(
                "/Users/antoniotorres/Projects/common/logs/nakedsword_conf.json",
                'r') as file:
            conf = json.load(file)
        return conf

    @staticmethod
    def upt_info_conf():
        conf_str = json.dumps({
            "studios": NakedSwordSearchIE._STUDIOS,
            "stars": NakedSwordSearchIE._STARS})
        with open(
                "/Users/antoniotorres/Projects/common/logs/nakedsword_conf.json",
                'w') as file:
            file.write(conf_str)

    def get_starid(self, starname):

        query = starname.replace(' ', '+')
        url = f"https://vod-classic.nakedsword.com/dispatcher/fts?targetSearchMode=basic&isAdvancedSearch=false&isFlushAdvancedSearchCriteria=false&userQuery={query}d&sortType=Relevance&theaterId=22299&genreId=102&locale=en"

        driver = self.get_driver(usequeue=True)

        try:

            driver.get(url)

            elstar = self.wait_until(
                driver, 60,
                ec.presence_of_element_located(
                    (By.CLASS_NAME, "exactMatchStar")))
            if elstar:
                ela = try_get(elstar.find_elements(By.TAG_NAME, "a"),
                              lambda x: x[0])
                if ela:
                    starid = try_get(
                        re.findall(r'starId=(\d+)', ela.get_attribute('href')),
                        lambda x: x[0])
                    if starid:
                        NakedSwordSearchIE._STARS[starname.lower().replace(
                            ' ', '').replace("/", "-")] = starid
                        NakedSwordSearchIE._STARS = {
                            _key: NakedSwordSearchIE._STARS[_key]
                            for _key in sorted(NakedSwordSearchIE._STARS)
                        }
                        self.to_screen(NakedSwordSearchIE._STARS)
                        NakedSwordSearchIE.upt_info_conf()
                        return starid
        except Exception as e:
            self.to_screen(repr(e))
        finally:
            self.put_in_queue(driver)

    def get_studioid(self, studioname):

        query = studioname.replace(' ', '+')
        url = f"https://vod-classic.nakedsword.com/dispatcher/fts?targetSearchMode=basic&isAdvancedSearch=false&isFlushAdvancedSearchCriteria=false&userQuery={query}&sortType=Relevance&theaterId=22299&genreId=102&locale=en"

        driver = self.get_driver(usequeue=True)

        try:

            driver.get(url)

            elstudio = self.wait_until(
                driver, 60,
                ec.presence_of_element_located(
                    (By.CLASS_NAME, "exactMatchStudio")))
            if elstudio:
                ela = try_get(elstudio.find_elements(By.TAG_NAME, "a"),
                              lambda x: x[0])
                if ela:
                    studioid = try_get(
                        re.findall(r'studioId=(\d+)',
                                   ela.get_attribute('href')), lambda x: x[0])
                    if studioid:
                        NakedSwordSearchIE._STUDIOS[studioname.lower().replace(
                            ' ', '').replace("/", "-")] = studioid
                        NakedSwordSearchIE._STUDIOS = {
                            _key: NakedSwordSearchIE._STUDIOS[_key]
                            for _key in sorted(NakedSwordSearchIE._STUDIOS)
                        }
                        self.to_screen(NakedSwordSearchIE._STUDIOS)
                        NakedSwordSearchIE.upt_info_conf()
                        return studioid
        except Exception as e:
            self.to_screen(repr(e))
        finally:
            self.put_in_queue(driver)

    def get_scenes_ns(self, urls):
        def _get_scenes_url(j):
            _driver = self.get_driver(usequeue=True)

            try:
                while True:
                    _pos, _uq = self._urlqueriesqueue.get()
                    if _uq == "KILL": break
                    self.to_screen(
                        f'[get_scenes][{j}][{_pos}/{self._num}] {_uq}')
                    try:
                        _driver.execute_script("window.stop();")
                        _driver.get(_uq[0])
                        el_title = self.wait_until(
                            _driver, 2,
                            ec.presence_of_element_located(
                                (By.TAG_NAME, "title")))
                        if not el_title: continue
                        elscenes = self.wait_until(
                            _driver, 60,
                            ec.presence_of_all_elements_located(
                                (By.CLASS_NAME, "dts-panel ")))
                        if not elscenes: continue
                        _list_scenes_urls = []
                        for el in elscenes:

                            elinfo = try_get(
                                el.find_elements(By.TAG_NAME, "a"),
                                lambda x: x[0])
                            if not elinfo: continue
                            num_scene = elinfo.text.split(" ")[-1]
                            movie = try_get(
                                re.findall(r'gay/movies/(.+)#',
                                           elinfo.get_attribute('href')),
                                lambda x: x[0])
                            if movie and num_scene:
                                _urlscene = f"https://nakedsword.com/movies/{movie}/scene/{num_scene}"
                                _list_scenes_urls.append(
                                    (_urlscene, _uq[1], _uq[2]))

                        if not _list_scenes_urls: continue
                        _nw = min((_size := len(_list_scenes_urls)), 5)

                        def _check_url(_urlsc, _n):
                            try:
                                self.to_screen(
                                    f'[get_scenes][{j}][{_pos}/{self._num}][check_url][{_n}/{_size}] {_urlsc}'
                                )
                                res = NakedSwordSearchIE._CLIENT.get(_urlsc[0])
                                res.raise_for_status()
                                if res.text:
                                    self._urlscenesqueue.put_nowait(
                                        (_urlsc[0], _urlsc[1], _urlsc[2], _n))

                            except Exception as e:
                                self.to_screen(
                                    f'[get_scenes][{j}][{_pos}/{self._num}][check_url][{_n}/{_size}] error {repr(e)}'
                                )

                        with ThreadPoolExecutor(max_workers=_nw) as _ex:
                            for _k, _elurl in enumerate(_list_scenes_urls):
                                _ex.submit(_check_url, _elurl, _k + 1)

                    except Exception as e:
                        lines = traceback.format_exception(*sys.exc_info())
                        self.to_screen(
                            f"[get_scenes][{j}][{_pos}/{self._num}]  {repr(e)}\n{'!!'.join(lines)}"
                        )

            except Exception as e:
                lines = traceback.format_exception(*sys.exc_info())
                self.to_screen(
                    f"[get_scenes][{j}] {repr(e)}\n{'!!'.join(lines)}")
            finally:
                SeleniumInfoExtractor._QUEUE.put_nowait(_driver)
                self.to_screen(f'[get_scenes][{j}] bye')

        try:

            self._num = len(urls)
            self._urlqueriesqueue = Queue()
            self._urlscenesqueue = Queue()
            for _i, _urlquery in enumerate(urls):
                self._urlqueriesqueue.put_nowait((_i + 1, _urlquery))
            n_workers = min(self._num, 5)
            for _ in range(n_workers):
                self._urlqueriesqueue.put_nowait((-1, "KILL"))
            with ThreadPoolExecutor(max_workers=n_workers) as exe:
                for _j in range(n_workers):
                    exe.submit(_get_scenes_url, _j)

            return list(self._urlscenesqueue.queue)
        except ExtractorError:
            raise
        except Exception as e:
            self.to_screen(repr(e))
            raise ExtractorError(f"{repr(e)}")

    def get_movies_ns(self, urls):
        def _get_movies_url(j):
            _driver = self.get_driver(usequeue=True)

            try:
                while True:
                    _pos, _uq = self._urlqueriesqueue.get()
                    if _uq == "KILL": break
                    self.to_screen(
                        f'[get_movies][{j}][{_pos}/{self._num}] {_uq}')
                    try:
                        _driver.execute_script("window.stop();")
                        _driver.get(_uq[0])
                        el_title = self.wait_until(
                            _driver, 2,
                            ec.presence_of_element_located(
                                (By.TAG_NAME, "title")))
                        if not el_title: continue
                        elmovies = self.wait_until(
                            _driver, 60,
                            ec.presence_of_all_elements_located(
                                (By.CLASS_NAME,
                                 "dts-image-overlay-container")))
                        if not elmovies: continue
                        _list_movies_urls = []
                        for el in elmovies:

                            elinfo = try_get(
                                el.find_elements(By.TAG_NAME, "a"),
                                lambda x: x[0])
                            if not elinfo: continue
                            movie = try_get(
                                re.findall(r'gay/movies/(.+)',
                                           elinfo.get_attribute('href')),
                                lambda x: x[0])
                            if movie:
                                _urlmovie = f"https://nakedsword.com/movies/{movie}"

                                _list_movies_urls.append(
                                    (_urlmovie, _uq[1], _uq[2]))

                        if not _list_movies_urls: continue
                        _nw = min((_size := len(_list_movies_urls)), 5)

                        def _check_url(_urlmv, _n):
                            try:
                                self.to_screen(
                                    f'[get_movies][{j}][{_pos}/{self._num}][check_url][{_n}/{_size}] {_urlmv}'
                                )
                                res = NakedSwordSearchIE._CLIENT.get(_urlmv[0])
                                res.raise_for_status()
                                if 'NakedSword.com | Untitled Page' not in res.text:
                                    self._urlmoviesqueue.put_nowait(
                                        (_urlmv[0], _urlmv[1], _urlmv[2], _n))

                            except Exception as e:
                                self.to_screen(
                                    f'[get_movies][{j}][{_pos}/{self._num}][check_url][{_n}/{_size}] error {repr(e)}'
                                )

                        with ThreadPoolExecutor(max_workers=_nw) as _ex:
                            # Exiting the context manager waits for every
                            # submitted _check_url call to finish.
                            for _k, _elurl in enumerate(_list_movies_urls):
                                _ex.submit(_check_url, _elurl, _k + 1)

                    except Exception as e:
                        lines = traceback.format_exception(*sys.exc_info())
                        self.to_screen(
                            f"[get_movies][{j}][{_pos}/{self._num}]  {repr(e)}\n{'!!'.join(lines)}"
                        )

            except Exception as e:
                lines = traceback.format_exception(*sys.exc_info())
                self.to_screen(
                    f"[get_movies][{j}] {repr(e)}\n{'!!'.join(lines)}")
            finally:
                SeleniumInfoExtractor._QUEUE.put_nowait(_driver)
                self.to_screen(f'[get_movies][{j}] bye')

        try:

            self._num = len(urls)
            self._urlqueriesqueue = Queue()
            self._urlmoviesqueue = Queue()
            for _i, _urlquery in enumerate(urls):
                self._urlqueriesqueue.put_nowait((_i + 1, _urlquery))
            n_workers = min(self._num, 5)
            for _ in range(n_workers):
                self._urlqueriesqueue.put_nowait((-1, "KILL"))
            with ThreadPoolExecutor(max_workers=n_workers) as exe:
                for _j in range(n_workers):
                    exe.submit(_get_movies_url, _j)

            return list(self._urlmoviesqueue.queue)
        except ExtractorError:
            raise
        except Exception as e:
            self.to_screen(repr(e))
            raise ExtractorError(f"{repr(e)}")

    def _real_initialize(self):
        super()._real_initialize()
        conf = NakedSwordSearchIE.get_info_conf()
        NakedSwordSearchIE._STUDIOS = conf['studios']
        NakedSwordSearchIE._STARS = conf['stars']

    def _real_extract(self, url):

        query = re.search(self._VALID_URL, url).group('query')

        params = {
            # split on the first '=' only, so values may themselves contain '='
            el.split('=', 1)[0]: el.split('=', 1)[1]
            for el in query.split('&')
        }

        content = params.get('content', 'scenes')

        if (_sortby := params.get('sort')):
            _sortby = _sortby.replace(' ', '').lower()
            if _sortby == 'mostwatched':
                _sby = 'Popularity' if content == 'scenes' else 'MostWatched'
            else:
                _sby = _sortby.capitalize()
            criteria_list = [{'sort': _sby}]
        else:
Example #59
0
File: queue.py Project: jbqh/miner
from queue import Queue
from threading import Thread
from time import sleep, ctime


def test():
    # Print one item from the shared queue if available, then pause.
    if not q.empty():
        print(q.get_nowait())
    else:
        print('q is empty')
    sleep(5)


data = list(range(21))

q = Queue(5)

th = []

# Process `data` in batches of at most 5: fill the bounded queue, start one
# consumer thread per queued item, wait for them all, then slice off the
# consumed items and repeat until the data runs out.
while not q.full():
    for i in data:
        if not q.full():
            q.put_nowait(i)
    s = q.qsize()
    for i in range(s):
        t = Thread(target=test)
        th.append(t)

    for t in th:
        t.start()

    # Join every worker, not just the last one started, before refilling.
    for t in th:
        t.join()
    th = []
    data = data[s:]
    if not data:
        print('{0} all done'.format(ctime()))
        exit(0)
Example #60
0
class CoapAutomationHandler(threading.Thread):
    def __init__(self):
        super().__init__()
        self.q = Queue()
        self.processing_lock = threading.Lock()
        self.stop_event = threading.Event()  # used to signal termination to the threads
        self.status = AutomationStatus()
        if btmon_enable:
            self.btmon = None
            self.btmon_process = None
            self.max_btmon_tries = 10
        self.result_path = None
        self.test_path = None
        self.newtmgr = None
        self.rtt2pty = None

    def process(self, job):
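        # Dispatch a single (instrumentation_step, params) job; each branch
        # below serializes its work with processing_lock.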
        instrumentation_step, params = job
        test_suite = params['CODE_SUITE']
        test_group = params['CODE_TEST_GROUP']
        logging.debug('Test suite {}'.format(test_suite))
        logging.debug('Processing {}'.format(test_group))

        if instrumentation_step.startswith(INSTR_STEP_BEFORE_RUN):
            self.processing_lock.acquire()
            logging.debug("Acquire lock")
            logging.debug("Executing: before run")
            # print test run info
            logging.debug('Running suite: ' + params['CODE_SUITE'])
            logging.debug('Platform version: ' + params['CODE_SUITE_PLATFORM_VERSION'])
            logging.debug('Monitor version: ' + params['CODE_MONITOR_VERSION'])
            # make folder for results
            self.result_path = Path.cwd() / asctime()
            self.result_path.mkdir(parents=True, exist_ok=True)
            if crash_detection:
                # make newtmgr instance and add connection
                self.newtmgr = NewtMgr(profile_name='test', conn_type='oic_ble', connstring='"peer_name=c5"')
                self.newtmgr.make_profile()
            logging.debug("Release lock")
            # consume buffered serial data by reading it and saving to /dev/null
            if serial_read_enable:
                self.rtt2pty = RTT2PTY()
                self.rtt2pty.rtt2pty_start()
            self.processing_lock.release()
            return

        if instrumentation_step.startswith(INSTR_STEP_BEFORE_CASE):
            self.processing_lock.acquire()
            logging.debug("Acquire lock")
            logging.debug("Executing: before case")
            # make folder for this test results
            if crash_detection:
                self.newtmgr.testcase = params['CODE_TEST_CASE']
            if serial_read_enable:
                self.rtt2pty.testcase = params['CODE_TEST_CASE']
            if btmon_enable:
                self.test_path = self.result_path / ('#' + params['CODE_TEST_CASE'])
                self.test_path.mkdir(parents=True, exist_ok=True)
            logging.debug("Acquire lock")
            test_case = params['CODE_TEST_CASE']
            # open btmon
            if btmon_enable:
                self.btmon = BTMonitor(testcase=test_case)
                rc = self.btmon.begin()
                logging.debug(rc)
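                # begin() returning 1 signals the capture failed to start;
                # recreate the monitor and retry until it comes up.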
                while rc == 1:
                    logging.debug('restarting btmon')
                    self.btmon.close()
                    self.btmon = BTMonitor(testcase=test_case)

                    rc = self.btmon.begin()
                    logging.debug(rc)
            logging.debug("Release lock")
            self.processing_lock.release()
            return

        if instrumentation_step.startswith(INSTR_STEP_AS_INSTR):
            self.processing_lock.acquire()
            logging.debug("Executing: as instrumentation")
            logging.debug("Acquire lock")
            logging.debug("Release lock")
            self.processing_lock.release()
            return

        if instrumentation_step.startswith(INSTR_STEP_AFTER_CASE):
            self.processing_lock.acquire()
            logging.debug("Acquire lock")
            logging.debug("Executing: after case")
            # close btmon
            # copy log files to results folder
            if btmon_enable:
                self.btmon.close()
                self.btmon = None
                snoop_file = Path.cwd() / (params['CODE_TEST_CASE'] + '.snoop')
                shutil.copy(snoop_file, self.test_path)
                Path.unlink(snoop_file)
            logging.debug("Release lock")
            self.processing_lock.release()
            return

        if instrumentation_step.startswith(INSTR_STEP_INSTR_FAIL):
            self.processing_lock.acquire()
            logging.debug("Acquire lock")
            logging.debug("Executing: fail")
            reset_process = subprocess.Popen(shlex.split(reset_cmd),
                                             shell=False,
                                             stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE)
            reset_process.wait()
            logging.debug("Release lock")
            self.processing_lock.release()
            return

        if instrumentation_step.startswith(INSTR_STEP_AFTER_RUN):
            self.processing_lock.acquire()
            logging.debug("Acquire lock")
            logging.debug("Executing: after run")
            if serial_read_enable:
                self.rtt2pty.rtt2pty_stop()
                shutil.move('iut-mynewt.log', self.result_path)
            final_file = coap_cfg.log_filename_final + str(datetime.now()) + '.log'
            # delete temporary logs and create final with timestamp
            base_dir = os.path.dirname(__file__)
            final_server_logfile = os.path.join(base_dir, final_file)
            shutil.copy(os.path.join(base_dir, coap_cfg.log_filename_temp),
                        final_server_logfile)
            shutil.move(final_server_logfile, self.result_path)
            os.remove(os.path.join(base_dir, coap_cfg.log_filename_temp))
            # kill server process
            p = subprocess.check_output(['ps', '-e', '-f'])
            p = p.decode().splitlines()
            for line in p:
                if 'coap_main.py' in line:
                    pid = int(line.split()[1])
                    os.kill(pid, signal.SIGKILL)
            logging.debug("Release lock")
            self.processing_lock.release()
            return

    def run(self):
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
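        # Give this worker thread its own event loop so any asyncio-based
        # code reached from process() has one available.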

        # Block on the queue with a timeout instead of busy-polling, so the
        # thread yields the CPU while idle but still notices stop_event.
        # (Assumes Empty is imported from the queue module, alongside Full.)
        while not self.stop_event.is_set():
            try:
                item = self.q.get(timeout=0.5)
            except Empty:
                continue
            logging.debug('Getting ' + str(item) + ' : ' + str(self.q.qsize()) + ' items in queue')
            try:
                self.process(item)
            except Exception as e:
                logging.error('Exception: {}'.format(str(e)))
                self.status.errors.append(str(e))
                self.status.verdict = 'fail'
            finally:
                if self.processing_lock.locked():
                    self.processing_lock.release()
        return

    def post(self, instrumentation_step, params):
        # If we receive next step and we are still processing
        # previous step then we should wait for it to finish
        if self.processing_lock.locked():
            logging.debug('Automation is still processing')

            self.processing_lock.acquire(blocking=True)
            logging.debug('Post release')
            self.processing_lock.release()

        try:
            self.q.put_nowait((instrumentation_step, params))
        except Full:
            self.status.verdict = 'fail'
            self.status.errors.append('Processing queue is full')
            return False

        return True

    def get_status(self):
        s = self.status
        self.status = AutomationStatus()
        return s

    def stop(self):
        self.stop_event.set()

    # Deliberately defined without `self`: it is applied as a decorator at
    # class-definition time (see make_server below), not called on instances.
    def parallel(func):
        def parallel_func(*args, **kw):
            p = Process(target=func, args=args, kwargs=kw)
            p.start()

        return parallel_func

    @parallel
    def make_server(self, host, port):
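        # Start the consumer thread (threading.Thread.start -> run) before
        # blocking this child process in the HTTP serve loop.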
        self.start()
        httpd = HTTPServer((host, port),
                           MakeInstrumentationServer(self))
        try:
            httpd.serve_forever()
        except KeyboardInterrupt:
            httpd.shutdown()
            return 0