Example #1
 def test_bad_attr(self):
     """
     Accessing a bad attribute is only reported once (see #150)
     """
     # Parent directory setup
     os.chdir(self.tmpdir)
     sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
     basename = os.path.basename(sub_tmpdir)
     # Child setup
     fh = open(os.path.join(basename, '__init__.py'), 'w')
     fh.write('\n')
     fh.close()
     fh = open(os.path.join(basename, 'test_pool_runner_bad_attr.py'), 'w')
     fh.write(dedent(
         """
         import unittest
         class A(unittest.TestCase):
             def testBadAttr(self):
                 "".garbage
         """))
     fh.close()
     module_name = basename + '.test_pool_runner_bad_attr.A.testBadAttr'
     result = Queue()
     poolRunner(module_name, result)
     result.get_nowait() # should get the target name
     result.get_nowait() # should get the result
     result.get_nowait() # should get None
     # should raise Empty unless the extra result bug is present
     self.assertRaises(Empty, result.get_nowait)
Example #2
 def test_sensor(self):
     q = Queue()
     ID = 'test'
     # Test without noise
     test_sensor = sensor.Sensor(self.athlete, q, ID, noise = 0)
     test_sensor.start()
     sleep(.11)
     test_sensor.stop()
     data = []
     while not q.empty():
         e = q.get_nowait()
         data.append(e.coords)
     assert e.ID == ID
     assert len(data) == 3
     assert allclose(data, 0)
     # Test with noise
     test_sensor = sensor.Sensor(self.athlete, q, ID, noise = 1)
     test_sensor.start()
     sleep(.11)
     test_sensor.stop()
     data = []
     while not q.empty():
         e = q.get_nowait()
         data.append(e.coords)
     assert not (array(data) == 0).any()  # with noise, no coordinate should be exactly zero
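
Both drain loops above guard on q.empty(), which is only a point-in-time snapshot and can race with a producer that is still running. A minimal sketch of a drain that relies on the Empty exception instead (the helper name is illustrative, not from the example):

from Queue import Empty  # Python 3: from queue import Empty

def drain(q):
    """Pop every item currently in the queue without blocking."""
    items = []
    while True:
        try:
            items.append(q.get_nowait())
        except Empty:
            return items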
Example #3
class Bus(object):

    def __init__(self, bus_url):
        self._bus_url = bus_url
        self._messages = Queue()

    def start(self):
        self._start_listening_thread()

    def stop(self):
        self._consumer.should_stop = True
        self._bus_thread.join()
        while not self._messages.empty():
            self._messages.get_nowait()

    def assert_msg_received(self, msg_name, body):
        while not self._messages.empty():
            try:
                message = self._messages.get(timeout=10.0)
                if message['name'] == msg_name and message['data'] == body:
                    return
            except Empty:
                break
        assert False, '{} not received'.format(msg_name)

    def _start_listening_thread(self):
        self._bus_thread = threading.Thread(target=self._start_consuming)
        self._bus_thread.start()

    def _start_consuming(self):
        with kombu.Connection(self._bus_url) as conn:
            self._consumer = Consumer(conn, self._messages)
            self._consumer.run()
Example #4
 def run_steps_parallel(self, steps):
     errors = []
     threads = []
     bucket = Queue()
     namedQueue[current_thread().name] = bucket
     try:
         for step in steps:
             threads.append(MyThread(bucket=bucket, target=self.run_step, args=(step,)))
         map(lambda x: x.start(), threads)
         map(lambda x: x.join(), threads)
         if current_thread().name == 'MainThread':
             root = ParallelLogNode('MainThread')
             post_order(root, root.children, self._context.output)
             root.children = []
             if not bucket.empty():
                 raise bucket.get_nowait()
         else:
             if not bucket.empty():
                 error = bucket.get_nowait()
                 namedQueue[current_thread().parent].put(error)
                 raise error
     except ExecutionPassed as exception:
         exception.set_earlier_failures(errors)
         raise exception
     except ExecutionFailed as exception:
         errors.extend(exception.get_errors())
     if errors:
         raise ExecutionFailures(errors)
Example #5
    def batch_futures(self, session, statement_generator):
        concurrency = 10
        futures = Queue(maxsize=concurrency)
        number_of_timeouts = 0
        for i, statement in enumerate(statement_generator):
            if i > 0 and i % (concurrency - 1) == 0:
                # clear the existing queue
                while True:
                    try:
                        futures.get_nowait().result()
                    except (OperationTimedOut, WriteTimeout):
                        ex_type, ex, tb = sys.exc_info()
                        number_of_timeouts += 1
                        log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
                        del tb
                        time.sleep(1)
                    except Empty:
                        break

            future = session.execute_async(statement)
            futures.put_nowait(future)

        while True:
            try:
                futures.get_nowait().result()
            except (OperationTimedOut, WriteTimeout):
                ex_type, ex, tb = sys.exc_info()
                number_of_timeouts += 1
                log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
                del tb
                time.sleep(1)
            except Empty:
                break
        return number_of_timeouts
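
The drain-and-wait loop in batch_futures appears twice verbatim. A possible refactor, reusing the example's own imports, logger, and driver exception types, pulls it into a helper:

def drain_futures(futures, number_of_timeouts):
    """Wait on every queued future, tolerating timeouts; return the updated count."""
    while True:
        try:
            futures.get_nowait().result()
        except (OperationTimedOut, WriteTimeout):
            ex_type, ex, tb = sys.exc_info()
            number_of_timeouts += 1
            log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
            del tb
            time.sleep(1)
        except Empty:
            return number_of_timeouts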
Example #6
    class StreamHandler(object):
        def __init__(self, stream):
            self.stream = stream
            self.alive = True
            self.out = Queue()
            def add():
                while self.alive:
                    line = self.stream.readline()
                    if line:
                        self.out.put(line)
                    else:
                        break
            self.thd = Thread(target=add)
            self.thd.daemon = True
            self.thd.start()

        def readline(self, timeout=None):
            try:
                return self.out.get(block=True, timeout=timeout)
            except Empty:
                return None

        def kill(self):
            self.alive = False

        def clear(self):
            while self.out.qsize() > 0:
                self.out.get_nowait()
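
A sketch of how StreamHandler might be wired to a subprocess pipe (the command and timeout are illustrative; this assumes the example's Thread and Queue imports are in scope):

import subprocess

proc = subprocess.Popen(['ping', '-c', '3', 'localhost'], stdout=subprocess.PIPE)
handler = StreamHandler(proc.stdout)
line = handler.readline(timeout=1.0)  # None if nothing arrived in time
while line is not None:
    print line.rstrip()
    line = handler.readline(timeout=1.0)
handler.kill()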
Example #7
class ThreadPool(object):
    """Pool of threads consuming tasks from a queue"""
    def __init__(self, workers):
        self.tasks = Queue()
        self.workers = [Worker(self.tasks) for x in xrange(workers)]
        self.state = ThreadPoolState.IDLE

    def apply_async(self, func, args, **kargs):
        """Add a task to the queue"""
        if self.state != ThreadPoolState.IDLE:
            raise ThreadPoolError("ThreadPool can't accept any more tasks")
        self.tasks.put((func, args, kargs))

    def close(self):
        self.state = ThreadPoolState.CLOSED
        while not self.tasks.empty():
            self.tasks.get_nowait()
            self.tasks.task_done()
        for worker in self.workers:
            self.tasks.put((None, (), {}))

    def join(self):
        """Wait for completion of all the tasks in the queue"""
        self.state = ThreadPoolState.WAIT_JOIN
        self.tasks.join()
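
Assuming Worker consumes (func, args, kwargs) tuples from the shared queue and treats func=None as a shutdown pill (Worker is not shown above), usage would look roughly like:

def some_task(n):
    print 'processing', n  # placeholder work

pool = ThreadPool(workers=4)
for n in range(10):
    pool.apply_async(some_task, (n,))
pool.join()   # block until every queued task is marked done
pool.close()  # drain leftovers and send one (None, (), {}) pill per worker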
Example #8
    def test_process_hold_queue(self):
        bucket_queue = Queue()
        hold_queue = Queue()
        m = PeriodicWorkController(bucket_queue, hold_queue)
        m.process_hold_queue()

        scratchpad = {}

        def on_accept():
            scratchpad["accepted"] = True

        hold_queue.put((MockTask("task1"),
                        datetime.now() - timedelta(days=1),
                        on_accept))

        m.process_hold_queue()
        self.assertRaises(Empty, hold_queue.get_nowait)
        self.assertTrue(scratchpad.get("accepted"))
        self.assertEquals(bucket_queue.get_nowait().value, "task1")
        tomorrow = datetime.now() + timedelta(days=1)
        hold_queue.put((MockTask("task2"), tomorrow, on_accept))
        m.process_hold_queue()
        self.assertRaises(Empty, bucket_queue.get_nowait)
        value, eta, on_accept = hold_queue.get_nowait()
        self.assertEquals(value.value, "task2")
        self.assertEquals(eta, tomorrow)
Example #9
class UrlResolverManager():

    def __init__(self, cacheFilename, tweetResolverListener):
        self.__queue = Queue(maxsize=50)
        self.__workers = []
        self.__tweetResolverListener = tweetResolverListener
        self.__resolverCache = UrlResolverCache(cacheFilename)
        for i in range(0,30):
            self.__workers.append(UrlResolverWorker(self, self.__queue, self.__resolverCache, i))
        self.__lastReportedQueueSize = None

    def start(self):
        self.__resolverCache.start()
        for worker in self.__workers:
            worker.start()

    def stop(self):
        self.__resolverCache.stop()
        for worker in self.__workers:
            worker.stop()
        logger.info("Urls in queue: " + str(self.__queue.qsize()))
        while not self.__queue.empty():
            self.__queue.get_nowait()

    def pauseWorkers(self):
        for worker in self.__workers:
            worker.pauseJob()

    def continueWorkers(self):
        logger.debug("Continue workers job")
        for worker in self.__workers:
            worker.continueJob()

    def cacheHitRate(self):
        return self.__resolverCache.hitRate()

    def addUrlToQueue(self, url):
        if url.getState() is not None:
            logger.info(u"Url already in queue:" + unicode(url))
            return
        url.setState("pending")
        self.__queue.put(url, timeout=3)
        s = self.__queue.qsize()
        if s % 10 == 0 and s > 10 and self.__lastReportedQueueSize != s:
            self.__lastReportedQueueSize = s
            logger.warning("Queue size is too big: " + unicode(s))

    def afterResolveUrl(self, url):
        if url.getState() != "pending":
            raise ValueError(unicode(url))
        url.setState("finished")
        if not url.isError():
            url.getText()
        self.__notifyUrlResolved(url)

    def __notifyUrlResolved(self, url):
        tweet = url.tweet()
        if tweet.isResolved():
            self.__tweetResolverListener.tweetResolved(tweet)
Example #10
def main(fifo_name, lockfile):
    """Run a poller loop to manage the player
    """
    fp = open(lockfile, 'w')
    try:
        fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        # another instance is running
        sys.exit(1)
    # load initial playlist
    playlist = Playlist('shuf')
    # launch player
    player = Popen(['mpg123', '-R'], stdout=PIPE, stdin=PIPE)
    player_output = Queue()
    player_poll_thread = Thread(target=poll_mpg123,
                                args=(player.stdout, player_output))
    player_poll_thread.daemon = True   # thread dies with the program
    player_poll_thread.start()

    controller_input = Queue()
    controller_poll_thread = Thread(target=poll_vim,
                                    args=(fifo_name, controller_input))
    controller_poll_thread.daemon = True
    controller_poll_thread.start()

    while True:
        # poll player
        try:
            message = player_output.get_nowait()
            print "message %s" % message
        except Empty:
            message = None
        if message == '@P 0':
            player.stdin.write("LOAD %s\n" % playlist.next_track())
            print playlist.current_track

        try:
            command = controller_input.get_nowait()
            print "command %s" % command
        except Empty:
            command = None

        if command == 'pause':
            player.stdin.write("PAUSE\n")
        elif command == 'skip':
            player.stdin.write("LOAD %s\n" % playlist.next_track())
            print playlist.current_track
        elif command and command.startswith("load"):
            # Load playlist
            playlist_file = command[5:]
            playlist.load(playlist_file)
            track = playlist.current_track
            if track:
                player.stdin.write("LOAD %s\n" % track)
                print track
        elif command == 'quit':
            # We don't need to tell mpg123 to shut down here because this will
            # send an EOF to the pipe, which signals close anyway.
            exit(0)
Example #11
	def ffmpegprocess(self,fromfilename,tofilename):
		info = {}
		cmd = self.cmdline(fromfilename,tofilename)
		q = Queue()

		self.fp = subprocess.Popen(cmd,shell=shellos, stdout=subprocess.PIPE,stderr=subprocess.PIPE,bufsize=1,cwd=cwd,close_fds=ON_POSIX)		
		self.t = Thread(target=self.enqueue_output, args=(self.fp.stdout, q))
		self.t.daemon = True
		self.t.start()

		while self.isrunning and self.fp.poll() is None:
			while wx.GetApp().Pending():
					wx.GetApp().Dispatch()
					wx.GetApp().Yield(True)
			try:  
				line = q.get_nowait()	
			except Empty:
				pass
			else:
				try:
					info = json.loads(line)
					self.durationbrano = round(float(info["duration"]),2)
					self.positionbrano = round(float(info["position"]),2)
					self.remainingbrano = round(float(info["remaining"]),2)			
					
					self.percbrano = round(((float(info['position']) / float(info['duration'])) * 100),1)					
					self.gaugefile.SetValue(self.percbrano)
					self.percfile.SetLabel(str(self.percbrano)+"%")
					self.timeleft.SetLabel(str(self.remainingbrano))
				
					self.perctotbrani = round((self.video*100 + self.percbrano)/self.totvideo,1)
					self.gaugetotal.SetValue(self.perctotbrani)
					self.perctot.SetLabel(str(self.perctotbrani)+"%")
				except:
					pass

		if self.isrunning:						# True -> conversion finished / False -> exited via STOP or QUIT
			ntimeout = 0
			while not ("result" in info):
				## print ntimeout
				ntimeout = ntimeout + 1
				try:  
					line = q.get_nowait()
					## print "line --> " + line
					info = json.loads(line)					 
				except:
					pass
				if ntimeout >= 10:
					## print "timeout exceeded"
					return False
			if info.get('result', 'no') == 'ok':	# conversion result
				return True
			else:
				return False
Example #12
class PopenWrapper(subprocess.Popen):
    _locked = False

    def __init__(self, args, bufsize=1, stdin=subprocess.PIPE,
                 stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs):
        super(PopenWrapper, self).__init__(args, bufsize=bufsize, stdin=stdin,
                                           stdout=stdout, stderr=stderr,
                                           **kwargs)
        if "--logfile" in args:
            logfile_path = args[args.index("--logfile") + 1]
            logfile = open(logfile_path, 'a')
        else:
            logfile = None
        self.stdout_queue = Queue()
        self.stdout_thread = Thread(
            target=enqueue_stream,
            args=(self.stdout, self.stdout_queue, logfile),
        )
        self.stdout_thread.daemon = True
        self.stdout_thread.start()

        self.stderr_queue = Queue()
        self.stderr_thread = Thread(
            target=enqueue_stream,
            args=(self.stderr, self.stderr_queue, logfile),
        )
        self.stderr_thread.daemon = True
        self.stderr_thread.start()

    def get_stdout_nowait(self):
        try:
            return self.stdout_queue.get_nowait()
        except Empty:
            return None

    def get_stderr_nowait(self):
        try:
            return self.stderr_queue.get_nowait()
        except Empty:
            return None

    _output_generator = None

    def get_output_nowait(self):
        if self._output_generator is None:
            def output_generator():
                while True:
                    yield self.get_stdout_nowait()
                    yield self.get_stderr_nowait()
            self._output_generator = output_generator()
        return self._output_generator.next()

    def communicate(self, *args, **kwargs):
        raise ValueError("Cannot communicate with a PopenWrapper")
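get_output_nowait alternates between the two queues, so any call may return a stdout line, a stderr line, or None. A polling loop built on it might look like this (command is illustrative; a real caller would keep draining after poll() returns):

proc = PopenWrapper(['ls', '-l'])
while proc.poll() is None:
    line = proc.get_output_nowait()
    if line is not None:
        print line.rstrip()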
Example #13
class StdoutStreamer(object):

    def __init__(self, stdout=None):
        self.queue = Queue()
        self.stdout = stdout

    @Threaded
    def startStreamer(self):
        '''Starts this StdoutStreamer's reader.'''
        for line in iter(self.stdout.readline, b''):
            try:
                self.queue.put(line)
            except Full:
                self.queue.get_nowait()
                self.queue.put(line)

    def flush(self):
        '''Flush the StdoutStreamer'''
        #=======================================================================
        # The queue is likely to have a bunch of history
        # in it that we need to flush before we run a command.
        #=======================================================================
        while not self.queue.empty():
            self.queue.get_nowait()

    def retrieve(self, pollingPeriod=0.001, MaxAttempts=50):
        '''Retrieves a generator of the Minecraft's current stdout content.'''
        #=======================================================================
        # This is essentially a do-while, the point being that this Python should wait for Minecraft
        # to start reporting back before the attempts counter starts to increment.
        #=======================================================================
        yield sub('\n', '', self.queue.get())
        attempts = 0
        while attempts <= MaxAttempts:
            #===================================================================
            # @TODO This pollingPeriod/MaxAttempts thing should really be in the config file since it can easily be tuned
            # depending on the machine this is running on. Put it under a "Don't
            # touch unless you know what you're doing" section.
            #
            # Minecraft really has no idea that this Python is controlling it. As such, it
            # doesn't do nice things like send STOP signals for output, leading to this code
            # not really being able to tell when Minecraft is done talking. How I got around this
            # was to repeatedly attempt to retrieve from the stdout queue at a reduced frequency
            # (set to 1kHz). Once MaxAttempts of consecutive Queue.Empty exceptions are hit we give up
            # and conclude that Minecraft has gone silent.
            #
            # If you know of a better way then please contact me at [email protected].
            #===================================================================
            sleep(pollingPeriod)
            try:
                yield sub('\n', '', self.queue.get_nowait())
            except Empty:
                attempts += 1
Example #14
def interact(server):
    # Reading suggestion from http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
    # Writing suggestion from http://stackoverflow.com/questions/3762881/how-do-i-check-if-stdin-has-some-data
    import sys, select
    from threading  import Thread

    try:
        from Queue import Queue, Empty
    except ImportError:
        from queue import Queue, Empty  # python 3.x

    stdin, stdout, stderr = server.run_interactive('python -c "import sys\nprint \'Welcome.  More input!\'\nsys.stdout.flush()\nwhile True:\n  print \'Got: \' + raw_input()\n  sys.stdout.flush()\nprint \'Done.\'"')

    def enqueue_output(out, queue):
        for line in iter(out.readline, b''):
            queue.put(line)
        out.close()

    qout = Queue()
    tout = Thread(target=enqueue_output, args=(stdout, qout))
    tout.daemon = True # thread dies with the program
    tout.start()

    qerr = Queue()
    terr = Thread(target=enqueue_output, args=(stderr, qerr))
    terr.daemon = True # thread dies with the program
    terr.start()

    while True:
        stdin.flush()
        sys.stdout.flush()

        try:
            line = qout.get_nowait()
        except Empty:
            pass
        else:
            print line

        try:
            line = qerr.get_nowait()
        except Empty:
            pass
        else:
            print "ERROR:", line

        hasinputs = select.select([sys.stdin],[],[],0.0)[0]
        for hasinput in hasinputs:
            line = sys.stdin.readline()
            stdin.write(line)

            if line[-1] == '\n':
                line = line[:-1]
Example #15
    def _create_tree(chars):
        """Create hierarchical representation of nodes based on frequency."""
        queue = Queue(maxsize=len(chars))
        for char in reversed(chars):
            queue.put(char)

        while queue.qsize() > 1:
            e1 = queue.get_nowait()
            e2 = queue.get_nowait()
            queue.put([e1, e2])

        root = queue.get_nowait()
        return root
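
A quick trace, assuming _create_tree is callable as a plain function: pairs are taken from the front of the FIFO queue, so ['a', 'b', 'c', 'd'] reduces as follows:

chars = ['a', 'b', 'c', 'd']
root = _create_tree(chars)
# queue after seeding (reversed): d, c, b, a
# round 1: pop d, c -> queue is b, a, ['d', 'c']
# round 2: pop b, a -> queue is ['d', 'c'], ['b', 'a']
# round 3: pop both -> a single root remains
print root  # [['d', 'c'], ['b', 'a']]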
Example #16
class Python_serial(object):
    
    
    def __init__(self):
        super(Python_serial, self).__init__()
        self.cola = Queue(20)
        
    
    def read_port(self):
        for i in range(0, 10):
            print "I'm trying to connect..."
            try:
                ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
                #ser = serial.Serial(i)  # open first serial port
                print "Connected to", ser.name   # check which port was really used
    
                #cola = Queue(10)
    
                while True:
                    line = ser.readline()   # read a '\n' terminated line
                    valor = float(line) / 100
    
                    try:
                        self.cola.put_nowait(valor)
    
                    except Exception, e:
                        self.cola.get_nowait()
                        self.cola.put_nowait(valor)
    
                    print "the value in the queue is:", list(self.cola.queue)
    
                    #print line
                ser.close()             # close port
            except Exception, e:
                # If the port couldn't be read, write random values (to test without being connected)
                # Delete this once a real device is connected!!!!
                #cola = Queue(10)
                print "got here... this is wrong"
                while True:
                    valor = random.randint(0, 25)
      
                    try:
                        self.cola.put_nowait(valor)
      
                    except Exception, e:
                        self.cola.get_nowait()
                        self.cola.put_nowait(valor)
                     #print "the value in the queue is:", list(self.cola.queue)
                    time.sleep(0.1)
Example #17
def generate_items():
	global items, symbols, goto_table

	item_queue = Queue()

	# Add the closure of the first production [S -> S'] to items
	start_production = productions[0]
	start_item = Item(0)
	start_item.insert_item_line(
		ItemLine(start_production.pid, 0, Symbol('$', 1)))
	start_item = closure(start_item)
	items.append(start_item)
	item_queue.put_nowait(start_item)

	while not item_queue.empty():
		current_item = item_queue.get_nowait()  # pop one Item
		for s in symbols:
			next_item = goto(current_item, s)
			# Check whether next_item is empty (has no entries) and whether it already exists in items; a custom membership check is needed here
			if next_item.item_lines:
				tmp = is_item_exist(next_item)
				if not tmp:
					items.append(next_item)
					item_queue.put_nowait(next_item)
				else:
					next_item = tmp
				
				# Record the transition in the goto table
				s_id = current_item.item_id
				
				if s_id not in goto_table.keys():
					goto_table[s_id] = {}

				goto_table[s_id][s.value] = next_item.item_id  # create the edge
Example #18
class HttpPool(object):
    def __init__(self, threads_count, fail_op, log):   
        self._tasks = Queue()   
        self._results = Queue()   
           
        for i in xrange(threads_count):   
            thread.start_new_thread(get_remote_data,    
                                                            (self._tasks, self._results, fail_op, log))   
               
    def add_task(self, tid, host, url, params, headers = {}, method = 'GET', timeout = None):   
        task = {   
            'id' : tid,   
            'conn_args' : {'host' : host} if timeout is None else {'host' : host, 'timeout' : timeout},   
            'headers' : headers,   
            'url' : url,   
            'params' : params,   
            'method' : method,   
            }   
        try:   
            self._tasks.put_nowait(task)   
        except Full:   
            return False  
        return True  
           
    def get_results(self):   
        results = []   
        while True:   
            try:   
                res = self._results.get_nowait()   
            except Empty:   
                break  
            results.append(res)   
        return results   
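
Typical usage, assuming get_remote_data loops over _tasks and pushes each response onto _results (that worker function is not shown above):

import time

pool = HttpPool(threads_count=4, fail_op=None, log=None)
pool.add_task('task-1', 'example.com', '/index.html', params=None)
time.sleep(1)  # give the worker threads a moment to respond
for res in pool.get_results():
    print res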
Example #19
class Pipe(object):
    """Define a thread-safe pipe object to transfer data between filters."""
    def __init__(self):
        """Create a Queue and an event to synchronize the filters."""
        self.queue = Queue()
        self.register = threading.Event()

    def open_register(self):
        """Declare that the pipe is open. Simply sets the register event."""
        self.register.set()

    def close_register(self):
        """Declare that the pipe is closed. Simply clears the register event."""
        self.register.clear()

    def push(self, data_packet):
        """Insert data into the pipe."""
        self.queue.put_nowait(data_packet)

    def pull(self):
        """Return data from the pipe."""
        return self.queue.get_nowait()

    def has_flow(self):
        """Return True if there are data items in the pipe."""
        return not self.queue.empty()

    def is_open(self):
        """Return True if the register event is set."""
        return self.register.is_set()
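
A minimal producer/consumer sketch around Pipe (the packets are made up for illustration):

pipe = Pipe()
pipe.open_register()

# producer filter
for packet in ['alpha', 'beta', 'gamma']:
    pipe.push(packet)
pipe.close_register()

# consumer filter: drain whatever flowed in while the pipe was open
while pipe.has_flow():
    print pipe.pull()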
Example #20
def main(): 
    connection.connect(host="localhost", port=6969)
    if(len(sys.argv) > 2):
        connection.register_nick(sys.argv[2])
    else:
        connection.register_nick("POLITICALLY_CORRECT")
    connection.register_ident(ident="GHB578645_BOT_IDENT", realname="GHB578645_BOT_REALNAME")
    if(len(sys.argv)>3):
        connection.join(sys.argv[3])
    else:
        print "DEFAULT"
        connection.join("#battleship_testing")
    qu = Queue()
    server_thread = irc_server_thread(connection)
    msg_thread = msg_read_thread(qu, proc.stdout)

    server_thread.daemon = True
    msg_thread.daemon = True
    server_thread.start()
    msg_thread.start()
    connection.register_service("This is a test of the service registration module, nothing to see here, move along.")
    while(server_thread.isAlive()):
        try:
            line = qu.get_nowait()
        except Empty:
            pass
        else:
            msg = irc_client_message(strang=line)
            connection.send_message(msg)
    print "/main"
    return 0
Example #21
class Link:
    def __init__(self, socket_obj, connection):
        self._connection = connection
        self._socket = socket_obj
        self._read_buffer = bytearray(BUFFER_SIZE)  # pre-sized so recv_into has room to write
        self._write_queue = Queue()

    def read(self):
        result = bytearray()
        while True:
            read_count = self._connection.recv_into(self._read_buffer, BUFFER_SIZE)
            if read_count > 0:
                result.extend(self._read_buffer[:read_count])
            else:
                break
        return result

    def send(self, data):
        self._write_queue.put_nowait(data)
        self._socket.notify_send_data(self._connection.fileno())

    def write(self):
        while not self._write_queue.empty():  # not_empty is a Condition object, not a method
            data = self._write_queue.get_nowait()
            self._connection.sendall(data)

    def close(self):
        self._connection.close()
Example #22
 def test_notification_handler_valid_notification(self):
     q = Queue()
     listener = NotificationHandler(q)
     listener.callback(parse_root(notification), notification)
     notif = q.get_nowait()
     self.assertEquals(notif.notification_xml, notification)
     self.assertRaises(Empty, q.get_nowait)
Example #23
  def test_sigpipe(self):
    r, w = os.pipe()
    outstream = os.fdopen(w, 'w')
    task = self.create_task(self.context(console_outstream=outstream))
    raised = Queue(maxsize=1)

    def execute():
      try:
        task.execute()
      except IOError as e:
        raised.put(e)

    execution = threading.Thread(target=execute, name='ConsoleTaskTestBase_sigpipe')
    execution.setDaemon(True)
    execution.start()
    try:
      data = os.read(r, 5)
      self.assertEqual('jake\n', data)
      os.close(r)
    finally:
      task.stop()
      execution.join()

    with self.assertRaises(Empty):
      e = raised.get_nowait()

      # Instead of taking the generic assertRaises raises message, provide a more detailed failure
      # message that shows exactly what untrapped error was on the queue.
      self.fail('task raised {0}'.format(e))
Example #24
def startEchoServer(work_dir):
    print("using startEchoServer")
    execlist = ["stdbuf", "-i0", "-o0", "-e0"]
    execlist.extend([os.path.join(work_dir, "echoServer.exe")])
    server = subprocess.Popen(execlist, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    q = Queue()
    t = Thread(target=enqueue_output, args=(server.stdout, q))
    t.daemon = True  # thread dies with the program
    t.start()
    count = 0
    while server.poll() is None:
        if count > 50:
            server.terminate()
            return startEchoServer(work_dir)
        try:
            line = q.get_nowait()  # or q.get(timeout=.1)
        except Empty:
            print("no output yet")
            count += 1
            time.sleep(1)
        else:
            print(line)
            line = str(line)
            if "Server running...waiting for connections." in line:
                return server
            else:
                count += 1
                time.sleep(1)
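
enqueue_output is referenced but not defined in this example; Example #14 below defines the conventional reader thread, reproduced here for reference:

def enqueue_output(out, queue):
    # forward each line from the pipe into the queue, then close it
    for line in iter(out.readline, b''):
        queue.put(line)
    out.close()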
Example #25
    def download_cover(self, log, result_queue, abort,  # {{{
            title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
        cached_url = self.get_cached_cover_url(identifiers)
        if cached_url is None:
            log.info('No cached cover found, running identify')
            rq = Queue()
            self.identify(log, rq, abort, title=title, authors=authors,
                    identifiers=identifiers)
            if abort.is_set():
                return
            results = []
            while True:
                try:
                    results.append(rq.get_nowait())
                except Empty:
                    break
            results.sort(key=self.identify_results_keygen(
                title=title, authors=authors, identifiers=identifiers))
            for mi in results:
                cached_url = self.get_cached_cover_url(mi.identifiers)
                if cached_url is not None:
                    break
        if cached_url is None:
            log.info('No cover found')
            return

        if abort.is_set():
            return
        br = self.browser
        log('Downloading cover from:', cached_url)
        try:
            cdata = br.open_novisit(cached_url, timeout=timeout).read()
            result_queue.put((self, cdata))
        except:
            log.exception('Failed to download cover from:', cached_url)
Example #26
class SimObject(object):
    id = 0
    def __init__(self):
        self.ready = Queue()
        self.target = None
        self.sendVal = None
        SimObject.id += 1
        self.id = SimObject.id
        self.task = self.runTarget()
        self.done = False

    def run(self):
        while not self.done:
            yield
            self.task.next()

    @coroutine
    def runTarget(self):
        first = True
        while not self.ready.empty() or self.target or first:
            first = False
            yield
            if self.target:
                try:
                    self.target.send(self.sendVal)
                except StopIteration:
                    self.target = None
            elif not self.ready.empty():
                self.target = self.ready.get_nowait()
                try:
                    self.target.send(self.sendVal)
                except StopIteration:
                    self.target = None
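
The @coroutine decorator is not shown; a common minimal implementation (a sketch, not necessarily this author's version) primes the generator so the first send() call works:

import functools

def coroutine(func):
    """Advance a generator to its first yield so it can accept send()."""
    @functools.wraps(func)
    def primed(*args, **kwargs):
        gen = func(*args, **kwargs)
        gen.next()  # next(gen) on Python 3
        return gen
    return primed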
Example #27
class UserEventLogZODBHandler(logging.Handler):
    """ Python logging handler to store log records into ZODB UserEventLog containers """

    def __init__(self):
        logging.Handler.__init__(self)
        self.queue = Queue(16)
        self.setFormatter(logging.Formatter('%(asctime)s'))

    def emit(self, record):
        if not getattr(record, 'username', None):
            return

        self.format(record)
        assert hasattr(record, 'asctime'), str(record)
        self.queue.put(record)

        @db.transact
        def flush():
            eventlog = db.get_root()['oms_root']['eventlog']
            try:
                while True:
                    if self.queue.empty():
                        break
                    record = self.queue.get_nowait()
                    eventlog.add_event(record)
            except Empty:
                pass

        if not self.queue.empty():
            d = flush()
            d.addErrback(log.err, system='usereventlog-handler')
Example #28
class MyStore(object):
    def __init__(self):
        """initializes a Store"""
        self.store = Queue(maxsize=100)
        self.size = (24, 80)
        self.typesOfFractals = ['julia', 'mandelbrot', 'test']
    def put(self,item):
        """puts an item into the queue"""
        try:
            self.store.put_nowait(item)
        except:
            pass
        return True
    def get(self):
        """gets an item out of the store"""
        try:
            return self.store.get_nowait()
        except:
            return False
    def set_size(self, height, width):
        """sets a size for the console"""
        self.size = (int(height), int(width))
        return True
    def get_size(self):
        """Gets the console size"""
        return self.size
    def pick_type(self):
        """pick the type of fractal to generate"""
        return random.choice(self.typesOfFractals)
Example #29
def download_cover(log,
        title=None, authors=None, identifiers={}, timeout=30):
    '''
    Synchronous cover download. Returns the "best" cover as per user
    prefs/cover resolution.

    Returned cover is a tuple: (plugin, width, height, fmt, data)

    Returns None if no cover is found.
    '''
    rq = Queue()
    abort = Event()

    run_download(log, rq, abort, title=title, authors=authors,
            identifiers=identifiers, timeout=timeout, get_best_cover=True)

    results = []

    while True:
        try:
            results.append(rq.get_nowait())
        except Empty:
            break

    cp = msprefs['cover_priorities']

    def keygen(result):
        plugin, width, height, fmt, data = result
        return (cp.get(plugin.name, 1), 1/(width*height))

    results.sort(key=keygen)

    return results[0] if results else None
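
The sort key orders by plugin priority first and then by 1/(width*height), so among equal-priority plugins the largest cover sorts first. A tiny check of that ordering with hypothetical plugin names and sizes (the real keygen reads plugin.name):

cp = {'PluginA': 0, 'PluginB': 1}

def keygen(result):
    plugin, width, height, fmt, data = result
    return (cp.get(plugin, 1), 1.0 / (width * height))  # 1.0/ avoids integer division if no future import is in effect

results = [('PluginB', 600, 800, 'jpg', None),
           ('PluginA', 300, 400, 'jpg', None),
           ('PluginB', 1200, 1600, 'jpg', None)]
results.sort(key=keygen)
print [r[:3] for r in results]
# [('PluginA', 300, 400), ('PluginB', 1200, 1600), ('PluginB', 600, 800)]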
Example #30
class DeviceConnectionObject(Foundation.NSObject):
    def initWithHost_port_(self, host, port):
        self = self.init()
        if self is None: return None
        
        self.client = DeviceClient(host, port)
        self.commandQueue = Queue()

        return self

    def queueSize(self):
        return self.commandQueue.qsize()

    def appendCommand_(self, command):
        self.willChangeValueForKey_('queueSize')
        self.commandQueue.put_nowait([str(item) for item in command])
        self.didChangeValueForKey_('queueSize')

    def tickLoop(self):
        asyncore.loop(timeout=0, count=1)
        self.fillSendBuffer()

    def disconnect(self):
        self.client.close()
        self.client = None

    def fillSendBuffer(self):
        if self.client.buffer or self.commandQueue.empty(): return

        self.willChangeValueForKey_('queueSize')
        command = self.commandQueue.get_nowait()
        self.client.buffer = msgpack.packb(command)
        self.didChangeValueForKey_('queueSize')
Example #31
    def search(self, request):
        global number_of_searches

        # init vars
        requests = []
        results_queue = Queue()
        results = {}
        suggestions = set()
        answers = set()
        infoboxes = []

        # increase number of searches
        number_of_searches += 1

        # set default useragent
        # user_agent = request.headers.get('User-Agent', '')
        user_agent = gen_useragent()

        # start search-request for all selected engines
        for selected_engine in self.engines:
            if selected_engine['name'] not in engines:
                continue

            engine = engines[selected_engine['name']]

            # if paging is not supported, skip
            if self.pageno > 1 and not engine.paging:
                continue

            # if search-language is set and engine does not
            # provide language-support, skip
            if self.lang != 'all' and not engine.language_support:
                continue

            # set default request parameters
            request_params = default_request_params()
            request_params['headers']['User-Agent'] = user_agent
            request_params['category'] = selected_engine['category']
            request_params['started'] = time()
            request_params['pageno'] = self.pageno
            request_params['language'] = self.lang
            try:
                # 0 = None, 1 = Moderate, 2 = Strict
                request_params['safesearch'] = int(
                    request.cookies.get('safesearch', 1))
            except ValueError:
                request_params['safesearch'] = 1

            # update request parameters dependent on
            # search-engine (contained in engines folder)
            engine.request(self.query.encode('utf-8'), request_params)

            if request_params['url'] is None:
                # TODO add support of offline engines
                pass

            # create a callback wrapper for the search engine results
            callback = make_callback(selected_engine['name'], results_queue,
                                     engine.response, request_params)

            # create a dictionary which contains all
            # information about the request
            request_args = dict(headers=request_params['headers'],
                                hooks=dict(response=callback),
                                cookies=request_params['cookies'],
                                timeout=engine.timeout,
                                verify=request_params['verify'])

            # specific type of request (GET or POST)
            if request_params['method'] == 'GET':
                req = requests_lib.get
            else:
                req = requests_lib.post
                request_args['data'] = request_params['data']

            # ignoring empty urls
            if not request_params['url']:
                continue

            # append request to list
            requests.append((req, request_params['url'], request_args,
                             selected_engine['name']))

        if not requests:
            return results, suggestions, answers, infoboxes
        # send all search-requests
        threaded_requests(requests)

        while not results_queue.empty():
            engine_name, engine_results = results_queue.get_nowait()

            # TODO type checks
            [
                suggestions.add(x['suggestion']) for x in list(engine_results)
                if 'suggestion' in x and engine_results.remove(x) is None
            ]

            [
                answers.add(x['answer']) for x in list(engine_results)
                if 'answer' in x and engine_results.remove(x) is None
            ]

            infoboxes.extend(
                x for x in list(engine_results)
                if 'infobox' in x and engine_results.remove(x) is None)

            results[engine_name] = engine_results

        # update engine-specific stats
        for engine_name, engine_results in results.items():
            engines[engine_name].stats['search_count'] += 1
            engines[engine_name].stats['result_count'] += len(engine_results)

        # score results and remove duplications
        results = score_results(results)

        # merge infoboxes according to their ids
        infoboxes = merge_infoboxes(infoboxes)

        # update engine stats, using calculated score
        for result in results:
            for res_engine in result['engines']:
                engines[result['engine']]\
                    .stats['score_count'] += result['score']

        # return results, suggestions, answers and infoboxes
        return results, suggestions, answers, infoboxes
Example #32
class JobQueue(object):
    """
    Job manager, waits for jobs to be runnable and then dispatches to 
    a JobRunner.
    """
    STOP_SIGNAL = object()

    def __init__(self, app, dispatcher):
        """Start the job manager"""
        self.app = app
        self.job_lock = False
        # Keep track of the pid that started the job manager, only it
        # has valid threads
        self.parent_pid = os.getpid()
        # Contains new jobs. Note this is not used if track_jobs_in_database is True
        self.queue = Queue()
        # Contains jobs that are waiting (only use from monitor thread)
        ## This and jobs_to_check[] are closest to a "Job Queue"
        self.waiting_jobs = []
        # Helper for interruptable sleep
        self.sleeper = Sleeper()
        self.running = True
        self.dispatcher = dispatcher
        self.monitor_thread = threading.Thread(target=self.__monitor)
        self.monitor_thread.start()
        log.info("job manager started")
        if self.app.config.get('enable_job_recovery', True):
            self.__check_jobs_at_startup()

    def __check_jobs_at_startup(self):
        """
        Checks all jobs that are in the 'new', 'queued' or 'running' state in
        the job directory
        """
        for job in []:  # TODO
            self.queue.put(job)

    def __monitor(self):
        """
        Continually iterate the waiting jobs, checking if each is ready to
        run and dispatching if so.
        """
        # HACK: Delay until after forking, we need a way to do post fork notification!!!
        time.sleep(10)
        while self.running:
            try:
                self.__monitor_step()
            except:
                log.exception("Exception in monitor_step")
            # Sleep
            self.sleeper.sleep(1)

    def __monitor_step(self):
        """
        Called repeatedly by `monitor` to process waiting jobs.
        """
        # Pull all new jobs from the queue at once

        # Get job objects and append to watch queue for any which were
        # previously waiting
        jobs_to_check = []
        for job in self.waiting_jobs:
            jobs_to_check.append(job)
        try:
            while 1:
                message = self.queue.get_nowait()
                if message is self.STOP_SIGNAL:
                    return
                # Unpack the message
                job = message
                # Get the job object and append to watch queue
                jobs_to_check.append(job)
        except Empty:
            pass

        # Iterate over new and waiting jobs and look for any that are
        # ready to run
        new_waiting_jobs = []
        for job in jobs_to_check:
            try:
                self.dispatcher.put(job)
            except Exception, e:
                log.exception("failure running job '%s'" % job.id)
        # Update the waiting list
        self.waiting_jobs = new_waiting_jobs
Example #33
class ActionServerHandler(object):
    """
    Interface to action server which is more useful for behaviors.
    """
    def __init__(self, action_name, action_type):
        self.goal_queue = Queue(1)
        self.result_queue = Queue(1)
        self.lock = Blackboard().lock
        self.action_name = action_name
        self.cancel_next_goal = False
        self._as = SimpleActionServer(action_name, action_type, execute_cb=self.execute_cb, auto_start=False)
        self._as.register_preempt_callback(self.cancel_cb)
        self._as.start()

    def cancel_cb(self):
        self.cancel_next_goal = self._as.is_new_goal_available()

    def execute_cb(self, goal):
        """
        :type goal: MoveGoal
        """
        with self.lock.acquire_timeout(0) as got_lock:
            if got_lock and not self.cancel_next_goal:
                self.my_state = Status.RUNNING
                self.goal_queue.put(goal)
                self.result_queue.get()()
            else:
                self.my_state = Status.FAILURE
                r = self._as.action_server.ActionResultType()
                r.error = DetectShelfLayersResult.SERVER_BUSY
                print_with_prefix('rejected goal because server is busy', self.action_name)
                self._as.set_aborted(r)
                self.cancel_next_goal = False

    def get_goal(self):
        try:
            goal = self.goal_queue.get_nowait()
            return goal
        except Empty:
            return None

    def has_goal(self):
        return not self.goal_queue.empty()

    def send_preempted(self, result=None):
        def call_me_now():
            self._as.set_preempted(result)
        self.result_queue.put(call_me_now)

    def send_aborted(self, result=None):
        def call_me_now():
            self._as.set_aborted(result)
        self.result_queue.put(call_me_now)

    def send_result(self, result=None):
        """
        :type result: MoveResult
        """
        def call_me_now():
            self._as.set_succeeded(result)
        self.result_queue.put(call_me_now)

    def is_preempt_requested(self):
        return self._as.is_preempt_requested()
Example #34
class DBScheduler(object):
    ''' Database operation scheduler
    We will have one or more read thread and only one write thread.
    '''

    log = logging.getLogger('raceday.DBScheduler')

    def __init__(self):
        from twisted.internet import reactor  # imported here, inside the method

        self.reactor = reactor

        engine = get_engine()
        # create_schema(engine)

        self.read_pool = ThreadPool(minthreads=1,
                                    maxthreads=16,
                                    name="ReadPool")

        self.write_pool = ThreadPool(minthreads=1,
                                     maxthreads=1,
                                     name="WritePool")

        self.read_pool.start()
        self.write_pool.start()

        self.signals = SignalManager(dispatcher.Any).connect(
            self.stop_threadpools, spider_closed)

        self.counters = defaultdict(lambda: Counter())

        self.cache = defaultdict(lambda: dict())

        self.write_queue = Queue()
        self.writelock = False  # Write queue mutex

    def stop_threadpools(self):
        self.read_pool.stop()
        self.write_pool.stop()
        for counter, results in self.counters.iteritems():
            print(counter)
            for modelname, count in results.iteritems():
                print('  ', modelname.__name__, '-', count)

    def _do_save(self):
        assert not isInIOThread()

        while not self.write_queue.empty():
            items = []

            try:
                self.writelock = True
                try:
                    while True:
                        items.append(self.write_queue.get_nowait())
                except Empty:
                    pass

                session = Session()

                try:
                    session.add_all(items)
                    session.commit()
                except:
                    session.rollback()
                    raise
                finally:
                    session.close()
            finally:
                self.writelock = False

    def save(self, obj):
        self.write_queue.put(obj)

        if self.writelock:
            return None
        else:
            return deferToThreadPool(self.reactor, self.write_pool,
                                     self._do_save)

    def _do_get_id(self, model, unique, fval, fields):
        assert not isInIOThread()

        return Session().query(model).filter(
            getattr(model, unique) == fval).one().id

    @inlineCallbacks
    def get_id(self, model, unique, fields):
        ''' Get an ID from the cache or from the database.
        If doesn't exist - create an item.
        All database operations are done from
        the separate thread
        '''
        assert isInIOThread()

        fval = fields[unique]

        try:
            result = self.cache[model][fval]
            self.counters['hit'][model] += 1
            returnValue(result)
        except KeyError:
            self.counters['miss'][model] += 1

        selectors = {unique: fval}

        result, created = yield deferToThreadPool(self.reactor, self.read_pool,
                                                  get_or_create, model, fields,
                                                  **selectors)

        result = result.id

        if created:
            self.counters['db_create'][model] += 1
        else:
            self.counters['db_hit'][model] += 1

        self.cache[model][fval] = result
        returnValue(result)
Example #35
def parse_cutechess_output(
    p, remote, result, spsa, spsa_tuning, games_to_play, batch_size, tc_limit
):

    saved_stats = copy.deepcopy(result["stats"])
    rounds = {}

    q = Queue()
    t = threading.Thread(target=enqueue_output, args=(p.stdout, q))
    t.daemon = True
    t.start()

    end_time = datetime.datetime.now() + datetime.timedelta(seconds=tc_limit)
    print("TC limit {} End time: {}".format(tc_limit, end_time))

    num_games_updated = 0
    while datetime.datetime.now() < end_time:
        try:
            line = q.get_nowait()
        except Empty:
            if p.poll() is not None:
                break
            time.sleep(1)
            continue

        sys.stdout.write(line)
        sys.stdout.flush()

        # Have we reached the end of the match?  Then just exit
        if "Finished match" in line:
            # The following assertion will fail if there are games without result.
            # Does this ever happen?
            assert num_games_updated == games_to_play
            print("Finished match cleanly")

        # Parse line like this:
        # Warning: New-eb6a21875e doesn't have option ThreatBySafePawn
        if "Warning:" in line and "doesn't have option" in line:
            message = r'Cutechess-cli says: "{}"'.format(line.strip())
            result["message"] = message
            send_api_post_request(remote + "/api/stop_run", result)
            raise Exception(message)

        # Parse line like this:
        # Finished game 1 (stockfish vs base): 0-1 {White disconnects}
        if "disconnects" in line or "connection stalls" in line:
            result["stats"]["crashes"] += 1

        if "on time" in line:
            result["stats"]["time_losses"] += 1

        # Parse line like this:
        # Score of stockfish vs base: 0 - 0 - 1  [0.500] 1
        if "Score" in line:
            chunks = line.split(":")
            chunks = chunks[1].split()
            wld = [int(chunks[0]), int(chunks[2]), int(chunks[4])]

            validate_pentanomial(
                wld, rounds
            )  # check if cutechess-cli result is compatible with
            # our own bookkeeping

            pentanomial = [
                rounds["pentanomial"][i] + saved_stats["pentanomial"][i]
                for i in range(5)
            ]
            result["stats"]["pentanomial"] = pentanomial

            wld_pairs = {}  # trinomial frequencies of completed game pairs

            # rounds['trinomial'] is ordered ldw
            wld_pairs["wins"] = wld[0] - rounds["trinomial"][2]
            wld_pairs["losses"] = wld[1] - rounds["trinomial"][0]
            wld_pairs["draws"] = wld[2] - rounds["trinomial"][1]

            result["stats"]["wins"] = wld_pairs["wins"] + saved_stats["wins"]
            result["stats"]["losses"] = wld_pairs["losses"] + saved_stats["losses"]
            result["stats"]["draws"] = wld_pairs["draws"] + saved_stats["draws"]

            if spsa_tuning:
                spsa["wins"] = wld_pairs["wins"]
                spsa["losses"] = wld_pairs["losses"]
                spsa["draws"] = wld_pairs["draws"]

            num_games_finished = (
                wld_pairs["wins"] + wld_pairs["losses"] + wld_pairs["draws"]
            )

            assert (
                2 * sum(result["stats"]["pentanomial"])
                == result["stats"]["wins"]
                + result["stats"]["losses"]
                + result["stats"]["draws"]
            )
            assert num_games_finished == 2 * sum(rounds["pentanomial"])
            assert num_games_finished <= num_games_updated + batch_size
            assert num_games_finished <= games_to_play

            # Send an update_task request after a batch is full or if we have played all games
            if (num_games_finished == num_games_updated + batch_size) or (
                num_games_finished == games_to_play
            ):
                # Attempt to send game results to the server. Retry a few times upon error
                update_succeeded = False
                for _ in range(5):
                    try:
                        t0 = datetime.datetime.utcnow()
                        response = send_api_post_request(
                            remote + "/api/update_task", result
                        ).json()
                        print(
                            "  Task updated successfully in {}s".format(
                                (datetime.datetime.utcnow() - t0).total_seconds()
                            )
                        )
                        if not response["task_alive"]:
                            # This task is no longer necessary
                            print("Server told us task is no longer needed")
                            return response
                        update_succeeded = True
                        num_games_updated = num_games_finished
                        break
                    except Exception as e:
                        sys.stderr.write("Exception from calling update_task:\n")
                        print(e, file=sys.stderr)
                        # traceback.print_exc(file=sys.stderr)
                    time.sleep(HTTP_TIMEOUT)
                if not update_succeeded:
                    print("Too many failed update attempts")
                    break

        # act on line like this
        # Finished game 4 (Base-5446e6f vs New-1a68b26): 1/2-1/2 {Draw by adjudication}
        if "Finished game" in line:
            update_pentanomial(line, rounds)

    now = datetime.datetime.now()
    if now >= end_time:
        print("{} is past end time {}".format(now, end_time))

    return {"task_alive": True}
Example #36
class SpotifyClient(SpotifySessionManager, RunLoopMixin):
    ''' Spotify client that runs all code on a tornado ioloop

    This subclass is intended to be used in the context of an application
    that uses a tornado ioloop running on a single thread to do its work.
    All Spotify callbacks are bounced to the ioloop passed to the constructor
    so that it is not necessary to lock non thread-safe code.
    '''

    audio_buffer_size = 50
    user_agent = PLUGIN_ID
    application_key = Resource.Load('spotify_appkey.key')

    def __init__(self, username, password, ioloop):
        ''' Initializer

        :param username:       The username to connect to spotify with.
        :param password:       The password to authenticate with.
        :param ioloop:         The tornado IOLoop instance to run on.
        '''
        super(SpotifyClient, self).__init__(username, password)
        self.ioloop = ioloop
        self.timer = None
        self.session = None
        self.login_error = None
        self.logging_in = False
        self.stop_callback = None
        self.audio_buffer = None
        self.audio_converter = None
        self.playlist_folders = {}
        self.images = {}

    ''' Public methods (names with underscores are disallowed by Plex) '''

    def is_logging_in(self):
        return self.logging_in

    def is_logged_in(self):
        return self.session is not None

    def needs_restart(self, username, password):
        ''' Determines if the library should be restarted '''
        return self.username != username \
            or self.password != password

    def connect(self):
        ''' Connect to Spotify '''
        self.log("Connecting as %s" % self.username)
        self.logging_in = True
        self.schedule_periodic_check(connect(self))

    def disconnect(self):
        ''' Disconnect from Spotify '''
        if not self.session:
            return
        self.log("Logging out")
        self.session.logout()

    def is_album_playable(self, album):
        ''' Check if an album can be played by a client or not '''
        assert_loaded(album)
        return album.is_available()

    def is_track_playable(self, track):
        ''' Check if a track can be played by a client or not '''
        playable = True
        assert_loaded(track)
        if track.is_local():
            playable = False
        elif not track.availability():
            playable = False
        return playable

    def get_art(self, uri, callback):
        ''' Fetch and return album artwork.

        .. note:: Currently only album artwork can be retrieved.

        :param uri:            The spotify URI of the album to load art for.
        :param callback:       The callback to invoke when artwork is loaded.
                               Should take image data as a single parameter.
        '''
        self.log("Get artwork: %s" % uri)
        link = Link.from_string(uri)
        if link.type() != Link.LINK_ALBUM:
            raise RuntimeError("Non album artwork not supported")
        album = link.as_album()

        def browse_finished(browser):
            self.load_image(uri, album.cover(), callback)

        return self.browse_album(album, browse_finished)

    def get_playlists(self, folder_id=0):
        ''' Return the user's playlists

        :param folder_id:      The id of the playlist folder to return.
        '''
        self.log("Get playlists (folder id: %s)" % folder_id)
        result = []
        if folder_id in self.playlist_folders:
            result = self.playlist_folders[folder_id]
        return result

    def get_starred_tracks(self):
        ''' Return the user's starred tracks

        TODO: this should be made async with a callback rather than assuming
        the starred playlist is loaded (it will fail if it is not loaded yet).
        '''
        self.log("Get starred")
        return assert_loaded(self.session.starred()) if self.session else None

    def search(self, query, callback):
        ''' Execute a search

        :param query:          A query string.
        :param callback:       A callback to invoke when the search is finished.
                               Should take the results list as a parameter.
        '''
        self.log("Search (query = %s)" % query)
        return self.session.search(query=query, callback=callback)

    def browse_album(self, album, callback):
        ''' Browse an album, invoking the callback when done

        :param album:          An album instance to browse.
        :param callback:       A callback to invoke when the album is loaded.
                               Should take the browser as a single parameter.
        '''
        link = Link.from_album(album)

        def callback_wrapper(browser, userdata):
            self.log("Album browse complete: %s" % link)
            callback(browser)

        self.log("Browse album: %s" % link)
        return AlbumBrowser(album, callback_wrapper)

    def browse_artist(self, artist, callback):
        ''' Browse an artist, invoking the callback when done

        :param artist:         An artist instance to browse.
        :param callback:       A callback to invoke when the artist is loaded.
                               Should take the browser as a single parameter.
        '''
        link = Link.from_artist(artist)

        def callback_wrapper(browser, userdata):
            self.log("Artist browse complete: %s" % Link.from_artist(artist))
            callback(browser)

        self.log("Browse artist: %s" % link)
        browser = ArtistBrowser(artist, "no_tracks", callback_wrapper)
        return browser

    def load_image(self, uri, image_id, callback):
        ''' Load an image from an image id

        :param image_id:       The spotify id of the image to load.
        :param callback:       A callback to invoke when the image is loaded.
                               Should take the image as a single parameter.
        '''
        def callback_wrapper(image):
            self.log("Image loaded: %s" % uri)
            callback(str(image.data()))
            if uri in self.images:
                del self.images[uri]

        self.log("Loading image: %s" % uri)
        if image_id is not None:
            image = self.images.get(uri, self.session.image_create(image_id))
            image.add_load_callback(callback_wrapper)
            self.images[uri] = image
            return image
        else:
            callback(None)

    def load_track(self, uri):
        ''' Load a track from a spotify URI

        Note: this currently polls as there is no API for browsing
        individual tracks

        :param uri:              The spotify URI of the track to load.
        '''
        track = Link.from_string(uri).as_track()
        return self.wait_until_loaded(track, POLL_TIMEOUT)

    def play_track(self, uri, audio_callback, stop_callback):
        ''' Start playing a spotify track

        :param uri:              The spotify URI of the track to play.
        :param audio_callback:   A callback to invoke when audio arrives.
                                 Return a boolean to indicate if more audio can
                                 be processed.
        :param stop_callback:    A callback to invoke when playback is stopped.
        '''
        self.log("Play track: %s" % uri)
        track = self.load_track(uri)
        self.stop_playback()
        self.session.load(track)
        self.session.play(True)
        self.audio_converter = PCMToAIFFConverter(track, audio_callback)
        self.audio_buffer = Queue()
        self.stop_callback = stop_callback

    def stop_playback(self):
        ''' Stop playing the current stream '''
        if self.audio_converter is None:
            return
        self.log("Stop playback")
        if self.stop_callback is not None:
            self.stop_callback()
        self.session.play(0)
        self.session.unload()
        self.stop_callback = None
        self.audio_converter = None
        self.audio_buffer = None
        self.log("Playback stopped")

    ''' Utility methods '''

    def wait_until_loaded(self, spotify_object, timeout):
        ''' Poll a spotify object until it is loaded

        :param spotify_object:   The spotify object to poll.
        :param timeout:          A timeout in seconds.
        '''
        start = time()
        while not spotify_object.is_loaded() and time() - start < timeout:
            message = "Waiting for spotify object: %s" % spotify_object
            self.log(message)
            self.session.process_events()
            sleep(POLL_INTERVAL)
        assert_loaded(spotify_object)
        return spotify_object

    def log(self, message, debug=False):
        ''' Logging helper function

        :param message:    The message to output to the log.
        :param debug:      Only output the message in debug mode?
        '''
        message = "SPOTIFY: %s" % message
        Log.Debug(message) if debug else Log(message)

    def run_on_main_thread(self, callback):
        ''' Bounce a call to the main thread '''
        self.invoke_async(callback)

    def schedule_periodic_check(self, session, timeout=0):
        ''' Schedules the next periodic Spotify event processing call

        Must be called from the IO loop thread.
        '''
        def callback():
            self.timer = None
            self.process_events(session)

        self.cancel_periodic_check()
        self.log('Processing next message in %.3fs' % timeout, debug=True)
        self.timer = self.schedule_timer(timeout, callback)

    def cancel_periodic_check(self):
        if self.timer is not None:
            self.cancel_timer(self.timer)
            self.timer = None

    def process_events(self, session):
        ''' Process pending Spotify events and schedule the next check '''
        self.log("Processing events", debug=True)
        self.cancel_periodic_check()
        timeout = 0
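        # process_events() returns the number of milliseconds until the next
        # required call; keep pumping while it asks to run again immediately.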
        while timeout == 0:
            timeout = session.process_events() / 1000.0
        self.schedule_periodic_check(session, timeout)

    ''' Spotify callbacks '''

    def notify_main_thread(self, session=None):
        self.log("Notify main thread", debug=True)
        callback = lambda: self.process_events(session)
        self.run_on_main_thread(callback)

    def logged_in(self, session, error):
        ''' libspotify callback for login attempts '''
        self.logging_in = False
        if error:
            self.log("Error logging in: %s" % error)
            self.login_error = error
        else:
            self.log("Logged in")
            self.session = session
            self.session.playlist_container().add_loaded_callback(
                self.playlists_loaded_callback)

    def logged_out(self, session):
        ''' libspotify callback for logout requests '''
        if not self.session:
            return
        self.log("Logged out")
        self.session = None
        self.cancel_periodic_check()

    def playlists_loaded_callback(self, container, userinfo):
        ''' Callback invoked when playlists are loaded '''
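        # Walk the flat playlist container: folder_start pushes the parent
        # list and opens a new one keyed by folder id; folder_end pops back.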
        current_folder = []
        folder_stack = []
        folder_map = {0: current_folder}
        for playlist in list(self.session.playlist_container()):
            if playlist.type() == "folder_start":
                folder_stack.append(current_folder)
                current_folder.append(playlist)
                current_folder = []
                folder_map[playlist.id()] = current_folder
            elif playlist.type() == "folder_end":
                current_folder = folder_stack.pop()
            elif playlist.type() == "placeholder":
                pass
            else:
                current_folder.append(playlist)
        self.playlist_folders = folder_map

    def end_of_track(self, session):
        ''' libspotify callback for when the current track ends '''
        self.log("Track ended")
        self.flush_audio_buffer()
        self.stop_playback()

    def metadata_updated(self, sess):
        ''' libspotify callback when new metadata arrives '''
        self.log("Metadata update", debug=True)

    def log_message(self, sess, message):
        ''' libspotify callback for system messages '''
        self.log("Message (%s)" % message.strip())

    def connection_error(self, sess, error):
        ''' libspotify callback for connection errors '''
        if error is not None:
            self.log("Connection error (%s)" % error.strip())

    def message_to_user(self, sess, message):
        ''' libspotify callback for user messages '''
        self.log("User message (%s)" % message)

    def flush_audio_buffer(self):
        ''' Convert buffered audio data and send it to the caller '''
        while self.audio_converter:
            try:
                self.audio_converter.convert(*self.audio_buffer.get_nowait())
            except Empty:
                return
            except EOFError:
                self.stop_playback()
            except Exception:
                self.log("Playback error: %s" % Plugin.Traceback())
                self.stop_playback()

    def music_delivery(self, session, frames, frame_size, num_frames,
                       sample_type, sample_rate, channels):
        ''' Called when libspotify has audio data ready for consumption

        NOTE: this call is made on a background thread.  If any calls
        need to be made against the Spotify API they *MUST* be bounced
        to the main thread for execution.
        '''
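        # (With tornado, IOLoop.add_callback() is the documented thread-safe
        # way to hand work to the loop thread; invoke_async presumably wraps
        # something equivalent.)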
        if num_frames == 0:
            return 0
        copied_frames = str(frames)
        self.audio_buffer.put((copied_frames, num_frames))
        if self.audio_buffer.qsize() >= self.audio_buffer_size:
            self.invoke_async(self.flush_audio_buffer)
        return num_frames
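
The music_delivery/flush_audio_buffer pair above is a producer/consumer
buffer: the libspotify audio thread enqueues chunks, and the ioloop thread
drains them without blocking. A minimal sketch of that shape (names are
illustrative, not part of the plugin):

from queue import Queue, Empty  # the module is named "Queue" on Python 2

FLUSH_AT = 50                   # mirrors audio_buffer_size above
buffer = Queue()

def delivery(chunk):            # runs on the audio (background) thread
    buffer.put(chunk)
    return buffer.qsize() >= FLUSH_AT  # True when a flush should be scheduled

def flush(consume):             # runs on the main/ioloop thread
    while True:
        try:
            consume(buffer.get_nowait())
        except Empty:
            return
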
Example #37
0
    def download(self,
                 dids,
                 rse,
                 protocol='srm',
                 pfn=None,
                 nrandom=None,
                 nprocs=None,
                 user_agent='rucio_clients',
                 dir='.',
                 no_subd=False):

        trace_endpoint = client.host
        trace_pattern = {
            'hostname': socket.getfqdn(),
            'account': client.account,
            'uuid': generate_uuid(),
            'eventType': 'download',
            'eventVersion': 'api',
            'appid': self.trace_appid,
            'dataset': self.trace_dataset,
            'datasetScope': self.trace_datasetscope,
            'pq': self.trace_pq,
            'taskid': self.trace_taskid,
            'usrdn': self.trace_usrdn
        }

        # Is the account being used an admin account?
        account_attributes = [
            acc for acc in client.list_account_attributes(client.account)
        ]
        is_admin = False
        for attr in account_attributes[0]:
            if attr['key'] == 'admin' and attr['value'] is True:
                logger.debug('Admin mode enabled')
                is_admin = True
                break

        # extend RSE expression to exclude tape RSEs for non-admin accounts
        rse_expression = rse
        if not is_admin:
            rse_expression = 'istape=False'
            if rse and len(rse.strip()) > 0:
                rse_expression = '(%s)&istape=False' % rse
                logger.debug('RSE-Expression: %s' % rse_expression)

        # Extract the scope, name from the did(s)
        did_list = []
        for did in dids:
            try:
                scope, name = self.extract_scope(did)
                if name.find('*') > -1:
                    for dsn in client.list_dids(scope, filters={'name': name}):
                        did_list.append({'scope': scope, 'name': dsn})
                else:
                    did_list.append({'scope': scope, 'name': name})
            except ValueError as error:
                raise error

        if pfn:
            if not rse:
                logger.error(
                    '--rse option is mandatory in combination with --pfn!')
                return FAILURE

            if len(dids) > 1:
                dids = [dids[0]]
                logger.warning(
                    '--pfn option and multiple DIDs given! Only considering first DID...'
                )

        summary = {}
        num_files_to_dl = {}
        input_queue = Queue()
        output_queue = Queue()

        # get replicas for every file of the given dids
        for arg_did in did_list:
            arg_didstr = '%s:%s' % (arg_did['scope'], arg_did['name'])
            summary[arg_didstr] = {}

            # get type of given did; save did if it's a dataset
            files_with_replicas = []
            if not pfn:
                try:
                    did_info = client.get_did(arg_did['scope'],
                                              arg_did['name'])
                    did_type = did_info['type'].upper()
                    dataset_scope = '' if did_type == 'FILE' else arg_did[
                        'scope']
                    dataset_name = '' if did_type == 'FILE' else arg_did['name']
                except:
                    logger.error('Failed to get did info for did %s' %
                                 arg_didstr)
                    return FAILURE

                try:
                    files_with_replicas = client.list_replicas(
                        [arg_did],
                        schemes=None,
                        rse_expression=rse_expression,
                        metalink=None)
                except:
                    logger.error(
                        'Failed to get list of files with their replicas for DID %s'
                        % arg_didstr)
                    return FAILURE

                files_with_replicas = [f for f in files_with_replicas]
                if nrandom:
                    random.shuffle(files_with_replicas)
                    files_with_replicas = files_with_replicas[0:nrandom]
            else:
                logger.debug('PFN option overrides replica listing')
                did_type = 'FILE'
                dataset_scope = ''
                dataset_name = ''
                files_with_replicas = [{
                    'bytes': None,
                    'adler32': None,
                    'scope': arg_did['scope'],
                    'name': arg_did['name'],
                    'pfns': {
                        pfn: {
                            'rse': rse
                        }
                    },
                    'rses': {
                        rse: [pfn]
                    }
                }]

            num_files_to_dl[arg_didstr] = len(files_with_replicas)
            for f in files_with_replicas:
                file_scope = f['scope']
                file_name = f['name']
                file_didstr = '%s:%s' % (file_scope, file_name)

                file_exists, dest_dir = self._file_exists(did_type,
                                                          file_scope,
                                                          file_name,
                                                          dir,
                                                          dsn=dataset_name,
                                                          no_subdir=no_subd)
                dest_dir = os.path.abspath(dest_dir)

                if file_exists:
                    logger.info('File %s already exists locally' % file_didstr)

                    out = {}
                    out['dataset_scope'] = dataset_scope
                    out['dataset_name'] = dataset_name
                    out['scope'] = file_scope
                    out['name'] = file_name
                    out['clientState'] = 'ALREADY_DONE'
                    output_queue.put(out)

                    trace = deepcopy(trace_pattern)

                    if 'datasetScope' not in trace:
                        trace['datasetScope'] = dataset_scope
                    if 'dataset' not in trace:
                        trace['dataset'] = dataset_name
                    trace.update({
                        'scope': file_scope,
                        'filename': file_name,
                        'filesize': f['bytes'],
                        'transferStart': time.time(),
                        'transferEnd': time.time(),
                        'clientState': 'ALREADY_DONE'
                    })
                    self.send_trace(trace, trace_endpoint, user_agent)
                else:
                    if not os.path.isdir(dest_dir):
                        logger.debug('Destination dir not found: %s' %
                                     dest_dir)
                        try:
                            os.makedirs(dest_dir)
                        except:
                            logger.error(
                                'Failed to create missing destination directory %s'
                                % dest_dir)
                            return FAILURE
                    if no_subd and os.path.isfile('%s/%s' %
                                                  (dest_dir, file_name)):
                        # Delete the existing file so it can be overwritten
                        logger.debug('Deleting existing file: %s' %
                                     file_name)
                        os.remove("%s/%s" % (dest_dir, file_name))
                    f['dataset_scope'] = dataset_scope
                    f['dataset_name'] = dataset_name
                    f['dest_dir'] = dest_dir
                    input_queue.put(f)

        try:
            self.download_rucio(pfn, protocol, input_queue, output_queue,
                                trace_pattern, trace_endpoint, nprocs,
                                user_agent, dir, no_subd)
        except Exception as error:
            logger.error('Exception during download: %s' % str(error))

        while True:
            try:
                item = output_queue.get_nowait()
                output_queue.task_done()
                ds_didstr = '%s:%s' % (item['dataset_scope'],
                                       item['dataset_name'])
                file_didstr = '%s:%s' % (item['scope'], item['name'])

                if ds_didstr in summary or file_didstr in summary:
                    if item['dataset_scope'] == '':
                        summary[file_didstr][file_didstr] = item['clientState']
                    else:
                        summary[ds_didstr][file_didstr] = item['clientState']
                    if item['clientState'] == 'CORRUPTED':
                        try:
                            # client.declare_suspicious_file_replicas([item['pfn']], reason='Corrupted')
                            logger.warning('File %s seems to be corrupted.' %
                                           item['pfn'])
                        except:
                            logger.warning(
                                'File replica %s might be corrupted. Failure to declare it bad to Rucio'
                                % item['pfn'])
            except Empty:
                break
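
The closing loop above drains the output queue without blocking to build the
per-DID summary. The same drain-until-Empty step, sketched in isolation (the
item shape is assumed from the example):

from queue import Queue, Empty  # the module is named "Queue" on Python 2

def collect_states(output_queue):
    summary = {}
    while True:
        try:
            item = output_queue.get_nowait()
            output_queue.task_done()
        except Empty:
            break
        summary['%s:%s' % (item['scope'], item['name'])] = item['clientState']
    return summary
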
Example #38
0
class DirectEdit(Worker):
    localScanFinished = pyqtSignal()
    directEditUploadCompleted = pyqtSignal()
    openDocument = pyqtSignal(object)
    editDocument = pyqtSignal(object)
    directEditLockError = pyqtSignal(str, str, str)
    directEditConflict = pyqtSignal(str, str, str)
    directEditReadonly = pyqtSignal(object)
    directEditLocked = pyqtSignal(object, object, object)
    '''
    Direct Edit worker: watches the local Direct Edit folder and pushes
    edited documents back to the server through its lock and upload queues.
    '''
    def __init__(self, manager, folder, url):
        '''
        Constructor
        '''
        super(DirectEdit, self).__init__()
        self._manager = manager
        self._url = url
        self._thread.started.connect(self.run)
        self._event_handler = None
        self._metrics = dict()
        self._metrics['edit_files'] = 0
        self._observer = None
        if type(folder) == str:
            folder = unicode(folder)
        self._folder = folder
        self._local_client = LocalClient(self._folder)
        self._upload_queue = Queue()
        self._lock_queue = Queue()
        self._error_queue = BlacklistQueue()
        self._stop = False
        self._manager.get_autolock_service().orphanLocks.connect(
            self._autolock_orphans)
        self._last_action_timing = -1

    @pyqtSlot(object)
    def _autolock_orphans(self, locks):
        log.trace("Orphans lock: %r", locks)
        for lock in locks:
            if lock.path.startswith(self._folder):
                log.debug("Should unlock: %s", lock.path)
                if not os.path.exists(lock.path):
                    self._manager.get_autolock_service().orphan_unlocked(
                        lock.path)
                    continue
                ref = self._local_client.get_path(lock.path)
                self._lock_queue.put((ref, 'unlock_orphan'))

    def autolock_lock(self, src_path):
        ref = self._local_client.get_path(src_path)
        self._lock_queue.put((ref, 'lock'))

    def autolock_unlock(self, src_path):
        ref = self._local_client.get_path(src_path)
        self._lock_queue.put((ref, 'unlock'))

    def start(self):
        self._stop = False
        super(DirectEdit, self).start()

    def stop(self):
        super(DirectEdit, self).stop()
        self._stop = True

    def stop_client(self, _):
        if self._stop:
            raise ThreadInterrupt

    def handle_url(self, url=None):
        if url is None:
            url = self._url
        if url is None:
            return
        log.debug("DirectEdit load: '%r'", url)
        try:
            info = parse_protocol_url(str(url))
        except UnicodeEncodeError:
            # Firefox seems to be different on the encoding part
            info = parse_protocol_url(unicode(url))
        if info is None:
            return
        # Handle backward compatibility
        if info.get('item_id') is not None:
            self.edit(info['server_url'], info['item_id'])
        else:
            self.edit(info['server_url'],
                      info['doc_id'],
                      user=info['user'],
                      download_url=info['download_url'])

    def _cleanup(self):
        log.debug("Cleanup DirectEdit folder")
        # Should unlock any remaining doc that has not been unlocked, or ask the user
        if self._local_client.exists('/'):
            for child in self._local_client.get_children_info('/'):
                if self._local_client.get_remote_id(
                        child.path, "nxdirecteditlock") is not None:
                    continue
                children = self._local_client.get_children_info(child.path)
                if len(children) > 1:
                    log.warn("Cannot clean this document: %s", child.path)
                    continue
                if (len(children) == 0):
                    # Clean the folder as it is empty
                    shutil.rmtree(self._local_client.abspath(child.path),
                                  ignore_errors=True)
                    continue
                ref = children[0].path
                try:
                    _, _, _, digest_algorithm, digest = self._extract_edit_info(
                        ref)
                except NotFound:
                    # Engine is not known anymore
                    shutil.rmtree(self._local_client.abspath(child.path),
                                  ignore_errors=True)
                    continue
                try:
                    # Don't update if digests are the same
                    info = self._local_client.get_info(ref)
                    current_digest = info.get_digest(
                        digest_func=digest_algorithm)
                    if (current_digest != digest):
                        log.warn(
                            "Document has been modified and not synchronized, readd to upload queue"
                        )
                        self._upload_queue.put(ref)
                        continue
                except Exception as e:
                    log.debug(e)
                    continue
                # Placeholder for handling the reopening of an interrupted edit
                shutil.rmtree(self._local_client.abspath(child.path),
                              ignore_errors=True)
        if not os.path.exists(self._folder):
            os.mkdir(self._folder)

    def _get_engine(self, url, user=None):
        if url is None:
            return None
        if url.endswith('/'):
            url = url[:-1]
        # Simplify port if possible
        if url.startswith('http:') and ':80/' in url:
            url = url.replace(':80/', '/')
        if url.startswith('https:') and ':443/' in url:
            url = url.replace(':443/', '/')
        for engine in self._manager.get_engines().values():
            bind = engine.get_binder()
            server_url = bind.server_url
            if server_url.endswith('/'):
                server_url = server_url[:-1]
            if server_url == url and (user is None or user == bind.username):
                return engine
        # Some backends are case-insensitive
        if user is None:
            return None
        user = user.lower()
        for engine in self._manager.get_engines().values():
            bind = engine.get_binder()
            server_url = bind.server_url
            # Simplify port if possible
            if server_url.startswith('http:') and ':80/' in server_url:
                server_url = server_url.replace(':80/', '/')
            if server_url.startswith('https:') and ':443/' in server_url:
                server_url = server_url.replace(':443/', '/')
            if server_url.endswith('/'):
                server_url = server_url[:-1]
            if server_url == url and user == bind.username.lower():
                return engine
        return None

    def _download_content(self,
                          engine,
                          remote_client,
                          info,
                          file_path,
                          url=None):
        file_dir = os.path.dirname(file_path)
        file_name = os.path.basename(file_path)
        file_out = os.path.join(
            file_dir,
            DOWNLOAD_TMP_FILE_PREFIX + file_name + DOWNLOAD_TMP_FILE_SUFFIX)
        # Close to the Processor method - should try to refactor?
        pair = engine.get_dao().get_valid_duplicate_file(info.digest)
        if pair:
            local_client = engine.get_local_client()
            existing_file_path = local_client.abspath(pair.local_path)
            log.debug(
                'Local file matches remote digest %r, copying it from %r',
                info.digest, existing_file_path)
            shutil.copy(existing_file_path, file_out)
            if pair.is_readonly():
                log.debug('Unsetting readonly flag on copied file %r',
                          file_out)
                from nxdrive.client.common import BaseClient
                BaseClient.unset_path_readonly(file_out)
        else:
            log.debug('Downloading file %r', info.filename)
            if url is not None:
                remote_client.do_get(url,
                                     file_out=file_out,
                                     digest=info.digest,
                                     digest_algorithm=info.digest_algorithm)
            else:
                remote_client.get_blob(info, file_out=file_out)
        return file_out

    def _display_modal(self, message, values=None):
        from nxdrive.wui.application import SimpleApplication
        from nxdrive.wui.modal import WebModal
        app = SimpleApplication(self._manager, None, {})
        dialog = WebModal(app, app.translate(message, values))
        dialog.add_button("OK", app.translate("OK"))
        dialog.show()
        app.exec_()

    def _prepare_edit(self, server_url, doc_id, user=None, download_url=None):
        start_time = current_milli_time()
        engine = self._get_engine(server_url, user=user)
        if engine is None:
            values = dict()
            if user is None:
                values['user'] = '******'
            else:
                values['user'] = user
            values['server'] = server_url
            log.warn("No engine found for server_url=%s, user=%s, doc_id=%s",
                     server_url, user, doc_id)
            self._display_modal("DIRECT_EDIT_CANT_FIND_ENGINE", values)
            return
        # Get document info
        remote_client = engine.get_remote_doc_client()
        # Avoid any link with the engine; remote doc clients are not cached, so we can do that
        remote_client.check_suspended = self.stop_client
        rest_client = engine.get_rest_api_client()
        doc = rest_client.fetch(doc_id,
                                fetchDocument=['lock'],
                                enrichers=['permissions'])
        info = remote_client.doc_to_info(doc)
        if (info.lock_owner is not None
                and info.lock_owner != engine.get_remote_user()):
            log.debug(
                "Doc %s was locked by %s on %s, won't download it for edit",
                info.name, info.lock_owner, info.lock_created)
            self.directEditLocked.emit(info.name, info.lock_owner,
                                       info.lock_created)
            return None
        if (info.permissions is not None and 'Write' not in info.permissions):
            log.debug("Doc %s is readonly for %s, won't download it for edit",
                      info.name, user)
            self.directEditReadonly.emit(info.name)
            return None

        filename = info.filename

        # Create local structure
        dir_path = os.path.join(self._folder, doc_id)
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)

        log.debug("Editing %r", filename)
        file_path = os.path.join(dir_path, filename)

        # Download the file
        url = None
        if download_url is not None:
            url = server_url
            if not url.endswith('/'):
                url += '/'
            url += download_url
        tmp_file = self._download_content(engine,
                                          remote_client,
                                          info,
                                          file_path,
                                          url=url)
        if tmp_file is None:
            log.debug("Download failed")
            return
        # Set the remote_id
        dir_path = self._local_client.get_path(os.path.dirname(file_path))
        self._local_client.set_remote_id(dir_path, doc_id)
        self._local_client.set_remote_id(dir_path, server_url, "nxdirectedit")
        if user is not None:
            self._local_client.set_remote_id(dir_path, user,
                                             "nxdirectedituser")
        if info.digest is not None:
            self._local_client.set_remote_id(dir_path, info.digest,
                                             "nxdirecteditdigest")
            # Set digest algorithm if not sent by the server
            digest_algorithm = info.digest_algorithm
            if digest_algorithm is None:
                digest_algorithm = guess_digest_algorithm(info.digest)
            self._local_client.set_remote_id(dir_path, digest_algorithm,
                                             "nxdirecteditdigestalgorithm")
        self._local_client.set_remote_id(dir_path, filename,
                                         "nxdirecteditname")
        # Rename to final filename
        # On Windows the target file must be deleted first if it exists, otherwise os.rename raises WindowsError 183
        if sys.platform == 'win32' and os.path.exists(file_path):
            os.unlink(file_path)
        os.rename(tmp_file, file_path)
        self._last_action_timing = current_milli_time() - start_time
        self.openDocument.emit(info)
        return file_path

    def edit(self, server_url, doc_id, user=None, download_url=None):
        try:
            log.debug("Editing doc %s on %s", doc_id, server_url)
            # Handle backward compatibility
            if '#' in doc_id:
                engine = self._get_engine(server_url)
                if engine is None:
                    log.warn(
                        "No engine found for %s, cannot edit file with remote ref %s",
                        server_url, doc_id)
                    return
                self._manager.edit(engine, doc_id)
            else:
                # Download file
                file_path = self._prepare_edit(server_url,
                                               doc_id,
                                               user=user,
                                               download_url=download_url)
                # Launch it
                if file_path is not None:
                    self._manager.open_local_file(file_path)
        except WindowsError as e:
            if e.errno == 13:
                # open file anyway
                if e.filename is not None:
                    self._manager.open_local_file(e.filename)
            else:
                raise e

    def _extract_edit_info(self, ref):
        dir_path = os.path.dirname(ref)
        uid = self._local_client.get_remote_id(dir_path)
        server_url = self._local_client.get_remote_id(dir_path, "nxdirectedit")
        user = self._local_client.get_remote_id(dir_path, "nxdirectedituser")
        engine = self._get_engine(server_url, user=user)
        if engine is None:
            raise NotFound()
        remote_client = engine.get_remote_doc_client()
        remote_client.check_suspended = self.stop_client
        digest_algorithm = self._local_client.get_remote_id(
            dir_path, "nxdirecteditdigestalgorithm")
        digest = self._local_client.get_remote_id(dir_path,
                                                  "nxdirecteditdigest")
        return uid, engine, remote_client, digest_algorithm, digest

    def force_update(self, ref, digest):
        dir_path = os.path.dirname(ref)
        self._local_client.set_remote_id(dir_path, unicode(digest),
                                         "nxdirecteditdigest")
        self._upload_queue.put(ref)

    def _handle_queues(self):
        uploaded = False
        # Lock any documents
        while (not self._lock_queue.empty()):
            try:
                item = self._lock_queue.get_nowait()
                ref = item[0]
                log.trace('Handling DirectEdit lock queue ref: %r', ref)
            except Empty:
                break
            uid = ""
            try:
                dir_path = os.path.dirname(ref)
                uid, _, remote_client, _, _ = self._extract_edit_info(ref)
                if item[1] == 'lock':
                    remote_client.lock(uid)
                    self._local_client.set_remote_id(dir_path, "1",
                                                     "nxdirecteditlock")
                    # Emit the lock signal only when the lock is really set
                    self._manager.get_autolock_service().documentLocked.emit(
                        os.path.basename(ref))
                else:
                    remote_client.unlock(uid)
                    if item[1] == 'unlock_orphan':
                        path = self._local_client.abspath(ref)
                        log.trace("Remove orphan: %s", path)
                        self._manager.get_autolock_service().orphan_unlocked(
                            path)
                        # Clean the folder
                        shutil.rmtree(path, ignore_errors=True)
                    self._local_client.remove_remote_id(
                        dir_path, "nxdirecteditlock")
                    # Emit the signal only when the unlock is done - might want to avoid the call on orphan
                    self._manager.get_autolock_service().documentUnlocked.emit(
                        os.path.basename(ref))
            except ThreadInterrupt:
                raise
            except Exception as e:
                # Try again in 30s
                log.debug("Can't %s document '%s': %r",
                          item[1],
                          ref,
                          e,
                          exc_info=True)
                self.directEditLockError.emit(item[1], os.path.basename(ref),
                                              uid)
        # Unqueue any errors
        item = self._error_queue.get()
        while (item is not None):
            self._upload_queue.put(item.get())
            item = self._error_queue.get()
        # Handle the upload queue
        while (not self._upload_queue.empty()):
            try:
                ref = self._upload_queue.get_nowait()
                log.trace('Handling DirectEdit queue ref: %r', ref)
            except Empty:
                break
            uid, engine, remote_client, digest_algorithm, digest = self._extract_edit_info(
                ref)
            # Don't update if digests are the same
            info = self._local_client.get_info(ref)
            try:
                current_digest = info.get_digest(digest_func=digest_algorithm)
                if current_digest == digest:
                    continue
                start_time = current_milli_time()
                log.trace(
                    "Local digest: %s is different from the recorded one: %s - modification detected for %r",
                    current_digest, digest, ref)
                # TO_REVIEW Should check if the server-side blob has changed?
                # Update the document - should verify the remote hash - NXDRIVE-187
                remote_info = remote_client.get_info(uid)
                if remote_info.digest != digest:
                    # Conflict detected
                    log.trace(
                        "Remote digest: %s is different from the recorded one: %s - conflict detected for %r",
                        remote_info.digest, digest, ref)
                    self.directEditConflict.emit(os.path.basename(ref), ref,
                                                 remote_info.digest)
                    continue
                log.debug('Uploading file %s', self._local_client.abspath(ref))
                remote_client.stream_update(uid,
                                            self._local_client.abspath(ref),
                                            apply_versioning_policy=True)
                # Update hash value
                dir_path = os.path.dirname(ref)
                self._local_client.set_remote_id(dir_path, current_digest,
                                                 'nxdirecteditdigest')
                self._last_action_timing = current_milli_time() - start_time
                self.editDocument.emit(remote_info)
            except ThreadInterrupt:
                raise
            except Exception as e:
                # Try again in 30s
                log.trace("Exception on direct edit: %r", e, exc_info=True)
                self._error_queue.push(ref, ref)
                continue
            uploaded = True
        if uploaded:
            log.debug('Emitting directEditUploadCompleted')
            self.directEditUploadCompleted.emit()
        while (not self._watchdog_queue.empty()):
            evt = self._watchdog_queue.get()
            self.handle_watchdog_event(evt)

    def _execute(self):
        try:
            self._watchdog_queue = Queue()
            self._action = Action("Clean up folder")
            try:
                self._cleanup()
            except ThreadInterrupt:
                raise
            except Exception as ex:
                log.debug(ex)
            self._action = Action("Setup watchdog")
            self._setup_watchdog()
            self._end_action()
            # Load the target url if Drive was not launched before
            self.handle_url()
            log.trace(
                "DirectEdit Entering main loop: continue:%r pause:%r running:%r",
                self._continue, self._pause, self._running)
            while (1):
                self._interact()
                log.trace(
                    "DirectEdit post interact: continue:%r pause:%r running:%r",
                    self._continue, self._pause, self._running)
                try:
                    self._handle_queues()
                except NotFound:
                    pass
                except ThreadInterrupt:
                    raise
                except Exception as ex:
                    log.debug(ex)
                sleep(0.01)
        except ThreadInterrupt:
            raise
        finally:
            self._stop_watchdog()

    def get_metrics(self):
        metrics = super(DirectEdit, self).get_metrics()
        if self._event_handler is not None:
            metrics['fs_events'] = self._event_handler.counter
        metrics['last_action_timing'] = self._last_action_timing
        return dict(metrics.items() + self._metrics.items())

    def _setup_watchdog(self):
        from watchdog.observers import Observer
        log.debug("Watching FS modification on : %s", self._folder)
        self._event_handler = DriveFSEventHandler(self)
        self._observer = Observer()
        self._observer.schedule(self._event_handler,
                                self._folder,
                                recursive=True)
        self._observer.start()

    def _stop_watchdog(self):
        if self._observer is None:
            return
        log.info("Stopping FS Observer thread")
        try:
            self._observer.stop()
        except Exception as e:
            log.warn("Can't stop FS observer : %r", e)
        # Wait for all observers to stop
        try:
            self._observer.join()
        except Exception as e:
            log.warn("Can't join FS observer : %r", e)
        # Delete all observers
        self._observer = None

    def is_lock_file(self, name):
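        # Deliberately short-circuited with "False and": lock-file detection
        # is disabled because globally open files are tracked instead (see
        # handle_watchdog_event below).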
        return False and ((
            name.startswith("~$")  # Office lock file
            or name.startswith(".~lock.")))  # Libre/OpenOffice lock file

    def handle_watchdog_event(self, evt):
        self._action = Action("Handle watchdog event")
        log.debug("Handling watchdog event [%s] on %r", evt.event_type,
                  evt.src_path)
        try:
            src_path = normalize_event_filename(evt.src_path)
            # Event on the folder by itself
            if os.path.isdir(src_path):
                return
            ref = self._local_client.get_path(src_path)
            file_name = os.path.basename(src_path)
            # Disabled: we rely on globally open files instead of editor lock files
            if self.is_lock_file(
                    file_name) and self._manager.get_direct_edit_auto_lock():
                if evt.event_type == 'created':
                    self._lock_queue.put((ref, 'lock'))
                elif evt.event_type == 'deleted':
                    self._lock_queue.put((ref, 'unlock'))
                return
            queue = False
            if evt.event_type == 'modified' or evt.event_type == 'created':
                queue = True
            if evt.event_type == 'moved':
                ref = self._local_client.get_path(evt.dest_path)
                file_name = os.path.basename(evt.dest_path)
                src_path = evt.dest_path
                queue = True
            elif self._local_client.is_temp_file(file_name):
                return
            dir_path = self._local_client.get_path(os.path.dirname(src_path))
            name = self._local_client.get_remote_id(dir_path,
                                                    "nxdirecteditname")
            if name is None:
                return
            if name != file_name:
                return
            if self._manager.get_direct_edit_auto_lock(
            ) and self._local_client.get_remote_id(dir_path,
                                                   "nxdirecteditlock") != "1":
                self._manager.get_autolock_service().set_autolock(
                    src_path, self)
            if queue:
                # ADD TO UPLOAD QUEUE
                self._upload_queue.put(ref)
                return
        except ThreadInterrupt:
            raise
        except Exception as e:
            log.warn("Watchdog exception : %r", e, exc_info=True)
        finally:
            self._end_action()
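
_handle_queues above shows the polling style this worker uses throughout:
drain a Queue with empty()/get_nowait(), push failures onto an error queue
for a later pass, and never block the loop. A minimal sketch under those
assumptions (handle and should_stop are placeholder callables):

import time
from queue import Queue, Empty  # the module is named "Queue" on Python 2

def drain_once(work_queue, handle, error_queue):
    while not work_queue.empty():
        try:
            ref = work_queue.get_nowait()
        except Empty:
            break
        try:
            handle(ref)
        except Exception:
            error_queue.put(ref)  # retried on a later pass

def run(work_queue, handle, error_queue, should_stop):
    while not should_stop():
        drain_once(work_queue, handle, error_queue)
        time.sleep(0.01)          # matches the sleep in _execute above
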
Example #39
0
if __name__ == '__main__':
    q = Queue()
    filepath = os.path.dirname(os.path.abspath(__file__)).rsplit('\\', 1)[0]

    cont = ['junling.txt']
    for t in cont:
        with open('%s/users/%s' % (filepath, t), 'r') as f:
            for i in f:
                if i.strip() and not i.startswith('#'):
                    name = i.split()[0]
                    passwd = i.split()[1]
                    addr = i.split()[2]
                    try:
                        lockpwd = i.split()[3]
                    except IndexError:
                        lockpwd = None
                    t1 = threading.Thread(target=run,
                                          args=(name, passwd, addr, lockpwd))
                    q.put(t1)
    while not q.empty():
        print q.qsize()
        thread = []
        for i in xrange(10):
            try:
                thread.append(q.get_nowait())
            except Empty:
                pass
        for i in thread:
            i.start()
        for i in thread:
            i.join()
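
Example #39 uses the Queue purely as a holding pen for pre-built threads,
starting and joining them in batches of ten. The same pattern in a
self-contained sketch (the batch size and queue contents are assumptions):

from queue import Queue, Empty  # the module is named "Queue" on Python 2

def run_in_batches(q, batch_size=10):
    # q holds threading.Thread objects that have not been started yet.
    while not q.empty():
        batch = []
        for _ in range(batch_size):
            try:
                batch.append(q.get_nowait())
            except Empty:
                break
        for t in batch:
            t.start()
        for t in batch:
            t.join()
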
Example #40
0
class Gviz(wx.Panel):

    # Mark canvas as dirty when setting showall
    _showall = 0

    def _get_showall(self):
        return self._showall

    def _set_showall(self, showall):
        if showall != self._showall:
            self.dirty = True
            self._showall = showall

    showall = property(_get_showall, _set_showall)

    def __init__(self,
                 parent,
                 size=(200, 200),
                 build_dimensions=[200, 200, 100, 0, 0, 0],
                 grid=(10, 50),
                 extrusion_width=0.5,
                 bgcolor="#000000",
                 realparent=None):
        wx.Panel.__init__(self, parent, -1)
        self.widget = self
        size = [max(1.0, x) for x in size]
        ratio = size[0] / size[1]
        self.SetMinSize((150, 150 / ratio))
        self.parent = realparent if realparent else parent
        self.size = size
        self.build_dimensions = build_dimensions
        self.grid = grid
        self.Bind(wx.EVT_PAINT, self.paint)
        self.Bind(wx.EVT_SIZE, self.resize)
        self.hilight = deque()
        self.hilightarcs = deque()
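        # Queue(0) means "no size bound" for the pending highlight segments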
        self.hilightqueue = Queue(0)
        self.hilightarcsqueue = Queue(0)
        self.clear()
        self.filament_width = extrusion_width  # set it to 0 to disable scaling lines with zoom
        self.update_basescale()
        self.scale = self.basescale
        penwidth = max(
            1.0, self.filament_width * ((self.scale[0] + self.scale[1]) / 2.0))
        self.translate = [0.0, 0.0]
        self.mainpen = wx.Pen(wx.Colour(0, 0, 0), penwidth)
        self.arcpen = wx.Pen(wx.Colour(255, 0, 0), penwidth)
        self.travelpen = wx.Pen(wx.Colour(10, 80, 80), penwidth)
        self.hlpen = wx.Pen(wx.Colour(200, 50, 50), penwidth)
        self.fades = [
            wx.Pen(
                wx.Colour(250 - 0.6**i * 100, 250 - 0.6**i * 100,
                          200 - 0.4**i * 50), penwidth) for i in xrange(6)
        ]
        self.penslist = [self.mainpen, self.travelpen, self.hlpen] + self.fades
        self.bgcolor = wx.Colour()
        self.bgcolor.SetFromName(bgcolor)
        self.blitmap = wx.EmptyBitmap(self.GetClientSize()[0],
                                      self.GetClientSize()[1], -1)
        self.paint_overlay = None

    def inject(self):
        layer = self.layers.index(self.layerindex)
        injector(self.gcode, self.layerindex, layer)

    def editlayer(self):
        layer = self.layers.index(self.layerindex)
        injector_edit(self.gcode, self.layerindex, layer)

    def clearhilights(self):
        self.hilight.clear()
        self.hilightarcs.clear()
        while not self.hilightqueue.empty():
            self.hilightqueue.get_nowait()
        while not self.hilightarcsqueue.empty():
            self.hilightarcsqueue.get_nowait()

    def clear(self):
        self.gcode = None
        self.lastpos = [0, 0, 0, 0, 0, 0, 0]
        self.hilightpos = self.lastpos[:]
        self.lines = {}
        self.pens = {}
        self.arcs = {}
        self.arcpens = {}
        self.layers = {}
        self.layersz = []
        self.clearhilights()
        self.layerindex = 0
        self.showall = 0
        self.dirty = True
        self.partial = False
        self.painted_layers = set()
        wx.CallAfter(self.Refresh)

    def get_currentz(self):
        z = self.layersz[self.layerindex]
        z = 0. if z is None else z
        return z

    def layerup(self):
        if self.layerindex + 1 < len(self.layers):
            self.layerindex += 1
            z = self.get_currentz()
            wx.CallAfter(
                self.parent.SetStatusText,
                _("Layer %d - Going Up - Z = %.03f mm") %
                (self.layerindex + 1, z), 0)
            self.dirty = True
            self.parent.setlayercb(self.layerindex)
            wx.CallAfter(self.Refresh)

    def layerdown(self):
        if self.layerindex > 0:
            self.layerindex -= 1
            z = self.get_currentz()
            wx.CallAfter(
                self.parent.SetStatusText,
                _("Layer %d - Going Down - Z = %.03f mm") %
                (self.layerindex + 1, z), 0)
            self.dirty = True
            self.parent.setlayercb(self.layerindex)
            wx.CallAfter(self.Refresh)

    def setlayer(self, layer):
        if layer in self.layers:
            self.clearhilights()
            self.layerindex = self.layers[layer]
            self.dirty = True
            self.showall = 0
            wx.CallAfter(self.Refresh)

    def update_basescale(self):
        self.basescale = 2 * [
            min(
                float(self.size[0] - 1) / self.build_dimensions[0],
                float(self.size[1] - 1) / self.build_dimensions[1])
        ]

    def resize(self, event):
        old_basescale = self.basescale
        width, height = self.GetClientSizeTuple()
        if width < 1 or height < 1:
            return
        self.size = (width, height)
        self.update_basescale()
        zoomratio = float(self.basescale[0]) / old_basescale[0]
        wx.CallLater(200, self.zoom, 0, 0, zoomratio)

    def zoom(self, x, y, factor):
        if x == -1 and y == -1:
            side = min(self.size)
            x = y = side / 2
        self.scale = [s * factor for s in self.scale]

        self.translate = [
            x - (x - self.translate[0]) * factor,
            y - (y - self.translate[1]) * factor
        ]
        penwidth = max(
            1.0, self.filament_width * ((self.scale[0] + self.scale[1]) / 2.0))
        for pen in self.penslist:
            pen.SetWidth(penwidth)
        self.dirty = True
        wx.CallAfter(self.Refresh)

    def _line_scaler(self, x):
        return (
            self.scale[0] * x[0],
            self.scale[1] * x[1],
            self.scale[0] * x[2],
            self.scale[1] * x[3],
        )

    def _arc_scaler(self, x):
        return (
            self.scale[0] * x[0],
            self.scale[1] * x[1],
            self.scale[0] * x[2],
            self.scale[1] * x[3],
            self.scale[0] * x[4],
            self.scale[1] * x[5],
        )

    def _drawlines(self, dc, lines, pens):
        scaled_lines = map(self._line_scaler, lines)
        dc.DrawLineList(scaled_lines, pens)

    def _drawarcs(self, dc, arcs, pens):
        scaled_arcs = map(self._arc_scaler, arcs)
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        for i in range(len(scaled_arcs)):
            dc.SetPen(pens[i] if type(pens) == list else pens)
            dc.DrawArc(*scaled_arcs[i])

    def repaint_everything(self):
        width = self.scale[0] * self.build_dimensions[0]
        height = self.scale[1] * self.build_dimensions[1]
        self.blitmap = wx.EmptyBitmap(width + 1, height + 1, -1)
        dc = wx.MemoryDC()
        dc.SelectObject(self.blitmap)
        dc.SetBackground(wx.Brush((250, 250, 200)))
        dc.Clear()
        dc.SetPen(wx.Pen(wx.Colour(180, 180, 150)))
        for grid_unit in self.grid:
            if grid_unit > 0:
                for x in xrange(int(self.build_dimensions[0] / grid_unit) + 1):
                    draw_x = self.scale[0] * x * grid_unit
                    dc.DrawLine(draw_x, 0, draw_x, height)
                for y in xrange(int(self.build_dimensions[1] / grid_unit) + 1):
                    draw_y = self.scale[1] * (self.build_dimensions[1] -
                                              y * grid_unit)
                    dc.DrawLine(0, draw_y, width, draw_y)
            dc.SetPen(wx.Pen(wx.Colour(0, 0, 0)))

        if not self.showall:
            # Draw layer gauge
            dc.SetBrush(wx.Brush((43, 144, 255)))
            dc.DrawRectangle(width - 15, 0, 15, height)
            dc.SetBrush(wx.Brush((0, 255, 0)))
            if self.layers:
                dc.DrawRectangle(
                    width - 14,
                    (1.0 - (1.0 * (self.layerindex + 1)) / len(self.layers)) *
                    height, 13, height - 1)

        if self.showall:
            for i in range(len(self.layersz)):
                self.painted_layers.add(i)
                self._drawlines(dc, self.lines[i], self.pens[i])
                self._drawarcs(dc, self.arcs[i], self.arcpens[i])
            dc.SelectObject(wx.NullBitmap)
            return

        if self.layerindex < len(
                self.layers) and self.layerindex in self.lines:
            for layer_i in range(max(0, self.layerindex - 6), self.layerindex):
                self._drawlines(dc, self.lines[layer_i],
                                self.fades[self.layerindex - layer_i - 1])
                self._drawarcs(dc, self.arcs[layer_i],
                               self.fades[self.layerindex - layer_i - 1])
            self._drawlines(dc, self.lines[self.layerindex],
                            self.pens[self.layerindex])
            self._drawarcs(dc, self.arcs[self.layerindex],
                           self.arcpens[self.layerindex])

        self._drawlines(dc, self.hilight, self.hlpen)
        self._drawarcs(dc, self.hilightarcs, self.hlpen)

        self.paint_hilights(dc)

        dc.SelectObject(wx.NullBitmap)

    def repaint_partial(self):
        if self.showall:
            dc = wx.MemoryDC()
            dc.SelectObject(self.blitmap)
            for i in set(range(len(self.layersz))).difference(
                    self.painted_layers):
                self.painted_layers.add(i)
                self._drawlines(dc, self.lines[i], self.pens[i])
                self._drawarcs(dc, self.arcs[i], self.arcpens[i])
            dc.SelectObject(wx.NullBitmap)

    def paint_hilights(self, dc=None):
        if self.hilightqueue.empty() and self.hilightarcsqueue.empty():
            return
        hl = []
        if not dc:
            dc = wx.MemoryDC()
            dc.SelectObject(self.blitmap)
        while not self.hilightqueue.empty():
            hl.append(self.hilightqueue.get_nowait())
        self._drawlines(dc, hl, self.hlpen)
        hlarcs = []
        while not self.hilightarcsqueue.empty():
            hlarcs.append(self.hilightarcsqueue.get_nowait())
        self._drawarcs(dc, hlarcs, self.hlpen)
        dc.SelectObject(wx.NullBitmap)

    def paint(self, event):
        if self.dirty:
            self.dirty = False
            self.partial = False
            self.repaint_everything()
        elif self.partial:
            self.partial = False
            self.repaint_partial()
        self.paint_hilights()
        dc = wx.PaintDC(self)
        dc.SetBackground(wx.Brush(self.bgcolor))
        dc.Clear()
        dc.DrawBitmap(self.blitmap, self.translate[0], self.translate[1])
        if self.paint_overlay:
            self.paint_overlay(dc)

    def addfile_perlayer(self, gcode, showall=False):
        self.clear()
        self.gcode = gcode
        self.showall = showall
        generator = self.add_parsed_gcodes(gcode)
        generator_output = generator.next()
        while generator_output is not None:
            yield generator_output
            generator_output = generator.next()
        max_layers = len(self.layers)
        if hasattr(self.parent, "layerslider"):
            self.parent.layerslider.SetRange(0, max_layers - 1)
            self.parent.layerslider.SetValue(0)
        yield None

    def addfile(self, gcode=None, showall=False):
        generator = self.addfile_perlayer(gcode, showall)
        while generator.next() is not None:
            continue

    def _get_movement(self, start_pos, gline):
        """Takes a start position and a gcode, and returns a 3-uple containing
        (final position, line, arc), with line and arc being None if not
        used"""
        target = start_pos[:]
        target[5] = 0.0
        target[6] = 0.0
        if gline.current_x is not None: target[0] = gline.current_x
        if gline.current_y is not None: target[1] = gline.current_y
        if gline.current_z is not None: target[2] = gline.current_z
        if gline.e is not None:
            if gline.relative_e:
                target[3] += gline.e
            else:
                target[3] = gline.e
        if gline.f is not None: target[4] = gline.f
        if gline.i is not None: target[5] = gline.i
        if gline.j is not None: target[6] = gline.j

        if gline.command in ["G1"]:
            line = [
                self._x(start_pos[0]),
                self._y(start_pos[1]),
                self._x(target[0]),
                self._y(target[1])
            ]
            return target, line, None
        elif gline.command in ["G0"]:
            return target, None, None
        elif gline.command in ["G2", "G3"]:
            # startpos, endpos, arc center
            arc = [
                self._x(start_pos[0]),
                self._y(start_pos[1]),
                self._x(target[0]),
                self._y(target[1]),
                self._x(start_pos[0] + target[5]),
                self._y(start_pos[1] + target[6])
            ]
            if gline.command == "G2":  # clockwise, reverse endpoints
                arc[0], arc[1], arc[2], arc[3] = arc[2], arc[3], arc[0], arc[1]
            return target, None, arc

    def _y(self, y):
        return self.build_dimensions[1] - (y - self.build_dimensions[4])

    def _x(self, x):
        return x - self.build_dimensions[3]

    def add_parsed_gcodes(self, gcode):
        start_time = time.time()

        layer_idx = 0
        while layer_idx < len(gcode.all_layers):
            layer = gcode.all_layers[layer_idx]
            has_move = False
            for gline in layer:
                if gline.is_move:
                    has_move = True
                    break
            if not has_move:
                yield layer_idx
                layer_idx += 1
                continue
            viz_layer = len(self.layers)
            self.lines[viz_layer] = []
            self.pens[viz_layer] = []
            self.arcs[viz_layer] = []
            self.arcpens[viz_layer] = []
            for gline in layer:
                if not gline.is_move:
                    continue

                target, line, arc = self._get_movement(self.lastpos[:], gline)

                if line is not None:
                    self.lines[viz_layer].append(line)
                    self.pens[viz_layer].append(
                        self.mainpen if target[3] != self.lastpos[3]
                        else self.travelpen)
                elif arc is not None:
                    self.arcs[viz_layer].append(arc)
                    self.arcpens[viz_layer].append(self.arcpen)

                self.lastpos = target
            # Transform into a numpy array for memory efficiency
            self.lines[viz_layer] = numpy.asarray(self.lines[viz_layer],
                                                  dtype=numpy.float32)
            self.pens[viz_layer] = numpy.asarray(self.pens[viz_layer])
            self.arcs[viz_layer] = numpy.asarray(self.arcs[viz_layer],
                                                 dtype=numpy.float32)
            self.arcpens[viz_layer] = numpy.asarray(self.arcpens[viz_layer])
            # Only add layer to self.layers now to prevent the display of an
            # unfinished layer
            self.layers[layer_idx] = viz_layer
            self.layersz.append(layer.z)

            # Refresh display if more than 0.2s have passed
            if time.time() - start_time > 0.2:
                start_time = time.time()
                self.partial = True
                wx.CallAfter(self.Refresh)

            yield layer_idx
            layer_idx += 1

        self.dirty = True
        wx.CallAfter(self.Refresh)
        yield None

    def addgcodehighlight(self, gline):
        if gline.command not in ["G0", "G1", "G2", "G3"]:
            return

        target, line, arc = self._get_movement(self.hilightpos[:], gline)

        if line is not None:
            self.hilight.append(line)
            self.hilightqueue.put_nowait(line)
        elif arc is not None:
            self.hilightarcs.append(arc)
            self.hilightarcsqueue.put_nowait(arc)

        self.hilightpos = target
        wx.CallAfter(self.Refresh)
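
# A minimal sketch of the queue-draining pattern paint_hilights() relies on:
# one thread enqueues drawing primitives with put_nowait() while the paint
# handler drains them without ever blocking the GUI thread. The produce()/
# drain() names are illustrative, not part of the class above.
from queue import Queue, Empty  # "Queue" module on Python 2

hilightqueue = Queue()

def produce(line):
    # Producer side: called from the parsing/highlight thread.
    hilightqueue.put_nowait(line)

def drain():
    # Consumer side: catching Empty avoids the race between an empty()
    # check and a subsequent get_nowait() call.
    batch = []
    while True:
        try:
            batch.append(hilightqueue.get_nowait())
        except Empty:
            break
    return batch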
Example #41
0
class BaseHistorianAgent(Agent):
    '''This is the base agent for historian Agents.
    It automatically subscribes to all device publish topics.

    Event processing occurs in its own thread so as not to block the main
    thread.  Both historian_setup and publish_to_historian run in the same
    thread.

    By default the base historian will listen to 4 separate root topics
    (datalogger/*, record/*, actuators/* and device/*).  Messages published
    to actuators/* are assumed to be part of the actuation process.
    Messages published to datalogger/* are assumed to be timepoint data
    composed of units and specific types, with the assumption that they can
    be graphed easily.  Messages published to device/* are data that come
    directly from drivers.  Finally, messages published to record/* are
    handled as string data and can be customized to the user's specific
    situation.

    This base historian caches all received messages to a local database
    before publishing them to the historian.  This allows recovery from
    unexpected failures that occur before data is successfully written to
    the historian.
    '''

    def __init__(self,
                 retry_period=300.0,
                 submit_size_limit=1000,
                 max_time_publishing=30,
                 **kwargs):
        super(BaseHistorianAgent, self).__init__(**kwargs)
        self._retry_period = retry_period
        self._submit_size_limit = submit_size_limit
        self._max_time_publishing = timedelta(seconds=max_time_publishing)
        self._successful_published = set()
        self._meta_data = defaultdict(dict)

        self._event_queue = Queue()
        self._process_thread = Thread(target = self._process_loop)
        self._process_thread.daemon = True  # Don't wait on thread to exit.
        self._process_thread.start()
        # The topic cache is only meant as a local lookup and should not be
        # accessed via the implemented historians.
        self._backup_cache = {}

    @Core.receiver("onstart")
    def starting_base(self, sender, **kwargs):
        '''
        Subscribes to the platform message bus on the actuator, record,
        datalogger, and device topics to capture data.
        '''
        _log.debug("Starting base historian")

        driver_prefix = topics.DRIVER_TOPIC_BASE
        _log.debug("subscribing to {}".format(driver_prefix))
        self.vip.pubsub.subscribe(peer='pubsub',
                               prefix=driver_prefix,
                               callback=self.capture_device_data)

        _log.debug('Subscribing to: {}'.format(topics.LOGGER_BASE))
        self.vip.pubsub.subscribe(peer='pubsub',
                               prefix=topics.LOGGER_BASE, #"datalogger",
                               callback=self.capture_log_data)

        _log.debug('Subscribing to: {}'.format(topics.ACTUATOR))
        self.vip.pubsub.subscribe(peer='pubsub',
                               prefix=topics.ACTUATOR,  # actuators/*
                               callback=self.capture_actuator_data)

    @Core.receiver("onstop")
    def stopping(self, sender, **kwargs):
        '''
        Release subscriptions to the message bus because we are no longer
        able to respond to messages.
        '''
        try:
            # Unsubscribe from all topics that we are subscribed to.
            self.vip.pubsub.unsubscribe(peer='pubsub', prefix=None, callback=None)
        except KeyError:
            # means that the agent didn't start up properly so the pubsub
            # subscriptions never got finished.
            pass

    def capture_log_data(self, peer, sender, bus, topic, headers, message):
        '''Capture log data and submit it to be published by a historian.'''

        try:
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
            # we can do the proper thing when it is here
            if sender == 'pubsub.compat':
                data = jsonapi.loads(message)
            else:
                data = message
        except ValueError:
            _log.error("message for {topic} bad message string: {message_string}".format(
                topic=topic, message_string=message))
            return
        except IndexError:
            _log.error("message for {topic} missing message string".format(topic=topic))
            return

        source = 'log'
        _log.debug("Queuing {topic} from {source} for publish".format(topic=topic,
                                                                      source=source))
        for point, item in data.iteritems():
            if 'Readings' not in item or 'Units' not in item:
                _log.error("logging request for {path} missing Readings or Units".format(
                    path=topic + '/' + point))
                continue
            units = item['Units']
            dtype = item.get('data_type', 'float')
            tz = item.get('tz', None)
            if dtype == 'double':
                dtype = 'float'

            meta = {'units': units, 'type': dtype}

            readings = item['Readings']

            if not isinstance(readings, list):
                readings = [(datetime.utcnow(), readings)]
            elif isinstance(readings[0],str):
                my_ts, my_tz = process_timestamp(readings[0])
                readings = [(my_ts,readings[1])]
                if tz:
                    meta['tz'] = tz
                elif my_tz:
                    meta['tz'] = my_tz

            self._event_queue.put({'source': source,
                                   'topic': topic+'/'+point,
                                   'readings': readings,
                                   'meta':meta})

    def capture_device_data(self, peer, sender, bus, topic, headers, message):
        '''Capture device data and submit it to be published by a historian.
        
        Filter out only the */all topics for publishing to the historian.
        '''
        
        if not ALL_REX.match(topic):
            return

        # Because of the check above we know the topic ends in 'all', so
        # strip that off to get the base device.
        parts = topic.split('/')
        device = '/'.join(parts[1:-1])

        _log.debug("found topic {}".format(topic))
        timestamp_string = headers.get(headers_mod.DATE)
        timestamp, my_tz = process_timestamp(timestamp_string)

        try:
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
            # we can do the proper thing when it is here
            if sender == 'pubsub.compat':
                values = jsonapi.loads(message[0])
            else:
                values = message[0]
        except ValueError:
            _log.error("message for {topic} bad message string: {message_string}".format(
                topic=topic, message_string=message[0]))
            return
        except IndexError:
            _log.error("message for {topic} missing message string".format(topic=topic))
            return

        meta = {}
        try:
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
            # we can do the proper thing when it is here
            if sender == 'pubsub.compat':
                meta = jsonapi.loads(message[1])
            else:
                meta = message[1]
        except ValueError:
            _log.warning("meta data for {topic} bad message string: {message_string}".format(
                topic=topic, message_string=message[1]))
        except IndexError:
            _log.warning("meta data for {topic} missing message string".format(topic=topic))

        source = 'scrape'
        _log.debug("Queuing {topic} from {source} for publish".format(topic=topic,
                                                                      source=source))

        for key, value in values.iteritems():
            point_topic = device + '/' + key
            self._event_queue.put({'source': source,
                                   'topic': point_topic,
                                   'readings': [(timestamp,value)],
                                   'meta': meta.get(key,{})})

    def capture_actuator_data(self, topic, headers, message, match):
        '''Capture actuation data and submit it to be published by a historian.
        '''
        timestamp_string = headers.get('time')
        if timestamp_string is None:
            _log.error("message for {topic} missing timetamp".format(topic=topic))
            return
        try:
            timestamp = parse(timestamp_string)
        except (ValueError, TypeError) as e:
            _log.error("message for {topic} bad timetamp string: {ts_string}".format(topic=topic,
                                                                                     ts_string=timestamp_string))
            return

        parts = topic.split('/')
        topic = '/'.join(parts[ACTUATOR_TOPIC_PREFIX_PARTS:])

        try:
            value = message[0]
        except IndexError:
            _log.error("message for {topic} missing message string".format(topic=topic))
            return

        source = 'actuator'
        _log.debug("Queuing {topic} from {source} for publish".format(topic=topic,
                                                                      source=source))

        self._event_queue.put({'source': source,
                               'topic': topic,
                               'readings': [(timestamp, value)]})

    def _process_loop(self):
        '''
        The process loop is called off of the main thread and will not exit
        unless the main agent is shut down.
        '''

        _log.debug("Starting process loop.")
        self._setup_backup_db()
        self.historian_setup()

        # Now that everything is set up, make sure the topics are
        # synchronized between the backup cache and the historian.

        # Based on the state of the backlog and whether or not successful
        # publishing is currently happening (and how long it's taking),
        # we may or may not want to wait on the event queue for more input
        # before proceeding with the rest of the loop.
        wait_for_input = not bool(self._get_outstanding_to_publish())

        while True:
            try:
                _log.debug("Reading from/waiting for queue.")
                new_to_publish = [self._event_queue.get(wait_for_input, self._retry_period)]
            except Empty:
                _log.debug("Queue wait timed out. Falling out.")
                new_to_publish = []

            if new_to_publish:
                _log.debug("Checking for queue build up.")
                while True:
                    try:
                        new_to_publish.append(self._event_queue.get_nowait())
                    except Empty:
                        break

            self._backup_new_to_publish(new_to_publish)

            wait_for_input = True
            start_time = datetime.utcnow()
            
            _log.debug("Calling publish_to_historian.")
            while True:
                to_publish_list = self._get_outstanding_to_publish()
                if not to_publish_list:
                    break
                
                try:
                    self.publish_to_historian(to_publish_list)
                except Exception as exp:
                    _log.error("An unhandled exception has occurred while "
                               "publishing to historian.")
                    _log.exception(exp)
                
                if not self._any_successful_publishes():
                    break
                self._cleanup_successful_publishes()

                now = datetime.utcnow()
                if now - start_time > self._max_time_publishing:
                    wait_for_input = False
                    break
        _log.debug("Finished processing")

    def _setup_backup_db(self):
        ''' Creates a backup database for the historian if it doesn't exist.'''

        _log.debug("Setting up backup DB.")
        self._connection = sqlite3.connect('backup.sqlite',
                                           detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)

        c = self._connection.cursor()
        c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='outstanding';")

        if c.fetchone() is None:
            _log.debug("Configuring backup BD for the first time.")
            self._connection.execute('''PRAGMA auto_vacuum = FULL''')
            self._connection.execute('''CREATE TABLE outstanding
                                        (id INTEGER PRIMARY KEY,
                                         ts timestamp NOT NULL,
                                         source TEXT NOT NULL,
                                         topic_id INTEGER NOT NULL,
                                         value_string TEXT NOT NULL,
                                         UNIQUE(ts, topic_id, source))''')

        c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='metadata';")

        if c.fetchone() is None:
            self._connection.execute('''CREATE TABLE metadata
                                        (source TEXT NOT NULL,
                                         topic_id INTEGER NOT NULL,
                                         name TEXT NOT NULL,
                                         value TEXT NOT NULL,
                                         UNIQUE(topic_id, source, name))''')
        else:
            c.execute("SELECT * FROM metadata")
            for row in c:
                self._meta_data[(row[0], row[1])][row[2]] = row[3]

        c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='topics';")

        if c.fetchone() is None:
            self._connection.execute('''create table topics
                                        (topic_id INTEGER PRIMARY KEY,
                                         topic_name TEXT NOT NULL,
                                         UNIQUE(topic_name))''')
        else:
            c.execute("SELECT * FROM topics")
            for row in c:
                self._backup_cache[row[0]] = row[1]
                self._backup_cache[row[1]] = row[0]

        c.close()

        self._connection.commit()

    def _get_outstanding_to_publish(self):
        _log.debug("Getting oldest outstanding to publish.")
        c = self._connection.cursor()
        c.execute('select * from outstanding order by ts limit ?', (self._submit_size_limit,))

        results = []
        for row in c:
            _id = row[0]
            timestamp = row[1]
            source = row[2]
            topic_id = row[3]
            value = jsonapi.loads(row[4])
            meta = self._meta_data[(source, topic_id)].copy()
            results.append({'_id':_id,
                            'timestamp': timestamp.replace(tzinfo=pytz.UTC),
                            'source': source,
                            'topic': self._backup_cache[topic_id],
                            'value': value,
                            'meta': meta})

        c.close()

        return results

    def _cleanup_successful_publishes(self):
        _log.debug("Cleaning up successfully published values.")
        c = self._connection.cursor()

        if None in self._successful_published:
            c.execute('''DELETE FROM outstanding
                        WHERE ROWID IN
                        (SELECT ROWID FROM outstanding
                          ORDER BY ts LIMIT ?)''', (self._submit_size_limit,))
        else:
            c.executemany('''DELETE FROM outstanding
                            WHERE id = ?''',
                          ((_id,) for _id in
                           sorted(self._successful_published)))

        self._connection.commit()

        self._successful_published = set()

    def _any_successful_publishes(self):
        return bool(self._successful_published)

    def _backup_new_to_publish(self, new_publish_list):
        _log.debug("Backing up unpublished values.")
        c = self._connection.cursor()

        for item in new_publish_list:
            source = item['source']
            topic = item['topic']
            meta = item.get('meta', {})
            values = item['readings']

            topic_id = self._backup_cache.get(topic)

            if topic_id is None:
                c.execute('''INSERT INTO topics values (?,?)''', (None, topic))
                c.execute('''SELECT last_insert_rowid()''')
                row = c.fetchone()
                topic_id = row[0]
                self._backup_cache[topic_id] = topic
                self._backup_cache[topic] = topic_id

            for name, value in meta.iteritems():
                c.execute('''INSERT OR REPLACE INTO metadata values(?, ?, ?, ?)''',
                          (source, topic_id, name, value))
                self._meta_data[(source, topic_id)][name] = value

            for timestamp, value in values:
                c.execute('''INSERT OR REPLACE INTO outstanding values(NULL, ?, ?, ?, ?)''',
                          (timestamp, source, topic_id, jsonapi.dumps(value)))

        self._connection.commit()

    def report_published(self, record):
        if isinstance(record, list):
            for x in record:
                self._successful_published.add(x['_id'])
        else:
            self._successful_published.add(record['_id'])

    def report_all_published(self):
        self._successful_published.add(None)

    @abstractmethod
    def publish_to_historian(self, to_publish_list):
        '''Main publishing method for historian Agents.'''

    def historian_setup(self):
        '''Optional setup routine, run in the processing thread before
        the main processing loop starts.'''
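
# A minimal (hypothetical) concrete historian built on the base class above,
# showing only the contract visible in this example: implement the abstract
# publish_to_historian() and acknowledge stored records via report_published()
# or report_all_published() so the backup cache can be cleaned up.
class PrintingHistorian(BaseHistorianAgent):
    def publish_to_historian(self, to_publish_list):
        for item in to_publish_list:
            print("{}: {}".format(item['topic'], item['value']))
        # Everything was handled, so all cached rows may be deleted.
        self.report_all_published()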
Example #42
0
class ChaseLogger(object):
    """ Chase Data Logger Class.
        Log all chase data into a file as lines of JSON.
    """
    def __init__(self, filename=None, log_dir="./log_files"):

        if filename is not None:
            # Use user-supplied filename if provided
            self.filename = filename
        else:
            # Otherwise, create a filename based on the current time.
            self.filename = os.path.join(
                log_dir,
                datetime.datetime.utcnow().strftime("%Y%m%d-%H%MZ.log"))

        self.file_lock = Lock()

        # Input Queue.
        self.input_queue = Queue()

        # Open the file.
        try:
            self.f = open(self.filename, "a")
            logging.info("Logging - Opened log file %s." % self.filename)
        except Exception as e:
            self.log_error("Logging - Could not open log file - %s" % str(e))
            return

        # Start queue processing thread.
        self.input_processing_running = True
        self.log_process_thread = Thread(target=self.process_queue)
        self.log_process_thread.start()

    def add_car_position(self, data):
        """ Log a chase car position update.
        Input dict expected to be in the format:
        {
            'time'  :   _time_dt,
            'lat'   :   _lat,
            'lon'   :   _lon,
            'alt'   :   _alt,
            'comment':  _comment
        }

        """

        data["log_type"] = "CAR POSITION"
        data["log_time"] = pytz.utc.localize(
            datetime.datetime.utcnow()).isoformat()

        # Convert the input datetime object into a string.
        data["time"] = data["time"].isoformat()

        # Add it to the queue if we are running.
        if self.input_processing_running:
            self.input_queue.put(data)
        else:
            self.log_error("Processing not running, discarding.")

    def add_balloon_telemetry(self, data):
        """ Log balloon telemetry.
        """

        data["log_type"] = "BALLOON TELEMETRY"
        data["log_time"] = pytz.utc.localize(
            datetime.datetime.utcnow()).isoformat()

        # Convert the input datetime object into a string.
        data["time"] = data["time_dt"].isoformat()
        # Remove the time_dt element (this cannot be serialised to JSON).
        data.pop("time_dt")

        # Add it to the queue if we are running.
        if self.input_processing_running:
            self.input_queue.put(data)
        else:
            self.log_error("Processing not running, discarding.")

    def add_balloon_prediction(self, data):
        """ Log a prediction run """

        data["log_type"] = "PREDICTION"
        data["log_time"] = pytz.utc.localize(
            datetime.datetime.utcnow()).isoformat()

        # Add it to the queue if we are running.
        if self.input_processing_running:
            self.input_queue.put(data)
        else:
            self.log_error("Processing not running, discarding.")

    def add_bearing(self, data):
        """ Log a packet of bearing data """

        data["log_type"] = "BEARING"
        data["log_time"] = pytz.utc.localize(
            datetime.datetime.utcnow()).isoformat()

        # Add it to the queue if we are running.
        if self.input_processing_running:
            self.input_queue.put(data)
        else:
            self.log_error("Processing not running, discarding.")

    def process_queue(self):
        """ Process data from the input queue, and write telemetry to log files.
        """
        self.log_info("Started Chase Logger Thread.")

        while self.input_processing_running:

            # Process everything in the queue.
            self.file_lock.acquire()
            while self.input_queue.qsize() > 0:
                try:
                    _data = self.input_queue.get_nowait()
                    _data_str = json.dumps(_data)
                    self.f.write(_data_str + "\n")
                except Exception as e:
                    self.log_error("Error processing data - %s" % str(e))

            self.file_lock.release()
            # Sleep while waiting for some new data.
            time.sleep(5)

    def running(self):
        """ Check if the logging thread is running. 

        Returns:
            bool: True if the logging thread is running.
        """
        return self.input_processing_running

    def close(self):
        try:
            self.input_processing_running = False
            # Let the processing thread finish its final pass before closing
            # the file, so it never writes to a closed handle.
            self.log_process_thread.join()
            self.f.close()
        except Exception as e:
            self.log_error("Error when closing - %s" % str(e))

        self.log_info("Stopped Chase Logger Thread.")

    def log_debug(self, line):
        """ Helper function to log a debug message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.debug("Chase Logger - %s" % line)

    def log_info(self, line):
        """ Helper function to log an informational message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.info("Chase Logger - %s" % line)

    def log_error(self, line):
        """ Helper function to log an error message with a descriptive heading. 
        Args:
            line (str): Message to be logged.
        """
        logging.error("Chase Logger - %s" % line)
Example #43
0
class ParseWorker(Thread):

    daemon = True
    SLEEP_TIME = 1

    def __init__(self):
        Thread.__init__(self)
        self.requests = Queue()
        self.request_count = 0
        self.parse_items = {}

    def run(self):
        mod, func = 'calibre.gui2.tweak_book.preview', 'parse_html'
        try:
            # Connect to the worker and send a dummy job to initialize it
            self.worker = offload_worker(priority='low')
            self.worker(mod, func, '<p></p>')
        except:
            import traceback
            traceback.print_exc()
            return

        while True:
            time.sleep(self.SLEEP_TIME)
            x = self.requests.get()
            requests = [x]
            while True:
                try:
                    requests.append(self.requests.get_nowait())
                except Empty:
                    break
            if shutdown in requests:
                self.worker.shutdown()
                break
            request = sorted(requests, reverse=True)[0]
            del requests
            pi, data = request[1:]
            try:
                res = self.worker(mod, func, data)
            except:
                import traceback
                traceback.print_exc()
            else:
                parsed_data = res['result']
                if res['tb']:
                    prints("Parser error:")
                    prints(res['tb'])
                else:
                    pi.parsed_data = parsed_data

    def add_request(self, name):
        data = get_data(name)
        ldata, hdata = len(data), hash(data)
        pi = self.parse_items.get(name, None)
        if pi is None:
            self.parse_items[name] = pi = ParseItem(name)
        else:
            if pi.length == ldata and pi.fingerprint == hdata:
                return
            pi.parsed_data = None
        pi.length, pi.fingerprint = ldata, hdata
        self.requests.put((self.request_count, pi, data))
        self.request_count += 1

    def shutdown(self):
        self.requests.put(shutdown)

    def get_data(self, name):
        return getattr(self.parse_items.get(name, None), 'parsed_data', None)

    def clear(self):
        self.parse_items.clear()
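
# A standalone sketch of the request-coalescing idiom run() uses above:
# block on the first request, drain everything queued behind it, and keep
# only the newest one so stale parses are skipped. latest_request() is an
# illustrative name, not calibre API.
from queue import Queue, Empty

def latest_request(requests):
    batch = [requests.get()]  # block until at least one request arrives
    while True:
        try:
            batch.append(requests.get_nowait())
        except Empty:
            break
    # Requests are (request_count, parse_item, data) tuples, so the highest
    # request_count is the most recent.
    return max(batch, key=lambda r: r[0])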
Example #44
0
class ProcessLogger(object):  # pylint: disable=too-many-public-methods,too-many-instance-attributes
    """
    The Process Logger.

    Used to log messages to the Trace Logger.
    """
    def __init__(self,
                 instance_name,
                 message_service,
                 publish_callback=None,
                 register_broadcast_callback=None):
        """
        :param instance_name: Name of the Process Logger instance.
        :type instance_name: str
        :param message_service: The Message Service to use. If ``None``, then
            ``publish_callback`` must be specified.
        :type message_service: systemlink.messagebus.message_service.MessageService
            or None
        :param publish_callback: The callback for when publishing completes.
            Ignored and may be ``None`` if ``message_service`` is specified.
            This is a callable object or function that takes one argument of type
            :class:`systemlink.messagebus.message_base.MessageBase`.
        :type publish_callback: callable or None
        :param register_broadcast_callback: The callback for when a broadcast
            is registered. Ignored and may be ``None`` if ``message_service``
            is specified. This is a callable object or function that takes
            two arguments, the first argument of type
            :class:`systemlink.messagebus.generic_message.GenericMessage`
            and the second argument is a callable object or function that
            also takes two arguments itself, the first is of type ``type``
            which is the message class type and the second is of type
            :class:`systemlink.messagebus.generic_message.GenericMessage`.
        :type register_broadcast_callback: callable or None
        """
        self._log_to_trace_logger = False
        self._active_log_buffer = LogBuffer()
        if message_service is not None:
            message_service.trace_logger = self.make_trace_logger(
                message_service.service_name)
        self._stop_logging_thread = False
        self._instance_name = instance_name
        if message_service is not None:
            self._publish_callback = message_service.publisher.publish_message_callback
        else:
            self._publish_callback = publish_callback
        self._buffer_pool = BufferPool()
        self._buffers_ready_to_log = Queue()
        if message_service is not None:
            message_service.register_callback(
                trace_logger_messages.TraceLoggerSnapshotBroadcast,
                self._update_trace_points_callback)
        else:
            register_broadcast_callback(
                trace_logger_messages.TraceLoggerSnapshotBroadcast,
                self._update_trace_points_callback)
        self._trace_points = {}

        self._logging_thread_wakeup_event = threading.Semaphore(0)

        self._logging_thread = threading.Thread(
            target=self._logging_thread_func)
        self._logging_thread.daemon = True
        self._logging_thread.start()

        # Prompt the TraceLoggerService to broadcast an update
        routed_message = \
            trace_logger_messages.TraceLoggerBroadcastTracePointsRoutedMessage()
        self._publish_callback(routed_message)

    def __del__(self):
        self.close()

    def close(self):
        """
        Close this instance of :class:`ProcessLogger`.
        """
        if self._stop_logging_thread:
            return
        self._stop_logging_thread = True
        self._logging_thread_wakeup_event.release()
        self._logging_thread.join()
        self._logging_thread = None

    def make_trace_logger(self, name, parent=None, log_to_trace_logger=False):
        """
        Create an instance of
        :class:`systemlink.messagebus.trace_logger.TraceLogger`.

        :param name: The last part of the name to use for this Trace Logger.
            Will not be the full name if ``parent`` is not ``None``.
            May be ``None``.
        :type name: str or None
        :param parent: The parent Trace Logger object used to create this one.
            May be ``None``.
        :type parent: systemlink.messagebus.trace_logger.TraceLogger
        :param log_to_trace_logger: ``True`` if the created :class:`TraceLogger`
            instance should automatically send Python logging to the Trace
            Logger service. Only one :class:`TraceLogger` instance may do so
            per :class:`systemlink.messagebus.process_logger.ProcessLogger`
            instance. ``False`` otherwise.
        :type log_to_trace_logger: bool
        :return: A new instance of
            :class:`systemlink.messagebus.trace_logger.TraceLogger`.
        :rtype: systemlink.messagebus.trace_logger.TraceLogger
        """
        return TraceLogger(name,
                           parent,
                           self,
                           log_to_trace_logger=log_to_trace_logger)

    @property
    def current_trace_points(self):
        """
        Get the current trace points.

        :return: The current trace points.
        :rtype: list(systemlink.messagebus.trace_point.TracePoint)
        """
        return list(self._trace_points.values())

    def make_or_lookup_trace_point(self, name):
        """
        Create or find a Trace Point.

        :param name: Name of the Trace Point.
        :type name: str
        :return: The associated Trace Point.
        :rtype: systemlink.messagebus.trace_point.TracePoint
        """
        ret = self._trace_points.get(name)
        if not ret:
            trace_point = TracePoint(name=name)
            self._trace_points[name] = trace_point
            ret = trace_point
        routed_message = \
            trace_logger_messages.TraceLoggerRegisterTracePointRoutedMessage(name)
        routed_message.ignore_response = True
        self._publish_callback(routed_message)
        return ret

    def log(self, logging_module_name, log_string, trace_point=None):
        """
        Log a string to the Trace Logger.

        :param logging_module_name: The name of the module that wants to log.
        :type logging_module_name: str
        :param log_string: The string to log.
        :type log_string: str
        :param trace_point: A
            :class:`systemlink.messagebus.trace_point.TracePoint` object or
            ``None`` if there is no associated Trace Point.
        :type trace_point: systemlink.messagebus.trace_point.TracePoint or None
        """
        self._internal_log(logging_module_name, trace_point, LOG_TYPE_LOG,
                           log_string)

    def log_info(self, logging_module_name, log_string, trace_point=None):
        """
        Log an information string to the Trace Logger.

        :param logging_module_name: The name of the module that wants to log.
        :type logging_module_name: str
        :param log_string: The string to log.
        :type log_string: str
        :param trace_point: A
            :class:`systemlink.messagebus.trace_point.TracePoint` object or
            ``None`` if there is no associated Trace Point.
        :type trace_point: systemlink.messagebus.trace_point.TracePoint or None
        """
        self._internal_log(logging_module_name, trace_point, LOG_TYPE_INFO,
                           log_string)

    def log_error(self, logging_module_name, log_string, trace_point=None):
        """
        Log an error string to the Trace Logger.

        :param logging_module_name: The name of the module that wants to log.
        :type logging_module_name: str
        :param log_string: The string to log.
        :type log_string: str
        :param trace_point: A
            :class:`systemlink.messagebus.trace_point.TracePoint` object or
            ``None`` if there is no associated Trace Point.
        :type trace_point: systemlink.messagebus.trace_point.TracePoint or None
        """
        self._internal_log(logging_module_name, trace_point, LOG_TYPE_ERROR,
                           log_string)

    def _swap_active_buffer(self, mandatory):
        """
        There are 2 buffers in use by the Process Logger. The ``active``
        buffer is of type :class:`systemlink.messagebus.log_buffer.LogBuffer`
        and it is the buffer that is being added to by calls to ``log``,
        ``log_info``, and ``log_error``. The ``ready_to_log`` queue is a
        :class:`queue.Queue` of
        :class:`systemlink.messagebus.log_buffer.LogBuffer` and is used to send
        the log messages via AMQP to the Trace Logger service. This function
        will move the ``active`` buffer to the ``ready_to_log`` queue and
        create a new ``active`` buffer.

        :param mandatory: If ``True``, will always move the ``active``
            buffer to the ``ready_to_log`` queue. If ``False`` will only
            move the ``active`` buffer to the ``ready_to_log`` queue if the
            ``active`` buffer is not empty.
        :type mandatory: bool
        """
        if mandatory or self._active_log_buffer:
            old_buffer = self._active_log_buffer
            self._active_log_buffer = self._buffer_pool.get_buffer()
            try:
                self._buffers_ready_to_log.put_nowait(old_buffer)
            except Full:
                pass

    def _internal_log(self, logging_module_name, trace_point, log_type,
                      log_string):
        """
        Internal function to log to the Trace Logger.

        :param logging_module_name: The name of the module that wants to log.
        :type logging_module_name: str
        :param trace_point: A
            :class:`systemlink.messagebus.trace_point.TracePoint` object or
            ``None`` if there is no associated Trace Point.
        :type trace_point: systemlink.messagebus.trace_point.TracePoint or None
        :param log_type: The type of the log entry, e.g. ``LOG_TYPE_LOG``,
            ``LOG_TYPE_INFO`` or ``LOG_TYPE_ERROR``.
        :param log_string: The string to log.
        :type log_string: str
        """
        if not self._stop_logging_thread:
            logged = self._active_log_buffer.add_entry(logging_module_name,
                                                       trace_point, log_type,
                                                       log_string)
            if logged:
                return
            self._swap_active_buffer(True)
            self._logging_thread_wakeup_event.release()
            self._active_log_buffer.add_entry(logging_module_name, trace_point,
                                              log_type, log_string)

    def _logging_thread_func(self):
        """
        The thread function for logging.

        It will write the log immediately if the logging thread wakeup event
        is set. Otherwise, it will wait 100 milliseconds and then log.
        On exit, it will also log everything left in the buffer.
        """
        try:
            while not self._stop_logging_thread:
                if sys.version_info[0] >= 3:
                    self._logging_thread_wakeup_event.acquire(timeout=0.1)  # pylint: disable=unexpected-keyword-arg
                elif not self._logging_thread_wakeup_event.acquire(
                        blocking=False):
                    time.sleep(0.1)
                if self._buffers_ready_to_log.empty():
                    self._swap_active_buffer(False)
                self._write_the_log()
        except (KeyboardInterrupt, SystemExit):
            LOGGER.debug(
                'ProcessLogger logging_thread exiting due to process exit')
        self._swap_active_buffer(True)
        self._write_the_log()

    def _write_the_log(self):
        """
        Write everything currently in the ``self._buffers_ready_to_log`` queue
        to the Trace Logger service. It will stop logging if it encounters a
        :class:`systemlink.messagebus.exceptions.SystemLinkException`.
        """
        done = False
        while not done:
            if not self._buffers_ready_to_log.empty():
                ready_buffer = self._buffers_ready_to_log.get_nowait()

                routed_message = (
                    trace_logger_messages.TraceLoggerStoreEntriesRoutedMessage(
                        ready_buffer.entries))
                routed_message.ignore_response = True

                try:
                    self._publish_callback(routed_message)
                    self._buffer_pool.return_buffer(ready_buffer)
                except SystemLinkException:
                    done = True
            else:
                done = True

    @property
    def log_to_trace_logger(self):
        """
        Get whether this ProcessLogger instance is automatically sending
        Python logging to the Trace Logger service.

        :return: ``True`` if Python logging is automatically sent to
            the Trace Logger service. ``False`` otherwise.
        :rtype: bool
        """
        return self._log_to_trace_logger

    def _update_trace_points_callback(self, generic_message):
        """
        This is invoked by the framework as a callback when a message is
        received that triggers the condition of the callback.

        :param generic_message: The message received that triggered this callback.
        :type generic_message: :class:`systemlink.messagebus.generic_message.GenericMessage`
        """
        snapshot = trace_logger_messages.TraceLoggerSnapshotBroadcast.from_message(
            generic_message)
        if not snapshot.settings:
            return

        lc_instance_name = self._instance_name.lower()

        for trace_point_setting in snapshot.settings:
            name = trace_point_setting.name
            trace_point_value = trace_point_setting.enabled

            for trace_point_exception in trace_point_setting.exceptions:
                if trace_point_exception.lower() == lc_instance_name:
                    trace_point_value = not trace_point_setting.enabled
                    break

            trace_point = self._trace_points.get(name)
            if trace_point is None:
                trace_point = TracePoint(name)
                if trace_point_value:
                    trace_point.enable()
                else:
                    trace_point.disable()

                self._trace_points[name] = trace_point
            else:
                if trace_point.is_enabled != trace_point_value:
                    if trace_point_value:
                        trace_point.enable()
                    else:
                        trace_point.disable()
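
# A stripped-down sketch of the double-buffer idiom _swap_active_buffer()
# implements: writers append to an "active" buffer, and the flusher swaps
# it for an empty one before draining, so logging calls never block on I/O.
# The DoubleBuffer name and a plain list standing in for LogBuffer are
# illustrative, not the systemlink API.
from queue import Queue, Full

class DoubleBuffer(object):
    def __init__(self, max_pending=16):
        self.active = []
        self.ready = Queue(maxsize=max_pending)

    def add(self, entry):
        # Writer side: cheap append, no I/O.
        self.active.append(entry)

    def swap(self, mandatory=False):
        # Move the active buffer onto the ready queue and start a new one.
        if mandatory or self.active:
            old, self.active = self.active, []
            try:
                self.ready.put_nowait(old)
            except Full:
                pass  # drop on overload, as the example above does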
Example #45
0
class ModelDriver(Driver):
    r"""Base class for Model drivers and for running executable based models.

    Args:
        name (str): Driver name.
        args (str or list): Argument(s) for running the model on the command
            line. This should be a complete command including the necessary
            executable and command line arguments to that executable.
        is_server (bool, optional): If True, the model is assumed to be a server
            and an instance of :class:`yggdrasil.drivers.ServerDriver`
            is started. Defaults to False.
        client_of (str, list, optional): The names of one or more servers that
            this model is a client of. Defaults to an empty list.
        with_strace (bool, optional): If True, the command is run with strace (on
            Linux) or dtrace (on MacOS). Defaults to False.
        strace_flags (list, optional): Flags to pass to strace (or dtrace).
            Defaults to [].
        with_valgrind (bool, optional): If True, the command is run with valgrind.
            Defaults to False.
        valgrind_flags (list, optional): Flags to pass to valgrind. Defaults to [].
        model_index (int, optional): Index of model in list of models being run.
            Defaults to 0.
        **kwargs: Additional keyword arguments are passed to parent class.

    Attributes:
        args (list): Argument(s) for running the model on the command line.
        process (:class:`yggdrasil.tools.YggPopen`): Process used to run
            the model.
        is_server (bool): If True, the model is assumed to be a server and an
            instance of :class:`yggdrasil.drivers.ServerDriver` is
            started.
        client_of (list): The names of server models that this model is a
            client of.
        with_strace (bool): If True, the command is run with strace or dtrace.
        strace_flags (list): Flags to pass to strace/dtrace.
        with_valgrind (bool): If True, the command is run with valgrind.
        valgrind_flags (list): Flags to pass to valgrind.
        model_index (int): Index of model in list of models being run.

    Raises:
        RuntimeError: If both with_strace and with_valgrind are True.

    """

    _language = 'executable'
    _schema_type = 'model'
    _schema_required = ['name', 'language', 'args', 'working_dir']
    _schema_properties = {
        'name': {
            'type': 'string'
        },
        'language': {
            'type': 'string'
        },
        'args': {
            'type': 'array',
            'items': {
                'type': 'string'
            }
        },
        'inputs': {
            'type': 'array',
            'default': [],
            'items': {
                '$ref': '#/definitions/comm'
            }
        },
        'outputs': {
            'type': 'array',
            'default': [],
            'items': {
                '$ref': '#/definitions/comm'
            }
        },
        'working_dir': {
            'type': 'string'
        },
        'is_server': {
            'type': 'boolean',
            'default': False
        },
        'client_of': {
            'type': 'array',
            'items': {
                'type': 'string'
            },
            'default': []
        },
        'with_strace': {
            'type': 'boolean',
            'default': False
        },
        'strace_flags': {
            'type': 'array',
            'default': [],
            'items': {
                'type': 'string'
            }
        },
        'with_valgrind': {
            'type': 'boolean',
            'default': False
        },
        'valgrind_flags': {
            'type': 'array',
            'default': ['--leak-check=full'],  # '-v'
            'items': {
                'type': 'string'
            }
        }
    }

    def __init__(self, name, args, model_index=0, **kwargs):
        for k, v in self._schema_properties.items():
            if k in [
                    'name', 'language', 'args', 'inputs', 'outputs',
                    'working_dir'
            ]:
                continue
            default = v.get('default', None)
            setattr(self, k, kwargs.pop(k, default))
        super(ModelDriver, self).__init__(name, **kwargs)
        self.debug(str(args))
        if not isinstance(args, list):
            args = [args]
        self.args = []
        for a in args:
            self.args.append(str(a))
        self.model_process = None
        self.queue = Queue()
        self.queue_thread = None
        self.event_process_kill_called = Event()
        self.event_process_kill_complete = Event()
        # Strace/valgrind
        if self.with_strace and self.with_valgrind:
            raise RuntimeError("Trying to run with strace and valgrind.")
        if (((self.with_strace or self.with_valgrind)
             and platform._is_win)):  # pragma: windows
            raise RuntimeError("strace/valgrind options invalid on windows.")
        self.model_index = model_index
        self.env_copy = ['LANG', 'PATH', 'USER']
        self._exit_line = b'EXIT'
        # print(os.environ.keys())
        for k in self.env_copy:
            if k in os.environ:
                self.env[k] = os.environ[k]

    @classmethod
    def is_installed(cls):
        r"""Determine if this model driver is installed on the current
        machine.

        Returns:
            bool: Truth of if this model driver can be run on the current
                machine.

        """
        return False

    def set_env(self):
        env = copy.deepcopy(self.env)
        env.update(os.environ)
        env['YGG_SUBPROCESS'] = "True"
        env['YGG_MODEL_INDEX'] = str(self.model_index)
        return env

    def before_start(self):
        r"""Actions to perform before the run starts."""
        env = self.set_env()
        pre_args = []
        if self.with_strace:
            if platform._is_linux:
                pre_cmd = 'strace'
            elif platform._is_mac:
                pre_cmd = 'dtrace'
            pre_args += [pre_cmd] + self.strace_flags
        elif self.with_valgrind:
            pre_args += ['valgrind'] + self.valgrind_flags
        # print(pre_args + self.args)
        self.model_process = tools.YggPopen(pre_args + self.args,
                                            env=env,
                                            cwd=self.working_dir,
                                            forward_signals=False,
                                            shell=platform._is_win)
        # Start thread to queue output
        self.queue_thread = tools.YggThreadLoop(
            target=self.enqueue_output_loop, name=self.name + '.EnqueueLoop')
        self.queue_thread.start()

    def enqueue_output_loop(self):
        r"""Keep passing lines to queue."""
        # if self.model_process_complete:
        #     self.debug("Process complete")
        #     self.queue_thread.set_break_flag()
        #     self.queue.put(self._exit_line)
        #     return
        try:
            line = self.model_process.stdout.readline()
        except BaseException as e:  # pragma: debug
            print(e)
            line = ""
        if len(line) == 0:
            # self.info("%s: Empty line from stdout" % self.name)
            self.queue_thread.set_break_flag()
            self.queue.put(self._exit_line)
            self.debug("End of model output")
            try:
                self.model_process.stdout.close()
            except BaseException:  # pragma: debug
                pass
        else:
            try:
                self.queue.put(line.decode('utf-8'))
            except BaseException as e:  # pragma: debug
                warnings.warn("Error in printing output: %s" % e)

    def before_loop(self):
        r"""Actions before loop."""
        self.debug('Running %s from %s with cwd %s and env %s', self.args,
                   os.getcwd(), self.working_dir, pformat(self.env))

    def run_loop(self):
        r"""Loop to check if model is still running and forward output."""
        # Continue reading until there is not any output
        try:
            line = self.queue.get_nowait()
        except Empty:
            # if self.queue_thread.was_break:
            #     self.debug("No more output")
            #     self.set_break_flag()
            # This sleep is necessary to allow changes in queue without lock
            self.sleep()
            return
        else:
            if (line == self._exit_line):
                self.debug("No more output")
                self.set_break_flag()
            else:
                self.print_encoded(line, end="")
                sys.stdout.flush()

    def after_loop(self):
        r"""Actions to perform after run_loop has finished. Mainly checking
        if there was an error and then handling it."""
        self.debug('')
        if self.queue_thread is not None:
            self.queue_thread.join(self.sleeptime)
            if self.queue_thread.is_alive():
                self.info("Queue thread still alive")
                # Loop was broken from outside, kill the queueing thread
                self.kill_process()
                # self.queue_thread.set_break_flag()
                # try:
                #     self.model_process.stdout.close()
                # except BaseException:  # pragma: debug
                #     self.error("Close during concurrent operation")
                return
        self.wait_process(self.timeout, key_suffix='.after_loop')
        self.kill_process()

    @property
    def model_process_complete(self):
        r"""bool: Has the process finished or not. Returns True if the process
        has not started."""
        if self.model_process is None:  # pragma: debug
            return True
        return (self.model_process.poll() is not None)

    def wait_process(self, timeout=None, key=None, key_suffix=None):
        r"""Wait for some amount of time for the process to finish.

        Args:
            timeout (float, optional): Time (in seconds) that should be waited.
                Defaults to None and is infinite.
            key (str, optional): Key that should be used to register the timeout.
                Defaults to None and set based on the stack trace.
            key_suffix (str, optional): Suffix that should be appended to the
                timeout key. Defaults to None.

        Returns:
            bool: True if the process completed. False otherwise.

        """
        if not self.was_started:  # pragma: debug
            return True
        T = self.start_timeout(timeout,
                               key_level=1,
                               key=key,
                               key_suffix=key_suffix)
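        # Poll until the process exits or the timeout registered above fires.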
        while ((not T.is_out)
               and (not self.model_process_complete)):  # pragma: debug
            self.sleep()
        self.stop_timeout(key_level=1, key=key, key_suffix=key_suffix)
        return self.model_process_complete

    def kill_process(self):
        r"""Kill the process running the model, checking return code."""
        if not self.was_started:  # pragma: debug
            self.debug('Process was never started.')
            self.set_break_flag()
            self.event_process_kill_called.set()
            self.event_process_kill_complete.set()
        if self.event_process_kill_called.is_set():  # pragma: debug
            self.debug('Process has already been killed.')
            return
        self.event_process_kill_called.set()
        with self.lock:
            self.debug('')
            if not self.model_process_complete:  # pragma: debug
                self.error("Process is still running. Killing it.")
                try:
                    self.model_process.kill()
                    self.debug("Waiting %f s for process to be killed",
                               self.timeout)
                    self.wait_process(self.timeout, key_suffix='.kill_process')
                except BaseException:  # pragma: debug
                    self.exception("Error killing model process")
            assert (self.model_process_complete)
            if self.model_process.returncode != 0:
                self.error("return code of %s indicates model error.",
                           str(self.model_process.returncode))
            self.event_process_kill_complete.set()
            if self.queue_thread is not None:
                if not self.was_break:  # pragma: debug
                    # Wait for messages to be printed
                    self.debug("Waiting for queue_thread to finish up.")
                    self.queue_thread.wait(self.timeout)
                if self.queue_thread.is_alive():  # pragma: debug
                    self.debug(
                        "Setting break flag for queue_thread to finish up.")
                    self.queue_thread.set_break_flag()
                    self.queue_thread.wait(self.timeout)
                    try:
                        self.model_process.stdout.close()
                        self.queue_thread.wait(self.timeout)
                    except BaseException:  # pragma: debug
                        self.exception("Closed during concurrent action")
                    if self.queue_thread.is_alive():  # pragma: debug
                        self.error("Queue thread was not terminated.")

    def graceful_stop(self):
        r"""Gracefully stop the driver."""
        self.debug('')
        self.wait_process(self.timeout, key_suffix='.graceful_stop')
        super(ModelDriver, self).graceful_stop()
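
The pattern above — a dedicated thread that enqueues subprocess output and a
driver loop that polls the queue with get_nowait() — is worth a standalone
sketch. The following is a minimal, hedged reconstruction; the sentinel object
and helper names are illustrative, not part of the driver's API:

import subprocess
import sys
import threading
import time
from queue import Empty, Queue

_EXIT_LINE = object()  # stand-in for the driver's _exit_line sentinel

def enqueue_output(proc, queue):
    # Read lines until EOF, then push the sentinel so the consumer can stop.
    for raw in iter(proc.stdout.readline, b''):
        queue.put(raw.decode('utf-8'))
    queue.put(_EXIT_LINE)
    proc.stdout.close()

proc = subprocess.Popen([sys.executable, '-c', "print('hello')"],
                        stdout=subprocess.PIPE)
q = Queue()
threading.Thread(target=enqueue_output, args=(proc, q), daemon=True).start()
while True:
    try:
        line = q.get_nowait()
    except Empty:
        time.sleep(0.01)  # the driver sleeps here instead of busy-waiting
        continue
    if line is _EXIT_LINE:
        break
    sys.stdout.write(line)
proc.wait()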
Example #46
0
class PopenWrapper(subprocess.Popen):
    _locked = False

    def __init__(self,
                 args,
                 bufsize=1,
                 stdin=subprocess.PIPE,
                 stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE,
                 **kwargs):
        super(PopenWrapper, self).__init__(args,
                                           bufsize=bufsize,
                                           stdin=stdin,
                                           stdout=stdout,
                                           stderr=stderr,
                                           **kwargs)
        if "--logfile" in args:
            logfile_path = args[args.index("--logfile") + 1]
            logfile = open(logfile_path, 'a')
        else:
            logfile = None
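        # Drain stdout and stderr on daemon threads so that reading the pipes
        # can never block the parent process.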
        self.stdout_queue = Queue()
        self.stdout_thread = Thread(
            target=enqueue_stream,
            args=(self.stdout, self.stdout_queue, logfile),
        )
        self.stdout_thread.daemon = True
        self.stdout_thread.start()

        self.stderr_queue = Queue()
        self.stderr_thread = Thread(
            target=enqueue_stream,
            args=(self.stderr, self.stderr_queue, logfile),
        )
        self.stderr_thread.daemon = True
        self.stderr_thread.start()

    def get_stdout_nowait(self):
        try:
            return self.stdout_queue.get_nowait()
        except Empty:
            return None

    def get_stderr_nowait(self):
        try:
            return self.stderr_queue.get_nowait()
        except Empty:
            return None

    _output_generator = None

    def get_output_nowait(self):
        if self._output_generator is None:

            def output_generator():
                while True:
                    yield self.get_stdout_nowait()
                    yield self.get_stderr_nowait()

            self._output_generator = output_generator()
        return next(self._output_generator)

    def communicate(self, *args, **kwargs):
        raise ValueError("Cannot communicate with a PopenWrapper")
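
Note that PopenWrapper depends on an enqueue_stream helper that is not shown
in this example. A minimal sketch of what such a helper presumably looks like,
with the signature assumed from the Thread arguments above:

def enqueue_stream(stream, queue, logfile=None):
    # Drain a pipe line by line into the queue, optionally teeing to a logfile.
    for line in iter(stream.readline, b''):
        if logfile is not None:
            logfile.write(line.decode('utf-8', 'replace'))
            logfile.flush()
        queue.put(line)
    stream.close()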
Example #47
0
 def _make_graph(self, degrees, stub_tries=100):
     """
         This function constructs the actual network.
         Depending on the construction method that has been chosen the network is either generated by a
             probabilistic method (if self.method = 'proba') or by a edge wiring method (self.method = 'stubs').
     """
     if self.method == 'proba':
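         # Chung-Lu-style construction: nodes i and j are connected with
         # probability ~ k_i * k_j / (2m). random.sample() is presumably
         # numpy.random.sample, i.e. a uniform draw from [0, 1).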
         two_k = float(sum(degrees))
         self.nn = [[] for _ in xrange(self.n)]
         for i in xrange(len(degrees) - 1):
             for j in xrange(i + 1, len(degrees)):
                 if min(degrees[i] * degrees[j] / two_k,
                        1) > random.sample():
                     self.nn[i].append(j)
                     self.nn[j].append(i)
         self._convert_to_array()
         self.degrees = map(lambda x: len(x), self.nn)
         return 0
     else:
         if sum(degrees) > self.n * (self.n - 1):
              print 'WARNING: according to the chosen degree sequence the network would be more than fully connected'
              print '\t the degree sequence is corrected such that each node has at most n-1 neighbours.'
         self.degrees = map(lambda x: min(self.n - 1, int(round(x))),
                            degrees)
         # check if the sum of the degrees is odd
         s_deg = sum(self.degrees)
          if s_deg % 2:  # if it is odd, correct (+1) as otherwise the stub method cannot work
             self.degrees[0] += 1
         stubs = []
         self.nn = [[] for _ in xrange(self.n)]
         for node in xrange(self.n):
             stubs.extend([node for _ in xrange(self.degrees[node])])
          # make up to stub_tries attempts to get the connections right
         while stub_tries:
             length = len(stubs)
             length_queue = Queue(maxsize=self._stub_attempts)
             for _ in xrange(self._stub_attempts):
                 length_queue.put_nowait(0)
             while length:
                  if length % 2:
                     print length, 'on'
                 try:
                     n_1 = self._get_rand_element(stubs)
                 except ValueError:
                     break
                 try:
                     n_2 = self._get_rand_element(stubs)
                 except ValueError:
                     stubs.append(n_1)
                     break
                 if n_1 != n_2:
                     if n_2 not in self.nn[n_1]:
                         self.nn[n_1].append(n_2)
                         self.nn[n_2].append(n_1)
                     else:
                         stubs.append(n_2)
                         stubs.append(n_1)
                 else:
                     stubs.append(n_2)
                     stubs.append(n_1)
                  # needs only to be recomputed if stubs.append was not executed
                  length = len(stubs)
                  # if the stubs list has the same length as it did
                  # self._stub_attempts iterations ago, give up
                  if length_queue.get_nowait() - length == 0:
                      break
                 length_queue.put_nowait(length)
              # Here the remaining elements of the stubs list cannot be matched.
             counter = copy(self._rewiring_attempts)
             if len(stubs):
                 print 'trying to fix'
                 print len(stubs)
             while len(stubs) and counter:
                  n_1 = self._get_rand_element(stubs)  # take a stub
                 # get the list of potential neighbours for this stub
                 pot_neigh_n_1 = filter(lambda x: x not in self.nn[n_1],
                                        range(self.n))
                 try:
                     rand_node_1 = self._get_rand_element(pot_neigh_n_1)
                     # get a neighbour of rand_node_1
                     rand_node_2 = self.nn[rand_node_1][random.randint(
                         0, len(self.nn[rand_node_1]))]
                 except ValueError:
                     stubs.append(n_1)
                     continue
                  # connect the stub to the random node, in both directions
                  self.nn[n_1].append(rand_node_1)
                  self.nn[rand_node_1].append(n_1)
                  # disconnect rand_node_1 from rand_node_2 to free a stub
                  self.nn[rand_node_1].remove(rand_node_2)
                  self.nn[rand_node_2].remove(rand_node_1)
                 # rand_node_2 has a free stub now try to match it with another stub
                 candidates = filter(
                     lambda x: x not in self.nn[rand_node_2], stubs)
                 if candidates:
                     new_mate = self._get_rand_element(candidates)
                      stubs.remove(new_mate)  # remove the new_mate from the stubs
                     self.nn[rand_node_2].append(new_mate)
                     self.nn[new_mate].append(rand_node_2)
                 else:
                     # nothing to match, put the stub of rand_node_2 into the list
                     stubs.append(rand_node_2)
                 print len(stubs), counter
                 counter -= 1
             if not len(stubs):  # it is constructed
                 self._convert_to_array()
                 return 0
             print 'new_try'
             stub_tries -= 1
         raise self.ConstructionError("""The Graph could not be constructed.
         You can try to launch the function again, but
         consider that you probably chose a graph that is
         too dense which makes it hard for the stubs algorithm
         to find a coherent list of links. In case it fails several
         times consider using the "proba" method.""")
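
A detail worth calling out in the stubs branch: length_queue is a bounded
Queue preloaded with zeros and used as a fixed-size history, so each
get_nowait() returns the stub count from self._stub_attempts iterations ago,
and the loop gives up when no progress was made over that window. A
self-contained sketch of the idiom (names are illustrative):

from queue import Queue

def make_stall_detector(window, seed=0):
    # Preloading `window` seed values means the first `window` checks compare
    # against `seed` and therefore never signal a stall.
    history = Queue(maxsize=window)
    for _ in range(window):
        history.put_nowait(seed)

    def stalled(value):
        # True if `value` equals the value observed `window` calls ago.
        oldest = history.get_nowait()
        history.put_nowait(value)
        return oldest == value

    return stalled

stalled = make_stall_detector(window=3)
for length in (10, 8, 8, 8, 8, 8):
    if stalled(length):
        print('no progress over the last 3 steps, giving up')
        break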
Example #48
0
class OwnedPartition(object):
    """A partition that is owned by a SimpleConsumer.

    Used to keep track of offsets and the internal message queue.
    """
    def __init__(self, partition, semaphore=None):
        """
        :param partition: The partition to hold
        :type partition: :class:`pykafka.partition.Partition`
        :param semaphore: A Semaphore that counts available messages and
            facilitates non-busy blocking
        :type semaphore: :class:`pykafka.utils.compat.Semaphore`
        """
        self.partition = partition
        self._messages = Queue()
        self._messages_arrived = semaphore
        self.last_offset_consumed = 0
        self.next_offset = 0
        self.fetch_lock = threading.Lock()

    @property
    def message_count(self):
        """Count of messages currently in this partition's internal queue"""
        return self._messages.qsize()

    def flush(self):
        self._messages = Queue()
        log.info("Flushed queue for partition %d", self.partition.id)

    def set_offset(self, last_offset_consumed):
        """Set the internal offset counters

        :param last_offset_consumed: The last committed offset for this
            partition
        :type last_offset_consumed: int
        """
        self.last_offset_consumed = last_offset_consumed
        self.next_offset = last_offset_consumed + 1

    def build_offset_request(self, new_offset):
        """Create a :class:`pykafka.protocol.PartitionOffsetRequest` for this
            partition

        :param new_offset: The offset to which to set this partition. This
            setting indicates how to reset the consumer's internal offset
            counter when an OffsetOutOfRangeError is encountered.
            There are two special values. Specify -1 to receive the latest
            offset (i.e. the offset of the next coming message) and -2 to
            receive the earliest available offset.
        :type new_offset: :class:`pykafka.common.OffsetType` or int
        """
        return PartitionOffsetRequest(self.partition.topic.name,
                                      self.partition.id, new_offset, 1)

    def build_fetch_request(self, max_bytes):
        """Create a :class:`pykafka.protocol.FetchPartitionRequest` for this
            partition.

        :param max_bytes: The number of bytes of messages to
            attempt to fetch
        :type max_bytes: int
        """
        return PartitionFetchRequest(self.partition.topic.name,
                                     self.partition.id, self.next_offset,
                                     max_bytes)

    def build_offset_commit_request(self):
        """Create a :class:`pykafka.protocol.PartitionOffsetCommitRequest`
            for this partition
        """
        return PartitionOffsetCommitRequest(self.partition.topic.name,
                                            self.partition.id,
                                            self.last_offset_consumed,
                                            int(time.time() * 1000), 'pykafka')

    def build_offset_fetch_request(self):
        """Create a PartitionOffsetFetchRequest for this partition
        """
        return PartitionOffsetFetchRequest(self.partition.topic.name,
                                           self.partition.id)

    def consume(self):
        """Get a single message from this partition"""
        try:
            message = self._messages.get_nowait()
            self.last_offset_consumed = message.offset
            return message
        except Empty:
            return None

    def enqueue_messages(self, messages):
        """Put a set of messages into the internal message queue

        :param messages: The messages to enqueue
        :type messages: Iterable of :class:`pykafka.common.Message`
        """
        for message in messages:
            if message.offset < self.last_offset_consumed:
                log.debug(
                    "Skipping enqueue for offset (%s) "
                    "less than last_offset_consumed (%s)", message.offset,
                    self.last_offset_consumed)
                continue
            message.partition = self.partition
            if message.partition_id != self.partition.id:
                log.error(
                    "Partition %s enqueued a message meant for partition %s",
                    self.partition.id, message.partition_id)
            message.partition_id = self.partition.id
            self._messages.put(message)
            self.next_offset = message.offset + 1

            if self._messages_arrived is not None:
                self._messages_arrived.release()
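
The optional semaphore is what lets a consumer block on "any partition has a
message" without busy-polling each partition's queue. A minimal sketch of the
producer/consumer handshake, assuming one shared semaphore across partitions:

import threading
from queue import Empty, Queue

messages = Queue()
arrived = threading.Semaphore(0)  # counts messages available for consumption

def produce(items):
    for item in items:
        messages.put(item)
        arrived.release()  # one permit per enqueued message

def consume(n):
    for _ in range(n):
        arrived.acquire()  # blocks without spinning until a message exists
        try:
            print(messages.get_nowait())
        except Empty:
            pass  # another consumer raced us to this message

producer = threading.Thread(target=produce, args=(range(3),))
producer.start()
consume(3)
producer.join()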
Example #49
0
File: ui.py Project: alan2h/calibre
class Main(
        MainWindow,
        MainWindowMixin,
        DeviceMixin,
        EmailMixin,  # {{{
        TagBrowserMixin,
        CoverFlowMixin,
        LibraryViewMixin,
        SearchBoxMixin,
        SavedSearchBoxMixin,
        SearchRestrictionMixin,
        LayoutMixin,
        UpdateMixin,
        EbookDownloadMixin):

    'The main GUI'

    proceed_requested = pyqtSignal(object, object)
    book_converted = pyqtSignal(object, object)
    shutting_down = False

    def __init__(self, opts, parent=None, gui_debug=None):
        MainWindow.__init__(self,
                            opts,
                            parent=parent,
                            disable_automatic_gc=True)
        self.setWindowIcon(QApplication.instance().windowIcon())
        self.jobs_pointer = Pointer(self)
        self.proceed_requested.connect(self.do_proceed,
                                       type=Qt.QueuedConnection)
        self.proceed_question = ProceedQuestion(self)
        self.job_error_dialog = JobError(self)
        self.keyboard = Manager(self)
        get_gui.ans = self
        self.opts = opts
        self.device_connected = None
        self.gui_debug = gui_debug
        self.iactions = OrderedDict()
        # Actions
        for action in interface_actions():
            if opts.ignore_plugins and action.plugin_path is not None:
                continue
            try:
                ac = self.init_iaction(action)
            except:
                # Ignore errors in loading user supplied plugins
                import traceback
                traceback.print_exc()
                if action.plugin_path is None:
                    raise
                continue
            ac.plugin_path = action.plugin_path
            ac.interface_action_base_plugin = action
            self.add_iaction(ac)
        self.load_store_plugins()

    def init_iaction(self, action):
        ac = action.load_actual_plugin(self)
        ac.plugin_path = action.plugin_path
        ac.interface_action_base_plugin = action
        action.actual_iaction_plugin_loaded = True
        return ac

    def add_iaction(self, ac):
        acmap = self.iactions
        if ac.name in acmap:
            if ac.priority >= acmap[ac.name].priority:
                acmap[ac.name] = ac
        else:
            acmap[ac.name] = ac

    def load_store_plugins(self):
        from calibre.gui2.store.loader import Stores
        self.istores = Stores()
        for store in available_store_plugins():
            if self.opts.ignore_plugins and store.plugin_path is not None:
                continue
            try:
                st = self.init_istore(store)
                self.add_istore(st)
            except:
                # Ignore errors in loading user supplied plugins
                import traceback
                traceback.print_exc()
                if store.plugin_path is None:
                    raise
                continue
        self.istores.builtins_loaded()

    def init_istore(self, store):
        st = store.load_actual_plugin(self)
        st.plugin_path = store.plugin_path
        st.base_plugin = store
        store.actual_istore_plugin_loaded = True
        return st

    def add_istore(self, st):
        stmap = self.istores
        if st.name in stmap:
            if st.priority >= stmap[st.name].priority:
                stmap[st.name] = st
        else:
            stmap[st.name] = st

    def initialize(self, library_path, db, listener, actions, show_gui=True):
        opts = self.opts
        self.preferences_action, self.quit_action = actions
        self.library_path = library_path
        self.library_broker = GuiLibraryBroker(db)
        self.content_server = None
        self.server_change_notification_timer = t = QTimer(self)
        self.server_changes = Queue()
        t.setInterval(1000)
        t.timeout.connect(self.handle_changes_from_server_debounced)
        t.setSingleShot(True)
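        # Single-shot timer plus a queue implements a debounce: each server
        # event enqueues a change and (re)starts the timer, so the timeout
        # slot drains the queue only once events stop arriving for a second.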
        self._spare_pool = None
        self.must_restart_before_config = False
        self.listener = Listener(listener)
        self.check_messages_timer = QTimer()
        self.check_messages_timer.timeout.connect(
            self.another_instance_wants_to_talk)
        self.check_messages_timer.start(1000)

        for ac in self.iactions.values():
            try:
                ac.do_genesis()
            except Exception:
                # Ignore errors in third party plugins
                import traceback
                traceback.print_exc()
                if getattr(ac, 'plugin_path', None) is None:
                    raise
        self.donate_action = QAction(QIcon(I('donate.png')),
                                     _('&Donate to support calibre'), self)
        for st in self.istores.values():
            st.do_genesis()
        MainWindowMixin.init_main_window_mixin(self, db)

        # Jobs Button {{{
        self.job_manager = JobManager()
        self.jobs_dialog = JobsDialog(self, self.job_manager)
        self.jobs_button = JobsButton(parent=self)
        self.jobs_button.initialize(self.jobs_dialog, self.job_manager)
        # }}}

        LayoutMixin.init_layout_mixin(self)
        DeviceMixin.init_device_mixin(self)

        self.progress_indicator = ProgressIndicator(self)
        self.progress_indicator.pos = (0, 20)
        self.verbose = opts.verbose
        self.get_metadata = GetMetadata()
        self.upload_memory = {}
        self.metadata_dialogs = []
        self.default_thumbnail = None
        self.tb_wrapper = textwrap.TextWrapper(width=40)
        self.viewers = collections.deque()
        self.system_tray_icon = None
        do_systray = config['systray_icon'] or opts.start_in_tray
        if do_systray:
            self.system_tray_icon = factory(
                app_id='com.calibre-ebook.gui').create_system_tray_icon(
                    parent=self, title='calibre')
        if self.system_tray_icon is not None:
            self.system_tray_icon.setIcon(
                QIcon(I('lt.png', allow_user_override=False)))
            if not (iswindows or isosx):
                self.system_tray_icon.setIcon(
                    QIcon.fromTheme('calibre-tray',
                                    self.system_tray_icon.icon()))
            self.system_tray_icon.setToolTip(self.jobs_button.tray_tooltip())
            self.system_tray_icon.setVisible(True)
            self.jobs_button.tray_tooltip_updated.connect(
                self.system_tray_icon.setToolTip)
        elif do_systray:
            prints(
                'Failed to create system tray icon, your desktop environment probably'
                ' does not support the StatusNotifier spec https://www.freedesktop.org/wiki/Specifications/StatusNotifierItem/'
            )
        self.system_tray_menu = QMenu(self)
        self.toggle_to_tray_action = self.system_tray_menu.addAction(
            QIcon(I('page.png')), '')
        self.toggle_to_tray_action.triggered.connect(
            self.system_tray_icon_activated)
        self.system_tray_menu.addAction(self.donate_action)
        self.eject_action = self.system_tray_menu.addAction(
            QIcon(I('eject.png')), _('&Eject connected device'))
        self.eject_action.setEnabled(False)
        self.addAction(self.quit_action)
        self.system_tray_menu.addAction(self.quit_action)
        self.keyboard.register_shortcut('quit calibre',
                                        _('Quit calibre'),
                                        default_keys=('Ctrl+Q', ),
                                        action=self.quit_action)
        if self.system_tray_icon is not None:
            self.system_tray_icon.setContextMenu(self.system_tray_menu)
            self.system_tray_icon.activated.connect(
                self.system_tray_icon_activated)
        self.quit_action.triggered[bool].connect(self.quit)
        self.donate_action.triggered[bool].connect(self.donate)
        self.minimize_action = QAction(_('Minimize the calibre window'), self)
        self.addAction(self.minimize_action)
        self.keyboard.register_shortcut('minimize calibre',
                                        self.minimize_action.text(),
                                        default_keys=(),
                                        action=self.minimize_action)
        self.minimize_action.triggered.connect(self.showMinimized)

        self.esc_action = QAction(self)
        self.addAction(self.esc_action)
        self.keyboard.register_shortcut('clear current search',
                                        _('Clear the current search'),
                                        default_keys=('Esc', ),
                                        action=self.esc_action)
        self.esc_action.triggered.connect(self.esc)

        self.shift_esc_action = QAction(self)
        self.addAction(self.shift_esc_action)
        self.keyboard.register_shortcut('focus book list',
                                        _('Focus the book list'),
                                        default_keys=('Shift+Esc', ),
                                        action=self.shift_esc_action)
        self.shift_esc_action.triggered.connect(self.shift_esc)

        self.ctrl_esc_action = QAction(self)
        self.addAction(self.ctrl_esc_action)
        self.keyboard.register_shortcut('clear virtual library',
                                        _('Clear the virtual library'),
                                        default_keys=('Ctrl+Esc', ),
                                        action=self.ctrl_esc_action)
        self.ctrl_esc_action.triggered.connect(self.ctrl_esc)

        self.alt_esc_action = QAction(self)
        self.addAction(self.alt_esc_action)
        self.keyboard.register_shortcut('clear additional restriction',
                                        _('Clear the additional restriction'),
                                        default_keys=('Alt+Esc', ),
                                        action=self.alt_esc_action)
        self.alt_esc_action.triggered.connect(
            self.clear_additional_restriction)

        # ###################### Start spare job server ########################
        QTimer.singleShot(1000, self.create_spare_pool)

        # ###################### Location Manager ########################
        self.location_manager.location_selected.connect(self.location_selected)
        self.location_manager.unmount_device.connect(
            self.device_manager.umount_device)
        self.location_manager.configure_device.connect(
            self.configure_connected_device)
        self.location_manager.update_device_metadata.connect(
            self.update_metadata_on_device)
        self.eject_action.triggered.connect(self.device_manager.umount_device)

        # ################### Update notification ###################
        UpdateMixin.init_update_mixin(self, opts)

        # ###################### Search boxes ########################
        SearchRestrictionMixin.init_search_restriction_mixin(self)
        SavedSearchBoxMixin.init_saved_seach_box_mixin(self)

        # ###################### Library view ########################
        LibraryViewMixin.init_library_view_mixin(self, db)
        SearchBoxMixin.init_search_box_mixin(self)  # Requires current_db

        self.library_view.model().count_changed_signal.connect(
            self.iactions['Choose Library'].count_changed)
        if not gprefs.get('quick_start_guide_added', False):
            try:
                add_quick_start_guide(self.library_view)
            except:
                import traceback
                traceback.print_exc()
        for view in ('library', 'memory', 'card_a', 'card_b'):
            v = getattr(self, '%s_view' % view)
            v.selectionModel().selectionChanged.connect(self.update_status_bar)
            v.model().count_changed_signal.connect(self.update_status_bar)

        self.library_view.model().count_changed()
        self.bars_manager.database_changed(self.library_view.model().db)
        self.library_view.model().database_changed.connect(
            self.bars_manager.database_changed, type=Qt.QueuedConnection)

        # ########################## Tags Browser ##############################
        TagBrowserMixin.init_tag_browser_mixin(self, db)
        self.library_view.model().database_changed.connect(
            self.populate_tb_manage_menu, type=Qt.QueuedConnection)

        # ######################## Search Restriction ##########################
        if db.prefs['virtual_lib_on_startup']:
            self.apply_virtual_library(db.prefs['virtual_lib_on_startup'])
        self.rebuild_vl_tabs()

        # ########################## Cover Flow ################################

        CoverFlowMixin.init_cover_flow_mixin(self)

        self._calculated_available_height = min(max_available_height() - 15,
                                                self.height())
        self.resize(self.width(), self._calculated_available_height)

        self.build_context_menus()

        for ac in self.iactions.values():
            try:
                ac.gui_layout_complete()
            except:
                import traceback
                traceback.print_exc()
                if ac.plugin_path is None:
                    raise

        if config['autolaunch_server']:
            self.start_content_server()

        self.read_settings()

        self.finalize_layout()
        self.bars_manager.start_animation()
        self.set_window_title()

        for ac in self.iactions.values():
            try:
                ac.initialization_complete()
            except:
                import traceback
                traceback.print_exc()
                if ac.plugin_path is None:
                    raise
        self.set_current_library_information(current_library_name(),
                                             db.library_id, db.field_metadata)

        register_keyboard_shortcuts()
        self.keyboard.finalize()
        if show_gui:
            # Note this has to come after restoreGeometry() because of
            # https://bugreports.qt.io/browse/QTBUG-56831
            self.show()
        if (self.system_tray_icon is not None
                and self.system_tray_icon.isVisible() and opts.start_in_tray):
            self.hide_windows()
        self.auto_adder = AutoAdder(gprefs['auto_add_path'], self)

        # Now that the gui is initialized we can restore the quickview state
        # The same thing will be true for any action-based operation with a
        # layout button
        from calibre.gui2.actions.show_quickview import get_quickview_action_plugin
        qv = get_quickview_action_plugin()
        if qv:
            qv.qv_button.restore_state()
        self.save_layout_state()

        # Collect cycles now
        gc.collect()

        QApplication.instance().shutdown_signal_received.connect(self.quit)
        if show_gui and self.gui_debug is not None:
            QTimer.singleShot(10, self.show_gui_debug_msg)

        self.iactions['Connect Share'].check_smartdevice_menus()
        QTimer.singleShot(1, self.start_smartdevice)
        QTimer.singleShot(100, self.update_toggle_to_tray_action)

    def show_gui_debug_msg(self):
        info_dialog(self,
                    _('Debug mode'),
                    '<p>' +
                    _('You have started calibre in debug mode. After you '
                      'quit calibre, the debug log will be available in '
                      'the file: %s<p>The '
                      'log will be displayed automatically.') % self.gui_debug,
                    show=True)

    def esc(self, *args):
        self.search.clear()

    def shift_esc(self):
        self.current_view().setFocus(Qt.OtherFocusReason)

    def ctrl_esc(self):
        self.apply_virtual_library()
        self.current_view().setFocus(Qt.OtherFocusReason)

    def start_smartdevice(self):
        message = None
        if self.device_manager.get_option('smartdevice', 'autostart'):
            try:
                message = self.device_manager.start_plugin('smartdevice')
            except:
                message = 'start smartdevice unknown exception'
                prints(message)
                import traceback
                traceback.print_exc()
        if message:
            if not self.device_manager.is_running('Wireless Devices'):
                error_dialog(
                    self,
                    _('Problem starting the wireless device'),
                    _('The wireless device driver had problems starting. '
                      'It said "%s"') % message,
                    show=True)
        self.iactions['Connect Share'].set_smartdevice_action_state()

    def start_content_server(self, check_started=True):
        from calibre.srv.embedded import Server
        if not gprefs.get('server3_warning_done', False):
            gprefs.set('server3_warning_done', True)
            if os.path.exists(os.path.join(config_dir, 'server.py')):
                try:
                    os.remove(os.path.join(config_dir, 'server.py'))
                except EnvironmentError:
                    pass
                warning_dialog(
                    self,
                    _('Content server changed!'),
                    _('calibre 3 comes with a completely re-written content server.'
                      ' As such any custom configuration you have for the content'
                      ' server no longer applies. You should check and refresh your'
                      ' settings in Preferences->Sharing->Sharing over the net'
                      ),
                    show=True)
        self.content_server = Server(
            self.library_broker, Dispatcher(self.handle_changes_from_server))
        self.content_server.state_callback = Dispatcher(
            self.iactions['Connect Share'].content_server_state_changed)
        if check_started:
            self.content_server.start_failure_callback = \
                Dispatcher(self.content_server_start_failed)
        self.content_server.start()

    def handle_changes_from_server(self, library_path, change_event):
        if DEBUG:
            prints('Received server change event: {} for {}'.format(
                change_event, library_path))
        if self.library_broker.is_gui_library(library_path):
            self.server_changes.put((library_path, change_event))
            self.server_change_notification_timer.start()

    def handle_changes_from_server_debounced(self):
        if self.shutting_down:
            return
        changes = []
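        # Drain everything queued since the timer was started; Empty ends the loop.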
        while True:
            try:
                library_path, change_event = self.server_changes.get_nowait()
            except Empty:
                break
            if self.library_broker.is_gui_library(library_path):
                changes.append(change_event)
        if changes:
            handle_changes(changes, self)

    def content_server_start_failed(self, msg):
        self.content_server = None
        error_dialog(self,
                     _('Failed to start Content server'),
                     _('Could not start the Content server. Error:\n\n%s') %
                     msg,
                     show=True)

    def resizeEvent(self, ev):
        MainWindow.resizeEvent(self, ev)
        self.search.setMaximumWidth(self.width() - 150)

    def create_spare_pool(self, *args):
        if self._spare_pool is None:
            num = min(detect_ncpus(), int(config['worker_limit'] / 2.0))
            self._spare_pool = Pool(max_workers=num, name='GUIPool')

    def spare_pool(self):
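        # Hand out the pre-created worker pool and schedule a replacement so
        # the next caller does not pay the pool start-up cost.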
        ans, self._spare_pool = self._spare_pool, None
        QTimer.singleShot(1000, self.create_spare_pool)
        return ans

    def do_proceed(self, func, payload):
        if callable(func):
            func(payload)

    def no_op(self, *args):
        pass

    def system_tray_icon_activated(self, r=False):
        if r in (QSystemTrayIcon.Trigger, QSystemTrayIcon.MiddleClick, False):
            if self.isVisible():
                if self.isMinimized():
                    self.showNormal()
                else:
                    self.hide_windows()
            else:
                self.show_windows()
                if self.isMinimized():
                    self.showNormal()

    @property
    def is_minimized_to_tray(self):
        return getattr(self, '__systray_minimized', False)

    def ask_a_yes_no_question(self,
                              title,
                              msg,
                              det_msg='',
                              show_copy_button=False,
                              ans_when_user_unavailable=True,
                              skip_dialog_name=None,
                              skipped_value=True):
        if self.is_minimized_to_tray:
            return ans_when_user_unavailable
        return question_dialog(self,
                               title,
                               msg,
                               det_msg=det_msg,
                               show_copy_button=show_copy_button,
                               skip_dialog_name=skip_dialog_name,
                               skip_dialog_skipped_value=skipped_value)

    def update_toggle_to_tray_action(self, *args):
        if hasattr(self, 'toggle_to_tray_action'):
            self.toggle_to_tray_action.setText(
                _('Hide main window') if self.isVisible()
                else _('Show main window'))

    def hide_windows(self):
        for window in QApplication.topLevelWidgets():
            if isinstance(window, (MainWindow, QDialog)) and \
                    window.isVisible():
                window.hide()
                setattr(window, '__systray_minimized', True)
        self.update_toggle_to_tray_action()

    def show_windows(self, *args):
        for window in QApplication.topLevelWidgets():
            if getattr(window, '__systray_minimized', False):
                window.show()
                setattr(window, '__systray_minimized', False)
        self.update_toggle_to_tray_action()

    def test_server(self, *args):
        if self.content_server is not None and \
                self.content_server.exception is not None:
            error_dialog(self, _('Failed to start Content server'),
                         unicode(self.content_server.exception)).exec_()

    @property
    def current_db(self):
        return self.library_view.model().db

    def refresh_all(self):
        m = self.library_view.model()
        m.db.data.refresh(clear_caches=False, do_search=False)
        self.saved_searches_changed(recount=False)
        m.resort()
        m.research()
        self.tags_view.recount()

    def another_instance_wants_to_talk(self):
        try:
            msg = self.listener.queue.get_nowait()
        except Empty:
            return
        if msg.startswith('launched:'):
            import json
            try:
                argv = json.loads(msg[len('launched:'):])
            except ValueError:
                prints('Failed to decode message from other instance: %r' %
                       msg)
                if DEBUG:
                    error_dialog(
                        self,
                        'Invalid message',
                        'Received an invalid message from other calibre instance.'
                        ' Do you have multiple versions of calibre installed?',
                        det_msg='Invalid msg: %r' % msg,
                        show=True)
                argv = ()
            if isinstance(argv, (list, tuple)) and len(argv) > 1:
                files = [
                    os.path.abspath(p) for p in argv[1:]
                    if not os.path.isdir(p) and os.access(p, os.R_OK)
                ]
                if files:
                    self.iactions['Add Books'].add_filesystem_book(files)
            self.setWindowState(self.windowState() & ~Qt.WindowMinimized
                                | Qt.WindowActive)
            self.show_windows()
            self.raise_()
            self.activateWindow()
        elif msg.startswith('refreshdb:'):
            m = self.library_view.model()
            m.db.new_api.reload_from_db()
            self.refresh_all()
        elif msg.startswith('shutdown:'):
            self.quit(confirm_quit=False)
        elif msg.startswith('bookedited:'):
            parts = msg.split(':')[1:]
            try:
                book_id, fmt, library_id = parts[:3]
                book_id = int(book_id)
                m = self.library_view.model()
                db = m.db.new_api
                if m.db.library_id == library_id and db.has_id(book_id):
                    db.format_metadata(book_id,
                                       fmt,
                                       allow_cache=False,
                                       update_db=True)
                    db.update_last_modified((book_id, ))
                    m.refresh_ids((book_id, ))
            except Exception:
                import traceback
                traceback.print_exc()
        else:
            print msg

    def current_view(self):
        '''Convenience method that returns the currently visible view '''
        idx = self.stack.currentIndex()
        if idx == 0:
            return self.library_view
        if idx == 1:
            return self.memory_view
        if idx == 2:
            return self.card_a_view
        if idx == 3:
            return self.card_b_view

    def booklists(self):
        return (self.memory_view.model().db, self.card_a_view.model().db,
                self.card_b_view.model().db)

    def library_moved(self, newloc, copy_structure=False, allow_rebuild=False):
        if newloc is None:
            return
        with self.library_broker:
            default_prefs = None
            try:
                olddb = self.library_view.model().db
                if copy_structure:
                    default_prefs = olddb.prefs
            except:
                olddb = None
            if copy_structure and olddb is not None and default_prefs is not None:
                default_prefs['field_metadata'] = \
                    olddb.new_api.field_metadata.all_metadata()
            db = self.library_broker.prepare_for_gui_library_change(newloc)
            if db is None:
                try:
                    db = LibraryDatabase(newloc, default_prefs=default_prefs)
                except apsw.Error:
                    if not allow_rebuild:
                        raise
                    import traceback
                    repair = question_dialog(
                        self,
                        _('Corrupted database'),
                        _('The library database at %s appears to be corrupted. Do '
                          'you want calibre to try and rebuild it automatically? '
                          'The rebuild may not be completely successful.') %
                        force_unicode(newloc, filesystem_encoding),
                        det_msg=traceback.format_exc())
                    if repair:
                        from calibre.gui2.dialogs.restore_library import repair_library_at
                        if repair_library_at(newloc, parent=self):
                            db = LibraryDatabase(newloc,
                                                 default_prefs=default_prefs)
                        else:
                            return
                    else:
                        return
            self.library_path = newloc
            prefs['library_path'] = self.library_path
            self.book_on_device(None, reset=True)
            db.set_book_on_device_func(self.book_on_device)
            self.library_view.set_database(db)
            self.tags_view.set_database(db, self.alter_tb)
            self.library_view.model().set_book_on_device_func(
                self.book_on_device)
            self.status_bar.clear_message()
            self.search.clear()
            self.saved_search.clear()
            self.book_details.reset_info()
            # self.library_view.model().count_changed()
            db = self.library_view.model().db
            self.iactions['Choose Library'].count_changed(db.count())
            self.set_window_title()
            self.apply_named_search_restriction('')  # reset restriction to null
            # reload the search restrictions combo box
            self.saved_searches_changed(recount=False)
            if db.prefs['virtual_lib_on_startup']:
                self.apply_virtual_library(db.prefs['virtual_lib_on_startup'])
            self.rebuild_vl_tabs()
            for action in self.iactions.values():
                action.library_changed(db)
            self.library_broker.gui_library_changed(db, olddb)
            if self.device_connected:
                self.set_books_in_library(self.booklists(), reset=True)
                self.refresh_ondevice()
                self.memory_view.reset()
                self.card_a_view.reset()
                self.card_b_view.reset()
            self.set_current_library_information(current_library_name(),
                                                 db.library_id,
                                                 db.field_metadata)
            self.library_view.set_current_row(0)
        # Run a garbage collection now so that it does not freeze the
        # interface later
        gc.collect()

    def set_window_title(self):
        db = self.current_db
        restrictions = [
            x for x in (db.data.get_base_restriction_name(),
                        db.data.get_search_restriction_name()) if x
        ]
        restrictions = ' :: '.join(restrictions)
        font = QFont()
        if restrictions:
            restrictions = ' :: ' + restrictions
            font.setBold(True)
            font.setItalic(True)
        self.virtual_library.setFont(font)
        title = u'{0} - || {1}{2} ||'.format(
            __appname__, self.iactions['Choose Library'].library_name(),
            restrictions)
        self.setWindowTitle(title)

    def location_selected(self, location):
        '''
        Called when a location icon is clicked (e.g. Library)
        '''
        page = 0 if location == 'library' else 1 if location == 'main' else 2 if location == 'carda' else 3
        self.stack.setCurrentIndex(page)
        self.book_details.reset_info()
        for x in ('tb', 'cb'):
            splitter = getattr(self, x + '_splitter')
            splitter.button.setEnabled(location == 'library')
        for action in self.iactions.values():
            action.location_selected(location)
        if location == 'library':
            self.virtual_library_menu.setEnabled(True)
            self.highlight_only_button.setEnabled(True)
            self.vl_tabs.setEnabled(True)
        else:
            self.virtual_library_menu.setEnabled(False)
            self.highlight_only_button.setEnabled(False)
            self.vl_tabs.setEnabled(False)
            # Reset the view in case something changed while it was invisible
            self.current_view().reset()
        self.set_number_of_books_shown()
        self.update_status_bar()

    def job_exception(self,
                      job,
                      dialog_title=_('Conversion error'),
                      retry_func=None):
        if not hasattr(self, '_modeless_dialogs'):
            self._modeless_dialogs = []
        minz = self.is_minimized_to_tray
        if self.isVisible():
            for x in list(self._modeless_dialogs):
                if not x.isVisible():
                    self._modeless_dialogs.remove(x)
        try:
            if 'calibre.ebooks.DRMError' in job.details:
                if not minz:
                    from calibre.gui2.dialogs.drm_error import DRMErrorMessage
                    d = DRMErrorMessage(
                        self,
                        _('Cannot convert') + ' ' +
                        job.description.split(':')[-1].partition('(')[-1][:-1])
                    d.setModal(False)
                    d.show()
                    self._modeless_dialogs.append(d)
                return

            if 'calibre.ebooks.oeb.transforms.split.SplitError' in job.details:
                title = job.description.split(':')[-1].partition('(')[-1][:-1]
                msg = _('<p><b>Failed to convert: %s') % title
                msg += '<p>' + _('''
                Many older e-book reader devices are incapable of displaying
                EPUB files that have internal components over a certain size.
                Therefore, when converting to EPUB, calibre automatically tries
                to split up the EPUB into smaller sized pieces.  For some
                files that are large undifferentiated blocks of text, this
                splitting fails.
                <p>You can <b>work around the problem</b> by either increasing the
                maximum split size under <i>EPUB output</i> in the conversion dialog,
                or by turning on Heuristic Processing, also in the conversion
                dialog. Note that if you make the maximum split size too large,
                your e-book reader may have trouble with the EPUB.
                        ''')
                if not minz:
                    d = error_dialog(self,
                                     _('Conversion Failed'),
                                     msg,
                                     det_msg=job.details)
                    d.setModal(False)
                    d.show()
                    self._modeless_dialogs.append(d)
                return

            if 'calibre.ebooks.mobi.reader.mobi6.KFXError:' in job.details:
                if not minz:
                    title = job.description.split(':')[-1].partition(
                        '(')[-1][:-1]
                    msg = _('<p><b>Failed to convert: %s') % title
                    idx = job.details.index(
                        'calibre.ebooks.mobi.reader.mobi6.KFXError:')
                    msg += '<p>' + re.sub(
                        r'(https:\S+)', r'<a href="\1">{}</a>'.format(
                            _('here')),
                        job.details[idx:].partition(':')[2].strip())
                    d = error_dialog(self,
                                     _('Conversion failed'),
                                     msg,
                                     det_msg=job.details)
                    d.setModal(False)
                    d.show()
                    self._modeless_dialogs.append(d)
                return

            if 'calibre.web.feeds.input.RecipeDisabled' in job.details:
                if not minz:
                    msg = job.details
                    msg = msg[msg.find('calibre.web.feeds.input.RecipeDisabled:'):]
                    msg = msg.partition(':')[-1]
                    d = error_dialog(self, _('Recipe Disabled'),
                                     '<p>%s</p>' % msg)
                    d.setModal(False)
                    d.show()
                    self._modeless_dialogs.append(d)
                return

            if 'calibre.ebooks.conversion.ConversionUserFeedBack:' in job.details:
                if not minz:
                    import json
                    payload = job.details.rpartition(
                        'calibre.ebooks.conversion.ConversionUserFeedBack:'
                    )[-1]
                    payload = json.loads('{' + payload.partition('{')[-1])
                    d = {
                        'info': info_dialog,
                        'warn': warning_dialog,
                        'error': error_dialog
                    }.get(payload['level'], error_dialog)
                    d = d(self,
                          payload['title'],
                          '<p>%s</p>' % payload['msg'],
                          det_msg=payload['det_msg'])
                    d.setModal(False)
                    d.show()
                    self._modeless_dialogs.append(d)
                return
        except:
            pass
        if job.killed:
            return
        try:
            prints(job.details, file=sys.stderr)
        except:
            pass
        if not minz:
            self.job_error_dialog.show_error(dialog_title,
                                             _('<b>Failed</b>') + ': ' +
                                             unicode(job.description),
                                             det_msg=job.details,
                                             retry_func=retry_func)

    def read_settings(self):
        geometry = config['main_window_geometry']
        if geometry is not None:
            self.restoreGeometry(geometry)
        self.read_layout_settings()

    def write_settings(self):
        with gprefs:  # Only write to gprefs once
            config.set('main_window_geometry', self.saveGeometry())
            dynamic.set('sort_history', self.library_view.model().sort_history)
            self.save_layout_state()
            self.stack.tb_widget.save_state()

    def quit(self,
             checked=True,
             restart=False,
             debug_on_restart=False,
             confirm_quit=True):
        if self.shutting_down:
            return
        if confirm_quit and not self.confirm_quit():
            return
        try:
            self.shutdown()
        except:
            pass
        self.restart_after_quit = restart
        self.debug_on_restart = debug_on_restart
        QApplication.instance().quit()

    def donate(self, *args):
        open_url(QUrl('https://calibre-ebook.com/donate'))

    def confirm_quit(self):
        if self.job_manager.has_jobs():
            msg = _('There are active jobs. Are you sure you want to quit?')
            if self.job_manager.has_device_jobs():
                msg = '<p>'+__appname__ + \
                      _(''' is communicating with the device!<br>
                      Quitting may cause corruption on the device.<br>
                      Are you sure you want to quit?''')+'</p>'

            if not question_dialog(self, _('Active jobs'), msg):
                return False

        if self.proceed_question.questions:
            msg = _(
                'There are library updates waiting. Are you sure you want to quit?'
            )
            if not question_dialog(self, _('Library updates waiting'), msg):
                return False

        from calibre.db.delete_service import has_jobs
        if has_jobs():
            msg = _('Some deleted books are still being moved to the Recycle '
                    'Bin, if you quit now, they will be left behind. Are you '
                    'sure you want to quit?')
            if not question_dialog(self, _('Active jobs'), msg):
                return False

        return True

    def shutdown(self, write_settings=True):
        self.shutting_down = True
        self.show_shutdown_message()
        self.server_change_notification_timer.stop()

        from calibre.customize.ui import has_library_closed_plugins
        if has_library_closed_plugins():
            self.show_shutdown_message(
                _('Running database shutdown plugins. This could take a few seconds...'
                  ))

        self.grid_view.shutdown()
        db = None
        try:
            db = self.library_view.model().db
            cf = db.clean
        except:
            pass
        else:
            cf()
            # Save the current field_metadata for applications like calibre2opds
            # Goes here, because if cf is valid, db is valid.
            db.new_api.set_pref('field_metadata',
                                db.field_metadata.all_metadata())
            db.commit_dirty_cache()
            db.prefs.write_serialized(prefs['library_path'])
        for action in self.iactions.values():
            if not action.shutting_down():
                return
        if write_settings:
            self.write_settings()
        self.check_messages_timer.stop()
        if hasattr(self, 'update_checker'):
            self.update_checker.shutdown()
        self.listener.close()
        self.job_manager.server.close()
        self.job_manager.threaded_server.close()
        self.device_manager.keep_going = False
        self.auto_adder.stop()
        # Do not report any errors that happen after the shutdown
        # We cannot restore the original excepthook as that causes PyQt to
        # call abort() on unhandled exceptions
        import traceback

        def eh(t, v, tb):
            try:
                traceback.print_exception(t, v, tb, file=sys.stderr)
            except:
                pass

        sys.excepthook = eh

        mb = self.library_view.model().metadata_backup
        if mb is not None:
            mb.stop()

        self.library_view.model().close()

        try:
            try:
                if self.content_server is not None:
                    # If the Content server has any sockets being closed then
                    # this can take quite a long time (minutes). Tell the user that it is
                    # happening.
                    self.show_shutdown_message(
                        _('Shutting down the Content server. This could take a while...'
                          ))
                    s = self.content_server
                    self.content_server = None
                    s.exit()
            except:
                pass
        except KeyboardInterrupt:
            pass
        self.hide_windows()
        if self._spare_pool is not None:
            self._spare_pool.shutdown()
        from calibre.db.delete_service import shutdown
        shutdown()
        time.sleep(2)
        self.istores.join()
        return True

    def run_wizard(self, *args):
        if self.confirm_quit():
            self.run_wizard_b4_shutdown = True
            self.restart_after_quit = True
            try:
                self.shutdown(write_settings=False)
            except:
                pass
            QApplication.instance().quit()

    def closeEvent(self, e):
        if self.shutting_down:
            return
        self.write_settings()
        if (self.system_tray_icon is not None and
                self.system_tray_icon.isVisible()):
            if not dynamic['systray_msg'] and not isosx:
                info_dialog(
                    self,
                    'calibre',
                    'calibre ' +
                    _('will keep running in the system tray. To close it, '
                      'choose <b>Quit</b> in the context menu of the '
                      'system tray.'),
                    show_copy_button=False).exec_()
                dynamic['systray_msg'] = True
            self.hide_windows()
            e.ignore()
        else:
            if self.confirm_quit():
                try:
                    self.shutdown(write_settings=False)
                except:
                    import traceback
                    traceback.print_exc()
                e.accept()
            else:
                e.ignore()
Example #50
0
class AsyncFutureDnsResolver(object):
    """An executor that spawns a small thread pool for performing DNS
    lookups.  DNS lookup task sare submitted by using calling this
    object as a function and those tasks will be completed
    asynchronously by threads in a thread pool.  The completed tasks
    are posted back to an internal queue and the :meth:`poll` method
    gets the completed tasks from the queue and notifies their
    subscribers of completion.  Tasks with done callback methods will
    be called by poll on the same threat that poll is called on.

    >>> resolver = AsyncFutureDnsResolver()
    >>>
    >>> lookup_result = None
    >>>
    >>> def lookup_finished(future):
    ...   global lookup_result
    ...   lookup_result = future.result()
    ...
    >>> future = resolver('localhost', 80)
    >>> future.add_done_callback(lookup_finished)
    >>>
    >>> while not future.done():
    ...   # Calling poll services callbacks and sets future to done.
    ...   resolver.poll()
    ...   sleep(0.1)
    ...
    >>> assert future.done()
    >>>
    >>> # Only timeout=0 presently supported.
    >>> assert not future.exception(timeout=0)
    >>> assert not future.cancelled()
    >>> # future.result(0) contains outcome of asynchronous call
    >>> # to socket.getaddrinfo.  Note that at this time only timeout=0
    >>> # is supported by this limited api.

    .. aafig::

                                 |                      Worker Threads
                                 |                      +------------+
                                 |                  +-->| Worker 0   |-->+
                                 |                  |   +------------+   |
                                 |                  |                    |
                                 |                  |   +------------+   |
                                 |                  +-->| Worker 1   |-->+
                                 |                  |   +------------+   |
                                 |                  |                    |
                                 |                  |   +------------+   |
      resolver('google.com', 80) |--> task queue----+-->| Worker ... |-->+
                                 |                  |   +------------+   |
                                 |                  |                    |
          AsyncFutureDnsResolver |                  |   +------------+   |
                          Thread |                  +-->| Worker n   |-->+
                                 |                      +------------+   |
                                 |                                       |
                                 |                                       |
                resolver.poll()  |<-- completion queue<------------------+
                                 |
    """
    def __init__(self, thread_pool_size=1):
        self.__closed = False
        self.__work_queue = Queue()
        self.__done_queue = Queue()
        self.__threads = []
        self.__rd, self.__wd = os.pipe()

        flags = fcntl.fcntl(self.__rd, fcntl.F_GETFL)
        fcntl.fcntl(self.__rd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

        flags = fcntl.fcntl(self.__wd, fcntl.F_GETFL)
        fcntl.fcntl(self.__wd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

        for i in range(0, thread_pool_size):
            t = threading.Thread(target=_worker_task,
                                 args=(self.__work_queue, self.__wd,
                                       self.__done_queue))
            t.daemon = True
            self.__threads.append(t)
            t.start()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False

    def closed(self):
        """bool: True if the object has been closed; False otherwise."""
        return self.__closed

    def close(self):
        """Closes resolver by completing all tasks in queue and joining
         with worker threads.  New dns resolutions cannot be scheduled
         after this method begins executing (calling the resolver will
         result in an assertion failure)."""
        if not self.closed():
            self.__closed = True
            for i in xrange(0, len(self.__threads)):
                self.__work_queue.put(_Poison)

            for thread in self.__threads:
                thread.join()
                self.poll()

            os.close(self.__wd)
            os.close(self.__rd)

    def __call__(self, host, port, family=0, socktype=0, proto=0, flags=0):
        """Queues an asynchronous DNS resolution task.

        Parameters
        ----------
        host: str or None
            A host `str` must contain either a domain name for lookup
            or a string representation of an IPv4/v6 address.
        port: str or int or None
            A string service name such as 'http', a numeric port number,
            or None.
        family: int
            One of the socket.AF_* constants.
        socktype: int
            One of the socket.SOCK_* constants.
        proto: int
            One of the socket.IPPROTO_* constants, e.g. socket.IPPROTO_TCP.
        flags: int
            A bitwise OR of zero or more socket.AI_* constants; default is zero.

        Returns
        -------
        Future
            If the DNS lookup succeeds then `future.result()` will
            immediately return the list of 5-tuples produced by
            socket.getaddrinfo, each with the structure
            (family, socktype, proto, canonname, sockaddr).  On failure,
            `future.exception()` will immediately return a
            `socket.gaierror`.

            In these tuples, family, socktype, proto are all integers
            and are meant to be passed to the socket() function.
            canonname will be a string representing the canonical name
            of the host if AI_CANONNAME is part of the flags argument;
            else canonname will be empty. sockaddr is a tuple
            describing a socket address, whose format depends on the
            returned family (a (address, port) 2-tuple for AF_INET, a
            (address, port, flow info, scope id) 4-tuple for AF_INET6),
            and is meant to be passed to the socket.connect() method.
        """
        assert not self.__closed, 'Async dns lookup after resolver closed.'

        getaddrinfo_params = (host, port, family, socktype, proto, flags)
        future = _Future(socket.getaddrinfo, *getaddrinfo_params)
        self.__work_queue.put(future)

        return future

    def read_fd(self):
        """int: fileno"""
        return self.__rd

    def poll(self):
        """Calls done callbacks of any newly completed futures."""
        try:
            rc = os.read(self.__rd, 1)
        except OSError as e:
            if e.errno == errno.EAGAIN:
                # No data available in pipe.
                pass
            else:
                # Unknown error; crash!
                raise
        else:
            if rc:
                # A byte was read.
                try:
                    future = self.__done_queue.get_nowait()
                except Empty:
                    pass
                else:
                    future._notify()
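A minimal event-loop integration sketch, not part of the original example: it assumes only what the code above shows, namely that read_fd() exposes the read end of the wake-up pipe and poll() services completed lookups.

import select

resolver = AsyncFutureDnsResolver(thread_pool_size=2)
future = resolver('example.com', 443)
while not future.done():
    # Block until a worker writes its wake-up byte, then run callbacks.
    ready, _, _ = select.select([resolver.read_fd()], [], [], 1.0)
    if ready:
        resolver.poll()
resolver.close()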
Example #51
0
class ROSEUSBridgeNode:
    def __init__(self):
        self.pub = rospy.Publisher('output', RawCommand)
        self.sub = rospy.Subscriber('input', RawCommand, self.raw_command_cb)
        rospy.on_shutdown(self._on_node_shutdown)
        while not rospy.is_shutdown():
            self.launch_roseus()
            self.main_loop()
            rospy.loginfo("respawning...")
            self.kill_roseus()

    def get_output(self, out, queue):
        while self.roseus_process.poll() is None:
            data = out.read(BUFF_SIZE)
            if data:
                queue.put(str(data))
        else:
            rospy.logerr("roseus process exited; reader thread terminating")

    def launch_roseus(self):
        cmd = ['rosrun', 'roseus', 'roseus']
        self.roseus_process = subprocess.Popen(cmd,
                                               stdout=subprocess.PIPE,
                                               stderr=subprocess.PIPE,
                                               stdin=subprocess.PIPE,
                                               bufsize=BUFF_SIZE,
                                               close_fds=ON_POSIX,
                                               env=os.environ.copy(),
                                               preexec_fn=os.setpgrp)
        self.stdout_queue = Queue()
        self.stderr_queue = Queue()
        self.received_cmd = Queue()

        self.get_stdout_thread = Thread(target=self.get_output,
                                        args=(self.roseus_process.stdout,
                                              self.stdout_queue))
        self.get_stdout_thread.daemon = True
        self.get_stderr_thread = Thread(target=self.get_output,
                                        args=(self.roseus_process.stderr,
                                              self.stderr_queue))
        self.get_stderr_thread.daemon = True
        self.get_stdout_thread.start()
        self.get_stderr_thread.start()

    def kill_roseus(self):
        try:
            rospy.loginfo("send SIGTERM to roseus")
            self.roseus_process.terminate()
        except Exception as e:
            rospy.logwarn("escalated to kill")
            try:
                self.roseus_process.kill()
            except Exception as e:
                rospy.logerr('could not kill roseus: ' + str(e))

    def main_loop(self):
        while self.roseus_process.poll() is None:
            stdout = ""
            stderr = ""
            try:
                cmd = self.received_cmd.get_nowait()
                rospy.logdebug("write to stdin: " + cmd)
                self.roseus_process.stdin.write(cmd)
                self.roseus_process.stdin.flush()
            except IOError as e:
                if e.errno == errno.EINTR:
                    continue
            except Empty as e:
                pass
            except Exception as e:
                rospy.logwarn('error: ' + str(e))
            try:
                while not self.stdout_queue.empty():
                    stdout += self.stdout_queue.get_nowait()
            except Empty as e:
                pass
            try:
                while not self.stderr_queue.empty():
                    stderr += self.stderr_queue.get_nowait()
            except Empty as e:
                pass
            except Exception as e:
                rospy.logwarn('error: ' + str(e))
            if stdout != "":
                self.pub.publish(stdout.strip())
                rospy.logdebug("stdout: " + stdout)
            if stderr != "":
                self.pub.publish(stderr.strip())
                rospy.logdebug("stderr: " + stderr)
            rospy.sleep(0.1)
        else:
            rospy.logwarn("roseus process has been stopped.")

        if self.roseus_process.returncode != 0:
            rospy.logerr('roseus process exits abnormally with exit code: ' + str(self.roseus_process.returncode))

    def raw_command_cb(self, msg):
        cmd_str = str(msg.data).rstrip(' \t\r\n\0') + os.linesep
        self.received_cmd.put(cmd_str)

    def _on_node_shutdown(self):
        self.kill_roseus()
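BUFF_SIZE and ON_POSIX are referenced above but not defined in this snippet (RawCommand is assumed to be imported from the package's own msg definitions); typical definitions for this reader-thread pattern would be:

import sys
BUFF_SIZE = 1  # read byte-by-byte so subprocess output is not held up by buffering
ON_POSIX = 'posix' in sys.builtin_module_names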
Example #52
0
class SMSd(object):
    '''
    Very basic SMS relay
    Receive, acknowledge and forward SMS-RP messages
    '''
    #
    # verbosity level: list of log types to display when calling
    # self._log(logtype, msg)
    DEBUG = ('ERR', 'WNG', 'INF', 'DBG')
    #
    TRACK_PDU = True
    #
    # time resolution for consuming the queue for TP msg
    QUEUE_TO = 0.1
    #
    # SMS relay phone number
    RP_OA = {'Type': 1, 'NumberingPlan': 1, 'Num': '1234'}
    #
    # TP settings for sending handcrafted SMS DELIVER to UEs
    TP_OA = {'Type': 1, 'NumberingPlan': 1, 'Num': '12341234'}
    TP_PID = {'Format': 0, 'Telematic': {'Telematic': 0, 'Protocol': 0}}
    TP_DCS = {'Group': 0, 'Charset': 0, 'Class': 0}
    #
    # timezone for TP_SCTS information (float)
    TIMEZONE = 0.0
    #
    # CorenetServer reference, for checking UE MSISDN and sending MT-SMS
    Server = None

    def __init__(self):
        self._pdu = []
        # dict with dicts of ongoing RP transactions indexed by RP ref and
        #                    ongoing TP transactions indexed by TP msg ref
        # indexed by UE msisdn
        self.Proc = {}
        # dict with lists of RP-DATA and TP procedures in error, indexed by UE msisdn
        self.Err = {}
        #
        # set 2 queues to process / forward or inject TP messages within a background thread
        self._forward_q = Queue()
        self._inject_q = Queue()
        self._forwarding = True
        self._forward_t = threadit(self.forward)
        self._log('INF', 'SMS relay started')

    def _log(self, logtype='DBG', msg=''):
        # logtype: 'ERR', 'WNG', 'INF', 'DBG'
        if logtype in self.DEBUG:
            log('[%s] [SMSd] %s' % (logtype, msg))

    def stop(self):
        if self._forwarding:
            self._forwarding = False
            self._forward_t.join()

    def forward(self):
        # consume the queue
        while self._forwarding:
            try:
                tp_msg, num = self._forward_q.get_nowait()
            except Empty:
                try:
                    tp_msg, num = self._inject_q.get_nowait()
                except Empty:
                    sleep(self.QUEUE_TO)
                else:
                    self.send_tp(tp_msg, num)
            else:
                self.process_tp(tp_msg, num)

    def init_ue(self, num):
        self.Proc[num] = {
            'RP': {},  # dict of ongoing RP procedures at the RP layer
            'TP': {}  # dict of ongoing TP procedures at the TP layer
        }
        self.Err[num] = {
            'RP': [],  # list of RP procedures in error
            'TP': []  # list of TP procedures in error
        }

    def process_rp(self, rp_msg, num):
        """process an RP message `rp_msg' sent by a UE with a given MSISDN `num',
        
        returns an RP ACK or ERROR if rp_msg is DATA or SMMA
                None if rp_msg is ACK or ERROR
        """
        if not isinstance(rp_msg, NAS.SMS_RP):
            self._log('WNG', 'process_rp: invalid rp_msg')
            return None
        #
        if self.TRACK_PDU:
            self._pdu.append((time(), 'UL', rp_msg))
        #
        if num not in self.Proc:
            self.init_ue(num)
        #
        if rp_msg._name == 'RP_DATA_MO':
            # this will return an RP_ACK or RP_ERR
            ret = self._process_rp_data(rp_msg, num)
        elif rp_msg._name == 'RP_SMMA':
            # this will return an RP_ACK or RP_ERR
            ret = self._process_rp_smma(rp_msg, num)
        elif rp_msg._name in ('RP_ACK_MO', 'RP_ERROR_MO'):
            # check the ref together with num
            ret = self._process_rp_ack_err(rp_msg, num)
        else:
            self._log('WNG', 'process_rp: invalid message %r' % rp_msg)
            ret = None
        #
        if ret and self.TRACK_PDU:
            self._pdu.append((time(), 'DL', ret))
        return ret

    def _process_rp_data(self, rp_msg, num):
        ref = rp_msg[2].get_val()
        rp_procs = self.Proc[num]['RP']
        rp_procs[ref] = (rp_msg, None)
        #
        # check RP orig / dest address
        if rp_msg[3][0].get_val() > 0:
            rp_orig = rp_msg[3][1]
            self._log(
                'WNG',
                'process_rp_data: non-empty originator address, %r' % rp_orig)
            # invalid mandatory information
            del rp_procs[ref]
            return NAS.RP_ERROR_MT(val={
                'Ref': ref,
                'RPCause': {
                    'Value': 96
                }
            })
        #
        if rp_msg[4][0].get_val() > 0:
            rp_dest = rp_msg[4][1]
            if rp_dest['Num'].decode() != self.RP_OA['Num']:
                self._log('INF',
                          'process_rp_data: destination address, %r' % rp_dest)
        else:
            self._log('WNG', 'process_rp_data: empty destination address')
            # invalid mandatory information
            del rp_procs[ref]
            return NAS.RP_ERROR_MT(val={
                'Ref': ref,
                'RPCause': {
                    'Value': 96
                }
            })
        #
        if not isinstance(rp_msg[5][1], NAS.SMS_TP):
            self._log('WNG',
                      'process_rp_data: invalid TP data, %r' % rp_msg[5])
            # invalid mandatory information
            del rp_procs[ref]
            return NAS.RP_ERROR_MT(val={
                'Ref': ref,
                'RPCause': {
                    'Value': 96
                }
            })
        #
        # process TP in the background thread
        self._insert_tp(rp_msg[5][1], num)
        # acknowledge RP
        rp_ack = NAS.RP_ACK_MT(val={'Ref': ref})
        del rp_procs[ref]
        return rp_ack

    def _process_rp_smma(self, rp_msg, num):
        ref = rp_msg[2].get_val()
        self._log('INF',
                  'process_rp_smma: procedure ref (%s, %i)' % (num, ref))
        return NAS.RP_ACK_MT(val={'Ref': ref})

    def _process_rp_ack_err(self, rp_msg, num):
        rp_msg_name = rp_msg._name[:-3].replace('_', '-')
        ref = rp_msg[2].get_val()
        rp_procs = self.Proc[num]['RP']
        if ref in rp_procs:
            rp_req, tp_ref = rp_procs[ref]
            rp_ud = rp_msg['RPUserData']
            if not rp_ud.get_trans() and isinstance(rp_ud[2], NAS.SMS_TP):
                # SMS_DELIVER_REPORT_RP_ACK/ERROR provided
                if rp_msg._name == 'RP_ACK_MO':
                    # TP status 0: Short message transaction completed - Short message received by the SME
                    stat = 0
                else:
                    # TP status 64: Permanent error, SC is not making any more transfer attempts - Remote procedure error
                    stat = 64
                self._report_status(rp_req, tp_ref, stat)
                # TODO: check if it requires an RP-ACK back
            # delete the RP procedure
            del rp_procs[ref]
            if rp_msg_name == 'RP-ACK':
                self._log(
                    'DBG',
                    'process_rp_ack_err: procedure ref (%s, %i) completed' %
                    (num, ref))
            else:
                self.Err[num]['RP'].append(rp_req)
                self._log('INF', 'process_rp_ack_err: procedure ref (%s, %i) in error with cause %r'\
                          % (num, ref, rp_msg[3][1]))
        else:
            self._log(
                'INF', 'process_rp_ack_err: procedure ref (%s, %i) unknown' %
                (num, ref))
        return None

    def _report_status(self, rp_req, tp_ref, stat=64):
        # when a downlink RP-DATA fails within CorenetServer (-> discard_rp())
        # or receiving an RP-ACK/ERROR-MO with TP data (SMS-DELIVER-REPORT-RP-ACK/ERROR)
        # we need to send an SMS-STATUS-REPORT toward the original sender
        # 1) reassociate to the SMS SUBMIT of the initial sender
        try:
            tp_oa = rp_req[5][1]['TP_OA']['Num'].decode()
        except Exception:
            self._log(
                'WNG',
                'report_status: unable to retrieve the TP originating address')
        else:
            if tp_oa in self.Proc:
                tp_procs = self.Proc[tp_oa]['TP']
                if tp_ref in tp_procs:
                    tp_req, atime = self.Proc[tp_oa]['TP'][tp_ref]
                    # 2) send a status report to the initial sender and delete the TP transaction
                    del self.Proc[tp_oa]['TP'][tp_ref]
                    tp_stat = self._create_tp_stat_rep(tp_req, tp_oa, atime,
                                                       stat)
                    self._inject_tp(tp_stat, tp_oa)
                    self._log(
                        'DBG', 'report_status: delete TP procedure (%s, %i)' %
                        (tp_oa, tp_ref))
                    return
            # no status report was requested, hence we just pass our way
            self._log(
                'DBG',
                'report_status: no SMS SUBMIT requiring status report for %s' %
                tp_oa)

    def _insert_tp(self, tp_msg, num):
        """put the tp_msg within the forwarding queue,
        and let the forwarding thread take care of it
        """
        try:
            self._forward_q.put_nowait((tp_msg, num))
        except Full as err:
            self._log('ERR', 'insert_tp: TP forwarding queue is full (%i), deleting it, %s'\
                      % (self._forward_q.qsize(), err))
            self._forward_q = Queue()

    def process_tp(self, tp_msg, num):
        """process a TP message `tp_msg' sent by a UE with a given MSISDN `num'
        """
        if tp_msg._name == 'SMS_SUBMIT':
            # should forward TP user data in an SMS DELIVER to the TP dest
            self._process_tp_submit(tp_msg, num)
        elif tp_msg._name == 'SMS_COMMAND':
            # correspond to an MS invoking an operation within the SMS-Center
            self._process_tp_cmd(tp_msg, num)
        else:
            # SMS_DELIVER_REPORT_RP_ACK and SMS_DELIVER_REPORT_RP_ERROR
            # are processed within _process_rp_ack_err()
            self._log('WNG', 'process_tp: invalid message %r' % tp_msg)
            return None

    def _process_tp_submit(self, tp_msg, num):
        atime = localtime()
        if tp_msg[0].get_val():
            # the sender UE requests a status report as a result of the SMS DELIVER process
            tp_ref = tp_msg[6].get_val()
        else:
            tp_ref = None
        #
        # check TP dest addr
        num_dest = tp_msg[7]['Num'].decode()
        if num_dest in self.Server.MSISDN:
            imsi = self.Server.MSISDN[num_dest]
        else:
            # unknown msisdn
            # status 65: incompatible dest
            self._log('INF',
                      'process_tp_submit: destination unknown, %s' % num_dest)
            if tp_ref:
                tp_stat = self._create_tp_stat_rep(tp_msg, num, atime, stat=65)
                self.send_tp(tp_stat, num)
            return
        #
        if imsi in self.Server.UE:
            ued = self.Server.UE[imsi]
        else:
            # UE never attached
            # status 34: no response from SME
            self._log('INF',
                      'process_tp_submit: destination offline, %s' % num_dest)
            if tp_ref:
                tp_stat = self._create_tp_stat_rep(tp_msg, num, atime, stat=34)
                self.send_tp(tp_stat, num)
            return
        #
        # build tp_deliver
        if tp_ref is not None:
            # keep track of the SMS SUBMIT for further status report
            self.Proc[num]['TP'][tp_ref] = (tp_msg, atime)
        tp_del = self._create_tp_deliver(tp_msg, num, atime)
        self.send_tp(tp_del, num_dest, tp_ref=tp_ref)

    def _process_tp_cmd(self, tp_msg, num):
        self._log('INF', 'process_tp_cmd: CDL %i, CD 0x%s'\
                  % (tp_msg['TP_CDL'].get_val(),
                     hexlify(tp_msg['TP_CD'].get_val()).decode('ascii')))
        atime = localtime()
        if tp_msg[0].get_val():
            # the sender UE requests a status report of the result of the SMS COMMAND process
            tp_stat = self._create_tp_stat_rep(tp_msg, num, atime, stat=0)
            self.send_tp(tp_stat, num)

    def _create_tp_stat_rep(self, tp_msg, num, atime, stat=64):
        tp_srq = 1 if isinstance(tp_msg, NAS.SMS_COMMAND) else 0
        tp_mr = tp_msg[6].get_val()
        tp_ra = {'Type': 1, 'NumberingPlan': 1, 'Num': num}
        tp_scts = (atime, self.TIMEZONE)
        if not 0 <= stat <= 255:
            # clamp out-of-range status values to 64 (permanent error)
            stat = 64
        #
        tp_stat = NAS.SMS_STATUS_REPORT(
            val={
                'TP_SRQ': tp_srq,
                'TP_MR': tp_mr,
                'TP_RA': tp_ra,
                'TP_SCTS': tp_scts,
                'TP_ST': stat
            })
        tp_stat['TP_PI'].set_trans(True)
        self._set_tp_scts(tp_stat['TP_DT'])
        return tp_stat

    def _create_tp_deliver(self, tp_msg, num, atime):
        tp_sri = tp_msg[0].get_val()
        tp_udhi = tp_msg[1].get_val()
        tp_oa = {'Type': 1, 'NumberingPlan': 1, 'Num': num}
        tp_pid = tp_msg[8].get_val()
        tp_dcs = tp_msg[9].get_val()
        tp_msg_ud = tp_msg['TP_UD']
        if tp_udhi:
            tp_udh = tp_msg_ud[1][1].get_val()
        else:
            tp_udh = None
        tp_ud = tp_msg_ud[2].get_val()
        #
        tp_del = NAS.SMS_DELIVER(
            val={
                'TP_SRI': tp_sri,
                'TP_UDHI': tp_udhi,
                'TP_OA': tp_oa,
                'TP_PID': tp_pid,
                'TP_DCS': tp_dcs,
                'TP_UD': {
                    'UDH': {
                        'UDH': tp_udh
                    },
                    'UD': tp_ud
                }
            })
        self._set_tp_scts(tp_del['TP_SCTS'])
        return tp_del

    def _set_tp_scts(self, tp_scts):
        if tp_scts.get_len() == 7:
            tp_scts.encode(localtime(), tz=self.TIMEZONE)
        else:
            self._log('WNG', 'set_tp_scts: custom timestamping unhandled')

    def _inject_tp(self, tp_msg, num):
        """put the tp_msg within the injection queue,
        and let the forwarding thread take care of it
        """
        try:
            self._inject_q.put_nowait((tp_msg, num))
        except Full as err:
            self._log('ERR', 'inject_tp: TP injection queue is full (%i), deleting it, %s'\
                      % (self._inject_q.qsize(), err))
            self._inject_q = Queue()

    def _get_new_rp_ref(self, num):
        if num not in self.Proc:
            self.init_ue(num)
            return 0
        else:
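            # RP refs fit in a uint8 (0-255); reaching 256 in the scan
            # below means every ref is currently in use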
            for i in range(0, 257):
                if i not in self.Proc[num]['RP']:
                    break
            if i == 256:
                # no RP ref available...
                self._log('ERR', 'get_new_rp_ref: no RP ref available, clearing all procedure for %s'\
                          % num)
                self.Proc[num]['RP'].clear()
                self.Proc[num]['TP'].clear()
                return 0
            else:
                return i

    def send_tp(self, tp_msg, num, tp_ref=None):
        """send the SMS TP message `tp_msg' to UE msisdn `num'
        associate the TP transaction ref `tp_ref' to the RP transaction
        """
        # wrap the TP msg into an RP DATA msg
        ref = self._get_new_rp_ref(num)
        rp_msg = NAS.RP_DATA_MT(val={
            'Ref': ref,
            'RPOriginatorAddress': self.RP_OA
        })
        rp_msg.set_tpdu(tp_msg)
        self.Proc[num]['RP'][ref] = (rp_msg, tp_ref)
        self._log('DBG', 'sending TP msg with RP ref %i' % ref)
        self.send_rp(rp_msg, num)

    def send_rp(self, rp_msg, num):
        if self.TRACK_PDU:
            self._pdu.append((time(), 'DL', rp_msg))
        self.Server.send_smsrp(num, rp_msg)

    def discard_rp(self, rp_msg, num):
        """discard an RP message `rp_msg' sent to UE with msisdn `num'
        """
        if num not in self.Proc:
            return
        rp_procs = self.Proc[num]['RP']
        ref = rp_msg[2].get_val()
        if ref not in rp_procs:
            return
        rp_req, tp_ref = rp_procs[ref]
        if tp_ref is not None:
            # downlink RP-DATA failed within corenet, status report required
            # TP status 97 : Temporary error, SC is not making any more transfer attempts - SME busy
            self._report_status(rp_req, tp_ref, 97)
        # delete the RP transaction
        del rp_procs[ref]
        self._log('INF',
                  'discard_rp: delete RP procedure (%s, %i)' % (num, ref))

    #--------------------------------------------------------------------------#
    # custom methods to send TP messages from the SMSd to UEs
    #--------------------------------------------------------------------------#

    def send_text(self, text, num):
        """sends a given text (ascii string, that will be converted to SMS 7bit)
        to a given phone number
        """
        tp_dcs = self.TP_DCS
        self.TP_DCS = {'Group': 0, 'Charset': 0, 'Class': 0}  # GSM 7bit
        self.send_tpud(text, num=num)
        self.TP_DCS = tp_dcs

    def send_tpud(self, ud, num):
        """sends a given user-data (directly the data buffer, or a tuple with 
        options and the data buffer) to a given phone number
        
        each option must be a 2-tuple (Tag, Value) were Tag is an uint8 and Value
        is a buffer
        """
        # TODO: implement SMS UD fragmentation into several tp_msg
        try:
            tp_msg = NAS.SMS_DELIVER(
                val={
                    'TP_MMS': 1,  # no more messages
                    'TP_OA': self.TP_OA,
                    'TP_PID': self.TP_PID,
                    'TP_DCS': self.TP_DCS
                })
            self._set_tp_scts(tp_msg['TP_SCTS'])
            if isinstance(ud, (list, tuple)):
                if len(ud) > 1:
                    # UD header IEs
                    tp_msg['TP_UDHI'].set_val(1)
                    tp_msg['TP_UD']['UDH']['UDH'].set_val([{
                        'T': udh[0],
                        'V': udh[1]
                    } for udh in ud[:-1]])
                data = ud[-1]
            else:
                data = ud
            tp_msg['TP_UD']['UD'].set_val(data)
        except Exception:
            self._log('WNG', 'invalid TP UD')
        else:
            self._inject_tp(tp_msg, num)
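A short usage sketch for the two sender helpers above; the MSISDN and the UDH tag/value bytes are illustrative placeholders, not values from the original code.

smsd = SMSd()
# plain ascii text, converted to GSM 7bit by send_text()
smsd.send_text('hello from the relay', '0601020304')
# raw user-data with one UDH option: a 2-tuple (Tag, Value) where Tag is a
# uint8 and Value a buffer, followed by the data buffer itself
smsd.send_tpud(((0x00, b'\x05\x00\x03'), b'raw payload'), '0601020304')
smsd.stop()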
Example #53
0
class SyncRequestDispatchQueue(object):
    MAX_TASK_ACK_TIME = 0.5

    instance = None

    def __init__(self):
        self._queue = Queue(maxsize=256)
        self._workers = 1
        self._pending_workers = 0
        self._workers_lock = Lock()
        self._primary_worker = Thread(
            target=self._dispatch_request_worker,
            name="Primary SyncQueue Dispatcher"
        )
        self._primary_worker.daemon = True
        self._primary_worker.start()
        self._closed = False
        self._max_workers = 1
        self._promise = 0

    @staticmethod
    def get_queue():
        if not SyncRequestDispatchQueue.instance:
            SyncRequestDispatchQueue.instance = SyncRequestDispatchQueue()

        return SyncRequestDispatchQueue.instance

    def _dispatch_request_worker(self):

        name = current_thread().name

        if __debug__:
            syncqueuelogger.debug('New Worker(%s)', name)

        task = self._queue.get()
        while task and not self._closed:
            ack, on_error, func, args = task

            with self._workers_lock:
                ack.set()
                self._pending_workers += 1

            try:
                if __debug__:
                    syncqueuelogger.debug('Process task(%s) - start', name)

                func(*args)

                if __debug__:
                    syncqueuelogger.debug('Process task(%s) - complete', name)

            except Exception as e:
                if __debug__:
                    syncqueuelogger.debug(
                        'Process task(%s) - exception: func=%s args=%s exc:%s/%s',
                            name, func, args, type(e), e)

                if on_error:
                    on_error(e)

            del func, args

            with self._workers_lock:
                self._queue.task_done()
                self._pending_workers -= 1

            again = False
            task = None

            try:
                task = self._queue.get_nowait()
                if __debug__:
                    syncqueuelogger.debug('Task acquired(%s) (no wait)', name)

            except Empty:
                with self._workers_lock:
                    if not self._closed and (self._promise or self._workers <= self._pending_workers + 1):
                        again = True
                    else:
                        self._workers -= 1

            if again:
                if __debug__:
                    syncqueuelogger.debug('Wait for task to be queued(%s)', name)

                task = self._queue.get()

                if __debug__:
                    syncqueuelogger.debug('Task acquired(%s)', name)

        if __debug__:
            if not task:
                syncqueuelogger.debug('Worker(%s) closed by explicit request', name)
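The snippet above only shows the worker side; the enqueue path is not included. A minimal sketch of what a submission would look like, assuming tasks are the (ack, on_error, func, args) tuples the worker unpacks:

from threading import Event

results = []

def work(arg):
    results.append(arg)

queue = SyncRequestDispatchQueue.get_queue()
ack = Event()
# the worker unpacks (ack, on_error, func, args) and sets `ack` on pickup
queue._queue.put((ack, None, work, ('payload',)))
ack.wait(SyncRequestDispatchQueue.MAX_TASK_ACK_TIME)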
Example #54
0
def getevent_position(resolution):
    if deviceName in ('5LM7N16224000261', 'R4WG45TCUCUCVGKN'):
        geteventCmd = 'adb -s %s shell getevent -lt /dev/input/event5' % deviceName  # Huawei Mate8, Oppo
    elif deviceName == '63a9bca7':
        geteventCmd = 'adb -s %s shell getevent -lt /dev/input/event4' % deviceName  # Vivo
    else:
        # avoid a NameError below when an unrecognized device is attached
        raise ValueError('unknown device: %s' % deviceName)
    p = Popen(geteventCmd, shell=True, stdout=PIPE)
    print p.pid
    positionRecord = collections.OrderedDict()
    q = Queue()
    t = Thread(target=enqueue_output, args=(p.stdout, q))
    t.daemon = True  # thread dies with the program
    t.start()

    noOutput = False
    hasLine = False
    screenType = check_portrait_landscape()
    max_width = resolution[0]
    while noOutput is False or hasLine is False:
        if finishFlag is True:
            break
        try:
            line = q.get_nowait()  # or q.get(timeout=.1)
        except Empty:
            noOutput = True
            if hasLine is False:
                screenType = check_portrait_landscape()
                time.sleep(0.5)
        else:
            hasLine = True
            noOutput = False
            splitLine = line.split()
            timeValue = splitLine[1][0:-1]
            positionValue = int(splitLine[4], 16)
            if timeValue not in positionRecord:
                positionRecord[timeValue] = []
            positionRecord[timeValue].append(positionValue)
    p.kill()
    p.wait()
    #checkGetEventCmd = 'ps aux | grep getevent'
    #os.system(checkGetEventCmd)
    clickOp = collections.OrderedDict()
    if len(positionRecord) == 1:
        startTime = positionRecord.keys()[0]
        positionValue = positionRecord[startTime]
        if screenType == 'landscape':
            landscape_x = positionValue[1]
            landscape_y = max_width - positionValue[0]
            positionValue = [landscape_x, landscape_y]
        clickOp['clickType'] = 'tap'
        clickOp['clickStartTime'] = startTime
        clickOp['clickPosition'] = positionValue
        clickOp['screenType'] = screenType
        print clickOp
    elif len(positionRecord) > 1:
        startTime = positionRecord.keys()[0]
        startPosition = positionRecord[startTime]
        if screenType == 'landscape':
            landscape_x = startPosition[1]
            landscape_y = max_width - startPosition[0]
            startPosition = [landscape_x, landscape_y]
        endTime = positionRecord.keys()[-1]
        endPosition = positionRecord[endTime]
        if screenType == 'landscape':
            landscape_x = endPosition[1]
            landscape_y = max_width - endPosition[0]
            endPosition = [landscape_x, landscape_y]
        startVect = numpy.array(startPosition)
        endVect = numpy.array(endPosition)
        eucDist = round(numpy.linalg.norm(endVect - startVect), 3)
        if eucDist < 20:
            clickOp['clickType'] = 'tap'
            clickOp['clickStartTime'] = startTime
            clickOp['clickPosition'] = startPosition
            clickOp['screenType'] = screenType
        else:
            clickOp['clickType'] = 'swipe'
            clickOp['clickStartTime'] = startTime
            clickOp['clickEndTime'] = endTime
            clickOp['clickStartPosition'] = startPosition
            clickOp['clickEndPosition'] = endPosition
            clickOp['eucDist'] = eucDist
            clickOp['screenType'] = screenType
        print clickOp
    print 'end of getevent position....'
    return clickOp
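The enqueue_output() helper handed to the reader thread above is not defined in this example; a sketch of the usual non-blocking readline pattern it implies:

def enqueue_output(out, queue):
    # Push each line of getevent output onto the queue so the main loop
    # can drain it with get_nowait() without blocking on the pipe.
    for line in iter(out.readline, b''):
        queue.put(line)
    out.close()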
Example #55
0
class Saver(QObject):  # {{{
    def __init__(self,
                 parent,
                 db,
                 callback,
                 rows,
                 path,
                 opts,
                 spare_server=None):
        QObject.__init__(self, parent)
        self.pd = ProgressDialog(_('Saving...'), parent=parent)
        self.spare_server = spare_server
        self.db = db
        self.opts = opts
        self.pd.setModal(True)
        self.pd.show()
        self.pd.set_min(0)
        self.pd.set_msg(_('Collecting data, please wait...'))
        self._parent = parent
        self.callback = callback
        self.callback_called = False
        self.rq = Queue()
        self.ids = [
            x for x in map(db.id, [r.row() for r in rows]) if x is not None
        ]
        self.pd_max = len(self.ids)
        self.pd.set_max(0)
        self.pd.value = 0
        self.failures = set([])

        from calibre.ebooks.metadata.worker import SaveWorker
        self.worker = SaveWorker(self.rq,
                                 db,
                                 self.ids,
                                 path,
                                 self.opts,
                                 spare_server=self.spare_server)
        self.pd.canceled_signal.connect(self.canceled)
        self.continue_updating = True
        single_shot(self.update)

    def canceled(self):
        self.continue_updating = False
        if self.worker is not None:
            self.worker.canceled = True
        self.pd.hide()
        if not self.callback_called:
            self.callback(self.worker.path, self.failures, self.worker.error)
            self.callback_called = True

    def update(self):
        if not self.continue_updating:
            return
        if not self.worker.is_alive():
            # Check that all ids were processed
            while self.ids:
                # Get all queued results since worker is dead
                before = len(self.ids)
                self.get_result()
                if before == len(self.ids):
                    # No results available => worker died unexpectedly
                    for i in list(self.ids):
                        self.failures.add(('id:%d' % i, 'Unknown error'))
                        self.ids.remove(i)

        if not self.ids:
            self.continue_updating = False
            self.pd.hide()
            if not self.callback_called:
                try:
                    # Give the worker time to clean up and set worker.error
                    self.worker.join(2)
                except:
                    pass  # The worker was not yet started
                self.callback_called = True
                self.callback(self.worker.path, self.failures,
                              self.worker.error)

        if self.continue_updating:
            self.get_result()
            single_shot(self.update)

    def get_result(self):
        try:
            id, title, ok, tb = self.rq.get_nowait()
        except Empty:
            return
        if self.pd.max != self.pd_max:
            self.pd.max = self.pd_max
        self.pd.value += 1
        self.ids.remove(id)
        if not isinstance(title, unicode):
            title = str(title).decode(preferred_encoding, 'replace')
        self.pd.set_msg(_('Saved') + ' ' + title)

        if not ok:
            self.failures.add((title, tb))
Example #56
0
class Resource(Task):
    def __init__(self, dataPath = os.path.join("..", "data")):
        self.resultQueue = Queue()
        self.dataPaths = [dataPath]
        self.loaderSemaphore = BoundedSemaphore(value = 1)
        self.loaders = []

        #myfingershurt: the following should be global, and only done at startup.  Not every damn time a file is loaded.
        self.songPath = []
        self.baseLibrary = Config.get("setlist", "base_library")
        #evilynux - Support for songs in ~/.fretsonfire/songs (GNU/Linux and MacOS X)
        if self.baseLibrary == "None" and os.name == "posix":
            path = os.path.expanduser("~/." + Version.PROGRAM_UNIXSTYLE_NAME)
            if os.path.isdir(path):
                self.baseLibrary = path
                Config.set("setlist", "base_library", path)

        if self.baseLibrary and os.path.isdir(self.baseLibrary):
            self.songPath = [self.baseLibrary]

        self.logLoadings = Config.get("game", "log_loadings")

    #myfingershurt: Need a function to refresh the base library after a new one is selected:
    def refreshBaseLib(self):
        self.baseLibrary = Config.get("setlist", "base_library")
        if self.baseLibrary and os.path.isdir(self.baseLibrary):
            self.songPath = [self.baseLibrary]

    def addDataPath(self, path):
        if path not in self.dataPaths:
            self.dataPaths = [path] + self.dataPaths

    def removeDataPath(self, path):
        if path in self.dataPaths:
            self.dataPaths.remove(path)

    def fileName(self, *name, **args):

        #myfingershurt: the following should be global, and only done at startup.  Not every damn time a file is loaded.
        songPath = self.songPath

        if not args.get("writable", False):
            for dataPath in self.dataPaths + songPath:
                readOnlyPath = os.path.join(dataPath, *name)
                # If the requested file is in the read-write path and not in the
                # read-only path, use the existing read-write one.
                if os.path.isfile(readOnlyPath):
                    return readOnlyPath
                elif os.path.isdir(readOnlyPath):
                    return readOnlyPath
                readWritePath = os.path.join(getWritableResourcePath(), *name)
                if os.path.isfile(readWritePath):
                    return readWritePath
            return readOnlyPath
        else:
            for dataPath in [self.dataPaths[-1]] + songPath:
                readOnlyPath = os.path.join(dataPath, *name)
                if not (os.path.isfile(readOnlyPath) or os.path.isdir(readOnlyPath)):
                    continue
                try:
                    # First see if we can write to the original file
                    if os.access(readOnlyPath, os.W_OK):
                        return readOnlyPath
                    # If the original file does not exist, see if we can write to its directory
                    if not os.path.isfile(readOnlyPath) and os.access(os.path.dirname(readOnlyPath), os.W_OK):
                        pass
                except:
                    raise
                # If the resource exists in the read-only path, make a copy to the
                # read-write path.
                readWritePath = os.path.join(getWritableResourcePath(), *name)
                if not os.path.isfile(readWritePath) and os.path.isfile(readOnlyPath):
                    log.notice("Copying '%s' to writable data directory." % "/".join(name))
                    try:
                        os.makedirs(os.path.dirname(readWritePath))
                    except:
                        pass
                    shutil.copy(readOnlyPath, readWritePath)
                    self.makeWritable(readWritePath)
                # Create directories if needed
                if not os.path.isdir(readWritePath) and os.path.isdir(readOnlyPath):
                    log.notice("Creating writable directory '%s'." % "/".join(name))
                    os.makedirs(readWritePath)
                    self.makeWritable(readWritePath)
                return readWritePath
            return readOnlyPath

    def makeWritable(self, path):
        os.chmod(path, stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)

    def load(self, target = None, name = None, function = lambda: None, synch = False, onLoad = None, onCancel = None):

        if self.logLoadings == 1:
            log.notice("Loading %s.%s %s" % (target.__class__.__name__, name, synch and "synchronously" or "asynchronously"))

        l = Loader(target, name, function, self.resultQueue, self.loaderSemaphore, onLoad = onLoad, onCancel = onCancel)
        if synch:
            l.load()
            return l.finish()
        else:
            self.loaders.append(l)
            l.start()
            return l

    def run(self, ticks):
        try:
            loader = self.resultQueue.get_nowait()
            loader.finish()
            self.loaders.remove(loader)
        except Empty:
            pass
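A brief usage sketch for the asynchronous path of load() above, assuming the surrounding module context (Config, log, Loader) is available; the holder object, file name, and callbacks are hypothetical stand-ins:

resource = Resource()

class Holder(object):
    texture = None

def loadTexture():
    # hypothetical loader body; runs on the Loader thread
    return open(resource.fileName('default.png'), 'rb').read()

def onTextureLoaded(*args):
    log.notice("texture ready")

holder = Holder()
resource.load(target=holder, name="texture", function=loadTexture,
              onLoad=onTextureLoaded)
# The main loop must keep calling run() so finished Loaders are finalized.
resource.run(ticks=0)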
Example #57
0
class Adder(QObject):  # {{{

    ADD_TIMEOUT = 900  # seconds (15 minutes)

    def __init__(self, parent, db, callback, spare_server=None):
        QObject.__init__(self, parent)
        self.pd = ProgressDialog(_('Adding...'), parent=parent)
        self.pd.setMaximumWidth(min(600, int(available_width() * 0.75)))
        self.spare_server = spare_server
        self.db = db
        self.pd.setModal(True)
        self.pd.show()
        self._parent = parent
        self.rfind = self.worker = None
        self.callback = callback
        self.callback_called = False
        self.pd.canceled_signal.connect(self.canceled)

    def add_recursive(self, root, single=True):
        self.path = root
        self.pd.set_msg(_('Searching in all sub-directories...'))
        self.pd.set_min(0)
        self.pd.set_max(0)
        self.pd.value = 0
        self.rfind = RecursiveFind(self, self.db, root, single)
        self.rfind.update.connect(self.pd.set_msg, type=Qt.QueuedConnection)
        self.rfind.found.connect(self.add, type=Qt.QueuedConnection)
        self.rfind.start()

    def add(self, books):
        if isinstance(books, basestring):
            error_dialog(self.pd,
                         _('Path error'),
                         _('The specified directory could not be processed.'),
                         det_msg=books,
                         show=True)
            return self.canceled()
        if not books:
            info_dialog(self.pd, _('No books'), _('No books found'), show=True)
            return self.canceled()
        books = [[b] if isinstance(b, basestring) else b for b in books]
        restricted = set()
        for i in xrange(len(books)):
            files = books[i]
            restrictedi = set(f for f in files if not os.access(f, os.R_OK))
            if restrictedi:
                files = [f for f in files if os.access(f, os.R_OK)]
                books[i] = files
            restricted |= restrictedi
        if restricted:
            det_msg = u'\n'.join(restricted)
            warning_dialog(self.pd,
                           _('No permission'),
                           _('Cannot add some files as you do not have '
                             ' permission to access them. Click Show'
                             ' Details to see the list of such files.'),
                           det_msg=det_msg,
                           show=True)
        books = list(filter(None, books))
        if not books:
            return self.canceled()
        self.rfind = None
        from calibre.ebooks.metadata.worker import read_metadata
        self.rq = Queue()
        tasks = []
        self.ids = {}
        self.nmap = {}
        self.duplicates = []
        for i, b in enumerate(books):
            tasks.append((i, b))
            self.ids[i] = b
            self.nmap[i] = os.path.basename(b[0])
        self.worker = read_metadata(tasks,
                                    self.rq,
                                    spare_server=self.spare_server)
        self.pd.set_min(0)
        self.pd.set_max(len(self.ids))
        self.pd.value = 0
        self.db_adder = DBAdder(self, self.db, self.ids, self.nmap)
        self.db_adder.start()
        self.last_added_at = time.time()
        self.entry_count = len(self.ids)
        self.continue_updating = True
        single_shot(self.update)

    def canceled(self):
        self.continue_updating = False
        if self.rfind is not None:
            self.rfind.canceled = True
        if self.worker is not None:
            self.worker.canceled = True
        if hasattr(self, 'db_adder'):
            self.db_adder.end()
        self.pd.hide()
        if not self.callback_called:
            self.callback(self.paths, self.names, self.infos)
            self.callback_called = True

    def duplicates_processed(self):
        self.db_adder.end()
        if not self.callback_called:
            self.callback(self.paths, self.names, self.infos)
            self.callback_called = True
        if hasattr(self, '__p_d'):
            self.__p_d.hide()

    def update(self):
        if self.entry_count <= 0:
            self.continue_updating = False
            self.pd.hide()
            self.process_duplicates()
            return

        try:
            id, opf, cover = self.rq.get_nowait()
            self.db_adder.input_queue.put((id, opf, cover))
            self.last_added_at = time.time()
        except Empty:
            pass

        try:
            title = self.db_adder.output_queue.get_nowait()
            self.pd.value += 1
            self.pd.set_msg(_('Added') + ' ' + title)
            self.last_added_at = time.time()
            self.entry_count -= 1
        except Empty:
            pass

        if (time.time() - self.last_added_at) > self.ADD_TIMEOUT:
            self.continue_updating = False
            self.pd.hide()
            self.db_adder.end()
            if not self.callback_called:
                self.callback([], [], [])
                self.callback_called = True
            error_dialog(self._parent,
                         _('Adding failed'),
                         _('The add books process seems to have hung.'
                           ' Try restarting calibre and adding the '
                           'books in smaller increments, until you '
                           'find the problem book.'),
                         show=True)

        if self.continue_updating:
            single_shot(self.update)

    def process_duplicates(self):
        duplicates = self.db_adder.duplicates
        if not duplicates:
            return self.duplicates_processed()
        self.pd.hide()
        from calibre.gui2.dialogs.duplicates import DuplicatesQuestion
        d = DuplicatesQuestion(self.db, duplicates, self._parent)
        duplicates = tuple(d.duplicates)
        if duplicates:
            pd = QProgressDialog(_('Adding duplicates...'), '', 0,
                                 len(duplicates), self._parent)
            pd.setCancelButton(None)
            pd.setValue(0)
            pd.show()
            self.__p_d = pd
            self.__d_a = DuplicatesAdder(self._parent, self.db, duplicates,
                                         self.db_adder)
            self.__d_a.added.connect(pd.setValue)
            self.__d_a.adding_done.connect(self.duplicates_processed)
        else:
            return self.duplicates_processed()

    def cleanup(self):
        if hasattr(self, 'pd'):
            self.pd.hide()
        if hasattr(self, 'worker') and hasattr(self.worker, 'tdir') and \
                self.worker.tdir is not None:
            if os.path.exists(self.worker.tdir):
                try:
                    shutil.rmtree(self.worker.tdir)
                except:
                    pass
        self._parent = None
        self.pd.setParent(None)
        del self.pd
        self.pd = None
        if hasattr(self, 'db_adder'):
            self.db_adder.setParent(None)
            del self.db_adder
            self.db_adder = None

    @property
    def number_of_books_added(self):
        return getattr(getattr(self, 'db_adder', None),
                       'number_of_books_added', 0)

    @property
    def merged_books(self):
        return getattr(getattr(self, 'db_adder', None), 'merged_books',
                       set([]))

    @property
    def critical(self):
        return getattr(getattr(self, 'db_adder', None), 'critical', {})

    @property
    def paths(self):
        return getattr(getattr(self, 'db_adder', None), 'paths', [])

    @property
    def names(self):
        return getattr(getattr(self, 'db_adder', None), 'names', [])

    @property
    def infos(self):
        return getattr(getattr(self, 'db_adder', None), 'infos', [])
Example #58
0
class DBAdder(QObject):  # {{{
    def __init__(self, parent, db, ids, nmap):
        QObject.__init__(self, parent)

        self.db, self.ids, self.nmap = db, dict(**ids), dict(**nmap)
        self.critical = {}
        self.number_of_books_added = 0
        self.duplicates = []
        self.names, self.paths, self.infos = [], [], []
        self.input_queue = Queue()
        self.output_queue = Queue()
        self.merged_books = set([])
        self.auto_convert_books = set()

    def end(self):
        if gprefs['manual_add_auto_convert'] and self.auto_convert_books:
            from calibre.gui2.ui import get_gui
            gui = get_gui()
            gui.iactions['Convert Books'].auto_convert_auto_add(
                self.auto_convert_books)

        # Sentinel that tells start() to stop rescheduling itself
        self.input_queue.put((None, None, None))

    def start(self):
        try:
            id, opf, cover = self.input_queue.get_nowait()
        except Empty:
            single_shot(self.start)
            return
        if id is None and opf is None and cover is None:
            return  # sentinel from end(); stop polling
        name = self.nmap.pop(id)
        title = None
        if DEBUG:
            st = time.time()
        try:
            title = self.add(id, opf, cover, name)
        except:
            import traceback
            self.critical[name] = traceback.format_exc()
            title = name
        self.output_queue.put(title)
        if DEBUG:
            prints('Added', title, 'to db in:', time.time() - st, 'seconds')
        single_shot(self.start)

    def process_formats(self, opf, formats):
        imp = opf[:-4] + '.import'
        if not os.access(imp, os.R_OK):
            return formats
        fmt_map = {}
        # Each line of the .import file maps an extension to a source path
        with open(imp, 'rb') as imp_file:
            for line in imp_file:
                if ':' not in line:
                    continue
                ext, _sep, path = line.partition(':')
                fmt_map[ext] = path.rstrip()
        fmts = []
        for fmt in formats:
            e = os.path.splitext(fmt)[1].replace('.', '').lower()
            fmts.append(fmt_map.get(e, fmt))
            if not os.access(fmts[-1], os.R_OK):
                fmts[-1] = fmt
        return fmts

    def add(self, id, opf, cover, name):
        formats = self.ids.pop(id)
        if opf.endswith('.error'):
            mi = MetaInformation('', [_('Unknown')])
            with open(opf, 'rb') as f:
                self.critical[name] = f.read().decode('utf-8', 'replace')
        else:
            try:
                mi = OPF(opf).to_book_metadata()
            except:
                import traceback
                mi = MetaInformation('', [_('Unknown')])
                self.critical[name] = traceback.format_exc()
        formats = self.process_formats(opf, formats)
        if not mi.title:
            mi.title = os.path.splitext(name)[0]
        mi.title = mi.title if isinstance(mi.title, unicode) else \
                   mi.title.decode(preferred_encoding, 'replace')
        if mi.application_id == '__calibre_dummy__':
            mi.application_id = None
        if self.db is not None:
            if cover:
                with open(cover, 'rb') as f:
                    cover = f.read()
            orig_formats = formats
            formats = [f2 for f2 in formats if not f2.lower().endswith('.opf')]
            if prefs['add_formats_to_existing']:  # automerge is on
                identical_book_list = self.db.find_identical_books(mi)
                if identical_book_list:  # books with same author and nearly same title exist in db
                    self.merged_books.add((mi.title, ' & '.join(mi.authors)))
                    seen_fmts = set()

                    for identical_book in identical_book_list:
                        ib_fmts = self.db.formats(identical_book,
                                                  index_is_id=True)
                        if ib_fmts:
                            seen_fmts |= set(ib_fmts.split(','))
                        replace = gprefs['automerge'] == 'overwrite'
                        self.add_formats(identical_book,
                                         formats,
                                         replace=replace)
                    if gprefs['automerge'] == 'new record':
                        incoming_fmts = {
                            os.path.splitext(path)[-1].replace('.', '').upper()
                            for path in formats}
                        if incoming_fmts.intersection(seen_fmts):
                            # There was at least one duplicate format
                            # so create a new record and put the
                            # incoming formats into it
                            # We should arguably put only the duplicate
                            # formats, but no real harm is done by having
                            # all formats
                            id_ = self.db.create_book_entry(
                                mi, cover=cover, add_duplicates=True)
                            self.number_of_books_added += 1
                            self.add_formats(id_, formats)

                else:
                    # books with same author and nearly same title do not exist in db
                    id_ = self.db.create_book_entry(mi,
                                                    cover=cover,
                                                    add_duplicates=True)
                    self.number_of_books_added += 1
                    self.add_formats(id_, formats)

            else:  # automerge is off
                id_ = self.db.create_book_entry(mi,
                                                cover=cover,
                                                add_duplicates=False)
                if id_ is None:
                    self.duplicates.append((mi, cover, orig_formats))
                else:
                    self.add_formats(id_, formats)
                    self.auto_convert_books.add(id_)
                    self.number_of_books_added += 1
        else:
            self.names.append(name)
            self.paths.append(formats[0])
            self.infos.append(mi)
        return mi.title

    def add_formats(self, id, formats, replace=True):
        for path in formats:
            fmt = os.path.splitext(path)[-1].replace('.', '').upper()
            with open(path, 'rb') as f:
                self.db.add_format(id,
                                   fmt,
                                   f,
                                   index_is_id=True,
                                   notify=False,
                                   replace=replace)
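
DBAdder.start() above never blocks the GUI thread: it attempts a single get_nowait(), handles at most one item, and re-arms itself via single_shot until end() pushes the (None, None, None) sentinel. A stripped-down sketch of the same poll-and-reschedule idiom; threading.Timer stands in for calibre's single_shot helper, which is an assumption of this sketch:

import threading
try:
    from queue import Queue, Empty  # Python 3
except ImportError:
    from Queue import Queue, Empty  # Python 2

work = Queue()

def poll():
    # Take at most one item per tick, mirroring DBAdder.start()
    try:
        item = work.get_nowait()
    except Empty:
        threading.Timer(0.05, poll).start()  # stand-in for single_shot
        return
    if item is None:  # sentinel, mirroring DBAdder.end()
        return
    print('processing %s' % item)
    threading.Timer(0, poll).start()

work.put('book.epub')
work.put(None)
poll()
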
Example #59
0
class YoutubeDLDownloader(object):
    """Python class for downloading videos using youtube-dl & subprocess.

    Attributes:
        OK, ERROR, STOPPED, ALREADY, FILESIZE_ABORT, WARNING (int): Integers
            that describe the return code from the download() method. The
            larger the number, the higher the code's priority: codes with
            lower priority cannot overwrite codes with higher priority.

    Args:
        youtubedl_path (string): Absolute path to youtube-dl binary.

        data_hook (function): Optional callback function to retrieve download
            process data.

        log_data (function): Optional callback function to write data to
            the log file.

    Warnings:
        The caller is responsible for calling the close() method after
        finishing with the object, so that the object can shut itself
        down properly.

    Example:
        How to use YoutubeDLDownloader from a python script.

            from downloaders import YoutubeDLDownloader

            def data_hook(data):
                print(data)

            downloader = YoutubeDLDownloader('/usr/bin/youtube-dl', data_hook)

            downloader.download(<URL STRING>, ['-f', 'flv'])

    """

    OK = 0
    WARNING = 1
    ERROR = 2
    FILESIZE_ABORT = 3
    ALREADY = 4
    STOPPED = 5

    def __init__(self, youtubedl_path, data_hook=None, log_data=None):
        self.youtubedl_path = youtubedl_path
        self.data_hook = data_hook
        self.log_data = log_data

        self._return_code = self.OK
        self._proc = None

        self._stderr_queue = Queue()
        self._stderr_reader = PipeReader(self._stderr_queue)

    def download(self, url, options):
        """Download url using given options.

        Args:
            url (string): URL string to download.
            options (list): Python list that contains youtube-dl options.

        Returns:
            An integer that shows the status of the download process.
            There are 6 different return codes.

            OK (0): The download process completed successfully.
            WARNING (1): A warning occurred during the download process.
            ERROR (2): An error occurred during the download process.
            FILESIZE_ABORT (3): The corresponding url video file was larger
                or smaller than the given filesize limit.
            ALREADY (4): The given url has already been downloaded.
            STOPPED (5): The download process was stopped by the user.

        """
        self._return_code = self.OK

        cmd = self._get_cmd(url, options)
        self._create_process(cmd)

        if self._proc is not None:
            self._stderr_reader.attach_filedescriptor(self._proc.stderr)

        while self._proc_is_alive():
            stdout = self._proc.stdout.readline().rstrip()
            stdout = convert_item(stdout, to_unicode=True)

            if stdout:
                data_dict = extract_data(stdout)
                self._extract_info(data_dict)
                self._hook_data(data_dict)

        # Read stderr after download process has been completed
        # We don't need to read stderr in real time
        while not self._stderr_queue.empty():
            stderr = self._stderr_queue.get_nowait().rstrip()
            stderr = convert_item(stderr, to_unicode=True)

            self._log(stderr)

            if self._is_warning(stderr):
                self._set_returncode(self.WARNING)
            else:
                self._set_returncode(self.ERROR)

        # Set return code to ERROR if we could not start the download process
        # or the child's return code is greater than zero
        # NOTE: On Linux, if the called script is just empty, Python exits
        # normally (ret=0), so we can't detect this or similar cases
        # using the code below
        # NOTE: On Unix a negative return code (-N) indicates that the child
        # was terminated by signal N (e.g. -9 = SIGKILL)
        if self._proc is None or self._proc.returncode > 0:
            self._return_code = self.ERROR

        if self._proc is not None and self._proc.returncode > 0:
            self._log('Child process exited with non-zero code: {}'.format(
                self._proc.returncode))

        self._last_data_hook()

        return self._return_code

    def stop(self):
        """Stop the download process and set return code to STOPPED. """
        if self._proc_is_alive():

            if os.name == 'nt':
                # os.killpg is not available on Windows
                # See: https://bugs.python.org/issue5115
                self._proc.kill()

                # When we kill the child process on Windows the return code
                # gets set to 1, so we want to reset the return code back to 0
                # in order to avoid creating logging output in the download(...)
                # method
                self._proc.returncode = 0
            else:
                os.killpg(self._proc.pid, signal.SIGKILL)

            self._set_returncode(self.STOPPED)

    def close(self):
        """Destructor like function for the object. """
        self._stderr_reader.join()

    def _set_returncode(self, code):
        """Set self._return_code only if the hierarchy of the given code is
        higher than the current self._return_code. """
        if code >= self._return_code:
            self._return_code = code

    def _is_warning(self, stderr):
        return stderr.split(':')[0] == 'WARNING'

    def _last_data_hook(self):
        """Set the last data information based on the return code. """
        data_dictionary = {}

        if self._return_code == self.OK:
            data_dictionary['status'] = 'Finished'
        elif self._return_code == self.ERROR:
            data_dictionary['status'] = 'Error'
            data_dictionary['speed'] = ''
            data_dictionary['eta'] = ''
        elif self._return_code == self.WARNING:
            data_dictionary['status'] = 'Warning'
            data_dictionary['speed'] = ''
            data_dictionary['eta'] = ''
        elif self._return_code == self.STOPPED:
            data_dictionary['status'] = 'Stopped'
            data_dictionary['speed'] = ''
            data_dictionary['eta'] = ''
        elif self._return_code == self.ALREADY:
            data_dictionary['status'] = 'Already Downloaded'
        else:
            data_dictionary['status'] = 'Filesize Abort'

        self._hook_data(data_dictionary)

    def _extract_info(self, data):
        """Extract informations about the download process from the given data.

        Args:
            data (dict): Python dictionary that contains different
                keys. The keys are not standar the dictionary can also be
                empty when there are no data to extract. See extract_data().

        """
        if 'status' in data:
            if data['status'] == 'Already Downloaded':
                # Set self._return_code to already downloaded
                # and trash that key
                self._set_returncode(self.ALREADY)
                data['status'] = None

            elif data['status'] == 'Filesize Abort':
                # Set self._return_code to filesize abort
                # and trash that key
                self._set_returncode(self.FILESIZE_ABORT)
                data['status'] = None

    def _log(self, data):
        """Log data using the callback function. """
        if self.log_data is not None:
            self.log_data(data)

    def _hook_data(self, data):
        """Pass data back to the caller. """
        if self.data_hook is not None:
            self.data_hook(data)

    def _proc_is_alive(self):
        """Returns True if self._proc is alive else False. """
        if self._proc is None:
            return False

        return self._proc.poll() is None

    def _get_cmd(self, url, options):
        """Build the subprocess command.

        Args:
            url (string): URL string to download.
            options (list): Python list that contains youtube-dl options.

        Returns:
            Python list that contains the command to execute.

        """
        if os.name == 'nt':
            cmd = [self.youtubedl_path] + options + [url]
        else:
            if system_youtube_dl is None:
                cmd = ['python', self.youtubedl_path] + options + [url]
            else:
                cmd = [system_youtube_dl] + options + [url]

        return cmd

    def _create_process(self, cmd):
        """Create new subprocess.

        Args:
            cmd (list): Python list that contains the command to execute.

        """
        info = preexec = None

        # Keep a unicode copy of cmd for the log
        ucmd = cmd

        if os.name == 'nt':
            # Hide subprocess window
            info = subprocess.STARTUPINFO()
            info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        else:
            # Make subprocess the process group leader
            # in order to kill the whole process group with os.killpg
            preexec = os.setsid

        # Encode command for subprocess
        # Refer to http://stackoverflow.com/a/9951851/35070
        if sys.version_info < (3, 0):
            cmd = convert_item(cmd, to_unicode=False)

        try:
            self._proc = subprocess.Popen(cmd,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE,
                                          preexec_fn=preexec,
                                          startupinfo=info)
        except (ValueError, OSError) as error:
            self._log('Failed to start process: {}'.format(ucmd))
            self._log(convert_item(str(error), to_unicode=True))
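
The class above delegates stderr handling to a PipeReader that drains the pipe onto self._stderr_queue from a worker thread. That helper lives elsewhere in the project; a minimal sketch of such a reader, assuming only the attach_filedescriptor() and join() calls used above:

import threading
try:
    from queue import Queue  # Python 3
except ImportError:
    from Queue import Queue  # Python 2

class MiniPipeReader(threading.Thread):
    """Hypothetical, simplified stand-in for PipeReader: push every line
    read from the attached file descriptor onto the given queue. """

    def __init__(self, queue):
        super(MiniPipeReader, self).__init__()
        self._queue = queue
        self._fd = None
        self._attached = threading.Event()
        self._running = True
        self.start()

    def attach_filedescriptor(self, fd):
        self._fd = fd
        self._attached.set()

    def run(self):
        while self._running:
            if not self._attached.wait(0.1):
                continue  # nothing attached yet; keep polling
            for line in iter(self._fd.readline, b''):
                self._queue.put(line)
            self._attached.clear()  # pipe hit EOF; wait for the next one

    def join(self, timeout=None):
        self._running = False
        super(MiniPipeReader, self).join(timeout)

reader = MiniPipeReader(Queue())  # same wiring as in __init__ above
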
Example #60
0
def extract_torrents(provider, client):
    """ Main torrent extraction generator for non-API based providers

    Args:
        provider  (str): Provider ID
        client (Client): Client class instance

    Yields:
        tuple: A torrent result
    """
    definition = definitions[provider]
    definition = get_alias(definition, get_setting("%s_alias" % provider))
    log.debug("Extracting torrents from %s using definitions: %s" %
              (provider, repr(definition)))

    if not client.content:
        return  # ends the generator; raising StopIteration is an error under PEP 479

    dom = Html().feed(client.content)

    key_search = get_search_query(definition, "key")
    row_search = get_search_query(definition, "row")
    name_search = get_search_query(definition, "name")
    torrent_search = get_search_query(definition, "torrent")
    info_hash_search = get_search_query(definition, "infohash")
    size_search = get_search_query(definition, "size")
    seeds_search = get_search_query(definition, "seeds")
    peers_search = get_search_query(definition, "peers")
    referer_search = get_search_query(definition, "referer")

    log.debug("[%s] Parser: %s" % (provider, repr(definition['parser'])))

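    # Subpage providers need a second HTTP fetch per result; those fetches
    # run on worker threads that report back through this queue, which is
    # drained after the join() calls at the bottom of this function.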
    q = Queue()
    threads = []
    needs_subpage = 'subpage' in definition and definition['subpage']

    if needs_subpage:

        def extract_subpage(q, name, torrent, size, seeds, peers, info_hash,
                            referer):
            try:
                log.debug("[%s] Getting subpage at %s" %
                          (provider, repr(torrent)))
            except Exception as e:
                import traceback
                log.error("[%s] Subpage logging failed with: %s" %
                          (provider, repr(e)))
                for tb_line in traceback.format_exc().split("\n"):
                    log.debug(tb_line)

            # New client instance, otherwise it's race conditions all over the place
            subclient = Client()
            subclient.passkey = client.passkey
            headers = {}

            if get_setting("use_cloudhole", bool):
                subclient.clearance = get_setting('clearance')
                subclient.user_agent = get_setting('user_agent')
            if "subpage_mode" in definition:
                if definition["subpage_mode"] == "xhr":
                    headers['X-Requested-With'] = 'XMLHttpRequest'
                    headers['Content-Language'] = ''

            if referer:
                headers['Referer'] = referer

            uri = torrent.split('|')  # Split cookies for private trackers
            subclient.open(uri[0].encode('utf-8'), headers=headers)

            if 'bittorrent' in subclient.headers.get('content-type', ''):
                log.debug('[%s] bittorrent content-type for %s' %
                          (provider, repr(torrent)))
                if len(uri) > 1:  # Stick back cookies if needed
                    torrent = '%s|%s' % (torrent, uri[1])
            else:
                try:
                    torrent = extract_from_page(provider, subclient.content)
                    if torrent and not torrent.startswith('magnet') and len(
                            uri) > 1:  # Stick back cookies if needed
                        torrent = '%s|%s' % (torrent, uri[1])
                except Exception as e:
                    import traceback
                    log.error(
                        "[%s] Subpage extraction for %s failed with: %s" %
                        (provider, repr(uri[0]), repr(e)))
                    for tb_line in traceback.format_exc().split("\n"):
                        log.debug(tb_line)

            ret = (name, info_hash, torrent, size, seeds, peers)
            q.put_nowait(ret)

    if not dom:
        return

    if get_setting("use_debug_parser", bool):
        log.debug(
            "[%s] Parser debug | Page content: %s" %
            (provider, client.content.replace('\r', '').replace('\n', '')))

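    # The *_search strings come straight from the provider definition and
    # are evaluated as Python expressions against the parsed dom (and,
    # inside the loop below, against each matched item).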
    key = eval(key_search) if key_search else ""
    if key_search and get_setting("use_debug_parser", bool):
        key_str = str(key)
        log.debug(
            "[%s] Parser debug | Matched '%s' iteration for query '%s': %s" %
            (provider, 'key', key_search, key_str.replace('\r', '').replace(
                '\n', '')))

    items = eval(row_search)
    if get_setting("use_debug_parser", bool):
        log.debug("[%s] Parser debug | Matched %d items for '%s' query '%s'" %
                  (provider, len(items), 'row', row_search))

    for item in items:
        if get_setting("use_debug_parser", bool):
            item_str = str(item)
            log.debug(
                "[%s] Parser debug | Matched '%s' iteration for query '%s': %s"
                % (provider, 'row', row_search, item_str.replace(
                    '\r', '').replace('\n', '')))

        if not item:
            continue

        name = eval(name_search) if name_search else ""
        torrent = eval(torrent_search) if torrent_search else ""
        size = eval(size_search) if size_search else ""
        seeds = eval(seeds_search) if seeds_search else ""
        peers = eval(peers_search) if peers_search else ""
        info_hash = eval(info_hash_search) if info_hash_search else ""
        referer = eval(referer_search) if referer_search else ""

        if 'magnet:?' in torrent:
            torrent = torrent[torrent.find('magnet:?'):]

        if get_setting("use_debug_parser", bool):
            log.debug(
                "[%s] Parser debug | Matched '%s' iteration for query '%s': %s"
                % (provider, 'name', name_search, name))
            log.debug(
                "[%s] Parser debug | Matched '%s' iteration for query '%s': %s"
                % (provider, 'torrent', torrent_search, torrent))
            log.debug(
                "[%s] Parser debug | Matched '%s' iteration for query '%s': %s"
                % (provider, 'size', size_search, size))
            log.debug(
                "[%s] Parser debug | Matched '%s' iteration for query '%s': %s"
                % (provider, 'seeds', seeds_search, seeds))
            log.debug(
                "[%s] Parser debug | Matched '%s' iteration for query '%s': %s"
                % (provider, 'peers', peers_search, peers))
            if info_hash_search:
                log.debug(
                    "[%s] Parser debug | Matched '%s' iteration for query '%s': %s"
                    % (provider, 'info_hash', info_hash_search, info_hash))
            if referer_search:
                log.debug(
                    "[%s] Parser debug | Matched '%s' iteration for query '%s': %s"
                    % (provider, 'referer', referer_search, referer))

        # Pass client cookies with torrent if private
        if (definition['private'] or get_setting(
                "use_cloudhole", bool)) and not torrent.startswith('magnet'):
            user_agent = USER_AGENT
            if get_setting("use_cloudhole", bool):
                user_agent = get_setting("user_agent")

            if client.passkey:
                torrent = torrent.replace('PASSKEY', client.passkey)
            elif client.token:
                headers = {
                    'Authorization': client.token,
                    'User-Agent': user_agent
                }
                log.debug("[%s] Appending headers: %s" %
                          (provider, repr(headers)))
                torrent = append_headers(torrent, headers)
                log.debug("[%s] Torrent with headers: %s" %
                          (provider, repr(torrent)))
            else:
                log.debug("[%s] Cookies: %s" %
                          (provider, repr(client.cookies())))
                parsed_url = urlparse(definition['root_url'])
                cookie_domain = '{uri.netloc}'.format(uri=parsed_url).replace(
                    'www.', '')
                cookies = []
                # log.debug("[%s] cookie_domain: %s" % (provider, cookie_domain))
                for cookie in client._cookies:
                    # log.debug("[%s] cookie for domain: %s (%s=%s)" % (provider, cookie.domain, cookie.name, cookie.value))
                    if cookie_domain in cookie.domain:
                        cookies.append(cookie)
                if cookies:
                    headers = {
                        'Cookie':
                        ";".join(
                            ["%s=%s" % (c.name, c.value) for c in cookies]),
                        'User-Agent':
                        user_agent
                    }
                    # log.debug("[%s] Appending headers: %s" % (provider, repr(headers)))
                    torrent = append_headers(torrent, headers)
                    # log.debug("[%s] Torrent with headers: %s" % (provider, repr(torrent)))

        if name and torrent and needs_subpage and not torrent.startswith(
                'magnet'):
            if not torrent.startswith('http'):
                torrent = definition['root_url'] + torrent.encode('utf-8')
            t = Thread(target=extract_subpage,
                       args=(q, name, torrent, size, seeds, peers, info_hash,
                             referer))
            threads.append(t)
        else:
            yield (name, info_hash, torrent, size, seeds, peers)

    if needs_subpage:
        log.debug("[%s] Starting subpage threads..." % provider)
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        log.debug("[%s] Threads returned: %s" % (provider, repr(threads)))

        for i in range(q.qsize()):
            ret = q.get_nowait()
            log.debug("[%s] Queue %d got: %s" % (provider, i, repr(ret)))
            yield ret
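
A hedged sketch of how a caller might drive this generator; the provider ID and search URL are placeholders, and only the Client.open() call already used above is assumed:

client = Client()
client.open('https://tracker.example/search?q=ubuntu'.encode('utf-8'))

for name, info_hash, torrent, size, seeds, peers in \
        extract_torrents('example_provider', client):
    log.debug('%s | %s seeds | %s' % (name, seeds, torrent))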