class SimulationWorkerPool:

    def __init__(self, simulation_nodes, simulation_data):
        self.simulation_nodes = simulation_nodes
        self.simulation_queue = Queue()

        for item in simulation_data:
            self.simulation_queue.put(item)

        self.result_queue = Queue()
        self.fail_queue = Queue()
        self.simulation_workers = []

        logger.info("Creating %d simulation worker(s).", len(simulation_nodes))
        for node in self.simulation_nodes:
            worker = SimulationWorker(node, self.simulation_queue, self.result_queue, self.fail_queue)
            self.simulation_workers.append(worker)

    def run(self):
        """
        Run simulations on this simulation worker pool.
        """
        for worker in self.simulation_workers:
            worker.start()

    def await(self):
        """
        Await completion of all queued simulations and retrieve their results.
        :return: a tuple of (queue of successful simulation results, queue of failed simulations)
        """
        self.simulation_queue.join()
        return self.result_queue, self.fail_queue
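A minimal usage sketch (node names and payloads here are invented; SimulationWorker is assumed to drain simulation_queue and call task_done() per item):

# usage sketch -- node names and payloads are illustrative assumptions
pool = SimulationWorkerPool(simulation_nodes=['node-1', 'node-2'],
                            simulation_data=[{'run_id': i} for i in range(10)])
pool.run()                          # start one SimulationWorker per node
results, failures = pool.await()    # blocks until simulation_queue.join() returns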
Example #2
def main():
    """
    Main function of the proxy scanner.
    """
    global pl, output, q

    parser = ArgumentParser(description='Scans a list of proxies to determine which work for HTTPS.')
    parser.add_argument('--output', default='output/proxies.txt', type=str,
        help='The file in which to store the found proxies.')
    parser.add_argument('--threads', default=10, type=int,
        help='Number of threads to use.')

    args = parser.parse_args()
    output = args.output

    threads = args.threads
    q = Queue(threads * 3)

    print 'Starting threads.'
    for x in xrange(threads):
        t = Thread(target=check_proxies)
        t.daemon = True
        t.start()

    print 'Queueing proxies.'
    for proxy in proxies.proxies:
        q.put(proxy)
    q.join()

    save_proxies()
Example #3
class AntiFlapping(object):
    """
    AntiFlapping class to process events in a timely manner
    """
    def __init__(self, window):
        self.window = window
        self.tasks = Queue(maxsize=1)
        self._window_ended = True
        self._thread = Thread(name="AntiFlapping", target=self._run)
        self._thread.start()

    def newEvent(self, func, kwargs=None):
        """
        Queue a new event; it is dropped if a previous event's window is still open.
        """
        if not self.tasks.full() and self._window_ended:
            self.tasks.put({'func': func, 'args': kwargs or {}})

    def _run(self):
        """
        internal runloop that will fire tasks in order.
        """
        while True:
            task = self.tasks.get()
            self._window_ended = False
            sleep(self.window)
            self._window_ended = True
            if task['args']:
                task['func'](**task['args'])
            else:
                task['func']()
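A short usage sketch: while a window is open, further events are dropped, so at most one callback fires per window.

# usage sketch: debounce noisy events to one callback per 5-second window
def on_state_change():
    print 'state changed'

flap = AntiFlapping(window=5)
flap.newEvent(on_state_change)   # accepted; fires once the 5s window elapses
flap.newEvent(on_state_change)   # dropped: the one-slot queue is already full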
Example #4
class Content:

  def __init__(self):
    self.outputs = Queue()

    self.f = open('content.txt', 'a')
    self.f.write('[')

    t = Thread(target=self.run)
    t.setDaemon(True)
    t.start()

  def __del__(self):
    self.outputs.join()
    self.f.write(']')
    self.f.close()

  def write(self, url, dom, targets):
    result = {}
    result['url'] = url
    if dom is not None:
      for t in targets:
        m = dom.xpath(t['xpath'])
        if len(m) >= 1:
          print m[0].text
          result[t['name']] = m[0].text
    output = json.dumps(result) + ',\n'
    self.outputs.put(output)

  def run(self):
    while True:
      output = self.outputs.get()
      self.f.write(output)
      self.outputs.task_done()  # required: otherwise __del__'s join() blocks forever
Example #5
def PagePool(word, sfile):
    '''
    :param word: the search keyword
    :param sfile: file in which to save the crawl results
    :return: None
    '''
    if os.path.exists(sfile):
        print "file already exists"
        #return
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, compress',
        'Accept-Language': 'en-us;q=0.5,en;q=0.3',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'
    }
    url = 'http://news.baidu.com/ns?ct=0&rn=20&ie=utf-8&bs=' + word + '&rsv_bp=1&sr=0&cl=2&f=8&prevct=no&tn=newstitle&word=' + word

    page_pool = set([url])  # URL pool: records URLs that have already been crawled
    page_queue = Queue()    # URL queue: holds URLs waiting to be crawled
    page_queue.put(url)

    while not page_queue.empty():
        url = page_queue.get()
        html = requests.get(url=url, headers=headers)
        # first collect the to-be-crawled URLs on the current page
        urls = etree.HTML(html.content).xpath('//p[@id="page"]//a')
        for u in urls:
            _url = 'http://news.baidu.com' + u.get('href')
            if _url not in page_pool:
                page_pool.add(_url)
                page_queue.put(_url)
        # crawl the current page
        GetFromPage(html, sfile)
Example #6
class TestStatsdLoggingDelegation(unittest.TestCase):
    def setUp(self):
        self.port = 9177
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind(('localhost', self.port))
        self.queue = Queue()
        self.reader_thread = Thread(target=self.statsd_reader)
        self.reader_thread.setDaemon(1)
        self.reader_thread.start()

    def tearDown(self):
        # The "no-op when disabled" test doesn't set up a real logger, so
        # create one here so we can tell the reader thread to stop.
        if not getattr(self, 'logger', None):
            self.logger = utils.get_logger({
                'log_statsd_host': 'localhost',
                'log_statsd_port': str(self.port),
            }, 'some-name')
        self.logger.increment('STOP')
        self.reader_thread.join(timeout=4)
        self.sock.close()
        del self.logger
        time.sleep(0.15)  # avoid occasional "Address already in use"?

    def statsd_reader(self):
        while True:
            try:
                payload = self.sock.recv(4096)
                if payload and 'STOP' in payload:
                    return 42
                self.queue.put(payload)
            except Exception, e:
                sys.stderr.write('statsd_reader thread: %r' % (e,))
                break
Example #7
class EmitterThread(threading.Thread):

    def __init__(self, *args, **kwargs):
        self.__name = kwargs['name']
        self.__emitter = kwargs.pop('emitter')()
        self.__logger = kwargs.pop('logger')
        self.__config = kwargs.pop('config')
        self.__max_queue_size = kwargs.pop('max_queue_size', 100)
        self.__queue = Queue(self.__max_queue_size)
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True

    def run(self):
        while True:
            (data, headers) = self.__queue.get()
            try:
                self.__logger.debug('Emitter %r handling a packet', self.__name)
                self.__emitter(data, self.__logger, self.__config)
            except Exception:
                self.__logger.error('Failure during operation of emitter %r', self.__name, exc_info=True)

    def enqueue(self, data, headers):
        try:
            self.__queue.put((data, headers), block=False)
        except Full:
            self.__logger.warn('Dropping packet for %r due to backlog', self.__name)
Example #8
def bfs2(pos, game_map, player_distances):
    # Assigns each square as either ours (closest to player), theirs (closest to opponent),
    # or neutral (equal)
    fringe = Queue()
    fringe.put((pos, []))
    ours = []
    theirs = [pos]
    neutral = []
    # Keep track of nodes visited, don't revisit nodes
    visited = {pos}
    while not fringe.empty():
        node = fringe.get()
        nodeCoords = node[0]
        nodePath = node[1]
        # Path length from the start position to this node
        path_length = len(nodePath)
        for successor in getSuccessors(nodeCoords, game_map):
            if successor not in visited:
                visited.add(successor)
                successorPath = nodePath + [successor]
                fringe.put((successor, successorPath))
                # Assign the node by comparing path lengths
                player_path_length = player_distances[successor]
                if path_length + 1 > player_path_length:
                    ours.append(successor)
                elif path_length + 1 < player_path_length:
                    theirs.append(successor)
                else:
                    neutral.append(successor)
    return ours, theirs, neutral
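getSuccessors is not shown above; a sketch of what the call seems to assume (the passable orthogonal neighbours of a square), with a hypothetical game_map API:

# assumed helper, not part of the original snippet
def getSuccessors(coords, game_map):
    x, y = coords
    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        neighbour = (x + dx, y + dy)
        if game_map.passable(neighbour):  # hypothetical game_map method
            yield neighbour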
Example #9
class PooledPg:
	"""A very simple PostgreSQL connection pool.

	After you have created the connection pool,
	you can get connections using connection().
	"""

	def __init__(self, maxconnections, *args, **kwargs):
		"""Set up the PostgreSQL connection pool.

		maxconnections: the number of connections cached in the pool
		args, kwargs: the parameters that shall be used to establish
			the PostgreSQL connections using pg.connect()
		"""
		# Since there is no connection level safety, we
		# build the pool using the synchronized queue class
		# that implements all the required locking semantics.
		from Queue import Queue
		self._queue = Queue(maxconnections)
		# Establish all database connections (it would be better to
		# only establish a part of them now, and the rest on demand).
		for i in range(maxconnections):
			self.cache(PgConnection(*args, **kwargs))

	def cache(self, con):
		""""Add or return a connection to the pool."""
		self._queue.put(con)

	def connection(self):
		""""Get a connection from the pool."""
		return PooledPgConnection(self, self._queue.get())
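A usage sketch, assuming pg.connect() keyword arguments and that the pooled connection proxies query():

# usage sketch -- the connection parameters and query() call are assumptions
pool = PooledPg(5, dbname='test', host='localhost')
con = pool.connection()   # blocks while all 5 connections are checked out
try:
    con.query('SELECT 1')
finally:
    pool.cache(con)       # hand the connection back to the pool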
Example #10
def fetch_friends(start, max = 10000):
	q = Queue()
	q.put(start)
	count = 0
	while not q.empty():
		user = q.get()
		count = count + 1
		if count % 10 == 0:
			final_f.flush()
		if count > max:
			return
		print 'processing user:', user
		if user not in users_dict:
			users_dict[user] = []
			# (garbled at the source: the call that fetches this user's
			# friend list into all_that is missing here)
			print 'Got number of friends:' + str(len(all_that))
			for one_friend in all_that:
				friend_user, nick = one_friend
				if len(friend_user) == 0:
					print 'len_frien_user is 0'
					continue
				users_dict[user].append(friend_user)
				nicks[friend_user] = nick
				my_nick = nicks[user] if user in nicks.keys() else 'NoNo'
				print user,',',my_nick,'::::',friend_user,',',nick
				final_f.write((user+','+my_nick+'::::'+friend_user + ','+nick + '\n').encode('utf-8'))
		else:
			print 'already exists!'
		next = users_dict[user]
		for m in next:
			q.put(m)
Example #11
class LinkQueue:
    """
    A Thread-safe queue of unique elements.
    """

    def __init__(self, maxsize=10000):
        self.items = []
        self.items_lock = threading.Lock()
        self.queue = Queue(maxsize=maxsize)

    def __str__(self):
        return self.items.__str__()

    def put_all(self, items):
        for i in items:
            with self.items_lock:
                self.put(i)

    def put(self, item):
        if item not in self.items:
            self.queue.put(item)
            self.items.append(item)

    def pop(self):
        item = self.queue.get(block=True)
        return item

    def size(self):
        return self.queue.qsize()
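A short sketch of the deduplication behaviour:

# sketch: duplicate items are silently dropped; pop() blocks until an item arrives
links = LinkQueue(maxsize=100)
links.put_all(['http://a.example', 'http://b.example', 'http://a.example'])
print links.size()   # 2 -- the repeated URL was queued only once
print links.pop()    # 'http://a.example'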
Example #12
    def test_create_cell_recalculator_should(self, mock_recalculate):
        unrecalculated_queue = Queue()
        unrecalculated_queue.put(1)
        unrecalculated_queue.put(1)
        unrecalculated_queue.task_done = Mock()

        leaf_queue = Queue()
        leaf_queue.put(sentinel.one)
        leaf_queue.put(sentinel.two)
        leaf_queue.task_done = Mock()

        target = create_cell_recalculator(leaf_queue, unrecalculated_queue, sentinel.graph, sentinel.context)
        target()

        self.assertTrue(unrecalculated_queue.empty())
        self.assertEquals(
            unrecalculated_queue.task_done.call_args_list,
            [ ((), {}), ((), {}), ]
        )

        self.assertTrue(leaf_queue.empty())

        self.assertEquals(
            mock_recalculate.call_args_list,
            [
                ((sentinel.one, leaf_queue, sentinel.graph, sentinel.context), {}),
                ((sentinel.two, leaf_queue, sentinel.graph, sentinel.context), {})
            ]
        )

        self.assertEquals(
            leaf_queue.task_done.call_args_list,
            [ ((), {}), ((), {}), ]
        )
Example #13
class BlockingDirectoryIterator(object):
    """
    iterator that blocks and yields new files added to a directory

    use like this:
        for filename in BlockingDirectoryIterator('/tmp','A*.DAT').get_files():
            print filename
    """
    def __init__(self, directory, wildcard, interval=1):
        self._values = Queue()
        self._exception = None
        self._ready = Event()
        self._poller = DirectoryPoller(directory, wildcard, self._on_condition, self._on_exception, interval)
        self._poller.start()
    def __iter__(self):
        return self
    def get_files(self):
        while True:
            # could have exception or list of filenames
            out = self._values.get()
            if isinstance(out, Exception):
                raise out
            else:
                yield out
    def cancel(self):
        self._poller.shutdown()
    def _on_condition(self, filenames):
        for file in filenames:
            self._values.put(file)
    def _on_exception(self, exception):
        self._values.put(exception)
Example #14
class ConnectionPool(object):

    def __init__(self, source_connection, min=2, max=None, preload=True):
        self.source_connection = source_connection
        self.min = min
        self.max = max
        self.preload = preload
        self.source_connection.pool = self

        self._connections = Queue()
        self._dirty = deque()

        self._connections.put(self.source_connection)
        for i in range(min - 1):
            self._connections.put_nowait(self._new_connection())

    def acquire(self, block=False, timeout=None, connect_timeout=None):
        try:
            conn = self._connections.get(block=block, timeout=timeout)
        except QueueEmpty:
            conn = self._new_connection()
        self._dirty.append(conn)
        if connect_timeout is not None:
            conn.connect_timeout = connect_timeout
        return conn

    def release(self, connection):
        self._dirty.remove(connection)
        self._connections.put_nowait(connection)

    def _new_connection(self):
        # self.max may be None (unbounded); only enforce the limit when it is set
        if self.max is not None and len(self._dirty) >= self.max:
            raise ConnectionLimitExceeded(self.max)
        return copy(self.source_connection)
Example #15
    def run(self):
        '''
        Does the job
        '''
        self.parser.add_option("-l", "--list", default=False, action="store_true", 
            help = "If present, list hosts configured in site.xml")
        self.parser.add_option("-a", "--artm", default=False, action="store_true", 
            help = "If present, include lo-art-1 to cycle/off")
        self.parser.add_option("-c", "--cob", default=False, action="store_true", 
            help = "If present, reboot only cob-* machines. cob-dmc is not rebooted because it belong to CONTROL subsystem.")
        self.parser.add_option("-o", "--off", default=False, action="store_true", 
            help = "If present, turn off the machines instead cycle them.")
        self.parser.add_option("-t", "--timeout", default=150, 
            help = "Set timeout to wait the recovered hosts. Default is 150 secs")
        self.parse()
        self.parse_args()
        self.get_hosts()
        if self.list is False:
            lastpdu = 'none'
            for host in self.hosts:
                currentpdu = str(self.get_pdu(host)[0])
                if currentpdu != lastpdu:
                    lastpdu = currentpdu
                    self.pstrip_cmd(self.get_pdu(host))
                    time.sleep(1)
                else:
                    time.sleep(2)
                    lastpdu = currentpdu
                    self.pstrip_cmd(self.get_pdu(host))
            if self.verbose:
                print self._get_time()+" Waiting for hosts ..."
            if self.off is False:
                queue = Queue()
                for host in self.hosts:
                    queue.put(host)
                    self.remaining_hosts.append(host)
                for host in self.hosts:
                    rh =  Thread(target=self.recover_host,args=(host, self.timeout,queue))
                    rh.setDaemon(True)
                    rh.start()
                queue.all_tasks_done.acquire()
                try:
                    endtime = time.time() + self.timeout
                    while queue.unfinished_tasks:
                        remaining = endtime -  time.time()
                        if remaining <= 0.0:
                            raise timeOut('Time Out Raise!!!')
                        queue.all_tasks_done.wait(remaining)
                except timeOut:
                    print "%s Probably %d hosts are still rebooting, please check ..." % (self._get_time(), int(queue.unfinished_tasks))
                    print "%s Please check these hosts:" % self._get_time()
                    for h in self.remaining_hosts:
                        print "%s ---> \033[31m%s\033[0m" % (self._get_time(), h)
                finally:
                    queue.all_tasks_done.release()

        else:
            print "Hosts configured in site.xml"
            for host in self.hosts:
                print host
Example #16
def ThreadV():
	global queue
	global sina
	queue = Queue()
	sina = sina_data()
	# At startup, report the server's remaining remaining_ip_hits and reset_time_in_seconds; see http://open.weibo.com/wiki/Account/rate_limit_status for details
	rateLimit = client.account.rate_limit_status.get()
	print 'remaining_ip_hits:%d reset_time_in_seconds:%d\n'%(rateLimit['remaining_ip_hits'],rateLimit['reset_time_in_seconds'])
	time.sleep(2)
	# bp_statuses_log is the checkpoint log file
	if not os.path.exists('/home/mii/weibo_crawler/lijun_thread/bp_statuses_log'):
		place = 0
		f = open('/home/mii/weibo_crawler/lijun_thread/bp_statuses_log','w')
		f.close()
	elif len(open('/home/mii/weibo_crawler/lijun_thread/bp_statuses_log').read()) == 0:
		place = 0
	else:
		place = int(open('bp_statuses_log').read().strip())
		Count.count = place
	# Resume from the checkpoint: read big-V (verified user) nicknames and put them into the queue
	keys = open('shanghai3','r').readlines()[place:]
	for key in keys:
		queue.put(key)
	# Start the worker threads
	n = 5
	for i in range(n):
		t = threadv()
		t.start()
Example #17
0
def start_bruteforce():
    global session
    global thread_lock
    queue = Queue(0)
    start_threads_with_args(crack, 15, queue)
    print"[!] Trying fast bruteforce..."
    for x in range(0, 1000):
        if thread_lock:
            break
        queue.put("123abc456def789%03d" % x)
    while True:
        if session != "":
            return session
        if queue.empty():
            break
    print "[!] Trying slow bruteforce..."
    for milliseconds in range(0, how_many):
        if thread_lock:
            break
        queue.put("123abc456def789%s" % (start + milliseconds))
    while True:
        if session != "":
            return session
        if queue.empty():
            break
    return session
Example #18
class Worker:
    def __init__(self):
        self.q = Queue()
        self.t = Thread(target=self._handle)
        self.t.setDaemon(True)
        self.t.start()

    def _handle(self):
        while True:
            reset_caches()

            fn = self.q.get()
            try:
                fn()
            except:
                import traceback
                print traceback.format_exc()
            finally:
                # always mark the task done, even on failure, so join() can return
                self.q.task_done()

    def do(self, fn, *a, **kw):
        fn1 = lambda: fn(*a, **kw)
        self.q.put(fn1)

    def join(self):
        self.q.join()
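A usage sketch (reset_caches() is assumed to be defined in the surrounding module):

# usage sketch: jobs run FIFO on the single daemon thread
def job(msg):
    print msg

w = Worker()
w.do(job, 'queued work')
w.join()   # returns once every queued job has called task_done()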
Example #19
def main():
    print "[Facebook Album Downloader v1]"
    start = timeit.default_timer()

    # hide images
    prefs = {"profile.managed_default_content_settings.images": 2}
    extensions = webdriver.ChromeOptions()
    extensions.add_experimental_option("prefs", prefs)
    browser = webdriver.Chrome(executable_path="chromedriver", chrome_options=extensions)

    findAlbum(browser)
    createAlbumPath()

    queue = Queue()

    for x in range(max_workers):
        worker = DownloadWorker(queue)
        worker.daemon = True
        worker.start()

    print "[Getting Image Links]"
    linkImages = getImageLinks(browser)
    print "[Found: " + str(len(linkImages)) + "]"

    for fullRes in linkImages:
        queue.put(fullRes)

    print "[Downloading...]"
    queue.join()

    browser.quit()

    stop = timeit.default_timer()
    print "[Time taken: %ss]" % str(stop - start)
    raw_input("Press any key to continue...")
Example #20
class WorkerThread(Thread):

    def __init__(self):
        """Create a worker thread. Start it by calling the start() method."""
        self.queue = Queue()
        Thread.__init__(self)

    def stop(self):
        """Stop the thread a.s.a.p., meaning whenever the currently running
        job is finished."""
        self.working = 0
        self.queue.put(None)

    def scheduleWork(self, func, *args, **kwargs):
        """Schedule some work to be done in the worker thread."""
        self.queue.put((func, args, kwargs))

    def run(self):
        """Fetch work from a queue, block when there's nothing to do.
        This method is called by Thread, don't call it yourself."""
        self.working = 1
        while self.working:
            work = self.queue.get()
            if work is None or not self.working:
                break
            func, args, kwargs = work
            pool = NSAutoreleasePool.alloc().init()
            try:
                func(*args, **kwargs)
            finally:
                # delete all local references; if they are the last refs they
                # may invoke autoreleases, which should then end up in our pool
                del func, args, kwargs, work
                del pool
Example #21
class SharedCounter(object):
    """Thread-safe counter.

    Please note that the final value is not synchronized, this means
    that you should not update the value by using a previous value, the only
    reliable operations are increment and decrement.

    Example

        >>> max_clients = SharedCounter(initial_value=10)

        # Thread one
        >>> max_clients += 1 # OK (safe)

        # Thread two
        >>> max_clients -= 3 # OK (safe)

        # Main thread
        >>> if client >= int(max_clients): # Max clients now at 8
        ...    wait()


        >>> max_client = max_clients + 10 # NOT OK (unsafe)

    """

    def __init__(self, initial_value):
        self._value = initial_value
        self._modify_queue = Queue()

    def increment(self, n=1):
        """Increment value."""
        self += n
        return int(self)

    def decrement(self, n=1):
        """Decrement value."""
        self -= n
        return int(self)

    def _update_value(self):
        self._value += sum(consume_queue(self._modify_queue))
        return self._value

    def __iadd__(self, y):
        """``self += y``"""
        self._modify_queue.put(y * +1)
        return self

    def __isub__(self, y):
        """``self -= y``"""
        self._modify_queue.put(y * -1)
        return self

    def __int__(self):
        """``int(self) -> int``"""
        return self._update_value()

    def __repr__(self):
        return "<SharedCounter: int(%s)>" % str(int(self))
Example #22
class PhotoStream(object):
	def __init__(self):
		self.client = api.FiveHundredPx(CONSUMER_KEY, CONSUMER_SECRET)
		self.photo_queue = Queue() # queue up grabbed photos so the parser can process them continuously
		self.new_user_list = []

	def get_pop_photo_stream(self):
		count = 0
		results = self.client.get_photos(rpp=100, feature='popular', tags=1)
		for photo in results:
			self.photo_queue.put(photo)
			# print "get photo %d"%count
			count += 1
			if count==PHOTO_GRAB_PER_TIME:
				break

	def parse_photo_stream(self):
		while not self.photo_queue.empty():
			photo = self.photo_queue.get()
			self.save_photo_stream_to_db(photo)

	def save_photo_stream_to_db(self, photo):
		#save popular photos into photos collection
		photo_collection = mydb.photos
		#check if the photo exists
		photo_check = photo_collection.find_one({'id':photo['id']})
		#photo has not seen before, insert
		if photo_check is None:
			photo_collection.insert(photo)
		#has seen, update time varying fields
		else:
			photo_collection.update({'id':photo['id']},photo)
Example #23
def main():
    ts = time()
    start = input("Please input the start value: ")
    stop = input("Please input the stop value: ")
    # set the number of threads
    threads = input("Please input the download numbers every piece: ")
    k = stop-start
    # acquire the download links
    links = [l for l in get_link(start,stop)]
    # set the download storage directory
    down_dir = setup_dir()
    queue = Queue()
    # choose the worker count: at most `threads` workers,
    # fewer when there are fewer downloads than threads
    if k <= threads:
        for x in range(k-1):
            print queue.qsize()
            worker = DownloadWorker(queue)
            worker.setDaemon(True)
            worker.start()
    else:
        for x in range(threads):
            worker = DownloadWorker(queue)
            worker.setDaemon(True)
            worker.start()
    # traverse the links and put the link to queue
    for link in links:
        queue.put((down_dir,link))
    # the new queue joining
    queue.join()
    print 'Took {}'.format(time()-ts)
    print "The finished time:" + ctime()
Example #24
def Create_index_from_url( url, depth ):
    if depth > MAX_DEPTH:
        return []
    url_queue = Queue()
    url_queue.put( url )
    checked = []

    IndexGen = Index_Generator()
    while not url_queue.empty() :

        current_url = url_queue.get()

        checked.append( current_url )

        try:
            html = Get_page( current_url )
        except:
            print "Exception"
            continue
        if depth > 0:
            for link in Link_generator( html ):
                #print link
                if link not in checked:
                    url_queue.put( link )
            depth = depth - 1

        html = nltk.clean_html( html )
        IndexGen.gen_url_index( current_url, html )
        result_index = IndexGen.get_index_dict()
        for key in result_index:
            result_index[key].sort()

    return result_index
Example #25
class WebSocket(protocol.Protocol):
    websockets = []

    @classmethod
    def add_socket(cls, ws):
        print "adding a websocket"
        cls.websockets.append(ws)

    @classmethod
    def broadcast(cls, message):
        for ws in cls.websockets:
            ws.message_queue.put(message)
            ws.send_all_messages()

    def connectionMade(self):
        self.message_queue = Queue()
        for i in range(len(BitClient.message_list)):
            self.message_queue.put(BitClient.message_list[i])
        self.send_all_messages()

    def connectionLost(self, reason):
        print "connection lost for", self
        WebSocket.websockets.remove(self)

    def send_all_messages(self):
        print "SENDING ALL MESSAGES"
        while not self.message_queue.empty():
            self.transport.write(self.message_queue.get())
Example #26
class Scheduler(object):
    def __init__(self):
        self.ready = Queue()
        self.taskmap = {}

    def new(self, target):
        newtask = Task(target)
        self.taskmap[newtask.tid] = newtask
        self.schedule(newtask)
        return newtask.tid

    def exit(self, task):
        print "Task %d terminated" % task.tid
        del self.taskmap[task.tid]

    def schedule(self, task):
        self.ready.put(task)

    def mainloop(self):
        while self.taskmap:
            task = self.ready.get()
            try:
                result = task.run()
                if isinstance(result, SystemCall):
                    result.task = task
                    result.sched = self
                    result.handle()
                    continue
            except StopIteration:
                self.exit(task)
                continue
            self.schedule(task)
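Task and SystemCall are not shown above; minimal sketches of what mainloop() assumes, followed by a round-robin usage example:

# minimal sketches of the helpers the Scheduler assumes
class SystemCall(object):
    def handle(self):
        pass

class Task(object):
    taskid = 0
    def __init__(self, target):
        Task.taskid += 1
        self.tid = Task.taskid   # key used in Scheduler.taskmap
        self.target = target     # the generator being scheduled
        self.sendval = None
    def run(self):
        return self.target.send(self.sendval)  # resume until the next yield

# usage: two generators are interleaved until both raise StopIteration
def countdown(n):
    while n > 0:
        print n
        yield
        n -= 1

sched = Scheduler()
sched.new(countdown(3))
sched.new(countdown(2))
sched.mainloop()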
Example #27
    def get_direction(self, start, end):
        opened = Queue()
        opened.put(start)
        openedBefore = {}
        cameFrom = {}
        destination = False

        DIRECTIONS = ['n', 'w', 'e', 's']

        while (not opened.empty()) and (not destination):
            current = opened.get()
            for direction in DIRECTIONS:
                newLoc = self.destination(current, direction)
                if (not openedBefore.get(newLoc, False)) and self.passable(newLoc):
                    #Open a node
                    cameFrom[newLoc] = (current, direction)
                    opened.put(newLoc)
                    openedBefore[newLoc] = True

                    #If goal is found
                    if(newLoc == end):
                        destination = newLoc

        if not destination:
            getLogger().debug("Path not found")
            return '#'

        while cameFrom[destination][0] != start:
            destination = cameFrom[destination][0]

        getLogger().debug("Found: " + str(cameFrom[destination][1]))
        return cameFrom[destination][1]
Example #28
class MassShotter():

    def __init__(self, urls, output, prefix=None, thread_count=5):
        self.urls = urls
        self.output = output
        self.prefix = prefix
        self.thread_count = thread_count
        self.queue = Queue()
        self.threads = []

    def run(self):
        if not path.exists(self.output):
            makedirs(self.output)

        print('Filling queue with %i urls and deduplicating' % len(self.urls))
        seen = set()
        seen_add = seen.add
        for url in [x for x in self.urls if x not in seen and not seen_add(x)]:
            self.queue.put(url)

        init_size = self.queue.qsize()
        print('Only %i urls to screen.' % init_size)

        # Fill threads list
        for i in xrange(0, self.thread_count):
            t = ShotterThread(self.queue, self.output)
            self.threads.append(t)

        # Start all threads
        [x.start() for x in self.threads]
        # Wait for all of them to finish
        [x.join() for x in self.threads]
Example #29
class CommandQueue(object):
    """Asynchronous command queue that can be used to communicate with blip.pl 
    in the background."""
    
    def __init__(self):
        self.queue = Queue()
        self.worker = threading.Thread(target=self.__worker)
        self.worker.setDaemon(True)
        self.worker.start()  # without this the worker never runs and Finish() deadlocks
	
    def __del__(self):
        self.Finish()
        
    def __worker(self):
        while True:
            item = self.queue.get(True)
            item()
            self.queue.task_done()
	    
    def Finish(self):
        """Finishes all commands in the queue"""
	
        self.queue.join()
	
    def HasPendingCommands(self):
        """Returns True if the queue is busy"""
	
        return self.queue.qsize() > 0
	
    def Enqueue(self, command):
        """Enqueues a command in the queue. 
        Command must refer to a function without parameters."""

        self.queue.put(command)
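A usage sketch:

# commands are zero-argument callables run in order on the worker thread
def send_update():
    print 'sent'

cq = CommandQueue()
cq.Enqueue(send_update)
if cq.HasPendingCommands():
    cq.Finish()   # joins the queue; returns once send_update has run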
Example #30
class AsyncWriter(threading.Thread):

    def __init__(self, wait_period=1):
        super(AsyncWriter, self).__init__()
        self.daemon = True
        self.wait_period = wait_period
        self.running = threading.Event()
        self._stop_signal = threading.Event()
        self._queue = Queue()

    def write(self, stuff):
        if self._stop_signal.is_set():
            raise IOError('Attempting to write to {} after it has been closed.'.format(self.__class__.__name__))
        self._queue.put(stuff)

    def do_write(self, stuff):
        raise NotImplementedError()

    def run(self):
        self.running.set()
        while True:
            if self._stop_signal.is_set() and self._queue.empty():
                break
            try:
                self.do_write(self._queue.get(block=True, timeout=self.wait_period))
            except Empty:
                pass  # carry on
        self.running.clear()

    def stop(self):
        self._stop_signal.set()

    def wait(self):
        while self.running.is_set():
            time.sleep(self.wait_period)
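do_write() is the extension point; a sketch of a concrete file-backed subclass (the class name and log path are illustrative assumptions):

# sketch of a concrete writer built on AsyncWriter
class FileAsyncWriter(AsyncWriter):
    def __init__(self, path, wait_period=1):
        super(FileAsyncWriter, self).__init__(wait_period=wait_period)
        self._fh = open(path, 'a')

    def do_write(self, stuff):
        # runs on the writer thread, once per queued item
        self._fh.write(stuff)
        self._fh.flush()

writer = FileAsyncWriter('out.log')
writer.start()
writer.write('hello\n')
writer.stop()   # signal shutdown; run() drains the queue before exiting
writer.wait()   # poll until the writer thread has finished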
Example #31
# (truncated at the source: the beginning of do_job(), which takes a csv
# path f from the queue and builds the ohlc frame, is missing)
            ohlc = ohlc.sort_values('index')
            records = ohlc.to_dict('records')
            market.write_data_to_arctic(symbol, records, TRADE_TABLE)
            queue.task_done()
        except Exception as e:
            print str(f) + ' fail run again ' + str(e)
            queue.task_done()


def file_lists(rootDir):
    for lists in os.listdir(rootDir):
        sub_path = os.path.join(rootDir, lists)
        if sub_path.endswith('.csv'):
            files.append(sub_path)
        if os.path.isdir(sub_path):
            file_lists(sub_path)


if __name__ == '__main__':
    file_lists(root_path)

    for f in files:
        queue.put(str(f))

    for i in range(50):
        t = Thread(target=do_job)
        t.daemon = True
        t.start()

    queue.join()
Example #32
class EscposDriver(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.queue = Queue()
        self.lock = Lock()
        self.status = {'status': 'connecting', 'messages': []}

    def connected_usb_devices(self):
        connected = []

        # printers can either define bDeviceClass=7, or they can define one of
        # their interfaces with bInterfaceClass=7. This class checks for both.
        class FindUsbClass(object):
            def __init__(self, usb_class):
                self._class = usb_class

            def __call__(self, device):
                # first, let's check the device
                if device.bDeviceClass == self._class:
                    return True
                # traverse all devices and look through their interfaces to
                # find a matching class
                for cfg in device:
                    intf = usb.util.find_descriptor(
                        cfg, bInterfaceClass=self._class)

                    if intf is not None:
                        return True

                return False

        printers = usb.core.find(find_all=True, custom_match=FindUsbClass(7))

        # if no printers are found after this step we will take the
        # first epson or star device we can find.
        # epson
        if not printers:
            printers = usb.core.find(find_all=True, idVendor=0x04b8)
        # star
        if not printers:
            printers = usb.core.find(find_all=True, idVendor=0x0519)

        for printer in printers:
            try:
                description = usb.util.get_string(
                    printer, 256,
                    printer.iManufacturer) + " " + usb.util.get_string(
                        printer, 256, printer.iProduct)
            except Exception as e:
                _logger.error("Can not get printer description: %s" %
                              (e.message or repr(e)))
                description = 'Unknown printer'
            connected.append({
                'vendor': printer.idVendor,
                'product': printer.idProduct,
                'name': description
            })

        return connected

    def lockedstart(self):
        with self.lock:
            if not self.isAlive():
                self.daemon = True
                self.start()

    def get_escpos_printer(self):

        printers = self.connected_usb_devices()
        if len(printers) > 0:
            self.set_status('connected', 'Connected to ' + printers[0]['name'])
            return Usb(printers[0]['vendor'], printers[0]['product'])
        else:
            self.set_status('disconnected', 'Printer Not Found')
            return None

    def get_status(self):
        self.push_task('status')
        return self.status

    def open_cashbox(self, printer):
        printer.cashdraw(2)
        printer.cashdraw(5)

    def set_status(self, status, message=None):
        _logger.info(status + ' : ' + (message or 'no message'))
        if status == self.status['status']:
            if message is not None and (len(self.status['messages']) == 0
                                        or message != self.status['messages'][-1]):
                self.status['messages'].append(message)
        else:
            self.status['status'] = status
            if message:
                self.status['messages'] = [message]
            else:
                self.status['messages'] = []

        if status == 'error' and message:
            _logger.error('ESC/POS Error: ' + message)
        elif status == 'disconnected' and message:
            _logger.warning('ESC/POS Device Disconnected: ' + message)

    def run(self):
        printer = None
        if not escpos:
            _logger.error(
                'ESC/POS cannot initialize, please verify system dependencies.'
            )
            return
        while True:
            try:
                error = True
                timestamp, task, data = self.queue.get(True)

                printer = self.get_escpos_printer()

                if printer is None:
                    if task != 'status':
                        self.queue.put((timestamp, task, data))
                    error = False
                    time.sleep(5)
                    continue
                elif task == 'receipt':
                    if timestamp >= time.time() - 1 * 60 * 60:
                        self.print_receipt_body(printer, data)
                        printer.cut()
                elif task == 'xml_receipt':
                    if timestamp >= time.time() - 1 * 60 * 60:
                        printer.receipt(data)
                elif task == 'cashbox':
                    if timestamp >= time.time() - 12:
                        self.open_cashbox(printer)
                elif task == 'printstatus':
                    self.print_status(printer)
                elif task == 'status':
                    pass
                error = False

            except NoDeviceError as e:
                print "No device found %s" % str(e)
            except HandleDeviceError as e:
                print "Impossible to handle the device due to previous error %s" % str(
                    e)
            except TicketNotPrinted as e:
                print "The ticket does not seems to have been fully printed %s" % str(
                    e)
            except NoStatusError as e:
                print "Impossible to get the status of the printer %s" % str(e)
            except Exception as e:
                self.set_status('error', str(e))
                errmsg = str(
                    e) + '\n' + '-' * 60 + '\n' + traceback.format_exc(
                    ) + '-' * 60 + '\n'
                _logger.error(errmsg)
            finally:
                if error:
                    self.queue.put((timestamp, task, data))
                if printer:
                    printer.close()

    def push_task(self, task, data=None):
        self.lockedstart()
        self.queue.put((time.time(), task, data))

    def print_status(self, eprint):
        localips = ['0.0.0.0', '127.0.0.1', '127.0.1.1']
        hosting_ap = os.system('pgrep hostapd') == 0
        ssid = subprocess.check_output(
            'iwconfig 2>&1 | grep \'ESSID:"\' | sed \'s/.*"\\(.*\\)"/\\1/\'',
            shell=True).rstrip()
        mac = subprocess.check_output(
            'ifconfig | grep -B 1 \'inet addr\' | grep -o \'HWaddr .*\' | sed \'s/HWaddr //\'',
            shell=True).rstrip()
        ips = [
            c.split(':')[1].split(' ')[0]
            for c in commands.getoutput("/sbin/ifconfig").split('\n')
            if 'inet addr' in c
        ]
        ips = [ip for ip in ips if ip not in localips]
        eprint.text('\n\n')
        eprint.set(align='center', type='b', height=2, width=2)
        eprint.text('PosBox Status\n')
        eprint.text('\n')
        eprint.set(align='center')

        if hosting_ap:
            eprint.text('Wireless network:\nPosbox\n\n')
        elif ssid:
            eprint.text('Wireless network:\n' + ssid + '\n\n')

        if len(ips) == 0:
            eprint.text(
                'ERROR: Could not connect to LAN\n\nPlease check that the PosBox is correc-\ntly connected with a network cable,\n that the LAN is setup with DHCP, and\nthat network addresses are available'
            )
        elif len(ips) == 1:
            eprint.text('IP Address:\n' + ips[0] + '\n')
        else:
            eprint.text('IP Addresses:\n')
            for ip in ips:
                eprint.text(ip + '\n')

        if len(ips) >= 1:
            eprint.text('\nMAC Address:\n' + mac + '\n')
            eprint.text('\nHomepage:\nhttp://' + ips[0] + ':8069\n')

        eprint.text('\n\n')
        eprint.cut()

    def print_receipt_body(self, eprint, receipt):
        def check(string):
            return string != True and bool(string) and string.strip()

        def price(amount):
            return ("{0:." + str(receipt['precision']['price']) +
                    "f}").format(amount)

        def money(amount):
            return ("{0:." + str(receipt['precision']['money']) +
                    "f}").format(amount)

        def quantity(amount):
            if math.floor(amount) != amount:
                return ("{0:." + str(receipt['precision']['quantity']) +
                        "f}").format(amount)
            else:
                return str(amount)

        def printline(left, right='', width=40, ratio=0.5, indent=0):
            lwidth = int(width * ratio)
            rwidth = width - lwidth
            lwidth = lwidth - indent

            left = left[:lwidth]
            if len(left) != lwidth:
                left = left + ' ' * (lwidth - len(left))

            right = right[-rwidth:]
            if len(right) != rwidth:
                right = ' ' * (rwidth - len(right)) + right

            return ' ' * indent + left + right + '\n'

        def print_taxes():
            taxes = receipt['tax_details']
            for tax in taxes:
                eprint.text(
                    printline(tax['tax']['name'],
                              price(tax['amount']),
                              width=40,
                              ratio=0.6))

        # Receipt Header
        if receipt['company']['logo']:
            eprint.set(align='center')
            eprint.print_base64_image(receipt['company']['logo'])
            eprint.text('\n')
        else:
            eprint.set(align='center', type='b', height=2, width=2)
            eprint.text(receipt['company']['name'] + '\n')

        eprint.set(align='center', type='b')
        if check(receipt['company']['contact_address']):
            eprint.text(receipt['company']['contact_address'] + '\n')
        if check(receipt['company']['phone']):
            eprint.text('Tel:' + receipt['company']['phone'] + '\n')
        if check(receipt['company']['vat']):
            eprint.text('VAT:' + receipt['company']['vat'] + '\n')
        if check(receipt['company']['email']):
            eprint.text(receipt['company']['email'] + '\n')
        if check(receipt['company']['website']):
            eprint.text(receipt['company']['website'] + '\n')
        if check(receipt['header']):
            eprint.text(receipt['header'] + '\n')
        if check(receipt['cashier']):
            eprint.text('-' * 32 + '\n')
            eprint.text('Served by ' + receipt['cashier'] + '\n')

        # Orderlines
        eprint.text('\n\n')
        eprint.set(align='center')
        for line in receipt['orderlines']:
            pricestr = price(line['price_display'])
            if line['discount'] == 0 and line[
                    'unit_name'] == 'Unit(s)' and line['quantity'] == 1:
                eprint.text(
                    printline(line['product_name'], pricestr, ratio=0.6))
            else:
                eprint.text(printline(line['product_name'], ratio=0.6))
                if line['discount'] != 0:
                    eprint.text(
                        printline('Discount: ' + str(line['discount']) + '%',
                                  ratio=0.6,
                                  indent=2))
                if line['unit_name'] == 'Unit(s)':
                    eprint.text(
                        printline(quantity(line['quantity']) + ' x ' +
                                  price(line['price']),
                                  pricestr,
                                  ratio=0.6,
                                  indent=2))
                else:
                    eprint.text(
                        printline(quantity(line['quantity']) +
                                  line['unit_name'] + ' x ' +
                                  price(line['price']),
                                  pricestr,
                                  ratio=0.6,
                                  indent=2))

        # Subtotal if the taxes are not included
        taxincluded = True
        if money(receipt['subtotal']) != money(receipt['total_with_tax']):
            eprint.text(printline('', '-------'))
            eprint.text(
                printline(_('Subtotal'),
                          money(receipt['subtotal']),
                          width=40,
                          ratio=0.6))
            print_taxes()
            #eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))
            taxincluded = False

        # Total
        eprint.text(printline('', '-------'))
        eprint.set(align='center', height=2)
        eprint.text(
            printline(_('         TOTAL'),
                      money(receipt['total_with_tax']),
                      width=40,
                      ratio=0.6))
        eprint.text('\n\n')

        # Paymentlines
        eprint.set(align='center')
        for line in receipt['paymentlines']:
            eprint.text(
                printline(line['journal'], money(line['amount']), ratio=0.6))

        eprint.text('\n')
        eprint.set(align='center', height=2)
        eprint.text(
            printline(_('        CHANGE'),
                      money(receipt['change']),
                      width=40,
                      ratio=0.6))
        eprint.set(align='center')
        eprint.text('\n')

        # Extra Payment info
        if receipt['total_discount'] != 0:
            eprint.text(
                printline(_('Discounts'),
                          money(receipt['total_discount']),
                          width=40,
                          ratio=0.6))
        if taxincluded:
            print_taxes()
            #eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))

        # Footer
        if check(receipt['footer']):
            eprint.text('\n' + receipt['footer'] + '\n\n')
        eprint.text(receipt['name'] + '\n')
        eprint.text(
            str(receipt['date']['date']).zfill(2) + '/' +
            str(receipt['date']['month'] + 1).zfill(2) + '/' +
            str(receipt['date']['year']).zfill(4) + ' ' +
            str(receipt['date']['hour']).zfill(2) + ':' +
            str(receipt['date']['minute']).zfill(2))
Example #33
class MQTTClient():
    def __init__(self):
        self._mqttc = mqttc.Client("admins")
        self._mqttc.on_message = self.mqtt_on_message
        self._mqttc.on_connect = self.mqtt_on_connect
        self._mqttc.on_publish = self.mqtt_on_publish
        self._mqttc.on_subscribe = self.mqtt_on_subscribe
        self._mqttc.on_disconnect = self.mqtt_on_disconnect
        self._mqttc_connection_status = "NULL"
        self._active_time = datetime.datetime.now()
        self._push_queue = Queue()
        self._published = set()
        return

    def _conn_str(self, _code):
        _d = {
            0: "Connection successful",
            1: "Connection refused - incorrect protocol version",
            2: "Connection refused - invalid client identifier",
            3: "Connection refused - server unavailable",
            4: "Connection refused - bad username or password",
            5: "Connection refused - not authorised",
        }
        return _d.get(_code, "unknown error")

    def mqtt_on_connect(self, mqttc, userdata, flags, rc):
        logging.info("mqtt_on_connect rc: " + self._conn_str(rc))
        if rc == 0:
            self._mqttc_connection_status = "CONNECTED"
        return

    def mqtt_on_message(self, mqttc, userdata, msg):
        logging.info(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
        return

    def mqtt_on_publish(self, mqttc, userdata, mid):
        logging.info("published mid: " + str(mid))
        if mid in self._published:
            self._published.remove(mid)
        self._active_time = datetime.datetime.now()
        return

    def mqtt_on_subscribe(self, mqttc, userdata, mid, granted_qos):
        logging.info("Subscribed: " + str(mid) + " " + str(granted_qos))
        return

    def mqtt_on_log(self, mqttc, userdata, level, string):
        logging.info("on-log:" + string)
        return

    def mqtt_on_disconnect(self, mqttc, userdata, rc):
        logging.info("mqtt-on-disconnect:" + str(rc))
        self._mqttc_connection_status = "NULL"
        return

    def username_pw_set(self, user, password):
        self._mqttc.username_pw_set(user, password)

    def connect(self):
        if self._mqttc_connection_status == "CONNECTED":
            return

        self._mqttc_connection_status = "CONNECTING"
        self._published = set()

        # the server auto-disconnects after 120s; the client should
        # disconnect first, since it cannot observe the server-side
        # disconnect event
        _r = self._mqttc.connect(MQTT_HOST, MQTT_PORT, 120)
        return

    def publish_one(self, _token, _body):
        #topic, payload, qos=1, retain=True):
        qos = 1
        retain = True
        _body = json.dumps(_body)
        self._push_queue.put([_token, _body, qos, retain])
        return True

    def disconnect(self):
        if self._mqttc_connection_status == "NULL":
            return
        self._mqttc_connection_status = "NULL"
        self._mqttc.disconnect()
        return

    def outdate(self, _delta):
        if self._mqttc_connection_status == "NULL":
            return
        _now = datetime.datetime.now()
        if _now - self._active_time > _delta:
            self.disconnect()
        return

    def loop(self):
        if self._mqttc_connection_status == "NULL":
            self.connect()
            return

        if self._mqttc_connection_status != "CONNECTED":
            logging.info("looping for: " + self._mqttc_connection_status)
            self._mqttc.loop()
            return

        if self._push_queue.empty():
            # nothing new to publish; keep the network loop alive for any
            # in-flight messages
            if len(self._published) > 0:
                self._mqttc.loop()
            return

        _push = self._push_queue.get(False)
        self._push_queue.task_done()

        result, mid = self._mqttc.publish(*_push)
        if result == mqttc.MQTT_ERR_SUCCESS:
            self._published.add(mid)
        elif result == mqttc.MQTT_ERR_NO_CONN:
            self._push_queue.put(_push)
            self.connect()
        else:
            self._push_queue.put(_push)
            logging.info("WHAT HAPPEND?? %d" % result)

        self._mqttc.loop()
        return

    def start_send(self):
        while True:
            self.loop()

            if self._push_queue.empty() == True and len(self._published) == 0:
                logging.info("nothing to push")
                break

            if self._mqttc_connection_status == "NULL":
                logging.info("mqttclient connection error")
                break
        return
Example #34
class DFrotz(object):
    """
    Simple interface to dfrotz
    """
    def __init__(self, path_to_dfrotz, path_to_game, path_to_backupfile):
        self.outputQ = Queue()
        self.process = Popen([path_to_dfrotz, '-w500', path_to_game],
                             stdin=PIPE,
                             stdout=PIPE)
        self.readoutputthread = Thread(
            target=self._fillQ, args=[self.process.stdout, self.outputQ])
        self.readoutputthread.daemon = True
        self.readoutputthread.start()
        self.backupfile = path_to_backupfile

    def _fillQ(self, output, Q):
        for line in iter(output.readline, ''):
            Q.put(line)
        output.close()

    def get_output(self):
        ret_string = ''
        while not self.outputQ.empty():
            line = self.outputQ.get()
            if not ('Score' in line and 'Moves' in line) \
                    and not line.startswith('.') \
                    and not line.startswith('> >'):
                ret_string += line
        return output_formatter(ret_string.decode('iso-8859-1').strip())

    def do(self, command):
        """
        pipe command to frotz

        @param string command, to be send to frotz-process
            special commands: quit, save, restore
        @return bool terminate
        """
        if command == 'quit':
            # FIXME allow user to shut down the bot?
            self.process.stdin.write('quit\n')
            sleep(0.1)
            self.process.stdin.write('Y\n')
            sleep(0.1)
            self.readoutputthread.join()
            self.outputQ.queue.clear()
            self.outputQ.put('Ok, quit.')
            return True
        elif command == 'save':
            self.process.stdin.write('save\n')
            sleep(0.1)
            self.process.stdin.write(self.backupfile + '\n')
            sleep(0.1)
            if os.path.exists(self.backupfile):  # overwrite
                self.process.stdin.write('Y\n')
            sleep(0.1)
            self.outputQ.queue.clear()
            self.outputQ.put('Ok, saved.')
            return False
        elif command == 'restore':
            self.process.stdin.write('restore\n')
            sleep(0.1)
            self.process.stdin.write(self.backupfile + '\n')
            sleep(0.1)
            self.outputQ.queue.clear()
            self.outputQ.put('Ok, restored.')
            return False
        else:
            try:
                command = command.decode('utf-8')
            except:
                pass
            self.process.stdin.write(command.encode('iso-8859-1') + '\n')
            return False
Example #35
# (truncated at the source: the beginning of the worker function that builds
# `req` from a queued URL and opens `file1` is missing)
        response = urlopen(req)
        html_content = response.read()
        print html_content

        file1.write(html_content)

        file1.close()
    else:
        pass


if __name__ == '__main__':

    filename = 'C:/Users/srv.sngh92/Desktop/urls_list.txt'

    q = Queue(concurrent * 2)
    for i in range(concurrent):
        t = Thread(target=doWork)
        t.daemon = True
        t.start()
    try:
        with open(filename) as f:
            data = f.readlines()
        for row in data:
            url = row.split("\n")
            q.put(url[0])
        q.join()
    except KeyboardInterrupt:
        sys.exit(1)
Example #36
if len(sys.argv) != 2:
  print 'Usage: python shit.py <input file>'
  sys.exit(1)

with open(sys.argv[1]) as f:
  testcases = map(int, f.readlines()[1:])

remaining = set(testcases) 

found = {1:1}
if 1 in remaining:
  remaining.remove(1)
  
iters = 1
currQ = Queue()
currQ.put(1)
while remaining:
  iters = iters+1
  nextQ = Queue()
  while not currQ.empty():
    x = currQ.get()
    if (x+1) not in found:
      found[x+1] = iters
      nextQ.put(x+1)
      if (x+1) in remaining:
        remaining.remove(x+1)
    y = int(str(x)[::-1])
    if y not in found:
      found[y] = iters
      nextQ.put(y)
      if y in remaining:
        remaining.remove(y)
  currQ = nextQ
Example #37
class Master(Thread):  # parses info_hash
    def __init__(self):
        Thread.__init__(self)
        self.setDaemon(True)
        self.queue = Queue()
        self.cache = Queue()
        self.count = 0
        self.mutex = threading.RLock()  # re-entrant lock: the same thread can re-acquire a lock it already holds
        self.waitDownload = Queue()
        self.metadata_queue = Queue()
        # self.dbconn = mdb.connect(DB_HOST, DB_USER, DB_PASS, 'oksousou', charset='utf8')
        # self.dbconn.autocommit(False)
        # self.dbcurr = self.dbconn.cursor()
        # self.dbcurr.execute('SET NAMES utf8')
        self.visited = set()

    def lock(self):  # acquire the lock
        self.mutex.acquire()

    def unlock(self):  # release the lock
        self.mutex.release()

    def work(self, item):

        print "start thread", item
        while True:
            self.prepare_download_metadata()
            self.lock()
            self.download_metadata()
            self.unlock()

            self.lock()
            self.got_torrent()
            self.unlock()

    def start_work(self, max):

        for item in xrange(max):
            t = threading.Thread(target=self.work, args=(item, ))
            t.setDaemon(True)
            t.start()
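    # Each worker cycles through three phases: pull an info_hash off the
    # queue (prepare_download_metadata), check the DB and dispatch metadata
    # downloads under the lock (download_metadata), then persist parsed
    # torrents under the lock (got_torrent). A minimal start, assuming some
    # DHT frontend feeds log_announce():
    #   master = Master()
    #   master.start_work(150)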

    # enqueueing torrents here is more efficient
    def log_announce(self, binhash, address=None):
        # don't enqueue above INFO_HASH_LEN, or processing can't keep up
        if self.queue.qsize() < INFO_HASH_LEN:
            if is_ip_allowed(address[0]):
                self.queue.put([address, binhash])  # got an info_hash

    def log(self, infohash, address=None):
        # same guard as log_announce: cap the queue so processing keeps up
        if self.queue.qsize() < INFO_HASH_LEN:
            if is_ip_allowed(address[0]):
                self.queue.put([address, infohash])

    def prepare_download_metadata(self):

        if self.queue.qsize() == 0:
            sleep(2)
        # take an info_hash off the queue for downloading
        address, binhash = self.queue.get()
        if binhash in self.visited:
            return
        if len(self.visited) > 100000:  # reset once it grows past 100000 entries
            self.visited = set()
        self.visited.add(binhash)
        # record this info_hash as visited
        info_hash = binhash.encode('hex')
        utcnow = datetime.datetime.utcnow()

        self.cache.put((address, binhash, utcnow))  # push into the cache queue

    def download_metadata(self):

        if self.cache.qsize() > CACHE_LEN / 2:  # flush once the cache is half full
            while self.cache.qsize() > 0:  # drain the queue
                address, binhash, utcnow = self.cache.get()
                info_hash = binhash.encode('hex')
                self.dbcurr.execute(
                    'SELECT id FROM search_hash WHERE info_hash=%s',
                    (info_hash, ))
                y = self.dbcurr.fetchone()
                if y:
                    # update last-seen time and request count
                    self.dbcurr.execute(
                        'UPDATE search_hash SET last_seen=%s, requests=requests+1 WHERE info_hash=%s',
                        (utcnow, info_hash))
                else:
                    self.waitDownload.put((address, binhash))
            self.dbconn.commit()
            if self.waitDownload.qsize() > WAIT_DOWNLOAD:
                while self.waitDownload.qsize() > 0:
                    address, binhash = self.waitDownload.get()
                    t = threading.Thread(target=dlTorrent.download_metadata,
                                         args=(address, binhash,
                                               self.metadata_queue))
                    t.setDaemon(True)
                    t.start()

    def decode(self, s):
        if type(s) is list:
            s = ';'.join(s)
        u = s
        for x in (self.encoding, 'utf8', 'gbk', 'big5'):
            try:
                u = s.decode(x)
                return u
            except:
                pass
        return s.decode(self.encoding, 'ignore')

    def decode_utf8(self, d, i):
        if i + '.utf-8' in d:
            return d[i + '.utf-8'].decode('utf8')
        return self.decode(d[i])

    def parse_metadata(self, data):  # parse torrent metadata
        info = {}
        self.encoding = 'utf8'
        try:
            torrent = bdecode(data)  # bdecode the raw metadata
            if not torrent.get('name'):
                return None
        except:
            return None
        detail = torrent
        info['name'] = self.decode_utf8(detail, 'name')
        if 'files' in detail:
            info['files'] = []
            for x in detail['files']:
                if 'path.utf-8' in x:
                    v = {
                        'path': self.decode('/'.join(x['path.utf-8'])),
                        'length': x['length']
                    }
                else:
                    v = {
                        'path': self.decode('/'.join(x['path'])),
                        'length': x['length']
                    }
                if 'filehash' in x:
                    v['filehash'] = x['filehash'].encode('hex')
                info['files'].append(v)
            info['length'] = sum([x['length'] for x in info['files']])
        else:
            info['length'] = detail['length']
        info['data_hash'] = hashlib.md5(detail['pieces']).hexdigest()
        return info
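    # Shape of the dict parse_metadata returns (illustrative values; field
    # names are the ones assigned above):
    #   {'name': u'example.mkv',
    #    'files': [{'path': u'example.mkv', 'length': 734003200}],  # multi-file torrents only
    #    'length': 734003200,
    #    'data_hash': '<md5 hex digest of the pieces string>'}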

    def got_torrent(self):
        if self.metadata_queue.qsize() == 0:
            return
        binhash, address, data, start_time = self.metadata_queue.get()
        if not data:
            return
        try:
            info = self.parse_metadata(data)
            if not info:
                return
        except:
            traceback.print_exc()
            return

        temp = time.time()
        x = time.localtime(float(temp))
        utcnow = time.strftime("%Y-%m-%d %H:%M:%S", x)  # get time now

        info_hash = binhash.encode('hex')  # hex info_hash, as used in magnet links
        info['info_hash'] = info_hash
        # need to build tags
        info['tagged'] = False
        info['classified'] = False
        info['requests'] = 1
        info['last_seen'] = utcnow
        info['create_time'] = utcnow
        info['source_ip'] = address[0]

        if info.get('files'):
            files = [z for z in info['files'] if not z['path'].startswith('_')]
            if not files:
                files = info['files']
        else:
            files = [{'path': info['name'], 'length': info['length']}]
        files.sort(key=lambda z: z['length'], reverse=True)
        bigfname = files[0]['path']
        info['extension'] = metautils.get_extension(bigfname).lower()
        info['category'] = metautils.get_category(info['extension'])

        try:
            try:
                print '\n', 'Saved', info['info_hash'], info['name'], (
                    time.time() - start_time), 's', address[0]
            except:
                print '\n', 'Saved', info['info_hash']
            ret = self.dbcurr.execute(
                'INSERT INTO search_hash(info_hash,category,data_hash,name,extension,classified,source_ip,tagged,'
                +
                'length,create_time,last_seen,requests) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',
                (info['info_hash'], info['category'], info['data_hash'],
                 info['name'], info['extension'], info['classified'],
                 info['source_ip'], info['tagged'], info['length'],
                 info['create_time'], info['last_seen'], info['requests']))
            if self.count % 50 == 0:
                self.dbconn.commit()
                if self.count > 100000:
                    self.count = 0
        except:
            print self.name, 'save error', info
            traceback.print_exc()
            return
Example #38
0
class LogRedirector(eg.PluginBase):

    text = Text
    default = None
    ioFile = None
    commitThread = None

    def OpenFile(self, logfile):
        self.ioFile = openFile(logfile, encoding = 'utf-8', mode = 'a')
        return self.ioFile
    
    def SetFlag(self, val):
        self.flag = val


    def __start__(
        self,
        mode = 0,
        logfile = None,
        interval = 1,
        maxSize = 20,
        minSize = 10,
        check = False
    ):
        if check:
            self.q = Queue()
            self.flag = True
        self.default = eg.Log._WriteLine
        self.check = check
        
        def WriteLine2File(when, indent, wRef, line):
            wref = wRef.__repr__().split(" ") if wRef else ""
            if len(wref) == 1:
                wref = "EVENT: "
            elif len(wref) == 7:
                wref = wref[4][1:-5].upper()+": "
            wref = wref if wref != "PLUGIN: " else ""
            if self.check:
                self.q.put("%s  %s%s%s\r\n" % (str(dt.fromtimestamp(when))[:19],indent*3*" ",wref,line.strip()))
                if self.flag:
                    while not self.q.empty():
                        self.ioFile.write(self.q.get())
            else:
                self.ioFile.write("%s  %s%s%s\r\n" % (str(dt.fromtimestamp(when))[:19],indent*3*" ",wref,line.strip()))
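        # A formatted line looks like (illustrative):
        #   "2016-01-02 03:04:05  EVENT: Main.OnInit\r\n"
        # i.e. timestamp[:19], indent*3 spaces, the wref tag, the stripped line.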
        
        if mode == 0:
            def extWriteLine(self, line, icon, wRef, when, indent):
                self.ctrl.WriteLine(line, icon, wRef, when, indent)
            self.ioFile = None
        else:
            self.OpenFile(logfile)
            fct = FileCommitThread(
                self.ioFile,
                self,
                60 * interval,
                maxSize,
                minSize,
                check
            )
            fct.start()
            self.commitThread = fct
        if mode == 1:
            def extWriteLine(self, line, icon, wRef, when, indent):
                self.ctrl.WriteLine(line, icon, wRef, when, indent)
                WriteLine2File(when, indent, wRef, line)
        if mode == 2:
            def extWriteLine(self, line, icon, wRef, when, indent):
                WriteLine2File(when, indent, wRef, line)
        eg.Log._WriteLine = extWriteLine


    def __stop__(self):
        if self.commitThread:
            fct = self.commitThread
            if fct.isAlive():
                fct.AbortThread()
            del self.commitThread
        self.commitThread = None
        eg.Log._WriteLine = self.default
        if self.ioFile:
            if self.check:
                while not self.q.empty():
                    self.ioFile.write(self.q.get())
            self.ioFile.close()
            self.ioFile = None


    def Configure(
        self,
        mode = 0,
        logfile = None,
        interval = 1,
        maxSize = 20,
        minSize = 10,
        check = False
    ):
        text = self.text
        panel = eg.ConfigPanel(self)
        self.logfile = logfile
        logFileCtrl = MyFileBrowseButton(
            panel,
            toolTip = text.toolTipFile,
            dialogTitle = text.browseFile,
            buttonText = eg.text.General.browse,
            startDirectory = eg.configDir,
            defaultFile = "EventGhost_Log.txt"
        )
        logFileCtrl.GetTextCtrl().SetEditable(False)
        logLabel = wx.StaticText(panel, -1, text.label)
        radioBox = wx.RadioBox(
            panel, 
            -1, 
            text.logMode, 
            choices = text.modes, 
            style=wx.RA_SPECIFY_ROWS
        )
        radioBox.SetSelection(mode)
        commitLabel = wx.StaticText(panel, -1, text.commitLabel)
        commitCtrl = eg.SpinIntCtrl(
            panel,
            -1,
            interval,
            min=1,
            max=99,
        )
        sizeCheck = wx.CheckBox(panel, -1, "")
        sizeCheck.SetValue(check)
        sizeLabel_1 = wx.StaticText(panel, -1, text.size_1)
        sizeCtrl_1 = eg.SpinIntCtrl(
            panel,
            -1,
            maxSize,
            min=2,
            max=50,
        )
        sizeLabel_2 = wx.StaticText(panel, -1, text.size_2)
        sizeCtrl_2 = eg.SpinIntCtrl(
            panel,
            -1,
            minSize,
            min=1,
            max=49,
        )
        sizeLabel_3 = wx.StaticText(panel, -1, text.size_3)
        commitSizer = wx.BoxSizer(wx.HORIZONTAL)
        commitSizer.Add(commitLabel, 0, wx.TOP, 3)
        commitSizer.Add(commitCtrl, 0, wx.LEFT, 8)
        sizeSizer = wx.BoxSizer(wx.HORIZONTAL)
        sizeSizer.Add(sizeCheck, 0, wx.TOP|wx.RIGHT, 3)
        sizeSizer.Add(sizeLabel_1, 0, wx.TOP, 3)
        sizeSizer.Add(sizeCtrl_1, 0, wx.LEFT|wx.RIGHT, 5)
        sizeSizer.Add(sizeLabel_2, 0, wx.TOP, 3)
        sizeSizer.Add(sizeCtrl_2, 0, wx.LEFT|wx.RIGHT, 5)
        sizeSizer.Add(sizeLabel_3, 0, wx.TOP, 3)
        sizerAdd = panel.sizer.Add
        sizerAdd(radioBox, 0, wx.TOP | wx.EXPAND, 2)
        sizerAdd(logLabel, 0, wx.TOP, 15)
        sizerAdd(logFileCtrl, 0, wx.TOP | wx.EXPAND, 2)
        sizerAdd(commitSizer, 0, wx.TOP | wx.EXPAND, 15)
        sizerAdd(sizeSizer, 0, wx.TOP | wx.EXPAND, 15)


        def DummyHandle(evt):
            pass
        sizeCtrl_1.Bind(wx.EVT_TEXT, DummyHandle)
        sizeCtrl_2.Bind(wx.EVT_TEXT, DummyHandle)


        def Validation(event = None):
            flg_1 = len(self.logfile) > 0
            val = bool(radioBox.GetSelection())
            flg_2 = not val or (val and self.logfile is not None)
            flg_3 = sizeCtrl_1.GetValue() > sizeCtrl_2.GetValue()
            flg = flg_1 and flg_2 and flg_3
            panel.dialog.buttonRow.okButton.Enable(flg)
            panel.dialog.buttonRow.applyButton.Enable(flg)
        sizeCtrl_1.Bind(eg.EVT_VALUE_CHANGED, Validation)
        sizeCtrl_2.Bind(eg.EVT_VALUE_CHANGED, Validation)


        def logFileChange(event):
            val = logFileCtrl.GetTextCtrl().GetValue()
            if val.lower() == u"%s\\log.txt" % unicode(eg.configDir).lower():
                PlaySound('SystemExclamation', SND_ASYNC)
                MessageBox(
                    panel.GetHandle(),
                    text.mess % unicode(eg.configDir),
                    "EventGhost - Log redirector",
                    48
                    )
                logFileCtrl.GetTextCtrl().SetValue(self.logfile)
                return
            self.logfile = val
            Validation()
        logFileCtrl.Bind(wx.EVT_TEXT, logFileChange)


        def onSizeCheck(event = None):
            val=sizeCheck.GetValue()
            sizeLabel_1.Enable(val)
            sizeCtrl_1.Enable(val)
            sizeLabel_2.Enable(val)
            sizeCtrl_2.Enable(val)
            sizeLabel_3.Enable(val)            
        sizeCheck.Bind(wx.EVT_CHECKBOX, onSizeCheck)
        onSizeCheck()


        def onRadioBox(event = None):
            val = bool(radioBox.GetSelection())
            if not val or self.logfile is None:
                logFileCtrl.GetTextCtrl().ChangeValue("")
            else:
                logFileCtrl.GetTextCtrl().ChangeValue(self.logfile)
            logLabel.Enable(val)
            logFileCtrl.Enable(val)
            commitLabel.Enable(val)
            commitCtrl.Enable(val)
            sizeCheck.Enable(val)
            if val:
                val=sizeCheck.GetValue()
            sizeCheck.SetValue(val)
            onSizeCheck()
        radioBox.Bind(wx.EVT_RADIOBOX, onRadioBox)
        onRadioBox()

        while panel.Affirmed():
            val = bool(radioBox.GetSelection())
            panel.SetResult(
                val,
                logFileCtrl.GetTextCtrl().GetValue() if val else self.logfile,
                commitCtrl.GetValue(),
                sizeCtrl_1.GetValue(),
                sizeCtrl_2.GetValue(),                
                sizeCheck.GetValue()
            )
Example #39
0
class TestRender(unittest.TestCase):
    @staticmethod
    def createSnapshotDir(n,
                          capture_template=None,
                          size=(50, 50),
                          write_metadata=True):
        """Create n random snapshots in a named temporary folder. Return the absolute path to the directory.
        The user is responsible for deleting the directory when done."""
        # Make the temp folder.
        dir = mkdtemp() + '/'
        # Make sure any nested folders specified by the capture template exist.
        try:
            os.makedirs(
                os.path.dirname("{0}{1}".format(dir, capture_template) % 0))
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                raise

        # Make images and save them with the correct names.
        random.seed(0)

        with open(
                os.path.join(dir, METADATA_FILE_NAME)
                if write_metadata else os.devnull, 'w') as f:
            writer = DictWriter(f, METADATA_FIELDS)
            for i in range(n):
                image = Image.new('RGB',
                                  size=size,
                                  color=tuple(
                                      randint(0, 255) for _ in range(3)))
                image_path = "{0}{1}".format(dir, capture_template) % i
                image.save(image_path, 'JPEG')
                writer.writerow({
                    'snapshot_number': i,
                    'file_name': os.path.basename(image_path),
                    'time_taken': i * 1000
                })

        return dir

    @staticmethod
    def createWatermark():
        """Create a watermark image in a temp file. The file will be deleted when the handle is garbage collected.
        Returns the (temporary) file handle."""
        # Make the temp file.
        watermark_file = NamedTemporaryFile()
        # Make an image and save it to the temp file.
        image = Image.new('RGB', size=(50, 50), color=(0, 0, 255))
        image.save(watermark_file, 'JPEG')
        return watermark_file

    def createRenderingJob(self, rendering):
        self.on_render_start = Mock(return_value=None)
        self.on_render_success = Mock(return_value=None)
        self.on_render_error = Mock(return_value=None)
        return TimelapseRenderJob(
            job_id=self.rendering_job_id,
            rendering=rendering,
            logging=self.octolapse_settings.current_logging_profile(),
            print_filename=self.print_name,
            capture_dir=self.snapshot_dir_path,
            snapshot_filename_format=self.capture_template,
            output_tokens=Render._get_output_tokens(self.data_directory,
                                                    "COMPLETED",
                                                    self.print_name,
                                                    self.print_start_time,
                                                    self.print_end_time),
            octoprint_timelapse_folder=self.octoprint_timelapse_folder,
            ffmpeg_path=self.ffmpeg_path,
            threads=1,
            time_added=0,
            on_render_start=self.on_render_start,
            on_success=self.on_render_success,
            on_error=self.on_render_error,
            clean_after_success=True,
            clean_after_fail=True)

    def doTestCodec(self, name, extension, codec_name):
        """
        Tests a particular codec setup.
        :param name: The internal Octolapse name of the codec.
        :param extension: The file extension we should expect out of this configuration.
        :param codec_name: The expected name that ffprobe should return for this codec.
        """
        self.snapshot_dir_path = TestRender.createSnapshotDir(
            10, self.capture_template, size=(50, 50))
        # Create the job.
        r = Rendering(guid=uuid.uuid4(), name="Use {} codec".format(name))
        r.update({'output_format': name})
        job = self.createRenderingJob(rendering=r)

        # Start the job.
        job._render()

        # Assertions.
        self.on_render_start.assert_called_once()
        self.on_render_success.assert_called_once()
        self.on_render_error.assert_not_called()
        output_files = os.listdir(self.octoprint_timelapse_folder)
        self.assertEqual(
            len(output_files), 1,
            "Incorrect amount of output files detected! Found {}. Expected only timelapse output."
            .format(output_files))
        output_filename = output_files[0]
        self.assertRegexpMatches(
            output_filename,
            re.compile('.*\.{}$'.format(extension), re.IGNORECASE))
        output_filepath = os.path.join(self.octoprint_timelapse_folder,
                                       output_filename)
        self.assertGreater(os.path.getsize(output_filepath), 0)
        # Check the codec using ffprobe to make sure it matches what we expect.
        actual_codec = subprocess.check_output([
            "ffprobe", "-v", "error", "-select_streams", "v:0",
            "-show_entries", "stream=codec_name", "-of",
            "default=noprint_wrappers=1:nokey=1", output_filepath
        ]).strip()
        self.assertEqual(actual_codec, codec_name)

    def setUp(self):
        self.octolapse_settings = OctolapseSettings(NamedTemporaryFile().name)
        self.rendering_job_id = "job_id"

        self.print_name = "print_name"
        self.print_start_time = 0
        self.print_end_time = 100

        # Create fake snapshots.
        self.capture_template = get_snapshot_filename(self.print_name,
                                                      SnapshotNumberFormat)
        self.data_directory = mkdtemp()
        self.octoprint_timelapse_folder = mkdtemp()

        self.ffmpeg_path = "ffmpeg"
        self.render_task_queue = Queue(maxsize=1)
        self.render_task_queue.put(self.rendering_job_id)

    def tearDown(self):
        rmtree(self.snapshot_dir_path)
        rmtree(self.data_directory)
        rmtree(self.octoprint_timelapse_folder)

    def test_basicRender(self):
        self.snapshot_dir_path = TestRender.createSnapshotDir(
            10, self.capture_template, size=(50, 50))
        # Create the job.
        job = self.createRenderingJob(rendering=None)

        # Start the job.
        job._render()

        # Assertions.
        self.on_render_start.assert_called_once()
        self.on_render_success.assert_called_once()
        self.on_render_error.assert_not_called()
        output_files = os.listdir(self.octoprint_timelapse_folder)
        self.assertEqual(
            len(output_files), 1,
            "Incorrect amount of output files detected! Found {}. Expected only timelapse output."
            .format(output_files))
        output_filename = output_files[0]
        self.assertRegexpMatches(output_filename,
                                 re.compile('.*\.mp4$', re.IGNORECASE))
        self.assertGreater(
            os.path.getsize(
                os.path.join(self.octoprint_timelapse_folder,
                             output_filename)), 0)

    def test_noMetadata(self):
        self.snapshot_dir_path = TestRender.createSnapshotDir(
            10, self.capture_template, size=(50, 50), write_metadata=False)
        # Create the job.
        job = self.createRenderingJob(rendering=None)

        # Start the job.
        job._render()

        # Assertions.
        self.on_render_start.assert_called_once()
        self.on_render_success.assert_called_once()
        self.on_render_error.assert_not_called()
        output_files = os.listdir(self.octoprint_timelapse_folder)
        self.assertEqual(
            len(output_files), 1,
            "Incorrect amount of output files detected! Found {}. Expected only timelapse output."
            .format(output_files))
        output_filename = output_files[0]
        self.assertRegexpMatches(output_filename,
                                 re.compile('.*\.mp4$', re.IGNORECASE))
        self.assertGreater(
            os.path.getsize(
                os.path.join(self.octoprint_timelapse_folder,
                             output_filename)), 0)

    def test_noffmpeg(self):
        self.snapshot_dir_path = TestRender.createSnapshotDir(
            10, self.capture_template, size=(50, 50), write_metadata=False)
        # Create the job.
        job = self.createRenderingJob(rendering=None)
        job._ffmpeg = None

        # Start the job.
        job._render()

        # Assertions.
        self.on_render_start.assert_called_once()
        self.on_render_success.assert_not_called()
        self.on_render_error.assert_called_once()
        output_files = os.listdir(self.octoprint_timelapse_folder)
        self.assertEqual(
            len(output_files), 0,
            "Expected no output files, but found {}.".format(output_files))

    def test_watermark(self):
        self.snapshot_dir_path = TestRender.createSnapshotDir(
            10, self.capture_template, size=(50, 50))
        # Create the job.
        watermark_file = self.createWatermark()
        r = Rendering(guid=uuid.uuid4(), name="Render with Watermark")
        r.update({
            'enable_watermark': True,
            'selected_watermark': watermark_file.name
        })
        job = self.createRenderingJob(rendering=r)

        # Start the job.
        job._render()

        # Assertions.
        self.on_render_start.assert_called_once()
        self.on_render_success.assert_called_once()
        self.on_render_error.assert_not_called()
        output_files = os.listdir(self.octoprint_timelapse_folder)
        self.assertEqual(
            len(output_files), 1,
            "Incorrect amount of output files detected! Found {}. Expected only timelapse output."
            .format(output_files))
        output_filename = output_files[0]
        self.assertRegexpMatches(output_filename,
                                 re.compile('.*\.mp4$', re.IGNORECASE))
        self.assertGreater(
            os.path.getsize(
                os.path.join(self.octoprint_timelapse_folder,
                             output_filename)), 0)

    # True parameterized testing in unittest seems pretty complicated.
    # I'll just manually generate tests for items in this list.
    CODECS_AND_EXTENSIONS = {
        'avi': dict(name='avi', extension='avi', codec_name='mpeg4'),
        'flv': dict(name='flv', extension='flv', codec_name='flv'),
        'gif': dict(name='gif', extension='gif', codec_name='gif'),
        'h264': dict(name='h264', extension='mp4', codec_name='h264'),
        'mp4': dict(name='mp4', extension='mp4', codec_name='mpeg4'),
        'mpeg': dict(name='mpeg', extension='mpeg', codec_name='mpeg2video'),
        'vob': dict(name='vob', extension='vob', codec_name='mpeg2video')
    }

    def test_avi_codec(self):
        self.doTestCodec(**self.CODECS_AND_EXTENSIONS['avi'])

    def test_flv_codec(self):
        self.doTestCodec(**self.CODECS_AND_EXTENSIONS['flv'])

    def test_gif_codec(self):
        self.doTestCodec(**self.CODECS_AND_EXTENSIONS['gif'])

    def test_h264_codec(self):
        self.doTestCodec(**self.CODECS_AND_EXTENSIONS['h264'])

    def test_mp4_codec(self):
        self.doTestCodec(**self.CODECS_AND_EXTENSIONS['mp4'])

    def test_mpeg_codec(self):
        self.doTestCodec(**self.CODECS_AND_EXTENSIONS['mpeg'])

    def test_vob_codec(self):
        self.doTestCodec(**self.CODECS_AND_EXTENSIONS['vob'])

    def test_overlay(self):
        self.snapshot_dir_path = TestRender.createSnapshotDir(
            10, self.capture_template, size=(640, 480))
        # Create the job.
        r = Rendering(guid=uuid.uuid4(), name="Render with overlay")
        r.update({
            'overlay_text_template':
            "Current Time: {current_time}\nTime elapsed: {time_elapsed}",
            'overlay_font_path': get_system_fonts()[0]
        })
        job = self.createRenderingJob(rendering=r)

        # Start the job.
        job._render()

        # Assertions.
        self.on_render_start.assert_called_once()
        self.on_render_success.assert_called_once()
        self.on_render_error.assert_not_called()
        output_files = os.listdir(self.octoprint_timelapse_folder)
        self.assertEqual(
            len(output_files), 1,
            "Incorrect amount of output files detected! Found {}. Expected only timelapse output."
            .format(output_files))
        output_filename = output_files[0]
        self.assertRegexpMatches(output_filename,
                                 re.compile('.*\.mp4$', re.IGNORECASE))
        output_filepath = os.path.join(self.octoprint_timelapse_folder,
                                       output_filename)
        self.assertGreater(os.path.getsize(output_filepath), 0)

    def test_overlay_invalid_font(self):
        self.snapshot_dir_path = TestRender.createSnapshotDir(
            10, self.capture_template, size=(640, 480))
        # Create the job.
        r = Rendering(guid=uuid.uuid4(), name="Render with overlay")
        r.update({
            'overlay_text_template':
            "Current Time: {current_time}\nTime elapsed: {time_elapsed}",
            'overlay_font_path': '/dev/null'
        })
        job = self.createRenderingJob(rendering=r)

        # Start the job.
        job._render()

        # Assertions.
        self.on_render_start.assert_called_once()
        self.on_render_success.assert_not_called()
        self.on_render_error.assert_called_once()
        output_files = os.listdir(self.octoprint_timelapse_folder)
        self.assertEqual(
            len(output_files), 0,
            "Expected no output files, but found {}.".format(output_files))
Example #40
0
def render_tiles(bbox,
                 mapfile,
                 tile_dir,
                 minZoom=0,
                 maxZoom=20,
                 name="unknown",
                 num_threads=NUM_THREADS):
    print "render_tiles(", bbox, mapfile, tile_dir, minZoom, maxZoom, name, ")"

    # Launch rendering threads
    queue = Queue(32)
    printLock = threading.Lock()
    renderers = {}
    for i in range(num_threads):
        renderer = RenderThread(tile_dir, mapfile, queue, printLock, maxZoom)
        render_thread = threading.Thread(target=renderer.loop)
        render_thread.start()
        print "Started render thread %s" % render_thread.getName()
        renderers[i] = render_thread

    if not os.path.isdir(tile_dir):
        os.mkdir(tile_dir)

    gprj = GeographicProjection(maxZoom + 1)
    ll0 = (bbox[0], bbox[3])
    ll1 = (bbox[2], bbox[1])

    for z in range(minZoom, maxZoom + 1):
        px0 = gprj.fromLLtoPixel(ll0, z)
        px1 = gprj.fromLLtoPixel(ll1, z)

        # check if we have directories in place
        zoom = "%s" % z
        if not os.path.isdir(tile_dir + zoom):
            os.mkdir(tile_dir + zoom)

        for x in range(int(px0[0] / TILE_SIZEF), int(px1[0] / TILE_SIZEF) + 1):
            # Validate x co-ordinate
            if (x < 0) or (x > 2**z):
                continue

            # check if we have directories in place
            str_x = "%s" % x
            if not os.path.isdir(tile_dir + zoom + '/' + str_x):
                os.mkdir(tile_dir + zoom + '/' + str_x)

            for y in range(int(px1[1] / TILE_SIZEF),
                           int(px0[1] / TILE_SIZEF) + 1):
                # Validate y co-ordinate
                if (y < 0) or (y >= 2**z):
                    continue

                #str_y = "%s" % y
                str_y = "%s" % ((2**z - 1) - y)

                tile_uri = tile_dir + zoom + '/' + str_x + '/' + str_y + '.png'

                # Submit tile to be rendered into the queue
                t = (name, tile_uri, x, y, z)
                try:
                    queue.put(t)
                except KeyboardInterrupt:
                    raise SystemExit("Ctrl-c detected, exiting...")

    # Signal render threads to exit by sending empty request to queue
    for i in range(num_threads):
        queue.put(None)
    # wait for pending rendering jobs to complete
    queue.join()
    for i in range(num_threads):
        renderers[i].join()
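# A minimal invocation sketch (paths and bbox illustrative; bbox order is
# (min_lon, min_lat, max_lon, max_lat) per the ll0/ll1 usage above, and
# tile_dir needs its trailing slash):
#   render_tiles((-0.5, 51.2, 0.3, 51.7), "mapnik.xml", "/tmp/tiles/",
#                minZoom=0, maxZoom=5, name="london")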
Example #41
0
def launcher(i, q, cmd):  # signature inferred from the Thread(target=launcher, ...) call below
    while True:
        # grabs ip,cmd from queue
        ip = q.get()
        print "Thread %s: Running %s to %s" % (i, cmd, ip)
        subprocess.call("ssh root@%s %s" % (ip, cmd), shell=True)
        q.task_done()


# grab ips and cmd from config
ips, cmds = readConfig()

# Determine the number of threads to use, but max out at 25
if len(ips) < 25:
    num_threads = len(ips)
else:
    num_threads = 25

# start thread pool

for i in range(num_threads):
    for cmd in cmds:
        worker = Thread(target=launcher, args=(i, queue, cmd))
        worker.setDaemon(True)
        worker.start()
print "Main Thread Waiting"
for ip in ips:
    queue.put(ip)
queue.join()
end = time.time()
print "Dispatch Completed in %s seconds" % end - start
Example #42
0
class HttpParser(object):
    """parse http req & resp"""
    def __init__(self, processor):
        """
        :type processor: HttpDataProcessor
        """
        self.cur_type = None
        self.cur_data_queue = None
        self.inited = False
        self.is_http = False

        self.task_queue = None
        self.worker = None
        self.processor = processor

    def send(self, http_type, data, m_time):
        lm_time = m_time
        if not self.inited:
            self._init(http_type, data)
            self.inited = True

        if not self.is_http:
            return

        if self.cur_type == http_type:
            self.cur_data_queue.put(data)
            return

        self.cur_type = http_type
        if self.cur_data_queue is not None:
            # finish last task
            self.cur_data_queue.put(None)
        # start new task
        self.cur_data_queue = Queue()
        self.cur_data_queue.put(data)
        queuedata = [self.cur_type, self.cur_data_queue, lm_time]
        self.task_queue.put(queuedata)
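    # e.g. for one request/response exchange: REQUEST packets accumulate on a
    # single data queue; the first RESPONSE packet puts the None sentinel on
    # that queue (closing the request task) and opens a fresh queue, which is
    # handed to the worker as a new response task.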

    def _init(self, http_type, data):
        if not utils.is_request(data) or http_type != HttpType.REQUEST:
            # not a http request
            self.is_http = False
        else:
            self.is_http = True
            # one task is an http request or http response stream
            self.task_queue = Queue()
            self.worker = threading.Thread(target=self.process_tasks,
                                           args=(self.task_queue, ))
            self.worker.setDaemon(True)
            self.worker.start()

    def process_tasks(self, task_queue):
        message = RequestMessage()
        m_time = 0
        while True:
            queuedata = task_queue.get()
            httptype = queuedata[0]
            data_queue = queuedata[1]
            try:
                m_time = queuedata[2]
            except:
                pass
            #httptype, data_queue = task_queue.get()

            if httptype is None:
                # finished
                self.processor.finish()
                break

            reader = DataReader(data_queue)
            try:
                if httptype == HttpType.REQUEST:
                    self.read_request(reader, message, m_time)
                elif httptype == HttpType.RESPONSE:
                    self.read_response(reader, message)
            except Exception:
                #import traceback

                #traceback.print_exc()
                # consume all data.
                # reader.skipall()
                break

    def finish(self):
        if self.task_queue is not None:
            self.task_queue.put((None, None))
            if self.cur_data_queue is not None:
                self.cur_data_queue.put(None)
            self.worker.join()

    def read_headers(self, reader, lines):
        """
        :type reader: DataReader
        :type lines: list
        :return: dict
        """
        header_dict = defaultdict(str)
        while True:
            line = reader.readline()
            if line is None:
                break
            line = line.strip()
            if not line:
                break
            lines.append(line)

            key, value = utils.parse_http_header(line)
            if key is None:
                # incorrect headers.
                continue

            header_dict[key.lower()] = value
        return header_dict
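    # e.g. a header block of
    #   Host: example.com
    #   Content-Length: 42
    # yields {'host': 'example.com', 'content-length': '42'} - keys are
    # lower-cased and, being a defaultdict(str), missing keys read as ''.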

    def read_http_req_header(self, reader):
        """read & parse http headers"""
        line = reader.readline()
        if line is None:
            return None
        line = line.strip()
        if not utils.is_request(line):
            return None

        req_header = HttpRequestHeader()
        items = line.split(b' ')
        if len(items) == 3:
            req_header.method = items[0]
            req_header.uri = items[1]
            req_header.protocol = items[2]

        lines = [line]
        header_dict = self.read_headers(reader, lines)
        for key in header_dict.iterkeys():
            CTCore.client.add_header(key, header_dict[key])

        if b"content-length" in header_dict:
            req_header.content_len = int(header_dict[b"content-length"])
        if b'chunked' in header_dict[b"transfer-encoding"]:
            req_header.chunked = True
        req_header.content_type = header_dict[b'content-type']
        req_header.user_agent = header_dict[b'user-agent']
        req_header.compress = utils.get_compress_type(
            header_dict[b"content-encoding"])
        req_header.host = header_dict[b"host"]
        if b'expect' in header_dict:
            req_header.expect = header_dict[b'expect']

        req_header.referer = ""
        if b"referer" in header_dict:
            req_header.referer = header_dict[b'referer']

        req_header.raw_data = b'\n'.join(lines)
        return req_header

    def read_http_resp_header(self, reader):
        """read & parse http headers"""
        line = reader.readline()
        if line is None:
            return line
        line = line.strip()

        if not utils.is_response(line):
            return None
        resp_header = HttpResponseHeader()
        resp_header.status_line = line
        try:
            resp_header.status_code = int(line.split(' ')[1])
        except:
            pass

        lines = [line]
        header_dict = self.read_headers(reader, lines)
        if b"content-length" in header_dict:
            resp_header.content_len = int(header_dict[b"content-length"])
        if b"location" in header_dict:
            resp_header.redirect_to = header_dict[b"location"]
        if b'chunked' in header_dict[b"transfer-encoding"]:
            resp_header.chunked = True
        resp_header.content_type = header_dict[b'content-type']
        resp_header.compress = utils.get_compress_type(
            header_dict[b"content-encoding"])
        resp_header.connection_close = (header_dict[b'connection'] == b'close')
        resp_header.raw_data = b'\n'.join(lines)

        resp_header.filename = ""
        if b"content-disposition" in header_dict:
            cnt_dis = header_dict[b'content-disposition']
            if cnt_dis.find("filename=") > -1:
                resp_header.filename = cnt_dis.split('=')[1].rstrip()

        return resp_header

    def read_chunked_body(self, reader, skip=False):
        """ read chunked body """
        result = []
        orig_chunked_resp = []
        # read a chunk per loop
        while True:
            # read chunk size line
            cline = reader.readline()
            if cline is None:
                # error occurred.
                if not skip:
                    return b''.join(result)
                else:
                    return
            chunk_size_end = cline.find(b';')
            if chunk_size_end < 0:
                chunk_size_end = len(cline)
                # skip chunk extension
            chunk_size_str = cline[0:chunk_size_end]
            # the last chunk
            if chunk_size_str[0] == b'0':
                # chunk footer header
                # TODO: handle additional http headers.
                while True:
                    cline = reader.readline()
                    if cline is None or len(cline.strip()) == 0:
                        break
                if not skip:
                    orig_chunked_resp.append(b'0\r\n\r\n')
                    return b''.join(result), b''.join(orig_chunked_resp)
                else:
                    return
                    # chunk size
            chunk_size_str = chunk_size_str.strip()
            try:
                chunk_len = int(chunk_size_str, 16)
            except:
                return b''.join(result)

            data = reader.read(chunk_len)
            if data is None:
                # skip all
                # error occurred.
                if not skip:
                    return b''.join(result)
                else:
                    return
            if not skip:
                result.append(data)
                orig_chunked_resp.append(cline + data + b'\r\n')

            # a CR-LF to end this chunked response
            reader.readline()
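    # e.g. the chunked body "4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n" decodes to
    # 'Wikipedia', while orig_chunked_resp keeps the raw chunk framing intact.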

    def read_request(self, reader, message, m_time):
        """ read and output one http request. """
        if message.expect_header and not utils.is_request(reader.fetchline()):
            req_header = message.expect_header
            message.expect_header = None
        else:
            req_header = self.read_http_req_header(reader)
            if req_header is None:
                # read header error, we skip all data.
                reader.skipall()
                return
            req_header.time = m_time
            if req_header.expect:
                # it is expect:continue-100 post request
                message.expect_header = req_header

        # deal with body
        if not req_header.chunked:
            content = reader.read(req_header.content_len)
        else:
            content = self.read_chunked_body(reader)

        _filter = config.get_filter()
        show = _filter.by_domain(req_header.host) and _filter.by_uri(
            req_header.uri)
        message.filtered = not show
        if show:
            self.processor.on_http_req(req_header, content)

    def read_response(self, reader, message):
        """
        read and output one http response
        """
        resp_header = self.read_http_resp_header(reader)
        if resp_header is None:
            reader.skipall()
            return

        if message.expect_header:
            if resp_header.status_code == 100:
                # expected 100, we do not read body
                reader.skipall()
                return

        orig_chunked_resp = ""
        # read body
        if not resp_header.chunked:
            if resp_header.content_len == 0:
                if resp_header.connection_close:
                    # we can't get content length, so assume it till the end of data.
                    resp_header.content_len = 10000000
                else:
                    # we can't get the content length and the body isn't
                    # chunked; nothing better to do than read all the data.
                    resp_header.content_len = 10000000
            content = reader.read(resp_header.content_len)
            if content is not None:
                resp_header.content_len = len(content)
            else:
                resp_header.content_len = 0
        else:
            content, orig_chunked_resp = self.read_chunked_body(reader)

        if not message.filtered:
            self.processor.on_http_resp(resp_header, content,
                                        orig_chunked_resp)
Example #43
0
                       limit=size,
                       data=True)
    orig_rules = deepcopy(q._rules)

    things = list(q)
    while things:
        yield things

        q._rules = deepcopy(orig_rules)
        q._after(things[-1])
        things = list(q)


solr_queue = Queue()
for i in range(20):
    solr_queue.put(pysolr.Solr(g.solr_url))


class SolrConnection(object):
    """
        Represents a connection to Solr, properly limited to N
        concurrent connections. Used like

            with SolrConnection() as s:
                s.add(things)
    """
    def __init__(self, commit=False, optimize=False):
        self.commit = commit
        self.optimize = optimize

    def __enter__(self):
Example #44
0
        visited[child] = visited[vertex] + 1
        queue.put(child)

    return children


def count_days():
    while not queue.empty():
        vertex = queue.get()
        graph[vertex] = get_children(vertex)


q = int(raw_input())
for i in xrange(q):
    a, b = map(int, raw_input().split())
    adjacent = []

    for j in xrange(a):
        adjacent.append(map(int, raw_input().split()))

    x, y = map(int, raw_input().split())
    queue = Queue()
    visited = Counter()
    graph = defaultdict(list)

    vertex = (x - 1, y - 1)
    queue.put(vertex)
    visited[vertex] = 0
    count_days()

    print max(visited.values())
Example #45
0
    # ping scan by default
    if isping == 'yes':
        if lowversion:
            print "your Python may not support ping; please upgrade to 2.7"
            exit()
        pinglist = []
        q = Queue()
        lock = threading.Lock()

        for i in xrange(threads):
            t = Thread(target=pinger)
            t.setDaemon(True)
            t.start()

        for ip in ips:
            q.put(ip)
        q.join()

    else:
        pinglist = ips

    if len(pinglist) == 0:
        print "not find any live machine - -|||"
        exit()

    print "[*] Scanning for live machines done,it has Elapsed time:%s " % (
        time.time() - starttime)

    #========================= section separator ==================================#

    # multithreaded port scan, identifying the service running on each port
Example #46
0
class Modem():
    #Serial port settings
    read_buffer_size = 1024

    #Some constants
    linesep = '\r\n'
    ok_response = 'OK'
    error_response = 'ERROR'
    clcc_header = "+CLCC:"
    clip_header = "+CLIP:"
    clcc_enabled = False

    #Status storage variables
    status = {"state": "idle", "type": None}

    #The Caller ID variable - is set when a call is received and cleared when a call ends
    #When set, it looks like this:
    #current_callerid = {"number":"something", "type":"unknown"/"international"/"national"/"network-specific"}
    current_callerid = None

    def __init__(self,
                 serial_path="/dev/ttyAMA0",
                 serial_timeout=0.5,
                 read_timeout=0.2):
        self.serial_path = serial_path
        self.serial_timeout = serial_timeout
        self.read_timeout = read_timeout
        self.executing_command = Event()
        self.should_monitor = Event()
        self.unexpected_queue = Queue()

    def init(self):
        self.port = Serial(self.serial_path,
                           115200,
                           timeout=self.serial_timeout)
        self.at()
        self.enable_verbosity()
        logger.debug("Battery voltage is: {}".format(self.get_voltage()))
        self.enable_clcc()
        self.enable_clip()
        self.set_message_mode("pdu")
        #self.at_command("AT+CSSN=1,1")
        self.save_settings()

    def deinit(self):
        try:
            self.port.close()
        except:  #Could be not created or already closed
            pass

#Functions that the user will be calling

    def call(self, number):
        #ATD in CLCC is going to generate CLCC data straight away,
        #so that's going into the queue to be processed separately
        response = self.at_command("ATD{};".format(number), nook=True)
        self.queue_unexpected_data(response)
        return True

    def ussd(self, string):
        result = self.at_command('AT+CUSD=1,"{}"'.format(string))

    def hangup(self):
        return self.at_command("ATH", noresponse=True)

    def answer(self):
        return self.at_command("ATA")

    #Debugging helpers

    def pprint_status(self):
        print("--------------------------")
        print("New state: {}".format(self.status["state"]))
        if self.current_callerid:
            print("Caller ID: {}, type: {}".format(
                self.current_callerid["number"],
                self.current_callerid["type"]))

    def print_callerid(self, callerid):
        if self.current_callerid:
            print("Incoming: {} ({})".format(self.current_callerid["number"],
                                             self.current_callerid["type"]))

    #Call state set function - that also calls a callback

    def set_state(self, key, value):
        self.status[key] = value
        if callable(self.update_state_cb):
            self.update_state_cb(key, value)

    #Callbacks that change the call state and clean state variables
    #Not to be overridden directly as they might have desirable side effects
    #Also, they're called in a hackish way and overriding would fail anyway

    #Call-specific callbacks

    #  "0":on_talking,
    def on_talking(self):
        #Call answered, voice comms established
        self.set_state("state", "talking")
        #self.pprint_status()

    #  "1":on_held,
    def on_held(self):
        #Held call signal
        if self.status["type"] == "incoming":
            self.set_state("state", "held")
        else:
            self.set_state("state", "holding")
        #self.pprint_status()

    #  "2":on_dialing,
    def on_dialing(self):
        assert (self.status["type"] == "outgoing")
        self.set_state("state", "dialing")
        #self.pprint_status()

    #  "3":on_alerting,
    def on_alerting(self):
        assert (self.status["type"] == "outgoing")
        self.set_state("state", "alerting")
        #self.pprint_status()

    #  "4":on_incoming,
    def on_incoming(self):
        assert (self.status["type"] == "incoming")
        self.set_state("state", "incoming_call")
        #self.pprint_status()

    #  "5":on_waiting,
    def on_waiting(self):
        assert (self.status["type"] == "incoming")
        self.set_state("state", "incoming")
        #self.pprint_status()

    #  "6":on_disconnect
    def on_disconnect(self, incoming=True):
        #Either finished or missed call
        if self.status["type"] == "incoming" and self.status["state"] not in [
                "held", "talking"
        ]:
            self.set_state("state", "missed_call")
        else:
            self.set_state("state", "finished")
        Timer(3, self.on_idle).start()
        #self.pprint_status()

    def on_idle(self):
        #Cleans up variables and sets state to "idle"
        #Only runs from threading.Timer since modem sends no "idle" CLCC message
        #Safety check to ensure this doesn't run during a call
        #if call happens right after previous call ends:
        if self.status["state"] not in ["active_call", "held"]:
            self.current_callerid = None
            self.set_state("state", "idle")
            self.set_state("type", None)
            #self.pprint_status()

    #SMS callbacks

    def on_incoming_message(self, cmti_line):
        #New message signal
        logger.debug("You've got mail! Line: {}".format(
            cmti_line[len("+CMTI:"):]).strip())
        self.read_all_messages()

    def read_all_messages(self):
        prev_timeout = self.serial_timeout
        self.serial_timeout = 1  #TODO: get message count and base timeout on that
        output = self.at_command("AT+CMGL")
        self.serial_timeout = prev_timeout
        if len(output) % 2 == 1:
            logger.warning("CMGL output lines not in pairs?")
            logger.debug("PDATA: {}".format(repr(output)))
            return False
        cmgl_header = "+CMGL: "
        for i in range(len(output) / 2):
            header = output[i * 2]
            if not header.startswith(cmgl_header):
                logger.warning(
                    "Line presumed to be CMGL doesn't start with CMGL header!")
                continue
            id, x, x, pdu_len = header[len(cmgl_header):].split(",")
            smsc_pdu_str = output[(i * 2) + 1]
            self.decode_message(smsc_pdu_str, pdu_len, id)

    def decode_message(self, smsc_pdu_str, pdu_len, id):
        logger.debug("Reading message {}".format(id))
        pdu_len = int(pdu_len)  #Just in case
        smsc_len = len(
            smsc_pdu_str) - pdu_len * 2  #We get PDU length in octets
        if smsc_len == 0:
            logger.warning("No SMSC in PDU - seems like it can happen!")
        pdu_str = smsc_pdu_str[smsc_len:]  #Discarding SMSC info
        #SMSC info might actually be useful in the future - maybe its spoofing could be detected? Does it even happen?
        smspdu.pdu.dump(pdu_str)
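        # e.g. a 30-octet PDU arrives as 60 hex characters; any hex characters
        # before those 60 in smsc_pdu_str are the SMSC prefix discarded above.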

    #Non-CLCC exclusive callbacks
    #(the non-CLCC path might not even work that well, for what I know)
    def on_ring(self):
        logger.debug("Ring ring ring bananaphone!")

    #AT command-controlled modem settings and simple functions

    def get_manufacturer(self):
        return self.at_command("AT+CGMI")

    def get_model(self):
        return self.at_command("AT+CGMM")

    def get_imei(self):
        return self.at_command("AT+GSN")

    def save_settings(self):
        self.at_command("AT&W")

    def enable_verbosity(self):
        return self.at_command('AT+CMEE=1')

    def enable_clcc(self):
        self.clcc_enabled = True
        return self.at_command('AT+CLCC=1')

    def set_message_mode(self, mode_str):
        if mode_str.lower() == "text":
            return self.at_command('AT+CMGF=1')
        elif mode_str.lower() == "pdu":
            return self.at_command('AT+CMGF=0')
        else:
            raise ValueError("Wrong message mode: {}".format(mode_str))

    def enable_clip(self):
        return self.at_command('AT+CLIP=1')

    def at(self):
        response = self.at_command('AT')
        if response is True: return
        raise ATError(expected=self.ok_response, received=response)

    #Auxiliary functions that aren't related to phone functionality
    #TODO: Expose this to an external API of sorts

    def get_voltage(self):
        answer = self.at_command('AT+CBC')
        if not answer.startswith('+CBC'):
            return 0.0  #TODO - this needs to be better!
        voltage_str = answer.split(':')[1].split(',')[2]
        voltage = round(int(voltage_str) / 1000.0, 2)
        return voltage
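    # e.g. an answer of '+CBC: 0,95,4123' yields voltage_str '4123', returned
    # as 4.12 (volts).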

    #Call status and Caller ID message processing code
    #This is where we get call information info

    def process_clcc(self, clcc_line):
        clcc_line = clcc_line[len(self.clcc_header):]
        clcc_line = clcc_line.strip()
        elements = clcc_line.split(',')
        if len(elements) < 5:
            logger.debug("Unrecognized number of CLCC elements!")
            logger.debug("PDATA: " + repr(elements))
            return
        elif len(elements) > 8:
            logger.warning("Too much CLCC elements!")
            logger.warning("PDATA: " + repr(elements))
            elements = elements[:8]
        if len(elements) > 7:  #Elements 5 and 6 are present
            self.set_callerid(elements[5], elements[6])
        call_type = elements[1]
        call_status = elements[2]
        new_state = "incoming" if call_type == "1" else "outgoing"
        self.set_state("type", new_state)
        self.clcc_mapping[call_status](self)
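    # e.g. the line '+CLCC: 1,1,4,0,0,"+123456789",145,""' splits into eight
    # elements: direction 1 (incoming), status 4 (routed to on_incoming), and
    # caller id +123456789 of type 145 ("international" in the mapping below).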

    def process_clip(self, line):
        clip_line = line[len(self.clip_header):]
        clip_line = clip_line.strip()
        elements = clip_line.split(',')
        if len(elements) < 2:
            raise ATError(expected="valid CLIP string with >2 elements",
                          received=line)
        elif len(elements) < 6:
            logger.warning("Less than 6 CLIP elements, noting")
            logger.warning("PDATA: " + repr(elements))
        elif len(elements) > 6:
            logger.error("Too much CLIP elements, what's wrong?!")
            logger.error("PDATA: " + repr(elements))
            elements = elements[:6]
        number = elements[0]
        type_id = elements[1]
        self.set_callerid(number, type_id)

    def set_callerid(self, number, type_id):
        clip_type_mapping = {
            "129": "unknown",
            "161": "national",
            "145": "international",
            "177": "network-specific"
        }
        if type_id not in clip_type_mapping.keys():
            logger.error(
                "PDATA: CLIP type id {} not found in type mapping!".format(
                    type_id))
            type = "unknown"
        else:
            type = clip_type_mapping[type_id]
        #Setting status variable
        self.current_callerid = {"number": number.strip('\"'), "type": type}

    clcc_mapping = {
        "0": on_talking,
        "1": on_held,
        "2": on_dialing,
        "3": on_alerting,
        "4": on_incoming,
        "5": on_waiting,
        "6": on_disconnect
    }

    def on_clcc(self, clcc_line):
        for i in range(4):
            if not has_nonascii(clcc_line) and is_csv(clcc_line):
                break
            logger.error("Garbled call info line! Try {}, line: {}".format(
                i, clcc_line))
            sleep(1)
            clcc_response = self.at_command("AT+CLCC", nook=True)
            logger.error(repr(lines))
            for line in lines:
                if line.startswith(self.clcc_header):
                    clcc_line = line
                else:
                    self.queue_unexpected_data(line)
        if has_nonascii(clcc_line) or not is_csv(clcc_line):
            logger.error("Still garbled CLCC line!")
            return
        logger.info("Call info OK, line: {}".format(
            repr(clcc_line[len(self.clcc_header):])).strip())
        self.process_clcc(clcc_line)

    def on_clip(self, line):
        self.process_clip(line)

    #Low-level functions

    def check_input(self):
        data = self.port.read(self.read_buffer_size)
        if data:
            self.queue_unexpected_data(data)

    def at_command(self, command, noresponse=False, nook=False):
        self.executing_command.set()
        self.check_input()
        self.port.write(command + self.linesep)
        echo = self.port.read(len(command))  #checking for command echo
        if echo != command:
            raise ATError(received=echo, expected=command)
        #print(repr(self.port.read(len(self.linesep)+1)))
        self.port.read(
            len(self.linesep) + 1
        )  #shifting through the line separator - that +1 seems to be necessary when we're reading right after the echo
        if noresponse:
            self.executing_command.clear()
            return True  #one of the commands that doesn't need a response
        answer = self.port.read(self.read_buffer_size)
        self.executing_command.clear()
        lines = filter(None, answer.split(self.linesep))
        #print(lines)
        if nook: return lines
        if self.ok_response not in lines:  #expecting OK as one of the elements
            raise ATError(expected=self.ok_response, received=lines)
        #We can have a sudden undervoltage warning, though
        #I'll assume the OK always goes last in the command
        #So we can pass anything after OK to the unexpected line parser
        ok_response_index = lines.index(self.ok_response)
        if ok_response_index + 1 < len(lines):
            self.queue_unexpected_data(lines[(ok_response_index + 1):])
            lines = lines[:(ok_response_index + 1)]
        if len(lines) == 1:  #Single-line response
            if lines[0] == self.ok_response:
                return True
            else:
                return lines[0]
        else:
            lines = lines[:-1]
            if len(lines) == 1:
                return lines[0]
            else:
                return lines
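    # To illustrate at_command's return convention (values below are
    # illustrative, not captured output):
    #   modem.at_command("AT")                  -> True ("OK" only)
    #   modem.at_command("AT+CBC")              -> "+CBC: 0,75,3921" (one data line; "OK" stripped)
    #   modem.at_command("AT+CLCC", nook=True)  -> list of raw response lines, no "OK" check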

    #Functions for background monitoring of any unexpected input

    def queue_unexpected_data(self, data):
        self.unexpected_queue.put(data)

    def process_incoming_data(self, data):
        logger.debug("Incoming data: {}".format(repr(data)))
        if isinstance(data, str):
            data = data.split(self.linesep)
        lines = filter(None, data)
        for line in lines:
            #print(line)
            #Now onto the callbacks
            #We should ignore some messages if we're using CLIP
            #As those messages will appear anyway, but processing them
            #would be redundant. It could be much prettier, though.
            if line == "OK":
                continue
            if line == "RING":
                if not self.clcc_enabled:
                    self.on_ring()
                continue
            if line == "BUSY":
                if not self.clcc_enabled:
                    self.on_busy()
                continue
            if line == "HANGUP":
                if not self.clcc_enabled:
                    self.on_hangup()
                continue
            if line == "NO ANSWER":
                if not self.clcc_enabled:
                    self.on_noanswer()
                continue
            if line == "NO CARRIER":
                if not self.clcc_enabled:
                    self.on_nocarrier()
                continue
            if line in ["SMS Ready", "Call Ready"]:
                continue  #Modem just reset
            if line.startswith("+CMTI:"):
                self.on_incoming_message(line)
                continue
            if line.startswith(self.clcc_header):
                self.on_clcc(line)
                continue
            if line.startswith(self.clip_header):
                self.on_clip(line)
                continue
            self.parse_unexpected_message(line)

    def parse_unexpected_message(self, data):
        #haaaax - a reversed line separator sometimes sneaks into the data
        if self.linesep[::-1] in "".join(data):
            data = "".join(data).split(self.linesep[::-1])
        logger.debug("Unexpected line: {}".format(data))

    #The monitor thread - it receives data from the modem and calls callbacks

    def monitor(self):
        while self.should_monitor.isSet():
            logger.info("Monitoring...")
            if not self.executing_command.isSet():
                #First, the serial port
                #print("Reading data through serial!")
                data = self.port.read(self.read_buffer_size)
                if data:
                    logger.debug("Got data through serial!")
                    self.process_incoming_data(data)
                #Then, the queue of unexpected messages received from other commands
                #print("Reading data from queue!")
                try:
                    data = self.unexpected_queue.get_nowait()
                except Empty:
                    pass
                else:
                    logger.debug("Got data from queue!")
                    self.process_incoming_data(data)
            #print("Got to sleep")
            sleep(self.serial_timeout)
            #print("Returned from sleep")
            #try:
            #    print(modem.at_command("AT+CPAS"))
            #except:
            #    print("CPAS exception")
        logger.info("Stopped monitoring!")

    def start_monitor(self):
        self.should_monitor.set()
        self.thread = Thread(target=self.monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop_monitor(self):
        self.should_monitor.clear()
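For orientation, a minimal usage sketch of the monitoring machinery above; the class name, constructor, and serial path are assumptions, since they are defined elsewhere in the source:

modem = Modem(serial_path="/dev/ttyAMA0")  # hypothetical constructor; opens self.port
modem.start_monitor()   # spawns the daemon thread running monitor()
try:
    sleep(60)           # callbacks such as on_clcc/on_clip fire in the background
finally:
    modem.stop_monitor()  # monitor() exits once should_monitor is cleared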
Example #47
class EventEngine2(object):
    """计时器使用Python线程的事件驱动引擎"""
    def __init__(self):
        # Event queue
        self.__queue = Queue()
        # Engine on/off switch
        self.__active = False
        # Event-processing thread
        self.__thread = Thread(target=self.__run)
        # Timer thread, used to trigger timer events
        self.__timer = Thread(target=self.__runTimer)
        self.__timerActive = False
        self.__timerSleep = 1.2

        self.__handlers = {}

    # ---------------------------------------------
    def __run(self):
        while self.__active:
            try:
                event = self.__queue.get(block=True,
                                         timeout=1)  # block at most 1 second waiting for an event
                self.__process(event)
            except Empty:
                pass

    # ---------------------------------------------
    def __process(self, event):
        """处理事件"""
        # 检查是否存在对该事件进行监听的处理函数
        if event.type_ in self.__handlers:
            # 若存在,则按顺序将事件传递给处理函数执行
            [handler(event) for handler in self.__handlers[event.type_]]

    # ---------------------------------------------
    def __runTimer(self):
        """运行在计时器线程中的循环函数"""
        while self.__timerActive:
            # 创建计时器事件
            event = Event(type_=EVENT_TIMER)

            # 向队列中存入计时器事件
            self.put(event)

            # 等待
            sleep(self.__timerSleep)

    # ---------------------------------------------
    def start(self):
        """引擎启动"""
        # 将引擎设为启动
        self.__active = True

        # 启动事件处理线程
        self.__thread.start()

        # 启动计时器,计时器事件间隔默认设定为1秒
        self.__timerActive = True
        self.__timer.start()

    # ---------------------------------------------
    def stop(self):
        """停止引擎"""
        # 将引擎设为停止
        self.__active = False

        # 停止计时器
        self.__timerActive = False
        self.__timer.join()

        # Wait for the event-processing thread to exit
        self.__thread.join()

    # ---------------------------------------------
    def register(self, type_, handler):
        """注册事件处理函数监听"""
        # 尝试获取该事件类型对应的处理函数列表,若无则创建
        try:
            handlerList = self.__handlers[type_]
        except KeyError:
            handlerList = []
            self.__handlers[type_] = handlerList

        # Register the handler only if it is not already in the list
        if handler not in handlerList:
            handlerList.append(handler)

    # ---------------------------------------------
    def unregister(self, type_, handler):
        """注销事件处理函数监听"""
        # 尝试获取该事件类型对应的处理函数列表,若无则忽略该次注销请求
        try:
            handlerList = self.__handlers[type_]

            # Remove the handler if it is present in the list
            if handler in handlerList:
                handlerList.remove(handler)

            # If the handler list is now empty, remove the event type from the engine
            if not handlerList:
                del self.__handlers[type_]
        except KeyError:
            pass

    # ---------------------------------------------
    def put(self, event):
        """向事件队列中存入事件"""
        self.__queue.put(event)
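A short usage sketch for the engine above, assuming the `Event` class and `EVENT_TIMER` constant it references, plus `sleep` from the time module:

def print_timer(event):
    # runs on the engine's event-processing thread for every timer event
    print("timer event: {}".format(event.type_))

engine = EventEngine2()
engine.register(EVENT_TIMER, print_timer)
engine.start()  # timer events now arrive roughly every 1.2 seconds
sleep(5)
engine.stop()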
Example #48
class TTS(object):
    """
    TTS abstract class to be implemented by all TTS engines.

    It aggregates the minimum required parameters and exposes
    ``execute(sentence)`` function.
    """
    __metaclass__ = ABCMeta

    def __init__(self, lang, voice, validator, phonetic_spelling=True):
        super(TTS, self).__init__()
        self.lang = lang or 'en-us'
        self.voice = voice
        self.filename = '/tmp/tts.wav'
        self.validator = validator
        self.phonetic_spelling = phonetic_spelling
        self.enclosure = None
        random.seed()
        self.queue = Queue()
        self.playback = PlaybackThread(self.queue)
        self.playback.start()
        self.clear_cache()
        self.spellings = self.load_spellings()

    def load_spellings(self):
        """Load phonetic spellings of words as dictionary"""
        path = join('text', self.lang, 'phonetic_spellings.txt')
        spellings_file = resolve_resource_file(path)
        if not spellings_file:
            return {}
        try:
            with open(spellings_file) as f:
                lines = filter(bool, f.read().split('\n'))
            lines = [i.split(':') for i in lines]
            return {key.strip(): value.strip() for key, value in lines}
        except ValueError:
            LOG.exception('Failed to load phonetic spellings.')
            return {}

    def begin_audio(self):
        """Helper function for child classes to call in execute()"""
        # Create signals informing start of speech
        self.ws.emit(Message("recognizer_loop:audio_output_start"))

    def end_audio(self):
        """
            Helper function for child classes to call in execute().

            Sends the recognizer_loop:audio_output_end message, indicating
            that speaking is done for the moment. It also checks if cache
            directory needs cleaning to free up disk space.
        """

        self.ws.emit(Message("recognizer_loop:audio_output_end"))
        # Clean the cache as needed
        cache_dir = mycroft.util.get_cache_directory("tts")
        mycroft.util.curate_cache(cache_dir, min_free_percent=100)

        # This check will clear the "signal"
        check_for_signal("isSpeaking")

    def init(self, ws):
        self.ws = ws
        self.playback.init(self)
        self.enclosure = EnclosureAPI(self.ws)
        self.playback.enclosure = self.enclosure

    def get_tts(self, sentence, wav_file):
        """
            Abstract method that a tts implementation needs to implement.
            Should get data from tts.

            Args:
                sentence(str): Sentence to synthesize
                wav_file(str): output file

            Returns: (wav_file, phoneme) tuple
        """
        pass

    def execute(self, sentence):
        """
            Convert sentence to speech.

            The method caches results if possible using the hash of the
            sentence.

            Args:
                sentence:   Sentence to be spoken
        """
        create_signal("isSpeaking")
        if self.phonetic_spelling:
            for word in re.findall(r"[\w']+", sentence):
                if word in self.spellings:
                    sentence = sentence.replace(word, self.spellings[word])

        key = str(hashlib.md5(sentence.encode('utf-8', 'ignore')).hexdigest())
        wav_file = os.path.join(mycroft.util.get_cache_directory("tts"),
                                key + '.' + self.type)

        if os.path.exists(wav_file):
            LOG.debug("TTS cache hit")
            phonemes = self.load_phonemes(key)
        else:
            wav_file, phonemes = self.get_tts(sentence, wav_file)
            if phonemes:
                self.save_phonemes(key, phonemes)

        self.queue.put((self.type, wav_file, self.visime(phonemes)))

    def visime(self, phonemes):
        """
            Create visimes from phonemes. Needs to be implemented by each
            TTS backend.

            Args:
                phonemes(str): String with phoneme data
        """
        return None

    def clear_cache(self):
        """ Remove all cached files. """
        if not os.path.exists(mycroft.util.get_cache_directory('tts')):
            return
        for f in os.listdir(mycroft.util.get_cache_directory("tts")):
            file_path = os.path.join(mycroft.util.get_cache_directory("tts"),
                                     f)
            if os.path.isfile(file_path):
                os.unlink(file_path)

    def save_phonemes(self, key, phonemes):
        """
            Cache phonemes

            Args:
                key:        Hash key for the sentence
                phonemes:   phoneme string to save
        """

        cache_dir = mycroft.util.get_cache_directory("tts")
        pho_file = os.path.join(cache_dir, key + ".pho")
        try:
            with open(pho_file, "w") as cachefile:
                cachefile.write(phonemes)
        except:
            LOG.debug("Failed to write .PHO to cache")

    def load_phonemes(self, key):
        """
            Load phonemes from cache file.

            Args:
                Key:    Key identifying phoneme cache
        """
        pho_file = os.path.join(mycroft.util.get_cache_directory("tts"),
                                key + ".pho")
        if os.path.exists(pho_file):
            try:
                with open(pho_file, "r") as cachefile:
                    phonemes = cachefile.read().strip()
                return phonemes
            except:
                LOG.debug("Failed to read .PHO from cache")
        return None

    def __del__(self):
        self.playback.stop()
        self.playback.join()
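A concrete engine mainly has to subclass TTS and implement get_tts(); below is a minimal sketch, where the `type` attribute, the `validator=None` shortcut, and the espeak invocation are illustrative assumptions rather than code from this project:

import subprocess

class EspeakTTS(TTS):
    type = 'wav'  # extension used by execute() when building cache file names

    def __init__(self, lang, voice):
        super(EspeakTTS, self).__init__(lang, voice, validator=None)

    def get_tts(self, sentence, wav_file):
        # synthesize with the espeak CLI; this engine produces no phoneme data
        subprocess.call(['espeak', '-v', self.lang, '-w', wav_file, sentence])
        return wav_file, None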
Example #49
def store_image_from_filename(db, filename, widths=[]):
    '''
    Saves the image from the filename into one of the active media servers.

    db - A connection to the database.
    filename - A valid filename (string) or list of filenames (list of strings).
    Filename(s) may be full or relative paths that the application can actually read
    widths - A list of integers with the widths of the images you would like
        to be saved in the media server. Like [220, 40]

    This saves just the original image (if widths is empty or None) or several images
    (one per width, if widths is a list of integers). The images are renamed before saving,
    so you don't need to generate random or unique names.

    Returns:
    1) For one image - a dict(int: string): the keys are the widths of the images, the values
        are the actual URLs that serve the images. To obtain the URL for the image in the
        original width (and size), look for the key 0 (zero)
    2) For many images - a dict of dicts(int: string): the keys are the image names with extension,
    the values are dicts whose keys are the widths specified as input parameters and whose values are the image URLs.
    To obtain the URL for the image in the original width (and size), look for the key 0 (zero)
    To obtain the URL for the image at a width specified as an input parameter, look for that width

    Ad.1.
        >>> dict_of_images = store_image_from_filename(db, '/tmp/myfile.jpg', widths=(212, 202))
        >>> dict_of_images[0] # this returns the URL for the image with the original width (and size).
        'http://32.media.mypinnings.com/asd/qwe/zxc/asdqwezxcasdfasdfasdf.jpg'
        >>> dict_of_images[212] # returns the URL for the image scaled to a width of 212
        'http://32.media.mypinnings.com/asd/qwe/zxc/asdqwezxcasdfasdfasdf_212.jpg'
        >>> dict_of_images[202] # returns the URL for the image scaled to a width of 202
        'http://32.media.mypinnings.com/asd/qwe/zxc/asdqwezxcasdfasdfasdf_202.jpg'
    Ad.2.
        >>> filepath0 = '/tmp/tux0.png'
        >>> filepath1 = '/tmp/tux1.png'
        >>> fileslist = []
        >>> fileslist.append(filepath0)
        >>> fileslist.append(filepath1)
        >>> dict_of_dicts_of_images = store_image_from_filename(db, fileslist, widths=(212, 202))
        >>> {'tux0.png': {0: {'url': 'http://32.media.mypinnings.com/98u/196/46a/03419071145676531021621434279195.png'},
        202: {'url': 'http://32.media.mypinnings.com/327/v35/4ej/03419071145676531021621434279195_202.png', 'width': 202, 'height': 246},
        212: {'url': 'http://32.media.mypinnings.com/7lr/2o7/ema/03419071145676531021621434279195_212.png', 'width': 212, 'height': 258},
        222: {'url': 'http://32.media.mypinnings.com/98u/196/46a/03419071145676531021621434279195.png', 'width': 222, 'height': 271}},
        'tux1.png': {0: {'url': 'http://32.media.mypinnings.com/c1x/zqt/5ku/08492460583817663485035255370941.png'},
        202: {'url': 'http://32.media.mypinnings.com/96f/3nj/c1m/08492460583817663485035255370941_202.png', 'width': 202, 'height': 246},
        212: {'url': 'http://32.media.mypinnings.com/j2y/p5y/q56/08492460583817663485035255370941_212.png', 'width': 212, 'height': 258},
        222: {'url': 'http://32.media.mypinnings.com/c1x/zqt/5ku/08492460583817663485035255370941.png', 'width': 222, 'height': 271}}}

    The image(s) is(are) saved in its original size, plus the sizes of the widths you
    specify. The aspect ratio is preserved when scaling. The image is renamed,
    so use the returned dictionary to obtain the filenames for each size.
    '''

    images_queue = Queue()
    images_by_width = dict()
    images_by_width_list = dict()

    def processstoringimages(queue_element, queue_type):
        # Each submitted task handles exactly one filename that was already
        # pulled off images_queue, so there is no loop and no task_done() here
        # (the element is a plain string, which has no task_done() method).
        server = _get_an_active_server(db)
        path, original_name, original_extension = _split_path_for(queue_element)
        new_filename = _generate_a_new_filename(server, original_extension)
        new_filename = os.path.join(path, new_filename)
        os.rename(queue_element, new_filename)
        image_url = _upload_file_to_bucket(server, new_filename)
        original_image_width, original_image_height = _get_image_size(
            new_filename)

        if str == queue_type:
            images_by_width[original_image_width] = {
                'url': image_url,
                'width': original_image_width,
                'height': original_image_height
            }
            images_by_width[0] = {
                'url': image_url,
                'width': original_image_width,
                'height': original_image_height
            }
            if widths:
                for width in widths:
                    scaled_image_filename, width, height = _scale_image(
                        new_filename, width)
                    scaled_image_url = _upload_file_to_bucket(
                        server, scaled_image_filename)
                    images_by_width[width] = {
                        'url': scaled_image_url,
                        'width': width,
                        'height': height
                    }
                    os.unlink(scaled_image_filename)
            os.unlink(new_filename)
        elif list == queue_type:
            if widths:
                smalldict = dict()
                smalldict[0] = {'url': image_url}
                smalldict[original_image_width] = {
                    'url': image_url,
                    'width': original_image_width,
                    'height': original_image_height
                }

                for width in widths:
                    scaled_image_filename, width, height = _scale_image(
                        new_filename, width)
                    scaled_image_url = _upload_file_to_bucket(
                        server, scaled_image_filename)
                    smalldict[width] = {
                        'url': scaled_image_url,
                        'width': width,
                        'height': height
                    }
                    os.unlink(scaled_image_filename)
                images_by_width_list[original_name +
                                     original_extension] = smalldict
            os.unlink(new_filename)

    filetype = type(filename)
    if str == filetype:
        images_queue.put(filename)
    elif list == filetype:
        for single_filename in filename:
            images_queue.put(single_filename)
    else:
        print "Unsupported filename data type"
        return None

    with concurrent.futures.ThreadPoolExecutor(
            max_workers=NUM_THREADS) as executor:
        for num in range(0, images_queue.qsize()):
            executor.submit(processstoringimages, images_queue.get(), filetype)

    with images_queue.mutex:
        images_queue.queue.clear()

    if str == filetype:
        return images_by_width
    elif list == filetype:
        return images_by_width_list
    else:
        return None
Example #50
class EventEngine(object):
    """事件驱动引擎

    事件驱动引擎中所有的变量都设置为了私有,这是为了防止不小心
    从外部修改了这些变量的值或状态,导致bug。

    变量说明
    __queue:私有变量,事件队列
    __active:私有变量,事件引擎开关
    __thread:私有变量,事件处理线程
    __timer:私有变量,计时器
    __handlers:私有变量,事件处理函数字典

    方法说明
    __run: 私有方法,事件处理线程连续运行用
    __process: 私有方法,处理事件,调用注册在引擎中的监听函数
    __onTimer:私有方法,计时器固定事件间隔触发后,向事件队列中存入计时器事件
    start: 公共方法,启动引擎
    stop:公共方法,停止引擎
    register:公共方法,向引擎中注册监听函数
    unregister:公共方法,向引擎中注销监听函数
    put:公共方法,向事件队列中存入新的事件

    事件监听函数必须定义为输入参数仅为一个event对象,即:

    函数
    def func(event)

    对象方法
    def method(self, event)

    """

    # --------------------------------
    def __init__(self):
        # Event queue
        self.__queue = Queue()
        # Engine on/off switch
        self.__active = False
        # Event-processing thread
        self.__thread = Thread(target=self.__run)
        # Timer, used to trigger timer events
        self.__timer = QTimer()
        self.__timer.timeout.connect(self.__onTimer)
        # __handlers is a dictionary that stores the mapping from events to handlers;
        # each key maps to a list of the handler functions listening for that event
        self.__handlers = {}

    # --------------------------------
    def __run(self):
        """引擎运行"""
        while self.__active == True:
            try:
                event = self.__queue.get(block=True,
                                         timeout=1)  # 获取事件的阻塞时间设为1秒
                self.__process(event)
            except Empty:
                pass

    # --------------------------
    def __process(self, event):
        """处理事件"""
        # 检查是否存在对该事件进行监听的处理函数
        print self.__queue
        if event.type_ in self.__handlers:
            # 若存在,则按顺序将事件传递给处理函数执行
            [handler(event) for handler in self.__handlers[event.type_]]

    # ----------------------------------
    def __onTimer(self):
        """向事件队列中存入计时器事件"""
        # 创建计时器事件
        event = Event(type_=EVENT_TIMER)
        # 向队列中存入计时器事件
        self.put(event)

    # ---------------------------------------
    def start(self):
        """引擎启动"""
        # 将引擎设为启动
        self.__active = True
        # 启动事件处理线程
        self.__thread.start()
        # 启动计时器,计时器时间间隔默认设为0.6秒
        self.__timer.start(600)

    # ---------------------------
    def stop(self):
        """停止引擎"""
        # 将引擎设为停止
        self.__active = False
        # 停止计时器
        self.__timer.stop()
        # 等待事件处理线程退出
        self.__thread.join()

    # -----------------------------------------
    def register(self, type_, handler):
        """注册事件处理函数监听"""
        # 尝试获取该事件类型对应的处理函数列表,若无则创建
        try:
            handlerList = self.__handlers[type_]
        except KeyError:
            handlerList = []
            self.__handlers[type_] = handlerList
        # Register the handler only if it is not already in the list
        if handler not in handlerList:
            handlerList.append(handler)
            # print 'handlerList'
            # print handlerList

    # ----------------------------
    def unregister(self, type_, handler):
        """注销事件处理函数监听"""
        try:
            handlerList = self.__handlers[type_]
            # Remove the handler if it is present in the list
            if handler in handlerList:
                handlerList.remove(handler)
            # If the handler list is now empty, remove the event type from the engine
            if not handlerList:
                del self.__handlers[type_]
        except KeyError:
            pass

    # ---------------------------------------
    def put(self, event):
        """向事件队列中存入事件"""
        self.__queue.put(event)
class MiniPIXAcquisition(Thread):
    def __init__(self,
                 minipix,
                 pixet,
                 variable_frate=False,
                 shutter_time=.5,
                 detector_area=DESIRED_DETECTOR_AREA_3_PERCENT,
                 **kwargs):
        """
        :param minipix: MiniPIX object
        :param variable_frate: Capture with a variable frame rate if set to true
        :param shutter_time: Initial shutter time
        :param detector_area: Detector area parameter used by variable frame rate algorithm
        """
        Thread.__init__(self, **kwargs)
        self.minipix = minipix
        self.pixet = pixet
        self.variable = variable_frate
        self.shutter_time = shutter_time
        self.detector_area = detector_area
        self.max_shutter_time = 2
        self.min_shutter_time = .03  # 30 frames per second
        self.max_ramp_rate = 0
        self.data = Queue()
        self.stop_acquisitions = Event()
        self.shutdown_flag = Event()

    def _take_aquisition(self):
        """
        Exposes the MiniPIX for self.shutter_time and returns the acquired frame data.
        """

        self.minipix.doSimpleAcquisition(1,
                                         self.shutter_time,
                                         self.pixet.PX_FTYPE_AUTODETECT,
                                         'output.pmf')
        frame = self.minipix.lastAcqFrameRefInc()

        return frame.data()

    @staticmethod
    def _total_hit_pixels(frame):
        """
        :param frame: Frame of acquired MiniPIX data
        :return: the number of pixels that registered hits
        """
        total_hit_pixels = len(nonzero(frame)[0])
        return total_hit_pixels

    def _variable_frame_rate(self):
        acq = self._take_aquisition()
        count = self._total_hit_pixels(acq)
        self.data.put((acq, count))

        while not self.stop_acquisitions.is_set():
            hit_rate = count / self.shutter_time
            if hit_rate != 0:
                self.shutter_time = self.detector_area / hit_rate
            else:
                self.shutter_time = self.max_shutter_time

            if self.shutter_time < self.min_shutter_time:
                self.shutter_time = self.min_shutter_time
            if self.shutter_time > self.max_shutter_time:
                self.shutter_time = self.max_shutter_time

            acq = self._take_aquisition()
            count = self._total_hit_pixels(acq)
            self.data.put((acq, count))

    def _constant_frame_rate(self):
        while not self.stop_acquisitions.is_set():
            acq = self._take_aquisition()
            self.data.put((acq, self._total_hit_pixels(acq)))

    def _begin_acquisitions(self):
        if self.variable:
            self._variable_frame_rate()
        else:
            self._constant_frame_rate()

    def pause_acquisitions(self):
        self.stop_acquisitions.set()

    def start_acquisitions(self):
        self.stop_acquisitions.clear()

    def shutdown(self):
        self.stop_acquisitions.set()
        self.shutdown_flag.set()

    def get_last_acquisition(self, block=True):
        return self.data.get(block=block)

    def run(self):
        while not self.shutdown_flag.is_set():
            self._begin_acquisitions()
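A usage sketch for the acquisition thread above; the `minipix` and `pixet` handles come from the vendor SDK and are assumed to be initialized elsewhere:

acq_thread = MiniPIXAcquisition(minipix, pixet, variable_frate=True)
acq_thread.start()  # run() keeps acquiring frames until shutdown() is called
frame, hit_count = acq_thread.get_last_acquisition()  # blocks for the next frame
acq_thread.shutdown()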
Example #52
class Scanner(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.lock = Lock()
        self.status = {'status': 'connecting', 'messages': []}
        self.input_dir = '/dev/input/by-id/'
        self.open_devices = []
        self.barcodes = Queue()
        self.keymap = {
            2: ("1", "!"),
            3: ("2", "@"),
            4: ("3", "#"),
            5: ("4", "$"),
            6: ("5", "%"),
            7: ("6", "^"),
            8: ("7", "&"),
            9: ("8", "*"),
            10: ("9", "("),
            11: ("0", ")"),
            12: ("-", "_"),
            13: ("=", "+"),
            # 14 BACKSPACE
            # 15 TAB
            16: ("q", "Q"),
            17: ("w", "W"),
            18: ("e", "E"),
            19: ("r", "R"),
            20: ("t", "T"),
            21: ("y", "Y"),
            22: ("u", "U"),
            23: ("i", "I"),
            24: ("o", "O"),
            25: ("p", "P"),
            26: ("[", "{"),
            27: ("]", "}"),
            # 28 ENTER
            # 29 LEFT_CTRL
            30: ("a", "A"),
            31: ("s", "S"),
            32: ("d", "D"),
            33: ("f", "F"),
            34: ("g", "G"),
            35: ("h", "H"),
            36: ("j", "J"),
            37: ("k", "K"),
            38: ("l", "L"),
            39: (";", ":"),
            40: ("'", "\""),
            41: ("`", "~"),
            # 42 LEFT SHIFT
            43: ("\\", "|"),
            44: ("z", "Z"),
            45: ("x", "X"),
            46: ("c", "C"),
            47: ("v", "V"),
            48: ("b", "B"),
            49: ("n", "N"),
            50: ("m", "M"),
            51: (",", "<"),
            52: (".", ">"),
            53: ("/", "?"),
            55: ("*", "*"),
            # 54 RIGHT SHIFT
            57: (" ", " "),
            71: ("7", "7"),
            72: ("8", "8"),
            73: ("9", "9"),
            75: ("4", "4"),
            76: ("5", "5"),
            77: ("6", "6"),
            79: ("1", "1"),
            80: ("2", "2"),
            81: ("3", "3"),
            82: ("0", "0"),
            # 96 enter
        }

    def lockedstart(self):
        with self.lock:
            if not self.isAlive():
                self.daemon = True
                self.start()

    def set_status(self, status, message=None):
        if status == self.status['status']:
            if message is not None and (not self.status['messages'] or
                                        message != self.status['messages'][-1]):
                self.status['messages'].append(message)
        else:
            self.status['status'] = status
            if message:
                self.status['messages'] = [message]
            else:
                self.status['messages'] = []

        if status == 'error' and message:
            _logger.error('Barcode Scanner Error: ' + message)
        elif status == 'disconnected' and message:
            _logger.info('Disconnected Barcode Scanner: %s', message)

    def get_devices(self):
        try:
            if not evdev:
                return None

            new_devices = [
                device for device in listdir(self.input_dir)
                if join(self.input_dir, device) not in
                [dev.evdev.fn for dev in self.open_devices]
            ]
            scanners = [
                device for device in new_devices
                if ('semico_usb_keyboard-event-kbd' in device.lower()) or (
                    'scann' in device.lower())
            ]
            for device in scanners:
                print('device = ' + device.lower())

            for device in scanners:
                _logger.debug('opening device %s',
                              join(self.input_dir, device))
                self.open_devices.append(
                    ScannerDevice(join(self.input_dir, device)))

            if self.open_devices:
                self.set_status(
                    'connected', 'Connected to ' +
                    str([dev.evdev.name for dev in self.open_devices]))
            else:
                self.set_status('disconnected', 'Barcode Scanner Not Found')

            return self.open_devices
        except Exception as e:
            self.set_status('error', str(e))
            return []

    def release_device(self, dev):
        self.open_devices.remove(dev)

    def get_barcode(self):
        """ Returns a scanned barcode. Will wait at most 5 seconds to get a barcode, and will
            return barcode scanned in the past if they are not older than 5 seconds and have not
            been returned before. This is necessary to catch barcodes scanned while the POS is
            busy reading another barcode
        """
        print "Metodo get_barcode"
        self.lockedstart()

        while True:
            try:
                timestamp, barcode = self.barcodes.get(True, 5)
                if timestamp > time.time() - 5:
                    return barcode
            except Empty:
                return ''

    def get_status(self):
        self.lockedstart()
        return self.status

    def _get_open_device_by_fd(self, fd):
        for dev in self.open_devices:
            if dev.evdev.fd == fd:
                return dev

    def run(self):
        """ This will start a loop that catches all keyboard events, parse barcode
            sequences and put them on a timestamped queue that can be consumed by
            the point of sale's requests for barcode events
        """

        self.barcodes = Queue()

        barcode = []
        shift = False
        devices = None

        while True:  # barcodes loop
            devices = self.get_devices()

            try:
                while True:  # keycode loop
                    r, w, x = select(
                        {dev.fd: dev
                         for dev in [d.evdev for d in devices]}, [], [], 5)
                    if len(r) == 0:  # timeout
                        break

                    for fd in r:
                        device = self._get_open_device_by_fd(fd)

                        if not evdev.util.is_device(device.evdev.fn):
                            _logger.info('%s disconnected', str(device.evdev))
                            self.release_device(device)
                            break

                        events = device.evdev.read()

                        for event in events:
                            if event.type == evdev.ecodes.EV_KEY:
                                # _logger.debug('Evdev Keyboard event %s',evdev.categorize(event))
                                #print ('evdev event %s ', evdev.categorize(event))
                                print('event code: ', event.code)
                                if event.value == 1:  # keydown events
                                    if event.code in self.keymap:
                                        if device.shift:
                                            device.barcode.append(
                                                self.keymap[event.code][1])
                                            print 'with shift ', self.keymap[
                                                event.code][1]
                                        else:
                                            device.barcode.append(
                                                self.keymap[event.code][0])
                                            print "sin shift", self.keymap[
                                                event.code][0]
                                    elif event.code == 42 or event.code == 54:  # SHIFT
                                        device.shift = True
                                    elif event.code == 15:
                                        device.barcode = []
                                    elif event.code == 14:  # BACKSPACE
                                        if device.barcode:
                                            device.barcode.pop()
                                    elif event.code == 28 or event.code == 96:  # ENTER, end of barcode
                                        print device.barcode
                                        requests.request(
                                            "GET",
                                            "https://192.168.100.9:5000/find?codigo="
                                            + ''.join(device.barcode),
                                            verify=False)
                                        print "Enviando datos a la 192.168.100.9"
                                        _logger.debug(
                                            'pushing barcode %s from %s',
                                            ''.join(device.barcode),
                                            str(device.evdev))
                                        self.barcodes.put(
                                            (time.time(),
                                             ''.join(device.barcode)))
                                        device.barcode = []
                                elif event.value == 0:  #keyup events
                                    if event.code == 42 or event.code == 54:  # LEFT SHIFT
                                        device.shift = False

            except Exception as e:
                self.set_status('error', str(e))
Example #53
class IoService(object):
    
    def __init__(self, name, udpPort):
        self.name = name
        self.udpPort = udpPort
        self.eventQueue = Queue()
        self.alive, self.stopped = False, False
        self.incomingMessageCallback = []
        self.timers = {}

    def __repr__(self):
        return "{}({}, {}, alive={}, stopped={})".format(
            self.__class__.__name__, self.name, self.udpPort, self.alive, self.stopped)

    def addIncomingMessageCallback(self, callback):
        self.incomingMessageCallback.append(callback)
    
    def removeIncomingMessageCallback(self, callback):
        self.incomingMessageCallback.remove(callback)
    
    def start(self):
        self.ioHandlerThread = threading.Thread(target=self.__ioHandlerThreadFunc__)
        self.callbackHandlerThread = threading.Thread(target=self.__callbackHandlerThreadFunc__)
        self.alive = True
        [t.start() for t in (self.ioHandlerThread, self.callbackHandlerThread)]

    def stop(self):
        if self.stopped:
            raise RuntimeError("{} already stopped".format(self))
        if not self.alive:
            raise RuntimeError("{} not started".format(self))
        self.alive, self.stopped = False, True
        self.eventQueue.put(("STOP", None))
        [t.join() for t in (self.ioHandlerThread, self.callbackHandlerThread)]
    
    def createTimer(self, duration, callback, *args, **kwargs):
        if self.stopped:
            raise RuntimeError("{} already stopped".format(self))
        if not self.alive:
            raise RuntimeError("{} not started".format(self))
        return threading.Timer(duration, self.__onTimerExpiration__, args=[callback, args, kwargs])
    
    def __onTimerExpiration__(self, callback, args, kwargs):
        self.asyncCall(callback, *args, **kwargs)
    
    def asyncCall(self, callback, *args, **kwargs):
        self.eventQueue.put(("ASYNCCALL", (callback, args, kwargs)))

    def sendMessage(self, destination, interface, channelInfo, message):
        if self.stopped:
            raise RuntimeError("{} already stopped".format(self))
        if not self.alive:
            raise RuntimeError("{} not started".format(self))
        def snd(message, addr):
            packet = {
                "source": self.name,
                "interface": interface,
                "channelInfo": channelInfo,
                "message": message,
            }
            successful = self.sock.sendto(str(packet), addr) != -1
            return successful
        if isinstance(destination, tuple):
            return snd(message, destination)
        elif isinstance(destination, str):
            peerAddr = self.peers.get(destination)
            if not peerAddr:
                raise Exception("No peer found associated with '{}', ignoring message...".format(destination))
            else:
                return snd(message, peerAddr)

    def __ioHandlerThreadFunc__(self):
        self.peers = {}
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.sock.settimeout(0.1)
        self.sock.bind((localhost(), self.udpPort))
        while self.alive:
            try:
                msg, addr = self.sock.recvfrom(2048)
            except socket.timeout:
                continue
            except socket.error:
                continue
            try:
                packet = eval(msg)
            except SyntaxError:
                assertionLogger.error("eval({}) raised SyntaxError, ignoring message...".format(msg))
                continue
            source = packet["source"]
            msgTraceLogger.info("{}{} -> {}({}): {}".format(source, addr, self.name, self.udpPort, pprint.pformat(packet)))
            if not self.peers.get(source):
                self.peers[source] = addr
            self.eventQueue.put(("PACKET", packet))
        self.sock.close()
    
    def __callbackHandlerThreadFunc__(self):
        while self.alive:
            event, param = self.eventQueue.get()
            if event == "STOP":
                break
            elif event == "PACKET":
                packet = param
                for cb in self.incomingMessageCallback:
                    cb(packet["source"], packet["interface"], packet["channelInfo"], packet["message"])
            elif event == "ASYNCCALL":
                callback, args, kwargs = param
                callback(*args, **kwargs)
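A quick sketch of two services talking over UDP; the names and ports are illustrative, and it assumes localhost() resolves to the loopback address and that sleep is imported from the time module:

def on_message(source, interface, channelInfo, message):
    print("bob got {} from {}".format(message, source))

a = IoService("alice", 9001)
b = IoService("bob", 9002)
b.addIncomingMessageCallback(on_message)
a.start()
b.start()
sleep(0.5)  # give the I/O threads a moment to bind their sockets
# send to an explicit (host, port) tuple; "alice" then becomes a known peer of "bob"
a.sendMessage(("127.0.0.1", 9002), "if0", None, "hello")
a.stop()
b.stop()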
class WorkerThread(threading.Thread):
    def __init__(self):
        super(WorkerThread, self).__init__()
        self.input_queue = Queue(60)
        self.__terminate = False
        self.isBusy = False

    def add_task(self, task):
        callback = task['callback']
        try:
            self.input_queue.put(task, block=False)
        except Exception:  # queue is full; report failure through the callback
            callback(None, None, None)

        return

    def close(self):
        self.isBusy = False
        self.__terminate = True
        self.input_queue.put(None)
        return

    def run(self):
        while True:
            task = self.input_queue.get()
            if task is None:
                break
            if self.__terminate:
                break
            try:
                self.isBusy = True
                op_type = task['opType']
                if op_type == CFC_OP_TYPE.DOWNLOAD:
                    self.__run_download(**task)
                elif op_type == CFC_OP_TYPE.READ:
                    self.__run_read(**task)
                elif op_type == CFC_OP_TYPE.WRITE:
                    self.__run_write(**task)
                elif op_type == CFC_OP_TYPE.CHECK:
                    self.__run_check(**task)
            except:
                LOG_CURRENT_EXCEPTION()

            self.isBusy = False
            self.input_queue.task_done()

        self.input_queue.task_done()
        return

    def __run_download(self, url, modified_time, callback, **params):
        startTime = time.time()
        try:
            fh = file = None
            last_modified = expires = None
            req = urllib2.Request(url)
            req.add_header('User-Agent', _CLIENT_VERSION)
            if modified_time and isinstance(modified_time, str):
                req.add_header('If-Modified-Since', modified_time)
                opener = urllib2.build_opener(NotModifiedHandler())
                fh = opener.open(req, timeout=10)
                headers = fh.info()
                if hasattr(fh, 'code'):
                    code = fh.code
                    if code in (304, 200):
                        info = fh.info()
                        last_modified = info.getheader('Last-Modified')
                        expires = info.getheader('Expires')
                    if code == 200:
                        file = fh.read()
            else:
                opener = urllib2.build_opener(urllib2.BaseHandler())
                fh = opener.open(req, timeout=10)
                info = fh.info()
                last_modified = info.getheader('Last-Modified')
                expires = info.getheader('Expires')
                file = fh.read()
            if expires is None:
                expires = makeHttpTime(time.gmtime())
            else:
                ctime = getSafeDstUTCTime()
                expiresTmp = parseHttpTime(expires)
                if expiresTmp > ctime + _MAX_LIFE_TIME or expiresTmp < ctime:
                    expires = makeHttpTime(
                        time.gmtime(time.time() + _MAX_LIFE_TIME))
        except urllib2.HTTPError as e:
            LOG_WARNING('Http error. Code: %d, url: %s' % (e.code, url))
        except urllib2.URLError as e:
            LOG_WARNING('Url error. Reason: %s, url: %s' %
                        (str(e.reason), url))
        except Exception as e:
            LOG_ERROR("Client couldn't download file.", e, url)
        finally:
            if fh:
                fh.close()

        _LOG_EXECUTING_TIME(startTime, '__run_download', 10.0)
        callback(file, last_modified, expires)
        return

    def __run_read(self, name, db, callback, **params):
        file = None
        try:
            startTime = time.time()
            if db is not None and db.has_key(name):
                file = db[name]
            _LOG_EXECUTING_TIME(startTime, '__run_read')
        except Exception as e:
            LOG_WARNING("Client couldn't read file.", e, name)

        callback(file, None, None)
        return

    def __run_write(self, name, data, db, callback, **params):
        try:
            startTime = time.time()
            if db is not None:
                db[name] = data
            _LOG_EXECUTING_TIME(startTime, '__run_write', 5.0)
        except:
            LOG_CURRENT_EXCEPTION()

        callback(None, None, None)
        return

    def __run_check(self, name, db, callback, **params):
        res = False
        try:
            startTime = time.time()
            if db is not None:
                res = db.has_key(name)
            _LOG_EXECUTING_TIME(startTime, '__run_check')
        except:
            LOG_CURRENT_EXCEPTION()

        callback(res, None, None)
        return
Example #55
query2 = ["234", "345", "456"]


def worker():
    while True:
        item = q.get()
        print "start worker {} at {}".format(item, time.time())
        time.sleep(10)
        print "end worker {} at {}".format(item, time.time())
        q.task_done()


q = Queue()
num_worker_threads = 3
for i in range(num_worker_threads):
    t = Thread(target=worker)
    t.daemon = True
    t.start()

#items = [ "qwe", "asd", "zxc" ]
#for item in items:
#    q.put(item)
#    time.sleep(1)
#q.join()
count = 1
while True:
    print "queue - {}".format(count)
    q.put(count)
    count = count + 1
    time.sleep(3)
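Note that this producer slowly outpaces the workers: items arrive every 3 seconds (about 0.33/s) while three workers spending 10 seconds per item drain only 0.3/s, so the unbounded queue grows over time. A minimal variation that adds backpressure (the maxsize value is an arbitrary illustration):

q = Queue(maxsize=9)  # bounded queue: put() blocks once 9 items are waiting

count = 1
while True:
    print "queue - {}".format(count)
    q.put(count)  # blocks until a worker frees a slot
    count = count + 1
    time.sleep(3)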
class FileWriter(Thread):
    """
    This thread reads log lines from a queue and writes these to a file passed as log_file_path.
    The log line queue is filled with new log lines by calling put().
    Thread quits if stop() is called. If an exception is raised when writing to file, this thread
    will callback to its owner to stop operation.
    The optional read_queue_timeout controls how quickly the thread notices a stop()
    call while waiting on the queue.
    """
    READ_NEW_LOGLINE_TMO = 0.5

    def __init__(self,
                 log_file_path,
                 callback,
                 read_queue_timeout=READ_NEW_LOGLINE_TMO,
                 encoding='utf8'):
        """
        :param log_file_path: The file path to write log lines to.
        :param callback: A callback method for calling back to application when error occurs.
        :param read_queue_timeout: The read timeout to avoid blocking.
        :param encoding: The encoding format when writing to file.
        """
        super(FileWriter, self).__init__(name=self.__class__.__name__)
        self._read_queue_timeout = read_queue_timeout
        self._log_file_path = log_file_path
        self._encoding = encoding

        self.setDaemon(True)
        self._log_line_queue = Queue()
        self._stop = Event()
        self.logger = logging.getLogger(self.__class__.__name__)
        self._callback = callback
        codecs.register_error('backslashreplace', self.backslash_replace)

    def __repr__(self):
        return '{}({!r}, {!r}, {!r}, {!r})'.format(self.__class__.__name__,
                                                   self.getName(),
                                                   self._read_queue_timeout,
                                                   self._log_file_path,
                                                   self._encoding)

    def put(self, text_line):
        """
        Puts a text line to the text queue to be written to the specified file for logging.
        :param text_line: A text line to be written to file.
        """
        self._log_line_queue.put(text_line)  # Queue calls are thread-safe

    def stop(self):
        """
        Stop writing to a log file from the internal queue and commit suicide.
        """
        self._stop.set()
        self.logger.debug('writer stopped')
        if self.is_alive():
            self.join()
        self.logger.debug('writer has terminated')

    @staticmethod
    def backslash_replace(error):
        """
        An error handler to be called if escape characters are read from the log line queue input.
        """
        return u"".join([u"\\x{:x}".format(ord(error.object[i]))
                         for i in range(error.start, error.end)]), error.end

    def run(self):
        try:
            with codecs.open(self._log_file_path, 'wb', self._encoding) as log_file:
                self.logger.info('start writing to file.')

                while not self._stop.is_set():
                    try:  # timeout avoids blocking in order to be responsive to stop calls
                        log_line = self._log_line_queue.get(timeout=self._read_queue_timeout)
                    except QueueEmpty:
                        continue
                    else:
                        self._log_line_queue.task_done()
                        log_file.write(log_line + '\n')
        except Exception as e:  # this may occur if codecs fails somehow
            self.logger.error('Error: {}'.format(e))
            self._callback('{} has stopped running. error: {}'.format(self.getName(), str(e)))  # call back error

        self.logger.info('stopped writing to file.')
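Typical wiring, as a sketch; the callback here just prints, and the log path is illustrative:

def on_writer_error(message):
    # invoked from the writer thread if writing to the file fails
    print("writer failed: {}".format(message))

writer = FileWriter('/tmp/app.log', on_writer_error)
writer.start()
writer.put(u'first log line')
writer.put(u'second log line')
writer.stop()  # sets the stop event and joins the thread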
Example #57
class rsync_in_parallel(object):
    """Main class for managing parallel rsyncs"""
    def __init__(self, rsync_cmd, num_threads=2, debug=False):
        """arguments are:
        the user's rsync command,
        the number of threads to spawn for file transfers (default=2),
        and a flag to show debug information (default=False)"""
        self.rsync_cmd = rsync_cmd
        self.num_threads = num_threads
        self.debug = debug
        self._initialize_file_transfer_list()

        self.queue = Queue()

        for i in range(self.num_threads):
            worker = Thread(target=self._launcher, args=(i, ))
            worker.setDaemon(True)
            worker.start()

    def _initialize_file_transfer_list(self):
        """This method constructs a list of files for (later) parallel transfer"""

        # we run the user's rsync command, but we add two flags:
        #   --dry-run --itemize-changes
        # this allows us to find files that need to be transferred
        p = subprocess.Popen(self.rsync_cmd + " --dry-run --itemize-changes",
                             shell=True,
                             stdout=subprocess.PIPE)
        out = p.stdout.readlines()
        # see the rsync man page docs for a complete description of the --itemize-changes output
        # to make sense of the regular expression below; we are looking to transfer files
        # ('f' in the second column below). we will transfer dirs, etc. later, and all at once.
        # rsync 3.09 uses 11 characters for -i output: YXcstpoguax
        # rsync 2.68 uses  9 characters for -i output: YXcstpogz
        re_obj = re.compile(
            r"^[<>ch.]f[c.+][s.+][tT.+][p.+][o.+][g.+][uz.+][a.+]?[x.+]?\s(?P<file_name>.+)$"
        )

        # a list of all files for parallel/threaded sync
        self.file_list = []
        for line in out:
            #print "LINE:" + line
            match = re_obj.match(line.strip())

            if (match):
                file_path = match.groupdict()['file_name']
                self.file_list.append('/' + file_path)
                #print "MATCH:" + file_path

        if len(self.file_list) == 0:
            print "WARN: no files will be transferred in parallel; check the output of --dry-run --itemize-changes with your rsync command to verify"

    def _launcher(self, i):
        """Spawns an rsync process to update/sync a single file"""
        while True:
            file_path = self.queue.get()
            if self.debug:
                print "Thread %s: %s" % (i, file_path)

            # take the users's rsync command but use --files-from to just send a specific file
            # (parent directories of the file will be created automatically if they are needed)
            temp = tempfile.NamedTemporaryFile()
            temp.write(file_path)
            temp.flush()

            cmd = "%s --files-from=%s" % (self.rsync_cmd, temp.name)
            if self.debug:
                print "CALLING:" + cmd

            ret = subprocess.call(cmd, shell=True)
            if ret != 0:
                print "WARN: could not transfer %s, rsync failed with error code=%s; continuing..." % (
                    file_path, ret)

            temp.close()
            self.queue.task_done()

    def sync_files(self):
        """The main entry point to start the sync processes"""

        # create a (synchronized) queue for the threads to access
        for file_path in self.file_list:
            self.queue.put(file_path)
        self.queue.join()

        # now we perform the final call to rsync to sync directories, symlinks,
        # perform deletes (if --delete was in the original command), etc.
        # i.e., everything that remains beyond the parallel file transfers
        # that have already occurred.

        # we could just issue the original command, but it will be faster to
        # explicitly --exclude-from the files we already transferred (especially
        # when --checksum is used in the original command)
        temp = tempfile.NamedTemporaryFile()
        for file_path in self.file_list:
            temp.write(file_path + "\n")
        temp.flush()
        cmd = "%s --exclude-from=%s" % (self.rsync_cmd, temp.name)

        if (self.debug):
            print "Calling final rsync:" + cmd
        ret = subprocess.call(cmd, shell=True)
        if ret != 0:
            print "WARN: potential problem with final rsync call, rsync failed with error code=%s" % ret

        temp.close()
        return ret
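Usage is a thin wrapper around an ordinary rsync command line; the paths and host below are illustrative:

syncer = rsync_in_parallel("rsync -a /data/src/ backup:/data/dst/",
                           num_threads=4, debug=True)
ret = syncer.sync_files()  # per-file parallel passes, then one final catch-all rsync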
Example #58
def find_parameters(dataset_pathname, options=''):
    def update_param(c, g, rate, best_c, best_g, best_rate, worker, resumed):
        if (rate > best_rate) or (rate == best_rate and g == best_g
                                  and c < best_c):
            best_rate, best_c, best_g = rate, c, g
        stdout_str = '[{0}] {1} {2} (best '.format \
            (worker, ' '.join(str(x) for x in [c, g] if x is not None), rate)
        output_str = ''
        if c is not None:
            stdout_str += 'c={0}, '.format(2.0**best_c)
            output_str += 'log2c={0} '.format(c)
        if g is not None:
            stdout_str += 'g={0}, '.format(2.0**best_g)
            output_str += 'log2g={0} '.format(g)
        stdout_str += 'rate={0})'.format(best_rate)
        print(stdout_str)
        if options.out_pathname and not resumed:
            output_str += 'rate={0}\n'.format(rate)
            result_file.write(output_str)
            result_file.flush()

        return best_c, best_g, best_rate

    options = GridOption(dataset_pathname, options)

    if options.gnuplot_pathname:
        gnuplot = Popen(options.gnuplot_pathname,
                        stdin=PIPE,
                        stdout=PIPE,
                        stderr=PIPE).stdin
    else:
        gnuplot = None

    # put jobs in queue

    jobs, resumed_jobs = calculate_jobs(options)
    job_queue = Queue(0)
    result_queue = Queue(0)

    for (c, g) in resumed_jobs:
        result_queue.put(('resumed', c, g, resumed_jobs[(c, g)]))

    for line in jobs:
        for (c, g) in line:
            if (c, g) not in resumed_jobs:
                job_queue.put((c, g))

    # hack the queue to become a stack --
    # this is important when some thread
    # fails and re-puts a job: if we still
    # used FIFO, the re-put job would go to
    # the end of the queue, and the graph
    # would only be updated at the very end

    job_queue._put = job_queue.queue.appendleft
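    # (in CPython, put() invokes _put() while holding the queue's internal
    # lock, so swapping in appendleft here remains thread-safe)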

    # fire telnet workers

    if telnet_workers:
        nr_telnet_worker = len(telnet_workers)
        username = getpass.getuser()
        password = getpass.getpass()
        for host in telnet_workers:
            worker = TelnetWorker(host, job_queue, result_queue, host,
                                  username, password, options)
            worker.start()

    # fire ssh workers

    if ssh_workers:
        for host in ssh_workers:
            worker = SSHWorker(host, job_queue, result_queue, host, options)
            worker.start()

    # fire local workers

    for i in range(nr_local_worker):
        worker = LocalWorker('local', job_queue, result_queue, options)
        worker.start()

    # gather results

    done_jobs = {}

    if options.out_pathname:
        if options.resume_pathname:
            result_file = open(options.out_pathname, 'a')
        else:
            result_file = open(options.out_pathname, 'w')

    db = []
    best_rate = -1
    best_c, best_g = None, None

    for (c, g) in resumed_jobs:
        rate = resumed_jobs[(c, g)]
        best_c, best_g, best_rate = update_param(c, g, rate, best_c, best_g,
                                                 best_rate, 'resumed', True)

    for line in jobs:
        for (c, g) in line:
            while (c, g) not in done_jobs:
                (worker, c1, g1, rate1) = result_queue.get()
                done_jobs[(c1, g1)] = rate1
                if (c1, g1) not in resumed_jobs:
                    best_c, best_g, best_rate = update_param(
                        c1, g1, rate1, best_c, best_g, best_rate, worker,
                        False)
            db.append((c, g, done_jobs[(c, g)]))
        if gnuplot and options.grid_with_c and options.grid_with_g:
            redraw(db, [best_c, best_g, best_rate], gnuplot, options)
            redraw(db, [best_c, best_g, best_rate], gnuplot, options, True)

    if options.out_pathname:
        result_file.close()
    job_queue.put((WorkerStopToken, None))
    best_param, best_cg = {}, []
    if best_c is not None:
        best_param['c'] = 2.0**best_c
        best_cg += [2.0**best_c]
    if best_g is not None:
        best_param['g'] = 2.0**best_g
        best_cg += [2.0**best_g]
    print('{0} {1}'.format(' '.join(map(str, best_cg)), best_rate))

    return best_rate, best_param
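
The _put override above is the detail worth isolating: it turns the FIFO Queue into a LIFO stack while keeping all of the queue's locking intact, so a re-queued job is picked up first. A standalone sketch of the same trick (not from the original; it relies on Queue internals, so treat it as a CPython-specific hack):

from queue import Queue  # Python 3; "from Queue import Queue" on Python 2

q = Queue()
q._put = q.queue.appendleft  # put() now pushes to the front of the deque
for job in (1, 2, 3):
    q.put(job)
print(q.get(), q.get(), q.get())  # 3 2 1 -- newest job comes out first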
Example #59
class Dispatcher(object):
    """Dispatcher object that communicates and coordinates individual workers.

    Warnings
    --------
    There should never be more than one dispatcher running at any one time.

    """

    def __init__(self, maxsize=MAX_JOBS_QUEUE, ns_conf=None):
        """Partly initializes the dispatcher.

        A full initialization (including initialization of the workers) requires a call to
        :meth:`~gensim.models.lda_dispatcher.Dispatcher.initialize`

        Parameters
        ----------
        maxsize : int, optional
                Maximum number of jobs to be kept pre-fetched in the queue.
        ns_conf : dict of (str, object)
            Sets up the name server configuration for the pyro daemon server of dispatcher.
            This also helps to keep track of your objects in your network by using logical object names
            instead of the exact object name (or id) and its location.

        """
        self.maxsize = maxsize
        self.callback = None
        self.ns_conf = ns_conf if ns_conf is not None else {}

    @Pyro4.expose
    def initialize(self, **model_params):
        """Fully initialize the dispatcher and all its workers.

        Parameters
        ----------
        **model_params
            Keyword parameters used to initialize individual workers, see :class:`~gensim.models.ldamodel.LdaModel`.

        Raises
        ------
        RuntimeError
            When no workers are found (the :mod:`gensim.models.lda_worker` script must be run beforehand).

        """
        self.jobs = Queue(maxsize=self.maxsize)
        self.lock_update = threading.Lock()
        self._jobsdone = 0
        self._jobsreceived = 0

        self.workers = {}
        with utils.getNS(**self.ns_conf) as ns:
            self.callback = Pyro4.Proxy(ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX])
            for name, uri in iteritems(ns.list(prefix=LDA_WORKER_PREFIX)):
                try:
                    worker = Pyro4.Proxy(uri)
                    workerid = len(self.workers)
                    # make time consuming methods work asynchronously
                    logger.info("registering worker #%i at %s", workerid, uri)
                    worker.initialize(workerid, dispatcher=self.callback, **model_params)
                    self.workers[workerid] = worker
                except Pyro4.errors.PyroError:
                    logger.warning("unresponsive worker at %s,deleting it from the name server", uri)
                    ns.remove(name)

        if not self.workers:
            raise RuntimeError('no workers found; run some lda_worker scripts on your machines first!')

    @Pyro4.expose
    def getworkers(self):
        """Return pyro URIs of all registered workers.

        Returns
        -------
        list of URIs
            The pyro URIs for each worker.

        """
        return [worker._pyroUri for worker in itervalues(self.workers)]

    @Pyro4.expose
    def getjob(self, worker_id):
        """Atomically pop a job from the queue.

        Parameters
        ----------
        worker_id : int
            The worker that requested the job.

        Returns
        -------
        iterable of list of (int, float)
            The corpus in BoW format.

        """
        logger.info("worker #%i requesting a new job", worker_id)
        job = self.jobs.get(block=True, timeout=1)
        logger.info("worker #%i got a new job (%i left)", worker_id, self.jobs.qsize())
        return job

    @Pyro4.expose
    def putjob(self, job):
        """Atomically add a job to the queue.

        Parameters
        ----------
        job : iterable of list of (int, float)
            The corpus in BoW format.

        """
        self._jobsreceived += 1
        self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)
        logger.info("added a new job (len(queue)=%i items)", self.jobs.qsize())

    @Pyro4.expose
    def getstate(self):
        """Merge states from across all workers and return the result.

        Returns
        -------
        :class:`~gensim.models.ldamodel.LdaState`
            Merged resultant state

        """
        logger.info("end of input, assigning all remaining jobs")
        logger.debug("jobs done: %s, jobs received: %s", self._jobsdone, self._jobsreceived)
        i = 0
        count = 10
        while self._jobsdone < self._jobsreceived:
            time.sleep(0.5)  # check every half a second
            i += 1
            if i > count:
                i = 0
                for workerid, worker in iteritems(self.workers):
                    logger.info("checking aliveness for worker %s", workerid)
                    worker.ping()

        logger.info("merging states from %i workers", len(self.workers))
        workers = list(self.workers.values())
        result = workers[0].getstate()
        for worker in workers[1:]:
            result.merge(worker.getstate())

        logger.info("sending out merged state")
        return result

    @Pyro4.expose
    def reset(self, state):
        """Reinitialize all workers for a new EM iteration.

        Parameters
        ----------
        state : :class:`~gensim.models.ldamodel.LdaState`
            State of :class:`~gensim.models.lda.LdaModel`.

        """
        for workerid, worker in iteritems(self.workers):
            logger.info("resetting worker %s", workerid)
            worker.reset(state)
            worker.requestjob()
        self._jobsdone = 0
        self._jobsreceived = 0

    @Pyro4.expose
    @Pyro4.oneway
    @utils.synchronous('lock_update')
    def jobdone(self, workerid):
        """A worker has finished its job. Log this event and then asynchronously transfer control back to the worker.

        Callback used by workers to notify when their job is done.

        The job done event is logged and then control is asynchronously transferred back to the worker
        (who can then request another job). In this way, control flow basically oscillates between
        :meth:`gensim.models.lda_dispatcher.Dispatcher.jobdone` and :meth:`gensim.models.lda_worker.Worker.requestjob`.

        Parameters
        ----------
        workerid : int
            The ID of the worker that finished the job (used for logging).

        """
        self._jobsdone += 1
        logger.info("worker #%s finished job #%i", workerid, self._jobsdone)
        self.workers[workerid].requestjob()  # tell the worker to ask for another job, asynchronously (one-way)

    def jobsdone(self):
        """Wrap :attr:`~gensim.models.lda_dispatcher.Dispatcher._jobsdone` needed for remote access through proxies.

        Returns
        -------
        int
            Number of jobs already completed.

        """
        return self._jobsdone

    @Pyro4.oneway
    def exit(self):
        """Terminate all registered workers and then the dispatcher."""
        for workerid, worker in iteritems(self.workers):
            logger.info("terminating worker %s", workerid)
            worker.exit()
        logger.info("terminating dispatcher")
        os._exit(0)  # exit the whole process (not just this thread, as sys.exit() would)
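
Stripped of Pyro4, the dispatcher's flow control is just two counters behind a lock: putjob() bumps _jobsreceived, the jobdone() callback bumps _jobsdone, and getstate() polls until they match (pinging workers along the way so a hung worker surfaces). A minimal sketch of that handshake (the JobAccounting name and its method names are illustrative, not gensim API):

import threading
import time

class JobAccounting(object):
    """Counter pair mirroring the dispatcher's _jobsreceived/_jobsdone bookkeeping."""

    def __init__(self):
        self._lock = threading.Lock()
        self._received = 0
        self._done = 0

    def putjob(self):
        with self._lock:
            self._received += 1

    def jobdone(self):
        with self._lock:
            self._done += 1

    def wait_until_drained(self, poll=0.5):
        # mirrors getstate(): block until every received job is reported done
        while True:
            with self._lock:
                if self._done >= self._received:
                    return
            time.sleep(poll)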
Example #60
        files = [dir + '/' + f for f in files]
        if args.fastHadd:
            if len(files) > 1:
                cmd = ['fastHadd', 'add', '-j', '6', '-o', '%s.dat' % dir]
                cmd.extend(files)
                os.system(' '.join(cmd))
            else:
                os.system('cp %s %s.dat' % (files[0], dir))
            cmd = [
                'fastHadd', 'convert', '-o',
                '%s.root' % dir,
                '%s.dat' % dir
            ]
            os.system(' '.join(cmd))
        else:
            tasks_queue.put((outfile, files))
    else:
        raise IOError('You asked to merge %i files, but only %i were found.'
                      ' Something must have gone wrong in the batch jobs.' %
                      (num, len(files)))

if args.fastHadd: sys.exit(0)
print "starting parallel merging..."
tasks = []
task_map = {}
while not tasks_queue.empty():
    running = [t for t in tasks if t.poll() is None]
    done = [t for t in tasks if t.poll() is not None]
    # check hadd success
    if not all(i.returncode == 0 for i in done):
        set_trace()