Example #1
def test_normalRun(self):
    """
    Runs normally
    """
    saved_coverage = process.coverage
    process.coverage = MagicMock()
    self.addCleanup(setattr, process, 'coverage', saved_coverage)
    # Parent directory setup
    os.chdir(self.tmpdir)
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    basename = os.path.basename(sub_tmpdir)
    # Child setup
    fh = open(os.path.join(basename, '__init__.py'), 'w')
    fh.write('\n')
    fh.close()
    fh = open(os.path.join(basename, 'test_pool_runner_dotted.py'), 'w')
    fh.write(dedent(
        """
        import unittest
        class A(unittest.TestCase):
            def testPass(self):
                pass
        """))
    fh.close()
    module_name = basename + '.test_pool_runner_dotted.A.testPass'
    result = Queue()
    poolRunner(module_name, result, 1)
    result.get()
    self.assertEqual(len(result.get().passing), 1)
Example #2
def main():
    """
    Main function of the proxy scanner.
    """
    global pl, output, q

    parser = ArgumentParser(description='Scans a list of proxies to determine which work for HTTPS.')
    parser.add_argument('--output', default='output/proxies.txt', type=str,
        help='The file in which to store the found proxies.')
    parser.add_argument('--threads', default=10, type=int,
        help='Number of threads to use.')

    args = parser.parse_args()
    output = args.output

    threads = args.threads
    q = Queue(threads * 3)

    print 'Starting threads.'
    for x in xrange(threads):
        t = Thread(target=check_proxies)
        t.daemon = True
        t.start()

    print 'Queueing proxies.'
    for proxy in proxies.proxies:
        q.put(proxy)
    q.join()

    save_proxies()
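A minimal sketch of the check_proxies worker referenced above, assuming each item on the shared queue q is a proxy address; the actual HTTPS test is left as a placeholder:

def check_proxies():
    while True:
        proxy = q.get()
        try:
            pass  # test 'proxy' over HTTPS here and record it if it works
        finally:
            q.task_done()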
Example #3
class EmitterThread(threading.Thread):

    def __init__(self, *args, **kwargs):
        self.__name = kwargs['name']
        self.__emitter = kwargs.pop('emitter')()
        self.__logger = kwargs.pop('logger')
        self.__config = kwargs.pop('config')
        self.__max_queue_size = kwargs.pop('max_queue_size', 100)
        self.__queue = Queue(self.__max_queue_size)
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True

    def run(self):
        while True:
            (data, headers) = self.__queue.get()
            try:
                self.__logger.debug('Emitter %r handling a packet', self.__name)
                self.__emitter(data, self.__logger, self.__config)
            except Exception:
                self.__logger.error('Failure during operation of emitter %r', self.__name, exc_info=True)

    def enqueue(self, data, headers):
        try:
            self.__queue.put((data, headers), block=False)
        except Full:
            self.__logger.warn('Dropping packet for %r due to backlog', self.__name)
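A hedged usage sketch; the emitter factory, logger, and config below are stand-ins for whatever the surrounding agent provides:

import logging

def debug_emitter(data, logger, config):
    logger.debug('emitted %r', data)

emitter = EmitterThread(name='example-emitter',
                        emitter=lambda: debug_emitter,  # factory returning the emitter callable
                        logger=logging.getLogger('emitter'),
                        config={})
emitter.start()
emitter.enqueue({'metrics': []}, headers={})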
Example #4
def test_error(self):
    """
    Exception raised running unit test is reported as an error
    """
    # Parent directory setup
    os.chdir(self.tmpdir)
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    basename = os.path.basename(sub_tmpdir)
    # Child setup
    fh = open(os.path.join(basename, '__init__.py'), 'w')
    fh.write('\n')
    fh.close()
    fh = open(os.path.join(basename, 'test_pool_runner_dotted_fail.py'), 'w')
    fh.write(dedent(
        """
        import unittest
        class A(unittest.TestCase):
            def testError(self):
                raise AttributeError
        """))
    fh.close()
    module_name = basename + '.test_pool_runner_dotted_fail.A.testError'
    result = Queue()
    poolRunner(module_name, result)
    result.get()
    self.assertEqual(len(result.get().errors), 1)
Example #5
class AntiFlapping(object):
    """
    AntiFlapping class to process events in a timely manner
    """
    def __init__(self, window):
        self.window = window
        self.tasks = Queue(maxsize=1)
        self._window_ended = True
        self._thread = Thread(name="AntiFlapping", target=self._run)
        self._thread.start()

    def newEvent(self, func, kwargs={}):
        """
        Queue a new event, unless the previous anti-flapping window is still open.
        """
        if not self.tasks.full() and self._window_ended:
            self.tasks.put({'func': func, 'args':kwargs})

    def _run(self):
        """
        internal runloop that will fire tasks in order.
        """
        while True:
            task = self.tasks.get()
            self._window_ended = False
            sleep(self.window)
            self._window_ended = True
            if task['args']:
                task['func'](**task['args'])
            else:
                task['func']()
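A brief usage sketch, assuming the imports the class needs (Queue, Thread, sleep) are in place:

def restart_service(name=None):
    print('restarting %s' % name)

guard = AntiFlapping(window=5.0)
guard.newEvent(restart_service, {'name': 'nginx'})  # runs after the 5s window
guard.newEvent(restart_service)  # ignored if the previous window is still open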
Example #6
class TestStatsdLoggingDelegation(unittest.TestCase):
    def setUp(self):
        self.port = 9177
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind(('localhost', self.port))
        self.queue = Queue()
        self.reader_thread = Thread(target=self.statsd_reader)
        self.reader_thread.setDaemon(1)
        self.reader_thread.start()

    def tearDown(self):
        # The "no-op when disabled" test doesn't set up a real logger, so
        # create one here so we can tell the reader thread to stop.
        if not getattr(self, 'logger', None):
            self.logger = utils.get_logger({
                'log_statsd_host': 'localhost',
                'log_statsd_port': str(self.port),
            }, 'some-name')
        self.logger.increment('STOP')
        self.reader_thread.join(timeout=4)
        self.sock.close()
        del self.logger
        time.sleep(0.15)  # avoid occasional "Address already in use"?

    def statsd_reader(self):
        while True:
            try:
                payload = self.sock.recv(4096)
                if payload and 'STOP' in payload:
                    return 42
                self.queue.put(payload)
            except Exception, e:
                sys.stderr.write('statsd_reader thread: %r' % (e,))
                break
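Example #7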
  def test_sigpipe(self):
    r, w = os.pipe()
    outstream = os.fdopen(w, 'w')
    task = self.create_task(self.context(console_outstream=outstream))
    raised = Queue(maxsize=1)

    def execute():
      try:
        task.execute()
      except IOError as e:
        raised.put(e)

    execution = threading.Thread(target=execute, name='ConsoleTaskTestBase_sigpipe')
    execution.setDaemon(True)
    execution.start()
    try:
      data = os.read(r, 5)
      self.assertEqual('jake\n', data)
      os.close(r)
    finally:
      task.stop()
      execution.join()

    with self.assertRaises(Empty):
      e = raised.get_nowait()

      # Instead of taking the generic assertRaises raises message, provide a more detailed failure
      # message that shows exactly what untrapped error was on the queue.
      self.fail('task raised {0}'.format(e))
Example #8
    def __test_event_fire_master(self):
        events = Queue()

        def get_event(events):
            me = event.MasterEvent(self.master_opts['sock_dir'])
            events.put_nowait(
                me.get_event(wait=10, tag='salttest', full=False)
            )

        threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)   # Allow multiprocessing.Process to start

        ret = self.run_function(
            'event.fire_master',
            ['event.fire_master: just test it!!!!', 'salttest']
        )
        self.assertTrue(ret)

        eventfired = events.get(block=True, timeout=10)
        self.assertIsNotNone(eventfired)
        self.assertIn(
            'event.fire_master: just test it!!!!', eventfired['data']
        )

        ret = self.run_function(
            'event.fire_master',
            ['event.fire_master: just test it!!!!', 'salttest-miss']
        )
        self.assertTrue(ret)

        with self.assertRaises(Empty):
            eventfired = events.get(block=True, timeout=10)
Example #9
    def __test_event_fire_ipc_mode_tcp(self):
        events = Queue()

        def get_event(events):
            me = event.MinionEvent(**self.sub_minion_opts)
            events.put_nowait(
                me.get_event(wait=10, tag='salttest', full=False)
            )

        threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)   # Allow multiprocessing.Process to start

        ret = self.run_function(
            'event.fire', ['event.fire: just test it!!!!', 'salttest'],
            minion_tgt='sub_minion'
        )
        self.assertTrue(ret)

        eventfired = events.get(block=True, timeout=10)
        self.assertIsNotNone(eventfired)
        self.assertIn('event.fire: just test it!!!!', eventfired)

        ret = self.run_function(
            'event.fire', ['event.fire: just test it!!!!', 'salttest-miss'],
            minion_tgt='sub_minion'
        )
        self.assertTrue(ret)

        with self.assertRaises(Empty):
            eventfired = events.get(block=True, timeout=10)
Example #10
class PooledPg:
	"""A very simple PostgreSQL connection pool.

	After you have created the connection pool,
	you can get connections using connection().
	"""

	def __init__(self, maxconnections, *args, **kwargs):
		"""Set up the PostgreSQL connection pool.

		maxconnections: the number of connections cached in the pool
		args, kwargs: the parameters that shall be used to establish
			the PostgreSQL connections using pg.connect()
		"""
		# Since there is no connection level safety, we
		# build the pool using the synchronized queue class
		# that implements all the required locking semantics.
		from Queue import Queue
		self._queue = Queue(maxconnections)
		# Establish all database connections (it would be better to
		# only establish a part of them now, and the rest on demand).
		for i in range(maxconnections):
			self.cache(PgConnection(*args, **kwargs))

	def cache(self, con):
		""""Add or return a connection to the pool."""
		self._queue.put(con)

	def connection(self):
		""""Get a connection from the pool."""
		return PooledPgConnection(self, self._queue.get())
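A usage sketch, assuming pg.connect()-style keyword arguments and that closing a PooledPgConnection hands the connection back via pool.cache():

pool = PooledPg(5, dbname='example', host='localhost')
con = pool.connection()    # blocks until one of the 5 cached connections is free
try:
    con.query('SELECT 1')  # assuming the PyGreSQL query API
finally:
    con.close()            # assumed to return the connection to the pool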
Example #11
class DecodingThread(threading.Thread):
    """Thread for concurrent simulation.

    A :class:`DecodingThread` is responsible for one specific decoder. As soon as an item is
    placed on the :attr:`jobQueue`, decoding starts. After finishing, the attributes
    :attr:`time`, :attr:`error`, :attr:`objVal` and :attr:`mlCertificate` contain information
    about the solution.

    :param decoder: The :class:`.Decoder` used for this thread.
    :param revealSent: If decoding should reveal the sent codeword.

    .. attribute:: jobQueue

      On this queue, pairs (llr, sentCodeword) are put. The thread will start decoding
      immediately, and signal :func:`JoinableQueue.task_done` when finished."""

    def __init__(self, decoder, revealSent):
        threading.Thread.__init__(self)
        self.decoder = decoder
        self.jobQueue = Queue()
        self.daemon = True
        self.revealSent = revealSent
        self.time = 0.0
        self.start()

    def run(self):
        while True:
            llr, sent = self.jobQueue.get()
            with Timer() as timer:
                if self.revealSent:
                    self.decoder.decode(llr, sent=sent)
                else:
                    self.decoder.decode(llr)
            self.time = timer.duration
            self.jobQueue.task_done()
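A hedged usage sketch; decoder, llr, and codeword stand for objects supplied by the surrounding simulation framework:

worker = DecodingThread(decoder, revealSent=True)  # the thread starts itself in __init__
worker.jobQueue.put((llr, codeword))
worker.jobQueue.join()  # returns once the worker has signalled task_done()
print(worker.time)      # duration of the decode, as measured by Timer
Example #12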
class BlockingDirectoryIterator(object):
    """
    iterator that blocks and yields new files added to a directory

    use like this:
        for filename in BlockingDirectoryIterator('/tmp','A*.DAT').get_files():
            print filename
    """
    def __init__(self, directory, wildcard, interval=1):
        self._values = Queue()
        self._exception = None
        self._ready = Event()
        self._poller = DirectoryPoller(directory, wildcard, self._on_condition, self._on_exception, interval)
        self._poller.start()
    def __iter__(self):
        return self
    def get_files(self):
        while True:
            # could have exception or list of filenames
            out = self._values.get()
            if isinstance(out, Exception):
                raise out
            else:
                yield out
    def cancel(self):
        self._poller.shutdown()
    def _on_condition(self, filenames):
        for file in filenames:
            self._values.put(file)
    def _on_exception(self, exception):
        self._values.put(exception)
Example #13
class LinkQueue:
    """
    A Thread-safe queue of unique elements.
    """

    def __init__(self, maxsize=10000):
        self.items = []
        self.items_lock = threading.Lock()
        self.queue = Queue(maxsize=maxsize)

    def __str__(self):
        return self.items.__str__()

    def put_all(self, items):
        for i in items:
            with self.items_lock:
                self.put(i)

    def put(self, item):
        if item not in self.items:
            self.queue.put(item)
            self.items.append(item)

    def pop(self):
        item = self.queue.get(block=True)
        return item

    def size(self):
        return self.queue.qsize()
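A short usage sketch with hypothetical URLs:

links = LinkQueue(maxsize=100)
links.put_all(['http://example.com/a', 'http://example.com/b', 'http://example.com/a'])
print(links.size())  # 2 -- the duplicate was filtered out
print(links.pop())   # 'http://example.com/a'
Example #14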
def __init__(self, threads_count, fail_op, log):
    self._tasks = Queue()
    self._results = Queue()

    for i in xrange(threads_count):
        thread.start_new_thread(get_remote_data,
                                (self._tasks, self._results, fail_op, log))
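Example #15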
    def test_create_cell_recalculator_should(self, mock_recalculate):
        unrecalculated_queue = Queue()
        unrecalculated_queue.put(1)
        unrecalculated_queue.put(1)
        unrecalculated_queue.task_done = Mock()

        leaf_queue = Queue()
        leaf_queue.put(sentinel.one)
        leaf_queue.put(sentinel.two)
        leaf_queue.task_done = Mock()

        target = create_cell_recalculator(leaf_queue, unrecalculated_queue, sentinel.graph, sentinel.context)
        target()

        self.assertTrue(unrecalculated_queue.empty())
        self.assertEquals(
            unrecalculated_queue.task_done.call_args_list,
            [ ((), {}), ((), {}), ]
        )

        self.assertTrue(leaf_queue.empty())

        self.assertEquals(
            mock_recalculate.call_args_list,
            [
                ((sentinel.one, leaf_queue, sentinel.graph, sentinel.context), {}),
                ((sentinel.two, leaf_queue, sentinel.graph, sentinel.context), {})
            ]
        )

        self.assertEquals(
            leaf_queue.task_done.call_args_list,
            [ ((), {}), ((), {}), ]
        )
Example #16
    def run(self):
        '''
        Does the job
        '''
        self.parser.add_option("-l", "--list", default=False, action="store_true", 
            help = "If present, list hosts configured in site.xml")
        self.parser.add_option("-a", "--artm", default=False, action="store_true", 
            help = "If present, include lo-art-1 to cycle/off")
        self.parser.add_option("-c", "--cob", default=False, action="store_true", 
            help = "If present, reboot only cob-* machines. cob-dmc is not rebooted because it belong to CONTROL subsystem.")
        self.parser.add_option("-o", "--off", default=False, action="store_true", 
            help = "If present, turn off the machines instead cycle them.")
        self.parser.add_option("-t", "--timeout", default=150, 
            help = "Set timeout to wait the recovered hosts. Default is 150 secs")
        self.parse()
        self.parse_args()
        self.get_hosts()
        if self.list is False:
            lastpdu = 'none'
            for host in self.hosts:
                currentpdu = str(self.get_pdu(host)[0])
                if currentpdu != lastpdu:
                    lastpdu = currentpdu
                    self.pstrip_cmd(self.get_pdu(host))
                    time.sleep(1)
                else:
                    time.sleep(2)
                    lastpdu = currentpdu
                    self.pstrip_cmd(self.get_pdu(host))
            if self.verbose:
                print self._get_time()+" Waiting for hosts ..."
            if self.off is False:
                queue = Queue()
                for host in self.hosts:
                    queue.put(host)
                    self.remaining_hosts.append(host)
                for host in self.hosts:
                    rh =  Thread(target=self.recover_host,args=(host, self.timeout,queue))
                    rh.setDaemon(True)
                    rh.start()
                queue.all_tasks_done.acquire()
                try:
                    endtime = time.time() + self.timeout
                    while queue.unfinished_tasks:
                        remaining = endtime -  time.time()
                        if remaining <= 0.0:
                            raise timeOut('Time Out Raise!!!')
                        queue.all_tasks_done.wait(remaining)
                except timeOut:
                    print "%s Probably %d hosts are still rebooting, please check ..." % (self._get_time(), int(queue.unfinished_tasks))
                    print "%s Please check these hosts:" % self._get_time()
                    for h in self.remaining_hosts:
                        print "%s ---> \033[31m%s\033[0m" % (self._get_time(), h)
                finally:
                    queue.all_tasks_done.release()

        else:
            print "Hosts configured in site.xml"
            for host in self.hosts:
                print host
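Example #17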
class HttpPool(object):   
    def __init__(self, threads_count, fail_op, log):   
        self._tasks = Queue()   
        self._results = Queue()   
           
        for i in xrange(threads_count):   
            thread.start_new_thread(get_remote_data,
                                    (self._tasks, self._results, fail_op, log))
               
    def add_task(self, tid, host, url, params, headers = {}, method = 'GET', timeout = None):   
        task = {   
            'id' : tid,   
            'conn_args' : {'host' : host} if timeout is None else {'host' : host, 'timeout' : timeout},   
            'headers' : headers,   
            'url' : url,   
            'params' : params,   
            'method' : method,   
            }   
        try:   
            self._tasks.put_nowait(task)   
        except Full:   
            return False  
        return True  
           
    def get_results(self):   
        results = []   
        while True:   
            try:   
                res = self._results.get_nowait()   
            except Empty:   
                break  
            results.append(res)   
        return results   
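Example #18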
def ThreadV():
	global queue
	global sina
	queue = Queue()
	sina = sina_data()
	# When the program starts, check the server's remaining remaining_ip_hits and reset_time_in_seconds; see http://open.weibo.com/wiki/Account/rate_limit_status for details
	rateLimit = client.account.rate_limit_status.get()
	print 'remaining_ip_hits:%d reset_time_in_seconds:%d\n'%(rateLimit['remaining_ip_hits'],rateLimit['reset_time_in_seconds'])
	time.sleep(2)
	# bp_statuses_log is the checkpoint log file
	if not os.path.exists('/home/mii/weibo_crawler/lijun_thread/bp_statuses_log'):
		place = 0
		f = open('/home/mii/weibo_crawler/lijun_thread/bp_statuses_log','w')
		f.close()
	elif len(open('/home/mii/weibo_crawler/lijun_thread/bp_statuses_log').read()) == 0:
		place = 0
	else:
		place = int(open('bp_statuses_log').read().strip())
		Count.count = place
	# Starting from the checkpoint, fetch big-V (verified account) nicknames and put them into the queue
	keys = open('shanghai3','r').readlines()[place:]
	for key in keys:
		queue.put(key)
	# Start multiple threads
	n = 5
	for i in range(n):
		t = threadv()
		t.start()
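Example #19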
    def test_request_retries_configurable(self):
        # We guess at some ports that will be unused by Riak or
        # anything else.
        client = self.create_client(http_port=DUMMY_HTTP_PORT,
                                    pb_port=DUMMY_PB_PORT)

        # Change the retry count
        client.retries = 10
        self.assertEqual(10, client.retries)

        # The retry count should be a thread local
        retries = Queue()

        def _target():
            retries.put(client.retries)
            retries.join()

        th = Thread(target=_target)
        th.start()
        self.assertEqual(3, retries.get(block=True))
        retries.task_done()
        th.join()

        # Modify the retries in a with statement
        with client.retry_count(5):
            self.assertEqual(5, client.retries)
            self.assertRaises(IOError, client.ping)
Example #20
    def __init__( self, date, warcs, viral, logs, identifiers ):
        self.warcs = []
        self.viral = []
        self.date = date
        self.wq = Queue()
        self.vq = Queue()

        for i in range(NUM_THREADS):
            worker = Thread(target=create_warcs, args=(self.wq, self.warcs))
            worker.setDaemon(True)
            worker.start()

        for warc in warcs:
            self.wq.put(warc)
        self.wq.join()

        for i in range(NUM_THREADS):
            worker = Thread(target=create_warcs, args=(self.vq, self.viral))
            worker.setDaemon(True)
            worker.start()

        for warc in viral:
            self.vq.put(warc)
        self.vq.join()

        self.logs = []
        for log in logs:
            self.logs.append( ZipContainer( path=log ) )
        self.identifiers = identifiers
        self.createDomainMets()
        self.createCrawlerMets()
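For context, a plausible sketch of the create_warcs worker this constructor relies on; WarcFile is a hypothetical stand-in for whatever object the real code builds from each path:

def create_warcs(queue, output):
    while True:
        path = queue.get()
        try:
            output.append(WarcFile(path))  # hypothetical wrapper type
        finally:
            queue.task_done()  # lets wq.join()/vq.join() return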
Example #21
def get(self):
    if Queue.empty(self):
        return None
    else:
        indexer = Queue.get(self)
        indexer.refreshEMSState()
        return indexer
Example #22
def track_followers():
	lock = Lock()
	queue = Queue()
	threads = []
	for acc in SocialProfile.objects.filter(jobs__isnull=False).distinct():
		if acc.is_executing_jobs:
			continue
		jobs = acc.jobs.filter(action="TRACK_FOLLOWERS")
		if jobs:
			threads.append(JobExecuter(lock=lock,account=acc, queue=queue, jobs=jobs))
	for thread in threads:
		thread.account.is_executing_jobs = True
		thread.account.save()
		thread.start()

	while threads:
		try:
			executer = queue.get(timeout=1)
		except:
			executer = None
		if executer:
			threads.remove(executer)
			executer.account.is_executing_jobs = False
			executer.account.save()
		else:
			threads[:] = [t for t in threads if t.isAlive()]
Example #23
    def download_cover(self, log, result_queue, abort,  # {{{
            title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
        cached_url = self.get_cached_cover_url(identifiers)
        if cached_url is None:
            log.info('No cached cover found, running identify')
            rq = Queue()
            self.identify(log, rq, abort, title=title, authors=authors,
                    identifiers=identifiers)
            if abort.is_set():
                return
            results = []
            while True:
                try:
                    results.append(rq.get_nowait())
                except Empty:
                    break
            results.sort(key=self.identify_results_keygen(
                title=title, authors=authors, identifiers=identifiers))
            for mi in results:
                cached_url = self.get_cached_cover_url(mi.identifiers)
                if cached_url is not None:
                    break
        if cached_url is None:
            log.info('No cover found')
            return

        if abort.is_set():
            return
        br = self.browser
        log('Downloading cover from:', cached_url)
        try:
            cdata = br.open_novisit(cached_url, timeout=timeout).read()
            result_queue.put((self, cdata))
        except:
            log.exception('Failed to download cover from:', cached_url)
Example #24
  def read(self, timeout=None):
    read_queue = Queue()

    def enqueue_output():
      for block in iter(self._proc.stdout.read, b''):
        read_queue.put(block)

      read_queue.put('')

    thread = Thread(target=enqueue_output)
    thread.daemon = True
    thread.start()

    output = ''

    try:
      started = time()

      while timeout is None or not float_info.epsilon > timeout:
        s = read_queue.get(timeout=timeout)

        if s:
          output += s
        else:
          return output

        if timeout is not None:
          timeout -= (time() - started)
    except Empty:
      return output
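Example #25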
class SharedCounter(object):
    """Thread-safe counter.

    Note that the final value is not synchronized: you should not update
    the value based on a previously read value; the only reliable
    operations are increment and decrement.

    Example

        >>> max_clients = SharedCounter(initial_value=10)

        # Thread one
        >>> max_clients += 1 # OK (safe)

        # Thread two
        >>> max_clients -= 3 # OK (safe)

        # Main thread
        >>> if client >= int(max_clients): # Max clients now at 8
        ...    wait()


        >>> max_client = max_clients + 10 # NOT OK (unsafe)

    """

    def __init__(self, initial_value):
        self._value = initial_value
        self._modify_queue = Queue()

    def increment(self, n=1):
        """Increment value."""
        self += n
        return int(self)

    def decrement(self, n=1):
        """Decrement value."""
        self -= n
        return int(self)

    def _update_value(self):
        self._value += sum(consume_queue(self._modify_queue))
        return self._value

    def __iadd__(self, y):
        """``self += y``"""
        self._modify_queue.put(y * +1)
        return self

    def __isub__(self, y):
        """``self -= y``"""
        self._modify_queue.put(y * -1)
        return self

    def __int__(self):
        """``int(self) -> int``"""
        return self._update_value()

    def __repr__(self):
        return "<SharedCounter: int(%s)>" % str(int(self))
class WorkerThread(Thread):

    def __init__(self):
        """Create a worker thread. Start it by calling the start() method."""
        self.queue = Queue()
        Thread.__init__(self)

    def stop(self):
        """Stop the thread a.s.a.p., meaning whenever the currently running
        job is finished."""
        self.working = 0
        self.queue.put(None)

    def scheduleWork(self, func, *args, **kwargs):
        """Schedule some work to be done in the worker thread."""
        self.queue.put((func, args, kwargs))

    def run(self):
        """Fetch work from a queue, block when there's nothing to do.
        This method is called by Thread, don't call it yourself."""
        self.working = 1
        while self.working:
            work = self.queue.get()
            if work is None or not self.working:
                break
            func, args, kwargs = work
            pool = NSAutoreleasePool.alloc().init()
            try:
                func(*args, **kwargs)
            finally:
                # delete all local references; if they are the last refs they
                # may invoke autoreleases, which should then end up in our pool
                del func, args, kwargs, work
                del pool
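A usage sketch; note that run() drains an NSAutoreleasePool, so this assumes a PyObjC environment:

import time

def do_work(msg, repeat=1):
    print(msg * repeat)

worker = WorkerThread()
worker.start()
worker.scheduleWork(do_work, 'ping ', repeat=3)
time.sleep(0.1)  # give the job a moment to run before stopping the thread
worker.stop()
worker.join()
Example #27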
class IWRCBot():
    def __init__(self, site, safe = True):
        self.other_ns = re.compile(u'14\[\[07(' + u'|'.join(site.namespaces()) + u')')
        interwiki.globalvar.autonomous = True
        self.site = site
        self.queue = Queue()
        self.processed = []
        self.safe = safe
        # Start 20 threads
        for i in range(20):
            t = threading.Thread(target=self.worker)
            t.setDaemon(True)
            t.start()

    def worker(self):
        bot = interwiki.InterwikiBot()
        while True:
            # Will wait until one page is available
            bot.add(self.queue.get())
            bot.queryStep()
            self.queue.task_done()

    def addQueue(self, name):
        if self.other_ns.match(name):
            return
        if self.safe:
            if name in self.processed:
                return
            self.processed.append(name)
        page = pywikibot.Page(self.site, name)
        # the Queue has for now an unlimited size,
        # it is a simple atomic append(), no need to acquire a semaphore
        self.queue.put_nowait(page)
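Example #28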
def main():
    print "[Facebook Album Downloader v1]"
    start = timeit.default_timer()

    # hide images
    prefs = {"profile.managed_default_content_settings.images": 2}
    extensions = webdriver.ChromeOptions()
    extensions.add_experimental_option("prefs", prefs)
    browser = webdriver.Chrome(executable_path="chromedriver", chrome_options=extensions)

    findAlbum(browser)
    createAlbumPath()

    queue = Queue()

    for x in range(max_workers):
        worker = DownloadWorker(queue)
        worker.daemon = True
        worker.start()

    print "[Getting Image Links]"
    linkImages = getImageLinks(browser)
    print "[Found: " + str(len(linkImages)) + "]"

    for fullRes in linkImages:
        queue.put(fullRes)

    print "[Downloading...]"
    queue.join()

    browser.quit()

    stop = timeit.default_timer()
    print "[Time taken: %ss]" % str(stop - start)
    raw_input("Press any key to continue...")
Example #29
class Source:
    def __init__(self, task_list, results, timeout, verbose = False):
        self.tasks = Queue()
        for task in task_list:
            self.tasks.put_nowait(task)

        self.results = results
        self.timeout = timeout
        self.verbose = verbose

    def start(self, worker_count):
        t0 = datetime.now()

        sink = Sink(self.results)
        self.workers = [ Worker(_+1, self.tasks, sink, self.timeout, self.verbose) for _ in range(worker_count) ]
        if self.verbose:
            print('[P] Starting workers.')
        for w in self.workers:
            w.t0 = t0
            w.start()
        ans = self.join_workers()
        if self.verbose:
            print('[P] Finished.')
        return ans

    def join_workers(self):
        try:
            for w in self.workers:
                w.join(20000)
            return True
        except KeyboardInterrupt:
            for w in self.workers:
                w.stop = True
            return False
Example #30
def fetch_account_info():
	queue = Queue()
	threads = []
	lock = Lock()

	for acc in SocialProfile.objects.filter(jobs__isnull=False).distinct():
		jobs = acc.jobs.filter(Q(action="GET_ACCOUNT_INFO") | Q(action="LOOKUP_ID"))
		if jobs:
			threads.append(AccountFetch(account=acc, queue=queue, jobs=jobs))
				
	for thread in threads:
		thread.account.is_executing_jobs = True
		thread.account.save()
		thread.start()

	while threads:
		try:
			executer = queue.get(timeout=1)
	
		except:
			executer = None
			print executer
		if executer:
			threads.remove(executer)
			executer.account.is_executing_jobs = False
			executer.account.save()
		else:
			threads[:] = [t for t in threads if t.isAlive()]
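Example #31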
        raise cherrypy.HTTPError(411)
    
    body = entity.fp.read()
    # decompress if gzip content type
    if entity.headers.get(ntou("Content-Type")) == ntou("application/gzip"):
        try:
            body = zlib.decompress(body)
        except:
            raise cherrypy.HTTPError(500, 'Invalid gzip data')

    try:
        cherrypy.serving.request.json = json_decode(body.decode('utf-8'))
    except ValueError:
        raise cherrypy.HTTPError(400, 'Invalid JSON document')

stat_handler_queue = Queue()
        
def worker():
    while True:
        item = stat_handler_queue.get()
        fn = item[0]
        fn(item[1])
        stat_handler_queue.task_done()
        
worker_thread = Thread(target=worker)
worker_thread.daemon = True
worker_thread.start()

class StatHandler(object):
    '''
    A base stat handler for incoming stats. By initialising with a given push function
Example #32
def cmd_download(savedir, skipextras, skipgames, skipids, dryrun, id,
                 randomorder):
    sizes, rates, errors = {}, {}, {}
    work = Queue()  # build a list of work items

    load_cookies()

    items = load_manifest()
    work_dict = dict()

    # util
    def kibs(b):
        return '%.1fKB' % (b / float(1024))

    def megs(b):
        return '%.1fMB' % (b / float(1024**2))

    def gigs(b):
        return '%.2fGB' % (b / float(1024**3))

    def auto_size(b):
        if b > 1024**3:
            return gigs(b)
        elif b > 1024**2:
            return megs(b)
        elif b > 1024:
            return kibs(b)
        else:
            return '%dB' % (b)

    if id:
        id_found = False
        for item in items:
            if item.title == id:
                items = [item]
                id_found = True
                break
        if not id_found:
            error('no game with id "{}" was found.'.format(id))
            exit(1)

    if skipids:
        info("skipping games with id[s]: {%s}" % skipids)
        ignore_list = skipids.split(",")
        items[:] = [item for item in items if item.title not in ignore_list]

    if randomorder:
        shuffle(items)
    else:
        items = sorted(items, key=lambda g: g.title)
    # Find all items to be downloaded and push into work queue
    for item in items:
        info("{%s}" % item.title)
        item_homedir = os.path.join(savedir, item.title)
        if not dryrun:
            if not os.path.isdir(item_homedir):
                os.makedirs(item_homedir)

        if skipextras:
            item.extras = []

        if skipgames:
            item.downloads = []

        # Generate and save a game info text file
        if not dryrun:
            with ConditionalWriter(os.path.join(item_homedir,
                                                INFO_FILENAME)) as fd_info:
                fd_info.write(u'{0}-- {1} --{0}{0}'.format(
                    os.linesep, item.long_title))
                fd_info.write(u'title.......... {}{}'.format(
                    item.title, os.linesep))
                if item.genre:
                    fd_info.write(u'genre.......... {}{}'.format(
                        item.genre, os.linesep))
                fd_info.write(u'game id........ {}{}'.format(
                    item.id, os.linesep))
                fd_info.write(u'url............ {}{}'.format(
                    GOG_HOME_URL + item.store_url, os.linesep))
                if item.rating > 0:
                    fd_info.write(u'user rating.... {}%{}'.format(
                        item.rating * 2, os.linesep))
                if item.release_timestamp > 0:
                    rel_date = datetime.datetime.fromtimestamp(
                        item.release_timestamp).strftime('%B %d, %Y')
                    fd_info.write(u'release date... {}{}'.format(
                        rel_date, os.linesep))
                if hasattr(item, 'gog_messages') and item.gog_messages:
                    fd_info.write(u'{0}gog messages...:{0}'.format(os.linesep))
                    for gog_msg in item.gog_messages:
                        fd_info.write(u'{0}{1}{0}'.format(
                            os.linesep,
                            html2text(gog_msg).strip()))
                fd_info.write(u'{0}game items.....:{0}{0}'.format(os.linesep))
                for game_item in item.downloads:
                    fd_info.write(u'    [{}] -- {}{}'.format(
                        game_item.name, game_item.desc, os.linesep))
                    if game_item.version:
                        fd_info.write(u'        version: {}{}'.format(
                            game_item.version, os.linesep))
                if len(item.extras) > 0:
                    fd_info.write(u'{0}extras.........:{0}{0}'.format(
                        os.linesep))
                    for game_item in item.extras:
                        fd_info.write(u'    [{}] -- {}{}'.format(
                            game_item.name, game_item.desc, os.linesep))
                if item.changelog:
                    fd_info.write(u'{0}changelog......:{0}{0}'.format(
                        os.linesep))
                    fd_info.write(html2text(item.changelog).strip())
                    fd_info.write(os.linesep)
        # Generate and save a game serial text file
        if not dryrun:
            if item.serial != '':
                with ConditionalWriter(
                        os.path.join(item_homedir,
                                     SERIAL_FILENAME)) as fd_serial:
                    item.serial = item.serial.replace(u'<span>', '')
                    item.serial = item.serial.replace(u'</span>', os.linesep)
                    fd_serial.write(item.serial)

        # Populate queue with all files to be downloaded
        for game_item in item.downloads + item.extras:
            if game_item.name is None:
                continue  # no game name, usually due to 404 during file fetch
            dest_file = os.path.join(item_homedir, game_item.name)

            if os.path.isfile(dest_file):
                if game_item.size is None:
                    warn('     unknown    %s has no size info.  skipping' % game_item.name)
                    continue
                elif game_item.size != os.path.getsize(dest_file):
                    warn('     fail       %s has incorrect size.' %
                         game_item.name)
                else:
                    info('     pass       %s' % game_item.name)
                    continue  # move on to next game item

            info('     download   %s' % game_item.name)
            sizes[dest_file] = game_item.size

            work_dict[dest_file] = (game_item.href, game_item.size, 0,
                                    game_item.size - 1, dest_file)

    for work_item in work_dict:
        work.put(work_dict[work_item])

    if dryrun:
        info("{} left to download".format(auto_size(sum(sizes.values()))))
        return  # bail, as below just kicks off the actual downloading

    info('-' * 60)

    # work item I/O loop
    def ioloop(tid, path, page, out):
        sz, t0 = True, time.time()
        while sz:
            buf = page.read(4 * 1024)
            t = time.time()
            out.write(buf)
            sz, dt, t0 = len(buf), t - t0, t
            with lock:
                sizes[path] -= sz
                rates.setdefault(path, []).append((tid, (sz, dt)))

    # downloader worker thread main loop
    def worker():
        tid = threading.current_thread().ident
        while not work.empty():
            (href, sz, start, end, path) = work.get()
            try:
                dest_dir = os.path.dirname(path)
                with lock:
                    if not os.path.isdir(dest_dir):
                        os.makedirs(dest_dir)
                    if os.path.exists(path) and os.path.getsize(
                            path
                    ) > sz:  # if needed, truncate file if ours is larger than expected size
                        with open_notrunc(path) as f:
                            f.truncate(sz)
                with open_notrunc(path) as out:
                    out.seek(start)
                    se = start, end
                    try:
                        with request(href, byte_range=se) as page:
                            hdr = page.headers['Content-Range'].split()[-1]
                            if hdr != '%d-%d/%d' % (start, end, sz):
                                with lock:
                                    error(
                                        "chunk request has unexpected Content-Range. "
                                        "expected '%d-%d/%d' received '%s'. skipping."
                                        % (start, end, sz, hdr))
                            else:
                                assert out.tell() == start
                                ioloop(tid, path, page, out)
                                assert out.tell() == end + 1
                    except HTTPError as e:
                        error("failed to download %s, byte_range=%s" %
                              (os.path.basename(path), str(se)))
            except IOError as e:
                with lock:
                    print('!', path, file=sys.stderr)
                    errors.setdefault(path, []).append(e)
            work.task_done()

    # detailed progress report
    def progress():
        with lock:
            left = sum(sizes.values())
            for path, flowrates in sorted(rates.items()):
                flows = {}
                for tid, (sz, t) in flowrates:
                    szs, ts = flows.get(tid, (0, 0))
                    flows[tid] = sz + szs, t + ts
                bps = sum(szs / ts for szs, ts in list(flows.values())
                          if ts > 0)
                info('%10s %s/s %2dx  %s' % \
                    (auto_size(sizes[path]), auto_size(bps), len(flows), "%s/%s" % (os.path.basename(os.path.split(path)[0]), os.path.split(path)[1])))
            if len(rates) != 0:  # only update if there's change
                info('%s remaining' % auto_size(left))
            rates.clear()

    # process work items with a thread pool
    lock = threading.Lock()
    pool = []
    for i in range(HTTP_GAME_DOWNLOADER_THREADS):
        t = threading.Thread(target=worker)
        t.daemon = True
        t.start()
        pool.append(t)
    try:
        while any(t.is_alive() for t in pool):
            progress()
            time.sleep(1)
    except KeyboardInterrupt:
        raise
    except:
        with lock:
            log_exception('')
        raise
Example #33
def dictionaryAttack(attack_dict):
    suffix_list = [""]
    custom_wordlist = [""]
    hash_regexes = []
    results = []
    resumes = []
    user_hash = []
    processException = False
    foundHash = False

    for (_, hashes) in attack_dict.items():
        for hash_ in hashes:
            if not hash_:
                continue

            hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_
            regex = hashRecognition(hash_)

            if regex and regex not in hash_regexes:
                hash_regexes.append(regex)
                infoMsg = "using hash method '%s'" % __functions__[
                    regex].func_name
                logger.info(infoMsg)

    for hash_regex in hash_regexes:
        keys = set()
        attack_info = []

        for (user, hashes) in attack_dict.items():
            for hash_ in hashes:
                if not hash_:
                    continue

                foundHash = True
                hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_

                if re.match(hash_regex, hash_):
                    item = None

                    if hash_regex not in (HASH.CRYPT_GENERIC, HASH.WORDPRESS):
                        hash_ = hash_.lower()

                    if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD,
                                      HASH.MD5_GENERIC, HASH.SHA1_GENERIC):
                        item = [(user, hash_), {}]
                    elif hash_regex in (HASH.ORACLE_OLD, HASH.POSTGRES):
                        item = [(user, hash_), {'username': user}]
                    elif hash_regex in (HASH.ORACLE, ):
                        item = [(user, hash_), {'salt': hash_[-20:]}]
                    elif hash_regex in (HASH.MSSQL, HASH.MSSQL_OLD,
                                        HASH.MSSQL_NEW):
                        item = [(user, hash_), {'salt': hash_[6:14]}]
                    elif hash_regex in (HASH.CRYPT_GENERIC, ):
                        item = [(user, hash_), {'salt': hash_[0:2]}]
                    elif hash_regex in (HASH.WORDPRESS, ):
                        if ITOA64.index(hash_[3]) < 32:
                            item = [(user, hash_), {
                                'salt': hash_[4:12],
                                'count': 1 << ITOA64.index(hash_[3]),
                                'prefix': hash_[:12]
                            }]
                        else:
                            warnMsg = "invalid hash '%s'" % hash_
                            logger.warn(warnMsg)

                    if item and hash_ not in keys:
                        resumed = hashDBRetrieve(hash_)
                        if not resumed:
                            attack_info.append(item)
                            user_hash.append(item[0])
                        else:
                            infoMsg = "resuming password '%s' for hash '%s'" % (
                                resumed, hash_)
                            if user and not user.startswith(DUMMY_USER_PREFIX):
                                infoMsg += " for user '%s'" % user
                            logger.info(infoMsg)
                            resumes.append((user, hash_, resumed))
                        keys.add(hash_)

        if not attack_info:
            continue

        if not kb.wordlists:
            while not kb.wordlists:

                # the slowest of all methods hence smaller default dict
                if hash_regex in (HASH.ORACLE_OLD, HASH.WORDPRESS):
                    dictPaths = [paths.SMALL_DICT]
                else:
                    dictPaths = [paths.WORDLIST]

                message = "what dictionary do you want to use?\n"
                message += "[1] default dictionary file '%s' (press Enter)\n" % dictPaths[
                    0]
                message += "[2] custom dictionary file\n"
                message += "[3] file with list of dictionary files"
                choice = readInput(message, default="1")

                try:
                    if choice == "2":
                        message = "what's the custom dictionary's location?\n"
                        dictPaths = [readInput(message)]

                        logger.info("using custom dictionary")
                    elif choice == "3":
                        message = "what's the list file location?\n"
                        listPath = readInput(message)
                        checkFile(listPath)
                        dictPaths = getFileItems(listPath)

                        logger.info("using custom list of dictionaries")
                    else:
                        logger.info("using default dictionary")

                    dictPaths = filter(None, dictPaths)

                    for dictPath in dictPaths:
                        checkFile(dictPath)

                    kb.wordlists = dictPaths

                except Exception, ex:
                    warnMsg = "there was a problem while loading dictionaries"
                    warnMsg += " ('%s')" % getSafeExString(ex)
                    logger.critical(warnMsg)

            message = "do you want to use common password suffixes? (slow!) [y/N] "
            test = readInput(message, default="N")

            if test[0] in ("y", "Y"):
                suffix_list += COMMON_PASSWORD_SUFFIXES

        infoMsg = "starting dictionary-based cracking (%s)" % __functions__[
            hash_regex].func_name
        logger.info(infoMsg)

        for item in attack_info:
            ((user, _), _) = item
            if user and not user.startswith(DUMMY_USER_PREFIX):
                custom_wordlist.append(normalizeUnicode(user))

        if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD, HASH.MD5_GENERIC,
                          HASH.SHA1_GENERIC):
            for suffix in suffix_list:
                if not attack_info or processException:
                    break

                if suffix:
                    clearConsoleLine()
                    infoMsg = "using suffix '%s'" % suffix
                    logger.info(infoMsg)

                retVal = None
                processes = []

                try:
                    if _multiprocessing:
                        if _multiprocessing.cpu_count() > 1:
                            infoMsg = "starting %d processes " % _multiprocessing.cpu_count(
                            )
                            singleTimeLogMessage(infoMsg)

                        gc.disable()

                        retVal = _multiprocessing.Queue()
                        count = _multiprocessing.Value(
                            'i', _multiprocessing.cpu_count())

                        for i in xrange(_multiprocessing.cpu_count()):
                            p = _multiprocessing.Process(
                                target=_bruteProcessVariantA,
                                args=(attack_info, hash_regex, suffix, retVal,
                                      i, count, kb.wordlists, custom_wordlist))
                            processes.append(p)

                        for p in processes:
                            p.daemon = True
                            p.start()

                        while count.value > 0:
                            time.sleep(0.5)

                    else:
                        warnMsg = "multiprocessing hash cracking is currently "
                        warnMsg += "not supported on this platform"
                        singleTimeWarnMessage(warnMsg)

                        retVal = Queue()
                        _bruteProcessVariantA(attack_info, hash_regex, suffix,
                                              retVal, 0, 1, kb.wordlists,
                                              custom_wordlist)

                except KeyboardInterrupt:
                    print
                    processException = True
                    warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)"
                    logger.warn(warnMsg)

                    for process in processes:
                        try:
                            process.terminate()
                            process.join()
                        except (OSError, AttributeError):
                            pass

                finally:
                    if _multiprocessing:
                        gc.enable()

                    if retVal:
                        conf.hashDB.beginTransaction()

                        while not retVal.empty():
                            user, hash_, word = item = retVal.get(block=False)
                            attack_info = filter(
                                lambda _: _[0][0] != user or _[0][1] != hash_,
                                attack_info)
                            hashDBWrite(hash_, word)
                            results.append(item)

                        conf.hashDB.endTransaction()

            clearConsoleLine()

        else:
            for ((user, hash_), kwargs) in attack_info:
                if processException:
                    break

                if any(_[0] == user and _[1] == hash_ for _ in results):
                    continue

                count = 0
                found = False

                for suffix in suffix_list:
                    if found or processException:
                        break

                    if suffix:
                        clearConsoleLine()
                        infoMsg = "using suffix '%s'" % suffix
                        logger.info(infoMsg)

                    retVal = None
                    processes = []

                    try:
                        if _multiprocessing:
                            if _multiprocessing.cpu_count() > 1:
                                infoMsg = "starting %d processes " % _multiprocessing.cpu_count(
                                )
                                singleTimeLogMessage(infoMsg)

                            gc.disable()

                            retVal = _multiprocessing.Queue()
                            found_ = _multiprocessing.Value('i', False)
                            count = _multiprocessing.Value(
                                'i', _multiprocessing.cpu_count())

                            for i in xrange(_multiprocessing.cpu_count()):
                                p = _multiprocessing.Process(
                                    target=_bruteProcessVariantB,
                                    args=(user, hash_, kwargs, hash_regex,
                                          suffix, retVal, found_, i, count,
                                          kb.wordlists, custom_wordlist))
                                processes.append(p)

                            for p in processes:
                                p.daemon = True
                                p.start()

                            while count.value > 0:
                                time.sleep(0.5)

                            found = found_.value != 0

                        else:
                            warnMsg = "multiprocessing hash cracking is currently "
                            warnMsg += "not supported on this platform"
                            singleTimeWarnMessage(warnMsg)

                            class Value():
                                pass

                            retVal = Queue()
                            found_ = Value()
                            found_.value = False

                            _bruteProcessVariantB(user, hash_, kwargs,
                                                  hash_regex, suffix, retVal,
                                                  found_, 0, 1, kb.wordlists,
                                                  custom_wordlist)

                            found = found_.value

                    except KeyboardInterrupt:
                        print
                        processException = True
                        warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)"
                        logger.warn(warnMsg)

                        for process in processes:
                            try:
                                process.terminate()
                                process.join()
                            except (OSError, AttributeError):
                                pass

                    finally:
                        if _multiprocessing:
                            gc.enable()

                        if retVal:
                            conf.hashDB.beginTransaction()

                            while not retVal.empty():
                                user, hash_, word = item = retVal.get(
                                    block=False)
                                hashDBWrite(hash_, word)
                                results.append(item)

                            conf.hashDB.endTransaction()

                clearConsoleLine()
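Example #34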
class KrakrobotSimulator(object):

    def __init__(self,
                 map,
                 robot_controller,
                 init_position=None,
                 steering_noise=0.01,
                 color_noise=10,
                 sonar_noise=0.1,
                 distance_noise=0.001,
                 forward_steering_drift=0,
                 measurement_noise=0.2,
                 speed=5.0,
                 turning_speed=0.4 * pi,
                 execution_cpu_time_limit=10.0,
                 simulation_time_limit=10.0,
                 simulation_dt=0.0,
                 frame_dt=0.1,
                 gps_delay=2.0,
                 collision_threshold=50,
                 iteration_write_frequency=1000,
                 command_line=True,
                 print_robot=True,
                 print_logger=False,
                 accepted_commands=[TURN, MOVE, BEEP, FINISH, SENSE_COLOR]
                 ):
        """
            Construct a KrakrobotSimulator instance.

            :param steering_noise - variance of steering in move
            :param distance_noise - variance of distance in move
            :param measurement_noise - variance of measurement (GPS??)
            :param map - map for the robot simulator representing the maze or file to map
            :param init_position - starting position of the Robot (can be moved to map class) [x,y,heading]
            :param speed - distance travelled by one move action (cannot be bigger than 0.5, or it could traverse the walls)
            :param simulation_time_limit - limit in ms for whole robot execution (also with init)
            :param collision_threshold - maximum number of collisions after which robot is destroyed
            :param simulation_dt - controls simulation calculation intensity
            :param frame_dt - save frame every dt
            :param robot - RobotController class that will be simulated in run procedure
        """

        if type(map) is str:
            self.map = load_map(map)
            for row in self.map['board']:
                logger.info(row)
        else:
            self.map = map

        self.iteration_write_frequency = iteration_write_frequency

        self.collision_threshold = collision_threshold

        if init_position is not None:
            self.init_position = tuple(init_position)
        else:
            for i in xrange(self.map['N']):
                for j in xrange(self.map['M']):
                    if self.map['board'][i][j] == MAP_START_POSITION:
                        self.init_position = (i + 0.5, j + 0.5, 0)

        self.speed = speed
        self.turning_speed = turning_speed
        self.simulation_dt = simulation_dt
        self.frame_dt = frame_dt
        self.robot_controller = robot_controller
        self.print_robot = print_robot
        self.print_logger = print_logger
        self.accepted_commands = accepted_commands

        self.command_line = command_line

        self.sonar_time = SONAR_TIME
        self.gps_delay = gps_delay
        self.light_sensor_time = LIGHT_SENSOR_TIME

        self.simulation_time_limit = simulation_time_limit
        self.execution_cpu_time_limit = execution_cpu_time_limit

        self.goal_threshold = 0.5  # When to declare the goal reached

        self.color_noise = color_noise
        self.sonar_noise = sonar_noise
        self.distance_noise = distance_noise
        self.forward_steering_drift = forward_steering_drift
        self.measurement_noise = measurement_noise
        self.steering_noise = steering_noise
        self.reset()

        # TODO: Disable logger printing when needed
        if self.print_logger:
            logger.propagate = True
        else:
            logger.propagate = False

        for i in xrange(self.map['N']):
            for j in xrange(self.map['M']):
                if self.map['board'][i][j] == MAP_GOAL:
                    self.goal = (i, j)

    def get_next_frame(self):
        """
            @returns next frame of simulation data

            @note the queue is thread-safe and works producer-consumer style;
            these frames should be consumed by the rendering thread
        """
        # if len(self.sim_frames) == 0: return None

        return self.sim_frames.get()

    def get_next_frame_nowait(self):
        """
            @returns next frame of simulation data

            @note Only get an item if one is immediately available. Otherwise
            raise the Empty exception.
        """
        return self.sim_frames.get_nowait()

    def reset(self):
        """ Reset state of the KrakrobotSimulator """
        self.robot_path = []
        self.collisions = []
        self.results = None

        self.goal_achieved = False
        self.robot_timer = 0.0
        self.sim_frames = Queue(100000)
        self.finished = False
        self.terminate_flag = False

        self.logs = []

    def run(self):
        """ Runs simulations by quering the robot """
        self.reset()

        # Initialize robot object
        robot = Robot(self.speed, self.turning_speed, self.gps_delay, self.sonar_time, TICK_MOVE, TICK_ROTATE)
        robot.set(self.init_position[0], self.init_position[1], self.init_position[2])
        robot.set_noise(new_s_noise=self.steering_noise,
                        new_d_noise=self.distance_noise,
                        new_m_noise=self.measurement_noise,
                        new_fs_drift=self.forward_steering_drift,
                        new_sonar_noise=self.sonar_noise,
                        new_c_noise=self.color_noise)


        # Initialize robot controller object given by contestant
        robot_controller = PythonTimedRobotController(self.robot_controller.clone())
        robot_controller.init(x=self.init_position[0],
                              y=self.init_position[1],
                              angle=self.init_position[2],
                              steering_noise=robot.steering_noise,
                              distance_noise=robot.distance_noise,
                              forward_steering_drift=robot.forward_steering_drift,
                              speed=robot.speed,
                              turning_speed=robot.turning_speed,
                              execution_cpu_time_limit=self.execution_cpu_time_limit,
                              N=self.map['N'],
                              M=self.map['M'])


        maximum_timedelta = datetime.timedelta(seconds=self.execution_cpu_time_limit)

        self.robot_path.append((robot.x, robot.y))
        collision_counter = 0  # number of collisions so far (a maximum is allowed)

        frame_time_left = self.simulation_dt
        frame_count = 0
        current_command = None
        iteration = 0
        beeps = []
        communicated_finished = False
        try:
            while not communicated_finished \
                    and not robot.time_elapsed >= self.simulation_time_limit \
                    and not self.terminate_flag:

                if maximum_timedelta <= robot_controller.time_consumed:
                    raise KrakrobotException("Robot has exceeded CPU time limit")

                if iteration % self.iteration_write_frequency == 0:
                    logger.info("Iteration {0}, produced {1} frames".format(iteration,
                                                                            frame_count))
                    logger.info("Elapsed {0}".format(robot.time_elapsed))
                    logger.info("Current command: {}".format(current_command))

                iteration += 1

                if frame_time_left > self.frame_dt and not self.command_line:
                    ### Save frame <=> last command took long ###
                    if len(self.robot_path) == 0 or \
                            robot.x != self.robot_path[-1][0] or robot.y != self.robot_path[-1][1]:
                        self.robot_path.append((robot.x, robot.y))
                    self.sim_frames.put(self._create_sim_data(robot, beeps))

                    frame_count += 1
                    frame_time_left -= self.frame_dt

                if current_command is not None:
                    ### Process current command ###

                    if current_command[0] == TURN:
                        robot = robot.turn(np.sign(current_command[1]))
                        frame_time_left += TICK_ROTATE / self.turning_speed
                    elif current_command[0] == MOVE:
                        robot_proposed = robot.move(np.sign(current_command[1]))

                        if not robot_proposed.check_collision(self.map['board']):
                            collision_counter += 1
                            self.collisions.append((robot_proposed.x, robot_proposed.y))
                            logger.error("Collision")
                            if collision_counter >= self.collision_threshold:
                                raise KrakrobotException \
                                    ("The robot has been destroyed by wall. Sorry! We miss WALLE already.")
                        else:
                            robot = robot_proposed

                        frame_time_left += TICK_MOVE / self.speed
                    else:
                        raise KrakrobotException("Robot hasn't supplied any command")

                    if current_command[1] == 0:
                        current_command = None
                    else:
                        current_command = [current_command[0], current_command[1] - np.sign(current_command[1])]

                else:
                    ### Get current command ###

                    command = None
                    try:
                        r, g, b = robot.sense_color(self.map)
                        robot_controller.on_sense_color(r, g, b)
                        command = list(robot_controller.act(robot.time_elapsed))
                    except Exception, e:
                        logger.error("Robot controller failed with exception " + str(e))
                        logger.error(traceback.format_exc())
                        break

                    # logger.info("Robot timer "+str(robot.time_elapsed))
                    if not command or len(command) == 0:
                        raise KrakrobotException("No command passed, or zero length command passed")

                    if command[0] not in self.accepted_commands:
                        raise KrakrobotException("Not allowed command " + str(command[0]))

                    # Dispatch command
                    if command[0] == SENSE_GPS:
                        robot_controller.on_sense_gps(*robot.sense_gps())
                        frame_time_left += self.gps_delay
                    elif command[0] == WRITE_CONSOLE:
                        new_line = "{'frame': " + str(frame_count) + \
                                   ", 'time': " + str(robot.time_elapsed) + \
                                   '}:\n' + command[1]
                        self.logs.append(new_line)
                        if self.print_robot:
                            print new_line
                    elif command[0] == SENSE_SONAR:
                        w = robot.sense_sonar(self.map['board'])
                        robot_controller.on_sense_sonar(w)
                        frame_time_left += self.sonar_time
                    elif command[0] == SENSE_COLOR:
                        r, g, b = robot.sense_color(self.map)
                        robot_controller.on_sense_color(r, g, b)
                        frame_time_left += self.light_sensor_time
                    elif command[0] == TURN:
                        if len(command) != 2:
                            raise KrakrobotException("Incorrect command length")
                        current_command = command
                        current_command[1] = int(current_command[1])
                    elif command[0] == MOVE:
                        if len(command) != 2:
                            raise KrakrobotException("Incorrect command length")

                        if command[1] < 0:
                            raise KrakrobotException("Not allowed negative distance")
                        # Move robot
                        current_command = command
                        current_command[1] = int(current_command[1])

                    elif command[0] == BEEP:
                        beeps.append((robot.x, robot.y, robot.time_elapsed))
                    elif command[0] == FINISH:
                        logger.info("Communicated finishing")
                        communicated_finished = True
                    else:
                        raise KrakrobotException("Not received command from act(), or command was incorrect")

        except Exception, e:
            # TODO: merge with final result!
            logger.error("Simulation failed with exception " + str(e) + " after " + str(robot.time_elapsed) + " time")
            map_to_save = dict(self.map)
            del map_to_save['color_bitmap']
            return {
                    "sim_time": robot.time_elapsed,
                    "cpu_time": robot_controller.time_consumed.total_seconds() * 1000,
                    "error": str(traceback.format_exc()),
                    "beeps": beeps,
                    "map": map_to_save
                    }

        self.sim_frames.put(self._create_sim_data(robot, beeps))
        while frame_time_left >= self.frame_dt and not self.command_line and not self.terminate_flag:
            ### Save frame <=> last command took long ###
            self.sim_frames.put(self._create_sim_data(robot, beeps))
            frame_time_left -= self.frame_dt

        # Simulation process finished
        self.finished = True
        logger.info("Exiting")
        self.results = None
        try:
            # Return simulation results
            map_to_save = dict(self.map)
            del map_to_save['color_bitmap']
            self.results = {"sim_time": robot.time_elapsed,
                    "map": map_to_save,
                    "beeps": beeps,
                    "cpu_time": robot_controller.time_consumed.total_seconds() * 1000,
                    "error": False
                    }
            logger.info("Simulation ended after " + str(robot.time_elapsed) + " seconds, communicated_finish=" + str(
                communicated_finished))
            return self.results

        except Exception, e:
            self.results = None
            logger.error("Failed constructing result " + str(e))
            return {"error": str(e)}
Exemple #35
0
class ProcessFiles(ClusterThread):
    def __init__(self, manager_handler, filename, client_name, stopper):
        """
        Abstract class which defines the necessary methods to receive a file
        """
        ClusterThread.__init__(self, stopper)

        self.manager_handler = manager_handler  # handler object
        self.filename = filename  # filename of the file to receive
        self.name = client_name  # name of the sender
        self.command_queue = Queue()  # queue to store received file commands
        self.received_all_information = False  # flag to indicate whether the whole file has been received
        self.received_error = False  # flag to indicate there has been an error in receiving process
        self.f = None  # file object that is being received
        self.id = None  # id of the thread doing the receiving process
        self.thread_tag = "[FileThread]"  # logger tag of the thread
        self.n_get_timeouts = 0  # number of times Empty exception is raised
        self.start_time = 0  # debug: start receiving time
        self.end_time = 0  # debug: end time
        self.total_time = 0  # debug: total time receiving
        self.size_received = 0  # debug: total bytes received

        #Intervals
        self.interval_file_transfer_receive = get_cluster_items_communication_intervals(
        )['file_transfer_receive']
        self.max_time_receiving_file = get_cluster_items_communication_intervals(
        )['max_time_receiving_file']

    # Overridden methods
    def stop(self):
        """
        Stops the thread
        """
        if self.id:
            self.manager_handler.del_worker(self.id)
        ClusterThread.stop(self)

    def run(self):
        """
        Receives the file and processes it.
        """
        logger.info("{0}: Start.".format(self.thread_tag))

        while not self.stopper.is_set() and self.running:
            self.lock_status(True)

            if not self.check_connection():
                continue

            if self.received_all_information:
                logger.info("{0}: Reception completed: Time: {1:.2f}s.".format(
                    self.thread_tag, self.total_time))
                logger.debug("{0}: Reception completed: Size: {2}B.".format(
                    self.thread_tag, self.total_time, self.size_received))
                try:
                    result = self.process_file()
                    if result:
                        logger.info("{0}: Result: Successfully.".format(
                            self.thread_tag))
                    else:
                        logger.error("{0}: Result: Error.".format(
                            self.thread_tag))

                    self.unlock_and_stop(reason="task performed",
                                         send_err_request=False)
                except Exception as e:
                    logger.error("{0}: Result: Unknown error: {1}.".format(
                        self.thread_tag, e))
                    self.unlock_and_stop(reason="error")

            elif self.received_error:
                logger.error(
                    "{0}: An error took place during file reception.".format(
                        self.thread_tag))
                self.unlock_and_stop(reason="error")

            else:  # receiving file
                try:
                    try:
                        command, data = self.command_queue.get(block=True,
                                                               timeout=1)
                        self.n_get_timeouts = 0
                    except Empty:
                        self.n_get_timeouts += 1
                        # wait before raising the exception but
                        # check while conditions every second
                        # to stop the thread if a Ctrl+C is received
                        if self.n_get_timeouts > self.max_time_receiving_file:
                            raise Exception("No file command was received")
                        else:
                            continue

                    self.process_file_cmd(command, data)
                except Exception as e:
                    logger.error(
                        "{0}: Unknown error in process_file_cmd: {1}.".format(
                            self.thread_tag, e))
                    self.unlock_and_stop(reason="error")

            time.sleep(self.interval_file_transfer_receive)

        logger.info("{0}: End.".format(self.thread_tag))

    # New methods
    def unlock_and_stop(self, reason, send_err_request=None):
        """
        Releases a lock before stopping the thread

        :param reason: Reason why this function was called. Only for logger purposes.
        :param send_err_request: Whether to send an error request. Only used in master nodes.
        """
        self.lock_status(False)
        self.stop()

    def check_connection(self):
        """
        Check if the node is connected. Only defined in client nodes.
        """
        raise NotImplementedError

    def lock_status(self, status):
        """
        Acquires / Releases a lock.

        :param status: flag to indicate whether release or acquire the lock.
        """
        raise NotImplementedError

    def process_file(self):
        """
        Method which defines how to process a file once it's been received.
        """
        raise NotImplementedError

    def set_command(self, command, data):
        """
        Adds a received command to the command queue

        :param command: received command
        :param data: received data (filename, file chunk, file md5...)
        """
        split_data = data.split(b' ', 1)
        local_data = split_data[1] if len(split_data) > 1 else None
        self.command_queue.put((command, local_data))

    def process_file_cmd(self, command, data):
        """
        Process the commands received in the command queue
        """
        try:
            if command == "file_open":
                self.size_received = 0
                logger.debug("{0}: Opening file.".format(self.thread_tag))
                self.start_time = time.time()
                self.file_open()
            elif command == "file_update":
                logger.debug("{0}: Updating file.".format(self.thread_tag))
                self.file_update(data)
            elif command == "file_close":
                logger.debug("{0}: Closing file.".format(self.thread_tag))
                self.file_close(data)
                logger.debug("{0}: File closed.".format(self.thread_tag))
                self.end_time = time.time()
                self.total_time = self.end_time - self.start_time
                self.received_all_information = True
        except Exception as e:
            logger.error("{0}: '{1}'.".format(self.thread_tag, e))
            self.received_error = True

    def file_open(self):
        """
        Start the protocol of receiving a file. Create a new file
        """
        # Create the file
        self.filename = "{}/queue/cluster/{}/{}.tmp".format(
            common.ossec_path, self.name, self.id)
        logger.debug2("{0}: Creating file {1}".format(self.thread_tag,
                                                      self.filename))
        self.f = open(self.filename, 'wb')
        logger.debug2("{}: File {} created successfully.".format(
            self.thread_tag, self.filename))

    def file_update(self, chunk):
        """
        Continue the protocol of receiving a file. Append data

        :param chunk: data received from socket

        This data must be:
            - chunk
        """
        # Open the file
        self.f.write(chunk)
        self.size_received += len(chunk)

    def file_close(self, md5_sum):
        """
        Ends the protocol of receiving a file

        :param md5_sum: data received from socket

        This data must be:
            - MD5 sum
        """
        # compare local file's sum with received sum
        self.f.close()
        local_md5_sum = self.manager_handler.compute_md5(self.filename)
        if local_md5_sum != md5_sum.decode():
            error_msg = "Checksum of received file {} is not correct. Expected {} / Found {}".\
                            format(self.filename, md5_sum, local_md5_sum)
            os.remove(self.filename)
            raise Exception(error_msg)

        logger.debug2("{0}: File {1} received successfully".format(
            self.thread_tag, self.filename))
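
The receive loop above uses a one-second get timeout as a heartbeat, so the thread keeps re-checking its stop flag even when no commands arrive. The idiom condensed into a sketch (queue, stopper and handler names are illustrative):

from Queue import Queue, Empty

def consume_with_heartbeat(command_queue, stopper, max_timeouts=60):
    timeouts = 0
    while not stopper.is_set():
        try:
            command, data = command_queue.get(block=True, timeout=1)
            timeouts = 0
        except Empty:
            timeouts += 1
            if timeouts > max_timeouts:
                raise Exception("No command was received in time")
            continue  # loop back and re-check stopper.is_set()
        handle(command, data)  # hypothetical per-command handler
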
Exemple #36
0
def find_parameters(dataset_pathname, options=''):
    def update_param(c, g, rate, best_c, best_g, best_rate, worker, resumed):
        if (rate > best_rate) or (rate == best_rate and g == best_g
                                  and c < best_c):
            best_rate, best_c, best_g = rate, c, g
        stdout_str = '[{0}] {1} {2} (best '.format(
            worker, ' '.join(str(x) for x in [c, g] if x is not None), rate)
        output_str = ''
        if c is not None:
            stdout_str += 'c={0}, '.format(2.0**best_c)
            output_str += 'log2c={0} '.format(c)
        if g is not None:
            stdout_str += 'g={0}, '.format(2.0**best_g)
            output_str += 'log2g={0} '.format(g)
        stdout_str += 'rate={0})'.format(best_rate)
        print(stdout_str)
        if options.out_pathname and not resumed:
            output_str += 'rate={0}\n'.format(rate)
            result_file.write(output_str)
            result_file.flush()

        return best_c, best_g, best_rate

    options = GridOption(dataset_pathname, options)

    if options.gnuplot_pathname:
        gnuplot = Popen(options.gnuplot_pathname,
                        stdin=PIPE,
                        stdout=PIPE,
                        stderr=PIPE).stdin
    else:
        gnuplot = None

    # put jobs in queue

    jobs, resumed_jobs = calculate_jobs(options)
    job_queue = Queue(0)
    result_queue = Queue(0)

    for (c, g) in resumed_jobs:
        result_queue.put(('resumed', c, g, resumed_jobs[(c, g)]))

    for line in jobs:
        for (c, g) in line:
            if (c, g) not in resumed_jobs:
                job_queue.put((c, g))

    # hack the queue to become a stack --
    # this is important when some thread
    # failed and re-put a job. If we still
    # use FIFO, the job will be put
    # into the end of the queue, and the graph
    # will only be updated in the end

    job_queue._put = job_queue.queue.appendleft

    # fire telnet workers

    if telnet_workers:
        nr_telnet_worker = len(telnet_workers)
        username = getpass.getuser()
        password = getpass.getpass()
        for host in telnet_workers:
            worker = TelnetWorker(host, job_queue, result_queue, host,
                                  username, password, options)
            worker.start()

    # fire ssh workers

    if ssh_workers:
        for host in ssh_workers:
            worker = SSHWorker(host, job_queue, result_queue, host, options)
            worker.start()

    # fire local workers

    for i in range(nr_local_worker):
        worker = LocalWorker('local', job_queue, result_queue, options)
        worker.start()

    # gather results

    done_jobs = {}

    if options.out_pathname:
        if options.resume_pathname:
            result_file = open(options.out_pathname, 'a')
        else:
            result_file = open(options.out_pathname, 'w')

    db = []
    best_rate = -1
    best_c, best_g = None, None

    for (c, g) in resumed_jobs:
        rate = resumed_jobs[(c, g)]
        best_c, best_g, best_rate = update_param(c, g, rate, best_c, best_g,
                                                 best_rate, 'resumed', True)

    for line in jobs:
        for (c, g) in line:
            while (c, g) not in done_jobs:
                (worker, c1, g1, rate1) = result_queue.get()
                done_jobs[(c1, g1)] = rate1
                if (c1, g1) not in resumed_jobs:
                    best_c, best_g, best_rate = update_param(
                        c1, g1, rate1, best_c, best_g, best_rate, worker,
                        False)
            db.append((c, g, done_jobs[(c, g)]))
        if gnuplot and options.grid_with_c and options.grid_with_g:
            redraw(db, [best_c, best_g, best_rate], gnuplot, options)
            redraw(db, [best_c, best_g, best_rate], gnuplot, options, True)

    if options.out_pathname:
        result_file.close()
    job_queue.put((WorkerStopToken, None))
    best_param, best_cg = {}, []
    if best_c is not None:
        best_param['c'] = 2.0**best_c
        best_cg += [2.0**best_c]
    if best_g is not None:
        best_param['g'] = 2.0**best_g
        best_cg += [2.0**best_g]
    print('{0} {1}'.format(' '.join(map(str, best_cg)), best_rate))

    return best_rate, best_param
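
The job_queue._put monkey-patch above turns the FIFO queue into a stack, so a job re-queued by a failed worker is retried first. The standard library's LifoQueue gives the same ordering without reaching into queue internals, as this sketch shows:

from Queue import LifoQueue

job_queue = LifoQueue()
job_queue.put(('job-1', 0))
job_queue.put(('job-2', 1))
assert job_queue.get() == ('job-2', 1)  # most recently queued job first
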
Exemple #37
0
def test_identify_plugin(name,
                         tests,
                         modify_plugin=lambda plugin: None,
                         fail_missing_meta=True):  # {{{
    '''
    :param name: Plugin name
    :param tests: List of 2-tuples. Each two tuple is of the form (args,
                  test_funcs). args is a dict of keyword arguments to pass to
                  the identify method. test_funcs are callables that accept a
                  Metadata object and return True iff the object passes the
                  test.
    '''
    plugin = None
    for x in all_metadata_plugins():
        if x.name == name and 'identify' in x.capabilities:
            plugin = x
            break
    modify_plugin(plugin)
    prints('Testing the identify function of', plugin.name)
    prints('Using extra headers:', plugin.browser.addheaders)

    tdir, lf, log, abort = init_test(plugin.name)
    prints('Log saved to', lf)

    times = []
    for kwargs, test_funcs in tests:
        prints('Running test with:', kwargs)
        rq = Queue()
        args = (log, rq, abort)
        start_time = time.time()
        plugin.running_a_test = True
        try:
            err = plugin.identify(*args, **kwargs)
        finally:
            plugin.running_a_test = False
        total_time = time.time() - start_time
        times.append(total_time)
        if err is not None:
            prints('identify returned an error for args', args)
            prints(err)
            break

        results = []
        while True:
            try:
                results.append(rq.get_nowait())
            except Empty:
                break

        prints('Found', len(results), 'matches:', end=' ')
        prints('Smaller relevance means better match')

        results.sort(key=plugin.identify_results_keygen(
            title=kwargs.get('title', None),
            authors=kwargs.get('authors', None),
            identifiers=kwargs.get('identifiers', {})))

        for i, mi in enumerate(results):
            prints('*' * 30, 'Relevance:', i, '*' * 30)
            prints(mi)
            prints('\nCached cover URL    :',
                   plugin.get_cached_cover_url(mi.identifiers))
            prints('*' * 75, '\n\n')

        possibles = []
        for mi in results:
            test_failed = False
            for tfunc in test_funcs:
                if not tfunc(mi):
                    test_failed = True
                    break
            if not test_failed:
                possibles.append(mi)

        if not possibles:
            prints('ERROR: No results that passed all tests were found')
            prints('Log saved to', lf)
            raise SystemExit(1)

        good = [x for x in possibles if plugin.test_fields(x) is None]
        if not good:
            prints('Failed to find', plugin.test_fields(possibles[0]))
            if fail_missing_meta:
                raise SystemExit(1)

        if results[0] is not possibles[0]:
            prints('Most relevant result failed the tests')
            raise SystemExit(1)

        if 'cover' in plugin.capabilities:
            rq = Queue()
            mi = results[0]
            plugin.download_cover(log,
                                  rq,
                                  abort,
                                  title=mi.title,
                                  authors=mi.authors,
                                  identifiers=mi.identifiers)
            results = []
            while True:
                try:
                    results.append(rq.get_nowait())
                except Empty:
                    break
            if not results and fail_missing_meta:
                prints('Cover download failed')
                raise SystemExit(1)
            elif results:
                cdata = results[0]
                cover = os.path.join(
                    tdir,
                    plugin.name.replace(' ', '') + '-%s-cover.jpg' %
                    sanitize_file_name2(mi.title.replace(' ', '_')))
                with open(cover, 'wb') as f:
                    f.write(cdata[-1])

                prints('Cover downloaded to:', cover)

                if len(cdata[-1]) < 10240:
                    prints('Downloaded cover too small')
                    raise SystemExit(1)

    prints('Average time per query', sum(times) / len(times))

    if os.stat(lf).st_size > 10:
        prints('There were some errors/warnings, see log', lf)
Exemple #38
0
class AudioConsumerTest(unittest.TestCase):
    """
    AudioConsumerTest
    """
    def setUp(self):
        self.loop = RecognizerLoop()
        self.queue = Queue()
        self.recognizer = MockRecognizer()

        self.consumer = AudioConsumer(
            self.loop.state, self.queue, self.loop,
            self.loop.wakeup_recognizer, self.loop.ww_recognizer,
            RemoteRecognizerWrapperFactory.wrap_recognizer(
                self.recognizer,
                'google'), self.loop.wakeup_prefixes, self.loop.wakeup_words)

    def __create_sample_from_test_file(self, sample_name):
        root_dir = dirname(dirname(dirname(__file__)))
        filename = join(root_dir, 'test', 'client', 'data',
                        sample_name + '.wav')
        wavfile = WavFile(filename)
        with wavfile as source:
            return AudioData(source.stream.read(), wavfile.SAMPLE_RATE,
                             wavfile.SAMPLE_WIDTH)

    def test_audio_pos_front_back(self):
        audio = self.__create_sample_from_test_file('mycroft_in_utterance')
        self.queue.put(audio)
        TRUE_POS_BEGIN = 69857 + int(WakewordExtractor.TRIM_SECONDS *
                                     audio.sample_rate * audio.sample_width)
        TRUE_POS_END = 89138 - int(WakewordExtractor.TRIM_SECONDS *
                                   audio.sample_rate * audio.sample_width)

        TOLERANCE_RANGE_FRAMES = (WakewordExtractor.MAX_ERROR_SECONDS *
                                  audio.sample_rate * audio.sample_width)

        monitor = {}
        self.recognizer.set_transcriptions(
            ["what's the weather next week", ""])

        def wakeword_callback(message):
            monitor['pos_begin'] = message.get('pos_begin')
            monitor['pos_end'] = message.get('pos_end')

        self.loop.once('recognizer_loop:wakeword', wakeword_callback)
        self.consumer.try_consume_audio()

        pos_begin = monitor.get('pos_begin')
        self.assertIsNotNone(pos_begin)
        diff = abs(pos_begin - TRUE_POS_BEGIN)
        self.assertTrue(
            diff <= TOLERANCE_RANGE_FRAMES,
            str(diff) + " is not less than " + str(TOLERANCE_RANGE_FRAMES))

        pos_end = monitor.get('pos_end')
        self.assertIsNotNone(pos_end)
        diff = abs(pos_end - TRUE_POS_END)
        self.assertTrue(
            diff <= TOLERANCE_RANGE_FRAMES,
            str(diff) + " is not less than " + str(TOLERANCE_RANGE_FRAMES))

    def test_wakeword_in_beginning(self):
        self.queue.put(self.__create_sample_from_test_file('mycroft'))
        monitor = {}
        self.recognizer.set_transcriptions(
            ["what's the weather next week", ""])

        def callback(message):
            monitor['utterances'] = message.get('utterances')

        self.loop.once('recognizer_loop:utterance', callback)
        self.consumer.try_consume_audio()
        utterances = monitor.get('utterances')
        self.assertIsNotNone(utterances)
        self.assertTrue(len(utterances) == 1)
        self.assertEquals("what's the weather next week", utterances[0])

    def test_wakeword_in_phrase(self):
        self.queue.put(self.__create_sample_from_test_file('mycroft'))
        monitor = {}
        self.recognizer.set_transcriptions(
            ["he can do other stuff too", "what's the weather in cincinnati"])

        def callback(message):
            monitor['utterances'] = message.get('utterances')

        self.loop.once('recognizer_loop:utterance', callback)
        self.consumer.try_consume_audio()
        utterances = monitor.get('utterances')
        self.assertIsNotNone(utterances)
        self.assertTrue(len(utterances) == 2)
        self.assertEquals("he can do other stuff too", utterances[0])
        self.assertEquals("what's the weather in cincinnati", utterances[1])

    def test_call_and_response(self):
        self.queue.put(self.__create_sample_from_test_file('mycroft'))
        monitor = {}
        self.recognizer.set_transcriptions(["mycroft", ""])

        def wakeword_callback(message):
            monitor['wakeword'] = message.get('utterance')

        def utterance_callback(message):
            monitor['utterances'] = message.get('utterances')

        self.loop.once('recognizer_loop:wakeword', wakeword_callback)
        self.consumer.try_consume_audio()

        self.assertIsNotNone(monitor.get('wakeword'))

        self.queue.put(self.__create_sample_from_test_file('mycroft'))
        self.recognizer.set_transcriptions(
            ["what's the weather next week", ""])
        self.loop.once('recognizer_loop:utterance', utterance_callback)
        self.consumer.try_consume_audio()

        utterances = monitor.get('utterances')
        self.assertIsNotNone(utterances)
        self.assertTrue(len(utterances) == 1)
        self.assertEquals("what's the weather next week", utterances[0])

    def test_ignore_wakeword_when_sleeping(self):
        self.queue.put(self.__create_sample_from_test_file('mycroft'))
        self.loop.sleep()
        monitor = {}
        self.recognizer.set_transcriptions(["", ""])

        def wakeword_callback(message):
            monitor['wakeword'] = message.get('utterance')

        self.loop.once('recognizer_loop:wakeword', wakeword_callback)
        self.consumer.try_consume_audio()

        self.assertIsNone(monitor.get('wakeword'))
        self.assertTrue(self.loop.state.sleeping)
Exemple #39
0
class EscposDriver(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.queue = Queue()
        self.lock  = Lock()
        self.status = {'status':'connecting', 'messages':[]}

    def connected_usb_devices(self):
        connected = []

        # printers can either define bDeviceClass=7, or they can define one of
        # their interfaces with bInterfaceClass=7. This class checks for both.
        class FindUsbClass(object):
            def __init__(self, usb_class):
                self._class = usb_class
            def __call__(self, device):
                # first, let's check the device
                if device.bDeviceClass == self._class:
                    return True
                # traverse all devices and look through their interfaces to
                # find a matching class
                for cfg in device:
                    intf = usb.util.find_descriptor(cfg, bInterfaceClass=self._class)

                    if intf is not None:
                        return True

                return False

        printers = usb.core.find(find_all=True, custom_match=FindUsbClass(7))

        # if no printers are found after this step we will take the
        # first epson or star device we can find.
        # epson
        if not printers:
            printers = usb.core.find(find_all=True, idVendor=0x04b8)
        # star
        if not printers:
            printers = usb.core.find(find_all=True, idVendor=0x0519)

        for printer in printers:
            connected.append({
                'vendor': printer.idVendor,
                'product': printer.idProduct,
                'name': usb.util.get_string(printer, 256, printer.iManufacturer) + " " + usb.util.get_string(printer, 256, printer.iProduct)
            })

        return connected

    def lockedstart(self):
        with self.lock:
            if not self.isAlive():
                self.daemon = True
                self.start()
    
    def get_escpos_printer(self):
  
        printers = self.connected_usb_devices()
        if len(printers) > 0:
            print_dev = Usb(printers[0]['vendor'], printers[0]['product'])
            self.set_status(
                'connected',
                "Connected to %s (in=0x%02x,out=0x%02x)" % (printers[0]['name'], print_dev.in_ep, print_dev.out_ep)
            )
            return print_dev
        else:
            self.set_status('disconnected','Printer Not Found')
            return None

    def get_status(self):
        self.push_task('status')
        return self.status

    def open_cashbox(self,printer):
        printer.cashdraw(2)
        printer.cashdraw(5)

    def set_status(self, status, message = None):
        _logger.info(status+' : '+ (message or 'no message'))
        if status == self.status['status']:
            if message is not None and (len(self.status['messages']) == 0 or message != self.status['messages'][-1]):
                self.status['messages'].append(message)
        else:
            self.status['status'] = status
            if message:
                self.status['messages'] = [message]
            else:
                self.status['messages'] = []

        if status == 'error' and message:
            _logger.error('ESC/POS Error: '+message)
        elif status == 'disconnected' and message:
            _logger.warning('ESC/POS Device Disconnected: '+message)

    def run(self):
        printer = None
        if not escpos:
            _logger.error('ESC/POS cannot initialize, please verify system dependencies.')
            return
        while True:
            try:
                error = True
                timestamp, task, data = self.queue.get(True)

                printer = self.get_escpos_printer()

                if printer is None:
                    if task != 'status':
                        self.queue.put((timestamp,task,data))
                    error = False
                    time.sleep(5)
                    continue
                elif task == 'receipt': 
                    if timestamp >= time.time() - 1 * 60 * 60:
                        self.print_receipt_body(printer,data)
                        printer.cut()
                elif task == 'xml_receipt':
                    if timestamp >= time.time() - 1 * 60 * 60:
                        printer.receipt(data)
                elif task == 'cashbox':
                    if timestamp >= time.time() - 12:
                        self.open_cashbox(printer)
                elif task == 'printstatus':
                    self.print_status(printer)
                elif task == 'status':
                    pass
                error = False

            except NoDeviceError as e:
                print "No device found %s" %str(e)
            except HandleDeviceError as e:
                print "Impossible to handle the device due to previous error %s" % str(e)
            except TicketNotPrinted as e:
                print "The ticket does not seems to have been fully printed %s" % str(e)
            except NoStatusError as e:
                print "Impossible to get the status of the printer %s" % str(e)
            except Exception as e:
                self.set_status('error', str(e))
                errmsg = str(e) + '\n' + '-'*60+'\n' + traceback.format_exc() + '-'*60 + '\n'
                _logger.error(errmsg)
            finally:
                if error:
                    self.queue.put((timestamp, task, data))
                if printer:
                    printer.close()

    def push_task(self,task, data = None):
        self.lockedstart()
        self.queue.put((time.time(),task,data))

    def print_status(self,eprint):
        localips = ['0.0.0.0','127.0.0.1','127.0.1.1']
        hosting_ap = os.system('pgrep hostapd') == 0
        ssid = subprocess.check_output('iwconfig 2>&1 | grep \'ESSID:"\' | sed \'s/.*"\\(.*\\)"/\\1/\'', shell=True).rstrip()
        mac = subprocess.check_output('ifconfig | grep -B 1 \'inet addr\' | grep -o \'HWaddr .*\' | sed \'s/HWaddr //\'', shell=True).rstrip()
        ips =  [ c.split(':')[1].split(' ')[0] for c in commands.getoutput("/sbin/ifconfig").split('\n') if 'inet addr' in c ]
        ips =  [ ip for ip in ips if ip not in localips ] 
        eprint.text('\n\n')
        eprint.set(align='center',type='b',height=2,width=2)
        eprint.text('PosBox Status\n')
        eprint.text('\n')
        eprint.set(align='center')

        if hosting_ap:
            eprint.text('Wireless network:\nPosbox\n\n')
        elif ssid:
            eprint.text('Wireless network:\n' + ssid + '\n\n')

        if len(ips) == 0:
            eprint.text('ERROR: Could not connect to LAN\n\nPlease check that the PosBox is correc-\ntly connected with a network cable,\n that the LAN is setup with DHCP, and\nthat network addresses are available')
        elif len(ips) == 1:
            eprint.text('IP Address:\n'+ips[0]+'\n')
        else:
            eprint.text('IP Addresses:\n')
            for ip in ips:
                eprint.text(ip+'\n')

        if len(ips) >= 1:
            eprint.text('\nMAC Address:\n' + mac + '\n')
            eprint.text('\nHomepage:\nhttp://'+ips[0]+':8069\n')

        eprint.text('\n\n')
        eprint.cut()

    def print_receipt_body(self,eprint,receipt):

        def check(string):
            return string != True and bool(string) and string.strip()
        
        def price(amount):
            return ("{0:."+str(receipt['precision']['price'])+"f}").format(amount)
        
        def money(amount):
            return ("{0:."+str(receipt['precision']['money'])+"f}").format(amount)

        def quantity(amount):
            if math.floor(amount) != amount:
                return ("{0:."+str(receipt['precision']['quantity'])+"f}").format(amount)
            else:
                return str(amount)

        def printline(left, right='', width=40, ratio=0.5, indent=0):
            lwidth = int(width * ratio) 
            rwidth = width - lwidth 
            lwidth = lwidth - indent
            
            left = left[:lwidth]
            if len(left) != lwidth:
                left = left + ' ' * (lwidth - len(left))

            right = right[-rwidth:]
            if len(right) != rwidth:
                right = ' ' * (rwidth - len(right)) + right

            return ' ' * indent + left + right + '\n'
        
        def print_taxes():
            taxes = receipt['tax_details']
            for tax in taxes:
                eprint.text(printline(tax['tax']['name'],price(tax['amount']), width=40,ratio=0.6))

        # Receipt Header
        if receipt['company']['logo']:
            eprint.set(align='center')
            eprint.print_base64_image(receipt['company']['logo'])
            eprint.text('\n')
        else:
            eprint.set(align='center',type='b',height=2,width=2)
            eprint.text(receipt['company']['name'] + '\n')

        eprint.set(align='center',type='b')
        if check(receipt['company']['contact_address']):
            eprint.text(receipt['company']['contact_address'] + '\n')
        if check(receipt['company']['phone']):
            eprint.text('Tel:' + receipt['company']['phone'] + '\n')
        if check(receipt['company']['vat']):
            eprint.text('VAT:' + receipt['company']['vat'] + '\n')
        if check(receipt['company']['email']):
            eprint.text(receipt['company']['email'] + '\n')
        if check(receipt['company']['website']):
            eprint.text(receipt['company']['website'] + '\n')
        if check(receipt['header']):
            eprint.text(receipt['header']+'\n')
        if check(receipt['cashier']):
            eprint.text('-'*32+'\n')
            eprint.text('Served by '+receipt['cashier']+'\n')

        # Orderlines
        eprint.text('\n\n')
        eprint.set(align='center')
        for line in receipt['orderlines']:
            pricestr = price(line['price_display'])
            if line['discount'] == 0 and line['unit_name'] == 'Unit(s)' and line['quantity'] == 1:
                eprint.text(printline(line['product_name'],pricestr,ratio=0.6))
            else:
                eprint.text(printline(line['product_name'],ratio=0.6))
                if line['discount'] != 0:
                    eprint.text(printline('Discount: '+str(line['discount'])+'%', ratio=0.6, indent=2))
                if line['unit_name'] == 'Unit(s)':
                    eprint.text( printline( quantity(line['quantity']) + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))
                else:
                    eprint.text( printline( quantity(line['quantity']) + line['unit_name'] + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))

        # Subtotal if the taxes are not included
        taxincluded = True
        if money(receipt['subtotal']) != money(receipt['total_with_tax']):
            eprint.text(printline('', '-------'))
            eprint.text(printline(_('Subtotal'),money(receipt['subtotal']),width=40, ratio=0.6))
            print_taxes()
            #eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))
            taxincluded = False


        # Total
        eprint.text(printline('', '-------'))
        eprint.set(align='center',height=2)
        eprint.text(printline(_('         TOTAL'),money(receipt['total_with_tax']),width=40, ratio=0.6))
        eprint.text('\n\n')
        
        # Paymentlines
        eprint.set(align='center')
        for line in receipt['paymentlines']:
            eprint.text(printline(line['journal'], money(line['amount']), ratio=0.6))

        eprint.text('\n')
        eprint.set(align='center',height=2)
        eprint.text(printline(_('        CHANGE'),money(receipt['change']),width=40, ratio=0.6))
        eprint.set(align='center')
        eprint.text('\n')

        # Extra Payment info
        if receipt['total_discount'] != 0:
            eprint.text(printline(_('Discounts'),money(receipt['total_discount']),width=40, ratio=0.6))
        if taxincluded:
            print_taxes()
            #eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))

        # Footer
        if check(receipt['footer']):
            eprint.text('\n'+receipt['footer']+'\n\n')
        eprint.text(receipt['name']+'\n')
        eprint.text(      str(receipt['date']['date']).zfill(2)
                    +'/'+ str(receipt['date']['month']+1).zfill(2)
                    +'/'+ str(receipt['date']['year']).zfill(4)
                    +' '+ str(receipt['date']['hour']).zfill(2)
                    +':'+ str(receipt['date']['minute']).zfill(2) )
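
The driver's run loop re-queues the current task whenever printing raises, so a transient printer disconnect does not lose work. A stripped-down sketch of that retry pattern (device and handler names are hypothetical):

from Queue import Queue
import time

def worker(queue):
    while True:
        timestamp, task, data = queue.get(True)
        error = True
        try:
            device = acquire_device()  # hypothetical: may return None
            if device is None:
                queue.put((timestamp, task, data))  # retry once reconnected
                error = False
                time.sleep(5)
                continue
            handle(device, task, data)  # hypothetical task dispatch
            error = False
        except Exception as e:
            log_error(e)  # hypothetical logging
        finally:
            if error:
                queue.put((timestamp, task, data))  # failed task goes back
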
class ThreadedSocketIOClient(SocketIOClient):
    """The upstream amitu socket client can only send one message,
    and then shuts down the connection.

    This threaded client can handle a sequential conversation consisting
    of multiple messages.

    Example usage:

    rcvd = []
    def myfunc(msg):
        rcvd.append(msg)
        # do something more useful

    sockio = ThreadedSocketIOClient(server, port)
    # first message
    socketio('5:::{"foo":"bar"}', myfunc)
    # second message
    socketio('5:::{"bar":"baz"}', myfunc)

    # wait for callbacks
    while len(rcvd) < 2:
        time.sleep(1)

    # shutdown
    sockio.close()
    """
    def __init__(self, server, port, protocol="ws", *args, **kwargs):
        self._q = Queue()
        self.msg = None
        self._callback = None
        self._t = None
        super(ThreadedSocketIOClient, self).__init__(server, port, protocol,
                                                     *args, **kwargs)

    def __call__(self, msg, callback):
        logger.debug("%s.__call__::%s, %s", self.__class__.__name__, msg,
                     callback)
        self._q.put((msg, callback))
        if self._t is None:
            self.runloop()

    def callback(self, msg):
        logger.debug("%s.callback::calling %s with msg=%s",
                     self.__class__.__name__, self._callback, msg)
        if self._callback is not None:
            self._callback(msg)
            # re-loop
            self.runloop()
        else:
            raise AttributeError("No callback to handle message::%s" % msg)

    def runloop(self):
        logger.debug("%s.runloop", self.__class__.__name__)
        # blocks until next message or terminator
        self.msg, self._callback = self._q.get()
        logger.debug("%s.runloop::callback set to %s", self.__class__.__name__,
                     self._callback)
        # initial loop
        if self._t is None:
            self._t = threading.Thread(target=self._run)
            self._t.start()
        # terminator
        elif self.msg is None:
            self._close()
        else:
            self.send_message(self.msg)

    def _run(self):
        self.on("connect", self.my_connect)
        self.on("message", self.my_message)
        self.on("disconnect", self.my_disconnect)
        self.on("error", self.my_error)
        self.on("timeout", self.my_timeout)
        # fixes connection reset by peer errors
        time.sleep(0.001)
        self.run()

    def my_error(self, error):
        self.my_disconnect('big error %s, giving up' % error)

    def my_timeout(self):
        self.my_disconnect('timeout, giving up')

    def my_connect(self):
        self.send_message(self.msg)

    def send_message(self, msg):
        logger.debug("%s.send_message::%s", self.__class__.__name__, msg)
        self.send(msg)

    def my_message(self, msg):
        logger.debug("%s.my_message::> %s", self.__class__.__name__, msg)
        message = msg.split(':')
        if message[0] == "5":
            my_msg = json.loads(':'.join(message[3:]))
            self.callback(my_msg)

    def my_disconnect(self, msg=None):
        self.close()

    def close(self):
        logger.debug("%s.close", self.__class__.__name__)
        self._q.put((None, None))

    def _close(self):
        self.sock.settimeout(1)
        self.sock.shutdown(socket.SHUT_RDWR)
        # no sys.exit!

    def on_server(self, data):
        pass

    def onclose(self):
        logger.debug("%s.onclose" % (self.__class__.__name__))
        super(ThreadedSocketIOClient, self).onclose()
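
close() above ends the conversation by pushing a (None, None) sentinel through the same queue the run loop blocks on, the classic poison-pill shutdown. The pattern in isolation, as a sketch:

from Queue import Queue
from threading import Thread

q = Queue()

def loop():
    while True:
        msg, callback = q.get()  # blocks until work or sentinel arrives
        if msg is None:
            break                # poison pill: exit cleanly
        callback(msg)

t = Thread(target=loop)
t.start()
q.put(('hello', lambda m: None))
q.put((None, None))              # request shutdown
t.join()
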
Exemple #41
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
import sys
import json
import requests
import threading
import argparse
from Queue import Queue
from bs4 import BeautifulSoup

RESULT = {}

share_queue = Queue()

reload(sys)
sys.setdefaultencoding("utf-8")

header = {
    'User-Agent':
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:52.0) Gecko/20100101',
    "Accept":
    "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Connection": "close",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8"
}

proxies = {'https': 'socks5://127.0.0.1:1080'}
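
The module above stops after defining share_queue and the request header; a sketch of the worker loop such scanners typically attach, using the imports already present (the check function is hypothetical):

def worker():
    while True:
        target = share_queue.get()
        try:
            check(target)  # hypothetical per-target request/scan
        finally:
            share_queue.task_done()

for _ in range(10):
    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()

share_queue.join()  # block until every queued target is processed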

Exemple #42
0
 def __init__(self):
     Thread.__init__(self)
     self.queue = Queue()
     self.lock  = Lock()
     self.status = {'status':'connecting', 'messages':[]}
Exemple #43
0
class Updater:
    """
    This class, which employs the Dispatcher class, provides a frontend to
    telegram.Bot to the programmer, so they can focus on coding the bot. Its
    purpose is to receive the updates from Telegram and to deliver them to said
    dispatcher. It also runs in a separate thread, so the user can interact
    with the bot, for example on the command line. The dispatcher supports
    handlers for different kinds of data: Updates from Telegram, basic text
    commands and even arbitrary types.
    The updater can be started as a polling service or, for production, use a
    webhook to receive updates. This is achieved using the WebhookServer and
    WebhookHandler classes.


    Attributes:

    Args:
        token (Optional[str]): The bot's token given by the @BotFather
        base_url (Optional[str]):
        workers (Optional[int]): Number of threads in the thread pool for
            functions decorated with @run_async
        bot (Optional[Bot]):

    Raises:
        ValueError: If both `token` and `bot` are passed or none of them.
    """
    def __init__(self,
                 token=None,
                 base_url=None,
                 workers=4,
                 bot=None,
                 job_queue_tick_interval=1.0):
        if (token is None) and (bot is None):
            raise ValueError('`token` or `bot` must be passed')
        if (token is not None) and (bot is not None):
            raise ValueError('`token` and `bot` are mutually exclusive')

        if bot is not None:
            self.bot = bot
        else:
            self.bot = Bot(token, base_url)
        self.update_queue = Queue()
        self.job_queue = JobQueue(self.bot, job_queue_tick_interval)
        self.dispatcher = Dispatcher(self.bot,
                                     self.update_queue,
                                     workers=workers)
        self.last_update_id = 0
        self.logger = logging.getLogger(__name__)
        self.running = False
        self.is_idle = False
        self.httpd = None
        self.__lock = Lock()

    def start_polling(self, poll_interval=0.0, timeout=10, network_delay=2):
        """
        Starts polling updates from Telegram.

        Args:
            poll_interval (Optional[float]): Time to wait between polling
                updates from Telegram in seconds. Default is 0.0
            timeout (Optional[float]): Passed to Bot.getUpdates
            network_delay (Optional[float]): Passed to Bot.getUpdates

        Returns:
            Queue: The update queue that can be filled from the main thread
        """

        with self.__lock:
            if not self.running:
                self.running = True

                # Create Thread objects
                dispatcher_thread = Thread(target=self.dispatcher.start,
                                           name="dispatcher")
                updater_thread = Thread(target=self._start_polling,
                                        name="updater",
                                        args=(poll_interval, timeout,
                                              network_delay))

                # Start threads
                dispatcher_thread.start()
                updater_thread.start()

                # Return the update queue so the main thread can insert updates
                return self.update_queue

    def start_webhook(self,
                      listen='127.0.0.1',
                      port=80,
                      url_path='',
                      cert=None,
                      key=None):
        """
        Starts a small http server to listen for updates via webhook. If cert
        and key are not provided, the webhook will be started directly on
        http://listen:port/url_path, so SSL can be handled by another
        application. Else, the webhook will be started on
        https://listen:port/url_path

        Args:
            listen (Optional[str]): IP-Address to listen on
            port (Optional[int]): Port the bot should be listening on
            url_path (Optional[str]): Path inside url
            cert (Optional[str]): Path to the SSL certificate file
            key (Optional[str]): Path to the SSL key file

        Returns:
            Queue: The update queue that can be filled from the main thread
        """

        with self.__lock:
            if not self.running:
                self.running = True

                # Create Thread objects
                dispatcher_thread = Thread(target=self.dispatcher.start,
                                           name="dispatcher")
                updater_thread = Thread(target=self._start_webhook,
                                        name="updater",
                                        args=(listen, port, url_path, cert,
                                              key))

                # Start threads
                dispatcher_thread.start()
                updater_thread.start()

                # Return the update queue so the main thread can insert updates
                return self.update_queue

    def _start_polling(self, poll_interval, timeout, network_delay):
        """
        Thread target of thread 'updater'. Runs in background, pulls
        updates from Telegram and inserts them in the update queue of the
        Dispatcher.
        """

        current_interval = poll_interval
        self.logger.info('Updater thread started')

        # Remove webhook
        self.bot.setWebhook(webhook_url=None)

        while self.running:
            try:
                updates = self.bot.getUpdates(self.last_update_id,
                                              timeout=timeout,
                                              network_delay=network_delay)
                if not self.running:
                    if len(updates) > 0:
                        self.logger.info('Updates ignored and will be pulled '
                                         'again on restart.')
                    break

                for update in updates:
                    self.update_queue.put(update)
                    self.last_update_id = update.update_id + 1
                    current_interval = poll_interval

                sleep(current_interval)
            except TelegramError as te:
                # Put the error into the update queue and let the Dispatcher
                # broadcast it
                self.update_queue.put(te)
                sleep(current_interval)

            except URLError as e:
                self.logger.error("Error while getting Updates: %s" % e)
                # increase wait time on subsequent errors, up to 30 seconds
                if current_interval == 0:
                    current_interval = 1
                else:
                    current_interval = min(current_interval * 1.5, 30)

        self.logger.info('Updater thread stopped')

    def _start_webhook(self, listen, port, url_path, cert, key):
        self.logger.info('Updater thread started')
        use_ssl = cert is not None and key is not None
        url_path = "/%s" % url_path

        # Create and start server
        self.httpd = WebhookServer((listen, port), WebhookHandler,
                                   self.update_queue, url_path)

        if use_ssl:
            # Check SSL-Certificate with openssl, if possible
            try:
                exit_code = subprocess.call(
                    ["openssl", "x509", "-text", "-noout", "-in", cert],
                    stdout=open(os.devnull, 'wb'),
                    stderr=subprocess.STDOUT)
            except OSError:
                exit_code = 0

            if exit_code == 0:
                try:
                    self.httpd.socket = ssl.wrap_socket(self.httpd.socket,
                                                        certfile=cert,
                                                        keyfile=key,
                                                        server_side=True)
                except ssl.SSLError as error:
                    raise TelegramError(str(error))
            else:
                raise TelegramError('SSL Certificate invalid')

        self.httpd.serve_forever(poll_interval=1)
        self.logger.info('Updater thread stopped')

    def stop(self):
        """
        Stops the polling/webhook thread, the dispatcher and the job queue
        """

        self.job_queue.stop()
        with self.__lock:
            if self.running:
                self.running = False
                self.logger.info('Stopping Updater and Dispatcher...')
                self.logger.debug('This might take a long time if you set a '
                                  'high value as polling timeout.')

                if self.httpd:
                    self.logger.info(
                        'Waiting for current webhook connection to be '
                        'closed... Send a Telegram message to the bot to exit '
                        'immediately.')
                    self.httpd.shutdown()
                    self.httpd = None

                self.logger.debug("Requesting Dispatcher to stop...")
                self.dispatcher.stop()
                while dispatcher.running_async > 0:
                    sleep(1)

                self.logger.debug("Dispatcher stopped.")

    def signal_handler(self, signum, frame):
        self.is_idle = False
        self.stop()

    def idle(self, stop_signals=(SIGINT, SIGTERM, SIGABRT)):
        """
        Blocks until one of the signals is received, then stops the updater

        Args:
            stop_signals: Iterable containing signals from the signal module
                that should be subscribed to. Updater.stop() will be called on
                receiving one of those signals. Defaults to (SIGINT, SIGTERM,
                SIGABRT)
        """
        for sig in stop_signals:
            signal(sig, self.signal_handler)

        self.is_idle = True

        while self.is_idle:
            sleep(1)
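A minimal usage sketch for the Updater API above, assuming Updater is importable; 'TOKEN' and the cert/key paths are placeholders, not values from the source. start_polling returns the update queue and idle() blocks until a stop signal arrives:

# Sketch only: 'TOKEN' is a placeholder, cert/key paths are hypothetical.
updater = Updater(token='TOKEN')
update_queue = updater.start_polling(poll_interval=1.0, timeout=10)
# Webhook alternative (instead of polling):
# updater.start_webhook(listen='0.0.0.0', port=8443,
#                       cert='cert.pem', key='key.pem')
updater.idle()  # blocks until SIGINT/SIGTERM/SIGABRT, then calls stop()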
Exemple #44
0
def wsgi_init(port=8091):
    task_queue = Queue()
    socket = eventlet.listen(('', port))
    thread.start_new_thread(start_wsgi, (socket, application, task_queue))
    return (task_queue, socket)
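wsgi_init hands the listening socket to a start_wsgi helper that the excerpt does not include; a plausible sketch (assumed, built on eventlet's wsgi.server) is:

from eventlet import wsgi

def start_wsgi(socket, application, task_queue):
    # Hypothetical helper: serve the WSGI app on the listening socket.
    # In the real code, task_queue would presumably be threaded through
    # to the application for background work.
    wsgi.server(socket, application)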
Exemple #45
0
def get_channels(channels, **kwargs):
    """Multi-threaded channel query
    """
    if len(channels) == 0:
        return []

    # set up Queues
    inqueue = Queue()
    outqueue = Queue()

    # open threads
    for i in range(len(channels)):
        t = ThreadChannelQuery(inqueue, outqueue, **kwargs)
        t.setDaemon(True)
        t.start()

    # populate input queue
    for i, c in enumerate(channels):
        inqueue.put((i, c))

    # block
    inqueue.join()
    outqueue.join()
    result = []
    for i in range(len(channels)):
        c = outqueue.get()
        if isinstance(c, Exception):
            raise c
        else:
            result.append(c)
    return zip(*sorted(result, key=lambda (idx, chan): idx))[1]
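get_channels depends on a ThreadChannelQuery worker class that is not shown. Below is a minimal sketch consistent with the queue protocol above, where query_channel is an assumed helper doing the real lookup; each worker handles one (index, name) pair and calls task_done() on both queues so the two join() calls act as a barrier:

from threading import Thread

class ThreadChannelQuery(Thread):
    # Hypothetical worker; one instance is started per channel above.
    def __init__(self, inqueue, outqueue, **kwargs):
        Thread.__init__(self)
        self.inqueue = inqueue
        self.outqueue = outqueue
        self.kwargs = kwargs

    def run(self):
        idx, name = self.inqueue.get()
        try:
            # query_channel is an assumed helper, not part of the excerpt
            result = (idx, query_channel(name, **self.kwargs))
        except Exception as e:
            result = e
        self.outqueue.put(result)
        self.outqueue.task_done()
        self.inqueue.task_done()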
Exemple #46
0
def online_hosts():
    os.system('clear')
    print "\n\n"
    print "\n            \033[36m\033[1mNETWORK-SCANNER\033[0;0m\n"
    file.write('Online Hosts (0): ' + str(time.ctime()) + '\n')
    try:
        sys.stdout.write(
            "            [*]\033[94m Internet Connection Status                                      \033[0;0m:"
        )
        sys.stdout.flush()
        if socket.gethostbyname('www.google.com'):
            file.write('Connected: ' + str(time.ctime()) + '\n')
            sys.stdout.write("\033[92m     CONNECTED\033[0;0m\n")

    except Exception:
        sys.stdout.write("\033[91m         NOT CONNECTED\033[0;0m\n")
        file.write('Connection Lost: ' + str(time.ctime()) + '\n')
        sys.stdout.write(
            "            [-]\033[91mPlease Check Your Internet Connection!\033[0;0m\n\n"
        )
        time.sleep(2)
        sys.exit()

    try:

        def get_hosts(q):

            while True:
                try:
                    ip = q.get()
                    comm = [
                        'ping -c 1 -W 2 ' + ip +
                        " | grep '64 bytes from' | awk '{print $4}'"
                    ]
                    add = subprocess.Popen(comm, shell=True, stdout=PIPE)
                    address1 = add.stdout.read()
                    address = str(address1).split("\n")[0].split(":")[0]
                    try:
                        responses, unanswered = srp(
                            Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=address),
                            verbose=False,
                            timeout=2,
                            retry=5)
                        if responses:
                            for s, r in responses:
                                mac = r[Ether].src
                                sys.stdout.write(
                                    "            {:20.16}        {:20.18}".
                                    format(address, mac) + "\n")
                                sys.stdout.flush()
                                data.write(
                                    str(address) + '	:	' + str(mac) + "\n")
                                break
                        q.task_done()
                    except Exception:
                        print "            [-] \033[91mError\033[0;0m Retrieving MAC Addresses, Try Again!!"
                        time.sleep(1)
                        sys.exit()

                except Exception:
                    q.task_done()
                    pass

        q = Queue(maxsize=0)
        threads = 80

        for ip_s in range(1, 255):
            com = ["route -n | grep 'UG' | awk '{print $2}'"]
            ga = subprocess.Popen(com, stdout=PIPE, shell=True)
            gate_ip = ga.stdout.read()

            ipaddr = ".".join(str(gate_ip).split(".")[0:3]) + '.' + str(ip_s)
            q.put(ipaddr)

        print "\n            [*] Getting Information..."
        a = time.time()
        sys.stdout.write(
            "            [*] \033[94mStarting Network Scanner...                                     \033[0;0m:     "
            + str(time.ctime()))
        sys.stdout.flush()
        print "\n\n            \033[1m________________________________________\033[0;0m"
        sys.stdout.write(
            "            \033[1m\033[4mIP ADDRESS                  MAC ADDRESS \033[0;0m\n\n"
        )
        sys.stdout.flush()

        data = open(
            'bin/data/' + str(date) + '/ntwrk_sc/Ntwrk_sc_' + str(c_time) +
            '.log', 'a')
        data.write('Network Scanner: ' + str(time.ctime()))

        for i in range(threads):
            thread = Thread(target=get_hosts, args=(q, ))
            thread.setDaemon(True)
            thread.start()

        q.join()
        file.write('Online Hosts (1): ' + str(time.ctime()) + '\n')

    except Exception:
        print "\n            [-] \033[91mError\033[0;0m Scanning Network"
        Options()

    sys.stdout.write(
        "\n            [+] \033[92mSuccess: Network Scan Done!                                     \033[0;0m:     "
        + str(time.ctime()))
    sys.stdout.flush()
    d = str(time.time() - a)
    c = d[0:5]
    sys.stdout.write(
        "\n            [+] Time Elapsed!                                                   :     "
        + str(c) + " seconds" + "\n\n")
    sys.stdout.flush()
    file.write('Exit: ' + str(time.ctime()) + '\n\n')
    file.close()
    data.write('\n------------END------------\n')
    data.close()
Exemple #47
0
# Imports assumed by this excerpt (Python 2)
import sys
import time
import httplib
from Queue import Queue
from threading import Thread
from urlparse import urlparse


def getStatus(ourl):
    try:
        url = urlparse(ourl)
        conn = httplib.HTTPConnection(url.netloc)
        conn.request("HEAD", url.path)
        res = conn.getresponse()
        return res.status, ourl
    except:
        return "error", ourl


def doSomethingWithResult(status, url):
    print status, url


concurrent = 200  # assumed value; the excerpt uses 'concurrent' without defining it

def doWork():
    # worker loop: pull a URL, check it, report the result, mark the task done
    while True:
        url = q.get()
        status, url = getStatus(url)
        doSomethingWithResult(status, url)
        q.task_done()

q = Queue(concurrent * 2)
for i in range(concurrent):
    t = Thread(target=doWork)
    t.daemon = True
    t.start()

start_time = time.time()
try:
    for url in open('ip.txt'):
        q.put(url.strip())
    q.join()
except KeyboardInterrupt:
    sys.exit(1)
print("--- %s seconds ---" % (time.time() - start_time))
Exemple #48
0
 def __init__(self):
     ProxyManager.__init__(self)
     self.queue = Queue()
     self.proxy_item = dict()
Exemple #49
0
#!/usr/bin/env python
from threading import Thread
import subprocess
from Queue import Queue
import multiprocessing

num_threads = multiprocessing.cpu_count()
print("Number of available threads: ", num_threads)
queue = Queue(-1)
ips = []
for i in range(1, 255):
    ips = ips + ["192.168.1." + str(i)]


#wraps system ping command
def pinger(i, q):
    """Pings subnet"""
    while True:
        print("--i am thread ", i, " --")
        ip = q.get()
        #print "Thread %s: Pinging %s" % (i, ip)
        ret = subprocess.call("ping -c 1 %s" % ip,
                              shell=True,
                              stdout=open('/dev/null', 'w'),
                              stderr=subprocess.STDOUT)
        if ret == 0:
            print "%s: is alive" % ip
        else:
            print "%s: did not respond" % ip
        q.task_done()
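The excerpt ends after the worker function; a typical driver for it (assumed here, mirroring the queue pattern used elsewhere in this collection) starts the daemon threads, enqueues the addresses, and blocks until the subnet sweep finishes:

# Assumed continuation of the pinger example above.
for i in range(num_threads):
    worker = Thread(target=pinger, args=(i, queue))
    worker.setDaemon(True)
    worker.start()

for ip in ips:
    queue.put(ip)

queue.join()  # wait until every address has been pinged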
Exemple #50
0
class MsgProcessor():
    '''
    MsgProcessor class: processes messages between the other components of the
    kernel and the consensus algorithm (this). It defines a few message types
    for passing messages between the two parts of the system. This subroutine
    should be booted up alongside the kernel and assigned a specific socket
    port.

    Composites two greenlets that do most of the heavy lifting, establishing a
    two-way connection.
    Connection phase happens as such:
    1) The other party sets up server first, then calls us (launches)
    2) We set up our server, then connect to their server, and send the message:
        {
            @type: connect
            host: '127.0.0.1'
            port: 30303 (or something like that)
        }

    3) At which point we assume the other side has connected to us, proceed as usual

    SERVER_SIDE:
    Messages should be in the following format:
        {
            @type: tx_msg (so far only transaction messages supported),
            msg_contents: TX_BODY
        }

    (we don't have any failure messages right now)


    CLIENT_SIDE:
    At an undetermined future point in time, we should respond with:
        {
            @type: blk_msg,
            msg_contents: [tx_hash, ...]
        }
    '''
    MSG_SIZE = 4096

    def __init__(self,
                 conn=('127.0.0.1', 30302),
                 serv_conn=('127.0.0.1', 30303)):
        self.worker = None
        self.shutdown = False
        self.conn = conn
        self.serv_conn = serv_conn
        self.q = Queue()

        self.server_shutdown = False
        self.client_shutdown = False

        self.server_t = None
        self.client_t = None

    def run(self):
        '''
        Runs the message processor; no lightweight threads are started until
        this point.
        '''
        Greenlet(self._run).start()

    def _run(self):
        '''
        TODO: add exception handling
        '''
        self.context = zmq.Context()

        def _server():
            print("MsgProcessor: server running")
            socket = self.context.socket(zmq.PAIR)
            socket.bind("tcp://127.0.0.1:%s" % self.serv_conn[1])
            print("MsgProcessor: server -> bound")
            return socket

        def _client():
            print("MsgProcessor: client running")
            socket = self.context.socket(zmq.PAIR)
            # the target address is masked in the source; self.conn holds
            # the (host, port) pair this side is meant to dial
            socket.connect("tcp://%s:%s" % self.conn)
            print("MsgProcessor: client -> connected")
            return socket

        self.server_sock = _server()
        self.client_sock = _client()

        # step 2 of the connection phase described in the class docstring:
        # tell the other party where our server is listening
        self.client_sock.send(json.dumps({
            '@type': 'connect_msg',
            'host': '127.0.0.1',
            'port': 30303
        }))

        self.server_t = Greenlet(self._server)
        self.client_t = Greenlet(self._client)

        self.server_t.start()
        self.client_t.start()

    def _client(self):
        '''
        Client routine, defines an inbound queue (from our perspective),
        the only message that we support currently is BLK
        '''

        client = self.client_sock

        if client is None:
            raise RuntimeError("client_sock not connected")

        print("MsgProcessor: client (us -> kernel) subroutine active")

        while not self.client_shutdown:
            msg = self.q.get()

            if isinstance(msg, PoisonPill):
                break

            # expecting a dictionary here thats serializable
            client.send(json.dumps(msg))

        print("MsgProcessor: client shutdown")

    def _server(self):
        '''
        Server routine, defines an outbound queue, directly triggers
        '''
        server = self.server_sock

        if server is None:
            raise RuntimeError("server_sock not connected")

        print("MsgProcessor: server (kernel -> us) subroutine active")

        while not self.server_shutdown:
            msg = server.recv()
            obj = json.loads(msg)

            if (obj['msg_type'] == 'DISCONNECT'):
                # we're being disconnected, send poison pill to client and disconnect
                self.q.put(PoisonPill())
                break
            elif (obj['msg_type'] == 'TX'):
                tx_hash = obj['tx_hash']  # expected 32-byte hash
                # TODO: hook up to rest of system

        print("MsgProcessor: server shutdown")

    def send_block(self, tx_list, binary=False):
        if binary:
            tx_list = [a.encode('hex_codec') for a in tx_list]
        self.q.put(json.dumps({'@type': 'blk_msg', 'tx_list': tx_list}))

    def stop(self):
        self.server_shutdown = True
        self.client_shutdown = True

        gevent.joinall([self.server_t, self.client_t])

        if self.server_sock is not None:
            self.server_sock.close()

        if self.client_sock is not None:
            self.client_sock.close()

        if self.context is not None:
            self.context.term()
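A hedged usage sketch for the class above, assuming the other party's server is already listening and that gevent, zmq, json, and a PoisonPill class are importable:

# Illustration only; the addresses match the constructor defaults above.
proc = MsgProcessor(conn=('127.0.0.1', 30302), serv_conn=('127.0.0.1', 30303))
proc.run()                        # spawns the client/server greenlets
proc.send_block(['ab' * 32])      # queue a blk_msg with one fake tx hash
proc.stop()                       # joins greenlets, closes sockets and context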
Exemple #51
0
 def create_queue(self):
     """Creates a new Queue object"""
     return Queue()
Exemple #52
0
        rospy.loginfo("isStart true.")
        isStart = True
        isAbort = False
        # Format for service: start_response, abort_response
        locomotionGoal = req.start_ctrl
        cur_heading = locomotionGoal.heading_setpoint
        search_depth = locomotionGoal.depth_setpoint

    if req.abort_request:
        rospy.loginfo("Acoustic abort received")
        isAbort = True
        isStart = False
    return mission_to_visionResponse(isStart, isAbort)

################### MAIN #####################
tdoa_queue = Queue()
recover_time = 0

#Triangulation consts
speedOfSound = 1484
x_1, y_1, z_1 = 0  ,-0.07, 0.07 #left hydrophone
x_2, y_2, z_2 = 0  , 0.07, 0.07 #right hydrophone
x_3, y_3, z_3 = 0.1, 0   , 0    #top hydrophone

# State variables
isStart = False
isEnd = False
isAbort = False
isTest = False
movement_client = None
locomotionGoal = None 
Exemple #53
0
 def __init__(self, group=None, target=None, name=None,
                    args=(), kwargs={}):
     self.inputqueue = Queue()
     self.idletime = time()
     super(_PollingThread, self).__init__(group,
                 target, name, args, kwargs)
Exemple #54
0
def word_chain():
    from sets import Set
    from Queue import Queue

    alpha_lower = 'abcdefghijklmnopqrstuvwxyz'
    with open('/usr/share/dict/words', 'r') as f:
        english_words = Set(f.read().lower().splitlines())

    start = 'dang' #'puzzled'#'fuzzily' #raw_input("first word? ").lower() #
    end = 'wolf' #'abalone' #raw_input("second word? ").lower() #

    good_input = False
    while not good_input:
        if start not in english_words:
            start = raw_input("try first word again, not in english_words: ").lower()
        elif end not in english_words:
            end = raw_input("try second word again, not in english_words: ").lower()
        elif len(start) != len(end):
            end = raw_input("words not same length, try second word again: ").lower()
        elif start == end:
            end = raw_input("same word, try second word again: ").lower()
        else:
            good_input = True
    print start + ' in english_words'
    print end + ' in english_words'

    guesses = Queue()
    guesses.put(start)
    path_storage = {start:"none"}
    found = False

    while not guesses.empty() and not found:
    #for this in range(10):
        word = guesses.get()
        #print word
        word_as_list = list(word)
        for i in range(len(word_as_list)):
            #print i
            for letter in alpha_lower:
                tmp = word_as_list[:]
                tmp[i] = letter
                guess = ''.join(tmp)
                if guess not in path_storage and guess in english_words:
                    path_storage[guess] = word
                    guesses.put(guess)
                    if guess == end:
                        found = True
                        #print "found"

    count = 1
    path = [end]
    if end not in path_storage:
        print "no path!"
        print path_storage
    else:
        while end != start:
            path.append(path_storage[end])
            end = path[count]
            count += 1
        print "found in " + str(len(path) - 1) + " steps."
        for step in range(len(path) - 1, -1, -1):
            print path[step] + " " + str(step)
Exemple #55
0
    def Plan(self, start_config, goal_config):
        self.planning_env.InitializePlot(goal_config)
        start_time = time.time()
        plan = []

        # TODO: Here you will implement the breadth first planner
        #  The return path should be a numpy array
        #  of dimension k x n where k is the number of waypoints
        #  and n is the dimension of the robots configuration space

        plan.append(start_config)
        plan.append(goal_config)

        start_id = self.planning_env.discrete_env.ConfigurationToNodeId(
            start_config)
        goal_id = self.planning_env.discrete_env.ConfigurationToNodeId(
            goal_config)

        seen = [start_id]

        queue = Queue()
        queue.put(start_id)

        tree = RRTTree(self.planning_env, start_config)
        nodeIdToTreeIdDict = {}
        nodeIdToTreeIdDict[start_id] = 0

        while not queue.empty():

            curr_id = queue.get()
            curr_tree_id = nodeIdToTreeIdDict[curr_id]

            for node_id in self.planning_env.GetSuccessors(curr_id):

                if not node_id in seen:

                    seen.append(node_id)
                    node_tree_id = tree.AddVertex(
                        self.planning_env.discrete_env.NodeIdToConfiguration(
                            node_id))
                    nodeIdToTreeIdDict[node_id] = node_tree_id
                    tree.AddEdge(curr_tree_id, node_tree_id)
                    #self.planning_env.PlotEdge(self.planning_env.discrete_env.NodeIdToConfiguration(curr_id),self.planning_env.discrete_env.NodeIdToConfiguration(node_id) );
                    queue.put(node_id)

                    if node_id == goal_id:
                        while True:
                            old_config = tree.vertices[node_tree_id]
                            node_tree_id = tree.edges[node_tree_id]
                            node_config = tree.vertices[node_tree_id]
                            self.planning_env.PlotEdge(old_config, node_config)
                            if (node_tree_id == tree.GetRootId()):
                                plan_length = self.Plan_Length(plan)
                                print("--- %s seconds ---" %
                                      (time.time() - start_time))
                                print("--- %s plan length ---" % plan_length)
                                print("--- %s vertices ---" %
                                      len(tree.vertices))
                                return plan
                            else:
                                plan.insert(1, node_config)
        return plan
Exemple #56
0
    class _PollingThread( Thread ):
        """
        This polling thread listens on selected pipes, and automatically reads
        and writes data between the buffer and those pipes. This will self
        terminate when there are no more pipes defined, and will need to be
        restarted.
        """
        def __init__(self, group=None, target=None, name=None,
                           args=(), kwargs={}):
            self.inputqueue = Queue()
            self.idletime = time()
            super(_PollingThread, self).__init__(group,
                        target, name, args, kwargs)
        def add_pipe(self, buff, pipe, mode):
            self.inputqueue.put((buff, pipe, mode))
        def run(self):
            poller = kqueue()
            fds = {}
            events = []
            while True:
                while not self.inputqueue.empty():
                    # loop through the queue and gather new pipes to add the
                    # kernel queue
                    buff, pipe, mode = self.inputqueue.get()
                    if 'r' in mode:
                        events.append(kevent(pipe, KQ_FILTER_READ, KQ_EV_ADD))
                    elif 'w' in mode:
                        events.append(kevent(pipe, KQ_FILTER_WRITE, KQ_EV_ADD))
                    else:
                        continue
                    fds[pipe.fileno()] = (weakref.ref(buff), pipe)

                if len(events) == 0:
                    events = None
                events = poller.control(events, 16, 0.1)

                for i in range(len(events)):
                    # loop through response and handle events
                    event = events.pop()
                    buff, pipe = fds[event.ident]

                    if buff() is None:
                        # buffer object has closed out from underneath us
                        # pipe will be automatically removed from kqueue
                        pipe.close()
                        del fds[event.ident]
                        continue

                    if (abs(event.filter) & abs(KQ_FILTER_READ)) and event.data:
                        # new data has come in, push into the buffer
                        buff().write(pipe.read(event.data))

                    if (abs(event.filter) & abs(KQ_FILTER_WRITE)) and event.data:
                        # space is available to write data
                        pipe.write(buff().read(\
                                    min(buff()._nbytes, event.data, 2**16)))

                    if abs(event.flags) & abs(KQ_EV_EOF):
                        # pipe has been closed and all IO has been processed
                        # pipe will be automatically removed from kqueue
                        buff().close()
                        pipe.close()
                        del fds[event.ident]

                if len(fds) == 0:
                    # no pipes referenced
                    if self.idletime + 20 < time():
                        # idle timeout reached, terminate
                        break
                    sleep(0.1)
                else:
                    self.idletime = time()
Exemple #57
0
            if self.queue.qsize() == 0:
                sleep(1)
                continue
            announce = self.queue.get()
            t = threading.Thread(target=download_metadata, args=(announce[0], announce[1]))
            t.setDaemon(True)
            t.start()

#main
if __name__ == "__main__":
    path=""
    thread_num=0
    save_seed=-1
    options=[]

    trans_queue = Queue()

    get_option()
    print path
    print thread_num
    print save_seed
    if (path != "-s") and (save_seed == 1):
        if not os.path.exists('BT/'):
            os.makedirs('BT/')
    #start watcher
    Watcher()
    #metadata process
    master = Master()
    master.start()
    #start DHT Network
    print('Receiving datagrams on :6882')
Exemple #58
0
    class _PollingThread( Thread ):
        """
        This polling thread listens on selected pipes, and automatically reads
        and writes data between the buffer and those pipes. This will self
        terminate when there are no more pipes defined, and will need to be
        restarted.
        """
        def __init__(self, group=None, target=None, name=None,
                           args=(), kwargs={}):
            self.inputqueue = Queue()
            self.idletime = time()
            super(_PollingThread, self).__init__(group,
                        target, name, args, kwargs)
        def add_pipe(self, buff, pipe, mode):
            self.inputqueue.put((buff, pipe, mode))
        def run(self):
            poller = poll()
            fds = {}
            events = []
            while True:
                while not self.inputqueue.empty():
                    # loop through the queue and add new pipes to the
                    # poll object
                    buff, pipe, mode = self.inputqueue.get()
                    if 'r' in mode:
                        poller.register(pipe.fileno(), POLLIN|POLLHUP)
                    elif 'w' in mode:
                        poller.register(pipe.fileno(), POLLOUT|POLLHUP)
                    else:
                        continue
                    fds[pipe.fileno()] = (weakref.ref(buff), pipe)

                for fd,event in poller.poll(100):
                    # loop through file numbers and handle events
                    buff, pipe = fds[fd]
                    if buff() is None:
                        # buffer object has closed out from underneath us
                        # remove reference from poller
                        pipe.close()
                        del fds[fd]
                        poller.unregister(fd)
                        continue

                    if event & POLLIN:
                        # read as much data from the pipe as it has available
                        buff().write(pipe.read(2**16))
                    if event & POLLOUT:
                        # write as much data to the pipe as there is space for
                        # roll back buffer if data is not fully written
                        data = buff().read(2**16)
                        nbytes = pipe.write(data)
                        if nbytes != len(data):
                            buff()._rollback(len(data) - nbytes)
                    if event & POLLHUP:
                        # pipe has closed, and all reads have been processed
                        # remove reference from poller
                        buff().close()
                        pipe.close()
                        del fds[fd]
                        poller.unregister(fd)

                if len(fds) == 0:
                    # no pipes referenced
                    if self.idletime + 20 < time():
                        # idle timeout reached, terminate
                        break
                    sleep(0.1)
                else:
                    self.idletime = time()
Exemple #59
0
class TTS(object):
    """
    TTS abstract class to be implemented by all TTS engines.

    It aggregates the minimum required parameters and exposes
    ``execute(sentence)`` function.
    """
    __metaclass__ = ABCMeta

    def __init__(self, lang, voice, validator, phonetic_spelling=True):
        super(TTS, self).__init__()
        self.lang = lang or 'en-us'
        self.voice = voice
        self.filename = '/tmp/tts.wav'
        self.validator = validator
        self.phonetic_spelling = phonetic_spelling
        self.enclosure = None
        random.seed()
        self.queue = Queue()
        self.playback = PlaybackThread(self.queue)
        self.playback.start()
        self.clear_cache()
        self.spellings = self.load_spellings()

    def load_spellings(self):
        """Load phonetic spellings of words as dictionary"""
        path = join('text', self.lang, 'phonetic_spellings.txt')
        spellings_file = resolve_resource_file(path)
        if not spellings_file:
            return {}
        try:
            with open(spellings_file) as f:
                lines = filter(bool, f.read().split('\n'))
            lines = [i.split(':') for i in lines]
            return {key.strip(): value.strip() for key, value in lines}
        except ValueError:
            LOG.exception('Failed to load phonetic spellings.')
            return {}

    def begin_audio(self):
        """Helper function for child classes to call in execute()"""
        # Create signals informing start of speech
        self.ws.emit(Message("recognizer_loop:audio_output_start"))

    def end_audio(self):
        """
            Helper function for child classes to call in execute().

            Sends the recognizer_loop:audio_output_end message, indicating
            that speaking is done for the moment. It also checks if cache
            directory needs cleaning to free up disk space.
        """

        self.ws.emit(Message("recognizer_loop:audio_output_end"))
        # Clean the cache as needed
        cache_dir = mycroft.util.get_cache_directory("tts")
        mycroft.util.curate_cache(cache_dir, min_free_percent=100)

        # This check will clear the "signal"
        check_for_signal("isSpeaking")

    def init(self, ws):
        self.ws = ws
        self.playback.init(self)
        self.enclosure = EnclosureAPI(self.ws)
        self.playback.enclosure = self.enclosure

    def get_tts(self, sentence, wav_file):
        """
            Abstract method that a tts implementation needs to implement.
            Should get data from tts.

            Args:
                sentence(str): Sentence to synthesize
                wav_file(str): output file

            Returns: (wav_file, phoneme) tuple
        """
        pass

    def execute(self, sentence, ident=None):
        """
            Convert sentence to speech.

            The method caches results if possible using the hash of the
            sentence.

            Args:
                sentence:   Sentence to be spoken
                ident:      Id reference to current interaction
        """
        create_signal("isSpeaking")
        if self.phonetic_spelling:
            for word in re.findall(r"[\w']+", sentence):
                if word in self.spellings:
                    sentence = sentence.replace(word, self.spellings[word])

        key = str(hashlib.md5(sentence.encode('utf-8', 'ignore')).hexdigest())
        wav_file = os.path.join(mycroft.util.get_cache_directory("tts"),
                                key + '.' + self.type)

        if os.path.exists(wav_file):
            LOG.debug("TTS cache hit")
            phonemes = self.load_phonemes(key)
        else:
            wav_file, phonemes = self.get_tts(sentence, wav_file)
            if phonemes:
                self.save_phonemes(key, phonemes)

        self.queue.put((self.type, wav_file, self.visime(phonemes), ident))

    def visime(self, phonemes):
        """
            Create visimes from phonemes. Needs to be implemented by each
            TTS backend.

            Args:
                phonemes(str): String with phoneme data
        """
        return None

    def clear_cache(self):
        """ Remove all cached files. """
        if not os.path.exists(mycroft.util.get_cache_directory('tts')):
            return
        for f in os.listdir(mycroft.util.get_cache_directory("tts")):
            file_path = os.path.join(mycroft.util.get_cache_directory("tts"),
                                     f)
            if os.path.isfile(file_path):
                os.unlink(file_path)

    def save_phonemes(self, key, phonemes):
        """
            Cache phonemes

            Args:
                key:        Hash key for the sentence
                phonemes:   phoneme string to save
        """

        cache_dir = mycroft.util.get_cache_directory("tts")
        pho_file = os.path.join(cache_dir, key + ".pho")
        try:
            with open(pho_file, "w") as cachefile:
                cachefile.write(phonemes)
        except Exception:
            LOG.debug("Failed to write .PHO to cache")

    def load_phonemes(self, key):
        """
            Load phonemes from cache file.

            Args:
                Key:    Key identifying phoneme cache
        """
        pho_file = os.path.join(mycroft.util.get_cache_directory("tts"),
                                key + ".pho")
        if os.path.exists(pho_file):
            try:
                with open(pho_file, "r") as cachefile:
                    phonemes = cachefile.read().strip()
                return phonemes
            except Exception:
                LOG.debug("Failed to read .PHO from cache")
        return None

    def __del__(self):
        self.playback.stop()
        self.playback.join()
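A minimal concrete engine built on the abstract class above; this is an illustrative sketch, not a real backend (a production engine would synthesize audio in get_tts and supply a real validator):

class DummyTTS(TTS):
    type = 'wav'  # consumed by execute() when building cache file names

    def __init__(self, lang, voice):
        super(DummyTTS, self).__init__(lang, voice, validator=None)

    def get_tts(self, sentence, wav_file):
        # A real engine would call its synthesizer here; we only create
        # an empty file and report no phoneme data.
        open(wav_file, 'wb').close()
        return wav_file, None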
Exemple #60
0
 def __init__(self):
     Thread.__init__(self)
     self.setDaemon(True)
     self.queue = Queue()