Example #1
 def bfs(self, node):
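     # Iterative BFS from `node`: assign consecutive ids to vertices as they
     # are discovered and record, for each visited vertex, its parent
     # followed by the ids handed to its unvisited children.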
     if not self.visited[node]:
         self.visited[node] = True
         child_iter = 1
         tree = []
         
         treeNode = [0]
         for child in self.adjList[node]:
             if not self.visited[child]:
                 treeNode.append(child_iter)
                 child_iter += 1
         tree.append(treeNode)
         
         children_queue = Queue()
         parent_queue = Queue()
         for child in self.adjList[node]:
             children_queue.put(child)
             parent_queue.put(node)
             
         while not children_queue.empty():
             current = children_queue.get()
             parent = parent_queue.get()
             if not self.visited[current]:
                 self.visited[current] = True
                 
                 treeNode = [parent]
                 for child in self.adjList[current]:
                     if not self.visited[child]:
                         treeNode.append(child_iter)
                         child_iter += 1
                         children_queue.put(child)
                         parent_queue.put(current)
                 tree.append(treeNode)
                 
         self.trees.append(tree)
Example #2
File: event.py Project: sijis/salt
    def __test_event_fire_ipc_mode_tcp(self):
        events = Queue()

        def get_event(events):
            me = event.MinionEvent(**self.sub_minion_opts)
            events.put_nowait(
                me.get_event(wait=10, tag='salttest', full=False)
            )

        threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)   # Allow the event-listening thread to start

        ret = self.run_function(
            'event.fire', ['event.fire: just test it!!!!', 'salttest'],
            minion_tgt='sub_minion'
        )
        self.assertTrue(ret)

        eventfired = events.get(block=True, timeout=10)
        self.assertIsNotNone(eventfired)
        self.assertIn('event.fire: just test it!!!!', eventfired)

        ret = self.run_function(
            'event.fire', ['event.fire: just test it!!!!', 'salttest-miss'],
            minion_tgt='sub_minion'
        )
        self.assertTrue(ret)

        with self.assertRaises(Empty):
            eventfired = events.get(block=True, timeout=10)
Example #3
 def test_error(self):
     """
     Exception raised running unit test is reported as an error
     """
     # Parent directory setup
     os.chdir(self.tmpdir)
     sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
     basename = os.path.basename(sub_tmpdir)
     # Child setup
     fh = open(os.path.join(basename, '__init__.py'), 'w')
     fh.write('\n')
     fh.close()
     fh = open(os.path.join(basename, 'test_pool_runner_dotted_fail.py'), 'w')
     fh.write(dedent(
         """
         import unittest
         class A(unittest.TestCase):
             def testError(self):
                 raise AttributeError
         """))
     fh.close()
     module_name = basename + '.test_pool_runner_dotted_fail.A.testError'
     result = Queue()
     poolRunner(module_name, result)
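     # the first get() discards the initial queue entry posted by poolRunner;
     # the second one carries the actual results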
     result.get()
     self.assertEqual(len(result.get().errors), 1)
Example #4
class Dispatcher:
    """
    The Dispatcher class manages the task and result queues.
    """

    def __init__(self):
        """
        Initialise the Dispatcher.
        """
        self.taskQueue = Queue()
        self.resultQueue = Queue()
        self.outputQueue = Queue()

    def getTaskQueue(self):
        return self.taskQueue

    def getResultQueue(self):
        return self.resultQueue

    def getOutputQueue(self):
        return self.outputQueue

    def putTask(self, task):
        """
        Put a task on the task queue.
        """
        self.taskQueue.put(task)

    def getTask(self):
        """
        Get a task from the task queue.
        """
        return self.taskQueue.get()

    def putResult(self, output):
        """
        Put a result on the result queue.
        """
        self.resultQueue.put(output)

    def getResult(self):
        """
        Get a result from the result queue.
        """
        return self.resultQueue.get()

    def putOutput(self, out):
        """
        Put output on the output queue.
        """
        self.outputQueue.put(out)

    def getOutput(self):
        """
        Get an output from the output queue.
        """
        return self.outputQueue.get()
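
A minimal usage sketch for the class above (the worker function and task value are illustrative):

    from threading import Thread

    dispatcher = Dispatcher()

    def worker(d):
        task = d.getTask()         # blocks until a task arrives
        d.putResult(task * 2)      # hand the result back

    Thread(target=worker, args=(dispatcher,)).start()
    dispatcher.putTask(21)
    print(dispatcher.getResult())  # prints 42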
Example #5
File: event.py Project: sijis/salt
    def __test_event_fire_master(self):
        events = Queue()

        def get_event(events):
            me = event.MasterEvent(self.master_opts['sock_dir'])
            events.put_nowait(
                me.get_event(wait=10, tag='salttest', full=False)
            )

        threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)   # Allow the event-listening thread to start

        ret = self.run_function(
            'event.fire_master',
            ['event.fire_master: just test it!!!!', 'salttest']
        )
        self.assertTrue(ret)

        eventfired = events.get(block=True, timeout=10)
        self.assertIsNotNone(eventfired)
        self.assertIn(
            'event.fire_master: just test it!!!!', eventfired['data']
        )

        ret = self.run_function(
            'event.fire_master',
            ['event.fire_master: just test it!!!!', 'salttest-miss']
        )
        self.assertTrue(ret)

        with self.assertRaises(Empty):
            eventfired = events.get(block=True, timeout=10)
Example #6
 def test6_ThreeThreadsTwoConnections(self):
     pool = PooledPg(2, 2, 2, True)
     from Queue import Queue, Empty
     queue = Queue(3)
     def connection():
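          # older Queue implementations accept no timeout argument;
          # the TypeError fallback keeps this test portable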
         try:
             queue.put(pool.connection(), 1, 1)
         except TypeError:
             queue.put(pool.connection(), 1)
     from threading import Thread
     for i in range(3):
         Thread(target=connection).start()
     try:
         db1 = queue.get(1, 1)
         db2 = queue.get(1, 1)
     except TypeError:
         db1 = queue.get(1)
         db2 = queue.get(1)
     db1_con = db1._con
     db2_con = db2._con
     self.assertNotEqual(db1, db2)
     self.assertNotEqual(db1_con, db2_con)
     try:
         self.assertRaises(Empty, queue.get, 1, 0.1)
     except TypeError:
         self.assertRaises(Empty, queue.get, 0)
     del db1
     try:
         db1 = queue.get(1, 1)
     except TypeError:
         db1 = queue.get(1)
     self.assertNotEqual(db1, db2)
     self.assertNotEqual(db1._con, db2._con)
     self.assertEqual(db1._con, db1_con)
Example #7
 def test_normalRun(self):
     """
     Runs normally
     """
     saved_coverage = process.coverage
     process.coverage = MagicMock()
     self.addCleanup(setattr, process, 'coverage', saved_coverage)
     # Parent directory setup
     os.chdir(self.tmpdir)
     sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
     basename = os.path.basename(sub_tmpdir)
     # Child setup
     fh = open(os.path.join(basename, '__init__.py'), 'w')
     fh.write('\n')
     fh.close()
     fh = open(os.path.join(basename, 'test_pool_runner_dotted.py'), 'w')
     fh.write(dedent(
         """
         import unittest
         class A(unittest.TestCase):
             def testPass(self):
                 pass
         """))
     fh.close()
     module_name = basename + '.test_pool_runner_dotted.A.testPass'
     result = Queue()
     poolRunner(module_name, result, 1)
     result.get()
     self.assertEqual(len(result.get().passing), 1)
Example #8
def runexternal_out_and_err(cmd, check_memleak=True):
    from gdaltest import is_travis_branch
    if not is_travis_branch('mingw'):
        has_subprocess = False
        try:
            import subprocess
            import shlex
            if hasattr(subprocess, 'Popen') and hasattr(shlex, 'split'):
                has_subprocess = True
        except (ImportError, AttributeError):
            pass
        if has_subprocess:
            return _runexternal_out_and_err_subprocess(cmd, check_memleak=check_memleak)

    (ret_stdin, ret_stdout, ret_stderr) = os.popen3(cmd)
    ret_stdin.close()

    q_stdout = Queue()
    t_stdout = Thread(target=read_in_thread, args=(ret_stdout, q_stdout))
    q_stderr = Queue()
    t_stderr = Thread(target=read_in_thread, args=(ret_stderr, q_stderr))
    t_stdout.start()
    t_stderr.start()

    out_str = q_stdout.get()
    err_str = q_stderr.get()

    if check_memleak:
        warn_if_memleak(cmd, out_str)

    return (out_str, err_str)
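
The read_in_thread helper is not shown in this snippet. Judging from how it is called (a stream and a queue) and consumed (one blocking get() per stream), a minimal sketch could be:

    def read_in_thread(f, q):
        # Drain the stream to EOF in a separate thread so the child process
        # cannot deadlock on a full pipe buffer, then deliver the whole
        # contents as a single queue item.
        q.put(f.read())
        f.close()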
Example #9
class _ParamUpdater(Thread):
    def __init__(self, cf, updatedCallback):
        Thread.__init__(self)
        self.setDaemon(True)
        self.cf = cf
        self.updatedCallback = updatedCallback
        self.requestQueue = Queue()
        self.incommingQueue = Queue()
        self.cf.add_port_callback(CRTPPort.PARAM, self._new_packet_cb)

    def _new_packet_cb(self, pk):
        if pk.channel != TOC_CHANNEL:
            self.updatedCallback(pk)
            self.incommingQueue.put(0)  # Don't care what we put, used to sync

    def request_param_update(self, varid):
        logger.debug("Requesting update for varid %d", varid)
        pk = CRTPPacket()
        pk.set_header(CRTPPort.PARAM, READ_CHANNEL)
        pk.data = struct.pack('<B', varid)
        self.cf.send_packet(pk, expect_answer=True)

    def run(self):
        while True:
            varid = self.requestQueue.get()  # Wait for request update
            self.request_param_update(varid)  # Send request for update
            self.incommingQueue.get()  # Blocking until reply arrives
Example #10
def run_threaded_exchanges(exchange, conversations):
    event = Event()

    queue_ab = Queue()
    exchange_ab = ThreadedExchange(exchange,
                                   conversations.ab, conversations.ba,
                                   event, queue_ab)

    queue_ac = Queue()
    exchange_ac = ThreadedExchange(exchange,
                                   conversations.ac, conversations.ca,
                                   event, queue_ac)

    queue_bc = Queue()
    exchange_bc = ThreadedExchange(exchange,
                                   conversations.bc, conversations.cb,
                                   event, queue_bc)

    exchange_ab.start()
    exchange_ac.start()
    exchange_bc.start()

    event.set()

    assert queue_ab.get() and queue_ac.get() and queue_bc.get()
Example #11
def _runexternal_out_and_err_subprocess(cmd, check_memleak=True):
    # pylint: disable=unused-argument
    import subprocess
    import shlex
    command = shlex.split(cmd)
    command = [elt.replace('\x00', '') for elt in command]
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    if p.stdout is not None:
        q_stdout = Queue()
        t_stdout = Thread(target=read_in_thread, args=(p.stdout, q_stdout))
        t_stdout.start()
    else:
        q_stdout = None
        ret_stdout = ''

    if p.stderr is not None:
        q_stderr = Queue()
        t_stderr = Thread(target=read_in_thread, args=(p.stderr, q_stderr))
        t_stderr.start()
    else:
        q_stderr = None
        ret_stderr = ''

    if q_stdout is not None:
        ret_stdout = q_stdout.get()
    if q_stderr is not None:
        ret_stderr = q_stderr.get()

    waitcode = p.wait()
    if waitcode != 0:
        ret_stderr = ret_stderr + '\nERROR ret code = %d' % waitcode

    return (ret_stdout, ret_stderr)
Example #12
 def test4_threads(self):
     dbpool = self.my_dbpool(2)
     from Queue import Queue, Empty
     queue = Queue(3)
     def connection():
         queue.put(dbpool.connection())
     from threading import Thread
     Thread(target=connection).start()
     Thread(target=connection).start()
     Thread(target=connection).start()
     try:
         db1 = queue.get(1, 1)
         db2 = queue.get(1, 1)
     except TypeError:
         db1 = queue.get(1)
         db2 = queue.get(1)
     self.assertNotEqual(db1, db2)
     self.assertNotEqual(db1._con, db2._con)
     try:
         self.assertRaises(Empty, queue.get, 1, 0.1)
     except TypeError:
         self.assertRaises(Empty, queue.get, 0)
     db2.close()
     try:
         db3 = queue.get(1, 1)
     except TypeError:
         db3 = queue.get(1)
     self.assertNotEqual(db1, db3)
     self.assertNotEqual(db1._con, db3._con)
Example #13
class Fetcher:
    def __init__(self, threads=5):
        self.__thread_cnt = threads
        self.__threads = []
        self.__lock = Lock()
        self.q_req = Queue()
        self.q_resp = Queue()

        self.__running = 0
        for i in range(self.__thread_cnt):
            new_thread = threading.Thread(target=self.__task)
            new_thread.setDaemon(True)
            new_thread.start()
            self.__threads.append(new_thread)

    def __task(self):
        while True:
            req = self.q_req.get()
            print "Req: %s" % (req)
            with self.__lock:
                self.__running += 1
            content = urllib2.urlopen(req).read()
            self.q_resp.put((req, content))

            with self.__lock:
                self.__running -= 1

    def push(self, req):
        self.q_req.put(req)

    def pop(self):
        return self.q_resp.get()

    def task_left(self):
        return self.__running + self.q_resp.qsize() + self.q_req.qsize()
Example #14
    def context_inheritance(self):
        class FooContextManager(
                ContextStackManagerEventletMixin,
                ContextStackManagerThreadMixin,
                ContextStackManagerBase
            ):
            pass
        csm = FooContextManager()
        csm.push_application('foo')

        def foo(csm, queue):
            csm.push_thread('bar')
            queue.put(list(csm.iter_current_stack()))
            eventlet.spawn(bar, csm, queue).wait()
            queue.put(list(csm.iter_current_stack()))

        def bar(csm, queue):
            csm.push_coroutine('baz')
            queue.put(list(csm.iter_current_stack()))

        queue = Queue()
        thread = Thread(target=foo, args=(csm, queue))
        thread.start()
        Assert(queue.get()) == ['bar', 'foo']
        Assert(queue.get()) == ['baz', 'bar', 'foo']
        Assert(queue.get()) == ['bar', 'foo']
        Assert(list(csm.iter_current_stack())) == ['foo']
Example #15
    def multiple_thread_contexts(self):
        csm = ThreadContextStackManager()

        def make_func(name):
            def func(csm, queue, event):
                csm.push_thread(name)
                queue.put(list(csm.iter_current_stack()))
                event.wait()
            func.__name__ = name
            return func

        foo_queue = Queue()
        bar_queue = Queue()
        foo_event = Event()
        bar_event = Event()
        foo_thread = Thread(
            target=make_func('foo'), args=(csm, foo_queue, foo_event)
        )
        bar_thread = Thread(
            target=make_func('bar'), args=(csm, bar_queue, bar_event)
        )
        foo_thread.start()
        # during that time foo should have pushed an object on
        # the thread local stack
        time.sleep(1)
        bar_thread.start()
        foo_event.set()
        bar_event.set()
        Assert(foo_queue.get()) == ['foo']
        Assert(bar_queue.get()) == ['bar']
        Assert(list(csm.iter_current_stack())) == []
Example #16
def make_work(callback, tasks, limit, ignore_exceptions=True,
              taskq_size=50):
    """
    Run up to "limit" threads, do tasks and yield results.

    :param callback: the function that will process a single task
    :param tasks: a sequence, iterator, or queue of tasks; each task is
        itself a sequence of arguments, so a task consisting of a single
        argument must be wrapped in a list or tuple
    :param limit: the maximum number of worker threads
    :param ignore_exceptions: whether exceptions raised in workers are
        suppressed
    :param taskq_size: maximum size of the internal task queue
    """
    
    # If tasks is a number, convert it to a range of that many tasks
    if isinstance(tasks, int):
        tasks = xrange(tasks)

    # Ensure the tasks sequence is an iterator
    tasks = iter(tasks)

    taskq = Queue(taskq_size)

    # Results of task processing will be collected here
    resultq = Queue()

    # Prepare and run up to "limit" threads
    threads = []
    for x in xrange(limit):
        thread = Worker(callback, taskq, resultq, ignore_exceptions)
        thread.daemon = True
        thread.start()
        threads.append(thread)

    # Put tasks from tasks iterator to taskq queue
    # until tasks iterator ends
    # Do it in separate thread
    def task_processor(task_iter, task_queue, limit):
        try:
            for task in task_iter:
                task_queue.put(task)
        finally:
            for x in xrange(limit):
                task_queue.put(STOP)

    processor = Thread(target=task_processor, args=[tasks, taskq, limit])
    processor.daemon = True
    processor.start()

    while True:
        try:
            yield resultq.get(True, 0.2)
        except Empty:
            pass
        if not any(x.isAlive() for x in threads):
            break

    while True:
        try:
            yield resultq.get(False)
        except Empty:
            break
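
A hypothetical call, assuming the Worker class and STOP sentinel this function relies on are defined elsewhere (neither is shown here):

    def fetch(url):
        return len(url)  # stand-in for real per-task work

    tasks = [('http://a',), ('http://b',)]  # each task is a tuple of arguments
    for res in make_work(fetch, tasks, limit=2):
        print(res)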
Example #17
File: tests.py Project: mgax/kv
    def test_lock_fails_if_db_already_locked(self):
        import sqlite3
        from threading import Thread
        from Queue import Queue

        db_path = self.tmp / "kv.sqlite"
        q1 = Queue()
        q2 = Queue()
        kv2 = KV(db_path, timeout=0.1)

        def locker():
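            # hold the lock, tell the main thread via q1, then block on q2
            # until the main thread says it is done asserting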
            kv1 = KV(db_path)
            with kv1.lock():
                q1.put(None)
                q2.get()

        th = Thread(target=locker)
        th.start()
        try:
            q1.get()
            with self.assertRaises(sqlite3.OperationalError) as cm1:
                with kv2.lock():
                    pass
            self.assertEqual(cm1.exception.message, "database is locked")
            with self.assertRaises(sqlite3.OperationalError) as cm2:
                kv2["a"] = "b"
            self.assertEqual(cm2.exception.message, "database is locked")
        finally:
            q2.put(None)
            th.join()
Example #18
 def retry_block(self):
     # Received a retry request that made it all the way up to the top.
     # First, check to see if any of the variables we've accessed have
     # been modified since we started, which could change whether or not
     # we need to retry.
     with _global_lock:
         for item in self.check_values:
             item._check_clean()
         # Nope, none of them have changed. So now we create a queue,
         # then add it to all of the vars we need to watch.
         q = Queue(1)
         for item in self.retry_values:
             item._add_retry_queue(q)
     # Then we create a timer to let us know when our retry timeout (if any
     # calls made during this transaction indicated one) is up. Note that
     # _Timer does nothing when given a resume time of None, so we don't
     # need to worry about that here.
     timer = _Timer(q, self.resume_at)
     timer.start()
     # Then we wait.
     q.get()
     # One of the vars was modified or our timeout expired. Now we go cancel
     # the timer (in case it was a change to one of our watched vars that
     # woke us up instead of a timeout) and remove ourselves from the vars'
     # queues.
     timer.cancel()
     with _global_lock:
         for item in self.retry_values:
             item._remove_retry_queue(q)
     # And then we retry immediately.
     raise _RetryImmediately
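
The wait/wakeup trick above, a Queue(1) standing in for a condition variable, reduces to this generic sketch (independent of the STM machinery):

    from queue import Queue  # 'from Queue import Queue' on Python 2
    from threading import Thread
    import time

    q = Queue(1)              # one-slot queue used as a one-shot wakeup signal

    def waker():
        time.sleep(0.1)
        q.put(None)           # the value is irrelevant; only the wakeup matters

    Thread(target=waker).start()
    q.get()                   # blocks here until waker() fires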
Example #19
	def run(self):
		f1 = open('put_simulation_service_time.csv', 'wt')
		f = open('put_simulation_result.csv', 'wt')
		writer = csv.writer(f)
		service = csv.writer(f1)
		writer.writerow(('Arrival Rate', 'Service Time', 'Throughput'))
		service.writerow(('Mean', 'Count', 'Service Time'))
		sumarray = []
		threads = Queue()
		sum = 0.0
		for index in range(0,self.count):
			wait = random.expovariate(self.put_mean)
			lockobj.acquire()
			location = empty_slotsset.pop()
			self.store_time(location)
			lockobj.release()
			state = "xterm -e ./client/mosquitto_pub -t 'loc/put' -m {0} &".format(location)
			start_time = time.time()
			process = ProcessStore(state, None, start_time)
			thread = ProcessThread(process, sumarray,self.mean_hold)
			thread.start()
			threads.put(thread)
			time.sleep(wait)
		while not threads.empty():
			threads.get().join()
		i = 1
		for val in sumarray:
			service.writerow((self.put_mean, i, val))
			i += 1
			sum += val
		writer.writerow((self.put_mean, sum / self.count, int(self.count * 3600 / sum)))
		f1.close()
		f.close()
Example #20
def test_get_batches_vep():
    """
    Test to get a batch
    """
    batch_queue = Queue()
    variants = []
    
    first_variant = get_variant_line(info="MQ;CSQ=G|ADK")
    
    second_variant = get_variant_line(pos="2", info="MQ;CSQ=G|ADK")
    
    variants.append(first_variant)
    variants.append(second_variant)

    header = HeaderParser()
    header.parse_header_line("#{0}".format(HEADER))
    header.vep_columns = ['Allele', 'SYMBOL']
    
    chromosomes = get_batches(variants=variants, batch_queue=batch_queue,
                              header=header)
    
    batch_1 = batch_queue.get()
    batch_queue.task_done()
    
    batch_2 = batch_queue.get()
    batch_queue.task_done()
    
    assert chromosomes == ['1']
    assert len(batch_1) == 1
    assert len(batch_2) == 1
Example #21
    def test_volumizer(self):

        if self.no_server:
            raise SkipTest

        dicom_q = Queue()
        series = "test_data/p004/e4120/4120_4_1_dicoms"
        files = self.client.series_files(series)[:80]
        for f in files:
            dicom_q.put(self.client.retrieve_dicom(f))

        volume_q = Queue()
        volumizer = qm.Volumizer(dicom_q, volume_q)

        try:
            volumizer.start()

            vol1 = volume_q.get(timeout=5)
            vol2 = volume_q.get(timeout=5)

            for vol in [vol1, vol2]:
                image_shape = vol["image"].get_data().shape
                nt.assert_equal(image_shape, (80, 80, 40))

            with nt.assert_raises(Empty):
                volume_q.get(block=False)

        finally:
            volumizer.halt()
            volumizer.join()
Example #22
def test_get_batches_new_chromosome():
    """
    Test to get a batch
    """
    batch_queue = Queue()
    variants = []
    
    first_variant = get_variant_line()
    second_variant = get_variant_line(chrom="2")
    
    variants.append(first_variant)
    variants.append(second_variant)
    
    header = HeaderParser()
    header.parse_header_line("#{0}".format(HEADER))
    
    chromosomes = get_batches(variants=variants, batch_queue=batch_queue,
                              header=header)
    
    batch_1 = batch_queue.get()
    batch_queue.task_done()
    
    batch_2 = batch_queue.get()
    batch_queue.task_done()
    
    assert chromosomes == ['1', '2']
    assert len(batch_1) == 1
    assert len(batch_2) == 1
Example #23
    def test_foreign_suite(self):
        """
        Load tests does not reuse the tests and instead returns
        another TestSuite (or maybe not even a unittest.TestSuite).
        """

        with open(os.path.join(self.basename, 'test_load_keys_foreign_suite.py'), 'w') as f:
            f.write(textwrap.dedent(
                """
                import unittest
                class A(unittest.TestCase):
                    def test_that_will_fail(self):
                        self.fail()

                def load_tests(loader, tests, pattern):
                    class B(unittest.TestCase):
                        def test_that_succeeds(self):
                            pass
                    suite = unittest.TestSuite()
                    suite.addTests(loader.loadTestsFromTestCase(B))
                    return suite
                """))

        module_name = self.basename + '.test_load_keys_foreign_suite'
        result = Queue()
        poolRunner(module_name, result, 0)
        result.get()

        proto_test_result = result.get()
        self.assertEqual(len(proto_test_result.passing), 1)
        self.assertEqual(len(proto_test_result.errors), 0)
        self.assertEqual(len(proto_test_result.failures), 0)
        self.assertEqual(proto_test_result.passing[0].class_name, 'B')
Example #24
    def test_none_cancels(self):
        """
        Check that if load_tests returns None, no tests are run.
        """
        with open(os.path.join(self.basename, 'test_load_keys_none_cancels.py'), 'w') as fh:
            fh.write(textwrap.dedent(
                """
                import unittest
                class A(unittest.TestCase):
                    def test_that_will_fail(self):
                        self.fail()

                def load_tests(loader, tests, pattern):
                    return None
                """))

        module_name = self.basename + '.test_load_keys_none_cancels'
        result = Queue()
        poolRunner(module_name, result, 0)
        result.get()

        proto_test_result = result.get()
        self.assertEqual(len(proto_test_result.errors), 1)
        self.assertEqual(len(proto_test_result.passing), 0)
        self.assertEqual(len(proto_test_result.failures), 0)
Example #25
    def test_monkey_patch(self):
        """
        Check that monkey-patching a TestCase in the load_tests function
        actually changes the referenced class.
        """

        with open(os.path.join(self.basename, 'test_load_tests_monkeypatch.py'), 'w') as f:
            f.write(textwrap.dedent(
                """
                import unittest
                class A(unittest.TestCase):
                    passing = False
                    def test_that_will_fail(self):
                        self.assertTrue(self.passing)

                def load_tests(loader, tests, pattern):
                    A.passing = True
                    return tests
                """))

        module_name = self.basename + '.test_load_tests_monkeypatch'
        result = Queue()
        poolRunner(module_name, result, 0)
        result.get()

        proto_test_result = result.get()
        self.assertEqual(len(proto_test_result.passing), 1)
        self.assertEqual(len(proto_test_result.failures), 0)
        self.assertEqual(len(proto_test_result.errors), 0)
        self.assertEqual(proto_test_result.passing[0].class_name, 'A')
Example #26
    def xreader():
        in_queue = Queue(buffer_size)
        out_queue = Queue(buffer_size)
        out_order = [0]
        # start a read worker in a thread
        target = order_read_worker if order else read_worker
        t = Thread(target=target, args=(reader, in_queue))
        t.daemon = True
        t.start()
        # start several handle_workers
        target = order_handle_worker if order else handle_worker
        args = (in_queue, out_queue, mapper, out_order) if order else (
            in_queue, out_queue, mapper)
        workers = []
        for i in xrange(process_num):
            worker = Thread(target=target, args=args)
            worker.daemon = True
            workers.append(worker)
        for w in workers:
            w.start()

        sample = out_queue.get()
        while not isinstance(sample, XmapEndSignal):
            yield sample
            sample = out_queue.get()
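        # the first XmapEndSignal was consumed by the loop above, so start at 1
        # and keep draining until every worker has signalled completion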
        finish = 1
        while finish < process_num:
            sample = out_queue.get()
            if isinstance(sample, XmapEndSignal):
                finish += 1
            else:
                yield sample
Example #27
class FiniteStateMachine(threading.Thread):

    def __init__(self, name=None):
        self.queue = Queue()
        self.exit = False
        super(FiniteStateMachine, self).__init__(name=name)

    def run(self):
        logger.debug(self.name)
        while not self.exit:
            s = self.queue.get()
            if s == States.EXIT:
                self.exit = True
            else:
                self.work(s)

    def empty(self):
        for i in range(self.queue.qsize()):
            self.queue.get()

    def work(self, state):
        logger.debug("In Work State: {0}".format(state))
        #raise NotImplementedError

    def quit(self):
        self.empty()
        self.queue.put(States.EXIT)
Example #28
class CommandQueue:
    """
    >>> q = CommandQueue()
    >>> command = Command('foo', 1, 2, 3)
    >>> q.put(command)
    >>> q.block()
    >>> q.get() == command
    True
    """

    def __init__(self):
        self._in = Queue()
        self._out = Queue()

    def block(self):
        foo = self._in.get()
        self._out.put(foo)

    def put(self, command):
        if isinstance(command, Command):
            self._in.put(command)
        else:
            raise TypeError(type(command))

    def push(self, name, *args):
        command = Command(name, *args)
        self.put(command)

    def get(self):
        return self._out.get()
Example #29
def extractArticleText(symbol):

    articles = []
    headlines = []
    html = urllib.urlopen('http://www.nasdaq.com/symbol/' + symbol + '/news-headlines')
    soup = BeautifulSoup(html, "html.parser")
    soup = soup.find("div", { "class" : "headlines" })
    
#   for b in soup.find_all('small'):
#       headline = re.sub(r'[\ \n]{2,}', '', b.text)
#       headlines.append(headline.replace("\r\n\t\t", ""))

    # set up threads for parallel work
    # fill the queue with links
    q = Queue()
    p = Queue()
    links = soup.find_all('a', href=True)
    for link in links:
        q.put(link)
    for i in range(concurrent):
        t = Thread(target=parseArticles, args=(q, p, ))
        t.start()
    q.join()
    while p.qsize() > 0:
        article = p.get()
        articles.append((article, sentiment(article)))
    return articles
Example #30
def test_get_batches_two_regions():
    """
    Test to get a batch
    """
    batch_queue = Queue()
    variants = []
    first_variant = get_variant_line()
    second_variant = get_variant_line(pos="2", info="Annotation=DDD;Exonic")
    variants.append(first_variant)
    variants.append(second_variant)
    
    header = HeaderParser()
    header.parse_header_line("#{0}".format(HEADER))
    
    chromosomes = get_batches(variants=variants, batch_queue=batch_queue,
                              header=header)
    batch_1 = batch_queue.get()
    batch_queue.task_done()
    
    batch_2 = batch_queue.get()
    batch_queue.task_done()
    
    assert chromosomes == ['1']
    assert len(batch_1) == 1
    assert len(batch_2) == 1
Example #31
class Connection(signalr.Connection, object):
    def __init__(self, url, session):
        super(Connection, self).__init__(url, session)
        self.__transport = WebSocketsTransport(session, self)
        self.exception = None
        self.queue = Queue()
        self.__queue_handler = None

    def start(self):
        self.starting.fire()

        negotiate_data = self.__transport.negotiate()
        self.token = negotiate_data['ConnectionToken']

        listener = self.__transport.start()

        def wrapped_listener():
            while self.is_open:
                try:
                    listener()
                except Exception as e:
                    event = QueueEvent(event_type='ERROR', payload=e)
                    self.queue.put(event)
                    self.is_open = False
                    self.exception = e
            else:
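                # while/else: runs once the loop exits normally (there is no break)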
                self.started = False

        self.is_open = True
        self.__listener_thread = Thread(target=wrapped_listener,
                                        name='SignalrListener')
        self.__listener_thread.daemon = True
        self.__listener_thread.start()
        self.queue_handler()

    def queue_handler(self):
        while True:
            event = self.queue.get()
            try:
                if event is not None:
                    if event.type == 'SEND':
                        self.__transport.send(event.payload)
                    elif event.type == 'ERROR':
                        self.exit_gracefully()
                        raise self.exception
                    elif event.type == 'CLOSE':
                        self.exit_gracefully()
                        raise WebSocketConnectionClosedByUser(
                            'Connection closed by user.')
            finally:
                self.queue.task_done()

    def send(self, data):
        event = QueueEvent(event_type='SEND', payload=data)
        self.queue.put(event)

    def close(self):
        event = QueueEvent(event_type='CLOSE', payload=None)
        self.queue.put(event)

    def exit_gracefully(self):
        self.is_open = False
        self.__transport.close()
        self.__listener_thread.join()
Example #32
class PooledPg:
    """Pool for classic PyGreSQL connections.

    After you have created the connection pool, you can use
    connection() to get pooled, steady PostgreSQL connections.

    """

    version = __version__

    def __init__(self,
                 mincached=0,
                 maxcached=0,
                 maxconnections=0,
                 blocking=False,
                 maxusage=None,
                 setsession=None,
                 reset=None,
                 *args,
                 **kwargs):
        """Set up the PostgreSQL connection pool.

        mincached: initial number of connections in the pool
            (0 means no connections are made at startup)
        maxcached: maximum number of connections in the pool
            (0 or None means unlimited pool size)
        maxconnections: maximum number of connections generally allowed
            (0 or None means an arbitrary number of connections)
        blocking: determines behavior when exceeding the maximum
            (if this is set to true, block and wait until the number of
            connections decreases, otherwise an error will be reported)
        maxusage: maximum number of reuses of a single connection
            (0 or None means unlimited reuse)
            When this maximum usage number of the connection is reached,
            the connection is automatically reset (closed and reopened).
        setsession: optional list of SQL commands that may serve to prepare
            the session, e.g. ["set datestyle to ...", "set time zone ..."]
        reset: how connections should be reset when returned to the pool
            (0 or None to roll back transactions started with begin(),
            1 to always issue a rollback, 2 for a complete reset)
        args, kwargs: the parameters that shall be used to establish
            the PostgreSQL connections using class PyGreSQL pg.DB()

        """
        self._args, self._kwargs = args, kwargs
        self._maxusage = maxusage
        self._setsession = setsession
        self._reset = reset or 0
        if mincached is None:
            mincached = 0
        if maxcached is None:
            maxcached = 0
        if maxconnections is None:
            maxconnections = 0
        if maxcached:
            if maxcached < mincached:
                maxcached = mincached
        if maxconnections:
            if maxconnections < maxcached:
                maxconnections = maxcached
            # Create semaphore for number of allowed connections generally:
            from threading import Semaphore
            self._connections = Semaphore(maxconnections)
            self._blocking = blocking
        else:
            self._connections = None
        self._cache = Queue(maxcached)  # the actual connection pool
        # Establish an initial number of database connections:
        idle = [self.connection() for i in range(mincached)]
        while idle:
            idle.pop().close()

    def steady_connection(self):
        """Get a steady, unpooled PostgreSQL connection."""
        return SteadyPgConnection(self._maxusage, self._setsession, True,
                                  *self._args, **self._kwargs)

    def connection(self):
        """Get a steady, cached PostgreSQL connection from the pool."""
        if self._connections:
            if not self._connections.acquire(self._blocking):
                raise TooManyConnections
        try:
            con = self._cache.get(0)
        except Empty:
            con = self.steady_connection()
        return PooledPgConnection(self, con)

    def cache(self, con):
        """Put a connection back into the pool cache."""
        try:
            if self._reset == 2:
                con.reset()  # reset the connection completely
            else:
                if self._reset or con._transaction:
                    try:
                        con.rollback()  # rollback a possible transaction
                    except Exception:
                        pass
            self._cache.put(con, 0)  # and then put it back into the cache
        except Full:
            con.close()
        if self._connections:
            self._connections.release()

    def close(self):
        """Close all connections in the pool."""
        while 1:
            try:
                con = self._cache.get(0)
                try:
                    con.close()
                except Exception:
                    pass
                if self._connections:
                    self._connections.release()
            except Empty:
                break

    def __del__(self):
        """Delete the pool."""
        try:
            self.close()
        except Exception:
            pass
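
A usage sketch under assumed connection parameters (the dbname is illustrative):

    pool = PooledPg(mincached=2, maxcached=5, maxconnections=10,
                    blocking=True, dbname='test')
    con = pool.connection()   # pooled, steady connection
    try:
        con.query('select version()')
    finally:
        con.close()           # returns the connection to the pool cache
    pool.close()              # closes every cached connection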
Example #33
    def join_channel(self, request):
        """
        Join the peer to a channel.

        Args:
            request: the request to join a channel
        Returns:
            True on success, False on failure
        """
        _logger.debug('join_channel - start')

        err_msg = None
        if not request:
            err_msg = "Missing all required input request parameters"

        if 'targets' not in request:
            err_msg = "Missing peers parameter"

        if 'block' not in request:
            err_msg = "Missing genesis block parameter"

        if 'tx_id' not in request:
            err_msg = "Missing transaction id parameter"

        if err_msg:
            _logger.error('join_channel error: {}'.format(err_msg))
            raise ValueError(err_msg)

        tx_context = self._client.tx_context
        block_bytes = request['block']
        chaincode_input = chaincode_pb2.ChaincodeInput()
        chaincode_input.args.extend([proto_b("JoinChain"), block_bytes])
        chaincode_id = chaincode_pb2.ChaincodeID()
        chaincode_id.name = proto_str("cscc")

        cc_spec = create_cc_spec(chaincode_input, chaincode_id, 'GOLANG')
        cc_invoke_spec = chaincode_pb2.ChaincodeInvocationSpec()
        cc_invoke_spec.chaincode_spec.CopyFrom(cc_spec)

        extension = proposal_pb2.ChaincodeHeaderExtension()
        extension.chaincode_id.name = proto_str('cscc')
        channel_header = build_channel_header(
            common_pb2.HeaderType.Value('ENDORSER_TRANSACTION'),
            tx_context.tx_id,
            '',
            current_timestamp(),
            tx_context.epoch,
            extension=extension.SerializeToString())

        header = build_header(tx_context.identity, channel_header,
                              tx_context.nonce)
        proposal = build_cc_proposal(cc_invoke_spec, header,
                                     request['transient_map'])

        try:
            responses = send_transaction_proposal(proposal, header, tx_context,
                                                  request['targets'])
        except Exception as e:
            raise IOError("failed to send transaction proposal", e)

        q = Queue(1)
        result = True
        for r in responses:
            r.subscribe(on_next=lambda x: q.put(x),
                        on_error=lambda x: q.put(x))
            res = q.get(timeout=5)
            proposal_res = res[0]
            result = result and (proposal_res.response.status == 200)
        if result:
            _logger.info("successfully joined the peers")

        return result
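
A hypothetical request for the call above, limited to the keys the method actually reads ('targets', 'block', 'tx_id', 'transient_map'); the peer, block bytes, and channel objects are illustrative:

    request = {
        'targets': [peer],             # peers that should join
        'block': genesis_block_bytes,  # serialized genesis block
        'tx_id': tx_context.tx_id,     # transaction id
        'transient_map': {},           # no transient data in this sketch
    }
    assert channel.join_channel(request)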
Example #34
class MazeAgent:
    def __init__(self, env):
        self.env = env
        self.malmo = env.malmo
        self.loc = env.playerPos
        self.facing = (0, 1)
        self.maze = env.agMaze
        self.plan = Queue()

        self.maze_width = len(self.maze[0])
        self.maze_height = len(self.maze)

        self.start = self.loc
        self.goals = []

        for row in list(enumerate(self.maze)):
            for column in list(enumerate(row[1])):
                state = (column[0] + 2, row[0] - 2)
                if column[1] == "G":
                    self.goals.append((self.maze_height - state[0],
                                       self.maze_width + state[1]))

        self.lava = []
        self.safe = [self.start]
        self.warning = []
        self.visited = []

    def think(self, perception):
        current_location = (self.loc[0], self.loc[1])

        moves = self.available_moves(current_location)
        next_move = []
        for move in moves:
            adjusted_move = (current_location[0] + MOVE_DIRS_ADJUSTED[move][0],
                             current_location[1] + MOVE_DIRS_ADJUSTED[move][1])
            move_weight = 3 if adjusted_move in self.warning else 1

            if adjusted_move not in self.visited:
                next_move.append(
                    move_weight +
                    self.manhattan_distance(adjusted_move, self.goals))

        next_move = moves[next_move.index(min(next_move))]

        self.plan.put(next_move)
        self.loc = (self.loc[0] + MOVE_DIRS_ADJUSTED[next_move][0],
                    self.loc[1] + MOVE_DIRS_ADJUSTED[next_move][1])
        print((self.loc, next_move))

        self.visited.append(current_location)

    def act(self):
        current_location = (self.loc[0], self.loc[1])
        # BlindBot perceives what it is standing on...
        perception = self.perceive()
        print(str(current_location) + ": " + perception)
        if perception == 'obsidian':
            current_location = (self.loc[0], self.loc[1])
            self.warning.append(current_location)

        # ...BlindBot thinks...
        self.think(perception)

        # ...and then moves according to its plan, if it has one
        if self.plan.empty():
            return
        self.move(self.plan.get())
        return

    # [!] TODO: Any record-keeping when bot dies
    def die(self):
        # add the current location (lava) to unsafe space
        current_location = (self.loc[0], self.loc[1])
        self.lava.append(current_location)
        # we know that all four spots around are obsidian/barrier aka not visitable
        self.warning.extend(((current_location[0] - 1, current_location[1]),
                             (current_location[0] + 1, current_location[1]),
                             (current_location[0], current_location[1] + 1),
                             (current_location[0], current_location[1] - 1)))
        print(self.warning)
        # Reset player position back to start
        self.loc = self.env.playerPos

    def manhattan_distance(self, start, end):
        ret = []
        for goal in end:
            sx, sy = start
            ex, ey = goal
            ret.append(abs(ex - sx) + abs(ey - sy))
        return min(ret)

    def available_moves(self, current_location):
        moves = []

        if current_location[1] + 1 < self.maze_height - 1 and (
                current_location[0],
                current_location[1] + 1) not in self.visited:
            moves.append("U")
        if current_location[0] - 1 > 0 and (
                current_location[0] - 1,
                current_location[1]) not in self.visited:
            moves.append("R")
        if current_location[1] - 1 > 0 and (
                current_location[0],
                current_location[1] - 1) not in self.visited:
            moves.append("D")
        if current_location[0] + 1 < self.maze_width - 1 and (
                current_location[0] + 1,
                current_location[1]) not in self.visited:
            moves.append("L")

        return moves

    # -----------------------------------------------------------------------
    # YOU MAY NOT MODIFY ANYTHING BELOW THIS LINE
    # -----------------------------------------------------------------------
    def move(self, dir):
        moveDir = MOVE_DIRS[dir]
        faceDir = tuple(numpy.subtract(self.facing, moveDir))
        turnVal = -0.5

        if (abs(faceDir[0]) == 2 or abs(faceDir[1]) == 2):
            turnVal = 1
        else:
            turnVal = turnVal * faceDir[0] * faceDir[1]

        self.facing = moveDir
        self.loc = self.loc + moveDir
        self.malmo.sendCommand("turn " + str(turnVal))
        self.malmo.sendCommand("move 1")

    def perceive(self):
        # Information comes in as a TimestampedString
        msg = self.malmo.getWorldState().observations[-1].text
        observations = json.loads(msg)
        grid = observations.get(u'floor3x3', 0)
        # Since our agent is blind, it only knows what it's standing on:
        standingOn = grid[4]
        # If we're standing on a goal, huzzah! We win!
        self.env.goalTest(standingOn)
        return standingOn
Example #35
def dictionaryAttack(attack_dict):
    suffix_list = [""]
    custom_wordlist = [""]
    hash_regexes = []
    results = []
    resumes = []
    user_hash = []
    processException = False
    foundHash = False

    for (_, hashes) in attack_dict.items():
        for hash_ in hashes:
            if not hash_:
                continue

            hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_
            regex = hashRecognition(hash_)

            if regex and regex not in hash_regexes:
                hash_regexes.append(regex)
                infoMsg = "using hash method '%s'" % __functions__[
                    regex].func_name
                logger.info(infoMsg)

    for hash_regex in hash_regexes:
        keys = set()
        attack_info = []

        for (user, hashes) in attack_dict.items():
            for hash_ in hashes:
                if not hash_:
                    continue

                foundHash = True
                hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_

                if re.match(hash_regex, hash_):
                    item = None

                    if hash_regex not in (HASH.CRYPT_GENERIC, HASH.JOOMLA,
                                          HASH.WORDPRESS, HASH.UNIX_MD5_CRYPT,
                                          HASH.APACHE_MD5_CRYPT,
                                          HASH.APACHE_SHA1):
                        hash_ = hash_.lower()

                    if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD,
                                      HASH.MD5_GENERIC, HASH.SHA1_GENERIC,
                                      HASH.APACHE_SHA1):
                        item = [(user, hash_), {}]
                    elif hash_regex in (HASH.ORACLE_OLD, HASH.POSTGRES):
                        item = [(user, hash_), {'username': user}]
                    elif hash_regex in (HASH.ORACLE, ):
                        item = [(user, hash_), {'salt': hash_[-20:]}]
                    elif hash_regex in (HASH.MSSQL, HASH.MSSQL_OLD,
                                        HASH.MSSQL_NEW):
                        item = [(user, hash_), {'salt': hash_[6:14]}]
                    elif hash_regex in (HASH.CRYPT_GENERIC, ):
                        item = [(user, hash_), {'salt': hash_[0:2]}]
                    elif hash_regex in (HASH.UNIX_MD5_CRYPT,
                                        HASH.APACHE_MD5_CRYPT):
                        item = [(user, hash_), {
                            'salt': hash_.split('$')[2],
                            'magic': '$%s$' % hash_.split('$')[1]
                        }]
                    elif hash_regex in (HASH.JOOMLA, ):
                        item = [(user, hash_), {'salt': hash_.split(':')[-1]}]
                    elif hash_regex in (HASH.WORDPRESS, ):
                        if ITOA64.index(hash_[3]) < 32:
                            item = [(user, hash_), {
                                'salt': hash_[4:12],
                                'count': 1 << ITOA64.index(hash_[3]),
                                'prefix': hash_[:12]
                            }]
                        else:
                            warnMsg = "invalid hash '%s'" % hash_
                            logger.warn(warnMsg)

                    if item and hash_ not in keys:
                        resumed = hashDBRetrieve(hash_)
                        if not resumed:
                            attack_info.append(item)
                            user_hash.append(item[0])
                        else:
                            infoMsg = "resuming password '%s' for hash '%s'" % (
                                resumed, hash_)
                            if user and not user.startswith(DUMMY_USER_PREFIX):
                                infoMsg += " for user '%s'" % user
                            logger.info(infoMsg)
                            resumes.append((user, hash_, resumed))
                        keys.add(hash_)

        if not attack_info:
            continue

        if not kb.wordlists:
            while not kb.wordlists:

                # the slowest of all methods hence smaller default dict
                if hash_regex in (HASH.ORACLE_OLD, HASH.WORDPRESS):
                    dictPaths = [paths.SMALL_DICT]
                else:
                    dictPaths = [paths.WORDLIST]

                message = "what dictionary do you want to use?\n"
                message += "[1] default dictionary file '%s' (press Enter)\n" % dictPaths[
                    0]
                message += "[2] custom dictionary file\n"
                message += "[3] file with list of dictionary files"
                choice = readInput(message, default='1')

                try:
                    if choice == '2':
                        message = "what's the custom dictionary's location?\n"
                        dictPath = readInput(message)
                        if dictPath:
                            dictPaths = [dictPath]
                            logger.info("using custom dictionary")
                    elif choice == '3':
                        message = "what's the list file location?\n"
                        listPath = readInput(message)
                        checkFile(listPath)
                        dictPaths = getFileItems(listPath)
                        logger.info("using custom list of dictionaries")
                    else:
                        logger.info("using default dictionary")

                    dictPaths = filter(None, dictPaths)

                    for dictPath in dictPaths:
                        checkFile(dictPath)

                        if os.path.splitext(dictPath)[1].lower() == ".zip":
                            _ = zipfile.ZipFile(dictPath, 'r')
                            if len(_.namelist()) == 0:
                                errMsg = "no file(s) inside '%s'" % dictPath
                                raise SqlmapDataException(errMsg)
                            else:
                                _.open(_.namelist()[0])

                    kb.wordlists = dictPaths

                except Exception, ex:
                    warnMsg = "there was a problem while loading dictionaries"
                    warnMsg += " ('%s')" % getSafeExString(ex)
                    logger.critical(warnMsg)

            message = "do you want to use common password suffixes? (slow!) [y/N] "

            if readInput(message, default='N', boolean=True):
                suffix_list += COMMON_PASSWORD_SUFFIXES

        infoMsg = "starting dictionary-based cracking (%s)" % __functions__[
            hash_regex].func_name
        logger.info(infoMsg)

        for item in attack_info:
            ((user, _), _) = item
            if user and not user.startswith(DUMMY_USER_PREFIX):
                custom_wordlist.append(normalizeUnicode(user))

        if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD, HASH.MD5_GENERIC,
                          HASH.SHA1_GENERIC, HASH.APACHE_SHA1):
            for suffix in suffix_list:
                if not attack_info or processException:
                    break

                if suffix:
                    clearConsoleLine()
                    infoMsg = "using suffix '%s'" % suffix
                    logger.info(infoMsg)

                retVal = None
                processes = []

                try:
                    if _multiprocessing:
                        if _multiprocessing.cpu_count() > 1:
                            infoMsg = "starting %d processes " % _multiprocessing.cpu_count(
                            )
                            singleTimeLogMessage(infoMsg)

                        gc.disable()

                        retVal = _multiprocessing.Queue()
                        count = _multiprocessing.Value(
                            'i', _multiprocessing.cpu_count())

                        for i in xrange(_multiprocessing.cpu_count()):
                            process = _multiprocessing.Process(
                                target=_bruteProcessVariantA,
                                args=(attack_info, hash_regex, suffix, retVal,
                                      i, count, kb.wordlists, custom_wordlist,
                                      conf.api))
                            processes.append(process)

                        for process in processes:
                            process.daemon = True
                            process.start()

                        while count.value > 0:
                            time.sleep(0.5)

                    else:
                        warnMsg = "multiprocessing hash cracking is currently "
                        warnMsg += "not supported on this platform"
                        singleTimeWarnMessage(warnMsg)

                        retVal = Queue()
                        _bruteProcessVariantA(attack_info, hash_regex, suffix,
                                              retVal, 0, 1, kb.wordlists,
                                              custom_wordlist, conf.api)

                except KeyboardInterrupt:
                    print
                    processException = True
                    warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)"
                    logger.warn(warnMsg)

                    for process in processes:
                        try:
                            process.terminate()
                            process.join()
                        except (OSError, AttributeError):
                            pass

                finally:
                    if _multiprocessing:
                        gc.enable()

                    if retVal:
                        conf.hashDB.beginTransaction()

                        while not retVal.empty():
                            user, hash_, word = item = retVal.get(block=False)
                            attack_info = filter(
                                lambda _: _[0][0] != user or _[0][1] != hash_,
                                attack_info)
                            hashDBWrite(hash_, word)
                            results.append(item)

                        conf.hashDB.endTransaction()

            clearConsoleLine()

        else:
            for ((user, hash_), kwargs) in attack_info:
                if processException:
                    break

                if any(_[0] == user and _[1] == hash_ for _ in results):
                    continue

                count = 0
                found = False

                for suffix in suffix_list:
                    if found or processException:
                        break

                    if suffix:
                        clearConsoleLine()
                        infoMsg = "using suffix '%s'" % suffix
                        logger.info(infoMsg)

                    retVal = None
                    processes = []

                    try:
                        if _multiprocessing:
                            if _multiprocessing.cpu_count() > 1:
                                infoMsg = "starting %d processes " % _multiprocessing.cpu_count(
                                )
                                singleTimeLogMessage(infoMsg)

                            gc.disable()

                            retVal = _multiprocessing.Queue()
                            found_ = _multiprocessing.Value('i', False)
                            count = _multiprocessing.Value(
                                'i', _multiprocessing.cpu_count())

                            for i in xrange(_multiprocessing.cpu_count()):
                                process = _multiprocessing.Process(
                                    target=_bruteProcessVariantB,
                                    args=(user, hash_, kwargs, hash_regex,
                                          suffix, retVal, found_, i, count,
                                          kb.wordlists, custom_wordlist,
                                          conf.api))
                                processes.append(process)

                            for process in processes:
                                process.daemon = True
                                process.start()

                            while count.value > 0:
                                time.sleep(0.5)

                            found = found_.value != 0

                        else:
                            warnMsg = "multiprocessing hash cracking is currently "
                            warnMsg += "not supported on this platform"
                            singleTimeWarnMessage(warnMsg)

                            class Value():
                                pass

                            retVal = Queue()
                            found_ = Value()
                            found_.value = False

                            _bruteProcessVariantB(user, hash_, kwargs,
                                                  hash_regex, suffix, retVal,
                                                  found_, 0, 1, kb.wordlists,
                                                  custom_wordlist, conf.api)

                            found = found_.value

                    except KeyboardInterrupt:
                        print
                        processException = True
                        warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)"
                        logger.warn(warnMsg)

                        for process in processes:
                            try:
                                process.terminate()
                                process.join()
                            except (OSError, AttributeError):
                                pass

                    finally:
                        if _multiprocessing:
                            gc.enable()

                        if retVal:
                            conf.hashDB.beginTransaction()

                            while not retVal.empty():
                                user, hash_, word = item = retVal.get(
                                    block=False)
                                hashDBWrite(hash_, word)
                                results.append(item)

                            conf.hashDB.endTransaction()

                clearConsoleLine()
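The fragment above shows the coordination idiom used here: each worker process decrements a shared countdown Value when it exits, the parent polls that counter, and cracked entries are drained from a multiprocessing.Queue afterwards. Below is a minimal, self-contained Python 3 sketch of the same idiom; the names and the fake "match" test are illustrative only, not sqlmap's.

import multiprocessing
import time

def worker(words, result_queue, count):
    for word in words:
        if word.startswith("s"):      # stand-in for "hash cracked"
            result_queue.put(word)
    with count.get_lock():            # atomic decrement of the countdown
        count.value -= 1

if __name__ == "__main__":
    result_queue = multiprocessing.Queue()
    count = multiprocessing.Value("i", 2)   # two workers still running
    for chunk in (["alpha", "secret"], ["beta", "sesame"]):
        process = multiprocessing.Process(target=worker,
                                          args=(chunk, result_queue, count))
        process.daemon = True
        process.start()
    while count.value > 0:            # same polling loop as above
        time.sleep(0.1)
    while not result_queue.empty():   # drain results, as the finally block does
        print(result_queue.get(block=False))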
Example #36
0
def jbofihe(text):
    """Call ``jbofihe -ie -cr`` on text and return the output.

    >>> jbofihe('coi rodo')
    "(0[coi {<ro BOI> do} DO'U])0"
    >>> jbofihe('coi ho')
    Traceback (most recent call last):
      ...
    ValueError: not grammatical: coi _ho_ ⚠
    >>> jbofihe("coi ro do'u")
    Traceback (most recent call last):
      ...
    ValueError: not grammatical: coi ro _do'u_ ⚠
    >>> jbofihe('coi ro')
    Traceback (most recent call last):
      ...
    ValueError: not grammatical: coi ro ⚠
    >>> jbofihe('(')
    Traceback (most recent call last):
      ...
    ValueError: parser timeout
    """
    data = Queue(1)
    process = Popen(('jbofihe', '-ie', '-cr'),
                    stdin=PIPE,
                    stdout=PIPE,
                    stderr=PIPE)

    def target(queue):
        queue.put(process.communicate(text))

    thread = Thread(target=target, args=(data, ))
    thread.start()
    thread.join(1)

    if thread.isAlive():
        os.kill(process.pid, signal.SIGTERM)
        raise ValueError('parser timeout')

    output, error = data.get()
    grammatical = not process.returncode  # 0=grammatical, 1=ungrammatical

    if grammatical:
        return output.replace('\n', ' ').rstrip()

    error = error.replace('\n', ' ')
    match = re.match(
        r"^Unrecognizable word '(?P<word>.+?)' "
        r"at line \d+ column (?P<column>\d+)", error)
    if match:
        reg = r'^(%s)(%s)(.*)' % ('.' * (int(match.group('column')) - 1),
                                  re.escape(match.group('word')))
        text = re.sub(reg, r'\1_\2_ ⚠ \3', text).rstrip()
        raise ValueError('not grammatical: %s' % text)

    if '<End of text>' in error:
        raise ValueError('not grammatical: %s ⚠' % text)

    match = re.search(
        r'Misparsed token :\s+(?P<word>.+?) \[.+?\] '
        r'\(line \d+, col (?P<column>\d+)\)', error)
    if match:
        reg = r'^(%s)(%s)(.*)' % ('.' * (int(match.group('column')) - 1),
                                  match.group('word'))
        text = re.sub(reg, r'\1_\2_ ⚠ \3', text).rstrip()
        raise ValueError('not grammatical: %s' % text)

    raise ValueError('not grammatical')
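The watchdog thread in jbofihe exists only to enforce a timeout on process.communicate(). On Python 3, Popen.communicate() accepts a timeout argument that makes the helper thread and queue unnecessary; a minimal sketch under that assumption (the argument list is up to the caller):

from subprocess import Popen, PIPE, TimeoutExpired

def run_with_timeout(args, text, timeout=1):
    process = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    try:
        output, error = process.communicate(text.encode(), timeout=timeout)
    except TimeoutExpired:
        process.kill()           # kill and reap the child, then report the timeout
        process.communicate()
        raise ValueError('parser timeout')
    return output, error, process.returncode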
Example #37
0
infileName = "input.txt"
outfileName = "expected.txt"

# max number of outputs
maxCount = 47
count = 0

# read infile line-by-line, parsing and writing sorted output
with open(infileName, "r") as infile, open(outfileName, "w") as outfile:
    # seed the queue with two zeros so each line's output is the sum
    # computed two lines earlier (the queue acts as a short delay line)
    q = Queue()
    q.put(0)
    q.put(0)
    count = count + 1
    for line in infile:
        #sortedLine = sorted(map(int, line.split()))
        # parse line into a list of integers
        intLine = list(map(int, line.split()))
        # compute: add
        added = intLine[0] + intLine[1]
        q.put(added)
        # output (the extra space keeps single-digit results aligned)
        x = q.get()
        if x < 10:
            outfile.write("%d %d ->  %d" % (intLine[0], intLine[1], x))
        else:
            outfile.write("%d %d -> %d" % (intLine[0], intLine[1], x))
        outfile.write("\n")
        if count == maxCount:
            break
        count = count + 1
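The two q.put(0) calls above pre-seed the queue, so each written sum is the one computed two lines earlier; the queue behaves as a fixed-length delay line. A tiny Python 3 sketch of just that effect:

from queue import Queue

delay = Queue()
for _ in range(2):          # delay of two items
    delay.put(0)
for value in [5, 7, 11]:
    delay.put(value)
    print(delay.get())      # prints 0, 0, 5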
Example #38
0
class mcast_joiner(object):
    def __init__(self, group, port, collect_stats=False):
        self.collect_stats = collect_stats
        self.records = []
        self.intervals = []
        self.sizes = []
        self.missing = {}
        self.report = {}
        self.port = port
        self.group = group
        self.q = Queue()
        self.ev = Event()
        self.ev.set()

    def connect(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        server_address = ('', self.port)
        self.sock.bind(server_address)

        # Tell the operating system to add the socket to the multicast group
        # on all interfaces.
        pgroup = socket.inet_aton(self.group)
        mreq = struct.pack('4sL', pgroup, socket.INADDR_ANY)
        self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)

        # TODO: set timeout behaviour, although maybe not as important
        # now that thread is daemon

        self.t = Thread(target=self.run)
        self.t.daemon = True
        self.t.start()

    def run(self):
        try:
            # Receive/respond loop
            i = 0
            while self.ev.is_set():
                data, address = self.sock.recvfrom(1024)
                self.q.put((i, data, address, time()))
                i += 1
        finally:
            # ensure that IGMP leave message gets sent
            self.sock.close()
            self.sock = None
        return

    def close(self):
        self.ev.clear()
        self.t = None

    def __iter__(self):
        return self

    def __next__(self):
        n = self.q.get()
        if self.collect_stats:
            self.records.append(n)
        return n

    def updatestats(self):
        if "packets" in self.report:
            startp = self.report["packets"]
        else:
            startp = 0

        if "lastrec" in self.report:
            lastseq = int(self.report["lastrec"][1].rstrip())
            lasttime = self.report["lastrec"][3]
        else:
            lastseq = int(self.records[0][1].rstrip())
            lasttime = self.records[0][3]

        if "cumsize" in self.report:
            cumsize = self.report["cumsize"]
        else:
            cumsize = 0

        if "firstrec" not in self.report:
            self.report["firstrec"] = self.records[0]

        if "highestseq" in self.report:
            highestseq = self.report["highestseq"]
        else:
            highestseq = int(self.records[0][1].rstrip())

        for n in self.records[startp:]:
            i, seq, address, time = n
            size = len(seq)
            cumsize += size
            self.sizes.append(size)

            seq = int(seq.rstrip())
            if "firstseq" not in self.report:
                self.report["firstseq"] = seq
            if seq > lastseq + 1:
                for i in xrange(lastseq + 1, seq):
                    self.missing[i] = "missing"

            if seq in self.missing:
                if self.missing[seq] == "missing":
                    self.missing[seq] = "late"
                elif self.missing[seq].startswith("received"):
                    self.missing[seq] = "received, duplicate"
                else:
                    self.missing[seq] = "WTF?"
            else:
                self.missing[seq] = "received"
                lastseq = seq
            if seq > highestseq: highestseq = seq

            tdiff = time - lasttime
            lasttime = time
            self.intervals.append(tdiff)

            self.report["lastrec"] = n

        self.report["highestseq"] = highestseq
        self.report["totaltime"] = self.report["lastrec"][3] - self.report[
            "firstrec"][3]
        self.report["cumsize"] = cumsize
        if havestats:
            self.report["averageinterval"] = mean(self.intervals[1:])
            self.report["jitter"] = stdev(self.intervals[1:])
        self.report["packets"] = len(self.records)
        self.report[
            "bps"] = self.report["cumsize"] * 8 / self.report["totaltime"]
        self.report["pps"] = self.report["packets"] / self.report["totaltime"]

    def next(self):
        return self.__next__()
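The heart of updatestats() is the sequence-number bookkeeping: a jump marks the skipped numbers as missing, a missing number that later arrives becomes late, and a repeat becomes a duplicate. A condensed, self-contained Python 3 sketch of that classification (the function name is illustrative):

def classify(sequences):
    status, last = {}, None
    for seq in sequences:
        if last is not None and seq > last + 1:
            for gap in range(last + 1, seq):
                status[gap] = "missing"          # everything skipped over
        if status.get(seq) == "missing":
            status[seq] = "late"                 # arrived after the gap was noted
        elif status.get(seq, "").startswith("received"):
            status[seq] = "received, duplicate"  # seen twice
        else:
            status[seq] = "received"
            last = seq
    return status

print(classify([1, 2, 5, 3, 2]))
# {1: 'received', 2: 'received, duplicate', 3: 'late', 4: 'missing', 5: 'received'}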
Example #39
0
class DxlClientConfig(_BaseObject):
    """
    The Data Exchange Layer (DXL) client configuration contains the information necessary to connect
    a :class:`dxlclient.client.DxlClient` to the DXL fabric.

    The configuration includes the required PKI information (client certificate, client private key,
    broker CA certificates) and the set of DXL message brokers that are available to connect to on the fabric.

    The following sample shows creating a client configuration, instantiating a DXL client, and connecting
    to the fabric:

    .. code-block:: python

        from dxlclient.broker import Broker
        from dxlclient.client import DxlClient
        from dxlclient.client_config import DxlClientConfig

        # Create the client configuration
        config = DxlClientConfig(
            broker_ca_bundle="c:\\\\certs\\\\brokercerts.crt",
            cert_file="c:\\\\certs\\\\client.crt",
            private_key="c:\\\\certs\\\\client.key",
            brokers=[Broker.parse("ssl://192.168.189.12")])

        # Create the DXL client
        with DxlClient(config) as dxl_client:

            # Connect to the fabric
            dxl_client.connect()
    """

    # The default number of times to retry during connect, default -1 (infinite)
    _DEFAULT_CONNECT_RETRIES = -1
    # The default keep alive interval (in seconds); client pings broker at interval
    # 30 minutes by default
    _DEFAULT_MQTT_KEEP_ALIVE_INTERVAL = 30 * 60
    # The default reconnect back off multiplier
    _DEFAULT_RECONNECT_BACK_OFF_MULTIPLIER = 2
    # The default reconnect delay (in seconds)
    _DEFAULT_RECONNECT_DELAY = 1
    # The default maximum reconnect delay, defaults to 1 minute
    _DEFAULT_RECONNECT_DELAY_MAX = 60
    # The default reconnect delay random multiplier, defaults to 25 percent
    _DEFAULT_RECONNECT_DELAY_RANDOM = 0.25
    # Whether to attempt to reconnect when disconnected
    _DEFAULT_RECONNECT_WHEN_DISCONNECTED = True

    def __init__(self, broker_ca_bundle, cert_file, private_key, brokers):
        """
        Constructor parameters:

        :param broker_ca_bundle: The file name of a bundle containing the broker CA certificates in PEM format
        :param cert_file: The file name of the client certificate in PEM format
        :param private_key: The file name of the client private key in PEM format
        :param brokers: A list of :class:`dxlclient.broker.Broker` objects representing brokers comprising the
            DXL fabric. When invoking the :func:`dxlclient.client.DxlClient.connect` method, the
            :class:`dxlclient.client.DxlClient` will attempt to connect to the closest broker.
        """
        super(DxlClientConfig, self).__init__()

        client_id = UuidGenerator.generate_id_as_string()

        if not broker_ca_bundle:
            raise ValueError("Broker CA bundle not specified")

        if not cert_file:
            raise ValueError("Certificate file not specified")

        if not private_key:
            raise ValueError("Private key file not specified")

        if brokers is None:
            raise ValueError("Brokers were not specified")

        # The number of times to retry during connect
        self._connect_retries = self._DEFAULT_CONNECT_RETRIES
        # The keep alive interval
        self._keep_alive_interval = self._DEFAULT_MQTT_KEEP_ALIVE_INTERVAL
        # The reconnect back off multiplier
        self._reconnect_back_off_multiplier = self._DEFAULT_RECONNECT_BACK_OFF_MULTIPLIER
        # The reconnect delay (in seconds)
        self._reconnect_delay = self._DEFAULT_RECONNECT_DELAY
        # The maximum reconnect delay
        self._reconnect_delay_max = self._DEFAULT_RECONNECT_DELAY_MAX
        # The reconnect delay random
        self._reconnect_delay_random = self._DEFAULT_RECONNECT_DELAY_RANDOM
        # Whether to reconnect when disconnected
        self._reconnect_when_disconnected = self._DEFAULT_RECONNECT_WHEN_DISCONNECTED

        # The unique identifier of the client
        self._client_id = client_id
        # The list of brokers
        self.brokers = brokers
        # The filename of the CA bundle file in PEM format
        self.broker_ca_bundle = broker_ca_bundle
        # The filename of the client certificate in PEM format (must not have a password)
        self.cert_file = cert_file
        # The filename of the private key used to request the certificates
        self.private_key = private_key
        # Queue for getting the sorted broker list
        self._queue = None
        # The incoming message queue size
        self._incoming_message_queue_size = 1000
        # The incoming thread pool size
        self._incoming_message_thread_pool_size = 1

    def __del__(self):
        """destructor"""
        super(DxlClientConfig, self).__del__()

    @property
    def broker_ca_bundle(self):
        """
        The file name of a bundle containing the broker CA certificates in PEM format
        """
        return self._broker_ca_bundle

    @broker_ca_bundle.setter
    def broker_ca_bundle(self, broker_ca_bundle):
        self._broker_ca_bundle = broker_ca_bundle

    @property
    def cert_file(self):
        """
        The file name of the client certificate in PEM format
        """
        return self._cert_file

    @cert_file.setter
    def cert_file(self, cert_file):
        self._cert_file = cert_file

    @property
    def private_key(self):
        """
        The file name of the client private key in PEM format
        """
        return self._private_key

    @private_key.setter
    def private_key(self, private_key):
        self._private_key = private_key

    @property
    def brokers(self):
        """
        A list of :class:`dxlclient.broker.Broker` objects representing brokers comprising the
        DXL fabric. When invoking the :func:`dxlclient.client.DxlClient.connect` method, the
        :class:`dxlclient.client.DxlClient` will attempt to connect to the closest broker.
        """
        return self._brokers

    @brokers.setter
    def brokers(self, brokers):
        self._brokers = brokers

    @property
    def incoming_message_queue_size(self):
        """
        The queue size for incoming messages (will block when queue is full).

        Defaults to ``1000``
        """
        return self._incoming_message_queue_size

    @incoming_message_queue_size.setter
    def incoming_message_queue_size(self, incoming_message_queue_size):
        self._incoming_message_queue_size = incoming_message_queue_size

    @property
    def incoming_message_thread_pool_size(self):
        """
        The thread pool size for incoming messages

        Defaults to ``1``
        """
        return self._incoming_message_thread_pool_size

    @incoming_message_thread_pool_size.setter
    def incoming_message_thread_pool_size(self, incoming_message_thread_pool_size):
        self._incoming_message_thread_pool_size = incoming_message_thread_pool_size

    @property
    def connect_retries(self):
        """
        The maximum number of connection attempts for each :class:`dxlclient.broker.Broker`
        specified in the :class:`dxlclient.client_config.DxlClientConfig`

        A value of ``-1`` indicates that the client will continue to retry without limit until it
        establishes a connection
        """
        return self._connect_retries

    @connect_retries.setter
    def connect_retries(self, connect_retries):
        """
        Sets the number of retries to perform when connecting. A value of -1
        indicates retry forever.

        :param connect_retries: The number of retries. A value of -1 indicates
                                retry forever.
        """
        self._connect_retries = connect_retries

    @property
    def keep_alive_interval(self):
        """
        The maximum period in seconds between communications with a connected :class:`dxlclient.broker.Broker`.
        If no other messages are being exchanged, this controls the rate at which the client will send ping
        messages to the :class:`dxlclient.broker.Broker`.

        Defaults to ``1800`` seconds (30 minutes)
        """
        return self._keep_alive_interval

    @property
    def reconnect_back_off_multiplier(self):
        """
        Multiplies the current reconnect delay by this value on subsequent connect retries. For example, a current
        delay of 3 seconds with a multiplier of 2 would result in the next retry attempt being in 6 seconds.

        Defaults to ``2``
        """
        return self._reconnect_back_off_multiplier

    @reconnect_back_off_multiplier.setter
    def reconnect_back_off_multiplier(self, reconnect_back_off_multiplier):
        self._reconnect_back_off_multiplier = reconnect_back_off_multiplier

    @keep_alive_interval.setter
    def keep_alive_interval(self, keep_alive_interval):
        self._keep_alive_interval = keep_alive_interval

    @property
    def reconnect_delay(self):
        """
        The initial delay between retry attempts in seconds. The delay increases ("backs off")
        as subsequent connection attempts are made.

        Defaults to ``1`` second
        """
        return self._reconnect_delay

    @reconnect_delay.setter
    def reconnect_delay(self, reconnect_delay):
        self._reconnect_delay = reconnect_delay

    @property
    def reconnect_delay_max(self):
        """
        The maximum delay between connection retry attempts in seconds

        Defaults to ``60`` seconds (1 minute)
        """
        return self._reconnect_delay_max

    @reconnect_delay_max.setter
    def reconnect_delay_max(self, reconnect_delay_max):
        self._reconnect_delay_max = reconnect_delay_max

    @property
    def reconnect_delay_random(self):
        """
        The randomness delay percentage (between ``0.0`` and ``1.0``)

        Defaults to ``0.25``
        """
        return self._reconnect_delay_random

    @reconnect_delay_random.setter
    def reconnect_delay_random(self, reconnect_delay_random):
        """
        Sets a randomness delay percentage (between 0.0 and 1.0). When
        calculating the reconnect delay, this percentage indicates how much
        randomness there should be in the current delay. For example, if the
        current delay is 100ms, a value of .25 would mean that the actual delay
        would be between 100ms and 125ms.

        :param reconnect_delay_random: The randomness delay percentage (between 0.0 and 1.0).
        """
        self._reconnect_delay_random = reconnect_delay_random

    @property
    def reconnect_when_disconnected(self):
        """
        Whether the client will continuously attempt to reconnect to the fabric if it becomes disconnected

        Defaults to ``True``
        """
        return self._reconnect_when_disconnected

    @reconnect_when_disconnected.setter
    def reconnect_when_disconnected(self, reconnect):
        self._reconnect_when_disconnected = reconnect

    def _set_brokers_from_json(self, broker_list):
        """
        Sets brokers list from JSON object.

        :param broker_list: Dictionary containing the DXL broker entries
        """
        brokers = _get_brokers(broker_list)
        if brokers is not None:
            self._brokers[:] = brokers

    def _get_sorted_broker_list_worker(self, broker):
        """Returns a sorted list of the brokers in this config."""
        broker._connect_to_broker()

    def _get_sorted_broker_list(self):
        """
        Returns the Broker list sorted by response time low to high.

        :returns: {@code list}: Sorted list of brokers.
        """
        threads = []

        for broker in self._brokers:
            # pylint: disable=invalid-name
            t = threading.Thread(target=self._get_sorted_broker_list_worker, args=[broker])
            threads.append(t)
            t.daemon = True
            t.start()

        for t in threads:
            t.join()

        return sorted(self._brokers, key=lambda b: (b._response_time is None, b._response_time))

    def _get_fastest_broker_worker(self, broker):
        """Calculate the fastest (smallest response time) broker."""
        broker._connect_to_broker()
        self._queue.put(broker)

    def _get_fastest_broker(self):
        """
        Returns the Broker with the lowest response time.

        :returns: {@code dxlclient.broker.Broker}: Fastest broker.
        """
        brokers = self._brokers
        self._queue = Queue()

        for broker in brokers:
            # pylint: disable=invalid-name
            t = threading.Thread(target=self._get_fastest_broker_worker, args=[broker])
            t.daemon = True
            t.start()

        return self._queue.get(timeout=15)

    @staticmethod
    def create_dxl_config_from_file(dxl_config_file):
        """

        This method allows creation of a :class:`DxlClientConfig` object from a
        specified configuration file. The information contained in the file has a one-to-one
        correspondence with the :class:`DxlClientConfig` constructor.

        .. code-block:: python

            [Certs]
            BrokerCertChain=c:\\\\certs\\\\brokercerts.crt
            CertFile=c:\\\\certs\\\\client.crt
            PrivateKey=c:\\\\certs\\\\client.key

            [Brokers]
            mybroker=mybroker;8883;mybroker.mcafee.com;192.168.1.12
            mybroker2=mybroker2;8883;mybroker2.mcafee.com;192.168.1.13

        The configuration file can be loaded as follows:

        .. code-block:: python

            from dxlclient.client_config import DxlClientConfig

            config = DxlClientConfig.create_dxl_config_from_file("c:\\\\certs\\\\dxlclient.cfg")

        :param dxl_config_file: Path to the configuration file
        :return: A :class:`DxlClientConfig` object corresponding to the specified configuration file
        """
        config_parser = _DxlConfigParser()

        if not config_parser.read(dxl_config_file):
            raise Exception("Can't parse config file")

        config_file_path = path.dirname(dxl_config_file)
        cert_file = DxlClientConfig._get_file_path(config_file_path, config_parser.get("Certs", "CertFile"))
        private_key = DxlClientConfig._get_file_path(config_file_path, config_parser.get("Certs", "PrivateKey"))
        cert_chain = DxlClientConfig._get_file_path(config_file_path, config_parser.get("Certs", "BrokerCertChain"))
        client_id = config_parser.get("General", "ClientId", False)

        client_config = DxlClientConfig(broker_ca_bundle=cert_chain,
                                        cert_file=cert_file, private_key=private_key, brokers=[])
        if client_id:
            client_config._client_id = client_id

        broker_list = {}
        try:
            brokers = config_parser.items("Brokers")
            for broker in brokers:
                broker_list[broker[0]] = broker[1]
        except NoSectionError:
            logger.warning("Brokers not defined in config file")

        if len(broker_list) == 0:
            logger.warning("Broker list is empty")

        client_config._set_brokers_from_json(broker_list)
        return client_config

    @staticmethod
    def _get_file_path(config_path, cert_file_path):
        if not path.isfile(cert_file_path) and not path.isabs(cert_file_path):
            file_path = path.join(config_path, cert_file_path)
            if path.isfile(file_path):
                cert_file_path = file_path
        return cert_file_path
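_get_fastest_broker() above is a first-result-wins race: every probe thread reports into one queue and the caller takes whichever answer arrives first, abandoning the stragglers to their daemon threads. A generic Python 3 sketch of the pattern, where probe() is a hypothetical stand-in for Broker._connect_to_broker():

import threading
from queue import Queue

def fastest(candidates, probe, timeout=15):
    q = Queue()
    for candidate in candidates:
        t = threading.Thread(target=lambda c=candidate: q.put((probe(c), c)))
        t.daemon = True                  # stragglers must not block shutdown
        t.start()
    return q.get(timeout=timeout)[1]     # first probe to finish wins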
Example #40
0
    def __init__(self, host=False, port=False):
        if host:
            self.host = host
        if port:
            self.port = port

        self.establish_connection_to_spine()
        report_queue = Queue(maxsize=0)

        membrane = Membrane(report_queue, self.log)
        membrane.setup_listening_membrane()
        report_queue.put(membrane.connected)
        if membrane.connected:
            report = membrane.connected
        else:
            report = ''
        membrane_info = {}
        self.nerve_state = {
            'boot_time': str(time.time()),
            'membrane_host': membrane.host,
            'membrane_port': membrane.port,
            'spine_host': self.host,
            'spine_port': self.port,
            'membrane_info': membrane_info,
        }
        self.store_state()

        quit = False

        while not quit:
            now = str(time.time())

            if not self.temp:
                temp_count = 0
                temp = os.popen('vcgencmd measure_temp 2> /dev/null').read()
                self.temp = temp
            else:
                temp_count += 1
                temp = self.temp
                if temp_count % 999 == 0:
                    temp = os.popen(
                        'vcgencmd measure_temp 2> /dev/null').read()
                    self.temp = temp

            if '=' in temp:
                temp = temp.split('=')[1].strip()
            else:
                temp = 'Unknown'

            msg_body = "host:%s time:%s temp:%s" % (self.hostname, now, temp)

            if report:
                if report == 'QUIT':
                    quit = True
                else:
                    # build/print/send only for real reports; previously
                    # print(msg) could run with msg never assigned
                    msg = u"%s %s" % (msg_body, report)
                    print(msg)
                    if 'HEARTBEAT' not in msg:
                        self.socket.send_string(msg)
                report = ''
                time.sleep(0.05)

            # self.heartbeat()
            self.poll.register(self.socket, zmq.POLLIN)
            sockets = dict(self.poll.poll(0.1))

            spine_data = {}
            changed = False

            if self.socket in sockets:
                msg = self.socket.recv()
                if msg:
                    try:
                        new_spine_data = json.loads(msg)
                        if new_spine_data:
                            if spine_data != new_spine_data:
                                spine_data = new_spine_data  # was '==', a no-op comparison
                                changed = True
                    except Exception:
                        pass

                if changed and spine_data:
                    print(spine_data)

            while not report_queue.empty():
                report = report_queue.get()
                if report:
                    report = report.strip()
                    #self.log("Membrane input: %s." % report)
                report_queue.task_done()

        for i in range(10):
            report = 'QUIT'
            msg = u"%s NERVE:%s:%s" % (self.hostname, now, report)
            self.socket.send_string(msg)
            time.sleep(0.0001)

        membrane.join()
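The loop above pairs every report_queue.get() with task_done(). That acknowledgement is what makes Queue.join() usable as a completion barrier; a minimal Python 3 sketch:

import threading
from queue import Queue

q = Queue()

def consumer():
    while True:
        item = q.get()
        print("handled", item)
        q.task_done()        # acknowledge, so q.join() can return

threading.Thread(target=consumer, daemon=True).start()
for i in range(3):
    q.put(i)
q.join()                     # returns once all three items are acknowledged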
Example #41
0
class ThreadTool:
	def __init__(self,isThread=1,needfinishqueue=0,deamon=True):
		self.isThread=isThread
		self.idletask={}
		self.Threads=[]
		self.alivenum=0
		self.needfinishqueue=needfinishqueue
		self.running = 0
		self.threads_num = 10
		self.deamon=deamon
		self.job=None
		self.default_object=None
		if self.isThread==1:
			self.lock = Lock() # thread lock

			self.q_request = Queue() # task queue
			if needfinishqueue>0:
				self.q_finish = Queue() # finished-results queue
		elif self.isThread==0 :
			self.lock = multiprocessing.Lock()  
			self.q_request=multiprocessing.Queue()
			if self.needfinishqueue>0:
				self.q_finish=multiprocessing.Queue()
		else:

			from gevent.queue import JoinableQueue as geventqueue
			from gevent.lock import Semaphore


			self.lock = Semaphore()
			self.q_request = geventqueue()
			if self.needfinishqueue > 0:
				self.q_finish = geventqueue()

	def __del__(self): # on teardown, wait for both queues to finish
		time.sleep(0.5)
		if self.isThread==1 or self.isThread==2:

			self.q_request.join()
			if self.needfinishqueue>0:
				self.q_finish.join()


	def getqueue_size(self):
		return self.q_request.qsize()
	def set_Thread_size(self,threads_num=10):
		self.threads_num = threads_num
	def init_add(self,add_init_object):
		self.default_object=add_init_object
	def add_task(self,job):
		self.job=job
# fetch the tasks still pending in the queue (used for cluster operation)
	def get_work(self):
		tmparray=[]
		if self.q_request.qsize()>0:
			try:
				req = self.q_request.get(block=True,timeout=4)
				tmparray.append(req)
				return tmparray
			except:
				return tmparray
		else:
			return tmparray
	def start(self):
		sizenumber=min(self.threads_num,self.q_request.qsize())
		if self.isThread==1:
			for i in range(sizenumber):
				t = Thread(target=self.getTask)
				print 'thread '+str(i+1)+'  starting'
				t.setDaemon(self.deamon)
				t.start()
				self.Threads.append(t)
				with self.lock:	
					self.alivenum+=1
		elif self.isThread==0:
			for i in range(sizenumber):
				t = multiprocessing.Process(target=self.getTaskProcess)
				print 'process '+str(i+1)+'  starting'
				t.daemon=self.deamon # was t.Daemon, an inert attribute
				t.start()	
				self.Threads.append(t)
				with self.lock:	
					self.alivenum+=1
		else:

			for i in range(sizenumber):
				t = gevent.spawn(self.getgeventTask)
				print 'greenlet ' + str(i + 1) + '  starting'
				self.Threads.append(t)
				with self.lock:
					self.alivenum += 1
	def get_running_size(self):
		return self.running
	def taskleft(self):
		if self.needfinishqueue>0:
			return self.q_request.qsize()+self.q_finish.qsize()+self.running
		else:
			return self.q_request.qsize()+self.running
	def push(self,req):
		sizenum=len(req)
		for urls in req:
			self.q_request.put(urls)

		threadnownum=0
		threaddie=[]
		dienum=0
		if self.isThread==1:
			tempnumb=0
			with self.lock:
				tempnumb=self.alivenum
			if tempnumb<self.threads_num:
					
				for item in self.Threads:

					if item.isAlive():


						threadnownum=threadnownum+1


				with self.lock:	
					print str(threadnownum)+' threads alive'
					self.Threads = filter(lambda x:x.isAlive() !=False,self.Threads)
				print str(len(self.Threads))+' threads alive after cleanup'
			else:
				threadnownum=self.threads_num
		elif self.isThread==0:
			tempnumb=0
			with self.lock:
				tempnumb=self.alivenum
			if tempnumb<self.threads_num:
				for item in self.Threads:

					if item.is_alive():

						threadnownum=threadnownum+1	


				with self.lock:	
					print str(threadnownum)+' processes alive'
					self.Threads = filter(lambda x:x.is_alive()!=False,self.Threads)

				print str(len(self.Threads))+' processes alive after cleanup'
			else:
				threadnownum=self.threads_num
			
		sizenumber=min(self.threads_num-threadnownum,sizenum)
		if self.isThread==1:
			for i in range(sizenumber):
				t=Thread(target=self.getTask)
				t.daemon=self.deamon # was t.Daemon, an inert attribute
				t.start()
				self.Threads.append(t)
				with self.lock:	
					self.alivenum+=1

		elif self.isThread==0:
			for i in range(sizenumber):
				t=multiprocessing.Process(target=self.getTaskProcess)
				t.daemon=self.deamon # was t.Daemon, an inert attribute
				t.start()
				self.Threads.append(t)
				with self.lock:	
					self.alivenum+=1
		else:

			for i in range(self.threads_num):
				print 'alive num', self.alivenum, self.threads_num
				if self.alivenum <self.threads_num:
					t = gevent.spawn(self.getgeventTask)
					print 'greenlet ' + str(self.alivenum) + '  starting'
					self.Threads.append(t)
					with self.lock:
						self.alivenum += 1
				else:
					break

			self.q_request.join()

	def pop(self):
		return self.q_finish.get()
	def do_job(self,job,req,threadname):
		return job(req,threadname)

	def getTaskProcess(self):
		while True:
# 			if self.taskleft()>0:
# 				try:
# 					req = self.q_request.get(block=True,timeout=10000)
# 				except:
# 					continue
# 			else:
# 				threadname=multiprocessing.current_process().name
# 				print threadname+' shutting down'
# 				with self.lock:	
# 					self.alivenum-=1
# 				break
			req = self.q_request.get()
			with self.lock:				# must be atomic; entering the critical section
				self.running=self.running+1

			threadname=multiprocessing.current_process().name

			print 'process '+threadname+' issuing request: '

			ans=self.do_job(self.job,req,threadname)
#			ans = self.connectpool.getConnect(req)

# 			self.lock.release()
			if self.needfinishqueue>0:
				self.q_finish.put((req,ans))
#			self.lock.acquire()
			with self.lock:
				self.running= self.running-1
			threadname=multiprocessing.current_process().name

			print 'process '+threadname+' finished request'
#			self.lock.release()

			#self.q_request.task_done()

	def getTask(self):
		while True:
			# if self.taskleft()>0:
			# 	try:
			# 		req = self.q_request.get(block=True,timeout=10000)
			# 	except:
			# 		continue
			# else:
			# 	threadname=threading.currentThread().getName()
			# 	with self.lock:
			# 		self.alivenum-=1
			# 	print threadname+' shutting down'
			# 	break
			req = self.q_request.get()
			
			with self.lock:				# must be atomic; entering the critical section
				self.running=self.running+1


			threadname=threading.currentThread().getName()

			print 'thread '+threadname+' issuing request: '

			ans=self.do_job(self.job,req,threadname)
#			ans = self.connectpool.getConnect(req)

# 			self.lock.release()
			if self.needfinishqueue>0:
				self.q_finish.put((req,ans))
#			self.lock.acquire()
			with self.lock:
				self.running-= 1
			threadname=threading.currentThread().getName()

			print 'thread '+threadname+' finished request'
#			self.lock.release()
			self.q_request.task_done()
	def getgeventTask(self):
		while True:
			# if self.taskleft()>0:
			# 	try:
			# 		req = self.q_request.get(block=True,timeout=10000)
			# 	except:
			# 		continue
			# else:
			# 	threadname=threading.currentThread().getName()
			# 	with self.lock:
			# 		self.alivenum-=1
			# 	print threadname+' shutting down'
			# 	break

			req = self.q_request.get()

			with self.lock:				# must be atomic; entering the critical section
				self.running=self.running+1


			threadname=gevent.getcurrent()

			print threadname, 'greenlet issuing request: '

			ans=self.do_job(self.job,req,threadname)
#			ans = self.connectpool.getConnect(req)

# 			self.lock.release()
			if self.needfinishqueue>0:
				self.q_finish.put((req,ans))
#			self.lock.acquire()
			with self.lock:
				self.running-= 1
			threadname = gevent.getcurrent()

			print threadname, 'greenlet finished request'
#			self.lock.release()
			self.q_request.task_done()
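ThreadTool implements the same worker-pool shape three ways (threads, processes, gevent greenlets). Stripped to its thread-only core, the pattern is a task queue feeding daemon workers plus a result queue, with join() as the completion barrier; a minimal Python 3 sketch:

import threading
from queue import Queue

def pool(job, items, workers=4):
    tasks, results = Queue(), Queue()

    def run():
        while True:
            req = tasks.get()
            results.put((req, job(req)))  # result queued before acknowledging
            tasks.task_done()

    for _ in range(workers):
        threading.Thread(target=run, daemon=True).start()
    for item in items:
        tasks.put(item)
    tasks.join()                          # wait until every request is processed
    return [results.get() for _ in items]

print(pool(lambda x: x * x, [1, 2, 3]))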
Example #42
0
File: main.py Project: LiberTang0/5
class Scanner(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.lock = Lock()
        self.status = {'status': 'connecting', 'messages': []}
        self.input_dir = '/dev/input/by-id/'
        self.barcodes = Queue()
        self.keymap = {
            2: ("1", "!"),
            3: ("2", "@"),
            4: ("3", "#"),
            5: ("4", "$"),
            6: ("5", "%"),
            7: ("6", "^"),
            8: ("7", "&"),
            9: ("8", "*"),
            10: ("9", "("),
            11: ("0", ")"),
            12: ("-", "_"),
            13: ("=", "+"),
            # 14 BACKSPACE
            # 15 TAB
            16: ("q", "Q"),
            17: ("w", "W"),
            18: ("e", "E"),
            19: ("r", "R"),
            20: ("t", "T"),
            21: ("y", "Y"),
            22: ("u", "U"),
            23: ("i", "I"),
            24: ("o", "O"),
            25: ("p", "P"),
            26: ("[", "{"),
            27: ("]", "}"),
            # 28 ENTER
            # 29 LEFT_CTRL
            30: ("a", "A"),
            31: ("s", "S"),
            32: ("d", "D"),
            33: ("f", "F"),
            34: ("g", "G"),
            35: ("h", "H"),
            36: ("j", "J"),
            37: ("k", "K"),
            38: ("l", "L"),
            39: (";", ":"),
            40: ("'", "\""),
            41: ("`", "~"),
            # 42 LEFT SHIFT
            43: ("\\", "|"),
            44: ("z", "Z"),
            45: ("x", "X"),
            46: ("c", "C"),
            47: ("v", "V"),
            48: ("b", "B"),
            49: ("n", "N"),
            50: ("m", "M"),
            51: (",", "<"),
            52: (".", ">"),
            53: ("/", "?"),
            # 54 RIGHT SHIFT
            57: (" ", " "),
        }

    def lockedstart(self):
        with self.lock:
            if not self.isAlive():
                self.daemon = True
                self.start()

    def set_status(self, status, message=None):
        if status == self.status['status']:
            if message != None and message != self.status['messages'][-1]:
                self.status['messages'].append(message)
        else:
            self.status['status'] = status
            if message:
                self.status['messages'] = [message]
            else:
                self.status['messages'] = []

        if status == 'error' and message:
            _logger.error('Barcode Scanner Error: ' + message)
        elif status == 'disconnected' and message:
            _logger.info('Disconnected Barcode Scanner: %s', message)

    def get_device(self):
        try:
            if not evdev:
                return None
            devices = [device for device in listdir(self.input_dir)]
            keyboards = [
                device for device in devices
                if ('kbd' in device) and ('keyboard' not in device.lower())
            ]
            scanners = [
                device for device in devices
                if ('barcode' in device.lower()) or (
                    'scanner' in device.lower())
            ]
            if len(scanners) > 0:
                self.set_status('connected', 'Connected to ' + scanners[0])
                return evdev.InputDevice(join(self.input_dir, scanners[0]))
            elif len(keyboards) > 0:
                self.set_status('connected', 'Connected to ' + keyboards[0])
                return evdev.InputDevice(join(self.input_dir, keyboards[0]))
            else:
                self.set_status('disconnected', 'Barcode Scanner Not Found')
                return None
        except Exception as e:
            self.set_status('error', str(e))
            return None

    def get_barcode(self):
        """ Returns a scanned barcode. Will wait at most 5 seconds to get a barcode, and will
            return barcode scanned in the past if they are not older than 5 seconds and have not
            been returned before. This is necessary to catch barcodes scanned while the POS is
            busy reading another barcode
        """

        self.lockedstart()

        while True:
            try:
                timestamp, barcode = self.barcodes.get(True, 5)
                if timestamp > time.time() - 5:
                    return barcode
            except Empty:
                return ''

    def get_status(self):
        self.lockedstart()
        return self.status

    def run(self):
        """ This will start a loop that catches all keyboard events, parse barcode
            sequences and put them on a timestamped queue that can be consumed by
            the point of sale's requests for barcode events 
        """

        self.barcodes = Queue()

        barcode = []
        shift = False
        device = None

        while True:  # barcodes loop
            if device:  # ungrab device between barcodes and timeouts for plug & play
                try:
                    device.ungrab()
                except Exception as e:
                    device = None
                    self.set_status('error', str(e))
            else:
                time.sleep(5)  # wait until a suitable device is plugged
                device = self.get_device()
                if not device:
                    continue

            try:
                device.grab()
                shift = False
                barcode = []

                while True:  # keycode loop
                    r, w, x = select([device], [], [], 5)
                    if len(r) == 0:  # timeout
                        break
                    events = device.read()

                    for event in events:
                        if event.type == evdev.ecodes.EV_KEY:
                            #_logger.debug('Evdev Keyboard event %s',evdev.categorize(event))
                            if event.value == 1:  # keydown events
                                if event.code in self.keymap:
                                    if shift:
                                        barcode.append(
                                            self.keymap[event.code][1])
                                    else:
                                        barcode.append(
                                            self.keymap[event.code][0])
                                elif event.code == 42 or event.code == 54:  # SHIFT
                                    shift = True
                                elif event.code == 28:  # ENTER, end of barcode
                                    self.barcodes.put(
                                        (time.time(), ''.join(barcode)))
                                    barcode = []
                            elif event.value == 0:  #keyup events
                                if event.code == 42 or event.code == 54:  # LEFT SHIFT
                                    shift = False

            except Exception as e:
                self.set_status('error', str(e))
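get_barcode() above timestamps each entry on put and discards stale ones on get, so a barcode scanned while the POS was busy is still delivered if it is recent enough. A condensed Python 3 sketch of that staleness filter:

import time
from queue import Queue, Empty

def get_fresh(q, max_age=5, wait=5):
    while True:
        try:
            timestamp, value = q.get(True, wait)   # block up to `wait` seconds
        except Empty:
            return ''
        if timestamp > time.time() - max_age:      # silently drop stale entries
            return value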
Example #43
0
class Scheduler(object):
    def __init__(self):
        # A queue of tasks ready to be run
        self.ready = Queue()
        # A dict to allow us to find a task by its task id
        self.taskmap = {}

        # These dicts hold tasks that are no longer in the ready queue
        # They will be placed back under various conditions
        # Tasks waiting for other tasks to exit
        self.exit_waiting = {}
        # I/O waiting
        self.read_waiting = {}
        self.write_waiting = {}
        # Tasks sleeping
        self.sleep_waiting = []
        # Messages - each entry holds a list of tids subscribed to the message
        self.messagesubscribers = {}
        # Message Queue - each entry looks like (message, params)
        self.mqueue = Queue()

    def new(self, target):
        # Create a new task object (we wrap a coroutine function in the Task class to make it a schedulable task)
        newtask = Task(target)
        self.taskmap[newtask.tid] = newtask
        # Place it in the ready queue so it will be executed
        self.schedule(newtask)
        # the caller gets to know the task id
        return newtask.tid

    def exit(self, task):
        if not task.tid in self.taskmap:
            print('killing non-existent task', task.tid)
            return
        del self.taskmap[task.tid]
        # Notify other tasks waiting for exit
        # This removes the dict entry for this tid and
        # task iterates over the list of tasks that were waiting and re-schedules them
        # the list of these tasks is removed (popped) out of the exit_waiting dict
        for task in self.exit_waiting.pop(task.tid, []):
            self.schedule(task)

    def waitforexit(self, task, waittid):
        if waittid in self.taskmap:
            # If we are still in the taskmap then keep waiting
            self.exit_waiting.setdefault(waittid, []).append(task)
            return True
        else:
            # indicate the task has already exited
            return False

    # I/O waiting
    def waitforread(self, task, fd):
        self.read_waiting[fd] = task

    def waitforwrite(self, task, fd):
        self.write_waiting[fd] = task

    def iopoll(self, timeout):
        if self.read_waiting or self.write_waiting:
            # Select only the file descriptors that are ready
            r, w, e = select.select(self.read_waiting, self.write_waiting, [],
                                    timeout)
            # iterate over these ready descriptors and re-schedule the tasks that were waiting
            for fd in r:
                self.schedule(self.read_waiting.pop(fd))
            for fd in w:
                self.schedule(self.write_waiting.pop(fd))
            if r or w: self.schedule(None)
        else:
            # If we have no read or write sockets to select on, then lets sleep either the timeout or our minimum time granularity
            if 0.1 < timeout:
                time.sleep(0.1)
            # If the timeout is less than or equal to our minimum granularity then just sleep this amount
            elif timeout > 0:
                time.sleep(timeout)
            # If we are told to sleep for 0 then sleep for the minimum instead
            elif self.mqueue.empty():
                time.sleep(0.1)

    # Time waiting
    def waitforclock(self, task, clocktime):
        # Add task to our sleep_waiting queue.
        # Using heappush means we auto sort on push so that the next task is ready to pop
        heapq.heappush(self.sleep_waiting, (clocktime, task))

    def timepoll(self):
        # Process all items waiting to expire
        woken = False
        # while there is something there, and the last item is ready to run
        while self.sleep_waiting and self.sleep_waiting[0][0] <= time.time():
            expiredtask = heapq.heappop(self.sleep_waiting)
            if expiredtask[1].tid in self.taskmap:
                self.schedule(expiredtask[1])
                woken = True
            else:
                print('scheduled task already exited', expiredtask[1].tid)
        if woken:
            self.schedule(None)

    # Go Idle
    def goidle(self, task):
        pass

    # Messaging
    def sendmessage(self, message):
        # Insert the given message into the message queue
        self.mqueue.put(message)

    def processmessage(self):
        message = self.mqueue.get(False)
        key = message[1]
        if not key in self.messagesubscribers:
            print('got unknown message:', message)
        else:
            for tid in self.messagesubscribers[key]:
                task = self.taskmap.get(tid)
                if task:
                    task.sendval = message
                    self.schedule(task)

    def subscribemessage(self, messagekey, tid):
        self.messagesubscribers.setdefault(messagekey, []).append(tid)

    # Scheduling
    def schedule(self, task):
        # just place the task in the queue
        self.ready.put(task)

    def processready(self):
        # Process all the ready tasks or until we hit a None boundary
        while 1:
            if self.ready.empty():
                break
            task = self.ready.get(False)
            if task is None:
                # Since some tasks may generate more tasks, we need boundaries so the mainloop can continue
                break
            try:
                result = task.run()
                if isinstance(result, SystemCall):
                    result.task = task
                    result.sched = self
                    result.handle()
                    continue
            except StopIteration:
                # If we reach the 'end' of the stream of values from a generator, we exit the task
                self.exit(task)
                continue
            except:
                # Other types of exceptions just cause the task to exit with some logging so we know why
                print('Task exiting with error: %s' % formatExceptionInfo())
                self.exit(task)
                continue
            # Re-schedule the task now that we have successfully run it
            self.schedule(task)

    # Task select polling loop
    def mainloop(self):
        while 1:

            # Process tasks if we have some
            if not self.ready.empty():
                self.processready()

            # Process io quickly or sit and select until the next sleeping task is due to wake
            if self.ready.empty():
                # if there are no tasks in the ready queue and there is nothing sleeping then set timeout to 0 to indicate 'use minimum granularity'
                if not self.sleep_waiting:
                    self.iopoll(0)
                else:
                    # We can timeout for as long as the next sleep_waiting task would sleep
                    waiting = self.sleep_waiting[0][0] - time.time()
                    if waiting >= 0:
                        self.iopoll(waiting)
                    else:
                        self.iopoll(0)
            else:
                # Otherwise since we have waiting tasks, just return asap
                self.iopoll(0)

            # Check to see if any sleeping tasks can be woken
            if self.sleep_waiting:
                self.timepoll()
                # immediately process these new readytasks so nothing can overwrite the sendval
                self.processready()

            # Check to see if any tasks have waiting messages
            # This should cover idle, running and sleeping tasks
            if not self.mqueue.empty():
                self.processmessage()
                # immediately process these new readytasks so nothing can overwrite the sendval
                self.processready()

            if not self.taskmap:
                print('no more tasks')
                break
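The Scheduler above is a cooperative round-robin scheduler: processready() pops a task, resumes it for one step, and requeues it unless it raised StopIteration. A stripped-down Python 3 sketch of that core loop, using bare generators in place of the Task wrapper:

from queue import Queue

def mainloop(generators):
    ready = Queue()
    for g in generators:
        ready.put(g)
    while not ready.empty():
        task = ready.get()
        try:
            next(task)          # run the task up to its next yield
        except StopIteration:
            continue            # task finished; do not reschedule
        ready.put(task)         # otherwise back into the ready queue

def counter(name, n):
    for i in range(n):
        print(name, i)
        yield

mainloop([counter('a', 2), counter('b', 3)])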
Example #44
0
class wire_writer(object):
    #Class command values
    SHUT_DOWN_ALL = 0
    SHUT_DOWN_NAMED = 1
    WRITE_BYTES = 2
    TERMINATE = 3

    #Class command return values
    COMMAND_OK = 0
    COMMAND_FAIL = 1

    def __init__(self, write_interface_list):
        self.write_interface_list = write_interface_list
        self.command_queue = Queue()  #to writer child
        self.status_queue = Queue()  #fm writer child
        self.command_failure = Event()
        self.start(write_interface_list)

    def start(self, write_interface_list):

        try:
            t = Thread(target=self.run,
                       args=(write_interface_list, self.command_queue,
                             self.status_queue, self.command_failure))
            t.daemon = True
            t.start()

        except Exception as e:
            raise e

    #################This is the writer code that is run in its own thread#####################
    def run(self, write_interface_list, command_queue, status_queue,
            fail_event):
        lib_directories = [
            site.getsitepackages()[0],
            site.getsitepackages()[0]
        ]
        wr = wire_writer_child()
        err_buf = wr.make_pcap_error_buffer()
        fail_event.clear()

        try:
            wr.open_interfaces_for_sending(write_interface_list)
        except Exception as e:
            fail_event.set()
            raise e

        while (True):

            if (fail_event.is_set()):
                break

            try:
                #thread blocks on queue.get command waiting for next command
                cmd, cmd_val = command_queue.get()

                if (self.TERMINATE == cmd):
                    command_queue.task_done()
                    break

                (status, rtn_data) = wr.do_command(cmd, cmd_val)
                command_queue.task_done()
                status_queue.put((status, rtn_data))

            #re-raise exception to command side
            except Exception as e:
                fail_event.set()
                raise e

        #close all interfaces opened by this writer
        wr.close_sending_interfaces()

    ###############################################################################################

    def cmd(self, command, command_data=None, cmd_timeout=None):
        self.command_queue.put((command, command_data), timeout=cmd_timeout)

    def get_rst(self, get_timeout=None):
        if self.command_failure.is_set():
            raise RuntimeError("Last command failed.")

        rtn = self.status_queue.get(timeout=get_timeout)
        self.status_queue.task_done()

        return (rtn)
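wire_writer talks to its worker thread through a pair of queues: commands go down command_queue, results come back on status_queue, and a TERMINATE opcode ends the loop. A minimal Python 3 sketch of that request/reply shape (the opcodes and payload are illustrative):

import threading
from queue import Queue

commands, statuses = Queue(), Queue()
TERMINATE, ECHO = 0, 1

def worker():
    while True:
        cmd, val = commands.get()
        commands.task_done()
        if cmd == TERMINATE:
            break
        statuses.put(("ok", val.upper()))   # reply travels on the second queue

threading.Thread(target=worker, daemon=True).start()
commands.put((ECHO, "hello"))
print(statuses.get(timeout=5))    # ('ok', 'HELLO')
commands.put((TERMINATE, None))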
Example #45
0
class ExaBGPEmulator(object):
    def __init__(self, rs, address, port, input_file, speed_up, rate, mode, seperate_prefix):
        self.logger = util.log.getLogger('xbgp')
        self.logger.debug('init')
        self.route_id_counter = 0
        self.real_start_time = time()
        self.simulation_start_time = 0

        self.input_file = input_file
        self.speed_up = speed_up
        self.rs = rs
        self.send_rate = int(rate)
        self.mode = int(mode)
        self.seperate_prefix = seperate_prefix

        self.run = True
        self.fp_thread = None
        self.us_thread = None
        self.update_queue = Queue()
        if self.rs == SIX_PACK_RS:
            self.logger.debug('connecting to RS1')
            self.conn_rs1 = Client((port_config.process_assignement["rs1"], port_config.ports_assignment["rs1_receive_bgp_messages"]), authkey=None)
            self.logger.debug('connected to RS1')
            self.logger.debug('connecting to RS2')
            self.conn_rs2 = Client((port_config.process_assignement["rs2"], port_config.ports_assignment["rs2_receive_bgp_messages"]), authkey=None)
            self.logger.debug('connected to RS2')
        elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
            self.logger.info('connecting to RS')
            self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.conn.connect((address, port))
            self.logger.info('connected to RS')

    def file_processor(self):
        with open(self.input_file) as infile:
            tmp = {}
            next_hop = ""
            flag = 0

            for line in infile:
                if line.startswith("TIME"):
                    flag = 1
                    tmp = {"exabgp": "3.4.8", "type": "update"}
                    next_hop = ""

                    x = line.split("\n")[0].split(": ")[1]
                    time = mktime(strptime(x, "%m/%d/%y %H:%M:%S"))
                    tmp["time"] = int(time/self.speed_up)

                elif flag == 1:
                    if 'Keepalive' in line or line.startswith("\n"):
                        # Only process Update Messages
                        flag = 0
                    else:
                        x = line.split("\n")[0].split(": ")

                        if "neighbor" not in tmp:
                             tmp["neighbor"] = {"address": {}, "asn": {}, "message": {"update": {}}}

                        elif line.startswith("FROM"):
                            x = x[1].split(" ")
                            tmp["neighbor"]["ip"] = x[0]
                            tmp["neighbor"]["address"]["peer"] = x[0]
                            tmp["neighbor"]["asn"]["peer"] = x[1][2:]

                        elif line.startswith("TO"):
                            x = x[1].split(" ")
                            tmp["neighbor"]["address"]["local"] = x[0]
                            tmp["neighbor"]["asn"]["local"] = x[1][2:]

                        elif line.startswith("ORIGIN"):
                            if "attribute" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["attribute"] = {}
                            tmp["neighbor"]["message"]["update"]["attribute"]["origin"] = x[1].lower()

                        elif line.startswith("ASPATH"):
                            if "attribute" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["attribute"] = {}
                            tmp["neighbor"]["message"]["update"]["attribute"]["as-path"] = []
                            for asn in x[1].split(' '):
                                if asn[0] == '{':
                                    for i in asn[1:-1].split(','):
                                        tmp["neighbor"]["message"]["update"]["attribute"]["as-path"].append(int(i))
                                else:
                                    tmp["neighbor"]["message"]["update"]["attribute"]["as-path"].append(int(asn))

                        elif line.startswith("MULTI_EXIT_DISC"):
                            if "attribute" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["attribute"] = {}
                            tmp["neighbor"]["message"]["update"]["attribute"]["med"] = x[1]

                        elif line.startswith("NEXT_HOP"):
                            if "announce" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["announce"] = {}
                            tmp["neighbor"]["message"]["update"]["announce"]["ipv4 unicast"] = {x[1]: {}}
                            next_hop = x[1]

                        elif line.startswith("COMMUNITY"):
                            if "attribute" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["attribute"] = {}
                            tmp["neighbor"]["message"]["update"]["attribute"]["community"] =  x[1]

                        elif line.startswith("ANNOUNCE"):
                            if "announce" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["announce"] = {"ipv4 unicast": {}}
                            flag = 2

                        elif line.startswith("WITHDRAW"):
                            tmp["neighbor"]["message"]["update"]["withdraw"] = {"ipv4 unicast": {}}
                            flag = 3

                elif flag >= 2:
                    if line.startswith("\n"):
                        if not self.run:
                            break

                        if self.separate_prefix:
                            if self.rs == SIX_PACK_RS:
                                routes = self.create_routes_to_be_sent(tmp)
                            elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
                                routes = self.create_routes_per_prefix(tmp)
                            for route in routes:
                                route["route_id"] = self.route_id_counter
                                self.route_id_counter += 1
                                self.update_queue.put({'route': route, "time": tmp["time"]})
                        else:
                            # NOTE: process announcements only for testing
                            if "announce" in tmp["neighbor"]["message"]["update"]:
                                tmp["route_id"] = self.route_id_counter
                                self.route_id_counter += 1
                                self.logger.debug(str(tmp))
                                self.update_queue.put({'route': tmp, "time": tmp["time"]})
                                #self.logger.info("update_queue.qsize:" + str(self.update_queue.qsize()) + "route_id:%d" % tmp["route_id"])

                        while self.update_queue.qsize() > 32000:
                            self.logger.info('queue is full - taking a break')
                            sleep(self.sleep_time(tmp["time"])/2 + 0.001)
                            if not self.run:
                                break
                        flag = 0

                    else:
                        if line.startswith("ANNOUNCE"):
                            if "announce" not in tmp["neighbor"]["message"]["update"]:
                                tmp["neighbor"]["message"]["update"]["announce"] = {"ipv4 unicast": {}}
                            flag = 2

                        elif line.startswith("WITHDRAW"):
                            tmp["neighbor"]["message"]["update"]["withdraw"] = {"ipv4 unicast": {}}
                            flag = 3

                        else:
                            x = line.split("\n")[0].split()[0]
                            if flag == 2:
                                tmp["neighbor"]["message"]["update"]["announce"]["ipv4 unicast"][next_hop][x] = {}
                            else:
                                tmp["neighbor"]["message"]["update"]["withdraw"]["ipv4 unicast"][x] = {}

        self.run = False
        print "file processor done"

    def create_routes_per_prefix(self, bgp_update):
        routes = []
        if "announce" not in bgp_update["neighbor"]["message"]["update"]:
            return routes
        nh_dict = bgp_update["neighbor"]["message"]["update"]["announce"]["ipv4 unicast"]
        for next_hop in nh_dict:
            for prefix in nh_dict[next_hop]:
                bgp_update["neighbor"]["message"]["update"]["announce"]["ipv4 unicast"] = {next_hop: {prefix: {}}}
                routes.append(deepcopy(bgp_update))
        return routes

    def create_routes_to_be_sent(self, bgp_update):
        # 1. generate key for the incoming route
        key = os.urandom(KEY_LENGTH)
        keystr = key.encode("hex")
        self.cipher = AESCipher(key)

        routes = []
        # for each IP prefix destination add a route in the queue
        if "announce" in bgp_update["neighbor"]["message"]["update"]:
            # GENERATE ANNOUNCEMENTS
            for next_hop in bgp_update["neighbor"]["message"]["update"]["announce"]["ipv4 unicast"]:
                for prefix in bgp_update["neighbor"]["message"]["update"]["announce"]["ipv4 unicast"][next_hop]:
                    route = Route()
                    route.neighbor = bgp_update["neighbor"]["ip"]
                    route.prefix = prefix
                    route.time = bgp_update["time"]
                    route.id = self.route_id_counter
                    self.route_id_counter += 1
                    route.as_path = bgp_update["neighbor"]["message"]["update"]["attribute"]["as-path"]
                    route.next_hop = next_hop
                    if "community" in bgp_update["neighbor"]["message"]["update"]["attribute"]:
                        route.communities = bgp_update["neighbor"]["message"]["update"]["attribute"]["community"]
                    route.type = "announce"

                    encrypted_route = self.cipher.encrypt(pickle.dumps(route))  # encrypt the serialized route object
                    routes.append({"prefix": prefix,
                                   "asn": bgp_update["neighbor"]["asn"]["peer"],
                                   "route-in-clear": None,
                                   "route_id": route.id,
                                   "encrypted_route": encrypted_route,
                                   "key": keystr,
                                   "type": route.type,
                                   "announcement_id": route.id})

        return routes

    def bgp_update_sender(self):
        while self.run or not self.update_queue.empty():
            try:
                # get msg. type: {"route", "time"}
                msg = self.update_queue.get(True, 1)
            except Empty:
                continue

            if self.simulation_start_time == 0:
                self.real_start_time = time()
                self.simulation_start_time = msg["time"]

            current_bgp_update = msg["time"]
            elapsed = current_bgp_update - self.simulation_start_time
            if elapsed > update_minutes:
                print "start: current", self.simulation_start_time, current_bgp_update
                break
            sleep_time = self.sleep_time(msg["time"])
            if sleep_time != 0:
                print "current_bgp_update:", current_bgp_update, ", elapsed:", elapsed, ", sleep_time:", sleep_time
            sleep(sleep_time)
            #self.logger.info("route_id:%d " % msg["route"]["route_id"] + "Peer asn:%s " % msg["route"]["neighbor"]["asn"]["peer"] + "time(s):" + str(msg["time"]) + "sleep_time(s):" + str(sleep_time))

            if self.rs == SIX_PACK_RS:
                self.send_update_rs1(msg["route"])
                self.send_update_rs2(msg["route"])
            elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
                self.send_update_sgx(msg["route"])

        self.stop()

    def bgp_update_rate_sender(self):
        current_count = 0
        count = 0
        while not self.update_queue.empty() or self.run:
            try:
                msg = self.update_queue.get(True, 1)
            except Empty:
                continue
            if self.simulation_start_time == 0:
                self.simulation_start_time = msg["time"]

            current_bgp_update = msg["time"]
            elapsed = current_bgp_update - self.simulation_start_time
            if count > update_minutes:
                print "start, current_msg_time, current_time", self.simulation_start_time, current_bgp_update, count
                break

            if current_count == self.send_rate:
                current_count = 0
                count += 1
                print "elapsed:", count
                sleep(1)
            current_count += 1

            if self.rs == SIX_PACK_RS:
                self.send_update_rs1(msg["route"])
                self.send_update_rs2(msg["route"])
            elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
                self.send_update_sgx(msg["route"])

        self.stop()

    def bgp_update_fast_sender(self):
        count = 0
        while not self.update_queue.empty() or self.run:
            try:
                msg = self.update_queue.get(True, 1)
            except Empty:
                continue
            count += 1
            if self.rs == SIX_PACK_RS:
                self.send_update_rs1(msg["route"])
                self.send_update_rs2(msg["route"])
            elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
                self.send_update_sgx(msg["route"])

        print "total sent announcements: " + str(count)
        self.stop()

    def sleep_time(self, update_time):
        time_diff = update_time - self.simulation_start_time
        wake_up_time = self.real_start_time + time_diff
        sleep_time = wake_up_time - time()
        if sleep_time < 0:
            sleep_time = 0
        return sleep_time

    def send_update(self, update):
        self.conn.send(json.dumps(update))

    def send_update_sgx(self, update):
        s = json.dumps(update)
        self.conn.send(struct.pack("H", len(s) + 2) + s)
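
    # Receiving side of the two-byte length prefix used above (a sketch, not
    # part of the original code; assumes the same struct.pack("H", len(s) + 2)
    # framing, i.e. the length field counts itself):
    #
    #   hdr = sock.recv(2)
    #   (total,) = struct.unpack("H", hdr)
    #   payload = sock.recv(total - 2)
    #   update = json.loads(payload)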

    def send_update_rs1(self, update):
        self.conn_rs1.send(pickle.dumps(update))

    def send_update_rs2(self, update):
        self.conn_rs2.send(pickle.dumps(update))

    def start(self):
        self.logger.debug('start file processor')
        self.fp_thread = Thread(target=self.file_processor)
        self.fp_thread.start()

        self.logger.debug('start update sender')
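        # mode 0: replay paced by original timestamps; mode 1: fixed-rate
        # replay (send_rate updates per second); mode 2: as fast as possible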
        if self.mode == 0:
            self.us_thread = Thread(target=self.bgp_update_sender)
            self.us_thread.start()
        if self.mode == 1:
            self.us_thread = Thread(target=self.bgp_update_rate_sender)
            self.us_thread.start()
        if self.mode == 2:
            self.us_thread = Thread(target=self.bgp_update_fast_sender)
            self.us_thread.start()

    def stop(self):
        self.logger.debug('terminate')
        print "send stop signal"
        if self.rs == SIX_PACK_RS:
            self.send_update_rs1({"stop": 1})
            self.send_update_rs2({"stop": 1})
        elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
            self.send_update_sgx({"stop": 1})

        if self.run == True:
            self.run = False
            self.us_thread.join()
        self.logger.debug('bgp update sender terminated')

        self.fp_thread.join()
        self.logger.debug('file processor terminated')

        if self.rs == SIX_PACK_RS:
            self.conn_rs1.close()
            self.conn_rs2.close()
        elif self.rs == SGX_RS or self.rs == ORIGIN_RS:
            self.conn.close()
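
The sleep_time() pacing above anchors the first simulated update timestamp to wall-clock time and then sleeps just long enough before each later update. The same idea in isolation (class and names hypothetical):

    from time import time, sleep

    class Pacer(object):
        """Replays timestamped events at their original relative rate."""

        def __init__(self):
            self.real_start = time()
            self.sim_start = None

        def wait_for(self, event_time):
            if self.sim_start is None:
                # anchor simulated time to wall-clock time on the first event
                self.sim_start = event_time
                self.real_start = time()
            wake_up = self.real_start + (event_time - self.sim_start)
            sleep(max(0, wake_up - time()))
Example #46
0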
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is main CLI function with all command line parameters
    @details This function also implements CLI workflow depending on CLI parameters inputed
    @return This function doesn't return, it exits to environment with proper success code
    """
    def filter_ready_devices(mbeds_list):
        """! Filters list of MUTs to check if all MUTs are correctly detected with mbed-ls module.
        @details This function logs a lot to help users figure out root cause of their problems
        @param mbeds_list List of MUTs to verify
        @return Tuple of (MUTS detected correctly, MUTs not detected fully)
        """
        ready_mbed_devices = []  # Devices which can be used (are fully detected)
        not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

        required_mut_props = [
            'target_id', 'platform_name', 'serial_port', 'mount_point'
        ]

        gt_logger.gt_log(
            "detected %d device%s" %
            (len(mbeds_list), 's' if len(mbeds_list) != 1 else ''))
        for mut in mbeds_list:
            for prop in required_mut_props:
                if not mut[prop]:
                    # Adding MUT to NOT DETECTED FULLY list
                    if mut not in not_ready_mbed_devices:
                        not_ready_mbed_devices.append(mut)
                        gt_logger.gt_log_err(
                            "mbed-ls was unable to enumerate correctly all properties of the device!"
                        )
                        gt_logger.gt_log_tab(
                            "check with 'mbedls -j' command if all properties of your device are enumerated properly"
                        )

                    gt_logger.gt_log_err("mbed-ls property '%s' is '%s'" %
                                         (prop, str(mut[prop])))
                    if prop == 'serial_port':
                        gt_logger.gt_log_tab(
                            "check if your serial port driver is correctly installed!"
                        )
                    if prop == 'mount_point':
                        gt_logger.gt_log_tab(
                            'check if your OS can detect and mount mbed device mount point!'
                        )
            if mut not in not_ready_mbed_devices:
                # Adding MUT to DETECTED CORRECTLY list
                ready_mbed_devices.append(mut)
        return (ready_mbed_devices, not_ready_mbed_devices)

    def get_parallel_value(value):
        """! Get correct value for parallel switch (--parallel)
        @param value Value passed from --parallel
        @return Validated number of parallel test executions
        """
        try:
            parallel_test_exec = int(value)
            if parallel_test_exec < 1:
                parallel_test_exec = 1
        except ValueError:
            gt_logger.gt_log_err(
                "argument of --parallel is not an int, disabling parallel mode"
            )
            parallel_test_exec = 1
        return parallel_test_exec

    # This is how you magically control colours in this piece of art software
    gt_logger.colorful(not opts.plain)

    # Prints version and exits
    if opts.version:
        print_version()
        return (0)

    # Load test specification or print warnings / info messages and exit CLI mode
    test_spec, ret = get_test_spec(opts)
    if not test_spec:
        return ret

    # Verbose flag
    verbose = opts.verbose_test_result_only

    # We will load hooks from JSON file to support extra behaviour during test execution
    greentea_hooks = GreenteaHooks(
        opts.hooks_json) if opts.hooks_json else None

    # Capture alternative test console inputs, used e.g. in 'yotta test command'
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(
            None,
            None,
            None,
            None,
            None,
            digest_source=opts.digest_source,
            enum_host_tests_path=enum_host_tests_path,
            verbose=verbose)

        # Some error in htrun, abort test execution
        if isinstance(host_test_result, int):
            # int(host_test_result) > 0 - Call to mbedhtrun failed
            # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
            return host_test_result

        # If execution was successful 'run_host_test' return tuple with results
        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary, memory_metrics = host_test_result
        status = TEST_RESULTS.index(
            single_test_result) if single_test_result in TEST_RESULTS else -1
        return (status)

    ### Query with mbedls for available mbed-enabled devices
    gt_logger.gt_log("detecting connected mbed-enabled devices...")

    ### check if the argument of --parallel mode is an integer greater than or equal to 1
    parallel_test_exec = get_parallel_value(opts.parallel_test_exec)

    # Detect devices connected to system
    mbeds = mbed_os_tools.detect.create()
    mbeds_list = mbeds.list_mbeds(unique_names=True, read_details_txt=True)

    if opts.global_resource_mgr:
        # Mocking available platform requested by --grm switch
        grm_values = parse_global_resource_mgr(opts.global_resource_mgr)
        if grm_values:
            gt_logger.gt_log_warn(
                "entering global resource manager mbed-ls dummy mode!")
            grm_platform_name, grm_module_name, grm_ip_name, grm_port_name = grm_values
            mbeds_list = []
            if grm_platform_name == '*':
                required_devices = [
                    tb.get_platform() for tb in test_spec.get_test_builds()
                ]
                for _ in range(parallel_test_exec):
                    for device in required_devices:
                        mbeds_list.append(mbeds.get_dummy_platform(device))
            else:
                for _ in range(parallel_test_exec):
                    mbeds_list.append(
                        mbeds.get_dummy_platform(grm_platform_name))
            opts.global_resource_mgr = ':'.join(
                [v for v in grm_values[1:] if v])
            gt_logger.gt_log_tab("adding dummy platform '%s'" %
                                 grm_platform_name)
        else:
            gt_logger.gt_log(
                "global resource manager switch '--grm %s' in wrong format!" %
                opts.global_resource_mgr)
            return (-1)

    if opts.fast_model_connection:
        # Mocking available platform requested by --fm switch
        fm_values = parse_fast_model_connection(opts.fast_model_connection)
        if fm_values:
            gt_logger.gt_log_warn(
                "entering fastmodel connection, mbed-ls dummy simulator mode!")
            fm_platform_name, fm_config_name = fm_values
            mbeds_list = []
            for _ in range(parallel_test_exec):
                mbeds_list.append(mbeds.get_dummy_platform(fm_platform_name))
            opts.fast_model_connection = fm_config_name
            gt_logger.gt_log_tab("adding dummy fastmodel platform '%s'" %
                                 fm_platform_name)
        else:
            gt_logger.gt_log(
                "fast model connection switch '--fm %s' in wrong format!" %
                opts.fast_model_connection)
            return (-1)

    ready_mbed_devices = []  # Devices which can be used (are fully detected)
    not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

    if mbeds_list:
        ready_mbed_devices, not_ready_mbed_devices = filter_ready_devices(
            mbeds_list)
        if ready_mbed_devices:
            # devices in form of a pretty formatted table
            for line in log_mbed_devices_in_table(
                    ready_mbed_devices).splitlines():
                gt_logger.gt_log_tab(line.strip(), print_text=verbose)
    else:
        gt_logger.gt_log_err("no compatible devices detected")
        return (RET_NO_DEVICES)

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_logger.gt_log(
            "filtering out target ids not on below list (specified with --use-tids switch)"
        )
        accepted_target_ids = opts.use_target_ids.split(',')
        for tid in accepted_target_ids:
            gt_logger.gt_log_tab("accepting target id '%s'" %
                                 gt_logger.gt_bright(tid))

    test_exec_retcode = 0  # Incremented each time a test case result is not 'OK'
    test_platforms_match = 0  # Count how many tests were actually run with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    test_report = {}  # Test report used to export to Junit, HTML etc...
    test_queue = Queue()  # contains information about test_bin and image_path for each test case
    test_result_queue = Queue()  # used to store results of each thread
    execute_threads = []  # list of threads to run test cases

    # Values used to generate random seed for test execution order shuffle
    SHUFFLE_SEED_ROUND = 10  # Value used to round float random seed
    shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND)

    # Set shuffle seed if it is provided with command line option
    if opts.shuffle_test_seed:
        shuffle_random_seed = round(float(opts.shuffle_test_seed),
                                    SHUFFLE_SEED_ROUND)

    ### Testing procedures, for each target, for each target's compatible platform
    # In case we are using test spec (switch --test-spec) command line option -t <list_of_targets>
    # is used to enumerate builds from test spec we are supplying
    filter_test_builds = opts.list_of_targets.split(
        ',') if opts.list_of_targets else None
    for test_build in test_spec.get_test_builds(filter_test_builds):
        platform_name = test_build.get_platform()
        gt_logger.gt_log(
            "processing target '%s' toolchain '%s' compatible platforms... (note: switch set to --parallel %d)"
            % (gt_logger.gt_bright(platform_name),
               gt_logger.gt_bright(
                   test_build.get_toolchain()), int(opts.parallel_test_exec)))

        baudrate = test_build.get_baudrate()

        ### Select MUTS to test from list of available MUTS to start testing
        mut = None
        number_of_parallel_instances = 1
        muts_to_test = []  # MUTs to actually be tested
        for mbed_dev in ready_mbed_devices:
            if accepted_target_ids and mbed_dev[
                    'target_id'] not in accepted_target_ids:
                continue

            # Check that we have a valid serial port detected.
            sp = mbed_dev['serial_port']
            if not sp:
                gt_logger.gt_log_err(
                    "Serial port for target %s not detected correctly\n" %
                    mbed_dev['target_id'])
                continue

            if mbed_dev['platform_name'] == platform_name:
                # We will force configuration specific baudrate by adding baudrate to serial port
                # Only add baudrate decoration for serial port if it's not already there
                # Format used by mbedhtrun: 'serial_port' = '<serial_port_name>:<baudrate>'
                if not sp.endswith(str(baudrate)):
                    mbed_dev['serial_port'] = "%s:%d" % (
                        mbed_dev['serial_port'], baudrate)

                mut = mbed_dev
                if mbed_dev not in muts_to_test:
                    # We will only add unique devices to list of devices "for testing" in this test run
                    muts_to_test.append(mbed_dev)
                if number_of_parallel_instances < parallel_test_exec:
                    number_of_parallel_instances += 1
                else:
                    break

        # devices in form of a pretty formatted table
        for line in log_mbed_devices_in_table(muts_to_test).splitlines():
            gt_logger.gt_log_tab(line.strip(), print_text=verbose)

        # Configuration print mode:
        if opts.verbose_test_configuration_only:
            continue

        ### If we have at least one available device we can proceed
        if mut:
            target_platforms_match += 1

            build = test_build.get_name()
            build_path = test_build.get_path()

            # Demo mode: --run implementation (already added --run to mbedhtrun)
            # We want to pass the file name to mbedhtrun (--run NAME => -f NAME) and run only one binary
            if opts.run_app:
                gt_logger.gt_log(
                    "running '%s' for '%s'-'%s'" %
                    (gt_logger.gt_bright(
                        opts.run_app), gt_logger.gt_bright(platform_name),
                     gt_logger.gt_bright(test_build.get_toolchain())))
                disk = mut['mount_point']
                port = mut['serial_port']
                micro = mut['platform_name']
                program_cycle_s = get_platform_property(
                    micro, "program_cycle_s")
                copy_method = opts.copy_method if opts.copy_method else 'shell'
                enum_host_tests_path = get_local_host_tests_dir(
                    opts.enum_host_tests)

                test_platforms_match += 1
                host_test_result = run_host_test(
                    opts.run_app,
                    disk,
                    port,
                    build_path,
                    mut['target_id'],
                    micro=micro,
                    copy_method=copy_method,
                    program_cycle_s=program_cycle_s,
                    digest_source=opts.digest_source,
                    json_test_cfg=opts.json_test_configuration,
                    run_app=opts.run_app,
                    enum_host_tests_path=enum_host_tests_path,
                    verbose=True)

                # Some error in htrun, abort test execution
                if isinstance(host_test_result, int):
                    # int(host_test_result) > 0 - Call to mbedhtrun failed
                    # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
                    return host_test_result

                # If execution was successful 'run_host_test' return tuple with results
                single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary, memory_metrics = host_test_result
                status = TEST_RESULTS.index(
                    single_test_result
                ) if single_test_result in TEST_RESULTS else -1
                if single_test_result != TEST_RESULT_OK:
                    test_exec_retcode += 1

            test_list = test_build.get_tests()

            filtered_ctest_test_list = create_filtered_test_list(
                test_list,
                opts.test_by_names,
                opts.skip_test,
                test_spec=test_spec)

            gt_logger.gt_log(
                "running %d test%s for platform '%s' and toolchain '%s'" %
                (len(filtered_ctest_test_list),
                 "s" if len(filtered_ctest_test_list) != 1 else "",
                 gt_logger.gt_bright(platform_name),
                 gt_logger.gt_bright(test_build.get_toolchain())))

            # Test execution order can be shuffled (also with provided random seed)
            # for test execution reproduction.
            filtered_ctest_test_list_keys = filtered_ctest_test_list.keys()
            if opts.shuffle_test_order:
                # We want to shuffle test names randomly
                random.shuffle(filtered_ctest_test_list_keys,
                               lambda: shuffle_random_seed)

            for test_name in filtered_ctest_test_list_keys:
                image_path = filtered_ctest_test_list[test_name].get_binary(
                    binary_type=TestBinary.BIN_TYPE_BOOTABLE).get_path()
                compare_log = filtered_ctest_test_list[test_name].get_binary(
                    binary_type=TestBinary.BIN_TYPE_BOOTABLE).get_compare_log()
                if image_path is None:
                    gt_logger.gt_log_err(
                        "Failed to find test binary for test %s flash method %s"
                        % (test_name, 'usb'))
                else:
                    test = {
                        "test_bin": test_name,
                        "image_path": image_path,
                        "compare_log": compare_log
                    }
                    test_queue.put(test)

            number_of_threads = 0
            for mut in muts_to_test:
                # Experimental, parallel test execution
                if number_of_threads < parallel_test_exec:
                    args = (test_result_queue, test_queue, opts, mut, build,
                            build_path, greentea_hooks)
                    t = Thread(target=run_test_thread, args=args)
                    execute_threads.append(t)
                    number_of_threads += 1

        gt_logger.gt_log_tab(
            "using %d execution thread%s for testing" %
            (len(execute_threads),
             's' if len(execute_threads) != 1 else ''),
            print_text=verbose)
        for t in execute_threads:
            t.daemon = True
            t.start()

        # merge partial test reports from different threads to final test report
        for t in execute_threads:
            try:
                # We can't block forever here since that prevents KeyboardInterrupts
                # from being propagated correctly. Therefore, we just join with a
                # timeout of 0.1 seconds until the thread isn't alive anymore.
                # A time of 0.1 seconds is a fairly arbitrary choice. It needs
                # to balance CPU utilization and responsiveness to keyboard interrupts.
                # Checking 10 times a second seems to be stable and responsive.
                while t.isAlive():
                    t.join(0.1)

                test_return_data = test_result_queue.get(False)
            except Exception as e:
                # No test report generated
                gt_logger.gt_log_err("could not generate test report" + str(e))
                test_exec_retcode += -1000
                return test_exec_retcode

            test_platforms_match += test_return_data['test_platforms_match']
            test_exec_retcode += test_return_data['test_exec_retcode']
            partial_test_report = test_return_data['test_report']
            # Merge partial reports key by key so results already collected
            # from other threads are not clobbered
            for report_key in partial_test_report.keys():
                if report_key not in test_report:
                    test_report[report_key] = partial_test_report[report_key]
                else:
                    test_report[report_key].update(
                        partial_test_report[report_key])

        execute_threads = []

        if opts.verbose_test_configuration_only:
            print(
                "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
            )
            return (0)

        gt_logger.gt_log("all tests finished!")

    # We will execute post test hooks on tests
    for build_name in test_report:
        test_name_list = []  # All test case names for particular yotta target
        for test_name in test_report[build_name]:
            test = test_report[build_name][test_name]
            # Test was successful
            if test['single_test_result'] in [
                    TEST_RESULT_OK, TEST_RESULT_FAIL
            ]:
                test_name_list.append(test_name)
                # Call hook executed for each test, just after all tests are finished
                if greentea_hooks:
                    # We can execute this test hook just after all tests are finished ('hook_post_test_end')
                    format = {
                        "test_name": test_name,
                        "test_bin_name": test['test_bin_name'],
                        "image_path": test['image_path'],
                        "build_path": test['build_path'],
                        "build_path_abs": test['build_path_abs'],
                    }
                    greentea_hooks.run_hook_ext('hook_post_test_end', format)
        if greentea_hooks:
            build = test_spec.get_test_build(build_name)
            assert build is not None, "Failed to find build info for build %s" % build_name

            # Call hook executed for each yotta target, just after all tests are finished
            build_path = build.get_path()
            build_path_abs = os.path.abspath(build_path)
            # We can execute this test hook just after all tests are finished ('hook_post_test_end')
            format = {
                "build_path": build_path,
                "build_path_abs": build_path_abs,
                "test_name_list": test_name_list,
            }
            greentea_hooks.run_hook_ext('hook_post_all_test_end', format)

    # This tool is designed to work in CI.
    # We want to return success codes based on tool actions:
    # only if tests were executed and all of them passed do we
    # return 0 (success)
    if not opts.only_build_tests:
        # Prints shuffle seed
        gt_logger.gt_log("shuffle seed: %.*f" %
                         (SHUFFLE_SEED_ROUND, shuffle_random_seed))

        def dump_report_to_text_file(filename, content):
            """! Closure for report dumps to text files
            @param filename Name of destination file
            @parm content Text content of the file to write
            @return True if write was successful, else return False
            """
            try:
                with io.open(filename,
                             encoding="utf-8",
                             errors="backslashreplace",
                             mode="w") as f:
                    f.write(content.decode('utf-8'))
            except IOError as e:
                gt_logger.gt_log_err("can't export to '%s', reason:" %
                                     filename)
                gt_logger.gt_log_err(str(e))
                return False
            return True

        # Reports to JUNIT file
        if opts.report_junit_file_name:
            gt_logger.gt_log("exporting to JUNIT file '%s'..." %
                             gt_logger.gt_bright(opts.report_junit_file_name))
            # This test specification will be used by JUnit exporter to populate TestSuite.properties (useful meta-data for Viewer)
            test_suite_properties = {}
            for target_name in test_report:
                test_build_properties = get_test_build_properties(
                    test_spec, target_name)
                if test_build_properties:
                    test_suite_properties[target_name] = test_build_properties
            junit_report = exporter_testcase_junit(
                test_report, test_suite_properties=test_suite_properties)
            dump_report_to_text_file(opts.report_junit_file_name, junit_report)

        # Reports to text file
        if opts.report_text_file_name:
            gt_logger.gt_log("exporting to TEXT '%s'..." %
                             gt_logger.gt_bright(opts.report_text_file_name))
            # Useful text report for those who prefer not to copy-paste result tables into files
            text_report, text_results = exporter_text(test_report)
            text_testcase_report, text_testcase_results = exporter_testcase_text(
                test_report)
            text_final_report = '\n'.join([
                text_report, text_results, text_testcase_report,
                text_testcase_results
            ])
            dump_report_to_text_file(opts.report_text_file_name,
                                     text_final_report)

        # Reports to JSON file
        if opts.report_json_file_name:
            # We will not print summary and json report together
            gt_logger.gt_log("exporting to JSON '%s'..." %
                             gt_logger.gt_bright(opts.report_json_file_name))
            json_report = exporter_json(test_report)
            dump_report_to_text_file(opts.report_json_file_name, json_report)

        # Reports to HTML file
        if opts.report_html_file_name:
            gt_logger.gt_log("exporting to HTML file '%s'..." %
                             gt_logger.gt_bright(opts.report_html_file_name))
            # Generate a HTML page displaying all of the results
            html_report = exporter_html(test_report)
            dump_report_to_text_file(opts.report_html_file_name, html_report)

        # Memory metrics to CSV file
        if opts.report_memory_metrics_csv_file_name:
            gt_logger.gt_log(
                "exporting memory metrics to CSV file '%s'..." %
                gt_logger.gt_bright(opts.report_memory_metrics_csv_file_name))
            # Generate a CSV file page displaying all memory metrics
            memory_metrics_csv_report = exporter_memory_metrics_csv(
                test_report)
            dump_report_to_text_file(opts.report_memory_metrics_csv_file_name,
                                     memory_metrics_csv_report)

        # Final summary
        if test_report:
            # Test suite report
            gt_logger.gt_log("test suite report:")
            text_report, text_results = exporter_text(test_report)
            print(text_report)
            gt_logger.gt_log("test suite results: " + text_results)
            # test case detailed report
            gt_logger.gt_log("test case report:")
            text_testcase_report, text_testcase_results = exporter_testcase_text(
                test_report)
            print(text_testcase_report)
            gt_logger.gt_log("test case results: " + text_testcase_results)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_logger.gt_log_warn(
                "no platform/target matching tests were found!")
        if target_platforms_match == 0:
            # No platforms were tested
            gt_logger.gt_log_warn("no matching platforms were found!")

    return (test_exec_retcode)
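
main_cli hands tests to worker threads through test_queue and collects one partial report per thread from test_result_queue. A stripped-down sketch of that producer/consumer pattern (run_test_thread's real body lives elsewhere; the placeholder result here is hypothetical):

    from Queue import Queue, Empty  # 'queue' on Python 3
    from threading import Thread

    def run_test_thread(result_queue, work_queue):
        report = {}
        while True:
            try:
                test = work_queue.get(block=False)
            except Empty:
                break
            report[test["test_bin"]] = "OK"  # placeholder for a real test run
        result_queue.put({"test_report": report})

    work_queue, result_queue = Queue(), Queue()
    for name in ("test-a", "test-b"):
        work_queue.put({"test_bin": name})

    threads = [Thread(target=run_test_thread, args=(result_queue, work_queue))
               for _ in range(2)]
    for t in threads:
        t.daemon = True
        t.start()
    for t in threads:
        t.join()
        print(result_queue.get(False))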
Example #47
0
class Master(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.setDaemon(True)
        self.queue = Queue()
        self.metadata_queue = Queue()
        self.dbconn = mdb.connect(DB_HOST,
                                  DB_USER,
                                  DB_PASS,
                                  DB_NAME,
                                  port=DB_PORT,
                                  charset='utf8')
        self.dbconn.autocommit(False)
        self.dbcurr = self.dbconn.cursor()
        self.dbcurr.execute('SET NAMES utf8')
        self.n_reqs = self.n_valid = self.n_new = 0
        self.n_downloading_lt = self.n_downloading_pt = 0
        self.visited = set()
        self.black_list = load_res_blacklist(BLACK_FILE)

    def got_torrent(self):
        binhash, address, data, dtype, start_time = self.metadata_queue.get()
        if dtype == 'pt':
            self.n_downloading_pt -= 1
        elif dtype == 'lt':
            self.n_downloading_lt -= 1
        if not data:
            return
        self.n_valid += 1

        save_metadata(self.dbcurr, binhash, address, start_time, data,
                      self.black_list)
        self.n_new += 1

    def run(self):
        self.name = threading.currentThread().getName()
        print self.name, 'started'
        while True:
            while self.metadata_queue.qsize() > 0:
                self.got_torrent()
            address, binhash, dtype = self.queue.get()
            if binhash in self.visited:
                continue
            if len(self.visited) > 100000:
                self.visited = set()
            self.visited.add(binhash)

            self.n_reqs += 1
            info_hash = binhash.encode('hex')

            utcnow = datetime.datetime.utcnow()
            date = (utcnow + datetime.timedelta(hours=8))
            date = datetime.datetime(date.year, date.month, date.day)

            # Check if we have this info_hash
            self.dbcurr.execute(
                'SELECT id FROM search_hash WHERE info_hash=%s', (info_hash, ))
            y = self.dbcurr.fetchone()
            if y:
                self.n_valid += 1
                # Update the last-seen time and the request count
                self.dbcurr.execute(
                    'UPDATE search_hash SET last_seen=%s, requests=requests+1 WHERE info_hash=%s',
                    (utcnow, info_hash))
            else:
                if dtype == 'pt':
                    t = threading.Thread(target=simMetadata.download_metadata,
                                         args=(address, binhash,
                                               self.metadata_queue))
                    t.setDaemon(True)
                    t.start()
                    self.n_downloading_pt += 1
                elif dtype == 'lt' and self.n_downloading_lt < MAX_QUEUE_LT:
                    t = threading.Thread(target=ltMetadata.download_metadata,
                                         args=(address, binhash,
                                               self.metadata_queue))
                    t.setDaemon(True)
                    t.start()
                    self.n_downloading_lt += 1

            if self.n_reqs >= 1000:
                self.dbcurr.execute(
                    'INSERT INTO search_statusreport(date,new_hashes,total_requests, valid_requests)  VALUES(%s,%s,%s,%s) ON DUPLICATE KEY UPDATE '
                    +
                    'total_requests=total_requests+%s, valid_requests=valid_requests+%s, new_hashes=new_hashes+%s',
                    (date, self.n_new, self.n_reqs, self.n_valid, self.n_reqs,
                     self.n_valid, self.n_new))
                self.dbconn.commit()
                print '\n', time.ctime(), 'n_reqs', self.n_reqs, 'n_valid', self.n_valid, 'n_new', self.n_new, 'n_queue', self.queue.qsize(),
                print 'n_d_pt', self.n_downloading_pt, 'n_d_lt', self.n_downloading_lt,
                self.n_reqs = self.n_valid = self.n_new = 0

    def log_announce(self, binhash, address=None):
        self.queue.put([address, binhash, 'pt'])

    def log_hash(self, binhash, address=None):
        if not lt:
            return
        if is_ip_allowed(address[0]):
            return
        if self.n_downloading_lt < MAX_QUEUE_LT:
            self.queue.put([address, binhash, 'lt'])
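
Master.run drains metadata_queue completely before blocking on the next request in queue, so downloader threads never back up behind request handling. The same drain-then-block pattern in isolation (queue payloads here are hypothetical):

    from Queue import Queue  # 'queue' on Python 3

    requests, completions = Queue(), Queue()

    def service_loop():
        while True:
            # drain finished downloads first, without blocking
            while completions.qsize() > 0:
                print("completed: %s" % (completions.get(),))
            # then block until the next incoming request
            address, binhash = requests.get()
            print("dispatching %s" % binhash)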
Example #48
0
class NmapProcess(Thread):
    """
    NmapProcess is a class which wraps around the nmap executable.
    Consequently, in order to run an NmapProcess, nmap should be installed
    on the host running the script. By default NmapProcess will produce
    the output of the nmap scan in the nmap XML format. This could be then
    parsed out via the NmapParser class from libnmap.parser module.
    """
    def __init__(self,
                 targets="127.0.0.1",
                 options="-sT",
                 event_callback=None,
                 safe_mode=True):
        """
        Constructor of NmapProcess class.

        :param targets: hosts to be scanned. Could be a string of hosts
        separated with a comma or a python list of hosts/ip.
        :type targets: string or list

        :param options: list of nmap options to be applied to scan.
        These options are all documented in nmap's man pages.

        :param event_callback: callable function which will be run
        each time nmap process outputs data. This function will receive
        two parameters:
            1. the nmap process object
            2. the data produced by nmap process. See readme for examples.

        :param safe_mode: parameter to protect unsafe options like -oN, -oG,
        -iL, -oA,...

        :return: NmapProcess object

        """
        Thread.__init__(self)
        self.__nmap_proc = None
        self.__nmap_rc = 0

        unsafe_opts = set([
            '-oG', '-oN', '-iL', '-oA', '-oS', '-oX', '--iflist', '--resume',
            '--stylesheet', '--datadir'
        ])

        self.__nmap_binary_name = "nmap"
        self.__nmap_fixed_options = "-oX - -vvv --stats-every 2s"
        self.__nmap_binary = self._whereis(self.__nmap_binary_name)
        if self.__nmap_binary is None:
            raise EnvironmentError(
                1, "nmap is not installed or could "
                "not be found in system path")

        self.__sudo_run = ""
        if isinstance(targets, str):
            self.__nmap_targets = targets.replace(" ", "").split(',')
        elif isinstance(targets, list):
            self.__nmap_targets = targets
        else:
            raise Exception("Supplied target list should be either a "
                            "string of a list")

        self._nmap_options = set(options.split())
        if safe_mode and not self._nmap_options.isdisjoint(unsafe_opts):
            raise Exception("unsafe options activated while safe_mode "
                            "is set True")
        self.__nmap_dynamic_options = options
        self.__nmap_command_line = self.get_command_line()

        if event_callback and callable(event_callback):
            self.__nmap_event_callback = event_callback
        else:
            self.__nmap_event_callback = None
        (self.DONE, self.READY, self.RUNNING, self.CANCELLED,
         self.FAILED) = range(5)

        self.__io_queue = Queue()
        self.__ioerr_queue = Queue()

        # API usable in callback function
        self.__state = self.READY
        self.__starttime = 0
        self.__endtime = 0
        self.__version = ''
        self.__progress = 0
        self.__etc = 0
        self.__elapsed = ''
        self.__summary = ''
        self.__stdout = ''
        self.__stderr = ''

    def _run_init(self):
        """
        Protected method ran at every call to run(). This ensures that no
        no parameters are polluted.
        """
        self.__nmap_proc = None
        self.__nmap_rc = -1
        self.__nmap_command_line = self.get_command_line()
        self.__state = self.READY
        self.__progress = 0
        self.__etc = 0
        self.__starttime = 0
        self.__endtime = 0
        self.__elapsed = ''
        self.__summary = ''
        self.__version = ''
        self.__io_queue = Queue()
        self.__ioerr_queue = Queue()
        self.__stdout = ''
        self.__stderr = ''

    def _whereis(self, program):
        """
        Protected method enabling the object to find the full path of a binary
        from its PATH environment variable.

        :param program: name of a binary for which the full path needs to
        be discovered.

        :return: the full path to the binary.

        :todo: add a default path list in case PATH is empty.
        """
        for path in os.environ.get('PATH', '').split(':'):
            if (os.path.exists(os.path.join(path, program))
                    and not os.path.isdir(os.path.join(path, program))):
                return os.path.join(path, program)
        return None

    def get_command_line(self):
        """
        Public method returning the reconstructed command line ran via the lib

        :return: the full nmap command line to run
        :rtype: string
        """
        return ("%s %s %s %s %s" % (
            self.__sudo_run, self.__nmap_binary, self.__nmap_fixed_options,
            self.__nmap_dynamic_options,
            " ".join(self.__nmap_targets))).lstrip()

    def sudo_run(self, run_as='root'):
        """
        Public method enabling the library's user to run the scan with
        priviledges via sudo. The sudo configuration should be set manually
        on the local system otherwise sudo will prompt for a password.
        This method alters the command line by prefixing the sudo command to
        nmap and will then call self.run()

        :param run_as: user name to which the lib needs to sudo to run the scan

        :return: return code from nmap execution
        """
        sudo_user = run_as.split().pop()
        try:
            pwd.getpwnam(sudo_user).pw_uid
        except KeyError:
            raise

        sudo_path = self._whereis("sudo")
        if sudo_path is None:
            raise EnvironmentError(
                2, "sudo is not installed or "
                "could not be found in system path: "
                "cannot run nmap with sudo")

        self.__sudo_run = "%s -u %s" % (sudo_path, sudo_user)
        rc = self.run()
        self.__sudo_run = ""

        return rc

    def run(self):
        """
        Public method which is usually called right after the constructor
        of NmapProcess. This method starts the nmap executable's subprocess.
        It will also bind to threads that will read from the subprocess'
        stdout and stderr and push the lines read into a python queue for
        further processing.

        return: return code from nmap execution from self.__wait()
        """
        def stream_reader(thread_stdout, io_queue):
            """
            local function that will read lines from a file descriptor
            and put the data in a python queue for further processing.

            :param thread_stdout: file descriptor to read lines from.
            :param io_queue: queue in which read lines will be pushed.
            """
            for streamline in iter(thread_stdout.readline, b''):
                try:
                    if streamline is not None:
                        io_queue.put(streamline)
                except Full:
                    raise Exception("Queue ran out of buffer: "
                                    "increase q.get(timeout) value")

        self._run_init()
        try:
            _tmp_cmdline = shlex.split(self.__nmap_command_line)
            self.__nmap_proc = subprocess.Popen(args=_tmp_cmdline,
                                                stdin=subprocess.PIPE,
                                                stdout=subprocess.PIPE,
                                                stderr=subprocess.PIPE)
            Thread(target=stream_reader,
                   name='stdout-reader',
                   args=(self.__nmap_proc.stdout, self.__io_queue)).start()
            Thread(target=stream_reader,
                   name='stderr-reader',
                   args=(self.__nmap_proc.stderr, self.__ioerr_queue)).start()
            self.__state = self.RUNNING
        except OSError:
            self.__state = self.FAILED
            raise

        return self.__wait()

    def __wait(self):
        """
        Private method, called by run() which will loop and
        process the data from the python queues. Those queues are fed by
        the stream_readers of run, reading lines from subprocess.stdout/err.
        Each time data is pushed in the nmap's stdout queue:
        1. __process_event is called with the acquired data as input
        2. if an event callback was provided, the function passed in the
           constructor is called.

        :return: return code from nmap execution
        """
        thread_stream = ''
        while self.__nmap_proc.poll() is None or not self.__io_queue.empty():
            try:
                thread_stream = self.__io_queue.get(timeout=1)
            except Empty:
                pass
            except KeyboardInterrupt:
                break
            else:
                evnt = self.__process_event(thread_stream)
                if self.__nmap_event_callback and evnt:
                    self.__nmap_event_callback(self, thread_stream)
                self.__stdout += thread_stream

        self.__nmap_rc = self.__nmap_proc.poll()
        if self.rc is None:
            self.__state = self.CANCELLED
        elif self.rc == 0:
            self.__state = self.DONE
            self.__progress = 100
        else:
            self.__state = self.FAILED
            self.__stderr = self.__ioerr_queue.get(timeout=2)

        return self.rc

    def run_background(self):
        self.daemon = True
        super(NmapProcess, self).start()

    def is_running(self):
        """
        Checks if nmap is still running.

        :return: True if nmap is still running
        """
        return self.state == self.RUNNING

    def has_terminated(self):
        """
        Checks if nmap has terminated. Could have failed or succeeded

        :return: True if nmap process is not running anymore.
        """
        return (self.state == self.DONE or self.state == self.FAILED
                or self.state == self.CANCELLED)

    def has_failed(self):
        """
        Checks if nmap has failed.

        :return: True if nmap process errored.
        """
        return self.state == self.FAILED

    def is_successful(self):
        """
        Checks if nmap terminated successfully.

        :return: True if nmap terminated successfully.
        """
        return self.state == self.DONE

    def __process_event(self, eventdata):
        """
        Private method called while nmap process is running. It enables the
        library to handle specific data/events produced by nmap process.
        So far, the following events are supported:

        1. task progress: updates estimated time to completion and percentage
           done while scan is running. Could be used in combination with a
           callback function which could then handle this data while scan is
           running.
        2. nmap run: header of the scan. Usually displayed when nmap is started
        3. finished: when nmap scan ends.

        :return: True if the event is known.

        :todo: handle parsing directly via NmapParser.parse()
        """
        rval = False
        try:
            edomdoc = pulldom.parseString(eventdata)
            for xlmnt, xmlnode in edomdoc:
                if xlmnt is not None and xlmnt == pulldom.START_ELEMENT:
                    if (xmlnode.nodeName == 'taskprogress'
                            and xmlnode.attributes.keys()):
                        percent_done = xmlnode.attributes['percent'].value
                        etc_done = xmlnode.attributes['etc'].value
                        self.__progress = percent_done
                        self.__etc = etc_done
                        rval = True
                    elif (xmlnode.nodeName == 'nmaprun'
                          and xmlnode.attributes.keys()):
                        self.__starttime = xmlnode.attributes['start'].value
                        self.__version = xmlnode.attributes['version'].value
                        rval = True
                    elif (xmlnode.nodeName == 'finished'
                          and xmlnode.attributes.keys()):
                        self.__endtime = xmlnode.attributes['time'].value
                        self.__elapsed = xmlnode.attributes['elapsed'].value
                        self.__summary = xmlnode.attributes['summary'].value
                        rval = True
        except Exception:
            pass
        return rval

    @property
    def targets(self):
        """
        Provides the list of targets to scan

        :return: list of string
        """
        return self.__nmap_targets

    @property
    def options(self):
        """
        Provides the list of options for that scan

        :return: list of string (nmap options)
        """
        return self._nmap_options

    @property
    def state(self):
        """
        Accessor for nmap execution state. Possible states are:

        - self.READY
        - self.RUNNING
        - self.FAILED
        - self.CANCELLED
        - self.DONE

        :return: integer (from above documented enum)
        """
        return self.__state

    @property
    def starttime(self):
        """
        Accessor for time when scan started

        :return: string. Unix timestamp
        """
        return self.__starttime

    @property
    def endtime(self):
        """
        Accessor for time when scan ended

        :return: string. Unix timestamp
        """
        return self.__endtime

    @property
    def elapsed(self):
        """
        Accessor returning for how long the scan ran (in seconds)

        :return: string
        """
        return self.__elapsed

    @property
    def summary(self):
        """
        Accessor returning a short summary of the scan's results

        :return: string
        """
        return self.__summary

    @property
    def etc(self):
        """
        Accessor for estimated time to completion

        :return:  estimated time to completion
        """
        return self.__etc

    @property
    def version(self):
        """
        Accessor for nmap binary version number

        :return: version number of nmap binary
        :rtype: string
        """
        return self.__version

    @property
    def progress(self):
        """
        Accessor for progress status in percentage

        :return: percentage of job processed.
        """
        return self.__progress

    @property
    def rc(self):
        """
        Accessor for nmap execution's return code

        :return: nmap execution's return code
        """
        return self.__nmap_rc

    @property
    def stdout(self):
        """
        Accessor for nmap standard output

        :return: output from nmap scan in XML
        :rtype: string
        """
        return self.__stdout

    @property
    def stderr(self):
        """
        Accessor for nmap standard error

        :return: output from nmap when errors occurred.
        :rtype: string
        """
        return self.__stderr
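# Usage sketch (added for illustration): the accessors above suggest an
# NmapProcess-style runner, as in python-libnmap. The constructor and
# run_background() below are assumptions, not shown in this example.
#
# proc = NmapProcess(targets="127.0.0.1", options="-sT")
# proc.run_background()
# while proc.is_running():
#     print("%s%% done, ETC: %s" % (proc.progress, proc.etc))
#     time.sleep(2)
# print(proc.summary if proc.is_successful() else proc.stderr)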
Example #49
0
class PortScan(object):
    """Port scanner"""

    def __init__(self, c, user_ports):
        """
        初始化
        :param c:
        :param user_ports:
        """
        self.config = c
        self.probes = [
            "\r\n\r\n",
            "stats\r\n",
            "test\r\n",
            "ls\r\n",
            "GET / HTTP/1.0\r\n\r\n",
            "GET / HTTP/1.1\nuser-agent: Googlebot\n\n"
        ]

        self.signs_from_file = self.config.file2list("conf/signs.conf")
        self.ports = []
        self.get_ports(user_ports)
        self.lock = threading.Lock()
        self.ping_list = []
        self.q = Queue()
        self.sp = Queue()
        self.signs = self.prep_signs()
        self.ip_dict = {}

    def get_ports(self, user_ports):
        """
        获取扫描端口列表
        :param user_ports:
        :return:
        """
        if user_ports == '':
            # 文件中读,端口配置
            user_ports = open("conf/ports.conf", "r").read().replace("\r", "").replace("\n", "")
        try:
            self.ports = user_ports.split(",")
            remove_port = []
            for p in self.ports:
                if str(p).find("-") >= 0:
                    remove_port.append(str(p))
                    start = int(p.split("-")[0])
                    end = int(p.split("-")[1]) + 1
                    for i in range(start, end):
                        self.ports.append(i)
            for dup in remove_port:
                self.ports.remove(dup)
            # normalize everything to int so socket.connect() gets valid ports
            self.ports = [int(p) for p in self.ports]
        except Exception:
            printRed('[!] not a valid port list. use a format like 22,80,1433 or 22-1000')
            sys.exit()

    # ping sweep worker
    def pinger(self):
        """
        Ping sweep, run concurrently by multiple threads.
        :return:
        """
        while True:
            ip = self.q.get()
            if platform.system() == 'Linux':
                p = Popen(['ping', '-c', '2', ip], stdout=PIPE)
                m = re.search(r'(\d)\sreceived', p.stdout.read())
                try:
                    if m.group(1) != '0':
                        self.ping_list.append(ip)
                        self.lock.acquire()
                        printRed("%s is live!!\r\n" % ip)
                        self.lock.release()
                except:
                    pass

            if platform.system() == 'Darwin':
                import commands
                p = commands.getstatusoutput("ping -c 2 " + ip)
                m = re.findall('ttl', p[1])
                try:
                    if m:
                        self.ping_list.append(ip)
                        self.lock.acquire()
                        printRed("%s is live!!\r\n" % ip)
                        self.lock.release()
                except:
                    pass

            if platform.system() == 'Windows':
                p = Popen('ping -n 2 ' + ip, stdout=PIPE)
                m = re.findall('TTL', p.stdout.read())
                if m:
                    self.ping_list.append(ip)
                    self.lock.acquire()
                    printRed("%s is live!!\r\n" % ip)
                    self.lock.release()
            self.q.task_done()

    def ping_scan(self, isping, threads, ips):
        """
        ping_scan ping扫描
        :param isping:
        :param threads:
        :param ips:
        :return:
        """
        starttime = time.time()
        print "[*] start Scanning at %s" % time.ctime()
        # isping=='no' 就禁ping扫描
        # 默认ping 扫描
        if isping == 'yes':
            print "Scanning for live machines..."
            for i in xrange(threads):
                t = Thread(target=self.pinger)
                t.setDaemon(True)
                t.start()
            for ip in ips:
                self.q.put(ip)

            self.q.join()

        else:
            self.ping_list = ips

        if len(self.ping_list) == 0:
            print "did not find any live machine"
            sys.exit()

        print "[*] Scanning for live machines done, elapsed time: %s" % (time.time() - starttime)

    def prep_signs(self):
        """
        文件中获取
        :return:
        """
        sign_list = []
        for item in self.signs_from_file:
            (label, pattern) = item.split('|', 1)
            sign = (label, pattern)
            sign_list.append(sign)
        return sign_list

    @staticmethod
    def match_banner(banner, slist):
        """
        匹配端口对应服务类型
        :param banner:
        :param slist:
        :return:
        """
        # print banner
        for item in slist:
            p = re.compile(item[1])
            # print item[1]
            if p.search(banner):
                return item[0]
        return 'Unknown'

    def scan_ports(self):
        """
        扫端口及其对应服务类型函数
        :return:
        """
        while True:
            ip, port = self.sp.get()
            # print ip,port
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(5)  # avoid hanging on filtered ports
            # identify the service type behind the port
            service = 'Unknown'
            try:
                s.connect((ip, port))
            except:
                self.sp.task_done()
                continue

            try:
                result = s.recv(256)
                service = self.match_banner(result, self.signs)
            except:
                for probe in self.probes:
                    # print probe
                    try:
                        s.close()
                        sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        sd.settimeout(5)
                        sd.connect((ip, port))
                        sd.send(probe)
                    except:
                        continue
                    try:
                        result = sd.recv(256)
                        service = self.match_banner(result, self.signs)
                        if service != 'Unknown':
                            break
                    except:
                        continue

            # record ip:port under the detected service name
            self.ip_dict.setdefault(service, []).append(ip + ':' + str(port))
            self.lock.acquire()
            printRed("%s opening %s\r\n" % (ip, port))
            self.lock.release()

            self.sp.task_done()

    def ports_scan(self, threads, filename):
        """
        端口扫描主函数
        :param threads:
        :param filename:
        :return:
        """
        print "Scanning ports now..."
        print "[*] start Scanning live machines' ports at %s" % time.ctime()
        starttime = time.time()

        for i in xrange(threads):
            st = Thread(target=self.scan_ports)
            st.setDaemon(True)
            st.start()

        for scanip in self.ping_list:
            for port in self.ports:
                self.sp.put((scanip, port))
        self.sp.join()
        print "[*] Scanning ports done,it has Elapsed time:%s " % (time.time() - starttime)
        # 将服务端口 信息 记录文件
        for name in self.ip_dict.keys():
            if len(self.ip_dict[name]):
                contents = str(name) + ' service has:\n' + '       ' + str(self.ip_dict[name]) + '\n'
                self.config.write_file(contents=contents, filename=filename)

    def handle_unknown(self):
        """
        Reclassify services that banner matching could not identify,
        using well-known port numbers.
        :return:
        """
        port2service = {
            '389': 'ldap', '445': 'smb', '1433': 'mssql', '27017': 'mongodb',
            '5432': 'postgres', '443': 'ssl', '873': 'rsync', '6379': 'redis',
            '3306': 'mysql', '3307': 'mysql', '3308': 'mysql', '3309': 'mysql',
            '22': 'ssh', '10022': 'ssh',
        }
        if 'Unknown' in self.ip_dict:
            for ip in self.ip_dict['Unknown']:
                try:
                    port = str(ip).split(':')[1]
                    if port in port2service:
                        self.ip_dict.setdefault(port2service[port], []).append(ip)
                except Exception as e:
                    print e
            # move mongodb instances that were mis-identified as http
            if "http" in self.ip_dict:
                for ip in self.ip_dict['http'][:]:  # iterate over a copy: we mutate the list
                    if str(ip).split(':')[1] == '27017':
                        self.ip_dict['http'].remove(ip)
                        self.ip_dict.setdefault('mongodb', []).append(ip)

    def run(self, is_ping, threads, ips, filename):
        """
        Entry point: ping sweep, then port scan, then post-processing.
        :param is_ping:
        :param threads:
        :param ips:
        :param filename:
        :return:
        """
        self.ping_scan(is_ping, threads, ips)
        self.ports_scan(threads, filename)
        self.handle_unknown()
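# Usage sketch (illustrative): `Config` with file2list()/write_file() and the
# printRed() helper come from the surrounding project and are assumed here.
#
# c = Config()
# scanner = PortScan(c, user_ports='22,80,443,8000-8100')
# scanner.run(is_ping='yes', threads=50,
#             ips=['192.168.1.%d' % i for i in range(1, 255)],
#             filename='result.txt')
# print scanner.ip_dict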
Example #50
0
class BACnet_application(BIPSimpleApplication, RecurringTask):
    def __init__(self, i_am_callback, *args):
        BIPSimpleApplication.__init__(self, *args)
        RecurringTask.__init__(self, 250)

        self.i_am_callback = i_am_callback

        self.request_queue = Queue()

        # assigning invoke identifiers
        self.nextInvokeID = 1

        # keep track of requests to line up responses
        self.iocb = {}

        self.install_task()

    def process_task(self):
        while True:
            try:
                iocb = self.request_queue.get(False)
            except Empty:
                break

            self.handle_request(iocb)

    def submit_request(self, iocb):
        self.request_queue.put(iocb)

    def get_next_invoke_id(self, addr):
        """Called to get an unused invoke ID."""

        initialID = self.nextInvokeID
        while True:
            invokeID = self.nextInvokeID
            self.nextInvokeID = (self.nextInvokeID + 1) % 256

            # see if we've checked for them all
            if initialID == self.nextInvokeID:
                raise RuntimeError("no available invoke ID")

            # see if this one is used
            if (addr, invokeID) not in self.iocb:
                break

        return invokeID

    def handle_request(self, iocb):
        apdu = iocb.ioRequest

        if isinstance(apdu, ConfirmedRequestSequence):
            # assign an invoke identifier
            apdu.apduInvokeID = self.get_next_invoke_id(apdu.pduDestination)

            # build a key to reference the IOCB when the response comes back
            invoke_key = (apdu.pduDestination, apdu.apduInvokeID)

            # keep track of the request
            self.iocb[invoke_key] = iocb

        try:
            self.request(apdu)
        except StandardError as e:
            iocb.set_exception(e)

    def confirmation(self, apdu):
        # build a key to look for the IOCB
        invoke_key = (apdu.pduSource, apdu.apduInvokeID)

        # find the request
        iocb = self.iocb.get(invoke_key, None)
        if iocb is None:
            _log.error("no matching request for confirmation")
            return
        del self.iocb[invoke_key]

        if isinstance(apdu, AbortPDU):
            iocb.set_exception(
                RuntimeError("Device communication aborted: " + str(apdu)))
            return

        if isinstance(apdu, Error):
            iocb.set_exception(
                RuntimeError("Error during device communication: " +
                             str(apdu)))
            return

        elif (isinstance(iocb.ioRequest, ReadPropertyRequest)
              and isinstance(apdu, ReadPropertyACK)):
            # find the datatype
            datatype = get_datatype(apdu.objectIdentifier[0],
                                    apdu.propertyIdentifier)
            if not datatype:
                iocb.set_exception(TypeError("unknown datatype"))
                return

            # special case for array parts, others are managed by cast_out
            if issubclass(datatype, Array) and (apdu.propertyArrayIndex
                                                is not None):
                if apdu.propertyArrayIndex == 0:
                    value = apdu.propertyValue.cast_out(Unsigned)
                else:
                    value = apdu.propertyValue.cast_out(datatype.subtype)
            else:
                value = apdu.propertyValue.cast_out(datatype)
                if issubclass(datatype, Enumerated):
                    value = datatype(value).get_long()
            iocb.set(value)

        elif (isinstance(iocb.ioRequest, WritePropertyRequest)
              and isinstance(apdu, SimpleAckPDU)):
            iocb.set(apdu)
            return

        elif (isinstance(iocb.ioRequest, ReadPropertyMultipleRequest)
              and isinstance(apdu, ReadPropertyMultipleACK)):

            result_dict = {}
            for result in apdu.listOfReadAccessResults:
                # here is the object identifier
                objectIdentifier = result.objectIdentifier

                # now come the property values per object
                for element in result.listOfResults:
                    # get the property and array index
                    propertyIdentifier = element.propertyIdentifier
                    propertyArrayIndex = element.propertyArrayIndex

                    # here is the read result
                    readResult = element.readResult

                    # check for an error
                    if readResult.propertyAccessError is not None:
                        error_obj = readResult.propertyAccessError

                        msg = 'ERROR DURING SCRAPE (Class: {0} Code: {1})'
                        _log.error(
                            msg.format(error_obj.errorClass,
                                       error_obj.errorCode))

                    else:
                        # here is the value
                        propertyValue = readResult.propertyValue

                        # find the datatype
                        datatype = get_datatype(objectIdentifier[0],
                                                propertyIdentifier)
                        if not datatype:
                            iocb.set_exception(TypeError("unknown datatype"))
                            return

                        # special case for array parts, others are managed by cast_out
                        if issubclass(datatype, Array) and (propertyArrayIndex
                                                            is not None):
                            if propertyArrayIndex == 0:
                                value = propertyValue.cast_out(Unsigned)
                            else:
                                value = propertyValue.cast_out(
                                    datatype.subtype)
                        else:
                            value = propertyValue.cast_out(datatype)
                            if issubclass(datatype, Enumerated):
                                value = datatype(value).get_long()

                            try:
                                if issubclass(datatype, Array):
                                    if issubclass(datatype.subtype, Choice):
                                        new_value = []
                                        for item in value.value[1:]:
                                            result = item.dict_contents(
                                            ).values()
                                            if result[0] != ():
                                                new_value.append(result[0])
                                            else:
                                                new_value.append(None)
                                        value = new_value
                            except StandardError as e:
                                _log.exception(e)
                                iocb.set_exception(e)

                        result_dict[objectIdentifier[0], objectIdentifier[1],
                                    propertyIdentifier,
                                    propertyArrayIndex] = value

            iocb.set(result_dict)

        else:
            iocb.set_exception(TypeError('Unsupported Request Type'))

    def indication(self, apdu):
        if isinstance(apdu, IAmRequest):
            device_type, device_instance = apdu.iAmDeviceIdentifier
            if device_type != 'device':
                #Bail without an error.
                return

            _log.debug("Calling IAm callback.")

            self.i_am_callback(str(apdu.pduSource), device_instance,
                               apdu.maxAPDULengthAccepted,
                               str(apdu.segmentationSupported), apdu.vendorID)

        # forward it along
        BIPSimpleApplication.indication(self, apdu)
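# Self-contained sketch of the invoke-ID allocation used above: identifiers
# cycle through 0-255 per destination address, skipping any that still have an
# outstanding request. Names here are illustrative, not from the original.
def next_invoke_id(outstanding, state, addr):
    """outstanding: dict keyed by (addr, invoke_id); state: {'next': int}."""
    initial_id = state['next']
    while True:
        invoke_id = state['next']
        state['next'] = (state['next'] + 1) % 256
        if initial_id == state['next']:
            # wrapped all the way around without finding a free identifier
            raise RuntimeError("no available invoke ID")
        if (addr, invoke_id) not in outstanding:
            return invoke_id

# e.g.: pending = {}; s = {'next': 1}
#       pending[('10.0.0.5', next_invoke_id(pending, s, '10.0.0.5'))] = object()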
Example #51
0
class DuanZiSpider(object):
    """Neihanpa crawler: scrapes jokes and the images embedded in them."""
    def __init__(self):
        """Initialize the spider."""
        self.base_url = 'http://www.neihanpa.com/article'
        self.start_index = int(raw_input('Enter start page: '))
        self.end_index = int(raw_input('Enter end page: '))
        self.headers = HEADERS_USER
        # queue holding the page indexes to crawl
        self.queue = Queue(int(self.end_index - self.start_index))
        # XPath rule for extracting detail-page urls
        self.xpath_urls = '//a[@class="title" and @title]/@href'
        # Redis connection
        self.redis_cli = StrictRedis('127.0.0.1')

    def send_request(self, url, query=None):
        """Send an HTTP request and return the raw response body."""
        print 'thread: %s, fetching page: %s' % (threading.current_thread(), url)
        s = requests.session()
        s.keep_alive = False
        response = s.get(
            url,
            params=query,
            headers={'User-Agent': random.choice(self.headers)})
        return response.content

    def __open(self):
        self.client = pymongo.MongoClient(host="127.0.0.1", port=27017)
        self.db = self.client.test
        self.collection = self.db.neihan

    def save_content(self, html):
        """Save the scraped content to MongoDB."""
        html_obj = etree.HTML(html)
        # extract the joke text
        content_str = ''
        contents = html_obj.xpath('//div[@class="detail"]//p/text()')
        for co in contents:
            content_str += (co + '\n')
        # joke title
        title = html_obj.xpath('//h1[@class="title"]/text()')

        # save the joke image locally; its name and path go to MongoDB
        img = html_obj.xpath('//div[@class="detail"]//img/@src')
        try:
            url = img[0]
        except Exception as e:
            print e
            return
        try:
            file_name = re.search(r'/(\w+\.png)$', url).group(1)
        except Exception as e:
            file_name = base64.b16encode('dadasda') + '.png'
            print "failed to extract the image name"
        response = self.send_request(url)
        with open(r'd:/neihan/images/' + file_name, 'wb') as f:
            f.write(response)

        self.__open()
        item_list = {}
        item_list['title'] = title[0]
        item_list['img_path'] = url
        item_list['content'] = content_str
        print "[INFO] 正在写入MongoDB"
        print self.client
        try:
            self.collection.insert(item_list)
            print "[INFO] 写入成功!"
        except Exception as e:
            print '写入mongodb失败'

    def parse_index_page(self, html):
        """Parse an index page and push the extracted detail-page urls to Redis."""
        html_obj = etree.HTML(html)
        urls = html_obj.xpath(self.xpath_urls)
        print urls
        # store the extracted urls in Redis
        for url in urls:
            self.redis_cli.lpush('urls', url)
            # print 'saved: %s, ok' % url

    def do_job(self):
        """Worker: crawl index pages and their detail pages."""
        while True:
            i = self.queue.get()
            # fetch and parse one index page
            url = self.base_url + '/index_' + str(i) + '.html'
            html = self.send_request(url)
            self.parse_index_page(html)

            while True:
                # pop detail urls from Redis and crawl them
                url_detail = self.redis_cli.rpop('urls')
                if not url_detail:
                    break
                detail_url = "http://www.neihanpa.com" + url_detail
                detail_html = self.send_request(detail_url)
                self.save_content(detail_html)

            # notify the queue after each finished task
            self.queue.task_done()

    def main(self):
        """Start a pool of 9 worker threads and feed them the page indexes."""
        for _ in range(1, 10):
            t = threading.Thread(target=self.do_job)
            # daemon threads exit together with the main thread
            t.daemon = True
            t.start()
        # enqueue the page indexes (the original example does not show this
        # step) and block until all of them are processed
        for i in range(self.start_index, self.end_index + 1):
            self.queue.put(i)
        self.queue.join()
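# The thread-pool idiom used by main() above, reduced to a self-contained
# sketch (Python 2, matching this example):
from Queue import Queue
import threading

def worker(q):
    while True:
        item = q.get()
        print 'processing', item
        q.task_done()

q = Queue()
for _ in range(9):
    t = threading.Thread(target=worker, args=(q,))
    t.daemon = True   # daemon threads die with the main thread
    t.start()
for i in range(10):
    q.put(i)
q.join()              # blocks until every task_done() has been called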
Example #52
0
def get_vj_alignments(ref, reads, cmd_build_index, args_build_index, cmd_align,
                      args_align_v, args_align_j, phred_encoding, n_threads):
    """Align V and J germline reference sequences to reads (in FastQ format).
    Yields (v_rec, j_rec) as 2-tuples, where v_rec is a SAMRecord object
    containing an alignment of a V sequence to a read identified by
    v_rec.QNAME. j_rec is similar, but contains an alignment of a J sequence to
    the trimmed version of the same read.

    :ref: AlleleContainer containing both V and J germline reference alleles.
    :reads: handle to the FastQ file containing the reads.
    :cmd_build_index: command that will be run to build an index of the
    reference sequences. If empty "" or None, this command will not be run.
    :args_build_index: string, arguments that will be provided to
    cmd_build_index
    :cmd_align: string, command that will be run to start the aligner
    :args_align_v: string, arguments that will be provided to cmd_align to
    align V sequences to the reads.
    :args_align_j: string, arguments that will be provided to cmd_align to
    align J sequences to the reads.
    :phred_encoding: string passed to the aligner to tell it which encoding to
    use. (e.g. for bowtie2 "33" or "64" is used).
    :n_threads: maximum number of threads/processes the aligner is allowed to
    use.
    """
    with TemporaryDirectory() as dirname:
        logger.info("Created temporary directory: \"%s\"" % dirname)
        # e.g. if n_threads is 7, 4 will be given to v aligner and 3 to the
        # j aligner. This is because the v aligner is first and has to do more
        # work.
        n_threads_v = int(round(n_threads / float(2)))
        n_threads_j = n_threads // 2
        v_aligner = start_aligner(dirname, ref, "V", cmd_build_index,
                                  args_build_index, cmd_align, args_align_v,
                                  phred_encoding, n_threads_v)
        j_aligner = start_aligner(dirname, ref, "J", cmd_build_index,
                                  args_build_index, cmd_align, args_align_j,
                                  phred_encoding, n_threads_j)

        v_aligner_input_thread = th.Thread(name="input to v_aligner",
                                           target=_copyfileobj,
                                           args=(reads, v_aligner.stdin))
        v_aligner_input_thread.start()

        # Trim V-region from reads and send trimmed reads to aligner, for J
        # segment alignment, in separate thread
        v_records = Queue()
        j_aligner_input_thread = th.Thread(name="input to j_aligner",
                                           target=trim_and_send,
                                           args=(v_aligner, j_aligner,
                                                 v_records))
        j_aligner_input_thread.start()

        vj_records = Queue()
        vj_output_thread = th.Thread(name="collect vj output",
                                     target=collect_vj_output,
                                     args=(v_records, j_aligner, vj_records))
        vj_output_thread.start()

        while vj_output_thread.is_alive() or not vj_records.empty():
            while not vj_records.empty():
                yield vj_records.get()
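# _copyfileobj, used to feed the reads into the V aligner above, is not shown
# in this example; a minimal sketch (an assumption, not the original) pumps one
# file object into another and closes the destination so the aligner sees EOF:
import shutil

def _copyfileobj(src, dst):
    """Copy src to dst in chunks, then close dst to signal EOF to the child."""
    try:
        shutil.copyfileobj(src, dst)
    finally:
        dst.close()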
Example #53
0
class ResourceDownloader:
    def __init__(self, directory):
        self.killer = GracefulKiller()
        self.output_directory = directory
        self.queue = Queue()
        self.queue_thread = threading.Thread(target=self.__consume)
        self.__initialPopulate()
        try:
            self.queue_thread.start()
        except KeyboardInterrupt:
            # threading.Thread has no stop(); ask the consumer loop to exit
            self.killer.kill_now = True
            raise

    # This will fetch the pending requests that are stored in sqlite
    def __initialPopulate(self):
        s = Session()
        requests = s.query(DownloadRequest).all()
        for request in requests:
            print "Added pending request: ", request.url
            self.queue.put(request)
        Session.remove()

    def __consume(self):
        empty = False
        request = None
        s = None
        print self.killer.kill_now
        while not self.killer.kill_now:
            try:
                #Fetch request from the queue
                print "Consuming"
                try:
                    request = self.queue.get(timeout=5)
                    #This sleep is to avoid race condition
                    time.sleep(0.5)
                    empty = False
                except Exception:
                    # queue.get() timed out: nothing pending right now
                    empty = True
                if not empty:
                    s = Session()
                    s.add(request)
                    print "Fetching: ", request.url
                    #Get the proper callback and process the request url provided
                    RESOURCE_CALLBACKS[request.service](self.output_directory,
                                                        request.url)
                    #Remove from the pending requests table
                    s.delete(request)
                    s.commit()
                    s.flush()
                    Session.remove()
            except KeyboardInterrupt:
                s.rollback()
                raise

    def putRequest(self, request):
        s = Session()
        self.queue.put(request)
        s.add(request)
        s.commit()
        Session.remove()

    def download(self, s, u):
        print " --> Request Received ", u
        request = DownloadRequest(service=s, url=u)
        self.putRequest(request)
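# Usage sketch (illustrative): GracefulKiller, Session, DownloadRequest and
# RESOURCE_CALLBACKS come from the surrounding project; the service name and
# URL below are made up.
#
# downloader = ResourceDownloader('/tmp/downloads')
# downloader.download('youtube', 'https://example.com/some/video')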
Example #54
0
def consume(command, frame, no):
    print(command)
    global STOP_ALL, L
    time.sleep(float(no) / 10.0 * 3)
    process = subprocess.Popen(command,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=True)
    stdout_queue = Queue()
    stdout_reader = FileReader(process.stdout, stdout_queue)
    stdout_reader.start()
    stderr_queue = Queue()
    stderr_reader = FileReader(process.stderr, stderr_queue)
    stderr_reader.start()
    frame.txt_log.write(('No%s_' % str(int(no) + 1)) +
                        ACTIVE_DEVICES[int(no)] + u' <<<start>>>' + '\n')
    _mod = get_own_mod(no)
    log = None
    if is_save_log:
        log = save_logcat(no)
    while not stdout_reader.eof() or not stderr_reader.eof():
        if STOP_ALL:
            print('stop')
            L.acquire()
            frame.btn_start.SetLabel(u'Start')
            frame.txt_log.write(('No%s' % str(int(no) + 1)) + u' <<<stop>>>' +
                                '\n')
            L.release()
            break
        while not stdout_queue.empty():
            line = stdout_queue.get().decode("utf-8", errors="ignore")
            try:
                if is_save_log:
                    log.write(line)
                main_doing(line, _mod, frame, no)
            except IndexError as e:
                frame.txt_log.SetDefaultStyle(wx.TextAttr('RED'))
                frame.txt_log.write(repr(e))
                frame.txt_log.SetDefaultStyle(wx.TextAttr('BLACK'))
        while not stderr_queue.empty():
            line = stderr_queue.get().decode("utf-8", errors="ignore")
            L.acquire()
            frame.txt_log.SetDefaultStyle(wx.TextAttr('RED'))
            frame.txt_log.write(line)
            frame.txt_log.SetDefaultStyle(wx.TextAttr('BLACK'))
            L.release()
            if 'replaced' in line:
                continue
            print('Received line on standard error: ' + repr(line))
            frame.txt_log.SetDefaultStyle(wx.TextAttr('RED'))
            frame.txt_log.write(repr(line))
            frame.txt_log.SetDefaultStyle(wx.TextAttr('BLACK'))
            STOP_ALL = True
            frame.btn_start.SetLabel(u'Start')
        # Sleep a bit before asking the readers again.
        try:
            time.sleep(.1)
        except KeyboardInterrupt:
            pass

    if is_save_log:
        log.close()
    frame.FindWindowById(id_cb_save_log).Enable(True)
    frame.FindWindowById(id_cb_ap_wakeup).Enable(True)
    frame.FindWindowById(id_cb_ap_asr).Enable(True)
    # Let's be tidy and join the threads we've started.
    # stdout_reader.join()
    # stderr_reader.join()
    # Close subprocess' file descriptors.
    # process.stdout.close()
    # process.stderr.close()
    # os.system("taskkill /t /f /pid %s" % process.pid)
    os.popen("taskkill /t /f /pid %s" % process.pid)
    process.kill()
    if STOP_ALL:
        return
    print('unlink')
    L.acquire()
    try:
        frame.txt_log.SetDefaultStyle(wx.TextAttr('RED'))
        frame.txt_log.write(ACTIVE_DEVICES[int(no)] + u' <<<disconnected>>>' + '\n')
        frame.txt_log.SetDefaultStyle(wx.TextAttr('BLACK'))
    except IndexError as e:
        print(repr(e))
    frame.act_refresh_devices()
    L.release()
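# FileReader is not defined in this example; a minimal sketch of such a
# reader thread (an assumption about the original) could be:
import threading

class FileReader(threading.Thread):
    """Drain a pipe into a queue so the GUI thread never blocks on read."""
    def __init__(self, fd, queue):
        threading.Thread.__init__(self)
        self.daemon = True
        self._fd = fd
        self._queue = queue
        self._eof = False

    def run(self):
        for line in iter(self._fd.readline, b''):
            self._queue.put(line)
        self._eof = True

    def eof(self):
        # treat buffered lines as "not yet finished" so callers drain the queue
        return self._eof and self._queue.empty()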
Example #55
0
class fullWindow():
    """Window class that displays widgets and starts/interacts with speech recognition thread."""
    def __init__(self):
        """Creates Tkinter window and initializes its widgets."""
        self.rootWin = Tk()
        self.rootWin.configure(background='black')
        self.rootWin.attributes("-fullscreen", True)
        self.time = time.time() - 5 * 60
        #this makes the loop below update weather right away
        self.rootWin.bind('<Return>', self.escape)  #enter key exits program
        self.rootWin.bind('<Down>',
                          self.restartSpeech)  #down key restarts program
        #SETUP FRAMES
        self.leftFrame = Frame(self.rootWin,
                               background='black')  # create first frame
        self.leftFrame.pack(
            expand=False, fill='both', side=LEFT
        )  # pack frame against LEFT side, fill in x and y directions
        self.rightFrame = Frame(self.rootWin,
                                background='black')  # create a second frame
        self.rightFrame.pack(
            expand=False, fill='both', side=LEFT
        )  # pack second frame next to the first, fill in x and y directions

        #WIDGETS
        #DIRECTION
        self.direction = direction(
            self.leftFrame, text_color)  #create direction object in leftFrame
        #CLOCK
        self.address = "Needham, US"
        self.clock = clock(self.leftFrame,
                           text_color)  # create clock object in leftFrame
        self.clock.pack(
            side=TOP,
            anchor=NW)  # put clock object at the top of the frame
        self.timezoneDiff = self.clock.getTimezoneDiff(self.address)
        #WEATHER
        self.weather = weather(self.leftFrame,
                               text_color)  # create weather object in leftFrame
        #NEWS
        self.news = news(self.leftFrame, text_color)
        self.newsSources = self.news.getSources(
        )  #returns dictionary, dict[name] = id
        # self.newsSources = None
        self.newsOutlet = "cnn"  #default news source
        #INTERACTION TEXT
        self.speechText = speechText(self.leftFrame, text_color)
        self.speechText.pack(anchor=W, pady=100)
        #SPEECH
        self.queue = Queue()
        self.speech = mic_input_parser(self.queue, self.newsSources)
        #TRIP DISTANCE/DURATION
        self.trip = trip(self.leftFrame, text_color)
        #NEWSBOX
        #self.newsbox = newsBox(self.rightFrame, text_color)

        #SET NON-PINNED WIDGET LIST
        self.temp_widget_list = [
            self.direction, self.weather, self.news, self.trip
        ]  #, self.newsbox]
        self.pinned_widgets = []

    def update(self):
        """Updates widgets periodically or given user input."""
        #VOICE RECOGNITION QUEUE
        if not self.queue.empty():
            self.speechText.speechText.config(text="")
            command_type, command_val = self.queue.get()
            print command_type, command_val
            #SET MIRROR MOVEMENT DIRECTION
            if command_type == "direction":
                if command_val == "open":
                    self.direction.direction = 1
                    text = "open"
                elif command_val == "closed":
                    self.direction.direction = 0
                    text = "closed"
                self.direction.updateDirection(text)
                self.showWidget(self.direction)
                self.speechText.echoAction(command_type, command_val)
            #SET WEATHER LOCATION
            if command_type == "weather":
                self.address = command_val
                self.weather.updateWeather(self.address)
                self.showWidget(self.weather)
                self.speechText.echoAction(command_type, command_val)
            #SET NEWS SOURCE
            if command_type == "news":
                self.newsOutlet = command_val
                self.news.trendingNews.config(text=command_val)
                self.news.updateNews(self.newsOutlet)
                self.showWidget(self.news)
                self.speechText.echoAction(command_type, command_val)
            #SET TIMEZONE
            if command_type == "timezone":
                self.address = command_val
                self.timezoneDiff = self.clock.getTimezoneDiff(self.address)
                self.time = time.time() - 5 * 61  # backdate so the refresh below fires immediately
                self.speechText.echoAction(command_type, command_val)
            #SHOW TRIP
            if command_type == "trip":
                origin_address, final_address, travel_mode = command_val
                self.trip.setWidget(origin_address, final_address, travel_mode)
                self.showWidget(self.trip)
                self.speechText.echoAction(command_type, command_val)
            #SHOW NEWSBOX
            if command_type == "newsbox":
                search_term = command_val
                self.newsbox.produce_map(search_term)
                self.showWidget(self.newsbox)
            #PIN WIDGET
            if command_type == "add":
                if command_val == "weather":
                    self.pinWidget(self.weather)
                elif command_val == "news":
                    self.pinWidget(self.news)
                elif command_val == "trip":
                    self.pinWidget(self.trip)
                elif command_val == "direction":
                    self.pinWidget(self.direction)
            #UNPIN WIDGET
            if command_type == "remove":
                if command_val == "weather":
                    self.unPinWidget(self.weather)
                elif command_val == "news":
                    self.unPinWidget(self.news)
                elif command_val == "trip":
                    self.unPinWidget(self.trip)
                elif command_val == "direction":
                    self.unPinWidget(self.direction)
            #SHOW WIDGET
            if command_type == "show":
                if command_val == "weather":
                    self.showWidget(self.weather)
                elif command_val == "news":
                    self.showWidget(self.news)
                elif command_val == "trip":
                    self.showWidget(self.trip)
                elif command_val == "direction":
                    self.showWidget(self.direction)
            #HIDE WIDGET
            if command_type == "hide":
                if command_val == "weather":
                    self.hideWidget(self.weather)
                elif command_val == "news":
                    self.hideWidget(self.news)
                elif command_val == "trip":
                    self.hideWidget(self.trip)
                elif command_val == "direction":
                    self.hideWidget(self.direction)
            #COMMAND ASSISTANCE
            if command_type == "misheard":
                self.speechText.misheard(command_val)
        #WEATHER/NEWS UPDATE
        if time.time() - self.time > 5 * 60:  # if it's been 5 minutes, check weather again
            self.weather.updateWeather(self.address)
            #self.news.updateNews(self.newsOutlet)
            self.time = time.time()

        #DIRECTION UPDATE
        # self.direction.dirText.config(text = read_serial)

        #TIME UPDATE
        currentTime = self.clock.updateTime(self.timezoneDiff)

    def showWidget(self, new_widget):
        """Makes specified widget visible in "focus" spot.
    	Params: new_widget - widget object user wants to see
    	"""
        for widget in self.temp_widget_list:  #bc we don't have many widgets and this makes it so we don't have
            widget.pack_forget(
            )  #to remember the current visible widget which we now need to make invisible
        for widget in self.pinned_widgets:
            widget.pack(side=BOTTOM, anchor=SW)
        new_widget.pack(
            side=TOP, anchor=SW
        )  #this will also move a pinned widget up to the top right (focus)

    def hideWidget(self, selected_widget):
        """Makes specified widget invisible in "focus" spot.
    	Params: selected_widget - widget object user no longer wants to see
    	"""
        if selected_widget not in self.pinned_widgets:
            selected_widget.pack_forget()
        else:  #if it's in self.pinned_widgets
            selected_widget.pack(side=BOTTOM,
                                 anchor=SW)  #put pinned widget back at bottom

    def pinWidget(self,
                  pinned_widget):  #just removes it from temporary widget list
        """Adds specified widget to list of pinned widgets and shows it at the bottom right corner of screen until widget is actively removed.
    	Params: pinned_widget - widget object user wants to keep on the screen
    	"""
        if pinned_widget not in self.pinned_widgets:
            self.temp_widget_list.remove(pinned_widget)
            self.pinned_widgets.append(pinned_widget)
        pinned_widget.pack(
            side=BOTTOM,
            anchor=NW)  # put pinned widget at the bottom of the screen

    def unPinWidget(
            self,
            unpinned_widget):  #just adds it back to temporary widget list
        """Removes specified widget from list of pinned widgets and stops showing it at the bottom right corner of screen.
    	Params: unpinned_widget - widget object user wants to remove from the screen
    	"""
        if unpinned_widget not in self.temp_widget_list:
            self.temp_widget_list.append(unpinned_widget)
        unpinned_widget.pack_forget()
        self.pinned_widgets.remove(unpinned_widget)

    def escape(self, event):  #exit tkinter program
        """Destroys program given a keypress."""
        self.rootWin.destroy()

    def restartSpeech(self, event):  #exit tkinter program
        """Restarts speech thread (in case it freezes or something) given a keypress."""
        self.speech = mic_input_parser(self.queue, self.newsSources)
        print "restarted speech thread"
Example #56
0
class Parser(QObject, ParserCommon):
    """ Log File Parser.

    This class takes a stream of log messages and parses them on parseLog().
    A listener could be registered for parsing events like log records,
    exceptions and new keys.
    """

    printLogSignl = pyqtSignal(str, bool)

    def __init__(self, log_stream, events, log_id=None):
        super(Parser, self).__init__()
        self.train_dict_list = []
        self.test_dict_list = []
        self.log_stream = log_stream
        self.thread_id = None
        self.start_time = None
        self.log_id = log_id
        self.logging = False
        self.streams = Queue()
        self.lock = Lock()
        if self.log_stream:
            self.streams.put(self.log_stream)
        self.events = events
        if self.events is None:
            self.events = {}

    def printLog(self, log, error=False):
        if self.log_id is not None:
            if not error:
                Log.log(log, self.log_id)
            else:
                Log.error(log, self.log_id)
        else:
            self.printLogSignl.emit(log, error)

    def getTestEvents(self):
        """Return the list of parsed test records."""
        return self.test_dict_list

    def getTrainEvents(self):
        """Return the list of parsed training records."""
        return self.train_dict_list

    def getThreadID(self):
        """ Return the thread id of the caffe process.
        """
        return self.thread_id

    def addLogStream(self, log_stream):
        """ Add a log stream to to queue of parsing tasks.
        """
        if log_stream:
            self.streams.put(log_stream)

    def setLogging(self, log):
        self.logging = log

    def parseLog(self):
        """Parse log file.

        Returns (train_dict_list, test_dict_list)
        train_dict_list and test_dict_list are lists of dicts that define the
        table rows
        """
        locked = self.lock.acquire(False)  # non-blocking: skip if a parse is already running
        if locked is False:
            return self.train_dict_list, self.test_dict_list

        regex_iteration = re.compile(r'Iteration (\d+)')
        regex_train_output = re.compile(
            r'Train net output #(\d+): (\S+) = ([\.\deE+-]+)')
        regex_test_output = re.compile(
            r'Test net output #(\d+): (\S+) = ([\.\deE+-]+)')
        regex_learning_rate = re.compile(
            r'lr = ([-+]?[0-9]*\.?[0-9]+([eE]?[-+]?[0-9]+)?)')

        # Pick out lines of interest
        iteration = -1
        learning_rate = float('NaN')
        train_dict_list = self.train_dict_list
        test_dict_list = self.test_dict_list
        train_row = None
        test_row = None
        try:
            while self.streams.empty() is False:
                head = self.streams.get()
                log_file = None
                ht = type(head)
                if ht is unicode or ht is str:
                    # open file
                    log_file = open(head, 'r')
                    log_stream = log_file.readlines()
                else:
                    log_stream = head
                try:
                    logfile_year = exs.getLogCreatedYear(log_stream)
                    for line in log_stream:
                        line = line.strip()
                        if self.logging:
                            self.printLog(line)
                        if self.thread_id is None:
                            self.extractThreadID(line)
                        if self.start_time is None:
                            self.start_time = exs.getStartTime(
                                line, logfile_year)
                        self.parseEvent(line)

                        iteration_match = regex_iteration.search(line)
                        if iteration_match:
                            iteration = float(iteration_match.group(1))
                        if iteration == -1:
                            # Only start parsing for other stuff if we've
                            # found the first iteration
                            continue
                        if exs.isLogFormat(line) is False:
                            continue

                        if self.start_time is None:
                            self.start_time = exs.extractDatetimeFromLine(
                                line, logfile_year)
                        time = exs.extractDatetimeFromLine(line, logfile_year)
                        seconds = (time - self.start_time).total_seconds()

                        lr_match = regex_learning_rate.search(line)
                        if lr_match:
                            self.checkKey(Parser.TRAIN, 'LearningRate')
                            learning_rate = float(lr_match.group(1))

                        train_dict_list, train_row = self.parseLine(
                            regex_train_output, train_row, train_dict_list,
                            line, iteration, seconds, learning_rate, time,
                            Parser.TRAIN)
                        test_dict_list, test_row = self.parseLine(
                            regex_test_output, test_row, test_dict_list, line,
                            iteration, seconds, learning_rate, time,
                            Parser.TEST)
                except Exception as e:
                    self.printLog('Parser error: ' + str(e))
                finally:
                    if log_file:
                        log_file.close()
        except Exception as e:
            if self.log_id:
                self.printLog('Failed to parse log ' + str(e), True)
            else:
                print('Failed to parse log ' + str(e))
        finally:
            if locked:
                self.lock.release()
        for lis in self.listener:
            lis.parsingFinished()
        return train_dict_list, test_dict_list

    def parseLine(self, regex_obj, row, row_dict_list, line, iteration,
                  seconds, learning_rate, time, phase):
        """Parse a single line for training or test output.

        Returns a tuple with (row_dict_list, row)
        row: may be either a new row or an augmented version of the current row
        row_dict_list: may be either the current row_dict_list or an augmented
        version of the current row_dict_list
        """

        output_match = regex_obj.search(line)
        if output_match:
            if not row or row['NumIters'] != iteration:
                # Push the last row and start a new one
                if row:
                    # If we're on a new iteration, push the last row
                    # This will probably only happen for the first row;
                    # otherwise the full-row checking logic below will push and
                    # clear full rows
                    row_dict_list.append(row)

                row = OrderedDict([('NumIters', iteration),
                                   ('Seconds', seconds),
                                   ('LearningRate', learning_rate),
                                   ('DateTime', time)])

            # output_num is not used; may be used in the future
            # output_num = output_match.group(1)
            output_name = output_match.group(2)
            output_val = output_match.group(3)
            row[output_name] = float(output_val)
            self.checkKey(phase, output_name)
        # push the row to the list once it is full
        if (row and len(row_dict_list) >= 1
                and len(row) == len(row_dict_list[0])):
            # fix the learning rate of the first row
            if len(row_dict_list) == 1:
                row_dict_list[0]['LearningRate'] = row['LearningRate']
                for lis in self.listener:
                    lis.update(phase, row_dict_list[0])
            # The row is full, based on the fact that it has the same number of
            # columns as the first row; append it to the list
            row_dict_list.append(row)
            # notify the listener about the new row
            for lis in self.listener:
                lis.update(phase, row)
            row = None

        return row_dict_list, row

    def parseEvent(self, line):
        """Parse the line for events.

        Notifies the listener about every event found.
        """
        for event, regex in self.events.iteritems():
            event_match = regex.search(line)
            if event_match:
                size = len(event_match.groups())
                groups = []
                for i in range(0, size):
                    groups.append(event_match.group(i + 1))
                for lis in self.listener:
                    lis.handle(event, line, groups)

    def extractThreadID(self, line):
        """ Extract the thread id from the file.
        """
        regex_thread_id = re.compile(
            r'[IWEF][\d]{4} [\d]{2}:[\d]{2}:[\d]{2}\.[\d]{6}[\s]+([\d]+)')
        line = line.strip()
        thread_id_match = regex_thread_id.search(line)
        if thread_id_match:
            self.thread_id = thread_id_match.group(1)

    def checkKey(self, phase, key):
        """ Check wheter the key is new and notify the listener in this case.
        """
        if key == "lr":
            key = "LearningRate"
        key_registry = self.getKeys(phase)
        if key not in key_registry:
            key_registry.append(key)
            for lis in self.listener:
                lis.registerKey(phase, key)

    def hasKey(self, phase, key):
        """ Return true if the key was registered for the phase.
        """
        return key in self.getKeys(phase)
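# Quick self-contained check of the log regexes used above (the sample line is
# illustrative Caffe-style output, not taken from a real log):
import re

sample = 'Iteration 100, Train net output #0: loss = 0.0251'
print(re.search(r'Iteration (\d+)', sample).group(1))            # '100'
print(re.search(r'Train net output #(\d+): (\S+) = ([\.\deE+-]+)',
                sample).groups())                                # ('0', 'loss', '0.0251')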
Example #57
0
class RLHouse:
    """Provide an insecure sandbox to run commands in for a RL agent.

    The sandbox class is used to invoke arbitrary shell commands.
    This class provides the same interface as the secure Sandbox but doesn't
    provide any actual security or require any special system setup.

    """
    def __init__(self, working_directory, shell_command):
        """Initialize a new sandbox for the given working directory.

        working_directory: the directory in which the shell command should
                           be launched.
        """
        self._is_alive = False
        self.command_process = None
        self.stdout_queue = Queue()
        self.stderr_queue = Queue()
        self.working_directory = working_directory

        self.start_init(shell_command)

    @property
    def is_alive(self):
        """Indicates whether a command is currently running in the sandbox"""
        if self._is_alive:
            sub_result = self.command_process.poll()
            if sub_result is None:
                return True
            self.child_queue.put(None)
            self._is_alive = False
        return False

    def start_init(self, shell_command):
        """Start a command running in the sandbox"""
        if self.is_alive:
            raise SandboxError("Tried to run command with one in progress.")
        working_directory = self.working_directory
        self.child_queue = Queue()
        shell_command = shlex.split(shell_command.replace('\\', '/'))
        try:
            self.command_process = subprocess.Popen(shell_command,
                                                    stdin=subprocess.PIPE,
                                                    stdout=subprocess.PIPE,
                                                    stderr=subprocess.PIPE,
                                                    universal_newlines=True,
                                                    cwd=working_directory)
        except OSError:
            raise SandboxError('Failed to start {0}'.format(shell_command))
        self._is_alive = True
        stdout_monitor = Thread(target=_monitor_file,
                                args=(self.command_process.stdout,
                                      self.stdout_queue))
        stdout_monitor.daemon = True
        stdout_monitor.start()
        stderr_monitor = Thread(target=_monitor_file,
                                args=(self.command_process.stderr,
                                      self.stderr_queue))
        stderr_monitor.daemon = True
        stderr_monitor.start()
        Thread(target=self._child_writer).start()

    def start(self, shell_command):
        """
        A dummy method for the engine to call.

        """
        pass

    def kill_real(self):
        """Stops the sandbox.

        Shuts down the sandbox, cleaning up any spawned processes, threads, and
        other resources. The shell command running inside the sandbox may be
        suddenly terminated.

        """
        if self.is_alive:
            try:
                self.command_process.kill()
            except OSError:
                pass
            self.command_process.wait()
            self.child_queue.put(None)

    def kill(self):
        """
        Dummy method for engine.

        """
        pass

    def retrieve(self):
        """Copy the working directory back out of the sandbox."""
        if self.is_alive:
            raise SandboxError("Tried to retrieve sandbox while still alive")
        pass

    def release(self):
        """Release the sandbox for further use
        Dummy method, not needed for our purposes.

        If running in a jail unlocks and releases the jail for reuse by others.
        Must be called exactly once after Sandbox.kill has been called.

        """
        pass

    def pause(self):
        """Pause the process by sending a SIGSTOP to the child

        A limitation of the method is it will only pause the initial
        child process created any further (grandchild) processes created
        will not be paused.

        This method is a no-op on Windows.
        """
        try:
            self.command_process.send_signal(signal.SIGSTOP)
        except (ValueError, AttributeError, OSError):
            pass

    def resume(self):
        """Resume the process by sending a SIGCONT to the child

        This method is a no-op on Windows
        """
        try:
            self.command_process.send_signal(signal.SIGCONT)
        except (ValueError, AttributeError, OSError):
            pass

    def _child_writer(self):
        queue = self.child_queue
        stdin = self.command_process.stdin
        while True:
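            # block for the next line to write; None is the shutdown
            # sentinel posted by is_alive and kill_real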
            ln = queue.get()
            if ln is None:
                break
            try:
                stdin.write(ln)
                stdin.flush()
            except (OSError, IOError):
                self.kill()
                break

    def write(self, data):
        """Write data to stdin of the process being run"""
        if not self.is_alive:
            return False
        self.child_queue.put(data)

    def write_line(self, line):
        """Write line to stdin of the process being run

        A newline is appended to line and written to stdin of the child process

        """
        if not self.is_alive:
            return False
        self.child_queue.put(line + "\n")

    def read_line(self, timeout=0):
        """Read line from child process

        Returns a line of the child process' stdout; if one isn't available
        within timeout seconds it returns None. Also guaranteed to return None
        at least once after each command that is run in the sandbox.

        """
        if not self.is_alive:
            timeout = 0
        try:
            return self.stdout_queue.get(block=True, timeout=timeout)
        except Empty:
            return None

    def read_error(self, timeout=0):
        """Read line from child process' stderr

        Returns a line of the child process' stderr; if one isn't available
        within timeout seconds it returns None. Also guaranteed to return None
        at least once after each command that is run in the sandbox.

        """
        if not self.is_alive:
            timeout = 0
        try:
            return self.stderr_queue.get(block=True, timeout=timeout)
        except Empty:
            return None

    def check_path(self, path, errors):
        resolved_path = os.path.join(self.working_directory, path)
        if not os.path.exists(resolved_path):
            errors.append("Output file " + str(path) + " was not created.")
            return False
        else:
            return True
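

# _monitor_file, used by start_init above, is not shown in this example.
# A minimal sketch of what it plausibly does (an assumption, not the
# project's actual helper): pump lines from the child's pipe into a
# queue, and post the None sentinel that read_line/read_error promise
# once the stream closes.
def _monitor_file(fd, q):
    while True:
        line = fd.readline()
        if not line:
            q.put(None)  # EOF: guarantees readers see at least one None
            break
        q.put(line.rstrip('\r\n'))

# Typical driving pattern for the sandbox above (the class name, paths,
# and constructor argument order here are placeholders):
#
#     box = Sandbox('/tmp/work', 'python my_bot.py')
#     box.write_line('go')
#     line = box.read_line(timeout=1)
#     box.kill_real()
#     box.retrieve()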
Example #58
0
def bench(args):
    config_dir = '{0}/{1}'.format(args.dir, args.bench_name)
    dckr_net_name = args.docker_network_name or args.bench_name + '-br'

    for target_class in [
            BIRDTarget, GoBGPTarget, QuaggaTarget, FRRoutingTarget
    ]:
        if ctn_exists(target_class.CONTAINER_NAME):
            print 'removing target container', target_class.CONTAINER_NAME
            dckr.remove_container(target_class.CONTAINER_NAME, force=True)

    if not args.repeat:
        if ctn_exists(Monitor.CONTAINER_NAME):
            print 'removing monitor container', Monitor.CONTAINER_NAME
            dckr.remove_container(Monitor.CONTAINER_NAME, force=True)

        for ctn_name in get_ctn_names():
            if ctn_name.startswith(ExaBGPTester.CONTAINER_NAME_PREFIX) or \
                ctn_name.startswith(ExaBGPMrtTester.CONTAINER_NAME_PREFIX) or \
                ctn_name.startswith(GoBGPMRTTester.CONTAINER_NAME_PREFIX):
                print 'removing tester container', ctn_name
                dckr.remove_container(ctn_name, force=True)

        if os.path.exists(config_dir):
            shutil.rmtree(config_dir)

    if args.file:
        with open(args.file) as f:
            conf = yaml.load(Template(f.read()).render())
    else:
        conf = gen_conf(args)
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)
        with open('{0}/scenario.yaml'.format(config_dir), 'w') as f:
            f.write(conf)
        conf = yaml.load(Template(conf).render())

    bridge_found = False
    for network in dckr.networks(names=[dckr_net_name]):
        if network['Name'] == dckr_net_name:
            print 'Docker network "{}" already exists'.format(dckr_net_name)
            bridge_found = True
            break
    if not bridge_found:
        subnet = conf['local_prefix']
        print 'creating Docker network "{}" with subnet {}'.format(
            dckr_net_name, subnet)
        ipam = IPAMConfig(pool_configs=[IPAMPool(subnet=subnet)])
        network = dckr.create_network(dckr_net_name,
                                      driver='bridge',
                                      ipam=ipam)

    num_tester = sum(
        len(t.get('neighbors', [])) for t in conf.get('testers', []))
    if num_tester > gc_thresh3():
        print 'gc_thresh3({0}) is lower than the number of peers ({1})'.format(
            gc_thresh3(), num_tester)
        print 'run the following command to increase the value'
        print '$ echo 16384 | sudo tee /proc/sys/net/ipv4/neigh/default/gc_thresh3'

    print 'run monitor'
    m = Monitor(config_dir + '/monitor', conf['monitor'])
    m.run(conf, dckr_net_name)

    is_remote = bool(conf['target'].get('remote'))

    if is_remote:
        print 'target is remote ({})'.format(conf['target']['local-address'])

        ip = IPRoute()

        # r: route to the target
        r = ip.get_routes(dst=conf['target']['local-address'], family=AF_INET)
        if len(r) == 0:
            print 'no route to remote target {0}'.format(
                conf['target']['local-address'])
            sys.exit(1)

        # intf: interface used to reach the target
        idx = [t[1] for t in r[0]['attrs'] if t[0] == 'RTA_OIF'][0]
        intf = ip.get_links(idx)[0]
        intf_name = intf.get_attr('IFLA_IFNAME')

        # raw_bridge_name: Linux bridge name of the Docker bridge
        # TODO: not sure if the linux bridge name is always given by
        #       "br-<first 12 characters of Docker network ID>".
        raw_bridge_name = args.bridge_name or 'br-{}'.format(
            network['Id'][0:12])

        # raw_bridges: list of Linux bridges that match raw_bridge_name
        raw_bridges = ip.link_lookup(ifname=raw_bridge_name)
        if len(raw_bridges) == 0:
            if not args.bridge_name:
                print(
                    'can\'t determine the Linux bridge interface name starting '
                    'from the Docker network {}'.format(dckr_net_name))
            else:
                print('the Linux bridge name provided ({}) seems nonexistent'.
                      format(raw_bridge_name))
            print(
                'Since the target is remote, the host interface used to '
                'reach the target ({}) must be part of the Linux bridge '
                'used by the Docker network {}, but without the correct Linux '
                'bridge name it\'s impossible to verify if that\'s true'.
                format(intf_name, dckr_net_name))
            if not args.bridge_name:
                print(
                    'Please supply the Linux bridge name corresponding to the '
                    'Docker network {} using the --bridge-name argument.'.
                    format(dckr_net_name))
            sys.exit(1)

        # intf_bridge: bridge interface that intf is already member of
        intf_bridge = intf.get_attr('IFLA_MASTER')

        # if intf is not member of the bridge, add it
        if intf_bridge not in raw_bridges:
            if intf_bridge is None:
                print(
                    'Since the target is remote, the host interface used to '
                    'reach the target ({}) must be part of the Linux bridge '
                    'used by the Docker network {}'.format(
                        intf_name, dckr_net_name))
                sys.stdout.write('Do you confirm to add the interface {} '
                                 'to the bridge {}? [yes/NO] '.format(
                                     intf_name, raw_bridge_name))
                try:
                    answer = raw_input()
                except (EOFError, KeyboardInterrupt):
                    print 'aborting'
                    sys.exit(1)
                answer = answer.strip()
                if answer.lower() != 'yes':
                    print 'aborting'
                    sys.exit(1)

                print 'adding interface {} to the bridge {}'.format(
                    intf_name, raw_bridge_name)
                br = raw_bridges[0]

                try:
                    ip.link('set', index=idx, master=br)
                except Exception as e:
                    print('Something went wrong: {}'.format(str(e)))
                    print(
                        'Please consider running the following command to '
                        'add the {iface} interface to the {br} bridge:\n'
                        '   sudo brctl addif {br} {iface}'.format(
                            iface=intf_name, br=raw_bridge_name))
                    print('\n\n\n')
                    raise
            else:
                curr_bridge_name = ip.get_links(intf_bridge)[0].get_attr(
                    'IFLA_IFNAME')
                print(
                    'the interface used to reach the target ({}) '
                    'is already member of the bridge {}, which is not '
                    'the one used in this configuration'.format(
                        intf_name, curr_bridge_name))
                print(
                    'Please consider running the following command to '
                    'remove the {iface} interface from the {br} bridge:\n'
                        '   sudo brctl delif {br} {iface}'.format(
                        iface=intf_name, br=curr_bridge_name))
                sys.exit(1)
    else:
        if args.target == 'gobgp':
            target_class = GoBGPTarget
        elif args.target == 'bird':
            target_class = BIRDTarget
        elif args.target == 'quagga':
            target_class = QuaggaTarget
        elif args.target == 'frr':
            target_class = FRRoutingTarget
        else:
            print 'invalid target:', args.target
            sys.exit(1)

        print 'run', args.target
        if args.image:
            target = target_class('{0}/{1}'.format(config_dir, args.target),
                                  conf['target'],
                                  image=args.image)
        else:
            target = target_class('{0}/{1}'.format(config_dir, args.target),
                                  conf['target'])
        target.run(conf, dckr_net_name)

    time.sleep(1)

    print 'waiting for bgp connection between {0} and monitor'.format(args.target)
    m.wait_established(conf['target']['local-address'])

    if not args.repeat:
        for idx, tester in enumerate(conf['testers']):
            if 'name' not in tester:
                name = 'tester{0}'.format(idx)
            else:
                name = tester['name']
            if 'type' not in tester:
                tester_type = 'normal'
            else:
                tester_type = tester['type']
            if tester_type == 'normal':
                tester_class = ExaBGPTester
            elif tester_type == 'mrt':
                if 'mrt_injector' not in tester:
                    mrt_injector = 'gobgp'
                else:
                    mrt_injector = tester['mrt_injector']
                if mrt_injector == 'gobgp':
                    tester_class = GoBGPMRTTester
                elif mrt_injector == 'exabgp':
                    tester_class = ExaBGPMrtTester
                else:
                    print 'invalid mrt_injector:', mrt_injector
                    sys.exit(1)
            else:
                print 'invalid tester type:', tester_type
                sys.exit(1)
            t = tester_class(name, config_dir + '/' + name, tester)
            print 'run tester', name, 'type', tester_type
            t.run(conf['target'], dckr_net_name)

    start = datetime.datetime.now()

    q = Queue()

    m.stats(q)
    if not is_remote:
        target.stats(q)

    def mem_human(v):
        if v > 1000 * 1000 * 1000:
            return '{0:.2f}GB'.format(float(v) / (1000 * 1000 * 1000))
        elif v > 1000 * 1000:
            return '{0:.2f}MB'.format(float(v) / (1000 * 1000))
        elif v > 1000:
            return '{0:.2f}KB'.format(float(v) / 1000)
        else:
            return '{0:.2f}B'.format(float(v))

    f = open(args.output, 'w') if args.output else None
    cpu = 0
    mem = 0
    cooling = -1
    while True:
        info = q.get()

        if not is_remote and info['who'] == target.name:
            cpu = info['cpu']
            mem = info['mem']

        if info['who'] == m.name:
            now = datetime.datetime.now()
            elapsed = now - start
            recved = info['state']['adj-table'][
                'accepted'] if 'accepted' in info['state']['adj-table'] else 0
            if elapsed.seconds > 0:
                rm_line()
            print 'elapsed: {0}sec, cpu: {1:>4.2f}%, mem: {2}, recved: {3}'.format(
                elapsed.seconds, cpu, mem_human(mem), recved)
            if f:
                f.write('{0}, {1}, {2}, {3}\n'.format(elapsed.seconds, cpu,
                                                      mem, recved))
                f.flush()

            if cooling == args.cooling:
                if f:
                    f.close()
                return

            if cooling >= 0:
                cooling += 1

            if info['checked']:
                cooling = 0
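

# gc_thresh3() and rm_line() are called above but are not defined in
# this example. Plausible sketches under stated assumptions (these are
# not the project's actual helpers): gc_thresh3 reads the kernel
# neighbour-table limit referenced in the warning, and rm_line erases
# the previously printed stats line with ANSI escapes so each update
# overwrites the last.
def gc_thresh3():
    with open('/proc/sys/net/ipv4/neigh/default/gc_thresh3') as f:
        return int(f.read().strip())


def rm_line():
    # 'sys' is already imported at module level in this example
    sys.stdout.write('\x1b[1A\x1b[2K')  # cursor up one line, then erase it
    sys.stdout.flush()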
Example #59
0
#pnum = int(raw_input('Enter the number of download pages:'))
num = 20
pnum = 100
#mainpage = str(raw_input('The mainpage:'))
#startpage = str(raw_input('Start page:'))
mainpage = 'http://www.cau.edu.cn'
startpage = 'http://www.cau.edu.cn'
queue = Queue()

key = Queue()
inqueue = Queue()
url_list = Newlist()
thlist = []
Flock = threading.RLock()


for i in range(num):
    # create a worker thread and add it to the pool
    th = reptile('s' + str(i), queue, url_list, key, Flock)
    thlist.append(th)
#pro = proinsight(key, url_list, mainpage, inqueue)
#pro.start()
for i in thlist:
    i.start()
queue.put(startpage)
for i in range(pnum):
    queue.put(inqueue.get())
for i in range(num + 10):
    queue.put(None)
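

# reptile, Newlist, and proinsight are not defined in this snippet. The
# class below is a hypothetical stand-in for the worker thread, shown
# only to illustrate the pattern the snippet relies on: each worker
# pulls a URL from the shared queue, fetches the page, appends the
# result under the lock, and exits on the None sentinel posted above.
import threading
import urllib2


class ReptileSketch(threading.Thread):
    def __init__(self, name, queue, url_list, key, lock):
        threading.Thread.__init__(self, name=name)
        self.queue = queue        # URLs to fetch
        self.url_list = url_list  # shared results (Newlist in the original)
        self.key = key            # queue feeding the proinsight parser
        self.lock = lock          # Flock in the snippet above

    def run(self):
        while True:
            url = self.queue.get()
            if url is None:  # shutdown sentinel
                break
            try:
                page = urllib2.urlopen(url, timeout=10).read()
            except Exception:
                continue
            with self.lock:
                self.url_list.append((url, page))
            self.key.put(page)  # hand the page to the parser thread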


Example #60
0
class Jail(object):
    """ Provide a secure sandbox to run arbitrary commands in.

    This will only function on specially prepared Ubuntu systems.

    """
    def __init__(self, working_directory):
        """Initialize a new sandbox for the given working directory.

        working_directory: the directory in which the shell command should
                           be launched. Files from this directory are copied
                           into the secure space before the shell command is
                           executed.
        """
        self.locked = False
        jail_base = "/srv/chroot"
        all_jails = os.listdir(jail_base)
        all_jails = [j for j in all_jails if j.startswith("jailuser")]
        for jail in all_jails:
            lock_dir = os.path.join(jail_base, jail, "locked")
            try:
                os.mkdir(lock_dir)
            except OSError:
                # if the directory could not be created, that should mean the
                # jail is already locked and in use
                continue
            with open(os.path.join(lock_dir, "lock.pid"), "w") as pid_file:
                pid_file.write(str(os.getpid()))
            self.locked = True
            self.name = jail
            break
        else:
            raise SandboxError("Could not find an unlocked jail")
        self.jchown = os.path.join(server_info["repo_path"], "worker/jail_own")
        self.base_dir = os.path.join(jail_base, jail)
        self.number = int(jail[len("jailuser"):])
        self.chroot_cmd = "sudo -u {0} schroot -u {0} -c {0} -d {1} -- jailguard.py ".format(
            self.name, "/home/jailuser")

        self._is_alive = False
        self.command_process = None
        self.resp_queue = Queue()
        self.stdout_queue = Queue()
        self.stderr_queue = Queue()
        self._prepare_with(working_directory)

    def __del__(self):
        if self.locked:
            raise SandboxError(
                "Jail object for %s freed without being released" %
                (self.name))

    @property
    def is_alive(self):
        """Indicates whether a command is currently running in the sandbox"""
        if self._is_alive:
            sub_result = self.command_process.poll()
            if sub_result is None:
                return True
            self._is_alive = False
        return False

    def release(self):
        """Release the sandbox for further use

        Unlocks and releases the jail for reuse by others.
        Must be called exactly once after Jail.is_alive == False.

        """
        if self.is_alive:
            raise SandboxError("Sandbox released while still alive")
        if not self.locked:
            raise SandboxError(
                "Attempt to release jail that is already unlocked")
        if os.system("sudo umount %s" %
                     (os.path.join(self.base_dir, "root"), )):
            raise SandboxError("Error returned from umount of jail %d" %
                               (self.number, ))
        lock_dir = os.path.join(self.base_dir, "locked")
        pid_filename = os.path.join(lock_dir, "lock.pid")
        with open(pid_filename, 'r') as pid_file:
            lock_pid = int(pid_file.read())
            if lock_pid != os.getpid():
                # if we ever get here something has gone seriously wrong
                # most likely the jail locking mechanism has failed
                raise SandboxError(
                    "Jail released by different pid, name %s, lock_pid %d, release_pid %d"
                    % (self.name, lock_pid, os.getpid()))
        os.unlink(pid_filename)
        os.rmdir(lock_dir)
        self.locked = False

    def _prepare_with(self, command_dir):
        if os.system("%s c %d" % (self.jchown, self.number)) != 0:
            raise SandboxError("Error returned from jail_own c %d in prepare" %
                               (self.number, ))
        scratch_dir = os.path.join(self.base_dir, "scratch")
        if os.system("rm -rf %s" % (scratch_dir, )) != 0:
            raise SandboxError(
                "Could not remove old scratch area from jail %d" %
                (self.number, ))
        home_dir = os.path.join(scratch_dir, "home/jailuser")
        os.makedirs(os.path.join(scratch_dir, "home"))
        if os.system("cp -r %s %s" % (command_dir, home_dir)) != 0:
            raise SandboxError(
                "Error copying working directory '%s' to jail %d" %
                (command_dir, self.number))
        if os.system("sudo mount %s" %
                     (os.path.join(self.base_dir, "root"), )):
            raise SandboxError("Error returned from mount of %d in prepare" %
                               (self.number, ))
        if os.system("%s j %d" % (self.jchown, self.number)) != 0:
            raise SandboxError("Error returned from jail_own j %d in prepare" %
                               (self.number, ))
        self.home_dir = home_dir
        self.command_dir = command_dir

    def retrieve(self):
        """Copy the working directory back out of the sandbox."""
        if self.is_alive:
            raise SandboxError("Tried to retrieve sandbox while still alive")
        os.system("rm -rf %s" % (self.command_dir, ))
        if os.system("%s c %d" % (self.jchown, self.number)) != 0:
            raise SandboxError("Error returned from jail_own c %d in prepare" %
                               (self.number, ))
        os.system("cp -r %s %s" % (self.home_dir, self.command_dir))

    def start(self, shell_command):
        """Start a command running in the sandbox"""
        if self.is_alive:
            raise SandboxError("Tried to run command with one in progress.")
        shell_command = self.chroot_cmd + shell_command
        shell_command = shlex.split(shell_command.replace('\\', '/'))
        try:
            self.command_process = subprocess.Popen(shell_command,
                                                    stdin=subprocess.PIPE,
                                                    stdout=subprocess.PIPE)
        except OSError:
            raise SandboxError('Failed to start {0}'.format(shell_command))
        self._is_alive = True
        monitor = Thread(target=_guard_monitor, args=(self, ))
        monitor.daemon = True
        monitor.start()

    def _signal(self, sig):
        if not self.locked:
            raise SandboxError("Attempt to send %s to unlocked jail" %
                               (sig, ))
        result = subprocess.call("sudo -u {0} kill -{1} -1".format(
            self.name, sig),
                                 shell=True)
        if result != 0:
            raise SandboxError(
                "Error returned from jail %s sending signal %s" %
                (self.name, sig))

    def kill(self):
        """Stops the sandbox.

        Shuts down the sandbox, cleaning up any spawned processes, threads, and
        other resources. The shell command running inside the sandbox may be
        suddenly terminated.

        """
        try:
            self.command_process.stdin.write("KILL\n")
            self.command_process.stdin.flush()
        except IOError as exc:
            if exc.errno != 32:
                raise
        try:
            item = self.resp_queue.get(timeout=5)
            if item[1] != "KILL" and item[1] is not None:
                raise SandboxError(
                    "Bad response from jailguard after kill, %s" % (item, ))
        except Empty:
            pass
        self._signal("CONT")
        for i in range(20):
            if self.command_process.poll() is not None:
                break
            if i == 10:
                self._signal("KILL")
            time.sleep(0.1)

        # final check to make sure processes have died; raise an error if not
        if self.is_alive:
            raise SandboxError("Could not kill sandbox children")

    def pause(self):
        """Pause the process by sending a SIGSTOP to the child"""
        try:
            self.command_process.stdin.write("STOP\n")
            self.command_process.stdin.flush()
        except IOError as exc:
            if exc.errno == 32:  # Broken pipe, guard exited
                return
            raise
        item = self.resp_queue.get()
        if item[1] != "STOP" and item[1] is not None:
            raise SandboxError("Bad response from jailguard after pause, %s" %
                               (item, ))

    def resume(self):
        """Resume the process by sending a SIGCONT to the child"""
        try:
            self.command_process.stdin.write("CONT\n")
            self.command_process.stdin.flush()
        except IOError as exc:
            if exc.errno == 32:  # Broken pipe, guard exited
                return
            raise
        item = self.resp_queue.get()
        if item[1] != "CONT" and item[1] is not None:
            raise SandboxError("Bad response from jailguard after resume, %s" %
                               (item, ))

    def write(self, data):
        """Write str to stdin of the process being run"""
        for line in data.splitlines():
            self.write_line(line)

    def write_line(self, line):
        """Write line to stdin of the process being run

        A newline is appended to line and written to stdin of the child process

        """
        if not self.is_alive:
            return False
        try:
            self.command_process.stdin.write("SEND %s\n" % (line, ))
            self.command_process.stdin.flush()
        except (OSError, IOError):
            self.kill()

    def read_line(self, timeout=0):
        """Read line from child process

        Returns a line of the child process' stdout; if one isn't available
        within timeout seconds it returns None. Also guaranteed to return None
        at least once after each command that is run in the sandbox.

        """
        if not self.is_alive:
            timeout = 0
        try:
            _time, line = self.stdout_queue.get(block=True, timeout=timeout)
            return line
        except Empty:
            return None

    def read_error(self, timeout=0):
        """Read line from child process' stderr

        Returns a line of the child process' stderr; if one isn't available
        within timeout seconds it returns None. Also guaranteed to return None
        at least once after each command that is run in the sandbox.

        """
        if not self.is_alive:
            timeout = 0
        try:
            _time, line = self.stderr_queue.get(block=True, timeout=timeout)
            return line
        except Empty:
            return None

    def check_path(self, path, errors):
        resolved_path = os.path.join(self.home_dir, path)
        if not os.path.exists(resolved_path):
            errors.append("Output file " + str(path) + " was not created.")
            return False
        else:
            return True
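

# _guard_monitor, started by Jail.start above, is not shown in this
# example. The sketch below is an assumption about the jailguard.py
# protocol (the STDOUT/STDERR tags are guesses), shown only to
# illustrate the pattern implied by the queues: route timestamped lines
# from the guard into the stdout, stderr, and response queues, and post
# None sentinels when the guard exits.
def _guard_monitor(jail):
    while True:
        line = jail.command_process.stdout.readline()
        if not line:
            # guard exited: unblock any readers with sentinels
            jail.resp_queue.put((time.time(), None))
            jail.stdout_queue.put((time.time(), None))
            jail.stderr_queue.put((time.time(), None))
            break
        parts = line.rstrip('\r\n').split(None, 1)
        if not parts:
            continue
        tag = parts[0]
        payload = parts[1] if len(parts) > 1 else ''
        if tag == 'STDOUT':
            jail.stdout_queue.put((time.time(), payload))
        elif tag == 'STDERR':
            jail.stderr_queue.put((time.time(), payload))
        else:
            # signal acknowledgements such as KILL/STOP/CONT
            jail.resp_queue.put((time.time(), payload))

# Typical Jail lifecycle, as implied by the methods above (paths and the
# command are placeholders):
#
#     jail = Jail('/tmp/my_bot')        # locks and prepares a free chroot
#     jail.start('python MyBot.py')     # runs via schroot + jailguard.py
#     jail.write_line('go')
#     line = jail.read_line(timeout=1)
#     jail.kill()                       # ask jailguard to kill children
#     jail.retrieve()                   # copy the working dir back out
#     jail.release()                    # unmount and unlock the jail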