Example #1
    class EndPoint(object):
        '''
        Representation of a stream's endpoint.
        '''
        def __init__(self, channel, idx):
            self._channel = channel
            self._i = idx
            self._queue = Queue()
            
        def receive(self, block = True):
            try:
                if self._queue.empty():
#                    print 'Updating for end point: [%d]' % (self._i)
                    self._channel._update(self._i)
            except StreamClosedException:
                if self._queue.empty():
                    raise
                else:
                    pass
#            print 'Returning for end point: [%d]' % (self._i)
            return self._queue.get(block)

        def processed(self):
            self._queue.task_done()

        def send(self, r):
            self._queue.put(r)
Example #2
    def test_create_cell_recalculator_should(self, mock_recalculate):
        unrecalculated_queue = Queue()
        unrecalculated_queue.put(1)
        unrecalculated_queue.put(1)
        unrecalculated_queue.task_done = Mock()

        leaf_queue = Queue()
        leaf_queue.put(sentinel.one)
        leaf_queue.put(sentinel.two)
        leaf_queue.task_done = Mock()

        target = create_cell_recalculator(leaf_queue, unrecalculated_queue, sentinel.graph, sentinel.context)
        target()

        self.assertTrue(unrecalculated_queue.empty())
        self.assertEquals(
            unrecalculated_queue.task_done.call_args_list,
            [ ((), {}), ((), {}), ]
        )

        self.assertTrue(leaf_queue.empty())

        self.assertEquals(
            mock_recalculate.call_args_list,
            [
                ((sentinel.one, leaf_queue, sentinel.graph, sentinel.context), {}),
                ((sentinel.two, leaf_queue, sentinel.graph, sentinel.context), {})
            ]
        )

        self.assertEquals(
            leaf_queue.task_done.call_args_list,
            [ ((), {}), ((), {}), ]
        )
Example #3
def start_bruteforce():
    global session
    global thread_lock
    queue = Queue(0)
    start_threads_with_args(crack, 15, queue)
    print"[!] Trying fast bruteforce..."
    for x in range(0, 1000):
        if thread_lock:
            break
        queue.put("123abc456def789%03d" % x)
    while True:
        if session != "":
            return session
        if queue.empty():
            break
    print "[!] Trying slow bruteforce..."
    for milliseconds in range(0, how_many):
        if thread_lock:
            break
        queue.put("123abc456def789%s" % (start + milliseconds))
    while True:
        if session != "":
            return session
        if queue.empty():
            break
    return session
Example #4
    def levelOrder(self, root):
        if root is None:
            return []

        rst = []
        curLevelRst = []

        curLevelQueue = Queue()
        nextLevelQueue = Queue()

        curLevelQueue.put(root)
        while not curLevelQueue.empty():
            treenode = curLevelQueue.get()
            curLevelRst.append(treenode.val)
            if treenode.left is not None:
                nextLevelQueue.put(treenode.left)
            if treenode.right is not None:
                nextLevelQueue.put(treenode.right)

            if curLevelQueue.empty():
                temp = curLevelQueue
                curLevelQueue = nextLevelQueue
                nextLevelQueue = temp

                rst.insert(0, curLevelRst)
                curLevelRst = []

        return rst
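
A minimal driver sketch for the method above, assuming it sits on a LeetCode-style Solution class and that nodes expose val/left/right (both assumptions; neither is shown in the snippet). Because each finished level is pushed onto the front of rst with insert(0, ...), the result comes out bottom-up:

    # Hypothetical minimal node type; levelOrder only touches .val/.left/.right
    class TreeNode(object):
        def __init__(self, val, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right

    #     3
    #    / \
    #   9  20
    #      / \
    #     15  7
    root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    # Solution().levelOrder(root) == [[15, 7], [9, 20], [3]]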
Example #5
class UserEventLogZODBHandler(logging.Handler):
    """ Python logging handler to store log records into ZODB UserEventLog containers """

    def __init__(self):
        logging.Handler.__init__(self)
        self.queue = Queue(16)
        self.setFormatter(logging.Formatter('%(asctime)s'))

    def emit(self, record):
        if not getattr(record, 'username', None):
            return

        self.format(record)
        assert hasattr(record, 'asctime'), str(record)
        self.queue.put(record)

        @db.transact
        def flush():
            eventlog = db.get_root()['oms_root']['eventlog']
            try:
                while True:
                    if self.queue.empty():
                        break
                    record = self.queue.get_nowait()
                    eventlog.add_event(record)
            except Empty:
                pass

        if not self.queue.empty():
            d = flush()
            d.addErrback(log.err, system='usereventlog-handler')
Example #6
class Bus(object):

    def __init__(self, bus_url):
        self._bus_url = bus_url
        self._messages = Queue()

    def start(self):
        self._start_listening_thread()

    def stop(self):
        self._consumer.should_stop = True
        self._bus_thread.join()
        while not self._messages.empty():
            self._messages.get_nowait()

    def assert_msg_received(self, msg_name, body):
        while not self._messages.empty():
            try:
                message = self._messages.get(timeout=10.0)
                if message['name'] == msg_name and message['data'] == body:
                    return
            except Empty:
                break
        assert False, '{} not received'.format(msg_name)

    def _start_listening_thread(self):
        self._bus_thread = threading.Thread(target=self._start_consuming)
        self._bus_thread.start()

    def _start_consuming(self):
        with kombu.Connection(self._bus_url) as conn:
            self._consumer = Consumer(conn, self._messages)
            self._consumer.run()
Example #7
class PyAPNSNotification(threading.Thread, NotificationAbstract):
    def __init__(self, host, app_id, cert_file, dev_mode = False, reconnect_interval=10, chunk_size=10):
        super(PyAPNSNotification, self).__init__()
        self.keepRunning = True
        self.is_server_ready = False
        self.notifications = Queue()
        pyapns_client.configure({'HOST': host})
        self.reconnect_interval = reconnect_interval
        self.app_id = app_id
        self.cert_file = cert_file
        self.chunk_size = chunk_size
        if dev_mode:
            self.mode = 'sandbox'
        else:
            self.mode = 'production'

    def run(self):
        while self.keepRunning or not self.notifications.empty():
            if not self.is_server_ready:
                try:
                    pyapns_client.provision(self.app_id, open(self.cert_file).read(), self.mode)
                    self.is_server_ready = True
                except Exception:
                    if self.keepRunning:
                        self.is_server_ready = False
                        time.sleep(self.reconnect_interval)
                        continue
                    else:
                        break

            tokens = []
            messages = []
            for i in xrange(self.chunk_size):
                if self.notifications.empty() and len(tokens):
                    break

                notification = self.notifications.get()

                if notification is None:
                    self.notifications.task_done()
                    break

                tokens.append(notification['token'])
                messages.append(notification['message'])
                self.notifications.task_done()

            try:
                if len(tokens):
                    pyapns_client.notify(self.app_id, tokens, messages)
            except Exception:
                self.is_server_ready = False
                for i in xrange(len(tokens)):
                    self.notifications.put({'token':tokens[i],'message':messages[i]})

    def stop(self):
        self.keepRunning = False
        self.notifications.put(None)

    def perform_notification(self, token, aps_message):
        self.notifications.put({'token':token,'message':aps_message})
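
A hypothetical wiring sketch for the class above (host, app id, cert path, and token are all placeholders, and a reachable pyapns provider is assumed). Note the shutdown design: stop() flips keepRunning and enqueues a None sentinel so that a run() loop blocked on notifications.get() wakes up and drains:

# Hypothetical wiring; nothing here is from the original snippet except the class
notifier = PyAPNSNotification('http://localhost:7077/', 'myapp',
                              '/path/to/cert.pem', dev_mode=True)
notifier.start()  # Thread.start() enters the run() loop above
notifier.perform_notification('<device token>', {'aps': {'alert': 'hi'}})
notifier.stop()   # flips keepRunning and enqueues the None sentinel
notifier.join()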
Example #8
    def isSurrounded(self, enemy):
        moves = MOVES
        emptyCells = Queue()
        head = self.getHead()
        emptyCells.put(head)

        visited = set(self.queue[1:] + enemy.queue)

        while moves and not emptyCells.empty():
            size = emptyCells.qsize()
            #if size == 1 and moves < MOVES:
            #    return 1
            while size:
                cell = emptyCells.get()
                for i, j in directions:
                    ncell = cell[0] + i * DIAMETER, cell[1] + j * DIAMETER
                    if ncell not in visited and not isOutside(ncell):
                        visited.add(ncell)
                        emptyCells.put(ncell)
                        #print ncell

                size -= 1

            moves -= 1

        return emptyCells.empty()
Example #9
class SimObject(object):
    id = 0
    def __init__(self):
        self.ready = Queue()
        self.target = None
        self.sendVal = None
        SimObject.id += 1
        self.id = SimObject.id
        self.task = self.runTarget()
        self.done = False

    def run(self):
        while not self.done:
            yield
            self.task.next()

    @coroutine
    def runTarget(self):
        first = True
        while not self.ready.empty() or self.target or first:
            first = False
            yield
            if self.target:
                try:
                    self.target.send(self.sendVal)
                except StopIteration:
                    self.target = None
            elif not self.ready.empty():
                self.target = self.ready.get_nowait()
                try:
                    self.target.send(self.sendVal)
                except StopIteration:
                    self.target = None
Example #10
    class sensorProducer:
        def __init__(self, p,listctrl):
            from Queue import Queue
            self.list     = listctrl
            self.port     = p
            self.active   = []
            self.supp     = p.sensor(0)[1]
            for i in range(0, len(self.supp)):
                if self.supp[i] == "1":
                    self.active.append(1)
                else:
                    self.active.append(0)
                
            self.done = Queue(1) # signal the producer to finish
                    
        def start(self):
            while self.done.empty():
                for i in range(3, len(self.active)):
                    if self.active[i]:
                        s = self.port.sensor(i)
                        print i,s
                        self.list.SetStringItem(i, 2, "%s (%s)" % (s[1], s[2]))
                        self.list.SetStringItem(i, 0, "1")
                    if not self.done.empty():
                        break
            self.done.get()

        def stop(self):
            self.done.put(1)
Example #11
class Stack:
    # initialize your data structure here.
    def __init__(self):
        self.q = Queue()
        self.first = None

    # @param x, an integer
    # @return nothing
    def push(self, x):
        if self.first is None:
            self.first = x
        else:
            nq = Queue()
            nq.put(self.first)
            while not self.q.empty():
                nq.put(self.q.get())
            self.q = nq
            self.first = x

    # @return nothing
    def pop(self):
        if not self.q.empty():
            self.first = self.q.get()
        else:
            self.first = None

    # @return an integer
    def top(self):
        return self.first

    # @return an boolean
    def empty(self):
        return self.first is None
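
A quick usage sketch (assumes from Queue import Queue is in scope, as the class requires). LIFO order falls out because push rebuilds the queue with the previous top at its head:

from Queue import Queue  # Python 2 stdlib module used by the class above

s = Stack()
s.push(1)
s.push(2)
s.push(3)
print(s.top())    # 3 - the most recent push is cached in self.first
s.pop()
print(s.top())    # 2
print(s.empty())  # False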
Example #12
    def testForget(self):
        conn = object()
        queue = Queue()
        MARKER = object()
        # Register an expectation
        self.dispatcher.register(conn, 1, queue)
        # ...and forget about it, returning registered queue
        forgotten_queue = self.dispatcher.forget(conn, 1)
        self.assertTrue(queue is forgotten_queue, (queue, forgotten_queue))
        # A ForgottenPacket must have been put in the queue
        queue_conn, packet, kw = queue.get(block=False)
        self.assertTrue(isinstance(packet, ForgottenPacket), packet)
        # ...with appropriate packet id
        self.assertEqual(packet.getId(), 1)
        # ...and appropriate connection
        self.assertTrue(conn is queue_conn, (conn, queue_conn))
        # If forgotten twice, it must raise a KeyError
        self.assertRaises(KeyError, self.dispatcher.forget, conn, 1)
        # Event arrives, return value must be True (it was expected)
        self.assertTrue(self.dispatcher.dispatch(conn, 1, MARKER, {}))
        # ...but must not have reached the queue
        self.assertTrue(queue.empty())

        # Register an expectation
        self.dispatcher.register(conn, 1, queue)
        # ...and forget about it
        self.dispatcher.forget(conn, 1)
        queue.get(block=False)
        # No exception must happen if connection is lost.
        self.dispatcher.unregister(conn)
        # Forgotten message's queue must not have received a "None"
        self.assertTrue(queue.empty())
Example #13
class RexProConnectionPool(object):

    def __init__(self, host, port, size):
        """
        Connection constructor

        :param host: the server to connect to
        :type host: str (ip address)
        :param port: the server port to connect to
        :type port: int
        :param size: the initial connection pool size
        :type size: int
        """

        self.host = host
        self.port = port
        self.size = size

        self.pool = Queue()
        for i in range(size):
            self.pool.put(self._new_conn())

    def _new_conn(self):
        """
        Creates and returns a new connection
        """
        conn = RexProSocket()
        conn.connect((self.host, self.port))
        return conn

    def get(self):
        """
        Returns a connection, creating a new one if the pool is empty
        """
        if self.pool.empty():
            return self._new_conn()
        return self.pool.get()

    def put(self, conn):
        """
        returns a connection to the pool, will close the connection if the pool is full
        """
        if self.pool.qsize() >= self.size:
            conn.close()
            return

        self.pool.put(conn)

    @contextmanager
    def contextual_connection(self):
        """
        context manager that will open, yield, and close a connection
        """
        conn = self.get()
        yield conn
        self.put(conn)

    def __del__(self):
        while not self.pool.empty():
            self.pool.get().close()
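
One caveat worth noting: contextual_connection never returns the connection to the pool if the with-block raises, because the put() after the yield is skipped. A try/finally variant (a sketch, not the library's actual code) closes that gap:

    @contextmanager
    def contextual_connection(self):
        """
        Context manager that opens, yields, and always returns the
        connection to the pool, even when the body raises.
        """
        conn = self.get()
        try:
            yield conn
        finally:
            self.put(conn)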
Example #14
class MessageQueue():
    """
    MessageQueue is a class that listens to the topic it subscribes and
    collect the ROS message from those topics.

    Example
    -------
    >>> queue = MessageQueue()
    >>> queue.subscribe(foo_topic, topic_class)
    >>> msg = queue.get()
    """

    def __init__(self):
        self.queue = Queue()

    def _cb(self, msg):
        if msg is not None:
            self.queue.put(msg)

    def subscribe(self, topic, topic_class):
        return rospy.Subscriber(topic, topic_class, self._cb)

    def get(self, timeout=None):
        return self.queue.get(timeout=timeout)

    def clear(self):
        while not self.queue.empty():
            self.queue.get(1)

    def tolist(self):
        data = []
        while not self.queue.empty():
            data.append(self.queue.get(1))
        return data
Example #15
 def printTreeInLevel2(root):
     if(root is None):
         return
     print
     queue = Queue()
     currLevel = 0
     item = (root, currLevel)
     queue.put(item)
     stack = []
     while(queue.empty() == False):
         # print queue.qsize(),
         item = queue.get()
         node = item[0]
         level = item[1]
         if(level <> currLevel):
             currLevel = level
             while len(stack) > 0:
                 queue.put(stack.pop())
             print
         print node.Data,
         if(currLevel %2 == 0):
             if node.Left <> None:
                 stack.append((node.Left, level + 1))
             if node.Right <> None:
                 stack.append((node.Right, level + 1))
         else:
             if node.Right <> None:
                 stack.append((node.Right, level + 1))
             if node.Left <> None:
                 stack.append((node.Left, level + 1))                
         if(queue.empty() == True):
             while len(stack) > 0:
                 queue.put(stack.pop())
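
A driver sketch, assuming a minimal Node class with Data/Left/Right attributes (hypothetical; the snippet's actual Node type is not shown). Even levels are printed left-to-right and odd levels right-to-left, since the stack reverses each level's order:

# Hypothetical node type; the function only uses .Data/.Left/.Right
class Node(object):
    def __init__(self, data, left=None, right=None):
        self.Data = data
        self.Left = left
        self.Right = right

root = Node(1, Node(2, Node(4), Node(5)), Node(3, Node(6), Node(7)))
printTreeInLevel2(root)
# Output (after a leading blank line):
# 1
# 3 2
# 4 5 6 7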
Example #16
class AnimalShelter:
    
    def __init__(self):
        from Queue import Queue
        self.dogs = Queue()
        self.cats = Queue()
        self.oldest = Queue()
    def enqueue(self, data, type):
        if str.lower(type) == "dogs" or str.lower(type) == "dog":
            self.dogs.put(data)
            self.oldest.put(str.lower(type))
        elif str.lower(type) == "cats" or str.lower(type) == "cat":
            self.cats.put(data)
            self.oldest.put(str.lower(type))
    def dequeueAny(self):
        if self.oldest.empty():
            return None
        type = self.oldest.get()
        if type == "dogs" or type == "dog":
            return self.dequeueDog()
        else:
            return self.dequeueCat()
    def dequeueDog(self):
        if not self.dogs.empty():
            return self.dogs.get()
        else:
            return None
    def dequeueCat(self):
        if not self.cats.empty():
            return self.cats.get()
        else:
            return None
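
A small usage sketch: the oldest queue records only the species of each arrival, so dequeueAny can route to the matching species queue in FIFO order.

shelter = AnimalShelter()
shelter.enqueue("Rex", "dog")
shelter.enqueue("Whiskers", "cat")
shelter.enqueue("Fido", "dog")
print(shelter.dequeueAny())  # Rex
print(shelter.dequeueAny())  # Whiskers
print(shelter.dequeueAny())  # Fido
print(shelter.dequeueAny())  # None - nothing left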
Example #17
 def run_steps_parallel(self, steps):
     errors = []
     threads = []
     bucket = Queue()
     namedQueue[current_thread().name] = bucket
     try:
         for step in steps:
             threads.append(MyThread(bucket=bucket, target=self.run_step, args=(step,)))
         map(lambda x: x.start(), threads)
         map(lambda x: x.join(), threads)
         if current_thread().name == 'MainThread':
             root = ParallelLogNode('MainThread')
             post_order(root, root.children, self._context.output)
             root.children = []
             if not bucket.empty():
                 raise bucket.get_nowait()
         else:
             if not bucket.empty():
                 error = bucket.get_nowait()
                 namedQueue[current_thread().parent].put(error)
                 raise error
     except ExecutionPassed as exception:
         exception.set_earlier_failures(errors)
         raise exception
     except ExecutionFailed as exception:
         errors.extend(exception.get_errors())
     if errors:
         raise ExecutionFailures(errors)
Example #18
class RR(Scheduler):
    def init(self):
        self.ready_list = Queue()
        self.running_job = None
        self.quantum = 1 # ms
        self.timer = Timer(self.sim, RR.reschedule,
                          (self, self.processors[0]), self.quantum, one_shot=False,
                          cpu=self.processors[0])
        self.timer.start()
        
    def reschedule(self, cpu):
        if not self.ready_list.empty():
            cpu.resched()

    def on_activate(self, job):
        self.ready_list.put(job)
        job.cpu.resched()

    def on_terminated(self, job):
        self.running_job = None
        job.cpu.resched()

    def schedule(self, cpu):
        if not self.ready_list.empty():
            job = self.ready_list.get()
            if self.running_job is not None:
                self.ready_list.put(self.running_job)
            self.running_job = job
        else:
            job = self.running_job
        return (job, cpu)
Example #19
def levelOrder(root):
    """
    This does the cool level order traversal of a tree;
    more useful than the other one (inorder) for debugging.
    It is basically a BFS.
    """
    if not root:
        return
    currentLevel = Queue()
    nextLevel = Queue()
    currentLevel.put(root)

    while not currentLevel.empty():
        string = ""
        while not currentLevel.empty():
            currentNode = currentLevel.get()
            string += str(currentNode)
            if currentNode.left:
                nextLevel.put(currentNode.left)
            if currentNode.right:
                nextLevel.put(currentNode.right)

        print string
        # swap the two levels
        currentLevel, nextLevel = nextLevel, currentLevel
Example #20
class Stack:
    # initialize your data structure here.
    def __init__(self):
        self.q = Queue()

    # @param x, an integer
    # @return nothing
    def push(self, x):
        self.q.put(x)

    # @return nothing
    def pop(self):
        p = Queue()
        while not self.q.empty():
            if self.q.qsize() == 1:
                break
            p.put(self.q.get())
        self.q = p

    # @return an integer
    def top(self):
        p = Queue()
        while not self.q.empty():
            if self.q.qsize() == 1:
                tmp = self.q.get()
                p.put(tmp)
                self.q = p
                return tmp
            p.put(self.q.get())

    # @return an boolean
    def empty(self):
        return self.q.empty()
Example #21
File: align.py Project: uubram/RTCR
def get_vj_alignments(ref, reads, cmd_build_index, args_build_index,
        cmd_align, args_align_v, args_align_j, phred_encoding, n_threads):
    """Align V and J germline reference sequences to reads (in FastQ format).
    Yields (v_rec, j_rec) as 2-tuples, where v_rec is a SAMRecord object
    containing an alignment of a V sequence to a read identified by
    v_rec.QNAME. j_rec is similar, but contains an alignment of a J sequence to
    the trimmed version of the same read.

    :ref: AlleleContainer containing both V and J germline reference alleles.
    :reads: handle to FastQ file 
    :cmd_build_index: command that will be run to build an index of the
    reference sequences. If empty "" or None, this command will not be run.
    :args_build_index: string, arguments that will be provided to
    cmd_build_index
    :cmd_align: string, command that will be run to start the aligner
    :args_align_v: string, arguments that will be provided to cmd_align to
    align V sequences to the reads.
    :args_align_j: string, arguments that will be provided to cmd_align to
    align J sequences to the reads.
    :phred_encoding: string passed to the aligner to tell it which encoding to
    use. (e.g. for bowtie2 "33" or "64" is used).
    :n_threads: maximum number of threads/processes the aligner is allowed to
    use.
    """
    with TemporaryDirectory() as dirname:
        logger.info("Created temporary directory: \"%s\""%dirname)
        # e.g. if n_threads is 7, 4 will be given to v aligner and 3 to the
        # j aligner. This is because the v aligner is first and has to do more
        # work.
        n_threads_v = max(int(round(n_threads / float(2))), 1)
        n_threads_j = max(n_threads // 2, 1)
        v_aligner = start_aligner(dirname, ref, "V", cmd_build_index,
            args_build_index, cmd_align, args_align_v, phred_encoding,
            n_threads_v)
        j_aligner = start_aligner(dirname, ref, "J", cmd_build_index,
            args_build_index, cmd_align, args_align_j, phred_encoding,
            n_threads_j)

        v_aligner_input_thread = th.Thread(name = "input to v_aligner",
                target = _copyfileobj,
                args = (reads, v_aligner.stdin))
        v_aligner_input_thread.start()

        # Trim V-region from reads and send trimmed reads to aligner, for J
        # segment alignment, in separate thread
        v_records = Queue()
        j_aligner_input_thread = th.Thread(name = "input to j_aligner",
                target = trim_and_send,
                args = (v_aligner, j_aligner, v_records))
        j_aligner_input_thread.start()

        vj_records = Queue()
        vj_output_thread = th.Thread(name = "collect vj output",
                target = collect_vj_output,
                args = (v_records, j_aligner, vj_records))
        vj_output_thread.start()

        while vj_output_thread.is_alive() or not vj_records.empty():
            while not vj_records.empty():
                yield vj_records.get()
Example #22
 def test_sensor(self):
     q = Queue()
     ID = 'test'
     # Test without noise
     test_sensor = sensor.Sensor(self.athlete, q, ID, noise = 0)
     test_sensor.start()
     sleep(.11)
     test_sensor.stop()
     data = []
     while not q.empty():
         e = q.get_nowait()
         data.append(e.coords)
     assert e.ID == ID
     assert len(data) == 3
     assert allclose(data, 0)
     # Test with noise
     test_sensor = sensor.Sensor(self.athlete, q, ID, noise = 1)
     test_sensor.start()
     sleep(.11)
     test_sensor.stop()
     data = []
     while not q.empty():
         e = q.get_nowait()
         data.append(e.coords)
     assert not any(array(data) == 0)
Example #23
class observer(Thread):
    def __init__(self, req_cond=None):
        if req_cond == None:
            req_cond = Condition(Lock())
        super(observer, self).__init__()
        self.logger = logging.getLogger('mwtm_' + self.__class__.__name__)
        self.accq = Queue()
        self.accs = None
        self.beq = Queue()
        self.bes = None
        self.req_cond = req_cond

    def update_accounts(self, accs):
        with self.req_cond:
            self.accq.queue.clear()
            self.accq.put(accs)
            self.req_cond.notifyAll()

    def accounts(self):
        with self.req_cond:
            while not self.accq.empty():
                self.accs = self.accq.get(False)
        return self.accs

    def update_backends(self, backends):
        with self.req_cond:
            self.beq.queue.clear()
            self.beq.put(backends)
            self.req_cond.notifyAll()

    def backends(self):
        with self.req_cond:
            while not self.beq.empty():
                self.bes = self.beq.get(False)
        return self.bes
Example #24
class FifoQueue():
    def __init__(self):
        self.queue = Queue()

    def isEmpty(self):
        return self.queue.empty()

    def size(self):
        return self.queue.qsize()

    def addPcb(self, aPcb):
        self.queue.put(aPcb)

    def getMax(self):
        if not self.queue.empty():
            return self.queue.get()
        else:
            return None

    # Append queue2 to the ready queue
    def fillTo(self,queue2):
        while(not(queue2.empty())):
            self.queue.put(queue2.get())

    # Remove a given pcb from the queue
    def removePid(self, aPid):
        queue = Queue()
        while(not self.isEmpty()):
            currentPcb = self.getMax()
            if(currentPcb.getPid() != aPid):
                queue.put(currentPcb)
        self.fillTo(queue)
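
A usage sketch with a hypothetical minimal Pcb (the real PCB type is not shown; FifoQueue only relies on getPid()):

# Hypothetical stand-in for the real PCB type
class Pcb(object):
    def __init__(self, pid):
        self.pid = pid
    def getPid(self):
        return self.pid

ready = FifoQueue()
for pid in (1, 2, 3):
    ready.addPcb(Pcb(pid))
ready.removePid(2)
print(ready.getMax().getPid())  # 1
print(ready.getMax().getPid())  # 3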
Example #25
 def _create_partitions(self, data, box):
     """
     :type data: pyspark.RDD
     :param data: RDD containing ((key, partition id), k-dim vector like)
     :type box: BoundingBox
     :param box: BoundingBox for the entire data set
     """
     todo_q = Queue()
     todo_q.put(0)
     done_q = Queue()
     self.partitions = {0: data}
     self.bounding_boxes = {0: box}
     next_label = 1
     current_axis = 0  # first split axis; advanced at the end of each pass
     while next_label < self.max_partitions:
         while not todo_q.empty() and next_label < self.max_partitions:
             current_label = todo_q.get()
             current_partition = self.partitions[current_label]
             current_box = self.bounding_boxes[current_label]
             if SMART_PARTITIONING:
                 (part1, part2, median), current_axis = smart_split(current_partition, self.k, next_label)
             else:
                 part1, part2, median = split_partition(current_partition, current_axis, next_label)
             box1, box2 = current_box.split(current_axis, median)
             self.partitions[current_label] = part1
             self.partitions[next_label] = part2
             self.bounding_boxes[current_label] = box1
             self.bounding_boxes[next_label] = box2
             done_q.put(current_label)
             done_q.put(next_label)
             next_label += 1
         if todo_q.empty():
             todo_q = done_q
             done_q = Queue()
         current_axis = (current_axis + 1) % self.k
Example #26
def bfs_expensive_impl(g, start):
    """
     Constraint: the graph is unweighted?

     http://cs.stackexchange.com/questions/4973/does-a-weighted-breadth-first-search-have-memory-when-moving-to-the-next-verte

     Dijkstra's algorithm for weighted?
    """
    assert start
    assert g

    # Finding
    graph_store = Vertex.recode_graph(g)
    start = graph_store[start]

    # Mark
    start.explored = True
    Q = Queue()
    Q.put(start)

    assert Q.qsize() == 1

    while not Q.empty():
        size = Q.qsize()
        v = Q.get()
        print v.self  # data extracting
        assert Q.qsize() == size - 1
        for w in v.ends:
            if not w.explored:
                w.explored = True
                # mark patch
                w.distance = v.distance + 1
                Q.put(w)

    assert Q.empty()
Example #27
    class sensorProducer:
        def __init__(self, p):
            self.port = p
            self.queue = []
            self.supp = p.sensor(0)[1]
            for i in range(3, len(self.supp)):
                if self.supp[i] == "1":
                    active = 1
                else:
                    active = 0
                sensor_item = [active, Queue(1)]
                self.queue.append(sensor_item)
            self.done = Queue(1)  # signal the producer to finish

        def start(self):
            while self.done.empty():
                for i in range(3, len(self.queue)):
                    if self.queue[i][0] == 1:
                        s = self.port.sensor(i)
                        debug(str(s))
                        self.queue[i][1].put(s)
                    if not self.done.empty():
                        break
            self.done.get()
Example #28
def bfs(g, v, keep_path = False, iterative = False):
	q = Queue()
	dist = {}

	q.put(v)
	if keep_path:
		dist[v] = (0, v)
	else:
		dist[v] = 0
	if not iterative:
		for i in g.nodes_iter():
			g.node[i]['visited'] = False
	g.node[v]['visited'] = True  # mark the start so it is not re-discovered with a wrong distance

	if not keep_path:
		while not q.empty():
			curr = q.get()
			for node in g.neighbors(curr):
				if g.node[node]['visited']:
					continue
				else:
					g.node[node]['visited'] = True
					dist[node] = dist[curr] + 1
					q.put(node)
	else:
		while not q.empty():
			curr = q.get()
			for node in g.neighbors(curr):
				if g.node[node]['visited']:
					continue
				else:
					g.node[node]['visited'] = True
					dist[node] = (dist[curr][0] + 1, curr)
					q.put(node)

	return dist
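
A quick driver sketch against the networkx 1.x API the function relies on (nodes_iter() and the g.node attribute dicts; both were dropped in networkx 2.x):

import networkx as nx  # networkx 1.x

g = nx.Graph()
g.add_edges_from([(1, 2), (2, 3), (1, 4)])
print(bfs(g, 1))  # {1: 0, 2: 1, 3: 2, 4: 1}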
Example #29
 def run(self, cmd, parallel = True, quiet = False, vewy_quiet = False,
         abandon_output = True, warn_only = False):
     if not parallel:
         for conn in self.conn:
             conn.run(cmd, quiet = quiet, vewy_quiet = vewy_quiet,
                     abandon_output = abandon_output, warn_only = warn_only)
     else:
         threads = []
         queue = Queue()
         def wrapper(conn, cmd, queue):
             try:
                 conn.run(cmd, quiet = quiet, vewy_quiet = vewy_quiet,
                         abandon_output = abandon_output,
                         warn_only = warn_only)
             except Exception as e:
                 queue.put(Exception(conn.hostname + ' => ' + str(e)))
         for conn in self.conn:
             thread = Thread(target = wrapper, args = (conn, cmd, queue, ))
             thread.start()
             threads.append(thread)
         for thread in threads:
             thread.join()
         if not queue.empty():
             l = []
             while not queue.empty():
                 e = queue.get()
                  l.append(str(e))
             raise Exception('\n'.join(l))
Example #30
def BiBFS(startA, startB, wList):
    qA = Queue()
    vA = {startA: 1}
    qA.put(startA)

    qB = Queue()
    vB = {startB: 1}
    qB.put(startB)


    while not qA.empty() and not qB.empty():

        xA = qA.get()
        for succ in wList.get_successors(xA):
            if succ in vB:
                return vA[xA] + vB[succ] - 1
            vA[succ] = vA[xA] + 1
            qA.put(succ)

        xB = qB.get()
        for succ in wList.get_successors(xB):
            if succ in vA:
                print xB, succ
                return vB[xB] + vA[succ] - 1
            vB[succ] = vB[xB] + 1
            qB.put(succ)

    return -1
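
A minimal driver sketch, assuming a hypothetical adjacency-list wrapper for wList (its real type is not shown) and an undirected edge set, since the same get_successors() is used to expand from both ends:

from Queue import Queue

class AdjList(object):
    # Hypothetical stand-in for wList: a dict of node -> neighbor list
    def __init__(self, edges):
        self.edges = edges
    def get_successors(self, node):
        return self.edges.get(node, [])

g = AdjList({1: [2], 2: [1, 3], 3: [2, 4], 4: [3]})
print(BiBFS(1, 4, g))  # 3 - edges on the shortest path 1-2-3-4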
Example #31
def create_step60(maindir, mbconnect=None, maxsongs=100, nfilesbuffer=0):
    """
    Makes sure we have the similar artists to the top 100 most familiar
    artists, and then goes on with more similar artists.
    INPUT
       maindir       - root directory of the Million Song dataset
       mbconnect     - open pg connection to Musicbrainz
       maxsongs      - max number of songs per search (max=100)
       nfilesbuffer  - number of files we leave unfilled in the dataset
    RETURN
       the number of songs actually created
    """
    # will contain artists TID that are done or already in the queue
    artists_done = set()
    # get all artists ids
    artist_queue = Queue()
    artists = get_most_familiar_artists(nresults=100)
    n_most_familiars = len(artists)
    npr.shuffle(artists)
    for a in artists:
        artists_done.add(a.id)
        artist_queue.put_nowait(a)
    # for each of them create all songs
    cnt_created = 0
    cnt_artists = 0
    while not artist_queue.empty():
        artist = artist_queue.get_nowait()
        cnt_artists += 1
        # CLOSED CREATION?
        if CREATION_CLOSED:
            break
        if cnt_artists % 10 == 0:
            nh5 = count_h5_files(maindir)
            print('found', nh5, 'h5 song files in', maindir)
            sys.stdout.flush()
            if nh5 > TOTALNFILES - nfilesbuffer:
                return cnt_created
        # verbose
        print('doing artist', cnt_artists, '(pid=' + str(os.getpid()) + ')')
        sys.stdout.flush()
        # encode that artist unless it was done in step10
        #if cnt_artists > n_most_familiars:
        # we had to relaunch this function, lets not redo all the same artists over and over
        if cnt_artists > 1000:
            cnt_created += create_track_files_from_artist(maindir,
                                                          artist,
                                                          mbconnect=mbconnect,
                                                          maxsongs=maxsongs)
        # get similar artists, add to queue
        similars = get_similar_artists(artist)
        if len(similars) == 0: continue
        npr.shuffle(similars)
        similars = similars[:10]  # we keep 10 at random; the radius of artists grows faster
        # so the thread doesn't redo the same artists over and over
        # too bad for the artists we miss (if any...)
        for a in similars:
            if a.id in artists_done:
                continue
            artists_done.add(a.id)
            artist_queue.put_nowait(a)
    return cnt_created
Example #32
        # If the feed is now available and colorbars is running, then terminate colorbars and start main
        elif (feed_available and colorbarson):
            logging.info("Terminating colorbars")
            colorbars.terminate()
            print colorbars.poll()
            colorbarson = False
            mainencode = run(mainq, FFCMD, "mainencode")
        elif (feed_available and not colorbarson):
            mainencode = run(mainq, FFCMD, "mainencode")
            
        # print activeCount()
        print enumerate()
               
    else:  # got line
        try:  
            while (not mainq.empty()):
                line = mainq.get_nowait()  # or q.get(timeout=.1)
                if (STREAM_RUNNING in line):
                    logging.info("Stream has started " + RTMP_DEST)
                    mainencode_failedstarts = 0
                logging.info(line)
        except Empty:
            pass
        except KeyboardInterrupt:
            mainencode.terminate()
            raise
        # ... do something with line
    
    time.sleep(CHECK_WAIT)

# p.communicate(input="q")
Example #33
class MasterHandle:
    def __init__(self, id, idToWrites):
        self.hid = id
        self.readCounter = 0
        self.writeCounter = 0
        self.doFinish = False
        self.readMonitorQueues = [Queue() for i in xrange(4)]  # One queue for each transaction id
        self.writeCmdQueue = Queue()
        self.writeDataQueue = Queue()
        self.idToWrites = idToWrites
        self.readCmdIdleRand = BoolRandomizer()
        self.writeCmdIdleRand = BoolRandomizer()
        self.writeDataIdleRand = BoolRandomizer()

    def isCompleted(self):
        if not self.doFinish:
            return False
        for q in self.readMonitorQueues:
            if not q.empty():
                return False
        if not self.writeDataQueue.empty():
            return False
        if not self.writeCmdQueue.empty():
            return False
        return True

    def genWrite(self):
        idOffset = randBits(2)
        writeCmd = Transaction()
        writeCmd.addr = self.genRandomAddress()
        if random.random() < 0.1:  # Random assertion of decoding error
            writeCmd.addr = 1 << 12
        writeCmd.hid = self.hid * 4 + idOffset  #Each master can use 4 id
        writeCmd.region = randBits(4)
        writeCmd.len = randBits(4)
        writeCmd.size = randBits(3)
        writeCmd.burst = randBits(2)
        writeCmd.lock = randBits(1)
        writeCmd.cache = randBits(4)
        writeCmd.qos = randBits(4)
        writeCmd.prot = randBits(3)
        self.writeCmdQueue.put(writeCmd)

        writeCmd.linkedDatas = []
        for i in xrange(writeCmd.len + 1):
            writeData = Transaction()
            writeData.data = randBits(32)
            writeData.strb = randBits(4)
            writeData.last = 1 if i == writeCmd.len else 0
            self.writeDataQueue.put(writeData)
            writeCmd.linkedDatas.append(writeData)

        self.idToWrites[writeCmd.hid].append(writeCmd)

    def getNextWriteCmdTrans(self):
        if (self.writeCmdQueue.empty()):
            if self.doFinish:
                return None
            self.genWrite()
        return self.writeCmdQueue.get()

    def getNextWriteDataTrans(self):
        if (self.writeDataQueue.empty()):
            if self.doFinish:
                return None
            self.genWrite()
        return self.writeDataQueue.get()

    def genRandomAddress(self):
        while True:
            value = randBits(12)
            if (value >> 10) != self.hid and ((value >> 8) & 0x3) == self.hid:
                return value

    def genReadCmd(self):
        if self.doFinish:
            return None
        if not self.readCmdIdleRand.get():
            return None
        idOffset = randBits(2)
        trans = Transaction()
        trans.addr = self.genRandomAddress()
        if random.random() < 0.1:  # Random assertion of decoding error
            trans.addr = 1 << 12
        trans.hid = self.hid * 4 + idOffset  #Each master can use 4 id
        trans.region = randBits(4)
        trans.len = randBits(4)
        trans.size = randBits(3)
        trans.burst = randBits(2)
        trans.lock = randBits(1)
        trans.cache = randBits(4)
        trans.qos = randBits(4)
        trans.prot = randBits(3)

        trans.progress = 0
        self.readMonitorQueues[idOffset].put(trans)
        # print("Master START  %d %x" % (trans.hid, trans.addr))
        return trans

    def onReadRsp(self, trans):
        queue = self.readMonitorQueues[trans.hid - self.hid * 4]
        task = queue.queue[0]
        if task.addr != 1 << 12:
            assertEquals(trans.data, task.addr + task.progress,
                         "Readed value is wrong")
        else:
            assertEquals(trans.resp, 3, "expected DECERR on decoding-error address")
        task.progress += 1
        if task.progress == task.len + 1:
            # print("Master FINISH %d %x" % (task.hid,task.addr))
            assertEquals(trans.last, 1, "Should be last read")
            queue.get()
            self.readCounter += 1
            self.updateDoFinish()

    def genWriteCmd(self):
        if not self.writeCmdIdleRand.get():
            return None
        return self.getNextWriteCmdTrans()

    def genWriteData(self):
        if not self.writeDataIdleRand.get():
            return None
        return self.getNextWriteDataTrans()

    def onWriteRsp(self, trans):
        self.writeCounter = self.writeCounter + 1
        if trans.resp == 3:
            write = self.idToWrites[trans.hid][0]
            assertEquals(write.addr, (1 << 12), "ERROR ?")
            self.idToWrites[trans.hid].remove(write)

        self.updateDoFinish()

    def updateDoFinish(self):
        if self.readCounter > 100 and self.writeCounter > 100:
            self.doFinish = True
Example #34
class TestFetcherProcessor(unittest.TestCase):

    @classmethod
    def setUpClass(self):
        self.projectdb = ProjectDB([os.path.join(os.path.dirname(__file__), 'data_fetcher_processor_handler.py')])
        self.fetcher = Fetcher(None, None, async=False)
        self.status_queue = Queue()
        self.newtask_queue = Queue()
        self.result_queue = Queue()
        self.httpbin_thread = utils.run_in_subprocess(httpbin.app.run, port=14887)
        self.httpbin = 'http://127.0.0.1:14887'
        self.proxy_thread = subprocess.Popen(['pyproxy', '--username=binux',
                                              '--password=123456', '--port=14830',
                                              '--debug'], close_fds=True)
        self.proxy = '127.0.0.1:14830'
        self.processor = Processor(projectdb=self.projectdb,
                                   inqueue=None,
                                   status_queue=self.status_queue,
                                   newtask_queue=self.newtask_queue,
                                   result_queue=self.result_queue)
        self.project_name = 'data_fetcher_processor_handler'
        time.sleep(0.5)

    @classmethod
    def tearDownClass(self):
        self.proxy_thread.terminate()
        self.proxy_thread.wait()
        self.httpbin_thread.terminate()
        self.httpbin_thread.join()

    def crawl(self, url=None, track=None, **kwargs):
        if url is None and kwargs.get('callback'):
            url = dataurl.encode(utils.text(kwargs.get('callback')))

        project_data = self.processor.project_manager.get(self.project_name)
        assert project_data, "can't find project: %s" % self.project_name
        instance = project_data['instance']
        instance._reset()
        task = instance.crawl(url, **kwargs)
        if isinstance(task, list):
            task = task[0]
        task['track'] = track
        task, result = self.fetcher.fetch(task)
        self.processor.on_task(task, result)

        status = None
        while not self.status_queue.empty():
            status = self.status_queue.get()
        newtasks = []
        while not self.newtask_queue.empty():
            newtasks = self.newtask_queue.get()
        result = None
        while not self.result_queue.empty():
            _, result = self.result_queue.get()
        return status, newtasks, result

    def status_ok(self, status, type):
        if not status:
            return False
        return status.get('track', {}).get(type, {}).get('ok', False)

    def assertStatusOk(self, status):
        self.assertTrue(self.status_ok(status, 'fetch'), status.get('track', {}).get('fetch'))
        self.assertTrue(self.status_ok(status, 'process'), status.get('track', {}).get('process'))

    def __getattr__(self, name):
        return name

    def test_10_not_status(self):
        status, newtasks, result = self.crawl(callback=self.not_send_status)

        self.assertIsNone(status)
        self.assertEqual(len(newtasks), 1, newtasks)
        self.assertEqual(result, 'not_send_status')

    def test_20_url_deduplicated(self):
        status, newtasks, result = self.crawl(callback=self.url_deduplicated)

        self.assertStatusOk(status)
        self.assertIsNone(status['track']['fetch']['error'])
        self.assertIsNone(status['track']['fetch']['content'])
        self.assertFalse(status['track']['fetch']['headers'])
        self.assertFalse(status['track']['process']['logs'])
        self.assertEqual(len(newtasks), 2, newtasks)
        self.assertIsNone(result)

    def test_30_catch_status_code_error(self):
        status, newtasks, result = self.crawl(self.httpbin+'/status/418', callback=self.json)

        self.assertFalse(self.status_ok(status, 'fetch'))
        self.assertFalse(self.status_ok(status, 'process'))
        self.assertIn('HTTP 418', status['track']['fetch']['error'])
        self.assertTrue(status['track']['fetch']['content'], '')
        self.assertTrue(status['track']['fetch']['headers'])
        self.assertTrue(status['track']['process']['logs'])
        self.assertIn('HTTPError: HTTP 418', status['track']['process']['logs'])
        self.assertFalse(newtasks)


        status, newtasks, result = self.crawl(self.httpbin+'/status/400', callback=self.catch_http_error)

        self.assertFalse(self.status_ok(status, 'fetch'))
        self.assertTrue(self.status_ok(status, 'process'))
        self.assertEqual(len(newtasks), 1, newtasks)
        self.assertEqual(result, 400)

        status, newtasks, result = self.crawl(self.httpbin+'/status/500', callback=self.catch_http_error)
        self.assertFalse(self.status_ok(status, 'fetch'))
        self.assertTrue(self.status_ok(status, 'process'))
        self.assertEqual(len(newtasks), 1, newtasks)
        self.assertEqual(result, 500)

        status, newtasks, result = self.crawl(self.httpbin+'/status/302',
                                              allow_redirects=False,
                                              callback=self.catch_http_error)
        self.assertFalse(self.status_ok(status, 'fetch'))
        self.assertTrue(self.status_ok(status, 'process'))
        self.assertEqual(len(newtasks), 1, newtasks)
        self.assertEqual(result, 302)

    def test_40_method(self):
        status, newtasks, result = self.crawl(self.httpbin+'/delete', method='DELETE', callback=self.json)

        self.assertStatusOk(status)
        self.assertFalse(newtasks)

        status, newtasks, result = self.crawl(self.httpbin+'/get', method='PATCH',
                                              callback=self.catch_http_error)

        self.assertFalse(self.status_ok(status, 'fetch'))
        self.assertTrue(self.status_ok(status, 'process'))
        self.assertTrue(newtasks)
        self.assertEqual(result, 405)

    def test_50_params(self):
        status, newtasks, result = self.crawl(self.httpbin+'/get', params={
            'roy': 'binux',
            u'中文': '.',
        }, callback=self.json)

        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertEqual(result['args'], {'roy': 'binux', u'中文': '.'})

    def test_60_data(self):
        status, newtasks, result = self.crawl(self.httpbin+'/post', data={
            'roy': 'binux',
            u'中文': '.',
        }, callback=self.json)

        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertEqual(result['form'], {'roy': 'binux', u'中文': '.'})

    def test_70_redirect(self):
        status, newtasks, result = self.crawl(self.httpbin+'/redirect-to?url=/get', callback=self.json)

        self.assertStatusOk(status)
        self.assertEqual(status['track']['fetch']['redirect_url'], self.httpbin+'/get')
        self.assertFalse(newtasks)

    def test_80_redirect_too_many(self):
        status, newtasks, result = self.crawl(self.httpbin+'/redirect/10', callback=self.json)

        self.assertFalse(self.status_ok(status, 'fetch'))
        self.assertFalse(self.status_ok(status, 'process'))
        self.assertFalse(newtasks)
        self.assertEqual(status['track']['fetch']['status_code'], 599)
        self.assertIn('redirects followed', status['track']['fetch']['error'])

    def test_90_files(self):
        status, newtasks, result = self.crawl(self.httpbin+'/put', method='PUT',
                                              files={os.path.basename(__file__): open(__file__).read()},
                                              callback=self.json)

        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertIn(os.path.basename(__file__), result['files'])

    def test_a100_files_with_data(self):
        status, newtasks, result = self.crawl(self.httpbin+'/put', method='PUT',
                                              files={os.path.basename(__file__): open(__file__).read()},
                                              data={
                                                  'roy': 'binux',
                                                  #'中文': '.', # FIXME: does not work
                                              },
                                              callback=self.json)
        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertEqual(result['form'], {'roy': 'binux'})
        self.assertIn(os.path.basename(__file__), result['files'])

    def test_a110_headers(self):
        status, newtasks, result = self.crawl(self.httpbin+'/get',
                                              headers={
                                                  'a': 'b',
                                                  'C-d': 'e-F',
                                              }, callback=self.json)
        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertEqual(result['headers'].get('A'), 'b')
        self.assertEqual(result['headers'].get('C-D'), 'e-F')

    def test_a120_cookies(self):
        status, newtasks, result = self.crawl(self.httpbin+'/get',
                                              cookies={
                                                  'a': 'b',
                                                  'C-d': 'e-F'
                                              }, callback=self.json)
        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertIn('a=b', result['headers'].get('Cookie'))
        self.assertIn('C-d=e-F', result['headers'].get('Cookie'))

    def test_a130_cookies_with_headers(self):
        status, newtasks, result = self.crawl(self.httpbin+'/get',
                                              headers={
                                                  'Cookie': 'g=h; I=j',
                                              },
                                              cookies={
                                                  'a': 'b',
                                                  'C-d': 'e-F'
                                              }, callback=self.json)
        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertIn('g=h', result['headers'].get('Cookie'))
        self.assertIn('I=j', result['headers'].get('Cookie'))
        self.assertIn('a=b', result['headers'].get('Cookie'))
        self.assertIn('C-d=e-F', result['headers'].get('Cookie'))

    def test_a140_response_cookie(self):
        status, newtasks, result = self.crawl(self.httpbin+'/cookies/set?k1=v1&k2=v2',
                                              callback=self.cookies)
        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertEqual(result, {'k1': 'v1', 'k2': 'v2'})

    def test_a145_redirect_cookie(self):
        status, newtasks, result = self.crawl(self.httpbin+'/cookies/set?k1=v1&k2=v2',
                                              callback=self.json)
        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertEqual(result['cookies'], {'k1': 'v1', 'k2': 'v2'})

    def test_a150_timeout(self):
        status, newtasks, result = self.crawl(self.httpbin+'/delay/2', timeout=1, callback=self.json)

        self.assertFalse(self.status_ok(status, 'fetch'))
        self.assertFalse(self.status_ok(status, 'process'))
        self.assertFalse(newtasks)
        self.assertEqual(int(status['track']['fetch']['time']), 1)

    def test_a160_etag(self):
        status, newtasks, result = self.crawl(self.httpbin+'/cache', etag='abc', callback=self.json)

        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertFalse(result)

    def test_a170_last_modifed(self):
        status, newtasks, result = self.crawl(self.httpbin+'/cache', last_modifed='0', callback=self.json)

        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertFalse(result)

    def test_a180_save(self):
        status, newtasks, result = self.crawl(callback=self.get_save,
                                              save={'roy': 'binux', u'中文': 'value'})

        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertEqual(result, {'roy': 'binux', u'中文': 'value'})

    def test_a190_taskid(self):
        status, newtasks, result = self.crawl(callback=self.get_save,
                                              taskid='binux-taskid')

        self.assertStatusOk(status)
        self.assertEqual(status['taskid'], 'binux-taskid')
        self.assertFalse(newtasks)
        self.assertFalse(result)

    def test_a200_no_proxy(self):
        old_proxy = self.fetcher.proxy
        self.fetcher.proxy = self.proxy
        status, newtasks, result = self.crawl(self.httpbin+'/get',
                                              params={
                                                  'test': 'a200'
                                              }, proxy=False, callback=self.json)

        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.fetcher.proxy = old_proxy

    def test_a210_proxy_failed(self):
        old_proxy = self.fetcher.proxy
        self.fetcher.proxy = self.proxy
        status, newtasks, result = self.crawl(self.httpbin+'/get',
                                              params={
                                                  'test': 'a210'
                                              }, callback=self.catch_http_error)

        self.assertFalse(self.status_ok(status, 'fetch'))
        self.assertTrue(self.status_ok(status, 'process'))
        self.assertEqual(len(newtasks), 1, newtasks)
        self.assertEqual(result, 403)
        self.fetcher.proxy = old_proxy

    def test_a220_proxy_ok(self):
        old_proxy = self.fetcher.proxy
        self.fetcher.proxy = self.proxy
        status, newtasks, result = self.crawl(self.httpbin+'/get',
                                              params={
                                                  'test': 'a220',
                                                  'username': '******',
                                                  'password': '******',
                                              }, callback=self.catch_http_error)

        self.assertStatusOk(status)
        self.assertEqual(result, 200)
        self.fetcher.proxy = old_proxy

    def test_a230_proxy_parameter_fail(self):
        status, newtasks, result = self.crawl(self.httpbin+'/get',
                                              params={
                                                  'test': 'a230',
                                              }, proxy=self.proxy,
                                              callback=self.catch_http_error)

        self.assertFalse(self.status_ok(status, 'fetch'))
        self.assertTrue(self.status_ok(status, 'process'))
        self.assertEqual(result, 403)

    def test_a240_proxy_parameter_ok(self):
        status, newtasks, result = self.crawl(self.httpbin+'/post',
                                              method='POST',
                                              data={
                                                  'test': 'a240',
                                                  'username': '******',
                                                  'password': '******',
                                              }, proxy=self.proxy,
                                              callback=self.catch_http_error)

        self.assertStatusOk(status)
        self.assertEqual(result, 200)

    def test_a250_proxy_userpass(self):
        status, newtasks, result = self.crawl(self.httpbin+'/post',
                                              method='POST',
                                              data={
                                                  'test': 'a250',
                                              }, proxy='binux:123456@'+self.proxy,
                                              callback=self.catch_http_error)

        self.assertStatusOk(status)
        self.assertEqual(result, 200)

    def test_a260_process_save(self):
        status, newtasks, result = self.crawl(callback=self.set_process_save)

        self.assertStatusOk(status)
        self.assertIn('roy', status['track']['save'])
        self.assertEqual(status['track']['save']['roy'], 'binux')

        status, newtasks, result = self.crawl(callback=self.get_process_save,
                                              track=status['track'])

        self.assertStatusOk(status)
        self.assertIn('roy', result)
        self.assertEqual(result['roy'], 'binux')


    def test_zzz_links(self):
        status, newtasks, result = self.crawl(self.httpbin+'/links/10/0', callback=self.links)

        self.assertStatusOk(status)
        self.assertEqual(len(newtasks), 9, newtasks)
        self.assertFalse(result)

    def test_zzz_html(self):
        status, newtasks, result = self.crawl(self.httpbin+'/html', callback=self.html)

        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertEqual(result, 'Herman Melville - Moby-Dick')

    def test_zzz_etag_enabled(self):
        status, newtasks, result = self.crawl(self.httpbin+'/cache', callback=self.json)
        self.assertStatusOk(status)
        self.assertTrue(result)

        status, newtasks, result = self.crawl(self.httpbin+'/cache',
                                              track=status['track'], callback=self.json)
        self.assertStatusOk(status)
        self.assertFalse(newtasks)
        self.assertFalse(result)

    def test_zzz_etag_not_working(self):
        status, newtasks, result = self.crawl(self.httpbin+'/cache', callback=self.json)
        self.assertStatusOk(status)
        self.assertTrue(result)

        status['track']['process']['ok'] = False
        status, newtasks, result = self.crawl(self.httpbin+'/cache',
                                              track=status['track'], callback=self.json)
        self.assertStatusOk(status)
        self.assertTrue(result)

    def test_zzz_unexpected_crawl_argument(self):
        with self.assertRaisesRegexp(TypeError, "unexpected keyword argument"):
            self.crawl(self.httpbin+'/cache', cookie={}, callback=self.json)

    def test_zzz_curl_get(self):
        status, newtasks, result = self.crawl("curl '"+self.httpbin+'''/get' -H 'DNT: 1' -H 'Accept-Encoding: gzip, deflate, sdch' -H 'Accept-Language: en,zh-CN;q=0.8,zh;q=0.6' -H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.17 Safari/537.36' -H 'Binux-Header: Binux-Value' -H 'Accept: */*' -H 'Cookie: _gauges_unique_year=1; _gauges_unique=1; _ga=GA1.2.415471573.1419316591' -H 'Connection: keep-alive' --compressed''', callback=self.json)
        self.assertStatusOk(status)
        self.assertTrue(result)

        self.assertEqual(result['headers'].get('Binux-Header'), 'Binux-Value')

    def test_zzz_curl_post(self):
        status, newtasks, result = self.crawl("curl '"+self.httpbin+'''/post' -H 'Origin: chrome-extension://hgmloofddffdnphfgcellkdfbfbjeloo' -H 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: en,zh-CN;q=0.8,zh;q=0.6' -H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.17 Safari/537.36' -H 'Content-Type: application/x-www-form-urlencoded' -H 'Accept: */*' -H 'Cookie: _gauges_unique_year=1; _gauges_unique=1; _ga=GA1.2.415471573.1419316591' -H 'Connection: keep-alive' -H 'DNT: 1' --data 'Binux-Key=%E4%B8%AD%E6%96%87+value' --compressed''', callback=self.json)
        self.assertStatusOk(status)
        self.assertTrue(result)

        self.assertEqual(result['form'].get('Binux-Key'), '中文 value')

    def test_zzz_curl_put(self):
        status, newtasks, result = self.crawl("curl '"+self.httpbin+'''/put' -X PUT -H 'Origin: chrome-extension://hgmloofddffdnphfgcellkdfbfbjeloo' -H 'Accept-Encoding: gzip, deflate, sdch' -H 'Accept-Language: en,zh-CN;q=0.8,zh;q=0.6' -H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.17 Safari/537.36' -H 'Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryYlkgyaA7SRGOQYUG' -H 'Accept: */*' -H 'Cookie: _gauges_unique_year=1; _gauges_unique=1; _ga=GA1.2.415471573.1419316591' -H 'Connection: keep-alive' -H 'DNT: 1' --data-binary $'------WebKitFormBoundaryYlkgyaA7SRGOQYUG\r\nContent-Disposition: form-data; name="Binux-Key"\r\n\r\n%E4%B8%AD%E6%96%87+value\r\n------WebKitFormBoundaryYlkgyaA7SRGOQYUG\r\nContent-Disposition: form-data; name="fileUpload1"; filename="1"\r\nContent-Type: application/octet-stream\r\n\r\n\r\n------WebKitFormBoundaryYlkgyaA7SRGOQYUG--\r\n' --compressed''', callback=self.json)
        self.assertStatusOk(status)
        self.assertTrue(result)

        self.assertIn('fileUpload1', result['files'], result)

    def test_zzz_curl_no_url(self):
        with self.assertRaisesRegexp(TypeError, 'no URL'):
            status, newtasks, result = self.crawl(
                '''curl -X PUT -H 'Origin: chrome-extension://hgmloofddffdnphfgcellkdfbfbjeloo' --compressed''',
                callback=self.json)

    def test_zzz_curl_bad_option(self):
        with self.assertRaisesRegexp(TypeError, 'Unknow curl option'):
            status, newtasks, result = self.crawl(
                '''curl '%s/put' -X PUT -H 'Origin: chrome-extension://hgmloofddffdnphfgcellkdfbfbjeloo' -v''' % self.httpbin,
                callback=self.json)

        with self.assertRaisesRegexp(TypeError, 'Unknow curl option'):
            status, newtasks, result = self.crawl(
                '''curl '%s/put' -X PUT -v -H 'Origin: chrome-extension://hgmloofddffdnphfgcellkdfbfbjeloo' ''' % self.httpbin,
                callback=self.json)
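
The curl tests above hand a raw curl command line straight to crawl(). As a purely illustrative sketch (not pyspider's actual parser), a minimal curl-to-kwargs translation using only the standard library could look like the following; the misspelled 'Unknow curl option' deliberately mirrors the message the tests match:

import shlex

def curl_to_kwargs(command):
    """Toy parser: pull url, method, headers and body out of a curl line."""
    tokens = shlex.split(command)
    kwargs = {'method': 'GET', 'headers': {}, 'data': None}
    url = None
    it = iter(tokens[1:])          # tokens[0] is 'curl' itself
    for tok in it:
        if tok == '-H':
            name, _, value = next(it).partition(': ')
            kwargs['headers'][name] = value
        elif tok == '-X':
            kwargs['method'] = next(it)
        elif tok in ('--data', '--data-binary'):
            kwargs['data'] = next(it)
            if kwargs['method'] == 'GET':
                kwargs['method'] = 'POST'   # curl implies POST for a body
        elif tok == '--compressed':
            pass                            # transport detail, nothing to keep
        elif not tok.startswith('-'):
            url = tok
        else:
            raise TypeError('Unknow curl option %s' % tok)
    if url is None:
        raise TypeError('no URL found')
    kwargs['url'] = url
    return kwargs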
Ejemplo n.º 35
0
class Scheduler(object):
    def __init__(self):
        self.ready   = Queue()   
        self.taskmap = {}        

        # Tasks waiting for other tasks to exit
        self.exit_waiting = {}

        # I/O waiting
        self.read_waiting = {}
        self.write_waiting = {}
        
    def new(self,target):
        newtask = Task(target)
        self.taskmap[newtask.tid] = newtask
        self.schedule(newtask)
        return newtask.tid

    def exit(self,task):
        print "Task %d terminated" % task.tid
        del self.taskmap[task.tid]
        # Notify other tasks waiting for exit
        for waiting in self.exit_waiting.pop(task.tid, []):
            self.schedule(waiting)

    def waitforexit(self,task,waittid):
        if waittid in self.taskmap:
            self.exit_waiting.setdefault(waittid,[]).append(task)
            return True
        else:
            return False

    # I/O waiting
    def waitforread(self,task,fd):
        self.read_waiting[fd] = task

    def waitforwrite(self,task,fd):
        self.write_waiting[fd] = task

    def iopoll(self,timeout):
        if self.read_waiting or self.write_waiting:
           r,w,e = select.select(self.read_waiting,
                                 self.write_waiting,[],timeout)
           for fd in r: self.schedule(self.read_waiting.pop(fd))
           for fd in w: self.schedule(self.write_waiting.pop(fd))

    def iotask(self):
        while True:
            if self.ready.empty():
                self.iopoll(None)
            else:
                self.iopoll(0)
            yield

    def schedule(self,task):
        self.ready.put(task)

    def mainloop(self):
         self.new(self.iotask())
         while self.taskmap:
             task = self.ready.get()
             try:
                 result = task.run()
                 if isinstance(result,SystemCall):
                     result.task  = task
                     result.sched = self
                     result.handle()
                     continue
             except StopIteration:
                 self.exit(task)
                 continue
             self.schedule(task)
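
A minimal sketch of how the Scheduler above is driven, assuming the classic Task/SystemCall collaborators it references; the stand-ins below only illustrate the protocol (run() resumes the wrapped coroutine, handle() performs the system call and reschedules the task). Note that mainloop() never returns on its own because iotask() never finishes:

class Task(object):
    taskid = 0
    def __init__(self, target):
        Task.taskid += 1
        self.tid = Task.taskid    # unique task id, read by Scheduler.new()
        self.target = target      # the wrapped coroutine
        self.sendval = None       # value delivered on the next resume
    def run(self):
        return self.target.send(self.sendval)

class SystemCall(object):
    def handle(self):
        pass

class GetTid(SystemCall):
    """Yielded by a coroutine to learn its own task id."""
    def handle(self):
        self.task.sendval = self.task.tid   # mainloop() sets .task and .sched
        self.sched.schedule(self.task)

def hello():
    tid = yield GetTid()
    for i in range(3):
        print "task %d: hello %d" % (tid, i)
        yield

sched = Scheduler()
sched.new(hello())
sched.mainloop()   # runs until interrupted (Ctrl-C)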
Ejemplo n.º 36
0
class SerialConnection(object):

    # Callbacks, assigned by the user of this class
    receivedData = None
    receivedAnswer = None
    rawData = None
    baudRateMode = False

    def __init__(self):
        self.status = False
        self.rawMode = False
        self.__readThread = None

        self.__writeThread = None

        self.__cmdQueue = Queue()
        self.__cmdProcesssedEvent = threading.Event()

        self.__serial = None
        self.__currCommand = None
        self.__waitingAnswer = False
        # 5 seconds timeout
        self.__TOTimer = None
        self.__cmdMutex = threading.Lock()
        #self.input_encoding = 'UTF-8'
        #self.output_encoding = 'UTF-8'
        self.__rx_decoder = codecs.getincrementaldecoder('UTF-8')('replace')
        self.__tx_decoder = codecs.getincrementalencoder('UTF-8')('replace')

    def start(self, port, baudrate):
        result = True

        if self.status:
            result = False

        if result:
            try:
                self.status = True
                self.__serial = serial.Serial()
                self.__serial.port = port
                self.__serial.baudrate = baudrate

                if not hasattr(self.__serial, 'cancel_read'):
                    # enable timeout for alive flag polling if cancel_read is not available
                    self.__serial.timeout = 1

                self.__serial.open()
                if self.__serial.in_waiting > 0:
                    self.__serial.timeout = 1
                    self.__serial.read(self.__serial.in_waiting)
                    self.__serial.timeout = 0

            except serial.SerialException as e:
                logger.error('could not open port {!r}: {}\n'.format(port, e))
                self.status = False
                result = False

            if result:
                self.__readThread = threading.Thread(target=self._reader,
                                                     args=())
                self.__readThread.daemon = True
                self.__writeThread = threading.Thread(target=self._writer,
                                                      args=())
                self.__writeThread.daemon = True

                # start thread
                self.__readThread.start()
                self.__writeThread.start()

        return result

    def stop(self):
        if self.status:
            self.status = False
            # Exit write thread
            self.__cmdQueue.put(None)
            if self.__writeThread.isAlive():
                self.__writeThread.join()
            logger.debug("Write thread stopped")

            if self.__serial is not None:
                self.__serial.cancel_read()
                self.__serial.close()

            if self.__readThread.isAlive():
                self.__readThread.join()
            logger.debug("Read thread stopped")

    def reconnect(self, port, baudrate):
        res = True
        try:
            if self.status:
                self.__serial.close()
                self.status = False

            self.__serial.port = port
            self.__serial.baudrate = baudrate
            self.__serial.open()
            self.status = True
        except:
            res = False
            logger.error("Error reconnecting")
        return res

    def _timeoutHandler(self):
        try:
            logger.debug("Command response timeout, command %s" %
                         self.__currCommand.cmd)
            answer = None
            self.__cmdMutex.acquire()
            if self.__waitingAnswer:
                self.__waitingAnswer = False
                answer = ATresponse(False, self.__currCommand.cmd, ["Timeout"])
            self.__cmdMutex.release()

            if answer is not None:
                self.receivedAnswer(answer)
        except:
            logger.error("Timeout handler exception")

        self.__cmdProcesssedEvent.set()

    def sendCommand(self, newCommand):
        if self.status:
            self.__cmdQueue.put(newCommand)

    def clearCommandsQueue(self):
        # Drain any queued commands without blocking
        while not self.__cmdQueue.empty():
            self.__cmdQueue.get(False)

    def _writer(self):
        self.__cmdProcesssedEvent.clear()
        while self.status:
            try:
                logger.debug("[W]queue Get")
                newCmd = self.__cmdQueue.get(True)

                if newCmd is None:
                    #Exit from queue block
                    raise Exception("Exit thread")

                logger.debug("[W]new cmd " + newCmd.cmd)

                #self.__cmdMutex.acquire()

                # Mark that we are now waiting for an answer
                self.__waitingAnswer = True

                self.__currCommand = newCmd
                #self.__cmdMutex.release()

                self.writeDirect(self.__currCommand.getString())

                # Start timeout
                logger.debug("[W]Start timeout timer")
                self.__TOTimer = threading.Timer(5, self._timeoutHandler)
                self.__TOTimer.start()

                logger.debug("[W]Wait event")
                self.__cmdProcesssedEvent.wait()
                self.__cmdProcesssedEvent.clear()
                logger.debug("[W]event rx")

            except Exception:
                logger.error("[W]Exception write thread ")
        logger.debug("exit wr")

    def _reader(self):

        if self.baudRateMode is False:

            try:
                answerString = ''
                while self.status:
                    # read all that is there or wait for one byte
                    logger.debug("[R]wait")
                    readBytes = self.__serial.read(self.__serial.in_waiting
                                                   or 1)
                    #readBytes = self.__serial.read(1024)
                    logger.debug("[R]rx ")

                    if readBytes and self.status:
                        # Check if we are forwarding
                        if not self.rawMode:
                            text = self.__rx_decoder.decode(readBytes)
                            self.rawData(text)

                            self.__cmdMutex.acquire()

                            if self.__waitingAnswer:
                                answerString += text
                                answer = self.__currCommand.checkResponse(
                                    answerString)

                                # Expected answer
                                if answer is not None:
                                    self.__TOTimer.cancel()
                                    self.__waitingAnswer = False
                                    answerString = ''

                                    self.__cmdMutex.release()
                                    logger.debug('[' + text + ']')
                                    logger.debug("[R]Answer ok")

                                    self.receivedAnswer(answer)
                                    self.__cmdProcesssedEvent.set()
                                else:
                                    self.__cmdMutex.release()
                                    logger.debug('[' + text + ']')

                            else:
                                self.__cmdMutex.release()
                                # Unsolicited data (no pending command) arrives here
                                print "Length of received data = ", len(text)
                                self.receivedData(text)

                                if text == "B":
                                    self.baudRateMode = True
                                    print "baudRateMode = ON"

                                logger.debug('<' + text + '>')
                        else:
                            logger.debug('{' + repr(readBytes) + '}')
                            self.receivedData(readBytes)

            except serial.SerialException:
                self.status = False
            logger.debug("exit rd")

        else:
            print "Baudrate Mode"
            self.readerBaudRate()

    def writeDirect(self, data):
        try:
            if self.status:
                if self.rawMode:
                    self.__serial.write(data)
                else:
                    logger.debug("tx " + data)
                    self.rawData(data)
                    self.__serial.write(self.__tx_decoder.encode(data))

        except Exception, v:
            logger.error("Exception writeDirect: %s" % v)
Ejemplo n.º 37
0
class METAFORCE(object):
    def __init__(self, wordlist, knownlist):
        self.guesses = Queue(maxsize=0)
        self.wordlist = wordlist
        self.knownlist = self._string_to_list(knownlist)
        self.chrome_driver = None
        self.chrome_option = None
        self.chrome_binary = None
        self.logger = None

        self._logger_init()
        self._generate_wordlist(self.wordlist, self.knownlist)

    def _logger_init(self, filename='debug.log'):
        self.logger = logging.getLogger('unMetamask')
        log_handler = logging.FileHandler(filename)
        log_handler.setFormatter(
            logging.Formatter('%(name)s %(levelname)s - %(message)s'))
        self.logger.addHandler(log_handler)
        self.logger.setLevel(logging.DEBUG)

    def _chrome_init(self):
        self.chrome_binary = "/usr/bin/chromedriver"
        self.chrome_option = webdriver.ChromeOptions()
        self.chrome_option.add_extension(
            "nkbihfbeogaeaoehlefnkodbefgpgknn.crx")
        self.chrome_driver = webdriver.Chrome(
            executable_path=self.chrome_binary,
            chrome_options=self.chrome_option,
        )

    def _check_exists_by_xpath(self, xpath):
        try:
            self.chrome_driver.find_element_by_xpath(xpath)
        except NoSuchElementException:
            return False
        return True

    def _get_number_needed(self, knownlist):
        return (CONST_MAX_WORDS - len(knownlist))

    def _string_to_list(self, string):
        stub_list = string.split(",")
        return [x.strip(' ') for x in stub_list]

    def _list_to_string(self, list):
        return (' '.join(list))

    def _clear_field(self, element):
        length = len(element.get_attribute('value'))
        if length != 0:
            element.send_keys(length * Keys.BACKSPACE)

    def _generate_wordlist(self, wordlist, knownlist):
        stub = None
        self.logger.info("Attempting to parse provided wordlist")
        if os.path.isfile(wordlist):
            with open(wordlist) as f:
                stub = f.read().splitlines()
            self.logger.info("Building master wordlist...")
            for x in list(
                    itertools.combinations(
                        stub, self._get_number_needed(self.knownlist))):
                self.guesses.put(self.knownlist + list(x))
            self._is_list_created = True
            self.logger.info("Master wordlist completed!")

    def start(self):

        self._chrome_init()

        try:
            while True:
                self.chrome_driver.get(
                    "chrome-extension://nkbihfbeogaeaoehlefnkodbefgpgknn/popup.html"
                )

                #PRIVACY NOTICE
                self.chrome_driver.find_element_by_xpath(
                    "//*[contains(text(), 'Accept')]").click()
                self.chrome_driver.implicitly_wait(0.5)

                #TERMS OF USE
                eula = self.chrome_driver.find_element_by_xpath(
                    '//*[@id="app-content"]/div/div[4]/div/div')
                self.chrome_driver.execute_script(
                    'arguments[0].scrollTop = arguments[0].scrollHeight', eula)
                self.chrome_driver.find_element_by_xpath(
                    '//*[@id="app-content"]/div/div[4]/div/button').click()

                #IMPORT EXISTING DEN
                self.chrome_driver.find_element_by_xpath(
                    "//*[contains(text(), 'Import Existing DEN')]").click()

                #WALLET RECOVERY
                while not self.guesses.empty():
                    stub_guess = self.guesses.get()
                    self.chrome_driver.implicitly_wait(1)

                    # Attempt to catch a successful login
                    if self._check_exists_by_xpath(
                            '//*[@id="app-content"]/div/div[4]/div/div/div[2]/button[2]'
                    ):
                        print "It DOES exist.."
                        return
                    else:
                        print 'attempting %s' % stub_guess

                    textarea = self.chrome_driver.find_element_by_class_name(
                        "twelve-word-phrase")
                    password1 = self.chrome_driver.find_element_by_id(
                        "password-box")
                    password2 = self.chrome_driver.find_element_by_id(
                        "password-box-confirm")

                    self._clear_field(textarea)
                    self._clear_field(password1)
                    self._clear_field(password2)

                    textarea.send_keys(' '.join(stub_guess))
                    password1.send_keys(CONST_PASSWORD)
                    password2.send_keys(CONST_PASSWORD)

                    self.chrome_driver.find_element_by_xpath(
                        '//*[@id="app-content"]/div/div[4]/div/div/button[2]'
                    ).click()

                    self.guesses.task_done()

        except KeyboardInterrupt:
            print "Exiting by User Interrupt"
            self.chrome_driver.quit()
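
Driving the brute-forcer above is short; the wordlist path and known words below are placeholders. Note that the guess queue holds one entry per combination, C(len(wordlist), CONST_MAX_WORDS - len(knownlist)), so a large wordlist with few known words grows explosively:

if __name__ == '__main__':
    # Hypothetical inputs: a wordlist file plus three known seed words.
    forcer = METAFORCE('wordlist.txt', 'abandon, ability, able')
    forcer.start()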
Ejemplo n.º 38
0
class State:
    '''Global robot state variables'''

    STATE_IDLE = 0
    STATE_TURN = 1
    STATE_DRIVE = 2
    STATE_REVERSE = 3
    STATE_TIMED = 4

    DRIVE_MODE_STOP = 0
    DRIVE_MODE_PID = 1

    # Tune these with dynamic reconfigure.
    DRIVE_SPEED = 0
    REVERSE_SPEED = 0
    TURN_SPEED = 0
    HEADING_RESTORE_FACTOR = 0
    GOAL_DISTANCE_OK = 0
    ROTATE_THRESHOLD = 0
    DRIVE_ANGLE_ABORT = 0

    DRIVE_SPEED_MAX = 0.6
    TURN_SPEED_MAX = 1.2

    def __init__(self):
        self.MapLocation = Location(None)
        self.OdomLocation = Location(None)
        self.CurrentState = State.STATE_IDLE
        self.Goal = None
        self.Start = None
        self.TimerCount = 0
        self.Doing = None
        self.Work = Queue()
        self.dbg_msg = None
        self.Obstacles = 0
        self.JoystickCommand = Joy()
        self.JoystickCommand.axes = [0, 0, 0, 0, 0, 0]

        # Configuration
        State.DRIVE_SPEED = rospy.get_param("DRIVE_SPEED", default=0.3)
        State.REVERSE_SPEED = rospy.get_param("REVERSE_SPEED", default=0.2)
        State.TURN_SPEED = rospy.get_param("TURN_SPEED", default=0.6)
        State.HEADING_RESTORE_FACTOR = rospy.get_param(
            "HEADING_RESTORE_FACTOR", default=2)
        State.GOAL_DISTANCE_OK = rospy.get_param("GOAL_DISTANCE_OK",
                                                 default=0.1)
        State.ROTATE_THRESHOLD = rospy.get_param("ROTATE_THRESHOLD",
                                                 default=math.pi / 16)
        State.DRIVE_ANGLE_ABORT = rospy.get_param("DRIVE_ANGLE_ABORT",
                                                  default=math.pi / 4)

        # Subscribers
        rospy.Subscriber('joystick', Joy, self._joystick, queue_size=10)
        rospy.Subscriber('obstacle', Obstacle, self._obstacle)
        rospy.Subscriber('odom/filtered', Odometry, self._odom)

        # Services
        self.control = rospy.Service('control', Core, self._control)

        # Publishers
        self.state_machine = rospy.Publisher('state_machine',
                                             String,
                                             queue_size=1,
                                             latch=True)
        self.driveControl = rospy.Publisher('driveControl',
                                            Twist,
                                            queue_size=10)

        # Configuration
        self.config_srv = Server(driveConfig, self._reconfigure)

        # Start a thread to do initial configuration.
        thread.start_new_thread(self.do_initial_config, ())

    def _stop_now(self, result):
        self.CurrentState = State.STATE_IDLE
        while not self.Work.empty():
            item = self.Work.get(False)
            item.result = result
            if item.sema is not None:
                item.sema.release()

        if self.Doing is not None:
            self.Doing.result = result

    def _control(self, req):
        for r in req.req[:-1]:
            self.Work.put(Task(r, False), False)

        r = req.req[-1]
        t = Task(r, True)
        self.Work.put(t, True)

        sleep_wait = 0.2
        sleep_turns = r.timeout / sleep_wait
        while not t.sema.acquire(blocking=False):
            rospy.sleep(sleep_wait)
            sleep_turns -= 1
            if sleep_turns == 0:
                # Ugh. Is this safe?
                with package_lock:
                    self._stop_now(MoveResult.TIMEOUT)

        rval = MoveResult()
        rval.result = t.result
        return rval

    @sync(package_lock)
    def _reconfigure(self, config, level):
        State.DRIVE_SPEED = config["DRIVE_SPEED"]
        State.REVERSE_SPEED = config["REVERSE_SPEED"]
        State.TURN_SPEED = config["TURN_SPEED"]
        State.HEADING_RESTORE_FACTOR = config["HEADING_RESTORE_FACTOR"]
        State.GOAL_DISTANCE_OK = config["GOAL_DISTANCE_OK"]
        State.ROTATE_THRESHOLD = config["ROTATE_THRESHOLD"]
        State.DRIVE_ANGLE_ABORT = config["DRIVE_ANGLE_ABORT"]
        self.print_debug('Mobility parameter reconfiguration done.')
        return config

    @sync(package_lock)
    def _joystick(self, joy_command):
        self.JoystickCommand = joy_command

    @sync(package_lock)
    def set_mode(self, msg):
        if msg.data == 1:
            self._stop_now(MoveResult.USER_ABORT)

    def __check_obstacles(self):
        if self.Doing is not None:
            detected = self.Obstacles & self.Doing.request.obstacles

            if (detected & Obstacle.IS_SONAR) != 0:
                self._stop_now(MoveResult.OBSTACLE_SONAR)

            if (detected & Obstacle.IS_VISION) != 0:
                if detected & Obstacle.INSIDE_HOME:
                    self._stop_now(MoveResult.INSIDE_HOME)
                elif detected & Obstacle.TAG_HOME:
                    self._stop_now(MoveResult.OBSTACLE_HOME)
                elif detected & Obstacle.HOME_CORNER:
                    self._stop_now(MoveResult.OBSTACLE_CORNER)
                else:
                    self._stop_now(MoveResult.OBSTACLE_TAG)

    @sync(package_lock)
    def _obstacle(self, msg):
        self.Obstacles &= ~msg.mask
        self.Obstacles |= msg.msg
        self.__check_obstacles()

    @sync(package_lock)
    def _odom(self, msg):
        self.OdomLocation.Odometry = msg

    def drive(self, linear, angular, mode):
        t = Twist()
        t.linear.x = linear
        t.angular.y = mode  # the drive mode rides in the otherwise unused y field
        t.angular.z = angular
        self.driveControl.publish(t)

    def print_debug(self, msg):
        if self.dbg_msg is None or self.dbg_msg != msg:
            s = String()
            s.data = msg
            self.state_machine.publish(s)
        self.dbg_msg = msg

    @sync(package_lock)
    def run(self):
        if self.CurrentState == State.STATE_IDLE:
            self.print_debug('IDLE')

            if self.Doing is not None:
                if self.Doing.sema is not None:
                    self.Doing.sema.release()
                self.Doing = None

            if self.Work.empty():
                # Let the joystick drive.
                lin = self.JoystickCommand.axes[4] * State.DRIVE_SPEED
                ang = self.JoystickCommand.axes[3] * State.TURN_SPEED
                if abs(lin) < 0.1 and abs(ang) < 0.1:
                    self.drive(0, 0, State.DRIVE_MODE_STOP)
                else:
                    self.drive(lin, ang, State.DRIVE_MODE_PID)
            else:
                self.Doing = self.Work.get(False)

                if self.Doing.request.timer > 0:
                    self.TimerCount = self.Doing.request.timer * 10
                    self.CurrentState = State.STATE_TIMED
                else:
                    if self.Doing.request.r < 0:
                        self.Doing.request.theta = 0

                    req_theta = self.Doing.request.theta
                    if req_theta > 0:
                        req_theta += State.ROTATE_THRESHOLD / 2.0
                    elif req_theta < 0:
                        req_theta -= State.ROTATE_THRESHOLD / 2.0

                    req_r = self.Doing.request.r
                    if req_r > 0:
                        req_r += State.GOAL_DISTANCE_OK / 2.0
                    elif req_r < 0:
                        req_r -= State.GOAL_DISTANCE_OK / 2.0

                    if self.Doing.request.linear > State.DRIVE_SPEED_MAX:
                        self.Doing.request.linear = State.DRIVE_SPEED_MAX
                    elif self.Doing.request.linear <= 0:
                        self.Doing.request.linear = State.DRIVE_SPEED

                    if self.Doing.request.angular > State.TURN_SPEED_MAX:
                        self.Doing.request.angular = State.TURN_SPEED_MAX
                    elif self.Doing.request.angular <= 0:
                        self.Doing.request.angular = State.TURN_SPEED

                    cur = self.OdomLocation.get_pose()
                    self.Goal = Pose2D()
                    self.Goal.theta = cur.theta + req_theta
                    self.Goal.x = cur.x + req_r * math.cos(self.Goal.theta)
                    self.Goal.y = cur.y + req_r * math.sin(self.Goal.theta)
                    self.Start = cur

                    if self.Doing.request.r < 0:
                        self.CurrentState = State.STATE_REVERSE
                    else:
                        self.CurrentState = State.STATE_TURN

                self.__check_obstacles()

        elif self.CurrentState == State.STATE_TURN:
            self.print_debug('TURN')
            self.__check_obstacles()
            cur = self.OdomLocation.get_pose()
            heading_error = angles.shortest_angular_distance(
                cur.theta, self.Goal.theta)
            if abs(heading_error) > State.ROTATE_THRESHOLD:
                if heading_error < 0:
                    self.drive(0, -self.Doing.request.angular,
                               State.DRIVE_MODE_PID)
                else:
                    self.drive(0, self.Doing.request.angular,
                               State.DRIVE_MODE_PID)
            else:
                self.CurrentState = State.STATE_DRIVE
                self.drive(0, 0, State.DRIVE_MODE_STOP)

        elif self.CurrentState == State.STATE_DRIVE:
            self.print_debug('DRIVE')
            self.__check_obstacles()
            cur = self.OdomLocation.get_pose()
            heading_error = angles.shortest_angular_distance(
                cur.theta, self.Goal.theta)
            goal_angle = angles.shortest_angular_distance(
                cur.theta, math.atan2(self.Goal.y - cur.y,
                                      self.Goal.x - cur.x))
            if self.OdomLocation.at_goal(
                    self.Goal, State.GOAL_DISTANCE_OK
            ) or abs(goal_angle) > State.DRIVE_ANGLE_ABORT:
                self.Goal = None
                self.CurrentState = State.STATE_IDLE
                self.drive(0, 0, State.DRIVE_MODE_STOP)

            elif abs(heading_error) > State.DRIVE_ANGLE_ABORT / 2:
                self._stop_now(MoveResult.PATH_FAIL)
                self.drive(0, 0, State.DRIVE_MODE_STOP)
            else:
                self.drive(self.Doing.request.linear,
                           heading_error * State.HEADING_RESTORE_FACTOR,
                           State.DRIVE_MODE_PID)

        elif self.CurrentState == State.STATE_REVERSE:
            self.print_debug('REVERSE')
            self.__check_obstacles()
            cur = self.OdomLocation.get_pose()
            heading_error = angles.shortest_angular_distance(
                cur.theta, self.Goal.theta)
            goal_angle = angles.shortest_angular_distance(
                math.pi + cur.theta,
                math.atan2(self.Goal.y - cur.y, self.Goal.x - cur.x))
            if self.OdomLocation.at_goal(
                    self.Goal, State.GOAL_DISTANCE_OK
            ) or abs(goal_angle) > State.DRIVE_ANGLE_ABORT:
                self.Goal = None
                self.CurrentState = State.STATE_IDLE
                self.drive(0, 0, State.DRIVE_MODE_STOP)
            elif abs(heading_error) > State.DRIVE_ANGLE_ABORT / 2:
                self._stop_now(MoveResult.PATH_FAIL)
                self.drive(0, 0, State.DRIVE_MODE_STOP)
            else:
                self.drive(-State.REVERSE_SPEED,
                           heading_error * State.HEADING_RESTORE_FACTOR,
                           State.DRIVE_MODE_PID)

        elif self.CurrentState == State.STATE_TIMED:
            self.print_debug('TIMED')
            self.__check_obstacles()
            if self.Doing.request.linear == 0 and self.Doing.request.angular == 0:
                self.drive(0, 0, State.DRIVE_MODE_STOP)
            else:
                self.drive(self.Doing.request.linear,
                           self.Doing.request.angular, State.DRIVE_MODE_PID)

            if self.TimerCount == 0:
                self.CurrentState = State.STATE_IDLE
                self.drive(0, 0, State.DRIVE_MODE_STOP)
            else:
                self.TimerCount = self.TimerCount - 1

    def do_initial_config(self):
        # Do initial configuration.
        params = {
            "DRIVE_SPEED": State.DRIVE_SPEED,
            "REVERSE_SPEED": State.REVERSE_SPEED,
            "TURN_SPEED": State.TURN_SPEED,
            "HEADING_RESTORE_FACTOR": State.HEADING_RESTORE_FACTOR,
            "GOAL_DISTANCE_OK": State.GOAL_DISTANCE_OK,
            "ROTATE_THRESHOLD": State.ROTATE_THRESHOLD,
            "DRIVE_ANGLE_ABORT": State.DRIVE_ANGLE_ABORT,
        }
        dyn_client = Client('mobility')
        dyn_client.update_configuration(params)
        print('Initial configuration sent.')
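
The request handling in run() pads the requested rotation by ROTATE_THRESHOLD/2 and the distance by GOAL_DISTANCE_OK/2, so the controller aims slightly past its tolerance band instead of stalling at its edge. A standalone sketch of that goal computation (compute_goal is a hypothetical helper, not part of the node):

import math

def compute_goal(cur_x, cur_y, cur_theta, req_r, req_theta,
                 rotate_threshold, goal_distance_ok):
    """Mirror of the goal math in run(): pad theta and r by half a
    threshold, then project the goal point along the padded heading."""
    if req_theta > 0:
        req_theta += rotate_threshold / 2.0
    elif req_theta < 0:
        req_theta -= rotate_threshold / 2.0
    if req_r > 0:
        req_r += goal_distance_ok / 2.0
    elif req_r < 0:
        req_r -= goal_distance_ok / 2.0
    goal_theta = cur_theta + req_theta
    return (cur_x + req_r * math.cos(goal_theta),
            cur_y + req_r * math.sin(goal_theta),
            goal_theta)

# e.g. turn ~90 degrees then drive 1 m from the origin:
print compute_goal(0, 0, 0, 1.0, math.pi / 2, math.pi / 16, 0.1)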
Ejemplo n.º 39
0
class DummyHardwareBase(Thread):
    """Represents a dummy hardware device using the IPbus control system.
    
    This class can receive, act on, and respond to IPbus packets.  It will
    keep track of registers that are written to, but its initial register
    state is zero throughout the entire address space.
    
    It operates with two threads.  The main program thread receives incoming
    packets and queues them for action and response by a transaction handler
    thread.
    
    Note: Requesting a read on register address 0xffffffff will be interpreted
    as a request to reset the internal state of the hardware, so all registers
    will be set to zero.
    """

    SOCKET_BUFFER_SIZE = 32768
    REQUEST_QUEUE_SIZE = 1024

    def __init__(self):
        Thread.__init__(self)
        self._registers = {}
        self._transaction_queue = Queue(DummyHardwareBase.REQUEST_QUEUE_SIZE)
        self._transactionCounter = 0
        self.stopServing = False
        # This maps request type number to the function that handles the request.
        self._requestTypeHandlerMap = {
            IPbusHeader.TYPE_ID_READ:
            self._handleReadRequest,
            IPbusHeader.TYPE_ID_WRITE:
            self._handleWriteRequest,
            IPbusHeader.TYPE_ID_NON_INCR_READ:
            self._handleFifoReadRequest,
            IPbusHeader.TYPE_ID_NON_INCR_WRITE:
            self._handleFifoWriteRequest,
            IPbusHeader.TYPE_ID_RMW_BITS:
            self._handleReadModifyWriteBitsRequest,
            IPbusHeader.TYPE_ID_RMW_SUM:
            self._handleReadModifyWriteSumRequest,
            IPbusHeader.TYPE_ID_RSVD_ADDR_INFO:
            self._handleGetReservedAddrInfoRequest
        }

    def serveForever(self):
        raise NotImplementedError("DummyHardwareBase is an Abstract Base Class!\n" \
                                  "Please use a concrete implementation such as "\
                                  "DummyHardwareUdp or DummyHardwareTcp!")

    def run(self):
        """Start the transaction handler thread"""
        chipsLog.info("Transaction handler thread started")
        while not self.stopServing:
            if self._transaction_queue.empty():
                sleep(0.001)
            else:
                transaction = self._transaction_queue.get()
                self._actAndRespond(transaction)
        chipsLog.info("Transaction handler thread stopping")

    def closeSockets(self):
        """Allows you to manually close any sockets that may have been opened."""
        try:
            self._socket.close()
            chipsLog.debug("Socket closed successfully.")
        except:
            chipsLog.warn("Error closing socket!")

    def _actAndRespond(self, transaction):
        """Performs the required action and returns the response for a given transaction"""
        self._transactionCounter += 1
        chipsLog.debug("*** Performing transaction #" +
                       str(self._transactionCounter) + " ***")
        try:
            transaction.deserialiseRequests()

            for request in transaction.requests:
                transaction.appendResponse(
                    self._requestTypeHandlerMap[IPbusHeader.getTypeId(
                        request.getHeader())](request))

            transaction.serialiseResponses()
            chipsLog.debug("Sending response packet")
            self._socketSend(transaction)
            chipsLog.debug("Response packet sent!")
            chipsLog.debug("*** Transaction #" +
                           str(self._transactionCounter) + " completed! ***\n")
        except ChipsException, err:
            chipsLog.error("ERROR! Transaction #" +
                           str(self._transactionCounter) +
                           " could not be successfully processed:\n\t" +
                           str(err))
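
A rough sketch of what a concrete UDP subclass might look like, following the abstract class's comments rather than any verified implementation: Transaction is a hypothetical wrapper that records the client address so _socketSend() can route the reply, and `import socket` is assumed.

class DummyHardwareUdp(DummyHardwareBase):
    def __init__(self, port):
        DummyHardwareBase.__init__(self)
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._socket.bind(('', port))

    def serveForever(self):
        self.start()                 # launch the transaction handler thread
        while not self.stopServing:
            data, addr = self._socket.recvfrom(self.SOCKET_BUFFER_SIZE)
            # Queue the raw packet; the handler thread deserialises and replies.
            self._transaction_queue.put(Transaction(data, addr))  # hypothetical wrapper

    def _socketSend(self, transaction):
        # .serialisedResponses and .addr are assumed attributes of Transaction.
        self._socket.sendto(transaction.serialisedResponses, transaction.addr)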
Ejemplo n.º 40
0
class SimpleWatcher(LocalWatcher):
    """
    Only handle modified event in this class.  As we cannot
    rely on DELETE/CREATE etc just using the modification
    with a folder check should do the trick.
    """

    def __init__(self, engine, dao):
        super(SimpleWatcher, self).__init__(engine, dao)
        self._scan_delay = 1
        self._to_scan = dict()
        self._last_scan = dict()

    def _push_to_scan(self, info):
        if isinstance(info, FileInfo):
            ref = info.path
            super(SimpleWatcher, self)._push_to_scan(info)
            return
        else:
            ref = info
        log.warning("should scan: %s", ref)
        self._to_scan[ref] = current_milli_time()

    def empty_events(self):
        return self._watchdog_queue.empty() and len(self._to_scan) == 0

    def get_scan_delay(self):
        return self._scan_delay

    def is_pending_scan(self, ref):
        return ref in self._to_scan

    def handle_watchdog_move(self, evt, _, rel_path):
        # Dest
        dst_path = normalize_event_filename(evt.dest_path)
        if self.client.is_temp_file(os.path.basename(dst_path)):
            return
        log.warning("handle watchdog move: %r", evt)
        dst_rel_path = self.client.get_path(dst_path)
        doc_pair = self._dao.get_state_from_local(rel_path)
        # Defensively scan the parents of src_path and dst_path - not sure it is needed
        self._push_to_scan(os.path.dirname(rel_path))
        if self.client.is_inside(dst_path):
            self._push_to_scan(os.path.dirname(dst_rel_path))
        if doc_pair is None:
            # Scan the new parent instead
            log.warning("NO PAIR")
            return
        # If it is not yet created there is no need to move it
        if doc_pair.local_state != 'created':
            doc_pair.local_state = 'moved'
        local_info = self.client.get_info(dst_rel_path, raise_if_missing=False)
        if local_info is None:
            log.warning("Should not disapear")
            return
        self._dao.update_local_state(doc_pair, local_info, versionned=True)
        log.warning("has update with moved status")

    def handle_watchdog_event(self, evt):
        self._metrics['last_event'] = current_milli_time()
        # For creation and deletion just update the parent folder
        src_path = normalize_event_filename(evt.src_path)
        rel_path = self.client.get_path(src_path)
        file_name = os.path.basename(src_path)
        if self.client.is_temp_file(file_name) or rel_path == '/.partials':
            return
        if evt.event_type == 'moved':
            self.handle_watchdog_move(evt, src_path, rel_path)
            return
        # Don't care about an ignored file unless it is moved
        if self.client.is_ignored(os.path.dirname(rel_path), file_name):
            return
        log.warning("Got evt: %r", evt)
        if len(rel_path) == 0 or rel_path == '/':
            self._push_to_scan('/')
            return
        # If not modified then we will scan the parent folder later
        if evt.event_type != 'modified':
            log.warning(rel_path)
            parent_rel_path = os.path.dirname(rel_path)
            if parent_rel_path == "":
                parent_rel_path = '/'
            self._push_to_scan(parent_rel_path)
            return
        doc_pair = self._dao.get_state_from_local(rel_path)
        if not os.path.exists(src_path):
            log.warning("Event on a disappeared file: %r %s %s", evt, rel_path, file_name)
            return
        if doc_pair is not None and doc_pair.processor > 0:
            log.warning("Don't update as in process %r", doc_pair)
            return
        if isinstance(evt, DirModifiedEvent):
            self._push_to_scan(rel_path)
        else:
            local_info = self.client.get_info(rel_path, raise_if_missing=False)
            if local_info is None or doc_pair is None:
                # Suspicious
                return
            digest = local_info.get_digest()
            if doc_pair.local_state != 'created':
                if doc_pair.local_digest != digest:
                    doc_pair.local_state = 'modified'
            doc_pair.local_digest = digest
            log.warning("file is updated: %r", doc_pair)
            self._dao.update_local_state(doc_pair, local_info, versionned=True)

    def _execute(self):
        try:
            self._init()
            if not self.client.exists('/'):
                self.rootDeleted.emit()
                return
            self._action = Action("Setup watchdog")
            self._watchdog_queue = Queue()
            self._setup_watchdog()
            log.debug("Watchdog setup finished")
            self._action = Action("Full local scan")
            self._scan()
            self._end_action()
            # Check the Windows dequeue and folder scan only every 100 loops (every 1 s)
            current_time_millis = int(round(time() * 1000))
            self._win_delete_interval = current_time_millis
            self._win_folder_scan_interval = current_time_millis
            i = 0
            while True:
                self._interact()
                sleep(0.01)
                while not self._watchdog_queue.empty():
                    # Don't retest if a local scan is already pending
                    evt = self._watchdog_queue.get()
                    self.handle_watchdog_event(evt)
                # Check to scan
                i += 1
                if i % 100 != 0:
                    continue
                i = 0
                threshold_time = current_milli_time() - 1000 * self._scan_delay
                # Need to create a list of to scan as the dictionary cannot grow while iterating
                local_scan = []
                for path, last_event_time in self._to_scan.iteritems():
                    if last_event_time < threshold_time:
                        local_scan.append(path)
                for path in local_scan:
                    self._scan_path(path)
                    # Don't delete if the timestamp has changed since the last scan
                    if self._to_scan[path] < threshold_time:
                        del self._to_scan[path]
                if self._delete_files:
                    # Enforce a scan of all other folders so we don't lose track of moved files
                    self._scan_handle_deleted_files()
                    self._scan_handle_deleted_files()
        except ThreadInterrupt:
            raise
        finally:
            self._stop_watchdog()

    def _scan_handle_deleted_files(self):
        log.warning("delete files are: %r", self._delete_files)
        # Need to check for the current file
        to_deletes = copy.copy(self._delete_files)
        # Enforce the scan of all folders to check the file hasn't moved there
        for path, _ in self._to_scan.iteritems():
            self._scan_path(path)
        for deleted in to_deletes:
            if deleted not in self._delete_files:
                continue
            if deleted not in self._protected_files:
                self._dao.delete_local_state(self._delete_files[deleted])
            else:
                del self._protected_files[deleted]
            # Really delete file then
            del self._delete_files[deleted]

    def _scan_path(self, path):
        if self.client.exists(path):
            log.warning("Scan delayed folder: %s:%d", path, len(self.client.get_children_info(path)))
            local_info = self.client.get_info(path, raise_if_missing=False)
            if local_info is not None:
                self._scan_recursive(local_info, False)
                log.warning("scan delayed done")
        else:
            log.warning("Cannot scan delayed deleted folder: %s", path)
Ejemplo n.º 41
0
class PLC(object):
    def __init__(self, name=None):
        self.connected_sensors = {}
        self.slaveid = 0x00
        self.name = name
        if not name:
            self.name = socket.gethostname()
        self.plcrpcclient = PLCRPCClient(rpc_server="scadasim",
                                         rpc_port=8000,
                                         plc=self.name)
        self.registered = False

        identity = ModbusDeviceIdentification()
        identity.VendorName = 'scadasim'
        identity.ProductCode = 'PLC'
        identity.VendorUrl = 'https://github.com/sintax1/scadasim-plc'
        identity.ProductName = 'scadasim-PLC'
        identity.ModelName = 'SimPLC'
        identity.MajorMinorRevision = '1.0'
        self.identity = identity
        self.speed = 0.2
        self.queue = Queue()
        self.context = None

    def _initialize_store(self, max_register_size=100):
        store = {}

        store[self.slaveid] = CallbackModbusSlaveContext(
            self.queue,
            di=ModbusSequentialDataBlock(0, [False] * 100),
            co=ModbusSequentialDataBlock(0, [False] * 100),
            hr=ModbusSequentialDataBlock(0, [0] * 100),
            ir=ModbusSequentialDataBlock(0, [0] * 100))
        self.context = ModbusServerContext(slaves=store, single=False)

    def _get_sensor_data(self):

        sensor_data = self.plcrpcclient.readSensors()

        for sensor in sensor_data:
            register = sensor_data[sensor]['register_type']

            address = int(sensor_data[sensor]['data_address'])

            if register in ['c', 'd']:
                value = bool(sensor_data[sensor]['value'])
            elif register in ['h', 'i']:
                value = int(sensor_data[sensor]['value'])

            address = address + 1  # section 4.4 of specification
            self.context[self.slaveid].store[register].setValues(
                address, [value])

    def _registerPLC(self):
        self.slaveid = self.plcrpcclient.registerPLC()
        self.registered = True
        self._initialize_store()
        log.debug("[PLC][%s] Registered on scadasim rpc" % self.name)
        return True

    def update(self):
        log.debug("[PLC][%s] Updating PLC values with sensor values" % self)

        while not self.queue.empty():
            # Update scadasim with any new values from Master
            fx, address, values = self.queue.get()
            log.debug("[PLC][%s] setting fx: %s register:%s to value:%s" %
                      (self.name, fx, address, values))
            self.plcrpcclient.setValues(fx=fx, address=address, values=values)

        self._get_sensor_data()

        delay = (-time.time() % self.speed)
        t = threading.Timer(delay, self.update, ())
        t.daemon = True
        t.start()

    def set_speed(self, speed):
        self.speed = speed

    def __repr__(self):
        return "%s" % self.name

    def main(self):

        log.debug("[PLC][%s] Initialized" % self.name)
        while not self.registered:
            log.debug("[PLC][%s] Trying to register with scadasim rpc" %
                      self.name)
            try:
                self._registerPLC()
            except KeyError:
                log.warn(
                    """[PLC][%s] PLC not found within scadasim. Verify Docker
                     Compose container names match list of plcs in scadasim
                     config""" % self.name)

            time.sleep(1)

        log.debug("[PLC][%s] Starting update service" % self.name)
        self.update()

        log.debug("[PLC][%s] Starting MODBUS Server" % self.name)
        StartTcpServer(self.context,
                       identity=self.identity,
                       address=("0.0.0.0", 502))
Ejemplo n.º 42
0
#!/usr/bin/env python
# encoding:utf-8
"""
@software: PyCharm
@file: 队列.py
@time: 2017/4/30 9:37
"""

from collections import deque  # double-ended queue

# d = deque(maxlen = 30)            # cap the maximum number of elements
d = deque()
d.append(1)  # append an element on the right
d.appendleft(3)  # append an element on the left
d.clear()  # empty the queue
d.count(3)  # count occurrences of a value
d.extend([2, 4, 5, 3, 1])  # extend on the right
d.extendleft([9, 8, 0])  # extend on the left
d.pop()  # remove and return the rightmost element
d.popleft()  # remove and return the leftmost element
d.remove(9)  # remove the first occurrence of a value
d.reverse()  # reverse the queue in place
d.rotate(3)  # rotate N steps to the right (to the left if N is negative)

from Queue import Queue

q = Queue()
q.put([1, 2, 3, 4])  # put an item (here, one whole list)
q.get()  # remove and return an item from the queue
q.empty()  # return True if the queue is empty, otherwise False
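
To tie the Queue primitives above together, here is a small producer/consumer sketch (names are illustrative): a worker thread drains the queue, a None sentinel stops it, and q.join() blocks until every put() has been matched by task_done().

from Queue import Queue
from threading import Thread

def worker(q):
    while True:
        item = q.get()          # blocks until an item is available
        if item is None:        # sentinel: time to exit
            q.task_done()
            break
        print 'processing', item
        q.task_done()           # pairs with q.join() in the producer

q = Queue()
t = Thread(target=worker, args=(q,))
t.start()
for item in [1, 2, 3]:
    q.put(item)
q.put(None)                     # ask the worker to stop
q.join()                        # wait until every put() has been processed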
Ejemplo n.º 43
0
class Subscriber(threading.Thread):
    """
    Thread responsible for event subscriptions.
    Issues subscriptions, creates the websocket, and refreshes the
    subscriptions before timer expiry.  It also reissues the
    subscriptions when the APIC login is refreshed.
    """
    def __init__(self, apic):
        threading.Thread.__init__(self)
        self._apic = apic
        self._subscriptions = {}
        self._ws = None
        self._ws_url = None
        self._refresh_time = 45
        self._event_q = Queue()
        self._events = {}
        self._exit = False

    def exit(self):
        """
        Indicate that the thread should exit.
        """
        self._exit = True

    def _send_subscription(self, url):
        """
        Send the subscription for the specified URL.

        :param url: URL string to issue the subscription
        """
        resp = self._apic.get(url)
        subscription_id = json.loads(resp.text)['subscriptionId']
        self._subscriptions[url] = subscription_id
        return resp

    def refresh_subscriptions(self):
        """
        Refresh all of the subscriptions.
        """
        for subscription in self._subscriptions:
            subscription_id = self._subscriptions[subscription]
            refresh_url = '/api/subscriptionRefresh.json?id=' + subscription_id
            self._apic.get(refresh_url)

    def _open_web_socket(self, use_secure=True):
        """
        Opens the web socket connection with the APIC.

        :param use_secure: Boolean indicating whether the web socket
                           should be secure.  Default is True.
        """
        sslopt = {}
        if use_secure:
            sslopt['cert_reqs'] = ssl.CERT_NONE
            self._ws_url = 'wss://%s/socket%s' % (self._apic.ipaddr,
                                                  self._apic.token)
        else:
            self._ws_url = 'ws://%s/socket%s' % (self._apic.ipaddr,
                                                 self._apic.token)

        kwargs = {}
        if self._ws is not None:
            if self._ws.connected:
                self._ws.close()
                self.event_handler_thread.exit()
        self._ws = create_connection(self._ws_url, sslopt=sslopt, **kwargs)
        self.event_handler_thread = EventHandler(self)
        self.event_handler_thread.daemon = True
        self.event_handler_thread.start()

    def _resubscribe(self):
        """
        Reissue the subscriptions.
        Used when the APIC login timeout occurs and a new subscription
        must be issued instead of simply a refresh.  Not meant to be called
        directly by end user applications.
        """
        self._process_event_q()
        urls = []
        for url in self._subscriptions:
            urls.append(url)
        self._subscriptions = {}
        for url in urls:
            self.subscribe(url)

    def _process_event_q(self):
        """
        Put the event into correct bucket based on URLs that have been
        subscribed.
        """
        if self._event_q.empty():
            return

        while not self._event_q.empty():
            event = self._event_q.get()
            event = json.loads(event)
            # Find the URL for this event
            url = None
            for k in self._subscriptions:
                for sub_id in event['subscriptionId']:
                    if self._subscriptions[k] == str(sub_id):
                        url = k
                        break
            if url not in self._events:
                self._events[url] = []
            self._events[url].append(event)

    def subscribe(self, url):
        """
        Subscribe to a particular APIC URL.  Used internally by the
        Class and Instance subscriptions.

        :param url: URL string to send as a subscription
        """
        # Check if already subscribed.  If so, skip
        if url in self._subscriptions:
            return

        if self._ws is not None:
            if not self._ws.connected:
                self._open_web_socket('https://' in url)

        return self._send_subscription(url)

    def has_events(self, url):
        """
        Check if a particular APIC URL subscription has any events.
        Used internally by the Class and Instance subscriptions.

        :param url: URL string to check for pending events
        """
        self._process_event_q()
        if url not in self._events:
            return False
        result = len(self._events[url]) != 0
        return result

    def get_event(self, url):
        """
        Get an event for a particular APIC URL subscription.
        Used internally by the Class and Instance subscriptions.

        :param url: URL string to get pending event
        """
        if url not in self._events:
            raise ValueError
        event = self._events[url].pop(0)
        return event

    def unsubscribe(self, url):
        """
        Unsubscribe from a particular APIC URL.  Used internally by the
        Class and Instance subscriptions.

        :param url: URL string to unsubscribe
        """
        if url not in self._subscriptions:
            return
        del self._subscriptions[url]
        if not self._subscriptions:
            self._ws.close()

    def run(self):
        while not self._exit:
            # Sleep for the refresh interval (45 s), then refresh the subscriptions
            time.sleep(self._refresh_time)
            self.refresh_subscriptions()
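
A hedged usage sketch for the Subscriber above: apic stands for an already-logged-in session object exposing .get(), .ipaddr and .token, and the query URL is illustrative. Note that the subscribe() guard above only reopens an existing websocket, so the socket is assumed to have been opened by the login machinery.

import time

query_url = '/api/class/fvTenant.json?subscription=yes'   # illustrative URL

subscriber = Subscriber(apic)     # apic: logged-in session (placeholder)
subscriber.daemon = True
subscriber.start()                # background refresh loop
subscriber.subscribe(query_url)

while True:
    if subscriber.has_events(query_url):
        print subscriber.get_event(query_url)
    time.sleep(1)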
Ejemplo n.º 44
0
print 'MAP LEGEND\n0: clear space\n1: wall/obstacle\n'

show(matrix)

q = Queue()

startx, starty = 1, 7
#destx,desty=3,2 # toilet
#destx,desty=12,2 # bedroom
destx, desty = 13, 6  # kitchen
#destx,desty=1,7 # entrace

row, col = desty, destx

q.put((row, col))
# BFS outward from the destination, labelling each reachable cell with the
# move (L/R/U/D) that takes you one step closer to the destination.
while not q.empty():
    row, col = q.get()
    if col + 1 < numcols and matrix[row][col + 1] == "0":
        q.put((row, col + 1))
        matrix[row][col + 1] = "L"
    if row + 1 < numrows and matrix[row + 1][col] == "0":
        q.put((row + 1, col))
        matrix[row + 1][col] = "U"
    if 0 <= col - 1 and matrix[row][col - 1] == "0":
        q.put((row, col - 1))
        matrix[row][col - 1] = "R"
    if 0 <= row - 1 and matrix[row - 1][col] == "0":
        q.put((row - 1, col))
        matrix[row - 1][col] = "D"

row, col = starty, startx
Ejemplo n.º 45
0
class Acceptor(object):
    """
    Describes the current state and events associated with this bill acceptor (BA)

    Note:
        There will only be one state at a time
        Multiple events may be set

    Args:
        None

    """

    # Set to true for random cheat events
    cheating = False


    def __init__(self):
        # Set to False to kill
        self.running = True

        # Accept all notes by default
        self._enables = 0x07
        # Say LRC is present for now
        self._lrc_ok = True
        # Acceptor has its own lock
        self._mutex = Lock()
        # data byte 0
        self._state = 0x01
        # data byte 1
        self._event = 0x10
        # byte 2 - lower 3 bits
        self._ext = 0x01
        # byte 2 Upper 5 bits
        self._value = 0x00
        # byte 3 is reserved
        self._resd = 0x00
        # byte 4 is model (00-7FH)
        self._model = 0x01
        # byte 5 is software revision (00-7FH)
        self._rev = 0x01

        self._note_count = 0
        self._cheat_flag = False
        
        self._ack = -1

        # Some states are only sent once, handle them in a queue
        self._b0_ephemeral = Queue()
        self._b1_ephemeral = Queue()
        self._b2_ephemeral = Queue()

        # Background worker thread
        self._serial_thread = None

        # Used to recall in case of NAK
        self._last_msg = None

        # Watchdog: disables acceptance if the master stops polling us
        self._mon = monitor.Monitor(5, self._timedout)
        self._mon.start()

        # Simulate power up
        power_up = Thread(target=self._power_up)
        power_up.start()


    def enable_note(self, index):
        """
        Set note enable bit so Acceptor accepts note

        Args:
            index -- integer index (1-7) of note to enable
        """
        if not isinstance(index, int):
            index = int(index)
        if index > 0 and index <= 7:
            # Turn value into bitwise flag
            flag = pow(2, index - 1)
            self._enables |= flag
            print "Enabled note {:d}".format(index)
        else:
            print "Invalid enable {:d}".format(index)

    def disable_note(self, index):
        """
        Clear note enable bit so Acceptor rejects note

        Args:
            index -- integer index (1-7) of note to disable
        """
        if not isinstance(index, int):
            try:
                index = int(index)
            except (TypeError, ValueError):
                print "Invalid Note #"
                return
                
        if index > 0 and index <= 7:
            # Turn value into bitwise flag
            flag = pow(2, index - 1)      
            self._enables &= ~(flag)
            print "Disabled note {:d}.".format(index)
        else:
            print "Invalid disable {:d}".format(index)


    def start(self, portname):
        """
        Start Acceptor in a non-daemon thread

        Args:
            portname -- string name of the port to open and listen on

        Returns:
            None

        """
        self._serial_thread = Thread(target=self._serial_runner,
                                     args=(portname,))
        # Per https://docs.python.org/2/library/threading.html#thread-objects
        # 16.2.1: Daemon threads are abruptly stopped, set to false for proper
        # release of resources (i.e. our comm port)
        self._serial_thread.daemon = False
        self._serial_thread.start()


    def stop(self):
        """
        Blocks until Acceptor can safely be stopped

        Args:
            None

        Returns:
            None
        """
        print "Shutting down..."
        self.running = False
        self._serial_thread.join()
        self._mon.stop()


    def parse_cmd(self, cmd):
        """
        Applies the given command to modify the state/event of
        this acceptor

        Args:
            cmd -- string arg

        Returns:
            Int -- 0 if okay, 1 to exit, 2 for help, 3 for autopilot
        """
        if cmd == 'Q':
            return 1
        if cmd == '?' or cmd == 'H':
            return 2
        if cmd == 'A':
            return 3

        self._mutex.acquire()

        # Handle bill feed command
        if cmd.isdigit():
            val = int(cmd, 10)
            if val == 0 or val > 7:
                print "Invalid Bill Number {:d}".format(val)
            else:
                # Convert value to bitwise flag (2^[val-1])
                flag = pow(2, val - 1)
                if flag & self._enables:
                    # Are we idle?
                    if self._state & 0x01 == 1:
                        feed = Thread(target=self._start_accepting, args=(val,))
                        feed.start()
                    else:
                        # Hold the phone, revoke anything in the state queue
                        # Set back to idle because we just had a double-insertion
                        self._b0_ephemeral = Queue()
                        self._state = 0x01
                        self._b1_ephemeral.put(0x02)
                else:
                    # Note is in range but disabled; send reject message
                    print "Note {:d} disabled".format(val)
                    self._b1_ephemeral.put(0x02)

        # Handle bill enable/disable command
        elif len(cmd) == 2:
            if cmd[0] == 'D':
                self.disable_note(cmd[1])
            elif cmd[0] == 'E':
                self.enable_note(cmd[1])
            else:
                print "Unkown E/D command {:s}".format(cmd)

        elif cmd == 'C':
            # Toggle random cheating events
            Acceptor.cheating = not Acceptor.cheating
            if Acceptor.cheating:
                print "Cheat Mode Enabled: {:d}% Chance of Cheat".format(
                    CHEAT_RATE)
            else:
                print "Cheat Mode Disabled"
        elif cmd == 'R':
            # Put Rejected
            self._b1_ephemeral.put(0x02)
        elif cmd == 'J':
            # Toggle Jammed
            self._event = self._event ^ 0x04
        elif cmd == 'F':
            # Toggle Stacker Full:
            self._event = self._event ^ 0x08
        elif cmd == 'P':
            # Toggle Cashbox Present
            self._lrc_ok = not self._lrc_ok
        elif cmd == 'W':
            # Toggle Powering Up
            self._ext = self._ext ^ 0x01
        elif cmd == 'I':
            # Put Invalid Command
            self._b2_ephemeral.put(0x02)
        elif cmd == 'X':
            # Put Unit Failure
            self._b2_ephemeral.put(0x04)
        elif cmd == 'Y':
            # Set note count back to zero
            self._note_count = 0
        elif cmd == 'L':
            print format(self._enables, '#010b')
        else:
            print "Unknown Command: {:s}".format(cmd)


        self._mutex.release()
        return 0


    def _serial_runner(self, portname):
        """
        Transmits state of Acceptor over serial port using global poll rate

        Args:
            portname -- string portname to open

        Returns:
            None
        """

        ser = serial.Serial(
            port=portname,
            baudrate=9600,
            bytesize=serial.SEVENBITS,
            parity=serial.PARITY_EVEN,
            stopbits=serial.STOPBITS_ONE
        )

        try:

            while ser.isOpen() and self.running:

                # Wait for data
                serial_in = []
                while ser.inWaiting() > 0:
                    serial_in = list(bytearray(ser.read(8)))
                if len(serial_in) == 0:
                    continue


                self._mon.reset()
                self._mutex.acquire()
                
                # Update our enable/disable register
                self._enables = serial_in[3]
                
                # Check and toggle ACK
                mack = serial_in[2] & 1
                
                if self._ack == -1:
                    self._ack = serial_in[2] & 1
                
                self._ack ^= 1
                    
                # Build next message
                msg = self._get_message()
    
                # Set the ACK
                msg[2] |= mack
    
                # Check if we need to stack or return
                self._accept_or_return(serial_in)
    
                # Set the checksum
                msg[10] = msg[1] ^ msg[2]
                for byte in xrange(3, 9):
                    msg[10] ^= msg[byte]
                    
                # Since we're locked, wipe out any value we may have sent
                # ... but only if we're idle so we're positive the master
                # got our credit message
                if msg[3] == 0x01:
                    self._value = 0x00


                # Send message to master
                ser.write(msg)                
                
                self._mutex.release()

                # Slow down a bit, our virtual environment is too fast
                time.sleep(SOFT_DELAY)

        except serial.SerialException:
            print 'Terminating serial thread'

        ser.close()
        return


    def _accept_or_return(self, master):
        """
        Process stack or return request from master

        Args:
            None

        Returns:
            None
        """
        # If we're in escrow and master says stack
        cmd = master[4]
        if ((cmd & 0x20) == 0x20) and (self._state == 0x04):
            self._accept_bill()
        # If we're in escrow and master says return
        elif ((cmd & 0x40) == 0x40) and (self._state == 0x04):
            self._return_bill()


    def _get_message(self):
        """
        Returns current message as byte array

        Args:
            None

        Returns:
            byte array
        """
        self._check_lrc()

        state = self._state
        event = self._event
        ext = self._ext

        # Pull all ephemerals from queue
        if not self._b0_ephemeral.empty():
            state |= self._b0_ephemeral.get_nowait()
        if not self._b1_ephemeral.empty():
            event |= self._b1_ephemeral.get_nowait()
        if not self._b2_ephemeral.empty():
            ext |= self._b2_ephemeral.get_nowait()


        msg = bytearray([0x02, 0x0B, 0x20, state, event,
                         (ext | (self._value << 3)), self._resd, self._model,
                         self._rev, 0x03, 0x3A])

        # Clear cheat flag if event set
        if ext & 0x01:
            self._cheat_flag = False

        self._last_msg = msg
        return msg


    def _check_lrc(self):
        """
        Checks the state of the LRC and sets the event if required

        Args:
            None

        Returns:
            None
        """
        if self._lrc_ok:
            self._event |= 0x10
        else:
            self._event &= ~(0x10)

        # Set stacker full if we have enough notes
        if self._note_count >= CASHBOX_SIZE:
            self._event |= 0x08


    def _power_up(self):
        """
        Simulate BA power up - Block for POWER_UP milliseconds

        Args:
            None

        Returns:
            None
        """
        time.sleep(POWER_UP)
        self._ext &= ~(0x01)


    def _start_accepting(self, val):
        """
        Blocks the calling thread as this simulates bill movement from idle to
            escrow.

        Args:
            val -- integer index of note (0-7)

        Returns:
            None
        """
        # If stacker is full, set the stacker full flag and reject note
        if self._note_count >= CASHBOX_SIZE:
            self._event |= 0x08
            self._b1_ephemeral.put(0x02)
        else:
            # Accepting
            self._state = 0x02

            if Acceptor.cheating:
                self._cheat()

            time.sleep(TRANSITION)
            # Only enter escrow mode if cheat flag is not tripped
            if not self._cheat_flag:
                # Escrow - Critical that both of these bits are set!
                self._mutex.acquire()
                self._state = 0x04
                self._value = val
                self._mutex.release()
            else:
                # Return to idle mode, set reject flag
                self._state = 0x01
                self._b1_ephemeral.put(0x02)
                self._cheat_flag = False


    def _accept_bill(self):
        """
        Simulate the movement of the bill from escrow to stacked

        Args:
            None

        Returns:
            None
        """
        # Stacking
        self._state = 0x08
        time.sleep(TRANSITION)
        # Stacked + Idle
        self._b0_ephemeral.put(0x10)
        self._state = 0x01
        self._note_count = self._note_count + 1


    def _return_bill(self):
        """
        Simulate the movement of the bill from escrow to returned

        Args:
            None

        Returns:
            None
        """
        # Returning
        self._state = 0x20
        time.sleep(TRANSITION)
        # Returned + Idle
        self._b0_ephemeral.put(0x50)
        self._state = 0x01


    def _cheat(self):
        """
        Randomly attempts to "cheat" the acceptor
        """
        if randint(1, 100) <= CHEAT_RATE:
            self._b1_ephemeral.put(0x01)
            self._cheat_flag = True

    def _timedout(self):
        """
        Disable the acceptor because the master has not spoken to us
        in too long

        Args:
            None

        Returns:
            None
        """
        print "Comm timeout"
        # Effectively stop all acceptance
        self._enables = 0
Ejemplo n.º 46
0
class SelectHub(object):
    """
  This class is a single select() loop that handles all Select() requests for
  a scheduler as well as timed wakes (i.e., Sleep()).
  """
    def __init__(self, scheduler):
        # We store tuples of (elapse-time, task)
        self._sleepers = []  # Sleeping items stored as a heap
        self._incoming = Queue()  # Threadsafe queue for new items

        self._scheduler = scheduler
        self._pinger = pox.lib.util.makePinger()

        self._ready = False

        self._thread = Thread(target=self._threadProc)
        self._thread.daemon = True
        self._thread.start()

        # Ugly busy wait for initialization
        #while self._ready == False:
        #  time.sleep(0.2)

    def _threadProc(self):
        tasks = {}
        timeouts = []
        expired = []

        while not self._scheduler._hasQuit:
            #print("SelectHub cycle")


            #NOTE: Everything you select on eventually boils down to file descriptors,
            #      which are unique, obviously.  It might be possible to leverage this
            #      to reduce hashing cost (i.e. by picking a really good hashing
            #      function), though this is complicated by wrappers, etc...
            rl = {}
            wl = {}
            xl = {}

            timeout = None
            timeoutTask = None

            now = time.time()

            expired = None

            for t, trl, twl, txl, tto in tasks.itervalues():
                if tto is not None:
                    if tto <= now:
                        # Already expired
                        if expired is None: expired = []
                        expired.append(t)
                        if tto - now > 0.1:
                            print("preexpired", tto, now, tto - now)
                        continue
                    tt = tto - now
                    if timeout is None or tt < timeout:
                        timeout = tt
                        timeoutTask = t

                if trl:
                    for i in trl:
                        rl[i] = t
                if twl:
                    for i in twl:
                        wl[i] = t
                if txl:
                    for i in txl:
                        xl[i] = t

            if expired:
                for t in expired:
                    del tasks[t]
                    self._return(t, ([], [], []))

            if timeout is None: timeout = CYCLE_MAXIMUM
            ro, wo, xo = select.select(rl.keys() + [self._pinger], wl.keys(),
                                       xl.keys(), timeout)

            if not ro and not wo and not xo and timeoutTask is not None:
                # IO is idle - dispatch timers / release timeouts
                del tasks[timeoutTask]
                self._return(timeoutTask, ([], [], []))
            else:
                # We have IO events
                if self._pinger in ro:
                    self._pinger.pongAll()
                    while not self._incoming.empty():
                        stuff = self._incoming.get(True)
                        task = stuff[0]
                        assert task not in tasks
                        tasks[task] = stuff
                    if len(ro) == 1 and len(wo) == 0 and len(xo) == 0:
                        # Just recycle
                        continue
                    ro.remove(self._pinger)

                # At least one thread is going to be resumed
                rets = {}
                for i in ro:
                    task = rl[i]
                    if task not in rets: rets[task] = ([], [], [])
                    rets[task][0].append(i)
                for i in wo:
                    task = wl[i]
                    if task not in rets: rets[task] = ([], [], [])
                    rets[task][1].append(i)
                for i in xo:
                    task = xl[i]
                    if task not in rets: rets[task] = ([], [], [])
                    rets[task][2].append(i)

                for t, v in rets.iteritems():
                    del tasks[t]
                    self._return(t, v)

    def registerSelect(self,
                       task,
                       rlist=None,
                       wlist=None,
                       xlist=None,
                       timeout=None,
                       timeIsAbsolute=False):
        if not timeIsAbsolute and timeout is not None:
            timeout += time.time()

        self._incoming.put((task, rlist, wlist, xlist, timeout))
        self._cycle()

    def _cycle(self):
        """
    Cycle the wait thread so that new timers or FDs can be picked up
    """
        self._pinger.ping()

    def registerTimer(self, task, timeToWake, timeIsAbsolute=False):
        """
    Register a task to be wakened up interval units in the future.
    It means timeToWake seconds in the future if absoluteTime is False.
    """
        return self.registerSelect(task, None, None, None, timeToWake,
                                   timeIsAbsolute)

    def _return(self, sleepingTask, returnVal):
        #print("reschedule", sleepingTask)
        sleepingTask.rv = returnVal
        self._scheduler.schedule(sleepingTask)
Ejemplo n.º 47
0
def pollards_rho(n):
    x, y, d = 2, 2, 1
    while 1 == d:
        x = (x * x + 1) % n
        y = (y * y + 1) % n
        y = (y * y + 1) % n
        d = gcd(abs(x - y), n)
    return [d, n / d]


for N in stdin:
    N = int(N)
    if 0 == N: break
    P, N = trial_division(N)
    Q, N = trial_pow(N)
    P.extend(Q)
    F = Queue()
    if 1 != N: F.put(N)
    while not F.empty():
        f = F.get()
        if miller_rabbin(f, 10):
            P.append(f)
        else:
            for n in pollards_rho(f):
                if 1 != n: F.put(n)
    s = ''
    for f, n in sorted(Counter(P).iteritems()):
        s += str(f) + '^' + str(n) + ' '
    print s
exit(0)
Ejemplo n.º 48
0
from Queue import Queue
size = 1000
offSet = 500
T = [[False for x in range(size)] for y in range(size)]
T[offSet][offSet] = [0, '']
Q = Queue()
Q.put([offSet, offSet])
while not Q.empty():
    x, y = Q.get()
    count, path = T[x][y]
    if x + count + 1 < size and not T[x + count + 1][y]:
        T[x + count + 1][y] = [count + 1, path + "E"]
        Q.put([x + count + 1, y])
    if x - count - 1 >= 0 and not T[x - count - 1][y]:
        T[x - count - 1][y] = [count + 1, path + "W"]
        Q.put([x - count - 1, y])
    if y + count + 1 < size and not T[x][y + count + 1]:
        T[x][y + count + 1] = [count + 1, path + "N"]
        Q.put([x, y + count + 1])
    if y - count - 1 >= 0 and not T[x][y - count - 1]:
        T[x][y - count - 1] = [count + 1, path + "S"]
        Q.put([x, y - count - 1])
for t in range(1, int(raw_input()) + 1):
    print "Case #" + str(t) + ":",
    x, y = [int(k) + offSet for k in raw_input().split()]
    print T[x][y][1]
Ejemplo n.º 49
0
class RpcMaas(object):
    """Class representing a connection to the MAAS Service"""
    def __init__(self,
                 entity_match='',
                 entities=None,
                 config_file=DEFAULT_CONFIG_FILE,
                 use_api=True):
        self.entity_label_whitelist = entities
        self.entity_match = entity_match
        self.config_file = config_file
        self.use_api = use_api
        if self.use_api:
            self.driver = get_driver(Provider.RACKSPACE)
            self._get_conn()
            self._get_overview()
            self._add_links()
            self._filter_entities()
        self.q = Queue()

    def _filter_entities(self):
        if not self.entity_label_whitelist:
            self.entities = [
                e['entity'] for e in self.overview
                if self.entity_match in e['entity'].label
            ]
        else:
            self.entities = []
            for entry in self.overview:
                entity = entry['entity']
                for label in self.entity_label_whitelist:
                    if entity.label == label:
                        self.entities.append(entity)
        if not self.entities:
            raise Exception("No Entities found matching --entity or "
                            "--entitymatch")

    def _get_conn(self):
        """Read config file and use extracted creds to connect to MAAS"""
        self.config = ConfigParser.RawConfigParser()
        self.config.read(self.config_file)
        self.conn = None

        try:
            user = self.config.get('credentials', 'username')
            api_key = self.config.get('credentials', 'api_key')
            self.conn = self.driver(user, api_key)
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            url = self.config.get('api', 'url')
            token = self.config.get('api', 'token')
            self.conn = self.driver(None,
                                    None,
                                    ex_force_base_url=url,
                                    ex_force_auth_token=token)

    def _get_overview(self):
        self.overview = self.conn.ex_views_overview()

    def _add_links(self):
        """Add missing parent/child links to objects"""

        # Entity --> Check
        for entry in self.overview:
            entity = entry['entity']
            entity.checks = entry['checks']
            entity.alarms = []
            entity.metrics = []

            # Check --> Entity
            for check in entity.checks:
                check.entity = entity
                check.metrics = []

                # Check <--> Alarm
                check.alarms = []

                for alarm in self.get_alarms(check=check, entry=entry):
                    alarm.check = check
                    alarm.entity = entity
                    check.alarms.append(alarm)
                    entity.alarms.append(alarm)

    def _add_metrics_list_to_check(self, check):
        """Called via ThreadPoolExecutor, result returned via queue."""
        metrics = self.conn.list_metrics(check.entity.id, check.id)
        self.q.put((check, metrics))

    def add_metrics(self):
        """Add metrics list to each checks

        Requires a call per check, so ThreadPoolExecutor is used to
        parallelise and reduce time taken
        """
        with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
            for entity in self.get_entities():
                for check in entity.checks:
                    executor.submit(self._add_metrics_list_to_check, check)
        while not self.q.empty():
            check, metrics = self.q.get()
            for metric in metrics:
                metric.check = check
                metric.entity = check.entity
                check.metrics.append(metric)
                check.entity.metrics.append(metric)

    def get_entities(self):
        """Return list of known entities

        entity_match filter not required as this is done in __init__
        """
        return self.entities

    def get_checks(self, check_match=''):
        """List checks for entities matching a string"""

        checks = []
        for entity in self.entities:
            checks.extend([c for c in entity.checks if check_match in c.label])
        return checks

    def get_alarms(self, entry, check):
        """Get list of alarms

        Params:
            entry: overview dictionary for one entity.
            check: the check whose alarms should be returned.

        This function adds a state field to each alarm object
        using information from the 'latest_alarm_states' entry key.
        """
        alarms = []
        for alarm in entry['alarms']:
            if alarm.check_id != check.id:
                continue
            # add state to the alarm object from latest_alarm_states
            alarm_states = sorted((als for als in entry['latest_alarm_states']
                                   if als.alarm_id == alarm.id),
                                  key=lambda x: x.timestamp)
            if alarm_states:
                alarm.state = alarm_states[-1].state
            else:
                alarm.state = "UNKNOWN"
            alarms.append(alarm)
        return alarms
Ejemplo n.º 50
0
class SplunkHandler(logging.Handler):
    """
    A logging handler to send events to a Splunk Enterprise instance
    running the Splunk HTTP Event Collector.
    """
    def __init__(self,
                 host,
                 port,
                 token,
                 index,
                 hostname=None,
                 source=None,
                 sourcetype='text',
                 verify=True,
                 timeout=60,
                 flush_interval=15.0,
                 queue_size=5000,
                 debug=False,
                 retry_count=5,
                 retry_backoff=2.0,
                 multiple_process=False):

        global instances
        instances.append(self)
        logging.Handler.__init__(self)

        self.host = host
        self.port = port
        self.token = token
        self.index = index
        self.source = source
        self.sourcetype = sourcetype
        self.verify = verify
        self.timeout = timeout
        self.flush_interval = flush_interval
        self.log_payload = ""
        self.SIGTERM = False  # 'True' if application requested exit
        self.timer = None
        self.testing = False  # Used for slightly altering logic during unit testing
        self.multiple_process = multiple_process
        # It is possible to get 'behind' and never catch up, so we limit the queue size
        if self.multiple_process:
            self.queue = JoinableQueue(maxsize=queue_size)
        else:
            self.queue = Queue(maxsize=queue_size)
        self.debug = debug
        self.session = requests.Session()
        self.retry_count = retry_count
        self.retry_backoff = retry_backoff

        self.write_debug_log("Starting debug mode")

        if hostname is None:
            self.hostname = socket.gethostname()
        else:
            self.hostname = hostname

        self.write_debug_log("Preparing to override loggers")

        # prevent infinite recursion by silencing requests and urllib3 loggers
        logging.getLogger('requests').propagate = False
        logging.getLogger('urllib3').propagate = False

        # and do the same for ourselves
        logging.getLogger(__name__).propagate = False

        # disable all warnings from urllib3 package
        if not self.verify:
            requests.packages.urllib3.disable_warnings()

        # Set up automatic retry with back-off
        self.write_debug_log("Preparing to create a Requests session")
        retry = Retry(
            total=self.retry_count,
            backoff_factor=self.retry_backoff,
            method_whitelist=False,  # Retry for any HTTP verb
            status_forcelist=[500, 502, 503, 504])
        self.session.mount('https://', HTTPAdapter(max_retries=retry))

        self.start_worker_thread()

        self.write_debug_log("Class initialize complete")

    def emit(self, record):
        self.write_debug_log("emit() called")

        try:
            record = self.format_record(record)
        except Exception as e:
            self.write_log("Exception in Splunk logging handler: %s" % str(e))
            self.write_log(traceback.format_exc())
            return

        if self.flush_interval > 0:
            try:
                self.write_debug_log("Writing record to log queue")
                # Put log message into queue; worker thread will pick up
                self.queue.put_nowait(record)
            except Full:
                self.write_log("Log queue full; log data will be dropped.")
        else:
            # Flush log immediately; is blocking call
            self._splunk_worker(payload=record)

    def close(self):
        self.shutdown()
        logging.Handler.close(self)

    #
    # helper methods
    #

    def start_worker_thread(self):
        # Start a worker thread responsible for sending logs
        if self.flush_interval > 0:
            self.write_debug_log(
                "Preparing to spin off first worker thread Timer")
            self.timer = Timer(self.flush_interval, self._splunk_worker)
            self.timer.daemon = True  # Auto-kill thread if main process exits
            self.timer.start()

    def write_log(self, log_message):
        print("[SplunkHandler] " + log_message)

    def write_debug_log(self, log_message):
        if self.debug:
            print("[SplunkHandler DEBUG] " + log_message)

    def format_record(self, record):
        self.write_debug_log("format_record() called")

        if self.source is None:
            source = record.pathname
        else:
            source = self.source

        current_time = time.time()
        if self.testing:
            current_time = None

        params = {
            'time': current_time,
            'host': self.hostname,
            'index': self.index,
            'source': source,
            'sourcetype': self.sourcetype,
            'event': self.format(record),
        }

        self.write_debug_log("Record dictionary created")

        formatted_record = json.dumps(params, sort_keys=True)
        self.write_debug_log("Record formatting complete")

        return formatted_record

    def _splunk_worker(self, payload=None):
        self.write_debug_log("_splunk_worker() called")

        if self.flush_interval > 0:
            # Stop the timer. Happens automatically if this is called
            # via the timer, does not if invoked by force_flush()
            self.timer.cancel()

            queue_is_empty = self.empty_queue()

        if not payload:
            payload = self.log_payload

        if payload:
            self.write_debug_log("Payload available for sending")
            url = 'https://%s:%s/services/collector' % (self.host, self.port)
            self.write_debug_log("Destination URL is " + url)

            try:
                self.write_debug_log("Sending payload: " + payload)
                r = self.session.post(
                    url,
                    data=payload,
                    headers={'Authorization': "Splunk %s" % self.token},
                    verify=self.verify,
                    timeout=self.timeout,
                )
                r.raise_for_status()  # Throws exception for 4xx/5xx status
                self.write_debug_log("Payload sent successfully")

            except Exception as e:
                try:
                    self.write_log("Exception in Splunk logging handler: %s" %
                                   str(e))
                    self.write_log(traceback.format_exc())
                except:
                    self.write_debug_log(
                        "Exception encountered," +
                        "but traceback could not be formatted")

            self.log_payload = ""
        else:
            self.write_debug_log(
                "Timer thread executed but no payload was available to send")

        # Restart the timer
        if self.flush_interval > 0:
            timer_interval = self.flush_interval
            if self.SIGTERM:
                self.write_debug_log(
                    "Timer reset aborted due to SIGTERM received")
            else:
                if not queue_is_empty:
                    self.write_debug_log(
                        "Queue not empty, scheduling timer to run immediately")
                    timer_interval = 1.0  # Start up again right away if queue was not cleared

                self.write_debug_log("Resetting timer thread")
                self.timer = Timer(timer_interval, self._splunk_worker)
                self.timer.daemon = True  # Auto-kill thread if main process exits
                self.timer.start()
                self.write_debug_log("Timer thread scheduled")

    def empty_queue(self):
        while not self.queue.empty():
            self.write_debug_log("Recursing through queue")
            try:
                item = self.queue.get(block=False)
                self.log_payload = self.log_payload + item
                self.queue.task_done()
                self.write_debug_log("Queue task completed")
            except Empty:
                self.write_debug_log("Queue was empty")

            # If the payload is getting very long, stop reading and send immediately.
            if not self.SIGTERM and len(self.log_payload) >= 524288:  # 512 KB
                self.write_debug_log(
                    "Payload maximum size exceeded, sending immediately")
                return False

        return True

    def force_flush(self):
        self.write_debug_log("Force flush requested")
        self._splunk_worker()

    def shutdown(self):
        self.write_debug_log("Immediate shutdown requested")

        # Only initiate shutdown once
        if self.SIGTERM:
            return

        self.write_debug_log("Setting instance SIGTERM=True")
        self.SIGTERM = True

        if self.flush_interval > 0:
            # Cancel the scheduled Timer so we can exit immediately
            self.timer.cancel()

        self.write_debug_log(
            "Starting up the final run of the worker thread before shutdown")
        # Send the remaining items that might be sitting in queue.
        self._splunk_worker()
Ejemplo n.º 51
0
class TokenBucketQueue(object):
    """Queue with rate limited get operations.

    This uses the token bucket algorithm to rate limit the queue on get
    operations.
    See http://en.wikipedia.org/wiki/Token_Bucket
    Most of this code was stolen from an entry in the ASPN Python Cookbook:
    http://code.activestate.com/recipes/511490/

    :param fill_rate: see :attr:`fill_rate`.
    :keyword capacity: see :attr:`capacity`.

    .. attribute:: fill_rate

        The rate in tokens/second that the bucket will be refilled.

    .. attribute:: capacity

        Maximum number of tokens in the bucket. Default is ``1``.

    .. attribute:: timestamp

        Timestamp of the last time a token was taken out of the bucket.

    """
    RateLimitExceeded = RateLimitExceeded

    def __init__(self, fill_rate, queue=None, capacity=1):
        self.capacity = float(capacity)
        self._tokens = self.capacity
        self.queue = queue
        if not self.queue:
            self.queue = Queue()
        self.fill_rate = float(fill_rate)
        self.timestamp = time.time()

    def put(self, item, block=True):
        """Put an item into the queue.

        Also see :meth:`Queue.Queue.put`.

        """
        self.queue.put(item, block=block)

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        :raises Queue.Full: If a free slot is not immediately available.

        Also see :meth:`Queue.Queue.put_nowait`

        """
        return self.put(item, block=False)

    def get(self, block=True):
        """Remove and return an item from the queue.

        :raises RateLimitExceeded: If a token could not be consumed from the
            token bucket (consuming from the queue too fast).
        :raises Queue.Empty: If an item is not immediately available.

        Also see :meth:`Queue.Queue.get`.

        """
        get = self.queue.get if block else self.queue.get_nowait

        if not self.can_consume(1):
            raise RateLimitExceeded()

        return get()

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        :raises RateLimitExceeded: If a token could not be consumed from the
            token bucket (consuming from the queue too fast).
        :raises Queue.Empty: If an item is not immediately available.

        Also see :meth:`Queue.Queue.get_nowait`.

        """
        return self.get(block=False)

    def qsize(self):
        """Returns the size of the queue.

        See :meth:`Queue.Queue.qsize`.

        """
        return self.queue.qsize()

    def empty(self):
        return self.queue.empty()

    def clear(self):
        return self.items.clear()

    def wait(self, block=False):
        """Wait until a token can be retrieved from the bucket and return
        the next item."""
        while True:
            remaining = self.expected_time()
            if not remaining:
                return self.get(block=block)
            time.sleep(remaining)

    def can_consume(self, tokens=1):
        """Consume tokens from the bucket. Returns True if there were
        sufficient tokens otherwise False."""
        if tokens <= self._get_tokens():
            self._tokens -= tokens
            return True
        return False

    def expected_time(self, tokens=1):
        """Returns the expected time in seconds when a new token should be
        available."""
        tokens = max(tokens, self._get_tokens())
        return (tokens - self._get_tokens()) / self.fill_rate

    def _get_tokens(self):
        if self._tokens < self.capacity:
            now = time.time()
            delta = self.fill_rate * (now - self.timestamp)
            self._tokens = min(self.capacity, self._tokens + delta)
            self.timestamp = now
        return self._tokens

    @property
    def items(self):
        return self.queue.queue
Ejemplo n.º 52
0
class Capturing:
    def __init__(self):
        """
        Start capturing GSM packets and decode them
        Print statements used:
            Green = Normal operation, information
            Yellow = Executing statements
            Red = Something went wrong
        """
        if not os.geteuid() == 0:
            sys.exit(
                "\nYou must be root to run this application, please use sudo and try again.\n"
            )
        self.continue_loop = True

        print(Fore.GREEN + '------------- Imsi Catcher^2 -------------')
        print(Fore.GREEN + 'To stop the loop: Hit Enter')

        # set used location
        if config.other_saving_location:
            user_home_location = config.other_saving_location
        else:
            user_home_location = expanduser("~")
        location = user_home_location + "/IMSI/captures"
        print(Fore.GREEN + 'Saving location: ' + location)

        # make save folder if it does not exist yet. Change working directory to the new folder.
        if not os.path.exists(location):
            os.makedirs(location)
        os.chdir(location)

        # make a queue for decodes to be done
        self.stop_decode = False
        self.q = Queue()

        # start wireshark or tshark:
        if config.use_wireshark:
            wiresharkBashCommand = "sudo wireshark -k -f udp -Y gsmtap -i lo"
            print(Fore.YELLOW + 'Executing command: ' +
                  str(wiresharkBashCommand))
            Popen(wiresharkBashCommand, shell=True)
            # sleep to allow you to hit enter for the warning messages :)
            time.sleep(5)

        # make separate folder for this capture (with current time)
        foldername = time.strftime("%d-%m-%Y_%H:%M:%S")
        os.makedirs(foldername)
        os.chdir(foldername)

        #  device queue
        self.deviceq = Queue()
        for device in config.available_antennas:
            self.deviceq.put(device)

        # allow entry of frequencies in both formats
        self.frequencies = []
        temp_frequencies = []
        for freq in config.frequencies:
            temp_frequencies.append(freq)
        for short_freq in config.frequencies_scanner:
            long_freq = str(int(short_freq * 1000000))
            temp_frequencies.append(long_freq)

        # test frequencies if they are really alive
        if config.test_frequencies:
            results = {}
            for freq in temp_frequencies:
                working_freq = self.test_frequency(freq)
                results[freq] = working_freq
            for freq, working in results.iteritems():
                print 'freq: ' + str(freq) + ', working: ' + str(working)
                if working:
                    self.frequencies.append(freq)
        else:
            self.frequencies = temp_frequencies

        # print 'The following frequencies will be processed:'
        # for freq in self.frequencies:
        #     print freq

        # start actually doing stuff
        self.start_loop()

    def start_loop(self):
        """
        Run the capturing and decoding until one presses a key and the enter key. The loop will finish it's workings.
        """
        stop = []
        thread.start_new_thread(self.stop_loop, (stop, ))
        if config.execute_decode:
            thread.start_new_thread(self.decode_loop, ())
        capture_thread = None
        i = 0
        while not stop:
            # capture certain amount of times
            if i < config.number_of_rounds:
                # every freq in one iteration
                for freq in self.frequencies:
                    while True:
                        # only continue if antenna available
                        if self.deviceq.qsize() > 0:
                            # TODO: add check which sees if enough harddiskspace
                            antenna = str(self.deviceq.get())
                            print(Fore.GREEN + 'starting capturing on freq ' +
                                  str(freq))

                            capture_thread = threading.Thread(
                                target=self.capture_raw_data,
                                args=(i, freq, antenna))
                            capture_thread.start()
                            time.sleep(2)
                            break
                        else:
                            time.sleep(10)
                i += 1
            else:
                break

        # keep thread alive for last capture, and then wait for decode loop to die
        while capture_thread is not None and capture_thread.isAlive():
            time.sleep(3)

        self.stop_decode = True
        print(Fore.GREEN + 'Finished ALL capturing')
        if not self.q.empty() and config.execute_decode:
            self.decode_loop()

    def decode_loop(self):
        """
        Decode GSM Data to GSM packets with use of a queue (which contains the filenames to decode)
        """
        while True:
            # check if we should stop (finished capturing and the decode queue is empty)
            if self.stop_decode and self.q.empty():
                print(Back.YELLOW + 'stop_decode = true')
                break
            # if the queue is filled, we should decode that!
            if not self.q.empty():
                filename, freq = self.q.get()
                self.decode_raw_data(filename, freq)
            # if the queue is empty, let's chill for a while
            else:
                time.sleep(5)

        print(Fore.GREEN +
              'Finished ALL decoding; let\'s go to to the next location!')

    def capture_raw_data(self, filenumber, freq, antenna):
        """
        Capture raw antenna data using grgsm_capture.
        """
        filename = 'capture' + str(filenumber) + '_' + str(freq) + '.cfile'
        captureBashCommand = "grgsm_capture.py -c " + filename + " -f " + freq + ' -T ' \
                             + config.capture_length \
                             + ' --args="rtl=' + antenna + '"'
        #captureBashCommand = 'grgsm_capture.py -c %s -f %d -T %s  --args="rtl=%s"' % (
        #     filename, freq, config.capture_length, antenna
        # )
        print(Fore.YELLOW + 'Executing command: ' + str(captureBashCommand))
        print(Fore.GREEN + 'Script will capture for ' + config.capture_length +
              ' seconds.')

        # Start capture
        os.system(captureBashCommand)

        everything_to_hell = False
        # Check if capture was successful (as sometimes receiver quits for no reason)
        if not os.path.isfile(filename):
            print(
                Back.RED + str(filename) +
                ': I think the capture went wrong, please check!! I will not decode this'
            )
            everything_to_hell = True

        print(Fore.GREEN + str(filename) + ': Finished capturing')
        if not everything_to_hell:
            print(Back.YELLOW + 'adding to queue: ' + str(filename))
            self.q.put((filename, freq))
        self.deviceq.put(antenna)

    def decode_raw_data(self, filename, freq):
        """
        Decode the captured data into GSM packets readable by wireshark. Also deletes the raw data when requested.
        """
        print(Fore.GREEN + str(filename) +
              ': Starting decoding of SDCCH8 and BCCH.')
        SDCCH_bash = 'grgsm_decode -c ' + filename + ' -f ' + freq + ' -m SDCCH8 -t 1'
        BCCH_bash = 'grgsm_decode -c ' + filename + ' -f ' + freq + ' -m BCCH -t 0'

        # Start Tshark if wanted to capture this packet output of this capture file
        if not config.use_wireshark:
            tsharkBashCommand = "sudo tshark -w " + filename[:-6] + ".pcapng -i lo -q"
            args = shlex.split(tsharkBashCommand)
            print(Fore.YELLOW + 'Executing command: ' + str(tsharkBashCommand))
            tshark = Popen(args)
            #print(Back.RED + 'tshark should have started')
            time.sleep(5)  # sleep to allow you to enter sudo password

        # Sleep to allow release of lock on file
        time.sleep(2)

        # Actually start decoding, depending on which channel to decode
        if config.decode_sdcch:
            print(Fore.YELLOW + str(filename) +
                  ': Decoding SDCCH, using command: ' + SDCCH_bash)
            SDCCH = Popen(SDCCH_bash, shell=True)
            SDCCH.wait()
        if config.decode_bcch:
            print(Fore.YELLOW + str(filename) +
                  ': Decoding BCCH, using command: ' + BCCH_bash)
            BCCH = Popen(BCCH_bash, shell=True)
            BCCH.wait()

        # Delete if wanted
        if config.delete_capture_after_processing:
            print(
                Fore.GREEN + str(filename) +
                ': Deleting capture file as requested (change this in config file).'
            )
            os.remove(filename)

        if not config.use_wireshark:
            tshark.terminate()
            #print(Back.RED + 'tshark should have stopped')

        print(Fore.GREEN + str(filename) + ': Finished decoding')

    def stop_loop(self, stop):
        """
        Stops the loop in start_loop. The loop finishes and is then ended.
        """
        raw_input()
        stop.append(None)
        print(
            Fore.RED +
            'The processing will stop after finishing current capturing and decoding...'
        )

    def test_frequency(self, frequency):
        """
        Test the given frequency with grgsm_livemon, by checking if there's a lot of output in the terminal.
        """
        p = Popen(['grgsm_livemon', '-f', frequency], stdout=PIPE)
        i = 0
        found = False
        print(Fore.GREEN + 'Testing frequency: ' + str(frequency))

        # loop through lines of terminal output
        while True:
            line = p.stdout.readline()

            # skip first 15 lines for false positives
            if i > 15:
                if '2b 2b' in line:
                    found = True
                    print(Back.GREEN + 'Working: ' + str(frequency))
                    break
            # Now something should have happened; if not, quit
            if i > 120:
                break
            i += 1
        p.wait()
        # sleep for reattaching kernel driver again.
        time.sleep(3)
        return found
Ejemplo n.º 53
0
class RepSocketChannel(ZmqSocketChannel):
    """A reply channel to handle raw_input requests that the kernel makes."""

    msg_queue = None

    def __init__(self, context, session, address):
        super(RepSocketChannel, self).__init__(context, session, address)
        self.ioloop = ioloop.IOLoop()
        self.msg_queue = Queue()

    def run(self):
        """The thread's main activity.  Call start() instead."""
        self.socket = self.context.socket(zmq.XREQ)
        self.socket.setsockopt(zmq.IDENTITY, self.session.session)
        self.socket.connect('tcp://%s:%i' % self.address)
        self.iostate = POLLERR | POLLIN
        self.ioloop.add_handler(self.socket, self._handle_events, self.iostate)
        self.ioloop.start()

    def stop(self):
        self.ioloop.stop()
        super(RepSocketChannel, self).stop()

    def call_handlers(self, msg):
        """This method is called in the ioloop thread when a message arrives.

        Subclasses should override this method to handle incoming messages.
        It is important to remember that this method is called in the ioloop
        thread, so some logic must be done to ensure that the application
        level handlers are called in the application thread.
        """
        raise NotImplementedError(
            'call_handlers must be defined in a subclass.')

    def input(self, string):
        """Send a string of raw input to the kernel."""
        content = dict(value=string)
        msg = self.session.msg('input_reply', content)
        self._queue_reply(msg)

    def _handle_events(self, socket, events):
        if events & POLLERR:
            self._handle_err()
        if events & POLLOUT:
            self._handle_send()
        if events & POLLIN:
            self._handle_recv()

    def _handle_recv(self):
        ident, msg = self.session.recv(self.socket, 0)
        self.call_handlers(msg)

    def _handle_send(self):
        try:
            msg = self.msg_queue.get(False)
        except Empty:
            pass
        else:
            self.session.send(self.socket, msg)
        if self.msg_queue.empty():
            self.drop_io_state(POLLOUT)

    def _handle_err(self):
        # We don't want to let this go silently, so eventually we should log.
        raise zmq.ZMQError()

    def _queue_reply(self, msg):
        self.msg_queue.put(msg)
        self.add_io_state(POLLOUT)
Ejemplo n.º 54
0
class MCB(object):
    def __init__(self, size=1024):
        self.port_queues = []
        self.controller_queue = Queue()
        self.has_controller = False
        self.size = size
        self.mem = mmap.mmap(-1, size)

    def read_mem(self, address, length):
        self.mem.seek(address)
        return self.mem.read(length)

    def write_mem(self, address, data):
        self.mem.seek(address)
        self.mem.write(data)

    def create_controller(self, clk, rst):
        if self.has_controller:
            raise Exception("Controller already instantiated!")

        self.has_controller = True

        port_queues = []

        @instance
        def logic():

            while True:
                yield clk.posedge

                # build port list
                while not self.controller_queue.empty():
                    port_queues.append(self.controller_queue.get())

                # check for commands
                for port in port_queues:
                    pw, pn, cmdf, wrf, rdf = port

                    if not cmdf.empty():
                        instr, ba, bl = cmdf.get()

                        if pn is not None:
                            print("[%s] Got command i:%d a:0x%08x bl:%d" %
                                  (pn, instr, ba, bl))

                        # check alignment
                        if pw == 32:
                            assert ba & 3 == 0
                        elif pw == 64:
                            assert ba & 7 == 0
                        elif pw == 128:
                            assert ba & 15 == 0

                        if instr == 0 or instr == 2:
                            # write or write with auto precharge
                            self.mem.seek(ba % self.size)
                            for k in range(bl + 1):
                                mask, data = wrf.get()
                                if pw == 32:
                                    data = struct.pack('<L', data)
                                elif pw == 64:
                                    data = struct.pack('<Q', data)
                                elif pw == 128:
                                    data = struct.pack(
                                        '<Q', data & 2**64 - 1) + struct.pack(
                                            '<Q', data >> 64)
                                for l in range(len(data)):
                                    if not mask & (1 << l):
                                        self.mem.write(data[l])
                                    else:
                                        self.mem.seek(1, 1)
                                if pn is not None:
                                    print(
                                        "[%s] Write word %d/%d a:0x%08x m:0x%02x d:%s"
                                        %
                                        (pn, k + 1, bl + 1, ba + k * pw / 8,
                                         mask, " ".join("{:02x}".format(ord(c))
                                                        for c in data)))
                        elif instr == 1 or instr == 3:
                            # read or read with auto precharge
                            self.mem.seek(ba % self.size)
                            data = self.mem.read(int((bl + 1) * pw / 8))
                            for k in range(bl + 1):
                                if pw == 32:
                                    rdf.put(
                                        struct.unpack(
                                            '<L', data[k * 4:(k + 1) * 4])[0])
                                elif pw == 64:
                                    rdf.put(
                                        struct.unpack(
                                            '<Q', data[k * 8:(k + 1) * 8])[0])
                                elif pw == 128:
                                    rdf.put(
                                        struct.unpack('<Q', data[k * 16:k *
                                                                 16 + 8])[0] +
                                        struct.unpack(
                                            '<Q', data[k * 16 + 8:(k + 1) *
                                                       16])[0] * 2**64)
                                if pn is not None:
                                    print(
                                        "[%s] Read word %d/%d a:0x%08x d:%s" %
                                        (pn, k + 1, bl + 1, ba + k * pw / 8,
                                         " ".join(
                                             "{:02x}".format(ord(c))
                                             for c in data[k * int(pw / 8):
                                                           (k + 1) *
                                                           int(pw / 8)])))
                        else:
                            # refresh
                            pass

        return logic

    def port_cmd_logic(self, cmd_clk, cmd_en, cmd_instr, cmd_byte_addr, cmd_bl,
                       cmd_empty, cmd_full, fifo):
        @instance
        def logic():

            while True:
                yield cmd_clk.posedge

                if not fifo.full() and cmd_en:
                    fifo.put((int(cmd_instr), int(cmd_byte_addr), int(cmd_bl)))

                cmd_full.next = fifo.full()
                cmd_empty.next = fifo.empty()

        return logic

    def port_wr_logic(self, wr_clk, wr_en, wr_mask, wr_data, wr_empty, wr_full,
                      wr_underrun, wr_count, wr_error, fifo):
        @instance
        def logic():

            while True:
                yield wr_clk.posedge

                if not fifo.full() and wr_en:
                    fifo.put((int(wr_mask), int(wr_data)))

                wr_full.next = fifo.full()
                wr_empty.next = fifo.empty()
                wr_count.next = fifo.qsize()

        return logic

    def port_rd_logic(self, rd_clk, rd_en, rd_data, rd_empty, rd_full,
                      rd_overflow, rd_count, rd_error, fifo):
        @instance
        def logic():
            valid = False

            while True:
                yield rd_clk.posedge

                if rd_en:
                    # the word currently presented has been consumed
                    valid = False

                if not fifo.empty() and (rd_en or not valid):
                    # first-word fall-through: present the next word as soon
                    # as one is available
                    valid = True
                    rd_data.next = fifo.get()

                rd_full.next = fifo.full()
                rd_empty.next = not valid
                rd_count.next = fifo.qsize() + int(valid)

        return logic

    def create_read_port(self,
                         cmd_clk,
                         cmd_en,
                         cmd_instr,
                         cmd_byte_addr,
                         cmd_bl,
                         cmd_empty,
                         cmd_full,
                         rd_clk,
                         rd_en,
                         rd_data,
                         rd_empty,
                         rd_full,
                         rd_overflow,
                         rd_count,
                         rd_error,
                         name=None):

        assert len(rd_data) in [32, 64, 128]

        cmd_fifo = Queue(4)
        read_fifo = Queue(64)

        self.port_queues.append(
            (len(rd_data), name, cmd_fifo, None, read_fifo))
        self.controller_queue.put(
            (len(rd_data), name, cmd_fifo, None, read_fifo))

        cmd_logic = self.port_cmd_logic(cmd_clk, cmd_en, cmd_instr,
                                        cmd_byte_addr, cmd_bl, cmd_empty,
                                        cmd_full, cmd_fifo)
        read_logic = self.port_rd_logic(rd_clk, rd_en, rd_data, rd_empty,
                                        rd_full, rd_overflow, rd_count,
                                        rd_error, read_fifo)

        return cmd_logic, read_logic

    def create_write_port(self,
                          cmd_clk,
                          cmd_en,
                          cmd_instr,
                          cmd_bl,
                          cmd_byte_addr,
                          cmd_empty,
                          cmd_full,
                          wr_clk,
                          wr_en,
                          wr_mask,
                          wr_data,
                          wr_empty,
                          wr_full,
                          wr_underrun,
                          wr_count,
                          wr_error,
                          name=None):

        assert len(wr_data) in [32, 64, 128]

        cmd_fifo = Queue(4)
        write_fifo = Queue(64)

        self.port_queues.append(
            (len(wr_data), name, cmd_fifo, write_fifo, None))
        self.controller_queue.put(
            (len(wr_data), name, cmd_fifo, write_fifo, None))

        cmd_logic = self.port_cmd_logic(cmd_clk, cmd_en, cmd_instr,
                                        cmd_byte_addr, cmd_bl, cmd_empty,
                                        cmd_full, cmd_fifo)
        write_logic = self.port_wr_logic(wr_clk, wr_en, wr_mask, wr_data,
                                         wr_empty, wr_full, wr_underrun,
                                         wr_count, wr_error, write_fifo)

        return cmd_logic, write_logic

    def create_readwrite_port(self,
                              cmd_clk,
                              cmd_en,
                              cmd_instr,
                              cmd_bl,
                              cmd_byte_addr,
                              cmd_empty,
                              cmd_full,
                              wr_clk,
                              wr_en,
                              wr_mask,
                              wr_data,
                              wr_empty,
                              wr_full,
                              wr_underrun,
                              wr_count,
                              wr_error,
                              rd_clk,
                              rd_en,
                              rd_data,
                              rd_empty,
                              rd_full,
                              rd_overflow,
                              rd_count,
                              rd_error,
                              name=None):

        assert len(wr_data) in [32, 64, 128]
        assert len(rd_data) in [32, 64, 128]

        cmd_fifo = Queue(4)
        write_fifo = Queue(64)
        read_fifo = Queue(64)

        assert len(wr_data) == len(rd_data)

        self.port_queues.append(
            (len(wr_data), name, cmd_fifo, write_fifo, read_fifo))
        self.controller_queue.put(
            (len(wr_data), name, cmd_fifo, write_fifo, read_fifo))

        cmd_logic = self.port_cmd_logic(cmd_clk, cmd_en, cmd_instr,
                                        cmd_byte_addr, cmd_bl, cmd_empty,
                                        cmd_full, cmd_fifo)
        write_logic = self.port_wr_logic(wr_clk, wr_en, wr_mask, wr_data,
                                         wr_empty, wr_full, wr_underrun,
                                         wr_count, wr_error, write_fifo)
        read_logic = self.port_rd_logic(rd_clk, rd_en, rd_data, rd_empty,
                                        rd_full, rd_overflow, rd_count,
                                        rd_error, read_fifo)

        return cmd_logic, write_logic, read_logic
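
A minimal standalone sketch of the byte-mask convention used in the write path above, where a set mask bit means the byte is skipped rather than written. apply_write_mask is a hypothetical helper for illustration, not part of the model:

import struct

def apply_write_mask(mem, offset, data, mask):
    # a set mask bit means "skip this byte", matching the write path above
    out = bytearray(mem)
    for l in range(len(data)):
        if not mask & (1 << l):
            out[offset + l] = ord(data[l])
    return str(out)

mem = '\x00' * 4
data = struct.pack('<L', 0xDDCCBBAA)   # bytes AA BB CC DD (little-endian)
# mask 0b0010 protects byte 1 (0xBB): result is AA 00 CC DD
print repr(apply_write_mask(mem, 0, data, 0b0010))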
Example No. 55
def dictionaryAttack(attack_dict):
    suffix_list = [""]
    custom_wordlist = [""]
    hash_regexes = []
    results = []
    resumes = []
    user_hash = []
    processException = False
    foundHash = False

    for (_, hashes) in attack_dict.items():
        for hash_ in hashes:
            if not hash_:
                continue

            hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_
            regex = hashRecognition(hash_)

            if regex and regex not in hash_regexes:
                hash_regexes.append(regex)
                infoMsg = "using hash method '%s'" % __functions__[
                    regex].func_name
                logger.info(infoMsg)

    for hash_regex in hash_regexes:
        keys = set()
        attack_info = []

        for (user, hashes) in attack_dict.items():
            for hash_ in hashes:
                if not hash_:
                    continue

                foundHash = True
                hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_

                if re.match(hash_regex, hash_):
                    try:
                        item = None

                        if hash_regex not in (
                                HASH.CRYPT_GENERIC, HASH.JOOMLA,
                                HASH.WORDPRESS, HASH.UNIX_MD5_CRYPT,
                                HASH.APACHE_MD5_CRYPT, HASH.APACHE_SHA1,
                                HASH.VBULLETIN, HASH.VBULLETIN_OLD, HASH.SSHA,
                                HASH.SSHA256, HASH.SSHA512, HASH.DJANGO_MD5,
                                HASH.DJANGO_SHA1, HASH.MD5_BASE64,
                                HASH.SHA1_BASE64, HASH.SHA256_BASE64,
                                HASH.SHA512_BASE64):
                            hash_ = hash_.lower()

                        if hash_regex in (HASH.MD5_BASE64, HASH.SHA1_BASE64,
                                          HASH.SHA256_BASE64,
                                          HASH.SHA512_BASE64):
                            item = [(user,
                                     hash_.decode("base64").encode("hex")), {}]
                        elif hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD,
                                            HASH.MD5_GENERIC,
                                            HASH.SHA1_GENERIC,
                                            HASH.APACHE_SHA1):
                            item = [(user, hash_), {}]
                        elif hash_regex in (HASH.SSHA, ):
                            item = [(user, hash_), {
                                "salt": hash_.decode("base64")[20:]
                            }]
                        elif hash_regex in (HASH.SSHA256, ):
                            item = [(user, hash_), {
                                "salt": hash_.decode("base64")[32:]
                            }]
                        elif hash_regex in (HASH.SSHA512, ):
                            item = [(user, hash_), {
                                "salt": hash_.decode("base64")[64:]
                            }]
                        elif hash_regex in (HASH.ORACLE_OLD, HASH.POSTGRES):
                            item = [(user, hash_), {'username': user}]
                        elif hash_regex in (HASH.ORACLE, ):
                            item = [(user, hash_), {"salt": hash_[-20:]}]
                        elif hash_regex in (HASH.MSSQL, HASH.MSSQL_OLD,
                                            HASH.MSSQL_NEW):
                            item = [(user, hash_), {"salt": hash_[6:14]}]
                        elif hash_regex in (HASH.CRYPT_GENERIC, ):
                            item = [(user, hash_), {"salt": hash_[0:2]}]
                        elif hash_regex in (HASH.UNIX_MD5_CRYPT,
                                            HASH.APACHE_MD5_CRYPT):
                            item = [(user, hash_), {
                                "salt": hash_.split('$')[2],
                                "magic": "$%s$" % hash_.split('$')[1]
                            }]
                        elif hash_regex in (HASH.JOOMLA, HASH.VBULLETIN,
                                            HASH.VBULLETIN_OLD):
                            item = [(user, hash_), {
                                "salt": hash_.split(':')[-1]
                            }]
                        elif hash_regex in (HASH.DJANGO_MD5, HASH.DJANGO_SHA1):
                            item = [(user, hash_), {
                                "salt": hash_.split('$')[1]
                            }]
                        elif hash_regex in (HASH.WORDPRESS, ):
                            if ITOA64.index(hash_[3]) < 32:
                                item = [(user, hash_), {
                                    "salt": hash_[4:12],
                                    "count": 1 << ITOA64.index(hash_[3]),
                                    "prefix": hash_[:12]
                                }]
                            else:
                                warnMsg = "invalid hash '%s'" % hash_
                                logger.warn(warnMsg)

                        if item and hash_ not in keys:
                            resumed = hashDBRetrieve(hash_)
                            if not resumed:
                                attack_info.append(item)
                                user_hash.append(item[0])
                            else:
                                infoMsg = "resuming password '%s' for hash '%s'" % (
                                    resumed, hash_)
                                if user and not user.startswith(
                                        DUMMY_USER_PREFIX):
                                    infoMsg += " for user '%s'" % user
                                logger.info(infoMsg)
                                resumes.append((user, hash_, resumed))
                            keys.add(hash_)

                    except (binascii.Error, IndexError):
                        pass

        if not attack_info:
            continue

        if not kb.wordlists:
            while not kb.wordlists:

                # the slowest of all methods hence smaller default dict
                if hash_regex in (HASH.ORACLE_OLD, ):
                    dictPaths = [paths.SMALL_DICT]
                else:
                    dictPaths = [paths.WORDLIST]

                message = "what dictionary do you want to use?\n"
                message += "[1] default dictionary file '%s' (press Enter)\n" % dictPaths[
                    0]
                message += "[2] custom dictionary file\n"
                message += "[3] file with list of dictionary files"
                choice = readInput(message, default='1')

                try:
                    if choice == '2':
                        message = "what's the custom dictionary's location?\n"
                        dictPath = readInput(message)
                        if dictPath:
                            dictPaths = [dictPath]
                            logger.info("using custom dictionary")
                    elif choice == '3':
                        message = "what's the list file location?\n"
                        listPath = readInput(message)
                        checkFile(listPath)
                        dictPaths = getFileItems(listPath)
                        logger.info("using custom list of dictionaries")
                    else:
                        logger.info("using default dictionary")

                    dictPaths = filter(None, dictPaths)

                    for dictPath in dictPaths:
                        checkFile(dictPath)

                        if os.path.splitext(dictPath)[1].lower() == ".zip":
                            _ = zipfile.ZipFile(dictPath, 'r')
                            if len(_.namelist()) == 0:
                                errMsg = "no file(s) inside '%s'" % dictPath
                                raise SqlmapDataException(errMsg)
                            else:
                                _.open(_.namelist()[0])

                    kb.wordlists = dictPaths

                except Exception, ex:
                    warnMsg = "there was a problem while loading dictionaries"
                    warnMsg += " ('%s')" % getSafeExString(ex)
                    logger.critical(warnMsg)

            message = "do you want to use common password suffixes? (slow!) [y/N] "

            if readInput(message, default='N', boolean=True):
                suffix_list += COMMON_PASSWORD_SUFFIXES

        infoMsg = "starting dictionary-based cracking (%s)" % __functions__[
            hash_regex].func_name
        logger.info(infoMsg)

        for item in attack_info:
            ((user, _), _) = item
            if user and not user.startswith(DUMMY_USER_PREFIX):
                custom_wordlist.append(normalizeUnicode(user))

        # Algorithms without extra arguments (e.g. salt and/or username)
        if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD, HASH.MD5_GENERIC,
                          HASH.SHA1_GENERIC, HASH.SHA224_GENERIC,
                          HASH.SHA256_GENERIC, HASH.SHA384_GENERIC,
                          HASH.SHA512_GENERIC, HASH.APACHE_SHA1,
                          HASH.VBULLETIN, HASH.VBULLETIN_OLD):
            for suffix in suffix_list:
                if not attack_info or processException:
                    break

                if suffix:
                    clearConsoleLine()
                    infoMsg = "using suffix '%s'" % suffix
                    logger.info(infoMsg)

                retVal = None
                processes = []

                try:
                    if _multiprocessing:
                        if _multiprocessing.cpu_count() > 1:
                            infoMsg = "starting %d processes " % _multiprocessing.cpu_count(
                            )
                            singleTimeLogMessage(infoMsg)

                        gc.disable()

                        retVal = _multiprocessing.Queue()
                        count = _multiprocessing.Value(
                            'i', _multiprocessing.cpu_count())

                        for i in xrange(_multiprocessing.cpu_count()):
                            process = _multiprocessing.Process(
                                target=_bruteProcessVariantA,
                                args=(attack_info, hash_regex, suffix, retVal,
                                      i, count, kb.wordlists, custom_wordlist,
                                      conf.api))
                            processes.append(process)

                        for process in processes:
                            process.daemon = True
                            process.start()

                        while count.value > 0:
                            time.sleep(0.5)

                    else:
                        warnMsg = "multiprocessing hash cracking is currently "
                        warnMsg += "not supported on this platform"
                        singleTimeWarnMessage(warnMsg)

                        retVal = Queue()
                        _bruteProcessVariantA(attack_info, hash_regex, suffix,
                                              retVal, 0, 1, kb.wordlists,
                                              custom_wordlist, conf.api)

                except KeyboardInterrupt:
                    print
                    processException = True
                    warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)"
                    logger.warn(warnMsg)

                    for process in processes:
                        try:
                            process.terminate()
                            process.join()
                        except (OSError, AttributeError):
                            pass

                finally:
                    if _multiprocessing:
                        gc.enable()

                    if retVal:
                        conf.hashDB.beginTransaction()

                        while not retVal.empty():
                            user, hash_, word = item = retVal.get(block=False)
                            attack_info = filter(
                                lambda _: _[0][0] != user or _[0][1] != hash_,
                                attack_info)
                            hashDBWrite(hash_, word)
                            results.append(item)

                        conf.hashDB.endTransaction()

            clearConsoleLine()

        else:
            for ((user, hash_), kwargs) in attack_info:
                if processException:
                    break

                if any(_[0] == user and _[1] == hash_ for _ in results):
                    continue

                count = 0
                found = False

                for suffix in suffix_list:
                    if found or processException:
                        break

                    if suffix:
                        clearConsoleLine()
                        infoMsg = "using suffix '%s'" % suffix
                        logger.info(infoMsg)

                    retVal = None
                    processes = []

                    try:
                        if _multiprocessing:
                            if _multiprocessing.cpu_count() > 1:
                                infoMsg = "starting %d processes " % _multiprocessing.cpu_count(
                                )
                                singleTimeLogMessage(infoMsg)

                            gc.disable()

                            retVal = _multiprocessing.Queue()
                            found_ = _multiprocessing.Value('i', False)
                            count = _multiprocessing.Value(
                                'i', _multiprocessing.cpu_count())

                            for i in xrange(_multiprocessing.cpu_count()):
                                process = _multiprocessing.Process(
                                    target=_bruteProcessVariantB,
                                    args=(user, hash_, kwargs, hash_regex,
                                          suffix, retVal, found_, i, count,
                                          kb.wordlists, custom_wordlist,
                                          conf.api))
                                processes.append(process)

                            for process in processes:
                                process.daemon = True
                                process.start()

                            while count.value > 0:
                                time.sleep(0.5)

                            found = found_.value != 0

                        else:
                            warnMsg = "multiprocessing hash cracking is currently "
                            warnMsg += "not supported on this platform"
                            singleTimeWarnMessage(warnMsg)

                            class Value():
                                pass

                            retVal = Queue()
                            found_ = Value()
                            found_.value = False

                            _bruteProcessVariantB(user, hash_, kwargs,
                                                  hash_regex, suffix, retVal,
                                                  found_, 0, 1, kb.wordlists,
                                                  custom_wordlist, conf.api)

                            found = found_.value

                    except KeyboardInterrupt:
                        print
                        processException = True
                        warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)"
                        logger.warn(warnMsg)

                        for process in processes:
                            try:
                                process.terminate()
                                process.join()
                            except (OSError, AttributeError):
                                pass

                    finally:
                        if _multiprocessing:
                            gc.enable()

                        if retVal:
                            conf.hashDB.beginTransaction()

                            while not retVal.empty():
                                user, hash_, word = item = retVal.get(
                                    block=False)
                                hashDBWrite(hash_, word)
                                results.append(item)

                            conf.hashDB.endTransaction()

                clearConsoleLine()
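
The dictionary attack above (and its second variant in Example No. 57 below) relies on a coordination pattern worth isolating: each worker process decrements a shared counter when it finishes, results travel back through a multiprocessing Queue, and the parent polls the counter rather than joining, which keeps KeyboardInterrupt responsive. A minimal sketch of that pattern, with a placeholder worker body instead of sqlmap's cracking logic:

import multiprocessing
import time

def worker(retVal, count):
    try:
        # stand-in for _bruteProcessVariantA's cracking loop
        retVal.put(("user", "hash", "word"))
    finally:
        with count.get_lock():
            count.value -= 1  # signal completion to the polling parent

if __name__ == '__main__':
    retVal = multiprocessing.Queue()
    count = multiprocessing.Value('i', multiprocessing.cpu_count())

    processes = []
    for _ in xrange(multiprocessing.cpu_count()):
        process = multiprocessing.Process(target=worker, args=(retVal, count))
        process.daemon = True
        process.start()
        processes.append(process)

    # poll instead of join() so a KeyboardInterrupt is caught promptly
    while count.value > 0:
        time.sleep(0.5)

    while not retVal.empty():
        print retVal.get(block=False)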
Example No. 56
def __compact(t1, t2, graph, k):
    '''
        Compact t1 and t2 into @graph and @k.

        @t1: CateTree
        @t2: CateTree
        @graph: a list of __GraphNode
        @k: a dict whose keys are CateTreeNode and values are __GraphNode
    '''

    traveseQueue = Queue()

    leaf_gn = __GraphNode('leaf', 1, len(graph))
    graph.append(leaf_gn)

    for leaf_node in t1.leaves_set + t2.leaves_set:
        leaf_gn.node_set.append(leaf_node)
        traveseQueue.put(leaf_node)
        k[leaf_node] = leaf_gn
    while not traveseQueue.empty():
        
        v = traveseQueue.get()
        if len(v.chd_set) != 0:
            found = False
            # traverse the graph in reverse to look for an equal class
            for g_i in xrange(len(graph) - 1, -1, -1):
                w = graph[g_i]
                if len(w.out_set) != len(v.chd_set) or w.label != v.label or w.height != v.height:
                    continue

                is_same = True
                # get the equivalence classes of the child nodes
                w_chd_eq_cls = w.out_set
                v_chd_eq_cls = []
                for n in v.chd_set:
                    v_chd_eq_cls.append(k[n])

                # sort by __GraphNode index and compare them pairwise
                srted_w_chd_eq_cls = sorted(w_chd_eq_cls, cmp=lambda x, y: cmp(x.index, y.index))
                srted_v_chd_eq_cls = sorted(v_chd_eq_cls, cmp=lambda x, y: cmp(x.index, y.index))
                for w_i in xrange(len(srted_w_chd_eq_cls)):
                    w_n = srted_w_chd_eq_cls[w_i]
                    v_n = srted_v_chd_eq_cls[w_i]
                    if w_n.index != v_n.index:
                        is_same = False
                        break
                if is_same:
                    if k.has_key(v) and k[v].index != w.index:
                        raise Exception('error when setting K[v]: K[v] has already been set')
                    k[v] = w
                    w.node_set.append(v)
                    found = True
                    break
            # if not found, add a new __GraphNode
            if not found:
                new_gn = __GraphNode(v.label, v.height, len(graph))
                k[v] = new_gn
                for chd in v.chd_set:
                    if not k.has_key(chd):
                        raise Exception('not all children of v are in K')
                    k[chd].in_set.append(new_gn)
                    new_gn.out_set.append(k[chd])                
                graph.append(new_gn)
        if v != t1.root and v != t2.root:
            v.parent.unprocessed_son_size -= 1
            if v.parent.unprocessed_son_size == 0:
                traveseQueue.put(v.parent)
                v.parent.unprocessed_son_size = len(v.parent.chd_set)  # restore unprocessed_son_size for the next computation
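
The reverse scan over @graph above costs O(|graph|) per node. A simplified sketch of the same merge criterion keyed by a hashable tuple instead, so each lookup is amortized O(1); the function names are assumptions, while the __GraphNode fields are those used above:

def class_key(v, k):
    # two nodes are mergeable when label, height and the sorted class
    # indices of their children all coincide
    return (v.label, v.height,
            tuple(sorted(k[c].index for c in v.chd_set)))

def find_or_add(v, k, classes, graph):
    key = class_key(v, k)
    if key in classes:
        # an equal class already exists: merge v into it
        k[v] = classes[key]
        classes[key].node_set.append(v)
    else:
        # otherwise open a new class for v
        gn = __GraphNode(v.label, v.height, len(graph))
        graph.append(gn)
        classes[key] = gn
        k[v] = gn
    return k[v]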
Example No. 57
def dictionaryAttack(attack_dict):
    suffix_list = [""]
    custom_wordlist = []
    hash_regexes = []
    results = []
    resumes = []
    processException = False
    user_hash = []

    for (_, hashes) in attack_dict.items():
        for hash_ in hashes:
            if not hash_:
                continue

            hash_ = hash_.split()[0]
            regex = hashRecognition(hash_)

            if regex and regex not in hash_regexes:
                hash_regexes.append(regex)
                infoMsg = "using hash method '%s'" % __functions__[
                    regex].func_name
                logger.info(infoMsg)

    for hash_regex in hash_regexes:
        keys = set()
        attack_info = []

        for (user, hashes) in attack_dict.items():
            for hash_ in hashes:
                if not hash_:
                    continue

                hash_ = hash_.split()[0]

                if re.match(hash_regex, hash_):
                    item = None

                    if hash_regex not in (HASH.CRYPT_GENERIC, HASH.WORDPRESS):
                        hash_ = hash_.lower()

                    if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD,
                                      HASH.MD5_GENERIC, HASH.SHA1_GENERIC):
                        item = [(user, hash_), {}]
                    elif hash_regex in (HASH.ORACLE_OLD, HASH.POSTGRES):
                        item = [(user, hash_), {'username': user}]
                    elif hash_regex in (HASH.ORACLE, ):
                        item = [(user, hash_), {'salt': hash_[-20:]}]
                    elif hash_regex in (HASH.MSSQL, HASH.MSSQL_OLD,
                                        HASH.MSSQL_NEW):
                        item = [(user, hash_), {'salt': hash_[6:14]}]
                    elif hash_regex in (HASH.CRYPT_GENERIC, ):
                        item = [(user, hash_), {'salt': hash_[0:2]}]
                    elif hash_regex in (HASH.WORDPRESS, ):
                        item = [(user, hash_), {
                            'salt': hash_[4:12],
                            'count': 1 << ITOA64.index(hash_[3]),
                            'prefix': hash_[:12]
                        }]

                    if item and hash_ not in keys:
                        resumed = hashDBRetrieve(hash_)
                        if not resumed:
                            attack_info.append(item)
                            user_hash.append(item[0])
                        else:
                            infoMsg = "resuming password '%s' for hash '%s'" % (
                                resumed, hash_)
                            if user and not user.startswith(DUMMY_USER_PREFIX):
                                infoMsg += " for user '%s'" % user
                            logger.info(infoMsg)
                            resumes.append((user, hash_, resumed))
                        keys.add(hash_)

        if not attack_info:
            continue

        if not kb.wordlists:
            while not kb.wordlists:

                # the slowest of all methods hence smaller default dict
                if hash_regex in (HASH.ORACLE_OLD, HASH.WORDPRESS):
                    dictPaths = [paths.SMALL_DICT]
                else:
                    dictPaths = [paths.WORDLIST]

                message = "what dictionary do you want to use?\n"
                message += "[1] default dictionary file '%s' (press Enter)\n" % dictPaths[
                    0]
                message += "[2] custom dictionary file\n"
                message += "[3] file with list of dictionary files"
                choice = readInput(message, default="1")

                try:
                    if choice == "2":
                        message = "what's the custom dictionary's location?\n"
                        dictPaths = [readInput(message)]

                        logger.info("using custom dictionary")
                    elif choice == "3":
                        message = "what's the list file location?\n"
                        listPath = readInput(message)
                        checkFile(listPath)
                        dictPaths = getFileItems(listPath)

                        logger.info("using custom list of dictionaries")
                    else:
                        logger.info("using default dictionary")

                    for dictPath in dictPaths:
                        checkFile(dictPath)

                    kb.wordlists = dictPaths

                except SqlmapFilePathException, msg:
                    warnMsg = "there was a problem while loading dictionaries"
                    warnMsg += " ('%s')" % msg
                    logger.critical(warnMsg)

            message = "do you want to use common password suffixes? (slow!) [y/N] "
            test = readInput(message, default="N")

            if test[0] in ("y", "Y"):
                suffix_list += COMMON_PASSWORD_SUFFIXES

        infoMsg = "starting dictionary-based cracking (%s)" % __functions__[
            hash_regex].func_name
        logger.info(infoMsg)

        for item in attack_info:
            ((user, _), _) = item
            if user and not user.startswith(DUMMY_USER_PREFIX):
                custom_wordlist.append(normalizeUnicode(user))

        if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD, HASH.MD5_GENERIC,
                          HASH.SHA1_GENERIC):
            for suffix in suffix_list:
                if not attack_info or processException:
                    break

                if suffix:
                    clearConsoleLine()
                    infoMsg = "using suffix '%s'" % suffix
                    logger.info(infoMsg)

                retVal = None
                processes = []

                try:
                    if _multiprocessing:
                        if _multiprocessing.cpu_count() > 1:
                            infoMsg = "starting %d processes " % _multiprocessing.cpu_count(
                            )
                            singleTimeLogMessage(infoMsg)

                        gc.disable()

                        retVal = _multiprocessing.Queue()
                        count = _multiprocessing.Value(
                            'i', _multiprocessing.cpu_count())

                        for i in xrange(_multiprocessing.cpu_count()):
                            p = _multiprocessing.Process(
                                target=_bruteProcessVariantA,
                                args=(attack_info, hash_regex, suffix, retVal,
                                      i, count, kb.wordlists, custom_wordlist))
                            processes.append(p)

                        for p in processes:
                            p.daemon = True
                            p.start()

                        while count.value > 0:
                            time.sleep(0.5)

                    else:
                        warnMsg = "multiprocessing hash cracking is currently "
                        warnMsg += "not supported on this platform"
                        singleTimeWarnMessage(warnMsg)

                        retVal = Queue()
                        _bruteProcessVariantA(attack_info, hash_regex, suffix,
                                              retVal, 0, 1, kb.wordlists,
                                              custom_wordlist)

                except KeyboardInterrupt:
                    print
                    processException = True
                    warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)"
                    logger.warn(warnMsg)

                    for process in processes:
                        try:
                            process.terminate()
                            process.join()
                        except OSError:
                            pass

                finally:
                    if _multiprocessing:
                        gc.enable()

                    if retVal:
                        conf.hashDB.beginTransaction()

                        while not retVal.empty():
                            user, hash_, word = item = retVal.get(block=False)
                            attack_info = filter(
                                lambda _: _[0][0] != user or _[0][1] != hash_,
                                attack_info)
                            hashDBWrite(hash_, word)
                            results.append(item)

                        conf.hashDB.endTransaction()

            clearConsoleLine()

        else:
            for ((user, hash_), kwargs) in attack_info:
                if processException:
                    break

                if any(_[0] == user and _[1] == hash_ for _ in results):
                    continue

                count = 0
                found = False

                for suffix in suffix_list:
                    if found or processException:
                        break

                    if suffix:
                        clearConsoleLine()
                        infoMsg = "using suffix '%s'" % suffix
                        logger.info(infoMsg)

                    retVal = None
                    processes = []

                    try:
                        if _multiprocessing:
                            if _multiprocessing.cpu_count() > 1:
                                infoMsg = "starting %d processes " % _multiprocessing.cpu_count(
                                )
                                singleTimeLogMessage(infoMsg)

                            gc.disable()

                            retVal = _multiprocessing.Queue()
                            found_ = _multiprocessing.Value('i', False)
                            count = _multiprocessing.Value(
                                'i', _multiprocessing.cpu_count())

                            for i in xrange(_multiprocessing.cpu_count()):
                                p = _multiprocessing.Process(
                                    target=_bruteProcessVariantB,
                                    args=(user, hash_, kwargs, hash_regex,
                                          suffix, retVal, found_, i, count,
                                          kb.wordlists, custom_wordlist))
                                processes.append(p)

                            for p in processes:
                                p.daemon = True
                                p.start()

                            while count.value > 0:
                                time.sleep(0.5)

                            found = found_.value != 0

                        else:
                            warnMsg = "multiprocessing hash cracking is currently "
                            warnMsg += "not supported on this platform"
                            singleTimeWarnMessage(warnMsg)

                            class Value():
                                pass

                            retVal = Queue()
                            found_ = Value()
                            found_.value = False

                            _bruteProcessVariantB(user, hash_, kwargs,
                                                  hash_regex, suffix, retVal,
                                                  found_, 0, 1, kb.wordlists,
                                                  custom_wordlist)

                            found = found_.value

                    except KeyboardInterrupt:
                        print
                        processException = True
                        warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)"
                        logger.warn(warnMsg)

                        for process in processes:
                            try:
                                process.terminate()
                                process.join()
                            except OSError:
                                pass

                    finally:
                        if _multiprocessing:
                            gc.enable()

                        if retVal:
                            conf.hashDB.beginTransaction()

                            while not retVal.empty():
                                user, hash_, word = item = retVal.get(
                                    block=False)
                                hashDBWrite(hash_, word)
                                results.append(item)

                            conf.hashDB.endTransaction()

                clearConsoleLine()
Example No. 58
class XReqSocketChannel(ZmqSocketChannel):
    """The XREQ channel for issues request/replies to the kernel.
    """

    command_queue = None

    def __init__(self, context, session, address):
        super(XReqSocketChannel, self).__init__(context, session, address)
        self.command_queue = Queue()
        self.ioloop = ioloop.IOLoop()

    def run(self):
        """The thread's main activity.  Call start() instead."""
        self.socket = self.context.socket(zmq.XREQ)
        self.socket.setsockopt(zmq.IDENTITY, self.session.session)
        self.socket.connect('tcp://%s:%i' % self.address)
        self.iostate = POLLERR | POLLIN
        self.ioloop.add_handler(self.socket, self._handle_events, self.iostate)
        self.ioloop.start()

    def stop(self):
        self.ioloop.stop()
        super(XReqSocketChannel, self).stop()

    def call_handlers(self, msg):
        """This method is called in the ioloop thread when a message arrives.

        Subclasses should override this method to handle incoming messages.
        It is important to remember that this method is called in the thread
        so that some logic must be done to ensure that the application leve
        handlers are called in the application thread.
        """
        raise NotImplementedError(
            'call_handlers must be defined in a subclass.')

    def execute(self,
                code,
                silent=False,
                user_variables=None,
                user_expressions=None):
        """Execute code in the kernel.

        Parameters
        ----------
        code : str
            A string of Python code.
            
        silent : bool, optional (default False)
            If set, the kernel will execute the code as quietly as possible.

        user_variables : list, optional
            A list of variable names to pull from the user's namespace.  They
            will come back as a dict with these names as keys and their
            :func:`repr` as values.
            
        user_expressions : dict, optional
            A dict with string keys and expressions as values to pull from the
            user's namespace.  They will come back as a dict with these names
            as keys and their :func:`repr` as values.

        Returns
        -------
        The msg_id of the message sent.
        """
        if user_variables is None:
            user_variables = []
        if user_expressions is None:
            user_expressions = {}

        # Don't waste network traffic if inputs are invalid
        if not isinstance(code, basestring):
            raise ValueError('code %r must be a string' % code)
        validate_string_list(user_variables)
        validate_string_dict(user_expressions)

        # Create class for content/msg creation. Related to, but possibly
        # not in Session.
        content = dict(code=code,
                       silent=silent,
                       user_variables=user_variables,
                       user_expressions=user_expressions)
        msg = self.session.msg('execute_request', content)
        self._queue_request(msg)
        return msg['header']['msg_id']

    def complete(self, text, line, cursor_pos, block=None):
        """Tab complete text in the kernel's namespace.

        Parameters
        ----------
        text : str
            The text to complete.
        line : str
            The full line of text that is the surrounding context for the 
            text to complete.
        cursor_pos : int
            The position of the cursor in the line where the completion was
            requested.
        block : str, optional
            The full block of code in which the completion is being requested.

        Returns
        -------
        The msg_id of the message sent.
        """
        content = dict(text=text,
                       line=line,
                       block=block,
                       cursor_pos=cursor_pos)
        msg = self.session.msg('complete_request', content)
        self._queue_request(msg)
        return msg['header']['msg_id']

    def object_info(self, oname):
        """Get metadata information about an object.

        Parameters
        ----------
        oname : str
            A string specifying the object name.
        
        Returns
        -------
        The msg_id of the message sent.
        """
        content = dict(oname=oname)
        msg = self.session.msg('object_info_request', content)
        self._queue_request(msg)
        return msg['header']['msg_id']

    def history_tail(self, n=10, raw=True, output=False):
        """Get the history list.

        Parameters
        ----------
        n : int
            The number of lines of history to get.
        raw : bool
            If True, return the raw input.
        output : bool
            If True, then return the output as well.

        Returns
        -------
        The msg_id of the message sent.
        """
        content = dict(n=n, raw=raw, output=output)
        msg = self.session.msg('history_tail_request', content)
        self._queue_request(msg)
        return msg['header']['msg_id']

    def shutdown(self, restart=False):
        """Request an immediate kernel shutdown.

        Upon receipt of the (empty) reply, client code can safely assume that
        the kernel has shut down and it's safe to forcefully terminate it if
        it's still alive.

        The kernel will send the reply via a function registered with Python's
        atexit module, ensuring it's truly done as the kernel is done with all
        normal operation.
        """
        # Send quit message to kernel. Once we implement kernel-side setattr,
        # this should probably be done that way, but for now this will do.
        msg = self.session.msg('shutdown_request', {'restart': restart})
        self._queue_request(msg)
        return msg['header']['msg_id']

    def _handle_events(self, socket, events):
        if events & POLLERR:
            self._handle_err()
        if events & POLLOUT:
            self._handle_send()
        if events & POLLIN:
            self._handle_recv()

    def _handle_recv(self):
        ident, msg = self.session.recv(self.socket, 0)
        self.call_handlers(msg)

    def _handle_send(self):
        try:
            msg = self.command_queue.get(False)
        except Empty:
            pass
        else:
            self.session.send(self.socket, msg)
        if self.command_queue.empty():
            self.drop_io_state(POLLOUT)

    def _handle_err(self):
        # We don't want to let this go silently, so eventually we should log.
        raise zmq.ZMQError()

    def _queue_request(self, msg):
        self.command_queue.put(msg)
        self.add_io_state(POLLOUT)
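
call_handlers above deliberately raises NotImplementedError, so a concrete client must subclass the channel. A minimal sketch of one way to do that, handing messages to the application thread through a queue; the subclass and attribute names are assumptions, not part of the original API:

class GuiXReqChannel(XReqSocketChannel):
    """Hypothetical subclass that forwards kernel replies to the GUI thread."""

    def __init__(self, context, session, address, gui_queue):
        super(GuiXReqChannel, self).__init__(context, session, address)
        self.gui_queue = gui_queue  # drained by the application thread

    def call_handlers(self, msg):
        # runs in the ioloop thread: do not touch application state here,
        # just hand the message off for the application thread to process
        self.gui_queue.put(msg)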
Example No. 59
                                     bt_state._connection_candidates[0])

            # start listening for data from the server on a separate thread
            t_listener = Thread(target=ReadFromServer,
                                args=[bt_state, on_read])
            t_listener.daemon = True
            t_listener.start()

            # create the player (raw_input, since input() would eval in Python 2)
            p_name = raw_input("Please enter your name: ")
            p = '{"type": "playerCreation", "data": {"playerName": "' + str(
                p_name) + '"}}'
            WriteToServer(bt_state, p)

            while (not game_over):
                while not work_queue.empty():
                    task = work_queue.get()
                    try:
                        if task['type'] == 'question':
                            Question(task['data'])
                        elif task['type'] == 'scoreUpdate':
                            ShowScore(task['data'])
                        elif task['type'] == 'socketError':
                            print 'The server socket is no longer readable'
                            game_over = True
                    except Exception as e:
                        print str(e)
                    time.sleep(1)
                time.sleep(2)
        else:
            print "Could not detect any Bluetooth devices!"
Example No. 60
class StartWebControlServer(Command):
    """ This class implements methods to start a communication pipe to
receive commands from a web browser"""

    def checkDependencies(self, vf):
        import thread

    def doit(self):
        import thread
        com = self.vf.webControl
        if com.port:
            print 'WARNING: server already running on port', com.port
            return
        com.startServer()
        thread.start_new(com.acceptClients, (self.clients_cb,))

        # Write the port number to a file named pmv_server.pid in the directory
        # where the Python process was started if the Karrigell package does not
        # exist; otherwise save pmv_server.pid in the Karrigell folder.
        import os, sys
        karrigell_path = None
        for d in sys.path:
            dirname = os.path.join(d, 'Karrigell-2.0.3')
            if os.path.exists(dirname):
                karrigell_path = dirname
        if not karrigell_path:
            filename = 'pmv_server.pid'
        else:
            filename = os.path.join(karrigell_path, 'pmv_server.pid')
        f = open(filename, 'w')
        f.write('%d' % com.port)
        f.close()

        from Queue import Queue
        self.cmdQueue = Queue(-1) # infinite size

        # start checking for messages from the web controler
        self.vf.GUI.ROOT.after(10, self.checkForCommands)


    def checkForCommands(self, event=None):
        """periodically check for new commands
        cmd should be a string where the Viewerframework command is separate by ||
        from a tuple representing the args separated by || from a dictonnary
        which represent the keyword arguments of commands
        example:
        'colorByAtomType||'+ '(self.vf.getSelection(), ('lines',))|| {}'
        colorByAtmoType: cmd to run
        (self.vf.getSelection(), ('lines',)): arguments to pass to cmd
        {}: keyword argument to pass to cmd
        """
        if not self.cmdQueue.empty():
            cmd = self.cmdQueue.get(False) # do not block if queue empty
            if cmd:
                #print 'got', cmd
                w = cmd.split('||')
                if self.vf.commands.has_key(w[0]):
                    args = eval(w[1])
                    kw = eval(w[2])
                    apply(self.vf.commands[w[0]], args, kw)

        self.vf.GUI.ROOT.after(10, self.checkForCommands)


    def clients_cb(self, client, data):
        """get called every time a web page sends a message
The message is sent using Karrigell's Python in Html
"""
        # put it in a thread safe queue
        self.cmdQueue.put(data)
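
For illustration, a minimal sketch of what a message reaching clients_cb could look like in the '||'-separated format that checkForCommands parses; the command name and arguments are copied from the docstring example above, not from any guaranteed Pmv API:

data = "colorByAtomType||(self.vf.getSelection(), ('lines',))||{}"

# clients_cb enqueues the raw string; checkForCommands later splits it into
# the command name, a tuple literal of args and a dict literal of kwargs
name, args_s, kw_s = data.split('||')
assert name == 'colorByAtomType'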