Example #1
    def activate(self):
        self.activated = True

        res.fadeoutMusic()

        self.maps = []
        self.change_delay = 2000  # milliseconds until the map moves to the next hotspot
        self.map_fadeout = 60.0  # must be a float
        self.last_update = 0
        self.surfaceQueue = queue()
        self.subpixelQueue = queue()

        self.hotspots = cycle(([300, 500], [750, 800], [350, 260], [700, 340],
                               [120, 1000], [800, 830], [480, 900]))

        self.overworld = res.loadImage("overworld.png")

        self.menu = cMenu(Rect((42, 20), sd.get_size()),
                          20,
                          5,
                          'vertical',
                          100, [('New Game', self.new_game),
                                ('Load Game', self.load_game),
                                ('Introduction', self.show_intro),
                                ('Quit Game', self.quit_game)],
                          font="northwoodhigh.ttf",
                          font_size=24)

        self.menu.ready()
        self.change_map()
Example #2
    def activate(self):
        self.activated = True

        res.fadeoutMusic()

        self.maps = []
        self.change_delay = 8000        # milliseconds until the map moves to the next hotspot
        self.map_fadeout = 60.0         # must be a float
        self.last_update = 0
        self.surfaceQueue = queue()
        self.subpixelQueue = queue()

        self.hotspots = cycle(([300,500], [750, 800], [350, 260], [700, 340], [120, 1000], [800, 830], [480, 900]))
        
        self.overworld = res.loadImage("overworld.png")

        self.menu = cMenu(Rect((42,20), sd.get_size()),
            20, 5, 'vertical', 100,
            [('New Game', self.new_game),
            ('Battle Test', self.continue_game),
            ('Introduction', self.show_intro),
            ('Quit Game', self.quit_game)],
            font="northwoodhigh.ttf", font_size=24)

        self.menu.ready()
        self.change_map()
Example #3
 def __init__(self, IP, PORT, ID):
         self._sendAddress = (IP, PORT+1)
         
         dbgPrint("INIT", "ioObject ID: " + ID)
         
         self._dpSock = openUDPSocket(IP, PORT, "READ")
         
         self._txQ = queue()
         self._rxQ = queue()
         
         self._ID = ID
         
         self.run()
Example #4
        def __init__(self, IP, PORT, rvrID):
        
                dbgPrint("INIT", "RVRIOObject ID: " + rvrID)
        
                self._sendAddress = (IP, PORT+1)
                self._dpSock = openUDPSocket(IP, PORT, "READ")
                                
                self._txQ = queue()
                self._rxQ = queue()

                self._ACKQ = queue()
                
                self._rvrID = rvrID
                
                self.run()
Example #5
    def execute(self,
                wait=True,
                loglevel=logging.INFO,
                optimize=True,
                implicit_sink=None):
        self.config.optimize = optimize

        if 'inited' not in self.__dict__ or not self.inited:
            raise FlumeException('node.__init__ was never used')

        # XXX: special case of a single source with no output doing an execute()
        #      on it shouldn't fail
        if not hasattr(self, 'outputs'):
            node.init_node(self, outputs=[])

        logger.setLogLevel(loglevel)

        if implicit_sink is not None and \
           not isinstance(self, sink) and \
           self.child is None:
            self.outputs = [queue()]
            this_sink = implicit_sink
            node.init_node(this_sink,
                           inputs=self.outputs,
                           outputs=[],
                           parent=self,
                           source=self.source)

            self.child = this_sink
            this_sink.execute(wait=wait,
                              loglevel=loglevel,
                              optimize=optimize,
                              implicit_sink=implicit_sink)

        else:
            # XXX: pooling here ?
            thread = threading.Thread(target=self.run)
            # Daemonize so that when we Ctrl+C the main program then all underlying
            # threads are instantly killed. Currently don't have any concern about
            # cleanly closing resources.
            thread.daemon = True
            thread.start()

            if self.parent:
                self.parent.execute(wait=wait,
                                    loglevel=loglevel,
                                    optimize=optimize,
                                    implicit_sink=implicit_sink)

            if wait:
                while thread.is_alive():
                    # if you don't join with a timeout then you block the parent
                    # until the child has completely finished and therefore can't
                    # handle any signals in the parent (ie SIGINT)
                    thread.join(1)

                exc_info = self.exc_info.get()

                if exc_info is not None:
                    six.reraise(exc_info[0], exc_info[1], exc_info[2])
Example #6
 def __init__(self):
     from johnny.signals import qc_hit, qc_miss, qc_skip
     from Queue import Queue as queue
     self.q = queue()
     qc_hit.connect(self._hit)
     qc_miss.connect(self._miss)
     qc_skip.connect(self._skip)
Example #7
 def networkDelayTime(self, times, N, K):
     """
     Solution: Dijkstra + Priority Queue
     Time Complexity:
     Space Complexity:
     Inspired By: MySELF!! + Q.505
     :type times: List[List[int]]
     :type N: int
     :type K: int
     :rtype: int
     """
     distance = dict()  # adjacency list: u -> list of [v, w] edge pairs
     distance_from_K = [float('inf') for _ in range(N)]
     for u, v, w in times:
         if u in distance:
             distance[u].append([v, w])
         else:
             distance[u] = [[v, w]]
     distance_from_K[K-1] = 0
     q = queue()  # must be a priority queue so get() pops the closest node first
     q.put((distance_from_K[K-1], K))
     while not q.empty():
         current_node = q.get()
         val = current_node[1]
         dist = current_node[0]
         if val not in distance:
             continue
         neighbors = distance[val]
         for k, v in neighbors:
             if v + dist < distance_from_K[k-1]:
                 distance_from_K[k-1] = v + dist
                 if k in distance:
                     q.put((distance_from_K[k-1], k))
     res = max(distance_from_K)
     return -1 if res == float('inf') else res
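The traversal above only behaves as Dijkstra if `queue` is bound to a priority queue, so that `q.get()` returns the node with the smallest tentative distance. A minimal illustrative call, assuming the method sits on a LeetCode-style `Solution` class (that class name and the binding are assumptions, not shown in the example):

from queue import PriorityQueue as queue  # Python 3; on Python 2: from Queue import PriorityQueue as queue

times = [[2, 1, 1], [2, 3, 1], [3, 4, 1]]  # directed (u, v, w) edges from the LeetCode 743 statement
print(Solution().networkDelayTime(times, 4, 2))  # expected: 2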
Example #8
    def test_transaction_rollback(self):
        """Tests johnny's handling of transaction rollbacks.

        Similar to the commit, this sets up a write to a db in a transaction,
        reads from it (to force a cache write of something), then rolls back."""
        from Queue import Queue as queue
        from django.db import transaction
        from testapp.models import Genre, Publisher
        from johnny import cache
        if settings.DATABASE_ENGINE == 'sqlite3':
            print "\n  Skipping test requiring multiple threads."
            return

        self.failUnless(transaction.is_managed() == False)
        self.failUnless(transaction.is_dirty() == False)
        connection.queries = []
        cache.local.clear()
        q = queue()
        other = lambda x: self._run_threaded(x, q)

        # load some data
        start = Genre.objects.get(id=1)
        other('Genre.objects.get(id=1)')
        hit, ostart = q.get()
        # these should be the same and should have hit cache
        self.failUnless(hit)
        self.failUnless(ostart == start)
        # enter manual transaction management
        transaction.enter_transaction_management()
        transaction.managed()
        start.title = 'Jackie Chan Novels'
        # local invalidation, this key should hit the localstore!
        nowlen = len(cache.local)
        start.save()
        self.failUnless(nowlen != len(cache.local))
        # perform a read OUTSIDE this transaction... it should still see the
        # old gen key, and should still find the "old" data
        other('Genre.objects.get(id=1)')
        hit, ostart = q.get()
        self.failUnless(hit)
        self.failUnless(ostart.title != start.title)
        # perform a READ inside the transaction;  this should hit the localstore
        # but not the outside!
        nowlen = len(cache.local)
        start2 = Genre.objects.get(id=1)
        self.failUnless(start2.title == start.title)
        self.failUnless(len(cache.local) > nowlen)
        transaction.rollback()
        # we rollback, and flush all johnny keys related to this transaction
        # subsequent gets should STILL hit the cache in the other thread
        # and indeed, in this thread.

        self.failUnless(transaction.is_dirty() == False)
        other('Genre.objects.get(id=1)')
        hit, ostart = q.get()
        self.failUnless(hit)
        start = Genre.objects.get(id=1)
        self.failUnless(ostart.title == start.title)
        transaction.managed(False)
        transaction.leave_transaction_management()
Example #9
def sync_one_node(debug, node, force=False, config_cluster=None, cluster_items=None):
    """
    Sync files with only one node
    """
    synchronization_date = time()
    synchronization_duration = 0.0

    if not config_cluster:
        config_cluster = read_config()

        if not config_cluster:
            raise WazuhException(3000, "No config found")

    if not cluster_items:
        cluster_items = get_cluster_items()

    before = time()
    # Get own items status
    own_items = list_files_from_filesystem(config_cluster['node_type'], cluster_items)
    own_items_names = own_items.keys()

    cluster_socket = connect_to_db_socket()
    logging.debug("Connected to cluster database socket")

    if force:
        clear_file_status_one_node(node, cluster_socket)
    all_files, removed = scan_for_new_files_one_node(node, cluster_items, config_cluster, cluster_socket, own_items, True)

    after = time()
    synchronization_duration += after-before
    logging.debug("Time retrieving info from DB: {0}".format(after-before))

    before = time()
    result_queue = queue()
    push_updates_single_node(all_files, node, config_cluster, removed, cluster_items, result_queue)

    after = time()
    synchronization_duration += after-before
    logging.debug("Time sending info: {0}".format(after-before))
    before = time()

    result = result_queue.get()
    update_node_db_after_sync(result, node, cluster_socket)
    after = time()
    synchronization_duration += after-before

    send_recv_and_check(cluster_socket, "clearlast")
    send_recv_and_check(cluster_socket, "updatelast {:d} {:f}".format(int(synchronization_date), synchronization_duration))

    cluster_socket.close()
    logging.debug("Time updating DB: {0}".format(after-before))

    if debug:
        return result
    else:
        return {'updated': len(result['files']['updated']),
                'deleted': result['files']['deleted'],
                'error': result['error'],
                'reason': result['reason']}
Example #10
def predict_next_move(me, them):
    if len(me) < 10:
        return 0.5  # We need some existing data

    # We can build a Bayesian Network using Naive Bayes to calculate each CPT
    # Features:
    # - Relative score <- Assume they want to beat us
    # - My recent greed <- Assume recent greed has a heavy impact
    # - My overall greed <- Assume overall greed also has some impact
    # - Their recent greed <- Assume they'll try to compensate
    entries = []
    for i in range(5, len(me)):
        entries.append(get_entry(me, them, i))

    f_count = len(entries[0])  # Feature count

    # Get the root node
    index = -1
    info_gain = 0
    max_split = None
    for i in range(0, f_count):
        split = find_optimal_split(entries, i)
        ig = information_gain(entries, i, split)
        if ig > info_gain:
            info_gain = ig
            max_split = split
            index = i

    pq = queue()
    root = Node(entries, [index], index, max_split)
    node_count = 1

    options = get_options(root)
    for o in options:
        pq.put(o)

    while node_count <= 3:
        o = pq.get()
        if o.n1.has_child(o.on_right):
            continue
        parent_node = o.n1
        node = o.n2
        parent_node.add_child(o.on_right, node)

        options = get_options(node)
        for o in options:
            pq.put(o)

        node_count += 1

    relative_score = round(
        their_score(me, them) * 1.0 / my_score(me, them) * 10)
    my_recent_greed = recent_greediness(me, RECENT_GREEDINESS_LENGTH)
    their_recent_greed = recent_greediness(them, RECENT_GREEDINESS_LENGTH)
    my_overall_greed = greed(me)

    return root.predict_outcome([
        relative_score, my_recent_greed, their_recent_greed, my_overall_greed
    ])
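One line in `predict_next_move` that is easy to misread: since `*` and `/` associate left to right, the `relative_score` expression computes `(their_score / my_score) * 10`, not `their_score / (my_score * 10)`; the `* 1.0` only forces float division on Python 2. A tiny worked check:

their, mine = 30, 20
print(round(their * 1.0 / mine * 10))  # 15 (15.0 on Python 2), i.e. (30 / 20) * 10, not 30 / 200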
Example #11
    def __init__(self, stopEvent):
        self.plc_ok_q = queue()

        ledThread = led.plc_ok_th(self.plc_ok_q, stopEvent)
        threadPool.append(ledThread)
        ledThread.start()

        self.plc_init()
Example #12
 def __init__(self):
     """
     init to allow threading
     """
     threading.Thread.__init__(self)
     self.daemon = True  # daemon thread: dies with the main thread
     self.packetque = queue()
     self.stop = False
Example #13
    def find_best_path(self, food, width, height):
        # Find the best path using the A* algorithm
        path = a_star(width, height, [self.x, self.y], food)

        # Add the current route to the queue
        new_q = queue()
        new_q.add(path)
        self.move_queue = new_q
Example #15
def send_request_to_nodes(remote_nodes,
                          config_cluster,
                          request_type,
                          args,
                          cluster_depth=1):
    threads = []
    result = {}
    result_node = {}
    result_nodes = {}
    result_queue = queue()
    local_node = get_node()['node']
    remote_nodes_addr = []
    msg = None

    if remote_nodes is None or len(remote_nodes) == 0:
        remote_nodes_addr = list(map(lambda x: x['url'], get_nodes()['items']))
    else:
        remote_nodes_addr = remote_nodes.keys()

    args_str = " ".join(args)

    for node_id in remote_nodes_addr:
        if node_id is not None:
            logging.info("Sending {2} request from {0} to {1}".format(
                local_node, node_id, request_type))

            # Push agents id
            if remote_nodes.get(node_id) is not None and len(
                    remote_nodes[node_id]) > 0:
                agents = "-".join(remote_nodes[node_id])
                msg = agents
                if len(args_str) > 0:
                    msg = msg + " " + args_str
            else:
                msg = args_str
            t = threading.Thread(target=send_request_to_node,
                                 args=(str(node_id), config_cluster,
                                       request_type, msg, cluster_depth,
                                       result_queue))
            threads.append(t)
            t.start()
            result_node = result_queue.get()
        else:
            result_node['data'] = {}
            result_node['data']['failed_ids'] = []
            for id in remote_nodes[node_id]:
                node = {}
                node['id'] = id
                node['error'] = {'message': "Agent not found", 'code': -1}
                result_node['data']['failed_ids'].append(node)
        result_nodes[node_id] = result_node
    for t in threads:
        t.join()
    for node, result_node in result_nodes.iteritems():
        result = append_node_result_by_type(node, result_node, request_type,
                                            result)
    return result
Example #16
 def __init__(self):
     threading.Thread.__init__(self)
     self.daemon = True  # daemon thread: dies with the main thread
     self.TUNSETIFF = 0x400454ca  # ioctl request to configure a TUN/TAP device
     self.TUNSETOWNER = self.TUNSETIFF + 2  # ioctl request to set the device owner
     self.IFF_TUN = 0x0001  # layer-3 (point-to-point) device
     self.IFF_TAP = 0x0002  # layer-2 (ethernet) device
     self.IFF_NO_PI = 0x1000  # do not prepend packet information bytes
     self.packetque = queue()
     self.stop = False
Example #17
def main():
    run = True
    n = Network()
    p = n.getP()
    clock = pygame.time.Clock()
    points = queue(2)
    time_stamps = queue(2)
    vel_mag = 0
    ctr = 0
    point = 0
    while run:
        clock.tick(60)
        p2 = n.send(p)
        # print(p2)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                pygame.quit()

        ctr += 1
        # if(len(points.q) <2):
        # 	continue
        try:
            if (ctr % 10 == 0):
                points.push(p.get_point())
                time_stamps.push(time.time())  # assumption: the original pushed no value; a timestamp appears intended
                vel = dist(points[1],
                           points[0]) // abs(time_stamps[1] - time_stamps[0])
                vel = int(vel)
                ctr = 0

            direction = (((points.q)[1][0] - (points.q)[0][0]),
                         ((points.q)[1][1] - (points.q)[0][1]))
            print(direction)
            if (hit()):
                pass

        except:
            pass

        p.move()

        redrawWindow(win, p, p2)
Example #19
 def __init__(self, x, y, animal_range, speed):
     self.x = x
     self.y = y
     self.animal_range = animal_range
     self.speed = speed
     self.hunger = 10
     self.food_near = []
     self.searching_for_food = True
     self.move_queue = queue()
     self.moves = ""
Example #20
    def node_func(self, *args, **kwargs):
        """
        internal method responsible for hooking up the nodes in a flume
        pipeline
        """
        node_instance = which_node(*args, **kwargs)

        if not hasattr(self, 'outputs'):
            node.init_node(self, outputs=[queue()])

        source = self.source if hasattr(self, 'source') else self
        node.init_node(node_instance,
                       inputs=self.outputs,
                       outputs=[queue()],
                       parent=self,
                       source=source)

        self.child = node_instance
        return node_instance
Example #21
    def __init__(self, idNum, numCheckPoint):
        self._idNum = idNum
        self._numCheckPoint = numCheckPoint
        self._point = 0
        self._checkPoints = Array(numCheckPoint)
        self._lineQueue = queue()
        self._check = 0

        # Create the checkpoints
        for i in range(numCheckPoint):
            self._checkPoints[i] = checkpoint(i + 1)
Example #22
    def test_transaction_commit(self):
        """Test transaction support in Johnny."""
        from Queue import Queue as queue
        from django.db import transaction
        from testapp.models import Genre, Publisher
        from johnny import cache

        if django.VERSION[:2] < (1, 3):
            if settings.DATABASE_ENGINE == 'sqlite3':
                print "\n  Skipping test requiring multiple threads."
                return
        else:
            if settings.DATABASES.get('default', {}).get('ENGINE', '').endswith('sqlite3'):
                print "\n  Skipping test requiring multiple threads."
                return


        self.failUnless(transaction.is_managed() == False)
        self.failUnless(transaction.is_dirty() == False)
        connection.queries = []
        cache.local.clear()
        q = queue()
        other = lambda x: self._run_threaded(x, q)
        # load some data
        start = Genre.objects.get(id=1)
        other('Genre.objects.get(id=1)')
        hit, ostart = q.get()
        # these should be the same and should have hit cache
        self.failUnless(hit)
        self.failUnless(ostart == start)
        # enter manual transaction management
        transaction.enter_transaction_management()
        transaction.managed()
        start.title = 'Jackie Chan Novels'
        # local invalidation, this key should hit the localstore!
        nowlen = len(cache.local)
        start.save()
        self.failUnless(nowlen != len(cache.local))
        # perform a read OUTSIDE this transaction... it should still see the
        # old gen key, and should still find the "old" data
        other('Genre.objects.get(id=1)')
        hit, ostart = q.get()
        self.failUnless(hit)
        self.failUnless(ostart.title != start.title)
        transaction.commit()
        # now that we commit, we push the localstore keys out;  this should be
        # a cache miss, because we never read it inside the previous transaction
        other('Genre.objects.get(id=1)')
        hit, ostart = q.get()
        self.failUnless(not hit)
        self.failUnless(ostart.title == start.title)
        transaction.managed(False)
        transaction.leave_transaction_management()
Example #23
    def __or__(self, other):
        """
        enable the ability to use the bitwise or operator to emulate the pipe
        notation between flume nodes
        """
        if not hasattr(self, 'outputs'):
            node.init_node(self, outputs=[queue()])

        source = self.source if hasattr(self, 'source') else self

        if isinstance(other, tuple):
            # split the stream!
            other = splitter(flumes=list(other))

        node.init_node(other,
                       inputs=self.outputs,
                       outputs=[queue()],
                       parent=self,
                       source=source)

        self.child = other
        return other
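For context, this `__or__` overload is what makes pipe-style flume pipelines possible: every `|` wires the left node's output `queue()` into the right node's inputs and links the `parent`/`child` references. A hedged sketch of the resulting call chain (the node names below are illustrative, not necessarily real flume procs):

# each `|` goes through __or__, chaining nodes via shared queue() objects
pipeline = source_node | transform_node | sink_node
pipeline.execute(wait=True)  # per Example #5, execution then walks back up the .parent links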
Example #24
 def setUp(self):
     self.gl_map_object = GLmap()
     self.conf = {'CLIENT_TIMEOUT': 10}
     self.communicator = "None_for_now"
     self.service_id = "HN0101_61014_container-server"
     self.logger = get_logger({}, log_route='recovery')
     self.__trans_path = "/export/HN0101_61014_transaction_journal"
     self.__cont_path = "/export/HN0101_61014_container_journal"
     osd.containerService.container_recovery.final_recovery_status_list = list()
     osd.containerService.container_recovery.list_of_tuple = queue(maxsize=0)
     osd.containerService.container_recovery.Thread = Monitor()
Example #25
    def shortestDistance(self, maze, start, destination):
        """
        Solution: DFS + Dijkstra Algorithm
        Time Complexity: O(mn*log(mn))
        Space Complexity: O(mn)
        Inspired By: https://leetcode.com/problems/the-maze-ii/solution/
        TP:
        - Use Dijkstra Algorithm
        - Use visited array to help to update priority queue
        :type maze: List[List[int]]
        :type start: List[int]
        :type destination: List[int]
        :rtype: int
        """
        if maze is None or len(maze) == 0: return -1
        row = len(maze)
        col = len(maze[0])
        distance = [[float('inf') for _ in range(col)] for _ in range(row)]
        visited = [[False for _ in range(col)] for _ in range(row)]
        distance[start[0]][start[1]] = 0
        q = queue()  # needs to be a priority queue, per the Dijkstra approach above
        q.put((0, (start[0], start[1])))

        def dijkstra(maze, distance, visited):
            directions = [[1, 0], [0, 1], [-1, 0], [0, -1]]
            # stop once the destination is settled; with "or" an unreachable destination would block forever on q.get()
            while not q.empty() and not visited[destination[0]][destination[1]]:
                s = q.get()[1]
                if visited[s[0]][s[1]]:
                    continue
                visited[s[0]][s[1]] = True
                for direction in directions:
                    count = 0
                    next_i = s[0] + direction[0]
                    next_j = s[1] + direction[1]
                    while next_i >=0 and next_i < row and next_j >=0 and next_j < col and maze[next_i][next_j] != 1:
                        count += 1
                        next_i += direction[0]
                        next_j += direction[1]
                    current_i = next_i - direction[0]
                    current_j = next_j - direction[1]
                    if distance[current_i][current_j] > distance[s[0]][s[1]] + count:
                        distance[current_i][current_j] = distance[s[0]][s[1]] + count
                        q.put((distance[current_i][current_j], (current_i, current_j)))

        dijkstra(maze, distance, visited)

        if distance[destination[0]][destination[1]] != float('inf'):
            return distance[destination[0]][destination[1]]
        else:
            return -1
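As with Example #7, this only computes shortest distances if `queue` is a priority queue; under that assumption (plus a LeetCode-style `Solution` class), the canonical LeetCode 505 example would run as:

from queue import PriorityQueue as queue

maze = [[0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0],
        [1, 1, 0, 1, 1],
        [0, 0, 0, 0, 0]]
print(Solution().shortestDistance(maze, [0, 4], [4, 4]))  # expected: 12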
Example #26
    def __init__(self, ip, port, id, acktimeout=0.5):

        # members
        self._ip = ip
        self._port = port
        self._id = id
        self._acktimeout = acktimeout
        self._ackQ = queue()

        # open socket
        self._tcpSock = self._openTCPSocket()

        # state
        self._reconnect_tries = 0
Example #27
    def __init__(self, iface):

        self._index = 0                                             # Index of the last deque entry read
        self._level = 0
        self._action = self.MODE_STOP

        self._queue = queue()                                       # Stores the sequence of the user's actions
        self._deque = deque()                                       # Stores the sequence of Simon's actions
        self._sounds = SoundSystem()
        self._blinker = Blinker(iface, 'blinker', delay=.1)
        self._buttons = Buttons(iface, 'buttons', release=self.on_response)

        # Define all input channels on iface with the correct properties
        for btn in iface.get_input_channels_ports():
            self._buttons.setup(btn, self._buttons.RELEASE, self._buttons.PUD_UP, 500)
Example #28
def nearest_good_cell(m, start):
    q = queue()
    q.put(start)
    seen = {tuple(start)}  # track queued cells; without this the BFS re-enqueues cells forever

    height = len(m)
    width = len(m[0])

    while not q.empty():
        cell = q.get()
        if is_valid(cell, m):
            return cell

        for neighbor in valid_neighbors(cell, width, height):
            if tuple(neighbor) not in seen:
                seen.add(tuple(neighbor))
                q.put(neighbor)
    return None
Example #29
def breadthOfTree(root):
    if root is None:
        return 0
    p, bdh = queue(), 0
    p.put(root)
    while not p.empty():
        if bdh < p.qsize():
            bdh = p.qsize()
        s = p.qsize()
        for _ in xrange(s):
            cur = p.get()
            if cur.left is not None:
                p.put(cur.left)
            if cur.right is not None:
                p.put(cur.right)
    return bdh
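A quick hedged check of `breadthOfTree`: with `queue` bound as in the other Python 2 snippets here (`from Queue import Queue as queue`) and a minimal illustrative node class, the widest level of a three-node tree is 2:

class TreeNode(object):  # illustrative; any object with .left and .right works
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

print(breadthOfTree(TreeNode(1, TreeNode(2), TreeNode(3))))  # 2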
Example #30
def main():

    book1 = book('harper', 'abook', 1, 9)
    book2 = book('haper', 'bbook', 2, 5)
    book3 = book('hooper', 'cbook', 4, 7)

    queuer = queue()
    queuer.enqueue('buy', book1)
    queuer.enqueue('buy', book2)
    queuer.enqueue('buy2', book3)
    print queuer.peek('buy').get_all()
    print queuer.peek('buy2').get_all()
    queuer.dequeue('buy')
    queuer.dequeue('buy2')
    print queuer.peek('buy').get_all()
    print queuer.peek('buy2').get_all()
Example #31
    def move_to_food(self):
        #Boolean for determining if animal has found any food
        has_path = False
        if self.move_queue:
            #If the animal has a queue, they have found a food
            has_path = True

        moved = 0
        for i in range(self.speed):
            empty_queue = False
            moves = self.move_queue.de_q()
            try:
                new_q = moves[1:]
            except TypeError as e:
                new_q = ""  # de_q() returned None, so there is nothing queued to replay
                if not has_path:
                    self.searching_for_food = True
            try:
                try:
                    move = moves[0]
                except TypeError as t:
                    empty_queue = True
            except IndexError as e:
                empty_queue = True

            if empty_queue:
                """
                If the animal has a path set, an empty queue means
                they have reached their food and should exit
                """
                if has_path:
                    break

                move = random.choice(["U", "D", "L", "R"])
                self.searching_for_food = True

            self.move_queue = queue()
            self.move_queue.add(new_q)

            if move == "U":
                self.y += 1
            elif move == "D":
                self.y -= 1
            elif move == "L":
                self.x -= 1
            elif move == "R":
                self.x += 1
Example #32
def subscribe_channel(x, y, con):
    j = "There is no Channel named \"" + x + "\""
    if len(chann) != 0:
        for i in chann:
            if i.channe == x:
                userqueue = queue(x, y)
                i.push(userqueue)
                i.pushs(y)

                print("usuario " + y + " suscrito a " + i.channe)
                create_queuec(x, y)
                j = "Successfully subscribed"

    je = {"data": j}
    ju = json.dumps(je)
    enc = str.encode(ju)
    encoded = base64.b64encode(enc)
    con.send(encoded)
Example #33
    def eat(self, foods):
        x, y = -1, -1
        for food in foods:
            if food.x == self.x and food.y == self.y:
                print(f"📗📗📗{self} yum x: {self.x} y:{self.y}")
                self.searching_for_food = True
                self.hunger += food.type
                x = food.x
                y = food.y
                self.food_near = []
                self.move_queue = queue()
                break
        new_foods = []
        for food in foods:
            if food.x != x or food.y != y:  # Fix for a bug where too much food got removed
                new_foods.append(food)

        return new_foods, self.searching_for_food
Example #34
def createWeightedGraph(opath, nodes, rev_nodes, edges):
    #ARG: string path, dict1, dict2, dict3
    #dict1: key   = node id
    #       value = array of vertex ids
    #dict2: key   = vertex id
    #       value = array of node ids
    #dict3: key   = vertex id
    #       value = array of vertex ids
    #RET: dict: key   = node id
    #           value = dict: key   = node id
    #                         value = weight

    graph = {}
    visited = {}
    q = queue()

    for node in nodes.keys():
        visited[node] = False

    for node in nodes.keys():
        if not visited[node]:
            q.put(node)
            visited[node] = True
            graph[node] = {}

            while (not q.empty()):
                start = q.get()

                for aVertex in nodes[start]:
                    for bVertex in edges[aVertex]:
                        if bVertex in rev_nodes:
                            for onode in rev_nodes[bVertex]:
                                if onode not in graph[start]:
                                    graph[start][onode] = 1
                                else:
                                    graph[start][onode] += 1
                                if not visited[onode]:
                                    graph[onode] = {}
                                    q.put(onode)
                                    visited[onode] = True

    with open(opath, 'w') as f:
        json.dump(graph, f, indent=4)  # indent must be an int; the with-block closes the file
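A small hedged example of the dict shapes the comment block describes (all names invented for illustration): two nodes n1 and n2 owning one vertex each, with a single edge v1 -> v2, yields a weight-1 edge between the nodes:

from queue import Queue as queue  # binding assumed by createWeightedGraph
import json

nodes = {'n1': ['v1'], 'n2': ['v2']}      # node id -> its vertex ids
rev_nodes = {'v1': ['n1'], 'v2': ['n2']}  # vertex id -> owning node ids
edges = {'v1': ['v2'], 'v2': []}          # vertex id -> adjacent vertex ids

createWeightedGraph('graph.json', nodes, rev_nodes, edges)
# graph.json now contains {"n1": {"n2": 1}, "n2": {}}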
Example #35
    def __init__(self, *args, **kwargs):
        """
        the base node __init__ is just here to record the arguments and
        keyword arguments used by each node
        """
        self.args = args
        self.kwargs = kwargs
        self.inited = True
        self.running = True
        self.exc_info = queue()
        self.inputs_index = None

        self.stats = dici(points_pushed=0,
                          points_pulled=0)

        self.config = dici()

        self.parent = None
        self.child = None
Example #36
    def test_recovery_connection_case3(self):
        osd.containerService.container_recovery.final_recovery_status_list = list()
        osd.containerService.container_recovery.list_of_tuple = queue(maxsize=0)
        communicator_obj = "HN0101_container_service"
        dictionary_new = {
            '1.2.3.4:1234': [1],
            '5.6.7.8:5678': [2],
            '9.2.3.1:9231': [3]
        }
        service_component_map = {
            '1.2.3.4:1234': [1],
            '5.6.7.8:5678': [2],
            '9.2.3.1:9231': [3]
        }

        recovery_object = Recovery(conf, dictionary_new, communicator_obj, \
            service_component_map, self.service_id )
        nodes = [{
            'ip': '1.2.3.4',
            'port': '1234'
        }, {
            'ip': '5.6.7.8',
            'port': '5678'
        }, {
            'ip': '9.2.3.1',
            'port': '9231'
        }]
        s = [
            mock_http_connection(200, nodes[0]),
            mock_http_connection(200, nodes[1]),
            mock_http_connection(500, nodes[2])
        ]
        a, b, c, g = recovery_object._get_put_responses(s, nodes)
        self.assertEqual(a, [(200, '1.2.3.4:1234'), (200, '5.6.7.8:5678'),
                             (500, '9.2.3.1:9231')])
        self.assertEqual(b, ['Fake', 'Fake', 'Fake'])
        self.assertEqual(c, ['', '', ''])
        self.assertEqual(
            osd.containerService.container_recovery.final_recovery_status_list,
            [(1, True), (2, True), (3, False)])
        self.assertEqual(g, ['1.2.3.4:1234', '5.6.7.8:5678'])
Example #37
def create_queue(x, y, con):
    q = queue(x, y)
    j = ""
    alreadyCreated = False
    for i in queu:
        if (i.queu == x):
            alreadyCreated = True
            break
    if (alreadyCreated):
        j = "Queue named \"" + x + "\" is already created"
        print("Queue named \"" + x + "\" is already created")
    else:
        j = "Queue named \"" + x + "\" created successfully!"
        queu.append(q)
    je = {"data": j}
    ju = json.dumps(je)
    enc = str.encode(ju)
    encoded = base64.b64encode(enc)
    con.send(encoded)
Example #38
def part2():
    docs = get_training_docs()
    words = load_words('datasets/words.txt')

    pq = queue()
    for i, word in enumerate(words):
        prob = {}
        for label in docs:
            split = defaultdict(int)
            for d in docs[label]:
                split[d.has_word(i)] += 1
            prob[label] = (split[True] + 1) / float(len(docs[label]) + 2)
        score = discrimanitive_score(prob)
        pq.put(Score(score, i))

    print ""
    print "Discriminative words: "
    for i in range(0, 10):
        s = pq.get(False)
        print words[s.word_id], s.score
Example #39
def main(group_name):
    add_new_jobs(group_name)

    db = Connection().usenet
    while db.control.find({"done": False, "group": group_name}).count() > 0:
        work_group = queue()
        try:
            count = 0
            for r in db.control.find({"done": False, "group": group_name}):
                count += 1
                work_group.put([r['init'], r['end'], group_name])

            threads = map(lambda x: Thread(name='thread_'+str(x), target=worker, args=(work_group,)), range(10))
            for t in threads: t.start()
            for t in threads: t.join()

            #p.apply_async(worker, [r['init'], r['end'], group_name])
            #p.close()
            #p.join()
        except KeyboardInterruptError:
            print 'Keyboard ^_^'
Example #40
 def kthSmallest(self, matrix, k):
     """
     Solution: Heap(Priority Queue)
     Time Complexity:
     Space Complexity: O(n)
     Inspired By: https://leetcode.com/problems/kth-smallest-element-in-a-sorted-matrix/discuss/85173/Share-my-thoughts-and-Clean-Java-Code
     :type matrix: List[List[int]]
     :type k: int
     :rtype: int
     """
     if matrix is None or len(matrix) == 0: return None
     q = queue()
     for i in range(len(matrix[0])):
         q.put((matrix[0][i], (0, i)))
     for i in range(k - 1):
         _node = q.get()
         location = _node[1]
         if location[0] == len(matrix) - 1:
             continue
         q.put((matrix[location[0] + 1][location[1]], (location[0] + 1,
                                                       location[1])))
     res = q.get()
     return res[0]
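Like the other heap-based snippets on this page, `kthSmallest` assumes `queue` is bound to `PriorityQueue` (and a LeetCode-style `Solution` class); an illustrative call with the LeetCode 378 example matrix:

from queue import PriorityQueue as queue

matrix = [[1, 5, 9],
          [10, 11, 13],
          [12, 13, 15]]
print(Solution().kthSmallest(matrix, 8))  # expected: 13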
Example #41
	def __init__(self):
		self.q = queue(0)
Example #42
    }
})

# Set module specific log levels
logging.getLogger('librato').setLevel(logging.CRITICAL)
logging.getLogger('requests').setLevel(logging.CRITICAL)
if _i_am_a_lambda_worker():
    logging.getLogger(root_package_name).setLevel(logging.WARNING)
    logging.getLogger(__name__).setLevel(logging.WARNING)
else:
    logging.getLogger(root_package_name).setLevel(logging.INFO)
    logging.getLogger(__name__).setLevel(logging.INFO)

# amend the logging configuration with a handler streaming to a message queue

q = queue(-1)
ql = MutableQueueListener(q)

qh = QueueHandler(q)
logging.root.addHandler(qh)

ql.start()


def stop_queue_listener():
    ql.stop()


def _attach_log_handler(handler):
    ql.addHandler(handler)
Example #43
    def test_transactions(self):
        """Tests transaction rollbacks and local cache for multiple dbs"""

        if len(getattr(settings, "DATABASES", [])) <= 1:
            print "\n  Skipping multi database tests"
            return
        if hasattr(settings, 'DATABASE_ENGINE'):
            if settings.DATABASE_ENGINE == 'sqlite3':
                print "\n  Skipping test requiring multiple threads."
                return
        else:
            from django.db import connections, transaction
            for db in settings.DATABASES.values():
                if db['ENGINE'] == 'sqlite3':
                    print "\n  Skipping test requiring multiple threads."
                    return

            for conname in connections:
                con = connections[conname]
                if not base.supports_transactions(con):
                    print "\n  Skipping test requiring transactions."
                    return

        from django.db import connections, transaction
        from johnny import cache as c
        from Queue import Queue as queue
        q = queue()
        other = lambda x: self._run_threaded(x, q)

        from testapp.models import Genre


        # sanity check 
        self.failUnless(transaction.is_managed() == False)
        self.failUnless(transaction.is_dirty() == False)
        self.failUnless("default" in getattr(settings, "DATABASES"))
        self.failUnless("second" in getattr(settings, "DATABASES"))

        # this should seed this fetch in the global cache
        g1 = Genre.objects.using("default").get(pk=1)
        g2 = Genre.objects.using("second").get(pk=1)
        start_g1 = g1.title

        transaction.enter_transaction_management(using='default')
        transaction.managed(using='default')
        transaction.enter_transaction_management(using='second')
        transaction.managed(using='second')

        g1.title = "Testing a rollback"
        g2.title = "Testing a commit"
        g1.save()
        g2.save()

        # test outside of transaction, should be cache hit and 
        # not contain the local changes
        other("Genre.objects.using('default').get(pk=1)")
        hit, ostart = q.get()
        self.failUnless(ostart.title == start_g1)
        self.failUnless(hit)

        transaction.rollback(using='default')
        transaction.commit(using='second')
        transaction.managed(False, "default")
        transaction.managed(False, "second")

        #other thread should have seen rollback
        other("Genre.objects.using('default').get(pk=1)")
        hit, ostart = q.get()
        self.failUnless(ostart.title == start_g1)
        self.failUnless(hit)

        connections['default'].queries = []
        connections['second'].queries = []
        #should be a cache hit due to rollback
        g1 = Genre.objects.using("default").get(pk=1)
        #should be a db hit due to commit
        g2 = Genre.objects.using("second").get(pk=1)
        self.failUnless(connections['default'].queries == [])
        self.failUnless(len(connections['second'].queries) == 1)

        #other thread should now be accessing the cache after the get
        #from the commit.
        other("Genre.objects.using('second').get(pk=1)")
        hit, ostart = q.get()
        self.failUnless(ostart.title == g2.title)
        self.failUnless(hit)

        self.failUnless(g1.title == start_g1)
        self.failUnless(g2.title == "Testing a commit")
        transaction.leave_transaction_management("default")
        transaction.leave_transaction_management("second")
Example #44
 def __init__(self, map):
     self.map = map
     self.queue = queue()
Example #45
    def test_savepoints(self):
        """tests savepoints for multiple db's"""
        from Queue import Queue as queue
        q = queue()
        other = lambda x: self._run_threaded(x, q)

        from testapp.models import Genre
        try:
            from django.db import connections, transaction
        except ImportError:
            # connections doesn't exist in 1.1 and under
            print"\n  Skipping multi database tests"

        if len(getattr(settings, "DATABASES", [])) <= 1:
            print "\n  Skipping multi database tests"
            return
        for name, db in settings.DATABASES.items():
            if name in ('default', 'second'):
                if 'sqlite' in db['ENGINE']:
                    print "\n  Skipping test requiring multiple threads."
                    return
                con = connections[name]
                if not con.features.uses_savepoints:
                    print "\n  Skipping test requiring savepoints."
                    return

        # sanity check 
        self.failUnless(transaction.is_managed() == False)
        self.failUnless(transaction.is_dirty() == False)
        self.failUnless("default" in getattr(settings, "DATABASES"))
        self.failUnless("second" in getattr(settings, "DATABASES"))

        g1 = Genre.objects.using("default").get(pk=1)
        start_g1 = g1.title
        g2 = Genre.objects.using("second").get(pk=1)

        transaction.enter_transaction_management(using='default')
        transaction.managed(using='default')
        transaction.enter_transaction_management(using='second')
        transaction.managed(using='second')

        g1.title = "Rollback savepoint"
        g1.save()

        g2.title = "Committed savepoint"
        g2.save(using="second")
        sid2 = transaction.savepoint(using="second")

        sid = transaction.savepoint(using="default")
        g1.title = "Dirty text"
        g1.save()

        #other thread should see the original key and cache object from memcache,
        #not the local cache version
        other("Genre.objects.using('default').get(pk=1)")
        hit, ostart = q.get()
        self.failUnless(hit)
        self.failUnless(ostart.title == start_g1)
        #should not be a hit due to rollback
        connections["default"].queries = []
        transaction.savepoint_rollback(sid, using="default")
        g1 = Genre.objects.using("default").get(pk=1)

        # i think it should be "Rollback Savepoint" here
        self.failUnless(g1.title == start_g1)

        #will be pushed to dirty in commit
        g2 = Genre.objects.using("second").get(pk=1)
        self.failUnless(g2.title == "Committed savepoint")
        transaction.savepoint_commit(sid2, using="second")

        #other thread should still see original version even 
        #after savepoint commit
        other("Genre.objects.using('second').get(pk=1)")
        hit, ostart = q.get()
        self.failUnless(hit)
        self.failUnless(ostart.title == start_g1)

        connections["second"].queries = []
        g2 = Genre.objects.using("second").get(pk=1)
        self.failUnless(connections["second"].queries == [])

        transaction.commit(using="second")
        transaction.managed(False, "second")

        g2 = Genre.objects.using("second").get(pk=1)
        self.failUnless(connections["second"].queries == [])
        self.failUnless(g2.title == "Committed savepoint")

        #now committed and cached, other thread should reflect new title
        #without a hit to the db
        other("Genre.objects.using('second').get(pk=1)")
        hit, ostart = q.get()
        self.failUnless(ostart.title == g2.title)
        self.failUnless(hit)

        transaction.managed(False, "default")
        transaction.leave_transaction_management("default")
        transaction.leave_transaction_management("second")
Example #46
                "backupCount": 5,
                "encoding": "UTF-8",
                "delay": "False",
                "utc": "True",
            },
        },
        "loggers": {"": {"handlers": ["default", "file"], "level": "INFO", "propagate": "True"}},
    }
)

logging.getLogger(__name__).addHandler(logging.NullHandler())
logging.getLogger("librato").setLevel(logging.CRITICAL)

# amend the logging configuration with a handler streaming to a message queue

q = queue(-1)
ql = MutableQueueListener(q)

qh = QueueHandler(q)
logging.root.addHandler(qh)

ql.start()


def stop_queue_listener():
    ql.stop()


def _attach_log_handler(handler):
    ql.addHandler(handler)
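A minimal sketch of how the queue-based logging plumbing above gets used, assuming `MutableQueueListener` behaves like the standard `logging.handlers.QueueListener` (records flow: logger -> QueueHandler -> q -> listener -> attached handlers):

import logging
import sys

_attach_log_handler(logging.StreamHandler(sys.stderr))  # listener now fans records out to stderr
logging.getLogger(__name__).info("this record travels through the message queue")
stop_queue_listener()  # stop the listener thread on shutdown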
Example #47
    def execute(self,
                wait=False,
                loglevel=logging.INFO,
                optimize=True,
                implicit_sink=None):

        def find_root(flume):
            """
            figure out the root node of a flume pipeline
            """
            if hasattr(flume, 'parent') and \
               flume.parent is not None:
                return find_root(flume.parent)
            else:
                return flume

        if not hasattr(self, 'outputs'):
            node.init_node(self, outputs=[])

        if len(self.flumes) != 0:

            forwarder_inputs = self.outputs
            self.outputs = []

            for flume in self.flumes:
                flume_input = queue()
                root = find_root(flume)
                root.parent = None
                root.inputs = [flume_input]

                # don't setup the outputs for a sink as it will never
                # push anything out XXX: could be a bit more elegant
                if not isinstance(flume, sink):
                    output = queue()
                    self.flume_outputs.append(output)
                    flume.outputs = [output]

                self.outputs.append(flume_input)

            from flume.procs import reorder
            forwarder = reorder(delay=self.delay)

            source = self.source if hasattr(self, 'source') else self
            node.init_node(forwarder,
                           inputs=self.flume_outputs,
                           outputs=forwarder_inputs,
                           parent=None,
                           source=source,
                           child=self.child)

            forwarder.execute(wait=False,
                              loglevel=loglevel,
                              optimize=optimize,
                              implicit_sink=implicit_sink)

            # start underlying flumes
            for flume in self.flumes:
                flume.child = forwarder
                flume.execute(wait=False,
                              loglevel=loglevel,
                              optimize=optimize,
                              implicit_sink=implicit_sink)

        # override default behavior to execute the underlying flume
        node.execute(self,
                     wait=False,
                     loglevel=loglevel,
                     optimize=optimize,
                     implicit_sink=implicit_sink)
Example #48
File: a.py Project: zTrix/algo
for t in range(T):
    s = f.readline()[:-1]
    n = int(s)
    fa = [[] for i in range(n+1)]
    ans = 0
    for i in range(n):
        s = f.readline()[:-1]
        ary = s.split(' ')
        fc = int(ary[0])
        for j in range(fc):
            fa[i+1].append(int(ary[j+1]))
    for i in range(1, n+1):
        if len(fa[i]) > 1:
            st = set(fa[i])
            q = queue()
            for j in fa[i]:
                q.put(j)
                while not q.empty():
                    cur = q.get()
                    for k in fa[cur]:
                        if k in st:
                            ans = 1
                            break
                        st.add(k)
                        q.put(k)
                    if ans == 1: break
                if ans == 1: break
        if ans == 1: break
    print 'Case #%d: %s' % (t+1, ans == 1 and "Yes" or 'No')