Example #1
def algorithm(draw, grid, start, end):
    count = 0
    open_set = PriorityQueue()  # min-heap: lowest f-score comes out first
    open_set.put(
        (0, count, start))  # add the start node (a Spot) to the queue
    came_from = {}  # maps each node to its predecessor, used to rebuild the best path
    g_score = {
        spot: float("inf")
        for row in grid for spot in row
    }  # shortest known distance from the start node to each node; initialize all to infinity
    g_score[start] = 0  # g score of the start node is 0
    f_score = {
        spot: float("inf")
        for row in grid for spot in row
    }  # estimated total cost through each node (g score plus heuristic); initialize all to infinity
    f_score[start] = h(start.get_pos(),
                       end.get_pos())  # heuristic estimate from start to end

    open_set_hash = {
        start
    }  # mirrors the queue contents, since a PriorityQueue has no membership test

    while not open_set.empty(
    ):  # run until the open set is empty, i.e. every reachable node has been considered
        for event in pygame.event.get():  # allow the user to quit mid-search
            if event.type == pygame.QUIT:
                pygame.quit()

        current = open_set.get()[
            2]  # index 2: queue entries are (f_score, count, node)
        open_set_hash.remove(
            current)  # keep the hash set in sync with the queue

        if current == end:  # we reached the end node
            reconstruct_path(came_from, end, draw)  # draw the path we found
            end.make_end()  # keep the end node its own colour, not purple
            start.make_start()  # keep the start node its own colour, not purple
            return True

        for neighbor in current.neighbors:
            temp_g_score = g_score[current] + 1

            if temp_g_score < g_score[
                    neighbor]:  # found a better way to reach this neighbor, so update its scores
                came_from[neighbor] = current  # record the best predecessor
                g_score[neighbor] = temp_g_score
                f_score[neighbor] = temp_g_score + h(neighbor.get_pos(),
                                                     end.get_pos())
                if neighbor not in open_set_hash:  # enqueue the neighbor if it is not already queued
                    count += 1
                    open_set.put((f_score[neighbor], count, neighbor))
                    open_set_hash.add(neighbor)
                    neighbor.make_open(
                    )  # mark the neighbor as open by changing its colour

        draw()

        if current != start:
            current.make_closed(
            )  # mark this node as already considered (red); it will not re-enter the open set

    return False
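
The heuristic h is not defined in this snippet. A minimal sketch, assuming the usual Manhattan distance between two (row, col) positions on the grid:

def h(p1, p2):
    # Manhattan distance: admissible on a grid with unit-cost moves.
    x1, y1 = p1
    x2, y2 = p2
    return abs(x1 - x2) + abs(y1 - y2)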
Example #2
from queue import PriorityQueue

n, t = map(int, input().split())

problems = list()
a, b, c, t0 = map(int, input().split())

problems.append(t0)
problem_set = set(problems)

priorityQueue = PriorityQueue(n)
for i in range(1, n):
    last = problems[i - 1]
    l = (a * last + b) % c + 1
    if l in problem_set:
        """index = problems.index(l)
        n2 = n-len(problems)
        bit = problems[index:]
        times = n2 // len(bit)
        leftover = n2 % len(bit)
        s = bit[:leftover]
        bit *= times"""

        index = problems.index(l)
        #front = problems[:index]
        #bit = problems[index:]
        repeat = len(problems) - index
        times = (n - index) // repeat
        leftover = (n - index) % repeat
        #back = problems[index:leftover]
        #bit *= times
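
The cycle arithmetic above can be sanity-checked with small, illustrative numbers:

# Suppose the generated sequence starts 3, 5, 2 and then l = 5 reappears, with n = 10.
# index = problems.index(5) = 1, repeat = len(problems) - index = 3 - 1 = 2 (cycle [5, 2]).
# times = (10 - 1) // 2 = 4 full cycles and leftover = (10 - 1) % 2 = 1 extra element,
# so the full sequence is [3] + [5, 2] * 4 + [5]  ->  10 problems in total.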
Example #3
    return error

def countIP(ip_dict, ip_addr):
    if ip_addr in ip_dict:
        ip_dict[ip_addr] += 1
    else:
        ip_dict[ip_addr] = 1

if __name__ == "__main__":
    src = '../Data/capture20110816.pcap.netflow.labeled'
    # We only considered the infected host, as indicated here: (5877 flows)
    # https://mcfp.felk.cvut.cz/publicDatasets/CTU-Malware-Capture-Botnet-47/
    infected_host_addr = '147.32.84.165'
    reservoir_size = 1000
    
    reservoir = PriorityQueue()
    for _ in range(reservoir_size):
        reservoir.put((0.0, ""))
    
    ah = open(src, 'r')
    
    ip_dict = {}
    protocol_set = set()
    flags_set = set()
    text_label_set = set()
    
    
    ah.readline()  # skip the first line
    counter = 0
    for line_ah in ah:
        # Convert the record into an array
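
The per-record loop is cut off above. A minimal sketch of the priority-queue reservoir update it presumably performs on each record (an assumption; record stands for the parsed NetFlow line):

import random

def reservoir_update(reservoir, record):
    # Keep the records with the largest random keys: the queue's minimum is
    # the current eviction candidate, replaced whenever a new key beats it.
    key = random.random()
    smallest_key, smallest_record = reservoir.get()
    if key > smallest_key:
        reservoir.put((key, record))
    else:
        reservoir.put((smallest_key, smallest_record))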
Example #4
import timeit
import math
from queue import PriorityQueue

start = timeit.default_timer()

with open('p083_matrix.txt') as matrix_file:
    matrix = [
        list(map(int, line.split(','))) for line in matrix_file.readlines()
    ]

size = len(matrix)
distances = [[math.inf] * size for _ in range(size)]
unvisited = PriorityQueue()
unvisited.put((matrix[0][0], 0, 0))  # entries are (distance, row, col)

while not unvisited.empty():
    dist, row, col = unvisited.get()

    # Reached the bottom-right corner: dist is the minimal path sum.
    if (row, col) == (size - 1, size - 1):
        print(dist)
        break

    # Skip stale queue entries that no longer beat the best known distance.
    if dist >= distances[row][col]:
        continue

    distances[row][col] = dist

    # Enqueue the neighbouring cells.
    if row != 0:
        unvisited.put((dist + matrix[row - 1][col], row - 1, col))
    if col != 0:
Example #5
    def __init__(self, board, size):
        start = time.time()

        self.visited = [['N' for i in range(size)] for j in range(size)]
        self.steps = [['X' for i in range(size)] for j in range(size)]

        pQ = PriorityQueue()
        v = set()
        prevPos = {}

        pQ.put((2, 0, 0, 0))  # entries are (estimate, i, j, depth)
        v.add((0, 0))
        prevPos[(0, 0)] = None

        while not pQ.empty():
            estimate, i, j, depth = pQ.get()

            self.visited[i][j] = 'Y'
            self.steps[i][j] = depth

            if i == size - 1 and j == size - 1:
                break

            m = board[i][j]

            for i_2, j_2 in ((i - m, j), (i + m, j), (i, j - m), (i, j + m)):
                if (0 <= i_2 < size) and (0 <= j_2 < size) and ((i_2, j_2)
                                                                not in v):
                    # The estimate favours cells on the goal row/column.
                    if i_2 == size - 1 and j_2 == size - 1:
                        estimate = depth + 1
                    elif i_2 == size - 1 or j_2 == size - 1:
                        estimate = depth + 2
                    else:
                        estimate = depth + 3

                    pQ.put((estimate, i_2, j_2, self.steps[i][j] + 1))
                    v.add((i_2, j_2))
                    prevPos[(i_2, j_2)] = (i, j)

        if self.visited[size - 1][size - 1] != 'N':
            self.value = self.steps[size - 1][size - 1]
        else:
            # Unsolvable: value is the negated count of unvisited cells.
            k = 0
            for n in self.visited:
                k -= n.count('N')
            self.value = k

        end = time.time()
        self.evalTime = (end - start) * 1000

        # Puzzle Pathfinder for SOLVABLE puzzles
        self.shortestPath = []
        if self.value > 0:
            backtrack = (size - 1, size - 1)
            while backtrack != None:
                self.shortestPath.append(backtrack)
                backtrack = prevPos[backtrack]
Example #6
from queue import PriorityQueue

pq = PriorityQueue(maxsize=0)

pq.put((9, 'a'))
pq.put((7, 'c'))
pq.put((1, 'd'))

print(pq.queue)

pq.get()
pq.get()

print(pq.queue)
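
For reference, pq.queue exposes the underlying heap list, so the prints show heap layout rather than fully sorted order. With the puts and gets above, the output is:

[(1, 'd'), (9, 'a'), (7, 'c')]
[(9, 'a')]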
Example #7
    def generateWeighted(self):

        # preprocessing
        if not self.imageWeight:
            image_denoise = denoise_tv_bregman(self.image, 20.0)
            image_gray = rgb2gray(image_denoise)
            image_lab = rgb2lab(image_denoise)

            image_entropy = 2**(entropy(img_as_ubyte(image_gray), disk(20)))
            image_entropy /= np.max(image_entropy)

            color = [
                sobel(image_lab[:, :, channel])**2 for channel in range(1, 3)
            ]
            image_sobel = functools.reduce(op.add, color)**(1 / 2) / 5

            self.imageWeight = (0.3 * image_entropy + 0.7 * image_sobel)
            self.imageWeight /= np.mean(self.imageWeight)

        # blue noise generation
        startingPoint = Point(random.randint(0, self.imageWidth),
                              random.randint(0, self.imageHeight))

        self.sampledList.append(list(startingPoint))
        self.processingList.append(startingPoint)
        self.grid.insert(startingPoint)

        pQueue = PriorityQueue()

        while len(self.processingList) > 0 and len(
                self.sampledList) < self.pointCount:
            randomPoint = self.processingList.pop(
                random.randint(0,
                               len(self.processingList) - 1))
            for i in range(0, self.newPointsToGenerate):
                newPoint = randomPoint.generateRandomPointAround(self.minDist)
                # early boundary check
                if newPoint.x > self.edgeThreshold and newPoint.x < self.imageWidth - self.edgeThreshold \
                    and newPoint.y > self.edgeThreshold and newPoint.y < self.imageHeight - self.edgeThreshold:
                    newPoint.priority = -self.imageWeight[newPoint.y][
                        newPoint.x]
                    pQueue.put(newPoint)

            found = False
            while not pQueue.empty() and not found:
                newPoint = pQueue.get()
                if not self.grid.checkInNeighborhood(newPoint):
                    self.processingList.append(newPoint)
                    self.sampledList.append(list(newPoint))
                    self.grid.insert(newPoint)
                    found = True

        if DEBUG:
            im_info = Image.open(sys.argv[1]).info
            if 'dpi' in im_info:
                my_dpi = im_info['dpi'][0]
            else:
                my_dpi = 60
            figSize = self.imageWidth / my_dpi, self.imageHeight / my_dpi
            fig = plt.figure(figsize=figSize, dpi=my_dpi)
            ax = plt.Axes(fig, [0., 0., 1., 1.])
            ax.set_axis_off()
            fig.add_axes(ax)
            ax.plot(*zip(*self.sampledList),
                    color='black',
                    marker=',',
                    lw=0,
                    linestyle="")
            extent = ax.get_window_extent().transformed(
                fig.dpi_scale_trans.inverted())
            fig.savefig(
                ('%d_points_weighted_blue_noise.png' % len(self.sampledList)),
                bbox_inches=extent.expanded(0.9, 0.9),
                pad_inches=0,
                dpi=my_dpi)

            ax.clear()
            ax.plot(*zip(*self.sampledList),
                    color='yellow',
                    marker=',',
                    lw=0,
                    linestyle="")
            ax.imshow(self.imageWeight, cmap='gray')
            fig.savefig(('%d_points_weighted_blue_noise_filter.png' %
                         len(self.sampledList)),
                        bbox_inches=extent.expanded(0.9, 0.9),
                        pad_inches=0,
                        dpi=my_dpi)

            img = cv2.imread(
                ('%d_points_weighted_blue_noise.png' % len(self.sampledList)),
                0)
            img2 = img
            f = np.fft.fft2(img)
            fshift = np.fft.fftshift(f)
            magnitude_spectrum = np.log(np.abs(fshift))

            ax.clear()
            ax.imshow(magnitude_spectrum, cmap='gray')
            fig.savefig(('%d_points_weighted_blue_noise_dft.png' %
                         len(self.sampledList)),
                        bbox_inches=extent.expanded(0.9, 0.9),
                        pad_inches=0,
                        dpi=my_dpi)

        self.generateBoundary()

        return self.sampledList
Example #8
        sum = x * x + 3 * x + 2 * x * y + y + y * y
        sum += size
        binary = "{0:b}".format(sum)

        if binary.count('1') % 2 == 1:
            val = '#'

        maze[y].append(val)

start = Point(1, 1)
end = Point(31, 39)

startStr = str(start)

pointMap = {str(start): start}
openQueue = PriorityQueue()
openQueue.put((0, startStr))

found = False

while not openQueue.empty() and not found:
    #find least f, get node and call it q
    currentStr = openQueue.get()[1]
    current = pointMap[currentStr]

    x = current.x
    y = current.y

    maze[y][x] = 'O'

    successors = []
Example #9
    def __init__(self) -> None:
        self.queue = PriorityQueue()
Example #10
import sys
from queue import PriorityQueue

N = int(sys.stdin.readline())
priorityQueue = PriorityQueue(maxsize=N)

for i in range(N):
    temp = int(sys.stdin.readline())
    if temp == 0 and priorityQueue.qsize() == 0:
        print(0)
    elif temp == 0 and priorityQueue.qsize() != 0:
        print(priorityQueue.get())
    else:
        priorityQueue.put(temp)
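
A small worked example of the stream behaviour (illustrative input on the left, printed output on the right):

# stdin        stdout
# 5
# 0            0    (queue is empty)
# 1
# 2
# 0            1    (smallest value so far)
# 0            2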

Example #11
    def run(self,
            timesteps=None,
            episodes=None,
            max_episode_timesteps=None,
            deterministic=False,
            episode_finished=None):
        """
        Runs the agent on the environment.

        Args:
            timesteps: Number of timesteps
            episodes: Number of episodes
            max_episode_timesteps: Max number of timesteps per episode
            deterministic: Deterministic flag
            episode_finished: Function handler taking a `Runner` argument and returning a boolean indicating
                whether to continue execution. For instance, useful for reporting intermediate performance or
                integrating termination conditions.
        """

        self.episode = self.agent.episode
        if episodes is not None:
            episodes += self.agent.episode

        self.timestep = self.agent.timestep
        if timesteps is not None:
            timesteps += self.agent.timestep

        # Keep track of episode reward and episode length for statistics.
        self.start_time = time.time()
        self.max_episode_timesteps = max_episode_timesteps
        self.batch_losses = []
        self.q_values = []

        while True:
            search_graph = Digraph()

            state = self.environment.reset()
            self.agent.reset()

            self.counter = count()

            self.nodes_created = 0
            self.nodes_visited = 0
            self.leafs_created = 0
            self.leafs_visited = 0

            root = Node(self.environment, state)
            search_graph.node(root.label, "root")
            node = None
            self.unopened_nodes = PriorityQueue()
            self.unopened_nodes.put((0,
                                     0,
                                     -next(self.counter),
                                     root))

            self.episode_solution = None
            self.upper_bound = -np.inf

            self.episode_timestep = 0
            episode_start_time = time.time()

            while True:
                if node is None:
                    if not self.unopened_nodes.empty():
                        node = self.unopened_nodes.get()[-1]
                        phase = "best_first"
                    else:
                        break
                else:
                    phase = "depth_first"

                highest_lower_bound, node_bound = self.explore_bounds(node)

                if node.terminal:
                    self.nodes_visited += 1
                    self.leafs_visited += 1

                    if node.current_value > self.upper_bound:
                        self.upper_bound = node.current_value
                        self.episode_solution = deepcopy(node)
                        self.print_tree_info(node, highest_lower_bound, phase + ": new bound")
                    else:
                        self.print_tree_info(node, highest_lower_bound, phase + ": leaf")

                    current_solution = copy(node.environment.current_solution)
                    node = root
                    for vertex in current_solution:
                        node = node.child_nodes[vertex]
                        self.agent.current_states = node.state
                        self.agent.current_actions["action"] = node.action

                        updates = self.agent.observe(terminal=node.terminal,
                                                     reward=node.current_value,
                                                     return_loss_per_instance=True)
                        if updates is not None:
                            self.batch_losses.append(np.mean(updates))

                    node = None
                    continue

                if node_bound or (self.max_episode_timesteps is not None and self.episode_timestep == self.max_episode_timesteps):
                    self.print_tree_info(node, highest_lower_bound, phase + ": bounding")
                    # search_graph.node_attr(node.label, color="red")
                    node = None
                    continue
                elif node.layer > 0:
                    self.nodes_visited += 1

                self.agent.act(states=node.state, deterministic=deterministic)
                q_values = copy(self.agent.next_internals[0])
                # q_values /= node.environment.nr_cities
                self.q_values.append(np.mean(q_values))

                for vertex in range(self.environment.nr_vertices):
                    if vertex not in node.environment.current_solution:
                        child_environment = deepcopy(node.environment)
                        state, action, terminal, reward = child_environment.execute(actions=vertex)

                        if action not in node.child_nodes.values():
                            child_node = Node(environment=child_environment, state=state, action=action,
                                              terminal=terminal, parent_node=node, q_value=q_values[action])
                            node.child_nodes[action] = child_node

                            if terminal:
                                self.leafs_created += 1
                            self.nodes_created += 1

                            search_graph.node(child_node.label,
                                              "{}\nub: {:.4f}\nlb: {:.4f}\ncw: {:.4f}\nqv: {:.4f}".format(child_node.label,
                                                                                                          self.upper_bound,
                                                                                                          child_node.current_value + child_node.q_value,
                                                                                                          child_node.current_value,
                                                                                                          child_node.q_value))
                            search_graph.edge(node.label, child_node.label, "{}.".format(self.nodes_created))

                            self.episode_timestep += 1
                            self.timestep += 1
                        else:
                            print()

                self.print_tree_info(node, highest_lower_bound, phase + ": branching")

                sorted_child_nodes = sorted(node.child_nodes.values(), key=lambda x: x.q_value)

                for child_node in sorted_child_nodes[1:]:
                    self.unopened_nodes.put((child_node.q_value,
                                             child_node.current_value,
                                             -next(self.counter),
                                             child_node))

                node = sorted_child_nodes[0]

            if self.episode_solution is not None:
                self.environment = self.episode_solution.environment
                episode_reward = -self.environment.current_value

                time_passed = time.time() - episode_start_time

                self.episode_rewards.append(episode_reward)
                self.episode_timesteps.append(self.episode_timestep)
                self.episode_times.append(time_passed)

                if (timesteps is not None and self.agent.timestep >= timesteps) or \
                        (episodes is not None and self.agent.episode >= episodes):
                    # agent.episode / agent.timestep are globally updated
                    break

                if episode_finished and not self.episode_finished():
                    break

                if self.episode > 500 and self.episode % 500 == 0:
                    # node = self.episode_solution
                    # while node is not None:
                    #     search_graph.attr(node.label, color="green")
                    #     node = node.parent_node

                    search_graph.view(tempfile.mktemp('.gv'))
                self.episode += 1
            else:
                break

        self.agent.close()
        self.environment.close()
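
The Node class used by this runner is not shown. A minimal sketch consistent with the attributes accessed above; label, layer, and current_value are inferred from usage, not taken from the original:

import itertools

_node_ids = itertools.count()

class Node:
    # Sketch of the branch-and-bound search-tree node assumed by the runner.
    def __init__(self, environment, state, action=None, terminal=False,
                 parent_node=None, q_value=0.0):
        self.environment = environment
        self.state = state
        self.action = action
        self.terminal = terminal
        self.parent_node = parent_node
        self.q_value = q_value
        self.child_nodes = {}  # maps action -> child Node
        self.layer = 0 if parent_node is None else parent_node.layer + 1
        # Assumed: the environment tracks the value of its partial solution.
        self.current_value = getattr(environment, "current_value", 0.0)
        self.label = "n{}".format(next(_node_ids))  # unique Graphviz id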
Example #12
    def safeMoveTo(self, s: Ship, t: Point):  # A* movement; suggested move by priority.

        sPos = s.position
        blocked = calc.shipMaps[s]['blocked'] + self.next
        blocked = np.where(blocked > 0, 1, 0)

        #print("=====")
        #print(sPos)
        #print(blocked)

        #1. Obstacle calculation

            #Obstacles are "walls" on the nav graph. They consist of the points of:
                #Enemy ships with less halite (threshold => enemy block)
                #Enemy shipyards
                #Positions of friendly ships on the next turn

        #2. Navigation

            #A*

                #sPos: start position
                #pred: predecessor of a node (which point was relaxed to find the next point)
                #dist: distance from sPos to a point
                #pqMap: maps distances in the priority queue to points to process
                #t: initially the target point; during reconstruction, becomes the "next" point on the A* path


                #Algorithm: start from sPos, put it in the priority queue.
                #While the priority queue is not empty and the target is not found, relax the next node in the queue.
                #Add adjacent points (processPoints) to the queue.

                #Check whether t is reachable (pred not None).
                #If it is, follow pred back until sPos is reached to find the path.
                #Otherwise, move randomly.


            #Swapping
                #If the bot wishes to stay still but cannot (an allied ship moves in on self.next turn),
                #move randomly.
                #This means that if the bot has a goal, it will move toward the goal, including through friendlies,
                #as obstacles are calculated through self.next.
                #Because movement is sorted by priority, higher-priority ships will never get blocked
                #by lower-priority ones.


        #TODO: Improve obstacle calculation

        #Stay still
        if sPos == t:
            #Someone with higher priority needs position, must move. Or being attacked.
            if blocked[t.x][t.y]:
                for processPoint in self.getAdjacent(sPos):
                    if not blocked[processPoint.x][processPoint.y]:
                        self.next[processPoint.x][processPoint.y] = 1
                        return self.directionTo(sPos,processPoint)
                self.next[sPos.x][sPos.y] = 1
                return None
            else:
                self.next[sPos.x][sPos.y] = 1
                return None

        #A*
        pred = {}
        dist = {}
        pq = PriorityQueue()
        pqMap = {}

        pqMap[self.dist(sPos,t)] = [sPos]
        pq.put(self.dist(sPos,t))
        pred[sPos] = sPos
        dist[sPos] = self.dist(sPos,t)

            # Main

        while not pq.empty():
            if t in dist:
                break
            currentPoint = pqMap.get(pq.get()).pop()
            for processPoint in self.getAdjacent(currentPoint):
                if blocked[processPoint.x][processPoint.y] or processPoint in dist: 
                    continue
                dist[processPoint] = dist[currentPoint] + 1
                priority =  dist[processPoint] + self.dist(processPoint,t)
                pqMap[priority] = pqMap.get(priority,[])
                pqMap[priority].append(processPoint)
                pq.put(priority)
                pred[processPoint] = currentPoint
        
        #TODO: Catch this exception. Or make sure this never happens. Don't just move randomly.
        if not t in pred:
            #Random move
            for processPoint in self.getAdjacent(sPos):
                if not blocked[processPoint.x][processPoint.y]:
                    self.next[processPoint.x][processPoint.y] = 1
                    return self.directionTo(sPos,processPoint)
            self.next[sPos.x][sPos.y] = 1
            return None

            # Path reconstruction
        while pred[t] != sPos:
            t = pred[t]

        desired = self.directionTo(sPos,t)
        self.next[t.x][t.y] = 1
        
        return desired
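
The dist helper is not shown. A plausible sketch, assuming the usual wrapping (toroidal) Halite board with side length self.size (an assumption, not the original code):

    def dist(self, a: Point, b: Point):
        # Manhattan distance on a torus: each axis can wrap around the board edge.
        dx = abs(a.x - b.x)
        dy = abs(a.y - b.y)
        return min(dx, self.size - dx) + min(dy, self.size - dy)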
Example #13
def dijkstra(graph: dict = None,
             start: str = None,
             end: str = None,
             queue: PriorityQueue = None,
             visited: dict = None) -> dict:
    """
    Find the best path based on lowest cost
    """
    # Avoid the mutable-default-argument pitfall: a shared PriorityQueue/dict
    # would leak state between top-level calls, so build fresh ones here.
    if queue is None:
        queue = PriorityQueue()
    if visited is None:
        visited = {}

    print("---foo start---")
    print("start", start)

    if graph is None or graph == {}:
        print("empty graph")
        return

    if not isinstance(start, tuple):
        # cost,node,via
        start = (0, start, None)

    if start[1] == end:
        # should stop processing arcs of this node
        print("target reached")
        visited[start[1]] = {'cost': start[0], 'via': start[2]}
        return visited

    if start[1] not in graph.keys():
        # mark as visited and backtrack
        print("dead end")
        visited[start[1]] = {'cost': start[0], 'via': start[2]}
        return

    if start not in queue.queue:
        _cost, _node, _via = start
        queue.put((_cost, _node, _via))

    print("we can go to", graph[start[1]].items())
    for node, value in graph[start[1]].items():
        print("node", node, "cost", value)
        #print("nodes in q",[n[1] for n in queue.queue])
        if node in visited.keys():
            print("node in visited, skip")
            continue
        entry = [e for e in queue.queue if node in e]
        if entry:
            print("node already in queue", entry[0])
            if start[0] + value < entry[0][0]:
                queue.queue.remove(entry[0])
                queue.put((start[0] + value, node, start[1]))
                print("updated priority for",
                      (start[0] + value, node, start[1]))
                print("queue is", queue.queue)
        else:
            queue.put((start[0] + value, node, start[1]))
            print((start[0] + value, node, start[1]), "enqueued")
            print("queue is", queue.queue)

    _cost, _node, _via = queue.get()
    visited[_node] = {'cost': _cost, 'via': _via}

    print("done with & removed", (_cost, _node, _via), "from queue")
    print("queue is", queue.queue)
    print("visited", visited)

    for entry in queue.queue:
        print("q entry", entry)
        print("---recur start---")
        path = dijkstra(graph, entry, end, queue, visited)
        print("---recur end---")
        if path:
            return path

    print("---foo end---")
    return visited
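
A hedged usage sketch showing the expected graph shape: an adjacency dict of dicts mapping each node to {neighbour: edge cost}. The values are illustrative:

graph = {
    'a': {'b': 1, 'c': 4},
    'b': {'c': 2, 'd': 5},
    'c': {'d': 1},
}
result = dijkstra(graph, start='a', end='d')
print(result)  # visited dict of {node: {'cost': ..., 'via': ...}}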
Example #14
def bfsHash(start, zeroPos, des, step, change_position, cost_swap):
    # We previously used a hand-rolled hash table, but collisions were resolved
    # with O(n) shifting, which slowed the algorithm down badly when many
    # operations were needed, so we fell back to Python's built-in dict.
    que = PriorityQueue()
    que2 = PriorityQueue()
    first = node(start, 0, zeroPos, des, [], [], 0)
    que.put(first)
    mymap = {}
    s = ""
    for i in start:
        s += str(i)
    mymap[s] = 1
    m = -1

    # Start the search
    while not que.empty():
        tempN = que.get()
        # print(list_to_string(tempN.operation))
        temp = tempN.num.copy()
        pos = tempN.zeroPos
        if check_list(des, temp):  # if this is the target configuration, return it
            return tempN
        if len(tempN.operation
               ) == step and tempN.flag == 0:  # forced-swap condition met; perform the swap
            temp = tempN.num.copy()
            if change_position[0] - 1 == pos:
                pos = change_position[1] - 1
            elif change_position[1] - 1 == pos:
                pos = change_position[0] - 1
            temp[change_position[0] -
                 1], temp[change_position[1] -
                          1] = temp[change_position[1] -
                                    1], temp[change_position[0] - 1]
            swap = []
            if not check(temp, des):
                pos1, pos2, cost_swap = getRightChange(temp, des, tempN.step,
                                                       cost_swap)
                if pos1 == pos:
                    pos = pos2
                elif pos2 == pos:
                    pos = pos1
                temp[pos1], temp[pos2] = temp[pos2], temp[pos1]
                swap.append(pos1 + 1)
                swap.append(pos2 + 1)
            s = ""
            for i in temp:
                s += str(i)
            mymap[s] = 1
            operation = tempN.operation.copy()
            temp_step = tempN.step
            tempN = node(temp, temp_step, pos, des, operation, swap, 1)
            if cost_swap > tempN.cost:
                cost_swap = tempN.cost
            if tempN.cost not in map_cost:
                map_cost[tempN.cost] = 1  # record the estimated cost of every best swap
            else:
                map_cost[tempN.cost] += 1
            if check_list(des, temp):  # if the swap yields the target configuration, return immediately
                operation.append(' ')  # per the test team's request, append a character so the judge can detect the swap step
                tempN = node(temp, temp_step, pos, des, operation, swap, 1)
                return tempN
            else:
                que2.put(tempN)  # collect all post-swap nodes in the que2 queue
                continue

        # cnt handles the unsolvable case: if all four directions (cnt == 4) are blocked, there is no solution.
        # If that happens before the forced swap, we pad with back-and-forth moves until the required step count is reached.
        cnt = 0
        for i in range(4):
            if changeId[pos][i] != -1:
                pos = tempN.zeroPos
                temp = tempN.num.copy()
                temp[pos], temp[changeId[pos][i]] = temp[changeId[pos]
                                                         [i]], temp[pos]
                s = ""
                for j in temp:
                    s += str(j)
                if s not in mymap:
                    mymap[s] = 1
                    operation = tempN.operation.copy()
                    operation.append(dir[i])
                    temp_step = tempN.step + 1
                    temp_num = temp
                    tempM = node(temp_num, temp_step, changeId[pos][i], des,
                                 operation, tempN.swap, tempN.flag)
                    que.put(tempM)
                else:
                    cnt += 1
            else:
                cnt += 1

        if cnt == 4 and tempN.step < step:  # pad with back-and-forth moves
            # If the puzzle is found unsolvable before the forced swap, shuttle the blank
            # tile back and forth so the sequence reaches the target step count directly.
            temp = tempN.num.copy()
            operation = tempN.operation.copy()
            m = operation[len(operation) - 1]
            delta = step - len(operation)
            pos = tempN.zeroPos
            temp, operation, pos = getOrder(temp, operation, delta, m,
                                            pos)  # append the back-and-forth move sequence
            tempM = node(temp, step, pos, des, operation, tempN.swap,
                         tempN.flag)
            que.put(tempM)
    if not que2.empty():
        #print(1)
        return bfsAfterSwap(que2, des, mymap, cost_swap)
Example #15
    def __init__(self, perutt_loader, batch_size=16, cache_size=32):
        self.max_size = cache_size * batch_size
        self.num_utts = batch_size
        self.pqueue = PriorityQueue()
        self.loader = perutt_loader
Example #16
        self.cons = []
        self.visited = False
        self.width = -float("infinity")

    def __str__(self):
        return str(self.width)


graph = [Node() for _ in range(5)]
edges = [(0, 1, 1), (1, 3, 10), (0, 3, 5), (0, 2, 2), (2, 4, 5), (3, 4, 2),
         (2, 3, 3)]
for s, e, w in edges:
    graph[s].cons.append((e, w))
    graph[e].cons.append((s, w))

que = PriorityQueue(-1)
start = 0
end = 4
graph[start].width = float("infinity")
que.put((0, start))


def check(graph, c):
    print("visiting", c)
    graph[c].visited = True
    for t, w in graph[c].cons:
        if graph[t].visited == False:
            print(t, w)
            alt = max(graph[t].width, min(graph[c].width, w))
            print("alt is ", alt, "width[t] is", graph[t].width)
            if alt > graph[t].width:
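                # The original snippet is truncated here. A hedged guess at the
                # remaining widest-path relaxation (assumes the queue is made
                # max-first by negating keys; not the original code):
                graph[t].width = alt  # found a wider bottleneck path to t
                que.put((-alt, t))    # negative key so the widest candidate pops first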
Example #17
    def beam_decode(self,
                    init_h,
                    enc_hids,
                    context,
                    beam_width,
                    max_unroll,
                    topk=1):
        '''
        https://github.com/budzianowski/PyTorch-Beam-Search-Decoding/blob/master/decode_beam.py
        :param init_h: input tensor of shape [B, H] for start of the decoding
        :param enc_hids: if you are using attention mechanism you can pass encoder outputs, [B, T, H] where T is the maximum length of input sentence
        :param topk: how many sentence do you want to generate
        :return: decoded_batch
        '''
        batch_size = init_h.size(0)
        decoded_words = np.zeros((batch_size, topk, max_unroll), dtype=int)  # np.int was removed in NumPy 1.24
        sample_lens = np.zeros((batch_size, topk), dtype=int)
        scores = np.zeros((batch_size, topk))

        for idx in range(batch_size):  # decoding goes sentence by sentence
            if isinstance(init_h, tuple):  # LSTM case
                h = (init_h[0][idx, :].view(1, 1, -1),
                     init_h[1][idx, :].view(1, 1, -1))
            else:
                h = init_h[idx, :].view(1, 1, -1)
            enc_outs = enc_hids[idx, :, :].unsqueeze(
                0) if self.use_attention else None

            # Start with the start of the sentence token
            x = gVar(torch.LongTensor([[SOS_ID]]))

            # Number of sentence to generate
            endnodes = []
            number_required = min((topk + 1), topk - len(endnodes))

            # starting node -  hidden vector, previous node, word id, logp, length
            node = BeamSearchNode(h, None, x, 0, 1)
            nodes = PriorityQueue()

            # start the queue
            nodes.put((-node.eval(), node))
            qsize = 1

            # start beam search
            while True:
                # give up when decoding takes too long
                if qsize > 2000: break

                # fetch the best node
                score, n = nodes.get()
                x = n.wordid
                h = n.h

                if n.wordid.item() == EOS_ID and n.prevNode != None:
                    endnodes.append((score, n))
                    # if we reached maximum # of sentences required
                    if len(endnodes) >= number_required:
                        break
                    else:
                        continue

                # decode for one step using decoder
                out, h = self.forward(h.squeeze(0), enc_outs, None,
                                      x)  # out [1 x 1 x vocab_size]
                out = out.squeeze(1)  # [1 x vocab_size]

                # PUT HERE REAL BEAM SEARCH OF TOP
                log_prob, indexes = torch.topk(out,
                                               beam_width)  # [1 x beam_width]
                nextnodes = []

                for new_k in range(beam_width):
                    decoded_t = indexes[0][new_k].view(1, -1)
                    log_p = log_prob[0][new_k].item()

                    node = BeamSearchNode(h, n, decoded_t, n.logp + log_p,
                                          n.len + 1)
                    score = -node.eval()
                    nextnodes.append((score, node))

                # put them into queue
                for i in range(len(nextnodes)):
                    score, nn = nextnodes[i]
                    nodes.put((score, nn))
                    # increase qsize
                qsize += len(nextnodes) - 1

            # choose nbest paths, back trace them
            if len(endnodes) == 0:
                endnodes = [nodes.get() for _ in range(topk)]

            uid = 0
            for score, n in sorted(endnodes, key=operator.itemgetter(0)):
                utterance, length, score = [], 0, 0.0
                utterance.append(n.wordid)
                # back trace
                while n.prevNode != None:
                    n = n.prevNode
                    utterance.append(n.wordid)
                    length = length + 1
                    score = score + n.logp
                utterance = utterance[::-1]  #reverse
                decoded_words[idx,
                              uid, :min(length, max_unroll)] = utterance[:min(
                                  length, max_unroll)]
                sample_lens[idx, uid] = min(length, max_unroll)
                scores[idx, uid] = score
                uid = uid + 1

        return decoded_words, sample_lens, scores
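
BeamSearchNode is not defined in this snippet. A minimal sketch of a compatible class, following the decode_beam.py reference linked in the docstring; the length-normalized eval() is an assumption:

class BeamSearchNode(object):
    # Sketch: attribute names mirror the usage in beam_decode above.
    def __init__(self, hiddenstate, previousNode, wordId, logProb, length):
        self.h = hiddenstate          # decoder hidden state
        self.prevNode = previousNode  # parent node, for backtracing
        self.wordid = wordId          # token tensor for this step
        self.logp = logProb           # cumulative log-probability
        self.len = length             # number of tokens so far

    def eval(self, alpha=1.0):
        # Length-normalized score; higher is better (it is negated when queued).
        reward = 0
        return self.logp / float(self.len - 1 + 1e-6) + alpha * reward

Note that ties on the score make the PriorityQueue compare BeamSearchNode objects directly, which raises a TypeError; the variant in Example #19 below avoids this by inserting id(node) as a tie-breaker.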
Example #18
def solve(G):
    """
    Args:
        G: networkx.Graph

    Returns:
        T: networkx.Graph
    """

    # copy G and its nodes to use later
    nds = G.nodes()
    G_copy = G.copy()

    # remove self edges from graph
    for n in nds:
        try:
            G_copy.remove_edge(n, n)
        except nx.NetworkXError:  # no self-loop on this node
            continue

    # base case for Kn graphs
    for node in nds:
        degree = np.sum([1 for i in G.neighbors(node) if i != node])
        # print(len(nds))
        # print(degree)
        if degree == len(nds) - 1:
            # print(nds, G_copy.degree(node))
            # print("SHOULD NOT GO HERE")
            dct = G.neighbors(node)
            for i in dct:
                if i != node:
                    G_copy.remove_node(i)
            # print("IT GOT HERE!")
            # print(G_copy)
            # print(is_valid_network(G, G_copy))
            return G_copy

    # form MST T, given G
    T = nx.minimum_spanning_tree(G)

    nodez = T.copy().nodes()
    Q = PriorityQueue()
    for n in nodez:
        Q.put((T.degree(n), n))

    #remove one node
    T_copy = T.copy()  # compare against
    T_test = T.copy()  # test tree - remove from here

    # # remove two nodes at once
    # Q_copy = Q
    # T_parr = T.copy() # compare against
    # T_test_parr = T.copy() # test tree - remove two from here

    while Q.qsize() != 0:

        first_removal = Q.get()[1]
        # second_removal = None

        # #if option for second DO IT
        # if Q.qsize() >= 2:
        #     second_removal = Q.get()[1]

        if first_removal in T_test:

            # 2 nodes
            # if second_removal in T_test_parr and first_removal in T_test_parr:

            #     T_test_parr_copy = T_test.copy()
            #     T_test_parr.remove_node(first_removal)
            #     T_test_parr.remove_node(second_removal)
            #     if (len(T_test.nodes()) > 0 and is_valid_network(G, T_test):

            T_test_copy = T_test.copy()
            T_test.remove_node(first_removal)

            # heuristic 1: only remove vertices where resulting network is valid
            if (len(T_test.nodes()) > 0 and is_valid_network(G, T_test) and

                    # heuristic 2: only remove vertices whose removal reduces average pairwise distance
                    average_pairwise_distance(T_test) <
                    average_pairwise_distance(T_copy)):

                if len(T_copy.nodes()) > 0:
                    #neighs = T_copy.neighbors(v)
                    T_copy.remove_node(first_removal)
                    # if w and w in T_copy:
                    #     T_copy.remove_node(w)
                    #T_copy.remove_node(w)

                    # for node in T_copy.nodes():
                    #     # print("ADDED:", node)
                    #     Q.put((T_copy.degree(node), node))

                    Q = PriorityQueue()
                    for node in T_copy.nodes():
                        Q.put((T_copy.degree(node), node))

                else:
                    T_test = T_test_copy
            else:
                T_test = T_test_copy

    return T_copy
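
A hedged driver sketch; solve() additionally relies on is_valid_network and average_pairwise_distance being importable from the project's helpers, which are not shown here:

import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 2.0), (0, 2, 2.5)])
T = solve(G)
print(T.nodes(), T.edges())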
Example #19
    def beam_decode(self,
                    hid,
                    seq_len,
                    context,
                    start_token,
                    stop_at_token=None,
                    beam_width=10,
                    topk=5):
        '''
        :param target_tensor: target indexes tensor of shape [B, T] where B is the batch size and T is the maximum length of the output sentence
        :param decoder_hidden: input tensor of shape [1, B, H] for start of the decoding
        :param encoder_outputs: if you are using attention mechanism you can pass encoder outputs, [T, B, H] where T is the maximum length of input sentence
        :return: decoded_batch
        '''
        endnodes = []
        number_required = min((topk + 1), topk - len(endnodes))
        # Start with the start of the sentence token: torch.LongTensor([[SOS_token]], device=device)
        # starting node -  hidden vector, previous node, word id, logp, length, logits
        node = beam_search_node.BeamSearchNode(hid, None, start_token, 0, 1,
                                               None)
        nodes = PriorityQueue()

        # start the queue
        # Lower values have higher priority; id(node) breaks ties so that
        # node objects never get compared directly inside the heap.
        nodes.put((-node.eval(), id(node), node))
        qsize = 1

        # start beam search
        while True:
            # give up when decoding takes too long
            if qsize > 2000:
                break

            # fetch the best node
            score, _, n = nodes.get()

            action_v = n.wordid
            # Get the embedding of the sampled output token.
            decoder_input = self.emb(action_v)
            decoder_hidden = n.h

            # tensor.item(): if only one element is in the tensor, tensor.item() will return the value of the element.
            if n.wordid.item() == stop_at_token or (n.leng == seq_len
                                                    and n.prevNode != None):
                endnodes.append((score, id(n), n))
                # if we reached maximum # of sentences required
                if len(endnodes) >= number_required:
                    break
                else:
                    continue

            # decode for one step using decoder
            # decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_output)
            out_logits, decoder_hidden = self.decode_one(
                decoder_hidden, decoder_input, context)
            decoder_output = F.log_softmax(out_logits, dim=1)
            # out_probs = out_probs_v.data.cpu().numpy()[0]

            # PUT HERE REAL BEAM SEARCH OF TOP
            log_prob, indexes = torch.topk(decoder_output, beam_width)
            nextnodes = []

            for new_k in range(beam_width):
                decoded_t = indexes[0][new_k].view(-1)
                log_p = log_prob[0][new_k].item()

                # hidden vector, previous node, word id, logp, length
                node = beam_search_node.BeamSearchNode(decoder_hidden, n,
                                                       decoded_t,
                                                       n.logp + log_p,
                                                       n.leng + 1, out_logits)
                score = -node.eval()
                nextnodes.append((score, node))

            # put them into queue
            for i in range(len(nextnodes)):
                score, nn = nextnodes[i]
                nodes.put((score, id(nn), nn))
                # increase qsize
            qsize += len(nextnodes) - 1

        # choose nbest paths, back trace them
        if len(endnodes) == 0:
            endnodes = [nodes.get() for _ in range(topk)]

        utterances = []
        all_res_logits = []
        # Sort endnodes ascending by score (the first tuple element); lower score is better.
        for score, _, n in sorted(endnodes, key=operator.itemgetter(0)):
            utterance = []
            res_logits = []

            # back trace
            while n.prevNode != None:
                utterance.append(n.wordid.item())
                res_logits.append(n.logits)
                n = n.prevNode

            # [::-1]: Reverse.
            utterance = utterance[::-1]
            res_logits = res_logits[::-1]
            utterances.append(utterance)
            all_res_logits.append(torch.cat(res_logits))

        return all_res_logits, utterances
Example #20
def a_star_search(draw_func, grid, start, end):
    """
    A* Search  Algorithm
    ====================
    A* is an informed Search (the algorithm knows the location of the end node when starting) algorithm that is always 
    guaranteed to find the shortest path between a start and end node.
    It does so by making use of a heuristic function (a_star_heuristic) to determine which search path to extend.
    This is based on the current cost of the path plus the expected cost of the rest of the path (guessed by heuristic).
    This is formulated as:
    f(n) = g(n) + h(n) 
    where g(n) is the cost of the current path from start to the current node, 
    h(n) is the result of the heuristic function used to guess the expected cost/path length from the next node to the end node
    and f(n) ("f score") is the addition of these two, the value of which is used to direct the search

    Primary characteristics:
    + Complete solution     - If the solution exist, it is guaranteed to be found
    + Optimal Solution      - Guaranteed to find the shortest path
    - Complexity (O(b^d))   - Stores all observed nodes in memory

    So for each node that is being evaluated we have to record its distance from the start (along the current path) and its f score.
    Each node will also have alist of neighbours that will be evaluated using the above formula until the final node is reached.
    """

    # the open set is the list of discovered nodes that need to be evaluated to see if they are to be expanded further
    open_set = PriorityQueue()
    # IMPORTANT
    # The reason we are using the priority queue is that PQ.get() returns the element with the
    # lowest score: it compares the f-score first, then the path distance.
    # Because of this it is crucial that queue elements are ordered (f-score, path_length, node).

    # open_set.put(("f score", "path distance to here", "next neighbour to evaluate"))
    open_set.put((0, 0, start))

    # PriorityQueues have no way to check if an element is contained in them so use open_set_dict to keep track of nodes in the PriorityQueue
    open_set_dict = {start}

    came_from = {}  # a dict mapping each observed node to the node it came from

    # make maps of f and g scores for every node with a default of infinity
    g_scores = {node: float('inf') for row in grid for node in row}
    f_scores = {node: float('inf') for row in grid for node in row}

    # define values for starting node
    g_scores[start] = 0
    f_scores[start] = a_star_heuristic(start.get_position(),
                                       end.get_position())

    # get coordinates of end node which will be used every time the heuristic is called
    end_node_pos = end.get_position()

    # keep track of length of observed path, update on every iteration
    path_length = 0
    while not open_set.empty():

        # let the player quit the game if they want
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()

        current_node = open_set.get()[2]  # last element is the next node
        open_set_dict.remove(
            current_node
        )  # the node does not need to be re-evaluated later so remove from open_set_dict

        if current_node == end:
            # draw the shortest path
            # current_node will be the end node if we reach this code block
            # the lambda drawing function is passed in here so reconstruct_path can call it without variables being passed all around the code
            reconstruct_path(came_from, end, draw_func)
            end.set_end_node()
            return True

        # get f score for all neighbours
        for neighbour in current_node.neighbours:
            # f(n) = g(n) + h(n)
            # NOTE: we're adding 1 here because the distance from each node to its neighbour is 1
            # in general this is not the case (e.g. in road networks different roads have different lengths)
            g_score = g_scores[current_node] + 1

            # if this path to neighbour is better than any previous one
            if g_score < g_scores[neighbour]:

                # record the previous node (used in constructing final path)
                came_from[neighbour] = current_node

                g_scores[neighbour] = g_score
                h_score = a_star_heuristic(neighbour.get_position(),
                                           end_node_pos)
                f_score = g_score + h_score

                f_scores[neighbour] = f_score

                if neighbour not in open_set_dict:
                    path_length += 1
                    # open_set.put("f score", "path distance to here", "next nieghbor to evaluate")
                    open_set.put((f_scores[neighbour], path_length, neighbour))
                    open_set_dict.add(neighbour)

                    # change node colour as it is now open/being evaluated
                    neighbour.set_being_searched()

        # now redraw the grid
        draw_func()

        # mark the node as searched, except for the start node, which should keep its colour as the algorithm progresses
        if current_node != start:
            current_node.set_been_searched()

    return False
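
reconstruct_path is defined elsewhere in this project. A minimal sketch consistent with how it is called here; the marker method name set_path_node is hypothetical, chosen to mirror set_end_node():

def reconstruct_path(came_from, current, draw_func):
    # Walk backwards from the end node, marking and redrawing each path node.
    while current in came_from:
        current = came_from[current]
        current.set_path_node()  # hypothetical colour-marking method
        draw_func()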
Example #21
class Scheduler(object):

    lock = threading.Lock()
    listOfAlreadyBuiltPackages = set()
    listOfPackagesToBuild = []
    listOfPackagesCurrentlyBuilding = set()
    sortedList = []
    listOfPackagesNextToBuild = PriorityQueue()
    listOfFailedPackages = []
    priorityMap = {}
    pkgWeights = {}
    logger = None
    event = None
    stopScheduling = False
    mapPackagesToGraphNodes = {}
    coreToolChainBuild = False

    @staticmethod
    def setEvent(event):
        Scheduler.event = event

    @staticmethod
    def setLog(logName, logPath, logLevel):
        Scheduler.logger = Logger.getLogger(logName, logPath, logLevel)

    @staticmethod
    def setParams(sortedList, listOfAlreadyBuiltPackages):
        Scheduler.sortedList = sortedList

        Scheduler.listOfAlreadyBuiltPackages = listOfAlreadyBuiltPackages

        for pkg in Scheduler.sortedList:
            pkgName, pkgVersion = StringUtils.splitPackageNameAndVersion(pkg)
            if (pkg not in Scheduler.listOfAlreadyBuiltPackages
               or pkgName in constants.testForceRPMS):
                Scheduler.listOfPackagesToBuild.append(pkg)

        Scheduler.listOfPackagesCurrentlyBuilding = set()
        Scheduler.listOfPackagesNextToBuild = PriorityQueue()
        Scheduler.listOfFailedPackages = []

        # When performing (only) make-check, package dependencies are
        # irrelevant; i.e., all the packages can be "make-checked" in
        # parallel. So skip building the dependency graph. This is not
        # merely an optimization! A given package can define
        # additional packages to be installed in its build environment
        # when performing a make-check, under %if %{with_check}.
        # However, these are not really build-time-dependencies in the
        # usual sense; i.e., there is no ordering requirement when
        # building these packages; they only make sense when running a
        # `make check`. Hence, trying to build a dependency graph out
        # of them will result in anomalies such as cycles in the
        # graph. So skip building the graph altogether and schedule
        # all the `make check`s in parallel.
        skipGraphBuild = constants.rpmCheck
        Scheduler._setPriorities(skipGraphBuild)

        if constants.publishBuildDependencies:
            # This must be called only after calling _setPriorities(),
            # which builds the dependency graph.
            Scheduler._publishBuildDependencies()


    @staticmethod
    def notifyPackageBuildCompleted(package):
        with Scheduler.lock:
            if package in Scheduler.listOfPackagesCurrentlyBuilding:
                Scheduler.listOfPackagesCurrentlyBuilding.remove(package)
                Scheduler.listOfAlreadyBuiltPackages.add(package)
                if not constants.rpmCheck:
                    Scheduler._markPkgNodeAsBuilt(package)

    @staticmethod
    def notifyPackageBuildFailed(package):
        with Scheduler.lock:
            if package in Scheduler.listOfPackagesCurrentlyBuilding:
                Scheduler.listOfPackagesCurrentlyBuilding.remove(package)
                Scheduler.listOfFailedPackages.append(package)

    @staticmethod
    def isAllPackagesBuilt():
        if Scheduler.listOfPackagesToBuild:
            return False
        return True

    @staticmethod
    def isAnyPackagesFailedToBuild():
        if Scheduler.listOfFailedPackages:
            return True
        return False

    @staticmethod
    def isAnyPackagesCurrentlyBuilding():
        if Scheduler.listOfPackagesCurrentlyBuilding:
            return True
        return False

    @staticmethod
    def getNextPackageToBuild():
        with Scheduler.lock:
            if Scheduler.stopScheduling:
                return None

            if not Scheduler.listOfPackagesToBuild:
                if Scheduler.event is not None:
                    Scheduler.event.set()

            if Scheduler.listOfPackagesNextToBuild.empty():
                Scheduler._getListNextPackagesReadyToBuild()

            if Scheduler.listOfPackagesNextToBuild.empty():
                return None

            packageTup = Scheduler.listOfPackagesNextToBuild.get()

            package = packageTup[1]
            if not constants.startSchedulerServer and Scheduler.listOfPackagesNextToBuild.qsize() > 0:
                ThreadPool.activateWorkerThreads(
                    Scheduler.listOfPackagesNextToBuild.qsize())
            Scheduler.listOfPackagesCurrentlyBuilding.add(package)
            Scheduler.listOfPackagesToBuild.remove(package)
            return package

    @staticmethod
    def getDoneList():
        return list(Scheduler.listOfAlreadyBuiltPackages)


    @staticmethod
    def _publishBuildDependencies():
            Scheduler.logger.debug("Publishing Build dependencies")
            dependencyLists = {}
            for package in list(Scheduler.mapPackagesToGraphNodes.keys()):
                dependencyLists[package] = []
                pkgNode = Scheduler.mapPackagesToGraphNodes[package]
                for childPkg in list(pkgNode.childPkgNodes):
                    dependencyLists[package].append(childPkg.packageName + "-" + childPkg.packageVersion)
            with open(str(constants.logPath) + "/BuildDependencies.json", 'w') as graphfile:
                graphfile.write(json.dumps(dependencyLists, sort_keys=True, indent=4))


    @staticmethod
    def __getRequiredTypePackages(pkg, requiresType):
        listRPMPackages = []
        if requiresType == "build":
            listRPMPackages.extend(SPECS.getData().getBuildRequiresForPkg(pkg))
        elif requiresType == "install":
            listRPMPackages.extend(SPECS.getData().getRequiresAllForPkg(pkg))

        # Remove duplicates.
        listRPMPackages = list(set(listRPMPackages))

        listPackages = set()

        for reqPkg in listRPMPackages:
            basePkg = SPECS.getData().getBasePkg(reqPkg)
            listPackages.add(basePkg)

        return list(listPackages)


    @staticmethod
    def _getBuildRequiredPackages(pkg):
        return Scheduler.__getRequiredTypePackages(pkg, "build")


    @staticmethod
    def _getRequiredPackages(pkg):
        return Scheduler.__getRequiredTypePackages(pkg, "install")

    @staticmethod
    def _createNodes():
        # Create a graph node to represent every package
        for package in Scheduler.sortedList:
            packageName, packageVersion = StringUtils.splitPackageNameAndVersion(package)
            node = DependencyGraphNode(packageName, packageVersion,
                                       Scheduler._getWeight(package))
            Scheduler.mapPackagesToGraphNodes[package] = node

            if package in Scheduler.listOfAlreadyBuiltPackages:
                node.built = 1

    @staticmethod
    def _createCoreToolChainGraphNodes():
        # GRAPH-BUILD STEP 1: Initialize graph nodes for each core tool chain package.
        Scheduler._createNodes()

        # GRAPH-BUILD STEP 2: Mark package dependencies in the graph.
        # The package dependencies are linear, like A - B - C - D, following the
        # order of packages in sortedList: unless package A is built, none of the
        # later packages B, C, D can be built.
        for index,package in enumerate(Scheduler.sortedList):
            pkgNode = Scheduler.mapPackagesToGraphNodes[package]
            for childPkg in Scheduler.sortedList[:index]:
                childPkgNode = Scheduler.mapPackagesToGraphNodes[childPkg]
                pkgNode.childPkgNodes.add(childPkgNode)
                childPkgNode.parentPkgNodes.add(pkgNode)

    @staticmethod
    def _createGraphNodes():

        # GRAPH-BUILD STEP 1: Initialize graph nodes for each package.
        #
        # Create a graph with a node to represent every package and all
        # its dependent packages in the given list.
        Scheduler._createNodes()

        for package in Scheduler.sortedList:
            pkgNode = Scheduler.mapPackagesToGraphNodes[package]
            for childPackage in Scheduler._getBuildRequiredPackages(package):
                childPkgNode = Scheduler.mapPackagesToGraphNodes[childPackage]
                pkgNode.buildRequiresPkgNodes.add(childPkgNode)

            for childPackage in Scheduler._getRequiredPackages(package):
                childPkgNode = Scheduler.mapPackagesToGraphNodes[childPackage]
                pkgNode.installRequiresPkgNodes.add(childPkgNode)

        # GRAPH-BUILD STEP 2: Mark package dependencies in the graph.
        #
        # Add parent-child relationships between dependent packages.
        # If a package 'A' build-requires or install-requires package 'B', then:
        #   - Mark 'B' as a child of 'A' in the graph.
        #   - Mark 'A' as a parent of 'B' in the graph.
        #
        #                     A
        #
        #                  /     \
        #                 v       v
        #
        #                B         C
        #
        for package in Scheduler.sortedList:
            pkgNode = Scheduler.mapPackagesToGraphNodes[package]

            for childPkgNode in pkgNode.buildRequiresPkgNodes:
                pkgNode.childPkgNodes.add(childPkgNode)
                childPkgNode.parentPkgNodes.add(pkgNode)

            for childPkgNode in pkgNode.installRequiresPkgNodes:
                pkgNode.childPkgNodes.add(childPkgNode)
                childPkgNode.parentPkgNodes.add(pkgNode)

    @staticmethod
    def _optimizeGraph():

        # GRAPH-BUILD STEP 3: Convert weak (install-requires) dependencies
        #                     into strong (aux-build-requires) dependencies.
        #
        # Consider the following graph on the left, where package 'A'
        # install-requires 'B' and build-requires 'C'.  Package 'C'
        # install-requires 'D'. Package 'D' build-requires 'E' and
        # install-requires 'F'.
        #
        #  b     : build-requires dependency
        #  i     : install-requires dependency
        #  aux-b : auxiliary build-requires dependency (explained later)
        #
        # Now, we know that install-requires dependencies are weaker
        # than build-requires dependencies. That is, for example, in the
        # original graph below, package 'B' does not need to be built
        # before package 'A', but package 'C' must be built before
        # package 'A'.
        #
        # Using this knowledge, we optimize the graph by re-organizing
        # the dependencies such that all of them are strong (we call
        # these newly computed build-dependencies "auxiliary build
        # dependencies"). The optimized graph for the example below is
        # presented on the right -- the key property of the optimized
        # graph is that every child package *MUST* be built before its
        # parent(s). This process helps relax package dependencies to
        # a great extent, by giving us the flexibility to delay
        # building certain packages until they are actually needed.
        # Another important benefit of this optimization is that it
        # nullifies certain dependencies altogether (eg: A->B), thereby
        # enabling a greater level of build-parallelism.
        #
        #      Original Graph                  Optimized Graph
        #                             +
        #          A                  |       B              A
        #                             +
        #       i / \ b               |                b/    |aux-b  \aux-b
        #        /   \                +                /     |        \
        #       v     v               |               v      v         v
        #                             +
        #      B        C             |              C       D          F
        #                             +
        #                \i           |                   b/
        #                 \           +                   /
        #                  v          |                  v
        #                             +
        #                  D          |                 E
        #                             +
        #                b/  \i       |
        #                /    \       +
        #               v      v      |
        #                             +
        #              E        F     |
        #
        #
        # In the code below, we use 'accumulated-install-requires' set
        # as a placeholder to bubble-up install-requires dependencies of
        # each package to all its ancestors. In each such path, we look
        # for the nearest ancestor that has a build-requires dependency
        # on that path going up from the given package to that ancestor.
        # If we find such an ancestor, we convert the bubbled-up
        # install-requires packages accumulated so far into the
        # auxiliary-build-requires set at that ancestor. (This is how
        # 'D' and 'F' become aux-build-requires of 'A' in the optimized
        # graph above).
        #
        # Graph Traversal : Bottom-up (starting with packages that
        #                   have no children).
        #
        nodesToVisit = set()
        for package in Scheduler.sortedList:
            pkgNode = Scheduler.mapPackagesToGraphNodes[package]
            if len(pkgNode.childPkgNodes) == 0:
                nodesToVisit.add(pkgNode)

        while nodesToVisit:
            pkgNode = nodesToVisit.pop()

            pkgNode.accumInstallRequiresPkgNodes |= pkgNode.installRequiresPkgNodes

            if len(pkgNode.childPkgNodes) == 0:
                # Count self-visit if you don't expect any other
                # visitors.
                pkgNode.numVisits += 1

            for parentPkgNode in pkgNode.parentPkgNodes:
                if (pkgNode not in parentPkgNode.buildRequiresPkgNodes) and \
                   (pkgNode not in parentPkgNode.installRequiresPkgNodes):
                    raise Exception ("Visitor to parent is not its child " + \
                                     " Visitor: " + pkgNode.packageName + \
                                     " Parent:  " + parentPkgNode.packageName)

                if pkgNode in parentPkgNode.buildRequiresPkgNodes:
                    parentPkgNode.auxBuildRequiresPkgNodes |= pkgNode.accumInstallRequiresPkgNodes
                else:
                    parentPkgNode.accumInstallRequiresPkgNodes |= pkgNode.accumInstallRequiresPkgNodes

                parentPkgNode.numVisits += 1
                # Each child is expected to visit the parent once.
                # Note that a package might have the same packages as
                # both build-requires and install-requires children.
                # They don't count twice.
                numExpectedVisits = len(parentPkgNode.childPkgNodes)
                if parentPkgNode.numVisits == numExpectedVisits:
                    nodesToVisit.add(parentPkgNode)
                elif parentPkgNode.numVisits > numExpectedVisits:
                    raise Exception ("Parent node visit count > num of children " + \
                                     " Parent node: " + parentPkgNode.packageName + \
                                     " Visit count: " + str(parentPkgNode.numVisits) + \
                                     " Num of children: " + str(numExpectedVisits))

            pkgNode.accumInstallRequiresPkgNodes.clear()

        # Clear out the visit counter for reuse.
        for package in Scheduler.sortedList:
            pkgNode = Scheduler.mapPackagesToGraphNodes[package]
            if pkgNode.numVisits == 0:
                raise Exception ("aux-build-requires calculation never visited " \
                                 "package " + pkgNode.packageName)
            else:
                pkgNode.numVisits = 0

        # GRAPH-BUILD STEP 4: Re-organize the dependencies in the graph based on
        #                     the above optimization.
        #
        # Now re-arrange parent-child relationships between packages using the
        # following criteria:
        # If a package 'A' build-requires or aux-build-requires package 'B', then:
        #   - Mark 'B' as a child of 'A' in the graph.
        #   - Mark 'A' as a parent of 'B' in the graph.
        # If a package 'A' only install-requires package 'B', then:
        #   - Remove 'B' as a child of 'A' in the graph.
        #   - Remove 'A' as a parent of 'B' in the graph.
        # No node should have a non-zero accum-install-requires set.

        for package in Scheduler.sortedList:
            pkgNode = Scheduler.mapPackagesToGraphNodes[package]
            childPkgNodesToRemove = set()
            for childPkgNode in pkgNode.childPkgNodes:
                if (childPkgNode not in pkgNode.buildRequiresPkgNodes) and \
                   (childPkgNode not in pkgNode.auxBuildRequiresPkgNodes):
                    # We can't modify a set during iteration, so we
                    # accumulate the set of children we want to
                    # remove, and delete them after the for-loop.
                    childPkgNodesToRemove.add(childPkgNode)
                    childPkgNode.parentPkgNodes.remove(pkgNode)

            pkgNode.childPkgNodes = pkgNode.childPkgNodes - \
                                    childPkgNodesToRemove

            for newChildPkgNode in pkgNode.auxBuildRequiresPkgNodes:
                pkgNode.childPkgNodes.add(newChildPkgNode)
                newChildPkgNode.parentPkgNodes.add(pkgNode)

    @staticmethod
    def _calculateAllRequiredPackagesPerNode():
        # pkgNode holds information about the spec package without delving into
        # its subpackages. getRequiresTreeOfBasePkgsForPkg() creates the full
        # build-time dependency list based on subpackage dependencies by walking
        # the tree of:
        #     BuildRequires and their Requires tree + Requires and their Requires tree
        # Keep the graph simple by caching this preprocessed information per
        # pkgNode; it shouldn't add much memory overhead.
        for package in Scheduler.sortedList:
            pkgNode = Scheduler.mapPackagesToGraphNodes[package]
            pkgNode.allRequiredPackages.extend(SPECS.getData().getRequiresTreeOfBasePkgsForPkg(package))

    @staticmethod
    def _calculateCriticalChainWeights():

        # GRAPH-BUILD STEP 5: Calculate critical-chain-weight of packages.
        #
        # Calculation of critical-chain-weight (the key scheduling
        # metric):
        # --------------------------------------------------------
        # Let us define a "chain" of a given package to be the
        # sequence of parent packages that can be built starting from
        # that package. For example, if a package 'A' build-requires
        # 'B', which in turn build-requires 'C', then one of the
        # chains of 'C' is C->B->A. Now, if there are
        # multiple such chains possible from 'C', then we define the
        # "critical-chain" of 'C' to be the longest of those chains,
        # where "longest" is determined by the time it takes to build
        # all the packages in that chain. The build-times of any two
        # chains can be compared based on the sum of the
        # individual weights of each package in their respective
        # chains.
        #
        # Below, we calculate the critical-chain-weight of each
        # package (which is the maximum weight of all the paths
        # leading up to that package). Later on, we will schedule
        # package-builds by the decreasing order of the packages'
        # critical-chain-weight.
        #
        #
        #               ...  ...        ...
        #                 \   |         /
        #                  v  v        v
        #
        #                     A        B        C
        #
        #                      \       |       /
        #                       \      |      /
        #                        v     v     v
        #
        #                              D
        #
        #                            /
        #                           /
        #                          v
        #
        #                          E
        #
        #
        # In the above graph, the critical chain weight of 'D' is
        # computed as:
        # criticalChainWeight(D) = weight(D) +
        #                          max (criticalChainWeight(A),
        #                               criticalChainWeight(B),
        #                               weight(C))
        #
        # Graph Traversal : Top-down (starting with packages that
        #                   have no parents).
        #
        nodesToVisit = set()
        for package in Scheduler.sortedList:
            pkgNode = Scheduler.mapPackagesToGraphNodes[package]
            if len(pkgNode.parentPkgNodes) == 0:
                nodesToVisit.add(pkgNode)

        while nodesToVisit:
            pkgNode = nodesToVisit.pop()

            if len(pkgNode.parentPkgNodes) == 0:
                pkgNode.criticalChainWeight = pkgNode.selfWeight
                # Count self-visit if you don't expect any other
                # visitors.
                pkgNode.numVisits += 1

            for childPkgNode in pkgNode.childPkgNodes:
                if pkgNode not in childPkgNode.parentPkgNodes:
                    raise Exception ("Visitor to child node is not its parent " + \
                                     " Visitor: " + pkgNode.packageName + \
                                     " Child node: " + childPkgNode.packageName)

                if childPkgNode.numVisits == len(childPkgNode.parentPkgNodes):
                    raise Exception ("Child node visit count > number of parents " + \
                                     " Child node: " + childPkgNode.packageName + \
                                     " Visit count: " + str(childPkgNode.numVisits) + \
                                     " Num of parents: " + \
                                     str(len(childPkgNode.parentPkgNodes)))

                childPkgNode.criticalChainWeight = max(
                    childPkgNode.criticalChainWeight,
                    pkgNode.criticalChainWeight + childPkgNode.selfWeight)

                childPkgNode.numVisits += 1
                # We can visit this package's children only after this
                # package has been visited by all its parents (thus
                # guaranteeing that its criticalChainWeight has
                # stabilized).
                if childPkgNode.numVisits == len(childPkgNode.parentPkgNodes):
                    nodesToVisit.add(childPkgNode)

        # Clear out the visit counter for reuse.
        for package in Scheduler.sortedList:
            pkgNode = Scheduler.mapPackagesToGraphNodes[package]
            if pkgNode.numVisits == 0:
                raise Exception ("critical-chain-weight calculation never visited " + \
                                 "package " + pkgNode.packageName)
            else:
                pkgNode.numVisits = 0


    @staticmethod
    def _buildGraph():
        if Scheduler.coreToolChainBuild:
            Scheduler._createCoreToolChainGraphNodes()
        else:
            Scheduler._createGraphNodes()
            Scheduler._optimizeGraph()
            Scheduler._calculateAllRequiredPackagesPerNode()
        Scheduler._calculateCriticalChainWeights()


    @staticmethod
    def _parseWeights():
        Scheduler.pkgWeights.clear()
        with open(constants.packageWeightsPath, 'r') as weightFile:
            Scheduler.pkgWeights = json.load(weightFile)

    # A package's weight is an indicator of the time required to build
    # that package, relative to other packages. These weights do not
    # take build-time/install-time dependencies into account -- they
    # are the individual build-times of the respective packages.
    # Package weights are positive integers, with a default value of 1.
    @staticmethod
    def _getWeight(package):
        # Package weights are assumed to be independent of package
        # version (i.e., in the case of multi-version packages such as
        # Go or Kubernetes, all the versions have the same weight). So
        # convert packageName-version to packageName before looking up
        # the package weight.
        package, _ = StringUtils.splitPackageNameAndVersion(package)
        try:
            return int(Scheduler.pkgWeights[package]) + 1
        except KeyError:
            return 1

    @staticmethod
    def _getPriority(package):
        try:
            return int(Scheduler.priorityMap[package])
        except KeyError:
            return 0


    @staticmethod
    def _setPriorities(skipGraphBuild):
        if skipGraphBuild:
            for package in Scheduler.sortedList:
                Scheduler.priorityMap[package] = 0
        else:
            Scheduler._parseWeights()
            Scheduler._buildGraph()

            for package in Scheduler.sortedList:
                pkgNode = Scheduler.mapPackagesToGraphNodes[package]
                Scheduler.priorityMap[package] = pkgNode.criticalChainWeight

        Scheduler.logger.debug("setPriorities: priority of all packages:")
        Scheduler.logger.debug(Scheduler.priorityMap)


    @staticmethod
    def _checkNextPackageIsReadyToBuild(package):
        pkgNode = Scheduler.mapPackagesToGraphNodes[package]
        if pkgNode.built == 1:
            Scheduler.logger.warning("This pkg %s-%s is already built, " \
                                     "but still present in listOfPackagesToBuild" \
                                     % (pkgNode.packageName, pkgNode.packageVersion))
            return False

        if Scheduler.coreToolChainBuild:
            # For the core-toolchain list, just use the graph
            for childPkgNode in pkgNode.childPkgNodes:
                if childPkgNode.built == 0:
                    return False
        else:
            # For the rest of the packages, built in parallel, we have to consider the
            # entire tree of dependencies cached in the allRequiredPackages set.
            for p in pkgNode.allRequiredPackages:
                if Scheduler.mapPackagesToGraphNodes[p].built == 0:
                    return False

        return True

    @staticmethod
    def _markPkgNodeAsBuilt(package):
        pkgNode = Scheduler.mapPackagesToGraphNodes[package]
        Scheduler.logger.debug("Marking pkgNode as built = %s" % pkgNode.packageName)
        pkgNode.built = 1


    @staticmethod
    def _getListNextPackagesReadyToBuild():
        for pkg in Scheduler.listOfPackagesToBuild:
            if pkg in Scheduler.listOfPackagesCurrentlyBuilding:
                continue
            if constants.rpmCheck or Scheduler._checkNextPackageIsReadyToBuild(pkg):
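                # queue.PriorityQueue is a min-heap, so the priority is
                # negated to pop the highest-priority package first.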
                Scheduler.listOfPackagesNextToBuild.put((-Scheduler._getPriority(pkg), pkg))
                Scheduler.logger.debug("Adding " + pkg + " to the schedule list")
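
Note: queue.PriorityQueue is a min-heap, so the scheduler above stores
(-priority, package) tuples to pop the highest-priority package first. A
minimal standalone sketch of that pattern (the package names and weights are
invented for illustration):

from queue import PriorityQueue

pq = PriorityQueue()
for pkg, priority in [("glibc", 50), ("zlib", 10), ("gcc", 90)]:
    pq.put((-priority, pkg))  # negate so the highest priority pops first

while not pq.empty():
    negPriority, pkg = pq.get()
    print(pkg, -negPriority)  # gcc 90, then glibc 50, then zlib 10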
Exemplo n.º 22
0
    def __init__(self):
        from queue import PriorityQueue
        self.timeline = PriorityQueue()
Exemplo n.º 23
0
from queue import PriorityQueue
import copy

q = PriorityQueue()

T = int(input())

for t in range(T):
    R, C = map(int, input().split())
    grid = [list(map(int, input().split())) for _ in range(R)]
    ans = copy.deepcopy(grid)
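    # PriorityQueue is a min-heap; storing (2000000 - height) makes the
    # tallest cells pop first (heights are assumed to stay below 2,000,000).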

    for i in range(R):
        for j in range(C):
            q.put((2000000 - grid[i][j], (i, j)))

    while not q.empty():
        node = q.get()[1]

        directions = [[0, -1], [0, 1], [1, 0], [-1, 0]]
        for d in directions:
            r, c = node[0] + d[0], node[1] + d[1]
            if not (0 <= r < R and 0 <= c < C): continue
            if ans[node[0]][node[1]] - ans[r][c] > 1:
                ans[r][c] = ans[node[0]][node[1]] - 1
                q.put((2000000 - ans[r][c], (r, c)))

    count = 0
    for i in range(R):
        for j in range(C):
            count += ans[i][j] - grid[i][j]

    # presumably the intended per-case output (Code Jam style)
    print("Case #%d: %d" % (t + 1, count))
Exemplo n.º 24
0
def main():

    # We make these variables global so that inputFile.py can set them.
    # After this they are never modified.
    global variables
    global depth
    global bfe  # bold font e
    global fNames
    global degrees
    global B
    global G
    global l
    global r
    global startSolution
    global workingDirectory
    global logTolerance
    global verbose
    global projectiveVariableGroups

    # See the initialization above for an example of how to use this
    # variable
    global algebraicTorusVariableGroups

    global nonzeroCoordinate

    global bertiniVariableGroupString

    global bertiniVariables
    global revisedEquationsText
    global variableGroupText
    global bertiniTrackingOptionsText

    global maxProcesses

    global explorationOrder

    global targetDimensions  # a list of multidimensions

    global loadDimensionLinearsAndStartSolution
    global loadDegreeLinears
    global pruneByDimension
    global pruneByPoint
    global pathToBertini

    # We read in the user's configuration by evaluating the following
    # string appended with the file 'inputFile.py'.
    setVariablesToGlobal = """
global variables
global depth
global bfe
global fNames
global degrees
global B
global G
global l
global r
global startSolution
global workingDirectory
global logTolerance
global verbose
global projectiveVariableGroups
global algebraicTorusVariableGroups
global nonzeroCoordinates
global maxProcesses
global targetDimensions
global explorationOrder
global loadDimensionLinearsAndStartSolution
global loadDegreeLinears
global pruneByDimension
global pruneByPoint
global pathToBertini
"""
    # Read in the user's input from the files 'bertiniInput_*'
    try:
        with open("bertiniInput_variables", "r") as f:
            bertiniVariables = f.read().strip()
        if os.path.exists("bertiniInput_trackingOptions"):
            with open("bertiniInput_trackingOptions", "r") as f:
                bertiniTrackingOptionsText = f.read().strip()
        else:
            bertiniTrackingOptionsText = ""
        with open("bertiniInput_equations", "r") as f:
            bertiniEquations = f.read().strip()
    # If anything goes wrong with reading in this input, then exit
    except:
        print(
            "Exiting due to incomplete input. Please include the following files:"
        )
        print("\t" + "bertiniInput_variables")
        print("\t" + "bertiniInput_trackingOptions")
        print("\t" + "bertiniInput_equations")
        sys.exit(1)

    # Set the string variableGroupText which is passed to bertini
    variableGroupText = ""

    # Set the list variable which is used by multiregeneration.py
    variables = []
    lines = bertiniVariables.splitlines()
    for i in range(len(lines)):
        variableGroupType = lines[i].split(" ")[0]
        if not (variableGroupType == "variable_group"
                or variableGroupType == "hom_variable_group"):
            print(
                "Exiting because a variable group other than 'variable_group' or 'hom_variable_group' was declared."
            )
            sys.exit(1)
        if variableGroupType == "hom_variable_group":
            projectiveVariableGroups.append(i)
        variableGroupText += lines[i] + "\n"
        variables.append(lines[i][lines[i].find(" "):].replace(
            " ", "").replace(";", "").split(","))
    # Initialize bfe to the dimension of the ambient product of
    # projective spaces.

    bfe = []
    for i in range(len(variables)):
        isProjectiveGroup = 0
        if i in projectiveVariableGroups:
            isProjectiveGroup = 1
        bfe.append(len(variables[i]) - isProjectiveGroup)
    # Print a summary of the parsed variable groups.
    if verbose > 1:
        print("Ambient space dimension ")
        print(bfe)
        print("variables")
        print(variables)
        print("projectiveVariableGroups")
        print(projectiveVariableGroups)
        print("variableGroupText")
        print(variableGroupText)

    # A string which stores the defining equations of the user-supplied
    # functions
    revisedEquationsText = ""

    lines = bertiniEquations.splitlines()

    # A list which stores the user-supplied names of the functions
    fNames = []
    for i in range(len(lines)):
        functionLine = lines[i].split(" ")[0]
        if functionLine == "function":
            fNames += lines[i][lines[i].find(" "):].replace(" ", "").replace(
                ";", "").split(",")
        else:
            revisedEquationsText += lines[i] + "\n"
    if verbose > 1:
        print("G")
        print(G)
        print("revisedEquationsText")
        print(revisedEquationsText)

    # We read in the user's input from 'inputFile.py' by executing the
    # following
    exec(setVariablesToGlobal + open("inputFile.py").read())
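    # (The 'global' declarations prepended above make assignments in
    # inputFile.py rebind this module's globals; without them, exec() would
    # write into a throwaway locals mapping inside main().)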

    # Print a system summary to the screen.
    if verbose > 0:
        print(
            "\n################### Setup multiregeneration ####################\n"
        )
        print("These variable groups have been selected:\n" +
              variableGroupText)
        print("Solutions are found in run/_completed_smooth_solutions and:")
        for c, f in enumerate(fNames):  # 0 is the depth we start with
            if c >= depth:
                print("depth >= " + str(c) + " satisfy " + f + " = 0")
    # Determine random linear polynomials l[i][j] and start solution
    if loadDimensionLinearsAndStartSolution:
        l = []
        for i in range(len(variables)):
            with open("dimensionLinears_%s" % i, "r") as f:
                A = (line.rstrip() for line in f)
                A = list(line for line in A if line)
            l.append(A)
        with open("startSolution", "r") as f:
            startSolution = (line.rstrip() for line in f)
            startSolution = list(line for line in startSolution if line)
    else:
        (l, startSolution) = getLinearsThroughPoint(variables)
    # Determine random linear polynomials r[i][j] (the degree linears)
    r = []
    # Populate the 2D list 'r' with random linear equations, unless the
    # user has specified their own
    if not loadDegreeLinears:
        for i in range(len(variables)):
            r.append([])
            maxdeg = 0
            for s in range(len(fNames)):
                maxdeg = max(maxdeg, degrees[s][i])
            for d in range(maxdeg):
                r[i].append(getGenericLinearInVariableGroup(i))
    else:
        for i in range(len(variables)):
            maxdeg = 0
            for s in range(len(fNames)):
                maxdeg = max(maxdeg, degrees[s][i])
            with open("degreeLinears_%s" % i, "r") as f:
                A = (line.rstrip() for line in f)
                A = list(line for line in A if line)
            r.append(A)
    # The variable B specifies how many of the input equations to use.
    # If the user has not specified a value, then assume that all
    # equations are to be used.
    if B is None:
        B = len(fNames)
        if verbose > 1:
            print("B is set to %d" % B)
    if verbose > 0:
        print("\nExploring tree in order", explorationOrder)

    if verbose > 0:
        print(
            "\n################### Starting multiregeneration ####################\n"
        )
    #####################
    # Make a directory to store final solutions
    if os.path.isdir(workingDirectory):
        shutil.rmtree(workingDirectory)
    os.makedirs(workingDirectory)
    os.chdir(workingDirectory)
    os.makedirs("all_full_depth_solutions")
    os.makedirs("_truncated_singular_solutions")
    os.makedirs("_saturated_solutions")
    completedSmoothSolutions = "_completed_smooth_solutions"
    os.makedirs(completedSmoothSolutions)
    for i in range(depth, depth + len(fNames)):
        os.makedirs(completedSmoothSolutions + "/depth_%s" % i)
    # Write the start solution and linears to a file which is available to
    # the user
    with open("_tracking_information", "w") as trackingInfo:
        trackingInfo.write("\nUsing start solution\n")
        for i in startSolution:
            trackingInfo.write(i)
            trackingInfo.write("\n")
        trackingInfo.write("\nUsing dimension linears\n")
        for i in range(len(variables)):
            for j in range(len(l[i])):
                trackingInfo.write("l[%s][%s]" % (i, j))
                trackingInfo.write("\n")
                trackingInfo.write(l[i][j])
                trackingInfo.write("\n")
        trackingInfo.write("\nUsing degree linears\n")
        for i in range(len(variables)):
            for j in range(len(r[i])):
                trackingInfo.write(r[i][j])
                trackingInfo.write("\n")
    # branch node outline

    global queue
    queue = mp.Manager().Queue()  # a message queue for the child processes
    # to communicate with this one
    global priorityQueue
    priorityQueue = PriorityQueue()  # where this process, which is the
    # queue manager, stores the jobs that need to be done

    priorityQueue.put((0, [depth, G, B, bfe, startSolution]))

    # One extra process for the progress updater
    if verbose >= 1:
        pool = mp.Pool(maxProcesses + 1)
    else:
        pool = mp.Pool(maxProcesses)

    with jobsInPool.get_lock():
        jobsInPool.value = 0

    if verbose >= 1:
        pool.apply_async(updateProgressInfo)

    # This loop looks for messages from the child processes in the queue,
    # then puts them in the priority queue. When there is space for more
    # jobs, explore nodes in the priority queue.
    while True:
        if priorityQueue.empty() and queue.empty() and jobsInPool.value == 0:
            break
        if not queue.empty():
            job = queue.get()
            if explorationOrder == "breadthFirst":
                priority = job[0]
                priorityQueue.put(
                    (priority, job)
                )  #depth is first in tuple, will process lower depth jobs first
            elif explorationOrder == "depthFirst":
                priority = -1 * job[0]
                priorityQueue.put((priority, job))
            else:
                print(
                    "Error: explorationOrder should be 'breadthFirst' or 'depthFirst', not '%s'"
                    % explorationOrder)
                sys.exit(1)
        if not priorityQueue.empty() and jobsInPool.value < maxProcesses:
            with jobsInPool.get_lock():
                if verbose > 1:
                    print("queue size =", queue.qsize(), "priorityQueue size",
                          priorityQueue.qsize(), "jobsInPool =",
                          jobsInPool.value)
                jobsInPool.value += 1
                args = priorityQueue.get()[1]
                if verbose > 1:
                    print("dequeued node", args)
                pool.apply_async(processNode, (args, ), callback=decJobsInPool)
                if verbose > 1:
                    print("queue size =", queue.qsize(), "priorityQueue size",
                          priorityQueue.qsize(), "jobsInPool =",
                          jobsInPool.value)

    pool.terminate()
    pool.close()
    if verbose >= 1:
        updateProgressDisplay(cursorLeftAtBotten=True)
        print("Done.")
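
Note: the example above deliberately uses two queues: an mp.Manager().Queue(),
which is safe to share with child processes, and a queue.PriorityQueue, which
is thread-safe but not process-safe and therefore lives only in the parent. A
stripped-down sketch of that hand-off (worker here is a hypothetical stand-in
for the real processNode):

import multiprocessing as mp
from queue import PriorityQueue

def worker(msgQueue, depth):
    # Children report follow-up jobs through the process-safe manager queue.
    msgQueue.put(depth + 1)

if __name__ == "__main__":
    msgQueue = mp.Manager().Queue()  # shared with child processes
    jobs = PriorityQueue()           # owned by the parent process only
    jobs.put((0, 0))                 # (priority, depth)

    with mp.Pool(2) as pool:
        _, depth = jobs.get()
        pool.apply(worker, (msgQueue, depth))
        newDepth = msgQueue.get()
        jobs.put((newDepth, newDepth))  # breadth-first: lower depth pops first

    print(jobs.get())  # (1, 1)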
Exemplo n.º 25
0
    def reducer_init(self):
        self.queue = PriorityQueue(maxsize=100)
Exemplo n.º 26
0
    def __init__(self):
        self.priority_queue = PriorityQueue()
Exemplo n.º 27
0
from os import path
from queue import PriorityQueue

# configuration constants
DEFAULT_CAPACITY = 20
G_ENTRY_PCT = .6
ELEVATOR_SPEED = 1.5  # 1.5 seconds per floor traveled
MAX_WAIT = 60  # if someone has waited > 60s
SUPER_MAX_WAIT = 120  # if someone has waited > 120s

# log filename
LOG_DIR = "logs"
PERSON_LOG_FNAME = "person.sqlite3"
ELEVATOR_LOG_FNAME = "elevator.sqlite3"
FLOOR_LOG_FNAME = "floor.sqlite3"

# arrivals
ARRIVALS_DIR = "arrivals"
ARRIVALS_DATA_SET_CSV = path.join("data", "class_enrollment_list.csv")

# logging
VERBOSE = False

# global future event queue
#   structure of items
#       (time_of_event, object, new_state, [args])
FEQ = PriorityQueue()
CURR_DAY = 0  # current day
CURR_TIME = 0
ELEVATORS = []
BUILDING = None
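
Note: FEQ above is the future-event queue of a discrete-event simulation;
because PriorityQueue is a min-heap keyed on the first tuple field, .get()
always returns the earliest pending event. A small sketch of how such a queue
is typically driven (the event payloads are invented):

from queue import PriorityQueue

FEQ = PriorityQueue()
FEQ.put((8.0, "elevator_0", "arrive_at_floor", [3]))
FEQ.put((2.5, "person_17", "press_button", [0]))

while not FEQ.empty():
    time_of_event, obj, new_state, args = FEQ.get()
    print(time_of_event, obj, new_state, args)  # pops in time order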
Exemplo n.º 28
0
    def run(start_node: Node, end_node: Node,
            distance_service: DistanceInterface):

        curr_node = start_node
        curr_distance = 0

        node_dict = {
            curr_node: {
                "sum_distance": 0,
                "preceding": None,
                "visited": True
            }
        }

        neighbor_queue = PriorityQueue()
        for (n_distance, n_node) in distance_service.get_neighbours_by_node(
                start_node.node_id):
            neighbor_queue.put([n_distance, n_node])
            node_dict[n_node] = {
                "sum_distance": n_distance,
                "preceding": deepcopy(curr_node),
                "visited": False,
            }

        while curr_node != end_node and not neighbor_queue.empty():
            curr_distance, curr_node = neighbor_queue.get()

            # Skip invalid nodes
            if node_dict[curr_node]["sum_distance"] != curr_distance:
                continue

            node_dict[curr_node]["visited"] = True

            for (n_distance,
                 n_node) in distance_service.get_neighbours_by_node(
                     curr_node.node_id):
                updated_distance = curr_distance + n_distance

                if n_node not in node_dict:
                    neighbor_queue.put([updated_distance, n_node])
                    node_dict[n_node] = {
                        "sum_distance": updated_distance,
                        "preceding": deepcopy(curr_node),
                        "visited": False,
                    }

                elif node_dict[n_node]["sum_distance"] > updated_distance:
                    if node_dict[n_node]["visited"]:
                        raise AssertionError(
                            "The fetched node is already visited but the path is not optimal!"
                        )

                    neighbor_queue.put([updated_distance, n_node])
                    node_dict[n_node]["preceding"] = deepcopy(curr_node)
                    node_dict[n_node]["sum_distance"] = updated_distance

        # Evaluate the solution ------------------------------------------------
        if curr_node != end_node:
            return {
                "path": [],
                "distance": float("inf"),
                "expanded": len(node_dict)
            }

        sum_distance = node_dict[end_node]["sum_distance"]

        # Reconstruct the path
        path = []
        while curr_node != start_node:
            path.insert(0, deepcopy(curr_node))
            curr_node = node_dict[curr_node]["preceding"]

        return {
            "path": path,
            "distance": sum_distance,
            "expanded": len(node_dict)
        }
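
Note: a minimal harness for the run() method above, assuming it is wrapped as
a @staticmethod of a hypothetical Dijkstra class whose module imports
PriorityQueue and deepcopy; Node and DictDistances below are stand-ins for the
real Node and DistanceInterface types:

from dataclasses import dataclass

@dataclass(frozen=True)
class Node:
    node_id: int

class DictDistances:
    # hypothetical DistanceInterface backed by an adjacency dict
    def __init__(self, adjacency):
        self.adjacency = adjacency

    def get_neighbours_by_node(self, node_id):
        # returns a list of (distance, Node) pairs
        return self.adjacency.get(node_id, [])

graph = {
    0: [(1, Node(1)), (4, Node(2))],
    1: [(2, Node(2))],
}
result = Dijkstra.run(Node(0), Node(2), DictDistances(graph))
print(result["distance"])  # 3, via 0 -> 1 -> 2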
Exemplo n.º 29
0
    def _get_non_overlapping_repeating_blocks(self):
        # The LCP intervals that are calculated from the extend suffix array are all potential blocks.
        # However, some potential blocks overlap. To decide the definitive blocks, we sort the potential blocks on the
        # number of witnesses they occur in.
        potential_blocks = self.token_index.split_lcp_array_into_intervals()
        # we add all the intervals to a priority queue based on 1) number of witnesses 2) block length
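        # (PriorityQueue orders items with "<", so the interval objects are
        # assumed to implement rich comparison to realize this ranking)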
        queue = PriorityQueue()
        for interval in potential_blocks:
            queue.put(interval)

        occupied = RangeSet()
        real_blocks = []

        while not queue.empty():
            item = queue.get()
            # print(item)
            # test intersection with occupied
            potential_block_range = item._as_range()
            # check the intersection with the already occupied ranges
            block_intersection = potential_block_range.intersection(occupied)
            if not block_intersection:
                # print("Selected!")
                occupied.union_update(potential_block_range)
                real_blocks.append(Block(potential_block_range))
                continue

            # check complete overlap or partial
            if block_intersection == potential_block_range:
                # print("complete overlap; skip")
                continue

            # print("partial overlap!")
            occurrence_difference = potential_block_range.difference(block_intersection)
            # print(occurrence_difference)

            # check on left partial overlap
            # filter it

            # determine start positions
            start_pos = item.block_occurrences()

            # print(start_pos)
            resulting_difference = RangeSet()
            count = 0
            for rng in occurrence_difference.contiguous():
                if rng[0] in start_pos:
                    resulting_difference.add_range(rng[0], rng[-1] + 1)
                    count += 1
            # print(resulting_difference)

            if count < 2:
                continue

            # in case of right partial overlap
            # calculate the minimum allowed range

            minimum_length = item.length
            for rng in resulting_difference.contiguous():
                if len(rng) < minimum_length:
                    minimum_length = len(rng)

            # print(minimum_length)

            result = RangeSet()
            for rng in resulting_difference.contiguous():
                result.add_range(rng[0], rng[0] + minimum_length)
            # print("Selecting partial result: "+str(result))

            occupied.union_update(result)
            real_blocks.append(Block(result))

        return real_blocks
Exemplo n.º 30
0
    def __init__(self, k):
        self.k = k
        from queue import PriorityQueue
        self.pq = PriorityQueue()