Example #1
    def __init__(self,
                 parser=None,
                 save=None,
                 sent=None,
                 requester=None,
                 login=None,
                 url_filter=None,
                 proxy=None,
                 queue_parse_size=-1,
                 queue_save_size=-1,
                 queue_proxy_size=-1):

        self._cust_parser = parser  # parser instance, subclass of Parser, or None
        self._cust_saver = save  # saver instance, subclass of Saver, or None
        self._cust_request = requester  # requester instance, subclass of Requester
        self._cust_proxy = proxy  # proxy server instance
        self._cust_sent = sent  # sent instance, subclass of Senter
        self._cust_login = login  # login instance
        self._url_filter = url_filter  # default: None, also can be UrlFilter()

        self._thread_REQUEST_list = []  # list of url-fetching threads; its length is set in start_working()
        self._thread_parser = None  # parser thread, None if there is no parser instance
        self._thread_saver = None  # saver thread, None if there is no saver instance
        self._thread_senter = None  # senter thread, None if there is no sent instance
        self._thread_requester = None  # requester thread
        self._thread_proxy = None  # proxy thread
        self._thread_stop_flag = False  # default: False, stop flag of threads
        self._login_flag = False
        self._cookie = {}
        self._queue_deal = queue.PriorityQueue(-1)
        self._queue_url = queue.PriorityQueue(-1)
        self._queue_parse = queue.PriorityQueue(queue_parse_size)
        self._queue_save = queue.PriorityQueue(queue_save_size)
        self._queue_proxy = queue.PriorityQueue(queue_proxy_size)
        self._stop_spider = False
        self._lock = threading.Lock()  # the lock which self._number_dict needs
        self._number_dict = {
            TypeEnum.URL_DEAL_RUN: 0,  # the count of urls which are being deduplicated
            TypeEnum.URL_DEAL_NOT: 0,  # the count of urls which haven't been deduplicated
            TypeEnum.URL_DEAL_SUCC: 0,  # the count of urls which have been deduplicated successfully
            TypeEnum.URL_DEAL_FAIL: 0,  # the count of urls which failed deduplication
            TypeEnum.URL_REQUEST_RUN: 0,  # the count of running request tasks
            TypeEnum.URL_REQUEST_NOT: 0,  # the count of urls which haven't been requested
            TypeEnum.URL_REQUEST_SUCC: 0,  # the count of urls which have been requested successfully
            TypeEnum.URL_REQUEST_FAIL: 0,  # the count of urls which failed to be requested
            TypeEnum.HTM_PARSE_RUN: 0,  # the count of running parse tasks
            TypeEnum.HTM_PARSE_NOT: 0,  # the count of urls which haven't been parsed
            TypeEnum.HTM_PARSE_SUCC: 0,  # the count of urls which have been parsed successfully
            TypeEnum.HTM_PARSE_FAIL: 0,  # the count of urls which failed to be parsed
            TypeEnum.ITEM_SAVE_RUN: 0,  # the count of running save tasks
            TypeEnum.ITEM_SAVE_NOT: 0,  # the count of items which haven't been saved
            TypeEnum.ITEM_SAVE_SUCC: 0,  # the count of items which have been saved successfully
            TypeEnum.ITEM_SAVE_FAIL: 0,  # the count of items which failed to be saved
            TypeEnum.PROXY_LIFE: 0,  # the count of proxies which are available
            TypeEnum.PROXY_FAIL: 0,  # the count of proxies which are unavailable
        }

        print("thread pool has been initialized")
        # logging.warning("thread pool has been initialized")

        self._thread_monitor = MonitorThread("monitor", self)  # monitor thread
        self._thread_monitor.setDaemon(True)
        self._thread_monitor.start()
        return
Example #2
 def initiate_queues(self, s, t):
     Q = [queue.PriorityQueue(), queue.PriorityQueue()]
     Q[0].put((0, s))
     Q[1].put((0, t))
     return Q
Example #3
# Priority queue
import queue
q = queue.PriorityQueue()

# Each element is a tuple.
# The smaller the number, the higher the priority.
# On a priority tie, the next tuple element is compared
# (insertion order alone does not break ties).

q.put((1, "work"))
q.put((2, "eat"))
q.put((-1, "drink"))

print(q.get())  # (-1, 'drink')
print(q.get())  # (1, 'work')
print(q.get())  # (2, 'eat')

# queue.LifoQueue: last-in, first-out, like a stack
# collections.deque: a double-ended queue
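# The ordering note above only guarantees FIFO among equal priorities if ties
# are broken explicitly. A minimal sketch of the usual pattern: a
# monotonically increasing counter as the second tuple element keeps equal
# priorities in insertion order and stops Python from ever comparing the
# payloads themselves (the dicts below are not orderable):
import itertools

pq = queue.PriorityQueue()
counter = itertools.count()

for item in ({"task": "work"}, {"task": "eat"}, {"task": "drink"}):
    pq.put((1, next(counter), item))  # all share priority 1

while not pq.empty():
    print(pq.get())  # pops in insertion order: work, eat, drink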
Example #4
def convert(queue):
    # drain a PriorityQueue into an ascending list (the name comes from the call below)
    lista = []
    while not queue.empty():
        lista.append(queue.get())
    return lista


import queue as Q  # assumed import; the loop below uses Q.PriorityQueue

t = 1

while (t > 0):

    enter = input().split()
    n_no = int(enter[0])      # number of vertices
    n_aresta = int(enter[1])  # number of edges

    queue = Q.PriorityQueue()

    # edges enter the queue as (weight, u, v), so they come out cheapest first
    for i in range(n_aresta):
        enter = input().split()
        queue.put((int(enter[2]), int(enter[0]), int(enter[1])))

    graph = {}
    lista = convert(queue)  # drain the queue into a weight-sorted edge list

    current = lista[0]
    graph[current[1]] = [[current[0], current[2]]]
    graph[current[2]] = [[current[0], current[1]]]

    qtd = 1
    val = current[0]
Example #5
def a_star_search(start_point, goal_point, step, robot_radius, clearance,
                  ANG_THRESHOLD, DIST_THRESHOLD):
    x_node = []
    y_node = []
    x_parent = []
    y_parent = []

    start_point = discretize(start_point[0], start_point[1], start_point[2],
                             ANG_THRESHOLD, DIST_THRESHOLD)
    goal_point = discretize(goal_point[0], goal_point[1], goal_point[2],
                            ANG_THRESHOLD, DIST_THRESHOLD)

    visited = np.zeros([
        int(300 / DIST_THRESHOLD),
        int(200 / DIST_THRESHOLD),
        int(360 / ANG_THRESHOLD)
    ])
    start = (0, start_point, None)
    goal = (0, goal_point, None)

    nodes_explored = queue.PriorityQueue()
    path_dict = {}
    nodes_explored.put(start)

    while True:
        current_node = list(nodes_explored.get())
        current_node[0] = current_node[0] - cost_to_go(current_node[1],
                                                       goal[1])

        if current_node[2] is not None:
            str1 = str(current_node[1][0])
            str2 = str(current_node[1][1])
            str3 = str(current_node[1][2])

            str4 = str(current_node[2][0])
            str5 = str(current_node[2][1])
            str6 = str(current_node[2][2])

            str_node = str1 + ',' + str2 + ',' + str3
            str_parent = str4 + ',' + str5 + ',' + str6
            path_dict[str_node] = str_parent

        else:
            str1 = str(current_node[1][0])
            str2 = str(current_node[1][1])
            str3 = str(current_node[1][2])
            str4 = str(current_node[2])

            str_node = str1 + ',' + str2 + ',' + str3
            str_parent = str4
            path_dict[str_node] = str_parent

        actions = action_step(current_node, step, ANG_THRESHOLD)

        for new_node in actions:
            angle = new_node[2] + current_node[1][2]
            if angle < 0:
                angle = angle + 360

            if angle > 360:
                angle = angle - 360

            if angle == 360:
                angle = 0

            node = (current_node[1][0] + new_node[0],
                    current_node[1][1] + new_node[1], angle)
            node = discretize(node[0], node[1], node[2], ANG_THRESHOLD,
                              DIST_THRESHOLD)
            node_cost = current_node[0] + new_node[3] + cost_to_go(
                node, goal[1])
            node_parent = current_node[1]

            if not check_obstacle(node[0], node[1], robot_radius,
                                  clearance):
                if (visited[int(node[0] / DIST_THRESHOLD)][int(
                        node[1] / DIST_THRESHOLD)][int(node[2] /
                                                       ANG_THRESHOLD)] == 0):
                    visited[int(node[0] / DIST_THRESHOLD)][int(
                        node[1] / DIST_THRESHOLD)][int(node[2] /
                                                       ANG_THRESHOLD)] = 1
                    x_node.append(node[0])
                    y_node.append(node[1])
                    x_parent.append(node_parent[0])
                    y_parent.append(node_parent[1])
                    new_node = (node_cost, node, node_parent)
                    nodes_explored.put(new_node)

        if check_goal(current_node[1][0], current_node[1][1], goal_point[0],
                      goal_point[1]):
            print("GOAL REACHED!!!")
            print("Plotting path...")
            path = []

            str_p1 = str(current_node[2][0])
            str_p2 = str(current_node[2][1])
            str_p3 = str(current_node[2][2])
            parent = str_p1 + ',' + str_p2 + ',' + str_p3

            # walk back through path_dict; each key is an "x,y,theta" string,
            # so the branches below recover x and y by locating the decimal points
            while parent != "None":
                temp = path_dict.get(parent)
                if (parent[1] == '.' and parent[5] == '.'):
                    par_1 = float(parent[0]) + float(parent[2]) / 10
                    par_2 = float(parent[4]) + float(parent[6]) / 10
                if (parent[2] == '.' and parent[7] == '.'):
                    par_1 = float(parent[0] +
                                  parent[1]) + float(parent[3]) / 10
                    par_2 = float(parent[5] +
                                  parent[6]) + float(parent[8]) / 10
                if (parent[1] == '.' and parent[6] == '.'):
                    par_1 = float(parent[0]) + float(parent[2]) / 10
                    par_2 = float(parent[4] +
                                  parent[5]) + float(parent[7]) / 10
                if (parent[2] == '.' and parent[6] == '.'):
                    par_1 = float(parent[0] +
                                  parent[1]) + float(parent[3]) / 10
                    par_2 = float(parent[5]) + float(parent[7]) / 10
                if (parent[3] == '.' and parent[9] == '.'):
                    par_1 = float(parent[0] + parent[1] +
                                  parent[2]) + float(parent[4]) / 10
                    par_2 = float(parent[6] + parent[7] +
                                  parent[8]) + float(parent[10]) / 10
                if (parent[3] == '.' and parent[7] == '.'):
                    par_1 = float(parent[0] + parent[1] +
                                  parent[2]) + float(parent[4]) / 10
                    par_2 = float(parent[6]) + float(parent[8]) / 10
                if (parent[3] == '.' and parent[8] == '.'):
                    par_1 = float(parent[0] + parent[1] +
                                  parent[2]) + float(parent[4]) / 10
                    par_2 = float(parent[6] +
                                  parent[7]) + float(parent[9]) / 10
                if (parent[1] == '.' and parent[7] == '.'):
                    par_1 = float(parent[0]) + float(parent[2]) / 10
                    par_2 = float(parent[4] + parent[5] +
                                  parent[6]) + float(parent[8]) / 10
                if (parent[2] == '.' and parent[8] == '.'):
                    par_1 = float(parent[0] +
                                  parent[1]) + float(parent[3]) / 10
                    par_2 = float(parent[5] + parent[6] +
                                  parent[7]) + float(parent[9]) / 10
                path.append((par_1, par_2))
                parent = temp
                if ((par_1, par_2) == (start_point[0], start_point[1])):
                    break

            path.append((start_point[0], start_point[1]))

            return path, x_node, y_node, x_parent, y_parent
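# The backtrace above re-parses "x,y,theta" strings by the positions of their
# decimal points, which is fragile. Since the discretized nodes are plain
# tuples, they are hashable and could key path_dict directly. A minimal sketch
# of that variant (not the code used above):

def backtrace(path_dict, last_node, start_node):
    """Walk tuple -> parent-tuple links from last_node back to start_node."""
    path = [last_node]
    while path[-1] != start_node:
        path.append(path_dict[path[-1]])
    path.reverse()
    return path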
Example #6
def uniformCostOrAStar(sleeptime=0, astar=True, tracepath=False):
    start = datetime.datetime.now()  # Starts a timer
    resetColors()  # Recolor the previously shown pathway (if there is one) back to white
    board.resetVisitorFlags()  # Resets the visitor flags back to False
    # Both A* and UCS use a PriorityQueue as agenda, which is why both are combined in one function
    agenda = queue.PriorityQueue()
    previous_pathway = []
    # If the algorithm is A* then we will incorporate a heuristic, we will subtract the board.start value since this
    # will be added later on. If the algorithm is UCS then we will not incorporate the heuristic. An item in the agenda
    # will look like this (value, [travelled_pathway]) where value is either heuristic + travelled distance (A*) or just
    # travelled distance in case of UCS.
    if astar:
        agenda.put(
            (calculateHeuristic(board.start, board.destination) -
             board.board[board.start[0]][board.start[1]], [board.start]))
        alg = "Algorithm A*"
    else:
        agenda.put(
            [-board.board[board.start[0]][board.start[1]], [board.start]])
        alg = "Uniform Cost Search"
    while agenda.qsize() > 0:
        pointer = agenda.get()  # Gets the first item from the priorityqueue
        pathway, current_point = pointer[1], pointer[1][-1]
        shortest_distance_to_current = board.visited[current_point[0]][
            current_point[1]]
        travelled_distance = pointer[0] + board.board[current_point[0]][
            current_point[1]]
        if astar:
            # In the A* algorithm the heuristic is incorporated in the first value of the agenda
            travelled_distance -= calculateHeuristic(current_point,
                                                     board.destination)
        if (str(shortest_distance_to_current).isdigit()
                and travelled_distance >= shortest_distance_to_current):
            # If this point has already been visited and the current travelled distance is not
            # shorter than the previous shortest distance to it, we do not continue with this pathway
            continue
        # Else we will update the shortest distance to this point to the current travelled distance
        board.visited[current_point[0]][current_point[1]] = travelled_distance
        # If the tracepath is selected on the GUI then we will update the pathway each iteration
        if tracepath:
            time.sleep(sleeptime)
            # Delete the part of the previous pathway which isn't present in the current pathway
            unshowCalculation(
                [coord for coord in previous_pathway if coord not in pathway])
            # Show the part of the current pathway which isn't active yet
            showCalculation(
                [coord for coord in pathway if coord not in previous_pathway])
        if current_point == board.destination:  # If we have reached the destination
            if not tracepath:  # Show the pathway if tracepath was disabled, otherwise it is already shown
                showCalculation(pathway)
            messagebox.config(
                state='normal')  # Allow us to make a change in the messagebox
            messagebox.insert(
                'end',
                f'Algorithm: {alg}\nTotal Distance: {travelled_distance}\n'
                f'Elapsed time: {datetime.datetime.now() - start}\n'
            )  # update messagebox
            messagebox.config(
                state='disabled'
            )  # Close access to messagebox again (prevents user input/disrupt)
            return
        # If we are not at the destination yet then we will add all possible and viable neighbors to the agenda
        # Viable neighbors are those which are on the board, which are not obstacles, which are not in the pathway
        # yet (prevents cycles) and which have not been visited yet or have only been visited by a longer path
        for point in filter(
                lambda point: (not str(board.board[point[0]][point[1]]).isalpha()
                               and point not in pathway
                               and (not board.visited[point[0]][point[1]]
                                    or travelled_distance > board.visited[point[0]][point[1]])),
                findNeighbors(current_point, board.size)):
            if astar:
                agenda.put((calculateHeuristic(point, board.destination) +
                            travelled_distance, pathway + [point]))
            else:
                agenda.put((travelled_distance, pathway + [point]))
        previous_pathway = pathway
    # This will only execute when no pathway has been found
    unshowCalculation(
        previous_pathway[1:])  # Decolor the last pathway of the calculation
    messagebox.config(
        state='normal')  # Allow us to make a change in the messagebox
    messagebox.insert('end', f'Algorithm: {alg} \nNo pathway found!\n')
    messagebox.config(
        state='disabled'
    )  # Close access to messagebox again (prevents user input/disrupt)
    return
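# The function above differs between A* and UCS only in the agenda key:
# f(n) = g(n) + h(n) for A*, f(n) = g(n) for UCS. A stripped-down sketch of
# the same idea on a plain dict-of-dicts graph (the names here are
# illustrative, not taken from the code above):
import queue

def best_first(graph, start, goal, h=lambda n: 0):
    """Uniform Cost Search when h is zero, A* when h is admissible."""
    agenda = queue.PriorityQueue()
    agenda.put((h(start), 0, [start]))  # (f, g, path)
    best_g = {}
    while not agenda.empty():
        _, g, path = agenda.get()
        node = path[-1]
        if node == goal:
            return g, path
        if node in best_g and best_g[node] <= g:
            continue  # already reached this node at least as cheaply
        best_g[node] = g
        for neighbor, cost in graph.get(node, {}).items():
            agenda.put((g + cost + h(neighbor), g + cost, path + [neighbor]))
    return None

# e.g. best_first({"a": {"b": 1, "c": 4}, "b": {"c": 1}}, "a", "c") -> (2, ['a', 'b', 'c'])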
Example #7
def Solve_ThiningAstar(maze, start, goal, dim, prob):
    print("Thining A star")
    maze_org = copy.deepcopy(maze)
    assert (prob > 0.02)
    blocked_nodes = int(dim * dim * prob)
    new_blocked_nodes = int(dim * dim * (prob * 0.5))  # reduce the number of blockages by 50% of the current value
    blockage_to_remove = blocked_nodes - new_blocked_nodes

    # Simplified maze
    while blockage_to_remove > 0:
        row = random.randint(0, dim - 1)
        column = random.randint(0, dim - 1)
        while maze[row][column] == 0:
            row = random.randint(0, dim - 1)
            column = random.randint(0, dim - 1)
        maze[row][column] = 0
        blockage_to_remove -= 1

    ## Print the new maze
    #print("Simplified maze")
    #for row in range(0,dim):
    #    for column in range(0,dim):
    #        print("%d "%maze[row][column] ,end="")
    #    print()

    total_eu = math.sqrt(2 * ((dim - 1)**2))
    assert (maze[0][0] == 0) and (maze[dim - 1][dim - 1] == 0)
    Path_queue = queue.PriorityQueue()
    Path_queue.put((total_eu, [start]))
    Visited = set()
    Done_find = False
    path = []
    explored_nodes = []
    # Pick the first entry from the priority queue
    # Get its non-blocked children by running A* on the thinned maze.
    # Add the children to the priority queue based on the heuristic distance
    # If goal is found or queue is empty, exit the loop and return the path
    while not Path_queue.empty() and not Done_find:
        path_full = Path_queue.get()
        path = path_full[-1]
        node = path[-1]
        explored_nodes.append(node)
        if node == goal:
            break
        elif node not in Visited:
            for node_child in GetChildNodeforParentWithThining(
                    maze_org, maze, node, dim, (dim * dim) - 1, "Manhattan",
                    Visited):
                if node_child[0] not in Visited:
                    path_1 = list(path)
                    path_1.append(node_child[0])
                    Path_queue.put((node_child[1], path_1))
                    if node_child[0] == goal:
                        Done_find = True
                        path = path_1
                        break
        Visited.add(node)

    return path, maze_org, explored_nodes
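# Hypothetical call, assuming a dim x dim 0/1 maze with 1 marking a blockage
# (GetChildNodeforParentWithThining comes from the same project):
#
#     path, original_maze, explored = Solve_ThiningAstar(
#         maze, (0, 0), (dim - 1, dim - 1), dim, prob=0.3)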
Example #8
import queue

q1 = queue.Queue(5)  # create a first-in, first-out queue
q2 = queue.LifoQueue(5)  # create a last-in, first-out queue
q3 = queue.PriorityQueue(5)  # priority queue

# put(): add an item
q1.put(1)

# get(): remove and return an item
q1.get()

# qsize(): current length of the queue; not 100% accurate
print(q1.qsize())

# empty(): whether the queue is empty, returns a boolean
print(q1.empty())

# full(): whether the queue is full, returns a boolean
print(q1.full())
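# qsize()/empty()/full() are only snapshots. The non-blocking variants raise
# exceptions instead, which is usually the safer check:
try:
    q1.put_nowait(2)        # same as q1.put(2, block=False)
except queue.Full:
    print("q1 is full")

try:
    print(q1.get_nowait())  # same as q1.get(block=False)
except queue.Empty:
    print("q1 is empty")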
Example #9
def search(model, vocab, prob_sequence, sequence, post_stress, state, session, \
                temp, dictMeters, fsaLine, dictWordTransitions, dictPartSpeechTags, breadth, wordPool):
    def beamSearchOneLevel(model, vocab, prob_sequence, sequence, post_stress, state, session, \
                    temp, dictMeters, fsaLine, dictWordTransitions, dictPartSpeechTags, breadth, wordPool):
        def decayRepeat(word,sequence, scale):
            safe_repeat_words = []
            #safe_repeat_words = set(["with,the,of,in,i"])
            score_adjust = 0
            decr = -scale
            for w in range(len(sequence)):
                if(word==sequence[w] and word not in safe_repeat_words):
                    score_adjust += decr
                decr += scale/10 #decreases penalty as the words keep getting further from the new word
            return score_adjust
        def partsOfSpeechFilter(word1,word2,dictPartSpeechTags,dictPossiblePartsSpeech):
            okay_tags = set(["RB","RBR","RBS"]) #THESE ARE THE ADVERBS
            tag1 = dictPartSpeechTags[word1]
            tag2 = dictPartSpeechTags[word2]
            #if(tag1==tag2 and tag1 not in okay_tags):
            #    return True
            if(tag1 not in dictPossiblePartsSpeech[tag2]):
                return True
            else:
                return False
        if(post_stress==0):
            return("begin_line")
        ret = []
        scale = .02 #scale is the significant magnitude required to affect the score of bad/good things
        dist, state = model.compute_fx(session, vocab, prob_sequence, sequence, state, temp)
        for pred_stress in list(fsaLine[post_stress].prevs):
            word_set = set([])
            for word in dictWordTransitions[(pred_stress,post_stress)]:
                #PREVENTS REPEAT ADJACENT WORDS OR PROBLEM-TAGGED WORDS
                if(word == sequence[0]):
                    continue
                if(partsOfSpeechFilter(word,sequence[0],dictPartSpeechTags,dictPossiblePartsSpeech)):
                    continue
                #FACTORS IN SCORE ADJUSTMENTS
                score_adjust = decayRepeat(word, sequence, 100*scale) #repeats
                score_adjust += scale*len(word)/50 #length word
                if(word in wordPool):
                    score_adjust += scale
                #CALCULATES ACTUAL SCORE
                key = np.array([[vocab[word]]])
                new_prob = dist[key]
                score_tuple = (new_prob, state)
                score_tup = (score_tuple[0]+score_adjust,score_tuple[1]) #NOTE SHOULD SCORE_ADJUST BE ADDED HERE OR JUST IN THE ITEM LINE?
                item = (score_tup[0],(score_tup, [word]+sequence, pred_stress))
                if(item[0]==[[-float("inf")]]):
                    continue
                ret+=[item]
        return ret
    masterPQ = Q.PriorityQueue()
    checkList = []
    checkSet = set([])
    score_tuple = (prob_sequence, state)
    first = (score_tuple[0],(score_tuple, sequence, post_stress))
    masterPQ.put(first)#initial case
    set_explored = set([])
    while(not masterPQ.empty()):
        depthPQ = Q.PriorityQueue()
        while(not masterPQ.empty()):
            try:
                next_search = masterPQ.get()
            except Q.Empty:
                continue
            possible_branches = beamSearchOneLevel(model, vocab, next_search[1][0][0], next_search[1][1], next_search[1][2],\
                                next_search[1][0][1], session, temp, dictMeters, fsaLine, dictWordTransitions,\
                                dictPartSpeechTags, breadth, wordPool)
            if(possible_branches == "begin_line"):
                checkList+=[next_search]
                continue
            for branch in possible_branches:
                if(branch == []):
                    continue
                test = tuple(branch[1][1]) #need to make sure each phrase is being checked uniquely (want it to be checked once in possible branches then never again)
                if(test in set_explored):
                    continue
                set_explored.add(test)
                depthPQ.put(branch)
                try:
                    if(depthPQ.qsize()>breadth):
                        depthPQ.get()
                except Q.Empty:
                    pass
        masterPQ = depthPQ
    return checkList
Example #10
File: api.py Project: zhixd83/PyODM
    def download_zip(self,
                     destination,
                     progress_callback=None,
                     parallel_downloads=16,
                     parallel_chunks_size=10):
        """Download this task's assets archive to a directory.

        Args:
            destination (str): directory where to download assets archive. If the directory does not exist, it will be created.
            progress_callback (function): an optional callback with one parameter, the download progress percentage.
            parallel_downloads (int): maximum number of parallel downloads if the node supports http range.
            parallel_chunks_size (int): size in MB of chunks for parallel downloads
        Returns:
            str: path to archive file (.zip)
        """
        info = self.info()
        if info.status != TaskStatus.COMPLETED:
            raise NodeResponseError("Cannot download task, task status is " +
                                    str(info.status))

        if not os.path.exists(destination):
            os.makedirs(destination, exist_ok=True)

        try:
            download_stream = self.get('/task/{}/download/all.zip'.format(
                self.uuid),
                                       stream=True)
            headers = download_stream.headers

            zip_path = os.path.join(
                destination, "{}_{}_all.zip".format(self.uuid,
                                                    int(time.time())))

            # Keep track of download progress (if possible)
            content_length = download_stream.headers.get('content-length')
            total_length = int(
                content_length) if content_length is not None else None
            downloaded = 0
            chunk_size = int(parallel_chunks_size * 1024 * 1024)
            use_fallback = False
            accept_ranges = headers.get('accept-ranges')

            # Can we do parallel downloads?
            if (accept_ranges is not None and accept_ranges.lower() == 'bytes'
                    and total_length is not None and total_length > chunk_size
                    and parallel_downloads > 1):
                num_chunks = int(math.ceil(total_length / float(chunk_size)))
                num_workers = parallel_downloads

                class nonloc:
                    completed_chunks = AtomicCounter(0)
                    merge_chunks = [False] * num_chunks
                    error = None

                def merge():
                    current_chunk = 0

                    with open(zip_path, "wb") as out_file:
                        while current_chunk < num_chunks and nonloc.error is None:
                            if nonloc.merge_chunks[current_chunk]:
                                chunk_file = "%s.part%s" % (zip_path,
                                                            current_chunk)
                                with open(chunk_file, "rb") as fd:
                                    out_file.write(fd.read())

                                os.unlink(chunk_file)

                                current_chunk += 1
                            else:
                                time.sleep(0.1)

                def worker():
                    while True:
                        task = q.get()
                        part_num, bytes_range = task
                        if bytes_range is None or nonloc.error is not None:
                            q.task_done()
                            break

                        try:
                            # Download chunk
                            res = self.get(
                                '/task/{}/download/all.zip'.format(self.uuid),
                                stream=True,
                                headers={'Range': 'bytes=%s-%s' % bytes_range})
                            if res.status_code == 206:
                                with open("%s.part%s" % (zip_path, part_num),
                                          'wb') as fd:
                                    for chunk in res.iter_content(4096):
                                        fd.write(chunk)

                                with nonloc.completed_chunks.lock:
                                    nonloc.completed_chunks.value += 1

                                    if progress_callback is not None:
                                        progress_callback(
                                            100.0 *
                                            nonloc.completed_chunks.value /
                                            num_chunks)

                                nonloc.merge_chunks[part_num] = True
                            else:
                                nonloc.error = RangeNotAvailableError()
                        except OdmError as e:
                            time.sleep(5)
                            q.put((part_num, bytes_range))
                        except Exception as e:
                            nonloc.error = e
                        finally:
                            q.task_done()

                q = queue.PriorityQueue()
                threads = []
                for i in range(num_workers):
                    t = threading.Thread(target=worker)
                    t.start()
                    threads.append(t)

                merge_thread = threading.Thread(target=merge)
                merge_thread.start()

                range_start = 0

                for i in range(num_chunks):
                    range_end = min(range_start + chunk_size - 1,
                                    total_length - 1)
                    q.put((i, (range_start, range_end)))
                    range_start = range_end + 1

                # block until all tasks are done
                q.join()

                # stop workers: priority -1 sorts ahead of any chunk task
                for i in range(len(threads)):
                    q.put((-1, None))
                for t in threads:
                    t.join()

                merge_thread.join()

                if nonloc.error is not None:
                    if isinstance(nonloc.error, RangeNotAvailableError):
                        use_fallback = True
                    else:
                        raise nonloc.error
            else:
                use_fallback = True

            if use_fallback:
                # Single connection, boring download
                with open(zip_path, 'wb') as fd:
                    for chunk in download_stream.iter_content(4096):
                        downloaded += len(chunk)

                        if progress_callback is not None and total_length is not None:
                            progress_callback(
                                (100.0 * float(downloaded) / total_length))

                        fd.write(chunk)

        except (requests.exceptions.Timeout,
                requests.exceptions.ConnectionError, ReadTimeoutError) as e:
            raise NodeConnectionError(e)

        return zip_path
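# Hypothetical usage, assuming a running NodeODM instance and the PyODM
# Node/Task workflow this method belongs to:
#
#     from pyodm import Node
#     node = Node("localhost", 3000)
#     task = node.create_task(["image1.jpg", "image2.jpg"])
#     task.wait_for_completion()
#     zip_path = task.download_zip("./results", progress_callback=print)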
Example #11
import numpy as np
import queue

with open("input") as f:
    cave = np.array([[int(c) for c in l] for l in f.read().splitlines()])

h, w = cave.shape
risks = np.full(cave.shape, -1)  # -1 marks cells not yet reached
frontier = queue.PriorityQueue(cave.size)

frontier.put((0, (0, 0)))
risks[0, 0] = 0
while not frontier.empty():
    risk, (y, x) = frontier.get()
    if (y, x) == (h - 1, w - 1):
        break

    for ny, nx in [(y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1)]:
        if 0 <= ny < h and 0 <= nx < w:
            if risks[ny, nx] == -1:
                risks[ny, nx] = risk + cave[ny, nx]
                frontier.put((risks[ny, nx], (ny, nx)))

print(risks[h - 1, w - 1])
Example #12
import random
import queue as q

grid = [[0] * 3 for i in range(3)]
grid_sol = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
prev_move = None
Q = q.PriorityQueue()


def prettyPrint(grid):
    print("====================================")
    for arr in grid:
        for i in range(3):
            print(arr[i], end=" ")
        print()
    print("=====================================")


def whereis(grid, x):
    for arr in grid:
        try:
            j = arr.index(x)
            i = grid.index(arr)
        except ValueError:
            continue

    return i, j


def getData():
    print(
Example #13
import queue

arr = eval(input())
min_heap = queue.PriorityQueue()

for num in arr:
    min_heap.put(num)

pre = 0
while not min_heap.empty():  # pop in ascending order; report the first gap in the positives
    cur = min_heap.get()
    if cur - 1 > 0 and cur != pre + 1:
        print(pre + 1)
        break
    pre = cur
Example #14
 def __init__(self, procs_map):
     self.events = queue.PriorityQueue()
     self.procs = dict(procs_map)
Example #15
 def clear(self):
     try:
         import queue
     except ImportError:
         import Queue as queue
     self._queue = queue.PriorityQueue()
Example #16
# Last-in, first-out queue

import queue
q_lifo = queue.LifoQueue()

for i in range(4):
    q_lifo.put_nowait(i)

while not q_lifo.empty():
    print(q_lifo.get(timeout=1))  # wait up to 1 second for data, then raise queue.Empty

# Priority queue

q_PQ = queue.PriorityQueue()

q_PQ.put((1, "LC"))  # tuples are compared by their first element; the smaller wins
q_PQ.put((8, "Jack"))
# q_PQ.put((6, "Mark"))
# q_PQ.put((4, "Joe"))

print(q_PQ.queue[0][1])

#
# print(q_PQ.queue[3])
#
# while q_PQ.qsize():
#     print(q_PQ.get())
Example #17
    return result

# assumed imports for this fragment; score() and distance() used below come
# from the surrounding project
import queue
from collections import Counter

letters = Counter(input().lower())
words = []
for word in open('/usr/share/dict/words').read().split():
    word = word.lower()
    if not (Counter(word) - letters):
        words.append(word)

words.sort()
words.sort(key=len)
#for word in words:
#    print(word)

words = [word for word in words if 5 <= len(word) <= 8]
paths = queue.PriorityQueue()
paths.put((0, 0, ['']))

done_paths = []

while len(done_paths) < 50000:
    neg_score, dist, path = paths.get_nowait()
    if dist > 120:
        done_paths.append((-neg_score, dist, path))
        continue
    for next_word in words:
        if next_word in path: continue
        next_neg_score = neg_score - score(len(next_word))
        next_dist = dist + distance(path[-1], next_word)
        paths.put((next_neg_score, next_dist, path + [next_word]))
Example #18
def greedy_search(
    task,
    default_parameters,
    parameters,
    initial_population=3,
    limit=10,
    report=None,
    report_best=None,
):
    import itertools
    names = [k for k, _ in parameters.items()]
    values = [v for _, v in parameters.items()]
    all_params = list(itertools.product(*values))
    random.shuffle(all_params)
    for r in all_params:
        print(r)
    best = {'eval': None, 'params': None, 'artifact': None}

    import queue
    open_list = queue.PriorityQueue()
    close_list = {}
    results = []  # for displaying

    def random_configs():
        import random
        while True:
            yield {k: _select(v) for k, v in parameters.items()}

    def neighbors(parent):
        import random
        results = []
        for k in names:
            for kv in parameters[k]:
                if parent[k] is not kv:
                    other = parent.copy()
                    other[k] = kv
                    results.append(other)
        return results

    def _key(local_parameters):
        return tuple(local_parameters[k] for k in names)

    def _iter(local_parameters):
        artifact, eval = task(merge_hash(default_parameters, local_parameters))
        _update_best(artifact, eval, local_parameters, results, best, report,
                     report_best)
        #
        close_list[_key(local_parameters)] = eval  # tuples are hashable
        open_list.put((eval, local_parameters))

    try:
        for i, local_parameters in zip(range(initial_population),
                                       random_configs()):
            _iter(local_parameters)
        for i in range(initial_population, limit):
            if open_list.empty():
                break
            _, parent = open_list.get()
            children = neighbors(parent)

            open_children = []
            for c in children:
                if _key(c) not in close_list:
                    open_children.append(c)

            _iter(_select(open_children))
    finally:
        print("Best parameter:\n{}\neval: {}".format(best['params'],
                                                     best['eval']))
        print(results)
    return best['artifact'], best['params'], best['eval']
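# Hypothetical invocation: `task` must return (artifact, eval) for a merged
# parameter dict, and _select / merge_hash / _update_best come from the
# surrounding module:
#
#     best_artifact, best_params, best_eval = greedy_search(
#         train_and_score,                          # task(params) -> (artifact, eval)
#         {"epochs": 10},                           # defaults merged into every trial
#         {"lr": [1e-2, 1e-3], "batch": [32, 64]})  # search space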
Example #19
 def __init__(self):
     self.q = queue.PriorityQueue()
     self.count = 0
Example #20
# assumed imports for this fragment; EuclideanDist and node used below come
# from the surrounding project
import pickle
import sys
import queue as Q

data = pickle.load(open('pickles/vectorlists.pickle', 'rb'))

fonts = []
for d in data[1:]:
    fonts.append(d[0])

fonts = [f.lower() for f in fonts]
#print(fonts)

transformed = pickle.load(open('pickles/TSNE-trained.pickle', 'rb'))

xs = transformed[:, 0]
ys = transformed[:, 1]
zs = transformed[:, 2]

font = sys.argv[1]
font = font.lower()

idx = fonts.index(font)

distances = Q.PriorityQueue()

for i in range(len(fonts)):
    if i != idx:
        dist = EuclideanDist(xs[i], ys[i], zs[i], xs[idx], ys[idx], zs[idx])
        distances.put(node(i, dist))

for i in range(4):
    curr = distances.get()
    print(curr.i, fonts[curr.i], curr.dist)
Example #21
    def calculate_shortest_paths(self, start: Vertex) -> dict[Vertex, _Path]:
        """Calculate shortest paths from vertex "start" to all others."""
        logger.info("calculating shortest paths from vertex: {}".format(start))
        assert start in self._vertices, "unexpected vertex: {}".format(start)

        shortest_paths: dict[Vertex, _PathRef] = {}
        next_path_by_distance: queue.PriorityQueue[_Path] = (
            queue.PriorityQueue())

        # add all vertices connected to the start vertex to "next" queue
        for e in self._vertices_outgoing_edges.get(start, []):
            logger.debug("adding initial edge: {}".format(e))
            next_path_by_distance.put(e)

        # implicit shortest path to starting node is 0
        shortest_paths[start] = _PathRef(Edge(start, start, 0))

        # pop the next path.  If we've never seen the destination vertex it
        # is a shortest path so add a result and add its edges to next
        # vertices.  If we have seen it check to see if this is an alternate
        # shortest path to the vertex, but then add no next vertex.  Do this
        # until there is no more work (no more items in queue).
        while not next_path_by_distance.empty():
            next_path = next_path_by_distance.get_nowait()
            vertex = next_path.get_destination_vertex()
            logger.debug("popped next path: {}.  shortest paths: {}".format(
                next_path, shortest_paths))
            if vertex in shortest_paths:
                logger.debug("vertex in shortest paths")
                existing_path_ref = shortest_paths[vertex]
                existing_path = existing_path_ref.get()
                assert existing_path.get_distance() <= next_path.get_distance()
                if existing_path.get_distance() == next_path.get_distance():
                    logger.debug("new alternative path")
                    # we have an alternate shortest path to this vertex
                    new_alternatives = _PathAlternatives(
                        existing_path_ref.copy(), _PathRef(next_path))
                    logger.debug(
                        ("replacing path with alternatives.  "
                         "Original path: {}.  New alternatives: {}").format(
                            existing_path,
                            next_path
                        ))
                    existing_path_ref.set(new_alternatives)
                else:
                    logger.debug("this is a longer path than exists")
                # do not add any new paths to search
            else:
                # a new shortest path!
                logger.debug("vertex NOT in shortest paths")
                path_ref = _PathRef(next_path)
                shortest_paths[vertex] = path_ref
                # add all outgoing edges to next paths.
                for e in self._vertices_outgoing_edges[vertex]:
                    new_path = _PathSequence(
                        path_ref,
                        e)
                    logger.debug("adding additional path: {}".format(new_path))
                    next_path_by_distance.put_nowait(new_path)

        # unpack the path refs to return
        ret: dict[Vertex, _Path] = {}
        for (k, v,) in shortest_paths.items():
            ret[k] = v.get()
        return ret
Example #22
# -*- coding: utf-8 -*-
import collections
import threading
import time
import json
import queue
import database
import pymysql
from request import BaseRequest
from urllib import parse

category_map = [
    '热血', '格斗', '魔法', '侦探', '竞技', '恐怖', '战国', '魔幻', '冒险', '校园', '搞笑', '少女',
    '少男', '科幻', '港产', '其他'
]
priority_queue = queue.PriorityQueue()  # queue holding the URLs to crawl
threads = []


# queue item with an explicit priority
class Job(object):
    def __init__(self, priority, description, url=None):
        self.priority = priority
        self.description = description
        self.url = url
        return

    def __lt__(self, other):
        return self.priority < other.priority
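# With __lt__ defined, Job instances can go straight into the PriorityQueue
# and come out lowest priority number first (descriptions are illustrative):
priority_queue.put(Job(3, "download chapter list"))
priority_queue.put(Job(1, "fetch category index"))
print(priority_queue.get().description)  # -> fetch category index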

Example #23
def getPriorityQueue(list):
    # Q is "import queue as Q"; Ordered_Node and heuristics come from the surrounding project
    q = Q.PriorityQueue()
    for node in list:
        q.put(Ordered_Node(heuristics[node], node))
    return q, len(list)
Example #24
 def __init__(self):
     SimpleChannel.__init__(self)
     self.name=common.random_string(l_min=20,l_max=20)
     self._sequence=common.SerializedSequence("channel_"+self.name)
     self._queue=queue.PriorityQueue()
Example #25
import queue

with open("C-small-1-attempt0.in") as f:
    with open("output.txt", "w") as g:
        t = int(f.readline().strip())
        for i in range(1, t + 1):
            [n, k] = map(int, f.readline().split())
            my_queue = queue.PriorityQueue()
            my_queue.put(-n)  # store negatives to simulate a max-heap
            for _ in range(k):
                tmp = -my_queue.get() - 1  # take the largest value and remove one unit
                a = tmp // 2 + tmp % 2     # larger half
                b = tmp // 2               # smaller half
                my_queue.put(-a)
                my_queue.put(-b)
            g.write("Case #{}: {} {}\n".format(i, a, b))
Example #26
import subprocess
import re        # assumed import; used by re_point below
import queue     # assumed import; used by task_queue below
import requests  # assumed import; used in wait_for_slot below

from matcher import (wikipedia, database, wikidata_api, netstring, mail,
                     overpass, space_alert, model)
from time import time, sleep
from datetime import datetime
from matcher.place import Place, PlaceMatcher, bbox_chunk
from matcher.view import app

app.config.from_object('config.default')
database.init_app(app)
re_point = re.compile(r'^Point\(([-E0-9.]+) ([-E0-9.]+)\)$')

active_jobs = {}

task_queue = queue.PriorityQueue()


def wait_for_slot(send_queue):
    print('get status')
    try:
        status = overpass.get_status()
    except overpass.OverpassError as e:
        r = e.args[0]
        body = f'URL: {r.url}\n\nresponse:\n{r.text}'
        mail.send_mail('Overpass API unavailable', body)
        send_queue.put({'type': 'error', 'error': "Can't access overpass API"})
        return False
    except requests.exceptions.Timeout:
        body = 'Timeout talking to overpass API'
        mail.send_mail('Overpass API timeout', body)
Example #27
 def __init__(self, heuristic: 'Heuristic'):
     super().__init__()
     self.heuristic = heuristic
     self.frontier = queue.PriorityQueue()
     self.frontier_set = set()
Example #28
    clearbuttons()


def showError(ts):
    LOG("Showerror!!!  Game Over")
    LOG("total numreadfails = " + str(numreadfails))
    AddCmd(ts + 0.2, CMD_FLASH_COLOR, [SIMON_ERROR, 0.5, True])
    AddCmd(ts + 0.8, CMD_FLASH_COLOR, [SIMON_ERROR, 0.5, True])
    AddCmd(ts + 1.4, CMD_FLASH_COLOR, [SIMON_ERROR, 0.5, True])


#################################################################
# our queue and queue functions, definitions, etc
#

cmdq = queue.PriorityQueue(0)
nextCmd = None


# adds a command to the queue sorted by ts, keeping nextCmd as the next one in time sequence
def AddCmd(cmdAt, cmd, data):
    global nextCmd
    global cmdq
    #LOG("added command " + str(cmd) + " " + str(data) + " at " + str(cmdAt))
    if nextCmd is None:
        nextCmd = {'ts': cmdAt, 'cmd': cmd, 'data': data}
    elif cmdAt < nextCmd['ts']:
        cmdq.put((nextCmd['ts'], nextCmd['cmd'], nextCmd['data']))
        nextCmd = {'ts': cmdAt, 'cmd': cmd, 'data': data}
    else:
        cmdq.put((cmdAt, cmd, data))
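# Illustrative use: commands execute in timestamp order no matter the order
# they were added, and nextCmd always caches the earliest one:
#
#     AddCmd(ts + 0.4, CMD_FLASH_COLOR, [SIMON_ERROR, 0.5, True])
#     AddCmd(ts + 0.2, CMD_FLASH_COLOR, [SIMON_ERROR, 0.5, True])
#     # nextCmd now holds the ts + 0.2 command; the ts + 0.4 one sits in cmdq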
Example #29
def ksp(graph, source, target, num_k, weight=None):
    graph_copy = graph.copy()
    # Shortest path from the source to the target
    A = [nx.shortest_path(graph_copy, source, target, weight=weight)]
    A_costs = [path_cost(graph_copy, A[0], weight)]

    # Initialize the heap to store the potential kth shortest path
    B = queue.PriorityQueue()

    for k in range(1, num_k):
        # The spur node ranges from the first node to the next to last node in the shortest path
        try:
            for i in range(len(A[k - 1]) - 1):
                # Spur node is retrieved from the previous k-shortest path, k - 1
                spurNode = A[k - 1][i]
                # The sequence of nodes from the source to the spur node of the previous k-shortest path
                rootPath = A[k - 1][:i]

                # We store the removed edges
                removed_edges = []

                for path in A:
                    if len(path) - 1 > i and rootPath == path[:i]:
                        # Remove the links that are part of the previous shortest paths which share the same root path
                        edge = (path[i], path[i + 1])
                        if not graph_copy.has_edge(*edge):
                            continue
                        removed_edges.append(
                            (edge, graph_copy.get_edge_data(*edge)))
                        graph_copy.remove_edge(*edge)

                # Calculate the spur path from the spur node to the sink
                try:
                    spurPath = nx.shortest_path(graph_copy,
                                                spurNode,
                                                target,
                                                weight=weight)

                    # Entire path is made up of the root path and spur path
                    totalPath = rootPath + spurPath
                    totalPathCost = path_cost(graph_copy, totalPath, weight)
                    # Add the potential k-shortest path to the heap
                    B.put((totalPathCost, totalPath))

                except nx.NetworkXNoPath:
                    pass

                # Add back the edges that were removed from the graph, so the
                # next spur iteration starts from the full graph again
                for removed_edge in removed_edges:
                    graph_copy.add_edge(*removed_edge[0], **removed_edge[1])

            # B is a priority queue, so the candidates are already sorted by
            # cost; the lowest-cost path not yet in A becomes the next shortest path
            while True:
                try:
                    cost_, path_ = B.get(False)
                    if path_ not in A:
                        A.append(path_)
                        A_costs.append(cost_)
                        break
                except queue.Empty:
                    break
        except IndexError:
            pass

    return A, A_costs
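# Hypothetical usage with networkx (path_cost is assumed to sum the edge
# weights along a path, as the calls above imply):
#
#     import networkx as nx
#     G = nx.DiGraph()
#     G.add_weighted_edges_from([("s", "a", 1), ("a", "t", 1), ("s", "t", 3)])
#     paths, costs = ksp(G, "s", "t", num_k=2, weight="weight")
#     # paths -> [['s', 'a', 't'], ['s', 't']], costs -> [2, 3]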
Example #30
    def __init__(self,
                 fetcher,
                 parser=None,
                 saver=None,
                 proxieser=None,
                 url_filter=None,
                 max_count=500,
                 max_count_in_proxies=100):
        """
        constructor
        """
        self._inst_fetcher = fetcher  # fetcher instance, subclass of Fetcher
        self._inst_parser = parser  # parser instance, subclass of Parser or None
        self._inst_saver = saver  # saver instance, subclass of Saver or None
        self._inst_proxieser = proxieser  # proxieser instance, subclass of Proxieser

        self._queue_fetch = queue.PriorityQueue()  # (priority, counter, url, keys, deep, repeat)
        self._queue_parse = queue.PriorityQueue()  # (priority, counter, url, keys, deep, content)
        self._queue_save = queue.Queue()  # (url, keys, item), item can be anything
        self._queue_proxies = queue.Queue()  # {"http": "http://auth@ip:port", "https": "https://auth@ip:port"}

        self._thread_fetcher_list = []  # fetcher thread list
        self._thread_parser = None  # parser thread
        self._thread_saver = None  # saver thread
        self._thread_proxieser = None  # proxieser thread

        self._thread_stop_flag = False  # default: False, stop flag of threads
        self._url_filter = url_filter  # default: None, also can be UrlFilter()

        self._number_dict = {
            TPEnum.TASKS_RUNNING: 0,    # the count of tasks which are running
            TPEnum.URL_FETCH_NOT: 0,    # the count of urls which haven't been fetched
            TPEnum.URL_FETCH_SUCC: 0,   # the count of urls which have been fetched successfully
            TPEnum.URL_FETCH_FAIL: 0,   # the count of urls which failed to be fetched
            TPEnum.URL_FETCH_COUNT: 0,  # the count of urls which appeared in self._queue_fetch
            TPEnum.HTM_PARSE_NOT: 0,    # the count of urls which haven't been parsed
            TPEnum.HTM_PARSE_SUCC: 0,   # the count of urls which have been parsed successfully
            TPEnum.HTM_PARSE_FAIL: 0,   # the count of urls which failed to be parsed
            TPEnum.ITEM_SAVE_NOT: 0,    # the count of items which haven't been saved
            TPEnum.ITEM_SAVE_SUCC: 0,   # the count of items which have been saved successfully
            TPEnum.ITEM_SAVE_FAIL: 0,   # the count of items which failed to be saved
            TPEnum.PROXIES_LEFT: 0,     # the count of proxies which are available
            TPEnum.PROXIES_FAIL: 0,     # the count of proxies which are unavailable
        }
        self._lock = threading.Lock()  # the lock which self._number_dict needs

        self._max_count = max_count  # maximum count of items which in parse queue or save queue
        self._max_count_in_proxies = max_count_in_proxies  # maximum count of items which in proxies queue

        # set monitor thread
        self._thread_monitor = MonitorThread("monitor", self)  # create the monitor thread as a daemon
        self._thread_monitor.setDaemon(True)
        self._thread_monitor.start()
        logging.info("%s has been initialized", self.__class__.__name__)
        return