Code example #1
def from_dblp_affs():
    # suppress the scipy warning:
    # RuntimeWarning: The number of calls to function has reached maxfev = 400.
    warnings.simplefilter("ignore")

    data_name = 'dblp_four_areas'
    data_folder = './datasets/'
    cache_folder = mkdir('./cache/')

    raw_data_file = os.path.join(data_folder, 'dblp_papers_v11.txt')
    paper_records_cache_file = os.path.join(cache_folder,
                                            'dblp_paper_records_affs.pkl')
    dblp_data_cache_file = os.path.join(cache_folder, 'dblp_data_affs.pkl')
    adj_A_cache_file = os.path.join(cache_folder, 'dblp_adj_A_affs.pkl')
    att_A_cache_file = os.path.join(cache_folder, 'dblp_att_A_affs.pkl')
    subsample_step = 8

    dblp_data, data = memoize(load_dblp_data_affs,
                              dblp_data_cache_file,
                              refresh=True)(data_name, raw_data_file,
                                            paper_records_cache_file,
                                            subsample_step)
    adj_A = memoize(compute_adj, adj_A_cache_file, refresh=True)(dblp_data)
    adj_A = adj_A[::1, ::1]

    G = nx.from_scipy_sparse_matrix(adj_A, create_using=nx.DiGraph())

    return G, data, 'dblp_affs'
def load_full_data(data_name, data_folder, cache_folder, subsample_step=8):
    raw_data_file = join(data_folder, 'dblp_papers_v11.txt')
    paper_records_cache_file = join(cache_folder, 'dblp_paper_records_affs.pkl')
    dblp_data_cache_file = join(cache_folder, 'dblp_data_affs.pkl')
    adj_A_cache_file = join(cache_folder, 'dblp_adj_A_affs.pkl')

    dblp_data, att_A = memoize(load_dblp_data_affs, dblp_data_cache_file,
                               refresh=True)(data_name, raw_data_file,
                                             paper_records_cache_file,
                                             subsample_step)
    adj_A = memoize(compute_adj, adj_A_cache_file, refresh=True)(dblp_data)
    return dblp_data, adj_A, att_A
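
Note: the loaders above (and the other DBLP examples below) call memoize(func, cache_file, refresh=...) so that expensive parsing steps are cached in pickle files between runs. The helper itself is not shown in these snippets; the following is only a minimal sketch of such a file-backed memoizer, with the behaviour assumed rather than copied from the project:

import os
import pickle


def memoize(func, cache_file, refresh=False):
    # Sketch only: reuse a pickled result if it exists, unless refresh is True.
    def wrapper(*args, **kwargs):
        if not refresh and os.path.exists(cache_file):
            with open(cache_file, 'rb') as fh:
                return pickle.load(fh)
        result = func(*args, **kwargs)
        with open(cache_file, 'wb') as fh:
            pickle.dump(result, fh)
        return result
    return wrapper
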
Code example #3
def load_data(data_folder, cache_folder):
    raw_data_file = join(data_folder, 'votingDatain13Articles.mat')
    MP_data_cache_file = join(cache_folder, 'MP_data.pkl')
    attr_M_cache_file = join(cache_folder, 'MP_attr.pkl')
    A_cache_file = join(cache_folder, 'MP_A.pkl')

    MP_data = memoize(load_MP_data, MP_data_cache_file,
                      refresh=True)(raw_data_file)
    attr_M = memoize(get_all_votes, attr_M_cache_file, refresh=True)(MP_data)
    A = memoize(get_friends_A, A_cache_file, refresh=True)(MP_data)
    ud = True
    return MP_data, A, attr_M, ud
Code example #4
def bidirectional_best_first_graph_search(problem, h=None, h_reverse=None):
    h = memoize(h or problem.h, 'h')
    h_reverse = memoize(h_reverse or problem.h_reverse, 'h_reverse')
    node_forward = Node(problem.initial)
    node_backward = Node(problem.goal)
    frontier_forward = PriorityQueue('min', h)
    frontier_backward = PriorityQueue('min', h_reverse)
    frontier_forward.append(node_forward)
    frontier_backward.append(node_backward)
    explored = set()

    while frontier_forward and frontier_backward:
        node_forward = frontier_forward.pop()
        # print(node_forward.state)
        if problem.goal_test_forward(node_forward.state):
            print('[f]meet point:')
            print(node_forward.state)
            while True:
                node_backward = frontier_backward.pop()
                if node_backward.state == node_forward.state:
                    break
            return [node_forward, node_backward]
        explored.add(node_forward.state)

        for child in node_forward.expand(problem):
            if child.state not in explored and child not in frontier_forward:
                frontier_forward.append(child)
                problem.backward_goal.append(child.state)
        problem.backward_goal.remove(node_forward.state)

        node_backward = frontier_backward.pop()
        # print(node_backward.state)
        if problem.goal_test_backward(node_backward.state):
            print('[b]meet point:')
            print(node_backward.state)
            while True:
                node_forward = frontier_forward.pop()
                if node_backward.state == node_forward.state:
                    break
            return [node_forward, node_backward]
        explored.add(node_backward.state)
        # problem.backward_goal = [problem.initial]
        for child in node_backward.expand(problem):
            if child.state not in explored and child not in frontier_backward:
                frontier_backward.append(child)
                problem.forward_goal.append(child.state)
        problem.forward_goal.remove(node_backward.state)

    return None
Code example #5
def best_first_graph_search(problem, f):
    """MODIFICATION: a timeout check has been added.

    Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    f = memoize(f, 'f')
    node = Node(problem.initial)
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    explored = set()
    start = time.time()
    while frontier and (time.time() - start < TIMEOUT):
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        explored.add(tuple(sorted(node.state.pieces)))
        for child in node.expand(problem):
            if (tuple(sorted(child.state.pieces)) not in explored
                    and child not in frontier):
                frontier.append(child)
            elif child in frontier:
                if f(child) < frontier[child]:
                    del frontier[child]
                    frontier.append(child)
    return None
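
Note: the docstring above describes the central subtlety shared by most of these search examples: memoize(f, 'f') caches each computed f value as an attribute of the node itself, so the scores along the returned path can be examined afterwards. A minimal sketch of that attribute-caching memoizer, patterned on the AIMA utils helper these files appear to import:

def memoize(fn, slot=None):
    # With a slot name, store the computed value on the first argument
    # (e.g. node.f); without one, fall back to a dict keyed on the arguments.
    if slot:
        def memoized_fn(obj, *args):
            if hasattr(obj, slot):
                return getattr(obj, slot)
            val = fn(obj, *args)
            setattr(obj, slot, val)
            return val
    else:
        cache = {}

        def memoized_fn(*args):
            if args not in cache:
                cache[args] = fn(*args)
            return cache[args]
    return memoized_fn
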
def load_data(data_name, data_folder, cache_folder, subsample_step=8):
    raw_data_file = join(data_folder, 'dblp_papers_v11.txt')
    paper_records_cache_file = join(cache_folder, 'dblp_paper_records.pkl')
    dblp_data_cache_file = join(cache_folder, 'dblp_data.pkl')
    adj_A_cache_file = join(cache_folder, 'dblp_adj_A.pkl')
    att_A_cache_file = join(cache_folder, 'dblp_att_A.pkl')

    dblp_data = memoize(load_dblp_data,
                        dblp_data_cache_file)(data_name, raw_data_file,
                                              paper_records_cache_file,
                                              subsample_step)
    adj_A = memoize(compute_adj, adj_A_cache_file, refresh=False)(dblp_data)
    att_A = memoize(compute_attributes, att_A_cache_file,
                    refresh=False)(dblp_data)

    return dblp_data, adj_A, att_A
Code example #7
def best_first_search_tree(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    # print("he sido llamado")
    f = memoize(f, 'f')
    node = Node(problem.initial)
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    # frontier.mostrar()
    # explored = set()
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        # explored.add(node.state)
        for child in node.expand(problem):
            frontier.append(child)
            '''if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                if f(child) < frontier[child]: # mira si ya hay una forma de llegar q es mayor a la que encontre ahora?
                    del frontier[child]
                    frontier.append(child)'''
    return None
Code example #8
def astar_search(problem,):
    """A* search is best-first graph search with f(n) = g(n)+h(n).
    You need to specify the h function when you call astar_search, or
    else in your Problem subclass."""
    """****************changed from h = memoize(h or problem.h, 'h')*******************"""
    h = memoize(problem.h, 'h')
    return best_first_graph_search(problem, lambda n: n.path_cost + h(n))
Code example #9
def informed_bfs(problem, f):
    f = utils.memoize(f, 'f')
    node = pb.TreeNode(pb.ProblemState(obj.Track(), piece_qty))

    if problem.end_test(node.state):
        return node

    frontier = utils.PriorityQueue('min', f)
    frontier.append(node)
    explored = set()

    while frontier:
        node = frontier.pop()
        if problem.end_test(node.state):
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
    return None
Code example #10
File: search.py Project: bernardokrohn/IA
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    f = memoize(f, 'f')
    node = Node(problem.initial)
    if problem.goal_test(node.state):
        return node
    frontier = PriorityQueue(min, f)
    frontier.append(node)
    explored = set()
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
    return None
Code example #11
File: search.py Project: 153672/8600_hw1
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    f = memoize(f, 'f')
    node = Node(problem.initial)
    if problem.goal_test(node.state):
        return node
    frontier = PriorityQueue(min, f)
    frontier.append(node)
    explored = set()
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
    return None
Code example #12
def a_star_search(problem, stats=False):
    h = memoize(problem.h_g, 'h')
    node = Node(problem.initial)
    nodes_generated = 1
    explored = set()
    if problem.goal_test(node.state):
        if stats:
            return (node, explored, nodes_generated)
        return node
    frontier = PriorityQueue('min', h)
    frontier.append(node)
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            if stats:
                return (node, explored, nodes_generated)
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            nodes_generated += 1
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                incumbent = frontier[child]
                if h(child) < h(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
    return None
Code example #13
def best_first_graph_search(problem, f, display=False):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    f = memoize(f, 'f')
    node = Node(problem.initial)
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    explored = set()
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            if display:
                print(len(explored), "paths have been expanded and",
                      len(frontier), "paths remain in the frontier")
                print("Total path cost is:" + str(node.path_cost))
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                if f(child) < frontier[child]:
                    del frontier[child]
                    frontier.append(child)
    return None
Code example #14
File: search.py Project: bernardokrohn/IA
def recursive_best_first_search(problem, h=None):
    """[Figure 3.26]"""
    h = memoize(h or problem.h, 'h')

    def RBFS(problem, node, flimit):
        if problem.goal_test(node.state):
            return node, 0  # (The second value is immaterial)
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, infinity
        for s in successors:
            s.f = max(s.path_cost + h(s), node.f)
        while True:
            # Order by lowest f value
            successors.sort(key=lambda x: x.f)
            best = successors[0]
            if best.f > flimit:
                return None, best.f
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = infinity
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result, best.f

    node = Node(problem.initial)
    node.f = h(node)
    result, bestf = RBFS(problem, node, infinity)
    return result
Code example #15
def depth_limited_best_first_graph_search(problem, f, depth_limit):
    f = memoize(f, 'f')
    node = Node(problem.initial)
    total_nodes = 0
    if problem.goal_test(node.state):
        return node, total_nodes
    frontier = PriorityQueue(min, f)
    frontier.append(node)
    explored = set()
    while frontier:
        node = frontier.pop()
        total_nodes += 1
        if problem.goal_test(node.state):
            return node, total_nodes
        explored.add(node.state)
        if node.depth < depth_limit:
            for child in node.expand(problem):
                if child.state not in explored and child not in frontier:
                    frontier.append(child)
                elif child in frontier:
                    incumbent = frontier[child]
                    if f(child) < f(incumbent):
                        del frontier[incumbent]
                        frontier.append(child)
    return None, total_nodes
Code example #16
File: search.py Project: 153672/8600_hw1
def recursive_best_first_search(problem, h=None):
    "[Fig. 3.26]"
    h = memoize(h or problem.h, 'h')

    def RBFS(problem, node, flimit):
        if problem.goal_test(node.state):
            return node, 0   # (The second value is immaterial)
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, infinity
        for s in successors:
            s.f = max(s.path_cost + h(s), node.f)
        while True:
            successors.sort(lambda x,y: cmp(x.f, y.f)) # Order by lowest f value
            best = successors[0]
            if best.f > flimit:
                return None, best.f
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = infinity
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result, best.f

    node = Node(problem.initial)
    node.f = h(node)
    result, bestf = RBFS(problem, node, infinity)
    return result
Code example #17
def astar_search(problem, h=None, display=False):
    """A* search is best-first graph search with f(n) = g(n)+h(n).
    You need to specify the h function when you call astar_search, or
    else in your Problem subclass."""
    h = memoize(h or problem.h, 'h')
    return best_first_graph_search(problem, lambda n: n.path_cost + h(n),
                                   display)
def best_first_graph_search(problem, f):

    f = memoize(f, 'f')
    node = Node(problem.initial)
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    explored = list()
    while frontier:
        node = frontier.pop()
        print("Current Node:", node.state)
        if problem.goal_test(node.state):
            trace_path(node)
            return node
        explored.append(node.state)
        print("Explored Nodes:", explored)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                if f(child) < frontier[child]:
                    del frontier[child]
                    frontier.append(child)
        temp_front = list()
        for e in frontier.heap:
            val, node = e
            temp_front.append(node.state)
        print("Frontier Nodes:", temp_front)
        print("\n")
    return None
Code example #19
def scalability_test():
    testing_list = [5,10,20,40,80,160,320,640,1280,2560,5120,10240]
    search_time = []
    # save key variables
    cwd = os.getcwd()
    file = join(cwd, *['results','scalability_global_varyingNumAttr.pkl'])

    for num_attr in testing_list:
        A, attr_M, ud = load_data(num_attr)
        A = A[::1, ::1]

        # compute selector store
        store = compute_store(attr_M)

        # compute the summary
        summary = memoize(summarize, summary_file,
                          refresh=True)(A, store, ud)

        for rec in summary:
            print(('iter: {:d}, attr: {:s}, separator_idx: {:d}, cut_point: {:.4f}, '
                   'block_idx: {:d}, time: {:.4f}').format(
                       rec['iter'], rec['attr'], rec['separator_idx'],
                       rec['cut_point'], rec['block_idx'], rec['time']))
            search_time.append(rec['time'])

        Obj = (testing_list,search_time)
        f = open(file,'wb')
        pickle.dump(Obj, f)
        f.close()

    f = open(file,'rb')
    Obj = pickle.load(f)
    f.close()
    print(Obj)
Code example #20
def greedy_best_first_graph_search(problem, h=None):
    h = h or problem.h
    h = memoize(h, 'h')

    def f(n):
        return h(n)

    return best_first_graph_search(problem, f)
Code example #21
def from_dblp_topics():
    # suppress the scipy warning:
    # RuntimeWarning: The number of calls to function has reached maxfev = 400.
    warnings.simplefilter("ignore")

    data_name = 'dblp_four_areas'
    data_folder = './datasets/'
    cache_folder = mkdir('./cache/')
    # summary_file = 'dblp_summary.pkl'
    # max_num_selector = 1

    # load dblp_data
    raw_data_file = os.path.join(data_folder, 'dblp_papers_v11.txt')
    paper_records_cache_file = os.path.join(cache_folder,
                                            'dblp_paper_records.pkl')
    dblp_data_cache_file = os.path.join(cache_folder, 'dblp_data.pkl')
    adj_A_cache_file = os.path.join(cache_folder, 'dblp_adj_A.pkl')
    att_A_cache_file = os.path.join(cache_folder, 'dblp_att_A.pkl')

    subsample_step = 8
    dblp_data = memoize(load_dblp_data,
                        dblp_data_cache_file)(data_name, raw_data_file,
                                              paper_records_cache_file,
                                              subsample_step)
    adj_A = memoize(compute_adj, adj_A_cache_file, refresh=False)(dblp_data)
    att_A = memoize(compute_attributes, att_A_cache_file,
                    refresh=False)(dblp_data)

    adj_A = adj_A[::1, ::1]
    print(adj_A.shape)

    G = nx.from_scipy_sparse_matrix(adj_A, create_using=nx.DiGraph())
    att_A = att_A[::1]

    # perform LSA, center the data
    LSA_cache_file = os.path.join(cache_folder, 'dblp_lsa.pkl')
    att_A -= np.mean(att_A, axis=0)
    lsi_A = memoize(compute_LSA, LSA_cache_file, refresh=False)(att_A, 50)

    attr_proj = att_A.dot(lsi_A)

    data = pd.DataFrame(attr_proj)
    data.columns = list(map(str, list(range(50))))

    return G, data, 'dblp_topics'
Code example #22
def best_first_graph_search(problem, f, debug=False):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    try:
        f = utils.memoize(f, 'f')
        node = Node(problem.initial)
        if problem.goal_test(node.state):
            return node
        #frontier = utils.PriorityQueue(order=min, f=f)
        frontier = sorted_collection.SortedCollection(key=f, order=min)
        frontier.append(node)
        explored = set()
        if debug: debugfirstmoves = [] #debug
        while frontier:
            node = frontier.pop()
            if debug:
                if len(node.solution()) > 0:
                #if (len(node.solution()) > 0) and (node.solution()[0] not in debugfirstmoves): #debug
                    print('        examining node: {} (f={})\n{}'.format(node.solution()[-1], f(node), str(node.state))) # debug
                    debugfirstmoves.append(node.solution()[0])
                    #print('        examining node: {} (f={})\n{}'.format(node.solution(), f(node), str(node.state))) # debug
                    # if(f(node)==0): #debug
                    #     print(repr(node.state)) #debug
                    #     import pdb; pdb.set_trace() #debug
            if problem.goal_test(node.state):
                return node
            explored.add(repr(node.state))
            for child in node.expand(problem):
                if repr(child.state) not in explored and child not in frontier:
                    frontier.append(child)
                elif child in frontier:
                    #import pdb; pdb.set_trace()
                    # here we have a node already in frontier with the same
                    # state.  We check to see if that 'incumbent' node
                    # has a higher path cost.  If so, we replace the 
                    # incumbent node with this new 'child' node (both
                    # get to the same state thru different paths)
                    incumbent = frontier[frontier.index(child)]
                    #incumbent = frontier[child]
    #                if f(child) < f(incumbent):
                    #print('FOUND AN INCUMBANT IN FRONTIER: state=state:{}, f(inccument)={}, f(child)={}'.format(
                    #    repr(incumbent.state)==repr(child.state), f(incumbent), f(child)))
                    if (f(child) < f(incumbent)) and (child == incumbent):
                        if debug: print('DELETING frontier[incumbent]')
                        del frontier[incumbent]
                        frontier.append(child)
        return None
    except KeyboardInterrupt:
        #print('frontier: {}, f-scores: {}'.format(frontier, list(map(f, frontier))))
        print('child: {} (f={}), len(frontier)={}, len(explored)={}'.format(child, f(child), len(frontier), len(explored)))
        raise
Code example #23
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have depth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    f = memoize(f, 'f')
    return graph_search(problem, PriorityQueue(min, f))
Code example #24
def astar_search(problem, h=None):
    """A* search is best-first graph search with f(n) = g(n)+h(n).
    You need to specify the h function when you call astar_search.
    Uses the pathmax trick: f(n) = max(f(n), g(n)+h(n))."""
    h = h or problem.h
    h = memoize(h, 'h')

    def f(n):
        return max(getattr(n, 'f', -infinity), n.path_cost + h(n))

    return best_first_graph_search(problem, f)
Code example #25
def iterative_deepening_astar_search(problem, h=None):
    h = memoize(h or problem.h, 'h')
    prefix = 0
    cost_limit = 0
    result = None
    while not prefix:
        result = cost_limited_astar_search(problem, cost_limit,
                                           lambda n: n.path_cost + h(n))
        prefix = result[0]
        if not prefix:
            cost_limit = result[1]
    return result[1]
Code example #26
def best_first_tree_search(problem, f, display=False):
    # from search -- just modified to make it tree search
    f = memoize(f, 'f')
    node = Node(problem.initial)
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        for child in node.expand(problem):
            frontier.append(child)
    return None
Code example #27
def load_dblp_data(data_name, data_file, cache_file, subsample_step):
    target_venues = load_venues(data_name)
    paper_records = memoize(load_paper_records, cache_file)(data_file,
                                                            target_venues)
    paper_records = subsample_paper_records(paper_records, subsample_step)

    fos_records = load_fos_records(paper_records)

    pid_id_dict = {pid: i for i, pid in enumerate(paper_records.keys())}
    dblp_data = {
        'data_name': data_name,
        'paper_records': paper_records,
        'fos_records': fos_records,
        'target_venues': target_venues,
        'pid_id_dict': pid_id_dict
    }
    return dblp_data
Code example #28
def best_first_graph_search(problem, f):

    f = memoize(f, 'f')
    node = Node(problem.initial)
    frontier = PriorityQueue('min', f)
    frontier.append(node)

    explored = list()
    itr = 1
    print("Initial Node: " + number_to_city_map[node.state])
    while frontier:
        print("Iteration#" + str(itr))
        dist, current_city = frontier.heap[0]
        print("Current Node: " + number_to_city_map[current_city.state])
        itr = itr + 1
        node = frontier.pop()
        if problem.goal_test(node.state):
            print("Found the goal node")
            print(trace_path(node))
            return node
        print("Evaluation function(" + number_to_city_map[current_city.state] +
              ")" + "=" + str(dist))
        explored.append(node.state)
        print("Explored:")
        explrd = list()
        for e in explored:
            explrd.append(number_to_city_map[e])
        print(explrd)

        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                if f(child) < frontier[child]:
                    del frontier[child]
                    frontier.append(child)
        frnt = dict()
        print("Frontier:")
        for e in frontier.heap:
            dist, city = e
            frnt[city] = dist
        print(sorted(frnt.items(), key=lambda x: x[1]))

    return None
Code example #29
def recursive_best_first_search(problem, h=None):

    h = memoize(h or problem.h, 'h')

    def RBFS(problem, node, flimit):
        if problem.goal_test(node.state):
            print("Reached Dallas, which is the destination city")
            return node, 0  # (The second value is immaterial)
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, infinity
        for s in successors:
            s.f = max(s.path_cost + h(s), node.f)
        while True:
            # Order by lowest f value
            successors.sort(key=lambda x: x.f)
            best = successors[0]
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = infinity

            print("f_limit:" + str(flimit))
            print("best:" + str(best.f))
            print("alternative:" + str(alternative))
            print("current_city:" + number_to_city_map[node.state])
            if best.f > flimit:
                print("next_city: Fail")
                print("\n")
                return None, best.f
            print("next_city:" + number_to_city_map[best.state])
            print("\n")
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result, best.f

    node = Node(problem.initial)
    node.f = h(node)
    result, bestf = RBFS(problem, node, infinity)
    trace_path(result)
    return result
Code example #30
def best_first_graph_search(problem, f):
    f = memoize(f, 'f')
    node = Node(problem.initial)
    frontier = PriorityQueue('min', f)
    frontier.append(node)
    explored = set()

    while frontier:
        node = frontier.pop()
        if problem.goal_test(node.state):
            return node
        explored.add(node.state)

        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
            elif child in frontier:
                if f(child) < frontier[child]:
                    del frontier[child]
                    frontier.append(child)
    return None
Code example #31
File: search.py Project: bernardokrohn/IA
def greedy_search(problem, h=None):

    h = memoize(h or problem.h, 'h')
    return best_first_graph_search(problem, h)
Code example #32
    def do_include(match):
        text = open('templates/'+match.groups()[0]).read()
        return text
    while r_include.findall(text): 
        text = r_include.sub(do_include, text)

    execspace = _compiletemplate.bases.copy()
    tmpl_compiler = Compiler(source=text, mainClassName='GenTemplate')
    tmpl_compiler.addImportedVarNames(execspace.keys())
    exec str(tmpl_compiler) in execspace
    if base: 
        _compiletemplate.bases[base] = execspace['GenTemplate']

    return execspace['GenTemplate']

_compiletemplate = memoize(__compiletemplate)
_compiletemplate.bases = {}

def render(template, terms=None, asTemplate=False, base=None, 
           isString=False):
    """
    Renders a template, caching where it can.
    
    `template` is the name of a file containing the a template in
    the `templates/` folder, unless `isString`, in which case it's the 
    template itself.

    `terms` is a dictionary used to fill the template. If it's None, then
    the caller's local variables are used instead, plus context, if it's not 
    already set, is set to `context`.
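
Note: the snippet above also shows that the memoized wrapper is an ordinary callable, so shared state (_compiletemplate.bases) can be attached to it and read back inside the wrapped function. A hedged illustration of that idea, using functools.lru_cache in place of the unseen memoize helper and a stub instead of the real template compiler:

import functools


def _compile_stub(text, base=None):
    # Stand-in for the actual template compilation; returns a token only.
    return ('compiled', text, base)

_compiletemplate = functools.lru_cache(maxsize=None)(_compile_stub)
_compiletemplate.bases = {}  # extra state stored on the memoized callable

_compiletemplate.bases['layout'] = _compiletemplate('$def with (page)\n$page')
print(_compiletemplate.bases)
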
Code example #33
File: line.py Project: Womble/FreeFree
    nud=0.0004285850633666147*nu0*math.sqrt(T)
    nus=nu*(1-v/C0)
    if abs(nus-nu0)>5*nud: return 0
    else:                  return math.exp(-(nu*(1-v/C0)-nu0)**2/nud**2) /nud/math.sqrt(pi)   
#ground state has no intrinsic width and unbound has practically none so we run into numerical issues trying to calc a voigt profile, instead just do Doppler
#    if 1:
#        nu_col=4.8e-7*Ne/(T*cns.Boltzmann/ev)**1.5 #ion collision frequency s^-1
#        lam=1.579e5/T
#        a=(gamma[l-1]+ 2 *nu_col)/(4*pi*Vd) 
#        u=(nu-nu0)/Vd
#        return voigt(a,u)/(Vd*math.sqrt(pi))
    

def _thermalRecomb (n,T):
    return 3.262e-6*M(n,T)
thermalRecomb=memoize(_thermalRecomb)

def M(n,T):
    X=Einf*(1.0/n**2)/(cns.Boltzmann*T) #ionisation energy of nth level in units of k_B T
    if X<100:
        return E1(X)*math.exp(X)/(n*math.sqrt(T))**3
    else:
        return (1.0/X - 1.0/X**2 + 2.0/X**3 - 6.0/X**4 + 24.0/X**5) / (n*math.sqrt(T))**3 #n->inf expansion of e^x . E1(x) accurate to ~1+1^-8 at X=100

def _lineEmiss_cgs (nu, Ne, Nn, u, l, T, v=0, lineProfile=1):
    """gives the emissivity at the line centre
ne: electron number density (cm^-3)
nu: recombinations to which hydrogen state
T: temperature (K)
v: velocity of the emitting material (+ve= towards observer)"""
    if u==np.inf:   
Code example #34
File: search.py Project: 153672/8600_hw1
def astar_search(problem, h=None):
    """A* search is best-first graph search with f(n) = g(n)+h(n).
    You need to specify the h function when you call astar_search, or
    else in your Problem subclass."""
    h = memoize(h or problem.h, 'h')
    return best_first_graph_search(problem, lambda n: n.path_cost + h(n))
Code example #35
    def evaluate(self, env):
        return self.branches[self.index.evaluate(env)].evaluate(env)
    def size(self):
        return sum((node.size() for node in self.branches), 1)
    def find(self, goal, env):
        for i, branch in enumerate(self.branches):
            for env1 in self.index.find(i, env):
                for env2 in branch.find(goal, env1):
                    yield env2

def extend(env, var, value):
    result = dict(env)
    result[var] = value
    return result

Constant = memoize(ConstantNode)
const0, const1 = Constant(0), Constant(1)

Variable = VariableNode

def Choice(index, *branches):
    if len(set(branches)) == 1:
        return branches[0]
    elif all(branch is Constant(i) for i, branch in enumerate(branches)):
        return index
    else:
        return ChoiceNode(index, *branches)

#Choice = ChoiceNode

def naively_express(variables, table):
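
Note: the point of Constant = memoize(ConstantNode) above is hash-consing: calling the memoized constructor with equal arguments returns the very same node object, which is what makes the identity test branch is Constant(i) inside Choice meaningful. The memoize helper itself is not shown; as an illustrative stand-in, functools.lru_cache produces the same effect:

import functools


class ConstantNode:
    def __init__(self, value):
        self.value = value


# Memoizing the constructor yields one shared instance per distinct value.
Constant = functools.lru_cache(maxsize=None)(ConstantNode)

assert Constant(0) is Constant(0)          # same cached node both times
assert Constant(0) is not ConstantNode(0)  # an unmemoized call builds a new one
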
Code example #36
File: gaunt.py Project: Womble/FreeFree
    # not in range for Hummer's fit - check to see if Scheuer's 
    # approximation is OK
    elif u < 1.0e-4 and gamma >= 1.0 :
    # use Scheuer's (1960) long-wavelength approximation (see Hummer)
    # this works ok for u < 10**-4 and gamma > 1
    #g[SchuMask]= -0.55133*(log(gamma[SchuMask]) + log(u[SchuMask]) + 0.056745)
        g=  math.log(gamma)
        g+= math.log(u)    #do individual ops to prevent unnecessary
        g+= 0.056745            #creation of large temporary arrays
        g*=-0.55133
    # not in range for Scheuer's fit, try Elwert's high-energy approx
    # (see Hummer) for u < 10**-4, and gamma < 1
    elif ((u < 1.0e-4) and  (gamma < 1.0)):
    # use Elwert's (1954) approximation (see Hummer)
        g=-math.log(u)
        g+=0.80888
        g*=0.55133
    #if none are applicable gaunt factor defaults to 1

    if g<0.1 : g=0.1 #floor gaunt factor at 0.1
        
    return g

gh=vectorize(memoize(lambda t,z,nu : ghelp(t,z,nu)))

def gaunt(t,z,nu):
    if almost_eq(t,t.flat[0], 1e-6).all():
        return ones_like(t)*ghelp(t.flat[0],z,nu)
    else:
        return gh(t,z,nu)
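
Note: examples #33 and #36 wrap plain numeric helpers: thermalRecomb = memoize(_thermalRecomb) caches scalar results directly, and gh = vectorize(memoize(...)) means that when the memoized scalar function is broadcast over an array, each distinct (t, z, nu) triple is evaluated only once. A small sketch of that pattern, with an assumed dict-based memoize and a stub in place of the real Gaunt-factor fit:

import numpy as np


def memoize(fn):
    # Assumed minimal helper: cache results keyed on the positional arguments.
    cache = {}

    def wrapper(*args):
        if args not in cache:
            cache[args] = fn(*args)
        return cache[args]
    return wrapper


def ghelp_stub(t, z, nu):
    # Stand-in for the real fit; only here to make the sketch runnable.
    return 1.0 + 0.1 * np.log(t)

gh = np.vectorize(memoize(lambda t, z, nu: ghelp_stub(t, z, nu)))
print(gh(np.array([1e4, 1e4, 2e4]), 1, 1.4e9))  # t=1e4 is computed only once
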
Code example #37
    def build_lang_list(self):

        #Try to find out where we're located...
        cur_country_code, cur_timezone = None, None
        try:
            whatismyip = 'http://www.linuxmint.com/installer/show_my_ip.php'
            ip = urllib.urlopen(whatismyip).readlines()[0]
            gi = GeoIP.open('/usr/share/GeoIP/GeoIPCity.dat', GeoIP.GEOIP_STANDARD)
            gir = gi.record_by_addr(ip)
            cur_country_code, cur_timezone = gir['country_code'], gir['time_zone']
        except:
            pass #best effort, we get here if we're not connected to the Internet            

        self.cur_country_code = cur_country_code or os.environ.get('LANG', 'US').split('.')[0].split('_')[-1]  # fallback to LANG location or 'US'
        self.cur_timezone = cur_timezone

        #Load countries into memory
        countries = {}
        for line in shell_exec("isoquery --iso 3166 | cut -f1,4-").stdout:
            ccode, cname = line.strip().split(None, 1)
            countries[ccode] = cname

        #Load languages into memory
        languages = {}
        for line in shell_exec("isoquery --iso 639").stdout:
            _, code3, code2, language = line.strip().split('\t')
            languages[code2 or code3] = language

        # Construct language selection model
        model = gtk.ListStore(str, str, gtk.gdk.Pixbuf, str)
        set_iter = None
        flag_path = lambda ccode: self.resource_dir + '/flags/16/' + ccode.lower() + '.png'
        from utils import memoize
        flag = memoize(lambda ccode: gtk.gdk.pixbuf_new_from_file(flag_path(ccode)))
        for locale in shell_exec("awk -F'[@ \.]' '/UTF-8/{ print $1 }' /usr/share/i18n/SUPPORTED | uniq").stdout:
            locale = locale.strip()
            try:
                if '_' in locale:
                    lang, ccode = locale.split('_')
                    language, country = languages[lang], countries[ccode]
                else:
                    lang = locale
                    language = languages[lang]
                    country = ''
            except:
                print "Error adding locale '%s'" % locale
                continue
            pixbuf = flag(ccode) if not lang in 'eo ia' else flag('_' + lang)
            iter = model.append((language, country, pixbuf, locale))
            if (ccode == cur_country_code and
                (not set_iter or
                 set_iter and lang == 'en' or  # prefer English, or
                 set_iter and lang == ccode.lower())):  # fuzzy: lang matching ccode (fr_FR, de_DE, es_ES, ...)
                set_iter = iter

        # Sort by Country, then by Language
        model.set_sort_column_id(0, gtk.SORT_ASCENDING)
        model.set_sort_column_id(1, gtk.SORT_ASCENDING)
        # Set the model and pre-select the correct language
        treeview = self.wTree.get_widget("treeview_language_list")
        treeview.set_model(model)
        if set_iter:
            path = model.get_path(set_iter)
            treeview.set_cursor(path)
            treeview.scroll_to_cell(path)
Code example #38
File: bdd.py Project: darius/mccarthy-to-bryant
class Node(object):
    "A binary-decision-diagram node."
    __invert__ = lambda self:        self(const1, const0)
    __and__    = lambda self, other: self(const0, other)
    __or__     = lambda self, other: self(other, const1)
    __xor__    = lambda self, other: self(other, ~other)

def Equiv(p, q):   return p(~q, q)
def Implies(p, q): return p(const1, q)
class ConstantNode(Node):
    rank = float('Inf')   # (Greater than any variable's rank.)
    def __init__(self, value):     self.value = value
    def evaluate(self, env):       return self.value
    def __call__(self, *branches): return branches[self.value]

Constant = memoize(ConstantNode)
const0, const1 = Constant(0), Constant(1)

def Variable(rank):
    return build_node(rank, const0, const1)

class ChoiceNode(Node):
    value = None    # (Explained below.)
    def __init__(self, rank, if0, if1):
        assert rank < if0.rank and rank < if1.rank
        self.rank = rank
        self.if0 = if0
        self.if1 = if1
    def evaluate(self, env):
        branch = (self.if0, self.if1)[env[self.rank]]
        return branch.evaluate(env)
Code example #39

class ConstantNode(Node):
    rank = float('Inf')  # Greater than every variable.

    def __init__(self, value):
        self.value = value

    def evaluate(self, env):
        return self.value

    def __call__(self, *branches):
        return branches[self.value]


Constant = memoize(ConstantNode)
lit0, lit1 = Constant(0), Constant(1)


def Variable(rank, arity=2):
    return build_node(rank, tuple(map(Constant, range(arity))))


class ChoiceNode(Node):
    value = None

    def __init__(self, rank, branches):
        self.rank = rank
        self.branches = branches
        for b in branches:
            assert rank < b.rank
Code example #40
def bestFS(problem, h=None):
    h = memoize(h or problem.h, 'h')
    return search.best_first_graph_search(problem, lambda n: h(n))