Example #1
    def do_turn(self, turn_data: TurnData) -> Action:
        state = turn_data
        agent_x, agent_y = state.agent_data[0].position
        if not self.sequence0 and not self.sequence1:
            problem = Graph(state.map)
            if not state.agent_data[0].carrying:
                problem.agent = f'{agent_x},{agent_y}'
                heuristics = heuristic_list(problem)
                self.sequence0 = graph_search(problem, heuristics)
            else:
                problem.agent = f'{agent_x},{agent_y}'
                problem.final = True
                heuristics = heuristic_list(problem)
                self.sequence1 = graph_search(problem, heuristics)

        if self.sequence0:
            return self.sequence0.pop()
        elif self.sequence1:
            return self.sequence1.pop()
Example #2
def getGraphFromJson(jsonStructure):

    # obtain all edges
    edgesList = []

    for edgeStructure in jsonStructure["edges"]:
        edgesList.append(
            Edge.Edge(
                EdgeInfo.EdgeInfo(edgeStructure["edgeID"],
                                  edgeStructure["formEdgeID"]),
                edgeStructure["toID"]))

    # obtain all Nodes
    nodesList = []

    for nodeStructure in jsonStructure["nodes"]:

        # type "L" nodes should be ignored
        # they are accessory nodes indicating the origin (web address) of the node content
        if nodeStructure["type"] != "L":

            nodeID = nodeStructure["nodeID"]

            adjEdges = []

            # get all edges from current node
            for edgeStructure in jsonStructure["edges"]:
                if edgeStructure["fromID"] == nodeID:
                    currentEdgeId = edgeStructure["edgeID"]

                    # get edge object
                    for e in edgesList:
                        if e.getEdgeInfo().getEdgeId() == currentEdgeId:
                            adjEdges.append(e)
                            break

            # obtain scheme attributes (if present)
            schemeValue = nodeStructure.get("scheme")
            schemeIDValue = nodeStructure.get("schemeID")

            nodesList.append(
                Node.Node(
                    NodeInfo.NodeInfo(nodeID,
                                      nodeStructure["text"].encode('utf8'),
                                      nodeStructure["type"],
                                      nodeStructure["timestamp"], schemeValue,
                                      schemeIDValue), adjEdges))

    return Graph.Graph(nodesList)
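
A note on Example #2: a hedged sketch of the JSON shape getGraphFromJson reads, inferred from the keys accessed above; every value is invented for illustration, and the "formEdgeID" key is kept exactly as the snippet spells it:

jsonStructure = {
    "edges": [
        # keys mirror what the parser reads; values are made up
        {"edgeID": "e1", "formEdgeID": "RA1", "fromID": "n1", "toID": "n2"},
    ],
    "nodes": [
        {"nodeID": "n1", "text": "a premise", "type": "I",
         "timestamp": "2020-01-01 00:00:00"},
        {"nodeID": "n2", "text": "a claim", "type": "I",
         "timestamp": "2020-01-01 00:00:00",
         "scheme": "Default Inference", "schemeID": "72"},
    ],
}
graph = getGraphFromJson(jsonStructure)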
Example #3
    def do_turn(self, turn_data) -> Action:
        state = turn_data
        # state = self.updateState(state, turn_data)
        if not self.sequence0 and not self.sequence1:
            problem = Graph(state.map)
            if not state.agent_data[0].carrying:
                agentx, agenty = turn_data.agent_data[0].position
                problem.agent = f'{agentx},{agenty}'
                self.sequence0 = search(problem)
            else:
                problem.final = True
                agentx, agenty = turn_data.agent_data[0].position
                problem.agent = f'{agentx},{agenty}'
                self.sequence1 = search(problem)
            # if not self.sequence:
            # return None
        if self.sequence0:
            return self.sequence0.pop()
        elif self.sequence1:
            return self.sequence1.pop()
Example #4
    def updateRoutingTable(self):
        """
        Update the routing table using a distributed Dijkstra algorithm.
        """
        g = Graph()
        networks = {}
        # print(self.sw.name, self.lsdb)
        for rid, lsa in self.lsdb.items():
            for neigh in lsa['networks']:
                # rid, neigh[2]
                subnet, netmask, neighid = neigh
                g.add_edge(rid, neighid)
                netaddr = ipprefix(subnet, netmask)
                if netaddr not in networks:
                    networks[netaddr] = set()
                networks[netaddr].add(rid)
        # print(self.sw.name, g.adj)
        # print(self.sw.name, networks)
        next_hops = g.find_shortest_paths(self.sw.router_id)
        # print(self.sw.name, next_hops)
        for netaddr, nodes in networks.items():
            nhop = None  # reset so a previous iteration's next hop never leaks
            if len(nodes) == 1:
                dst = nodes.pop()
                if dst == self.sw.router_id:
                    nhop = None
                else:
                    nhop, _ = next_hops.get(dst, (None, None))
            elif len(nodes) == 2:
                n1, n2 = nodes
                if self.sw.router_id in nodes:
                    dst = nhop = (n2 if n1 == self.sw.router_id else n1)
                else:
                    dst = (n1 if next_hops[n1][1] < next_hops[n2][1] else n2)
                    nhop, _ = next_hops[dst]
            for pn, p in self.sw.data_ports.items():
                gateway = p.ownNeigh(nhop)
                if ipprefix(p.IP(), p.Netmask()) == netaddr:
                    gateway = '0.0.0.0'
                if gateway is not None:
                    r = (netaddr, gateway, pn)
                    self.sw.pending_pwospf_table[netaddr] = r
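
A note on Example #4: the ipprefix helper it calls is not shown. A minimal sketch of what it plausibly does, built on the standard ipaddress module (the real helper may differ):

import ipaddress

def ipprefix(ip, netmask):
    # Collapse an address/netmask pair into its network prefix,
    # e.g. ipprefix('10.0.1.5', '255.255.255.0') -> '10.0.1.0/24'
    return str(ipaddress.ip_network('{}/{}'.format(ip, netmask), strict=False))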
Example #5
def run_maze():
    visited = set()
    graph = Graph()
    travel_path = []
    dfs_rooms = graph.dfs(player.current_room)
    rooms = [room for room in dfs_rooms]
    while len(visited) < len(room_graph) - 1:
        current_room = rooms[0]
        next_room = rooms[1]
        shortest_path = graph.bfs(current_room, next_room)

        while len(shortest_path) > 1:
            current_room_neighbors = dfs_rooms[shortest_path[0]]
            next_room = shortest_path[1]
            if next_room in current_room_neighbors:
                travel_path.append(current_room_neighbors[next_room])
            shortest_path.pop(0)
        rooms.remove(current_room)
        visited.add(current_room)

    return travel_path
Example #6
    def getGraphFromBratAnnotationFile(self, currentFileName):

        # obtain all edge tuples, where
        # the first element of the tuple is the Edge object and
        # the second element is the from-id of the corresponding edge (added to the corresponding Node below)
        edgesList = []

        with open(paths["AAECCorpus"] + "/" + currentFileName + ".ann",
                  'r') as annotationFile:
            for line in annotationFile:

                splittedLine = line.split("\t")

                if splittedLine[0][0] == "R":
                    # Line contains a relation (edge)
                    currentEdgeId = splittedLine[0]
                    edgeType = splittedLine[1].split(" ")[0]
                    currentEdgeSourceNodeId = (
                        splittedLine[1].split(" ")[1]).split(":")[1]
                    currentEdgeTargetNodeId = (
                        splittedLine[1].split(" ")[2]).split(":")[1]

                    newEdgeInfo = EdgeInfo.EdgeInfo(currentEdgeId, edgeType)

                    edgesList.append((Edge.Edge(newEdgeInfo,
                                                currentEdgeTargetNodeId),
                                      currentEdgeSourceNodeId))

        # obtain all Nodes
        nodesList = []

        with open(paths["AAECCorpus"] + "/" + currentFileName + ".ann",
                  'r',
                  encoding='utf-8') as annotationFile:
            for line in annotationFile:

                splittedLine = line.split("\t")

                if splittedLine[0][0] == "T":
                    # Line contains a node
                    adjEdges = [
                        edgeTuple[0] for edgeTuple in edgesList
                        if edgeTuple[1] == splittedLine[0]
                    ]

                    nodesList.append(
                        Node.Node(
                            NodeInfo.NodeInfo(splittedLine[0], splittedLine[2],
                                              splittedLine[1].split(" ")[0],
                                              None, None, None), adjEdges))

        return Graph.Graph(nodesList)
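
A note on Example #6: the .ann lines it parses follow the BRAT annotation format, with tab-separated fields. A hedged reconstruction from the parsing logic above, with invented ids, offsets, and text ("T" lines carry nodes, "R" lines carry relations):

T1	Claim 0 24	cooperation is essential
T2	Premise 25 58	teams outperform individuals
R1	supports Arg1:T2 Arg2:T1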
Example #7
    def run(self, keywords):

        print("running Congruence with keyword {}".format(keywords))
        self.keywords = keywords
        self.recursive_search(self.keywords, self.keywords, 1, langs=['en'])
        wordcounts = self.dbf.get_wordcounts(self.keywords)
        self.g = graph.GlobalGraph(wordcounts, n=6)
        lgg = self.g.to_json()
        self.dbf.insert_graph(lgg)
        g = self.dbf.get_graph()
        # jstr = json.dumps(g)
        # return str(g)
        return g
Example #8
def earliest_ancestor(parents_children, start_vert):

    graph = Graph()

    for pair in parents_children:
        graph.add_vert(pair)

    parents = {p for p, c in parents_children}
    children = {c for p, c in parents_children}

    paths = []
    for p in parents:
        route = graph.dfs(start_vert, p)
        paths.append(route)

    valid_paths = [p for p in paths if p is not None and len(p) > 1]
    # valid_paths = list(filter(lambda p: p != None, paths))

    if len(valid_paths) == 0:
        return -1

    longest_path = max(valid_paths, key=len)

    return longest_path[-1]
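
A note on Example #8: the same idea without the snippet's Graph class, as a self-contained sketch (DFS up the child-to-parent links, keep the longest route; returns -1 when start_vert has no ancestors):

def earliest_ancestor_sketch(parents_children, start_vert):
    # map each child to its parents
    parents_of = {}
    for parent, child in parents_children:
        parents_of.setdefault(child, []).append(parent)

    def longest_route(vert):
        # walk upward depth-first; return the longest ancestor chain from vert
        routes = [longest_route(p) for p in parents_of.get(vert, [])]
        best = max(routes, key=len, default=[])
        return [vert] + best

    route = longest_route(start_vert)
    return route[-1] if len(route) > 1 else -1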
Example #9
    def test_directed_graph_description(self):
        graph_source = '''0 1 0
1 0 1
0 0 0'''
        graph_options = {'source_type': 'adjacency_matrix', 'directed': 'true'}

        expected_description = '''graph
G {
fontsize = 4.0;
ratio = auto;
1 [shape = circle, height=.1, width=.1];
2 [shape = circle, height=.1, width=.1];
3 [shape = circle, height=.1, width=.1];
2 -> 1 [ label = "" ];
2 -> 1 [ label = "" ];
2 -> 1 [ label = "" ];
}'''
        self.assertEqual(
            Graph(graph_source, graph_options).to_graphviz_description(),
            expected_description)
Example #10
    def __init__(self, parent=None):
        super(Control, self).__init__(parent)

        self.exit = False
        self.terminalVisible = False

        layout1 = QHBoxLayout(self)  # Top master layout
        layout2 = QVBoxLayout()  # widget on the left
        layout3 = QVBoxLayout()  # terminal on the right

        self.toolbar1 = Toolbar1(self)
        self.stockStats = StockStats(self)
        self.graph = Graph(self)
        self.dateRange = DateRange(self)
        self.sliders = RangeSlider(self, Qt.Horizontal)
        self.toolbar2 = Toolbar2(self)
        self.scrollArea = ScrollArea(self)
        #~ self.terminal   = QIPythonWidget(parent=self,
        #~ namespace={"Control":self},
        #~ visible=self.terminalVisible)

        layout2.addLayout(self.toolbar1)
        layout2.addLayout(self.stockStats)
        layout2.addWidget(self.graph)
        layout2.addLayout(self.dateRange)
        layout2.addWidget(self.sliders)
        layout2.addLayout(self.toolbar2)
        layout2.addWidget(self.scrollArea)

        #~ layout3.addWidget(self.terminal)

        layout1.addLayout(layout2)
        layout1.addLayout(layout3)

        self.sliders.sliderMoved.connect(self.graph.setSpan)

        QShortcut(QKeySequence("Ctrl+I"), self, self.showTerminal)
Example #11
    nodes = set([node])

    while len(nodes) > 0:
        node = nodes.pop()
        s += node.data

        for next_node in g.get_neighbours(node):
            nodes.add(next_node)

    return s


noded = {}
puzzle_input = load_input('puzzle_inputs/Day7.txt')
g = Graph()

programs = set()
targets = set()

# parse puzzle input into a graph
for line in puzzle_input:
    program_name = line.split()[0]
    programs.add(program_name)
    weight = get_numbers(line)[0]
    n = get_or_create_node(program_name, weight)

    if '-> ' not in line:
        continue

    sline = line.split('-> ')[1].split(', ')
Example #12
def create_bags_graph(edgelist):
    g = Graph(directed=True, edgelist=edgelist)
    return g
Example #13
class TestGraphMethods(TestCase):
    def setUp(self):
        self.__graph = Graph()

    def test_structure(self):
        self.assertIsInstance(self.__graph, Graph)

    def test_add_node(self):
        self.assertEqual(len(self.__graph.get_all_node()), 0)
        self.assertNotEqual(len(self.__graph.get_all_node()), 1)

        self.__graph.add_node(node=1)

        self.assertNotEqual(len(self.__graph.get_all_node()), 0)
        self.assertEqual(len(self.__graph.get_all_node()), 1)

    def test_add_edges(self):

        # Adding nodes
        self.__graph.add_node(node='B')
        self.__graph.add_node(node='A')

        # Connecting two nodes
        self.__graph.add_edge(edge=('A', 'B'))

        # Adding nodes
        self.__graph.add_node(node='C')
        self.__graph.add_node(node='D')

        self.__graph.add_edge(edge=('A', 'D'))

        # Assertions
        self.assertTrue(self.__graph.is_connected('A', 'B'))
        self.assertTrue(self.__graph.is_connected('B', 'A'))
        self.assertFalse(self.__graph.is_connected('C', 'D'))
        self.assertFalse(self.__graph.is_connected('B', 'D'))

        with self.assertRaises(TypeError):
            self.__graph.add_edge(edge=('Z', 'F'))

    def test_if_two_nodes_was_on_the_same_network(self):

        # Adding nodes
        self.__graph.add_node(node='C')
        self.__graph.add_node(node='D')
        self.__graph.add_node(node='A')
        self.__graph.add_node(node='B')

        self.__graph.add_edge(edge=('A', 'D'))

        # Assertions
        self.assertFalse(self.__graph.same_network(('A', 'B')))
        self.assertFalse(self.__graph.same_network(('B', 'A')))
        self.assertFalse(self.__graph.same_network(('C', 'D')))
        self.assertFalse(self.__graph.same_network(('B', 'D')))
        self.assertTrue(self.__graph.same_network(('A', 'D')))
        self.assertTrue(self.__graph.same_network(('D', 'A')))
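
A note on Examples #13 and #15: the Graph class under test is not shown. A minimal sketch that would satisfy these assertions, assuming is_connected tests direct adjacency while same_network tests membership in the same connected component (both readings inferred from the asserts):

class GraphSketch:
    def __init__(self):
        self._adj = {}

    def add_node(self, node):
        self._adj.setdefault(node, set())

    def get_all_node(self):
        return list(self._adj)

    def add_edge(self, edge):
        # reject edges whose endpoints were never added, as the tests expect
        u, v = edge
        if u not in self._adj or v not in self._adj:
            raise TypeError('both endpoints must already be nodes')
        self._adj[u].add(v)
        self._adj[v].add(u)

    def is_connected(self, u, v):
        # direct adjacency only
        return v in self._adj.get(u, set())

    def same_network(self, pair):
        # reachable via any path, i.e. same connected component
        u, v = pair
        stack, seen = [u], set()
        while stack:
            n = stack.pop()
            if n == v:
                return True
            if n not in seen:
                seen.add(n)
                stack.extend(self._adj.get(n, set()))
        return False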
Example #14
    n_node = 43098
elif opt.dataset == 'yoochoose1_64':
    n_node = 37484
elif opt.dataset == 'yoochoose1_4':
    n_node = 37484

train_data = Data(train_data,
                  all_seqs=all_train_seq,
                  sub_graph=True,
                  method=opt.method,
                  shuffle=True)
test_data = Data(test_data,
                 sub_graph=True,
                 method=opt.method,
                 shuffle=False)
graph = Graph(all_train_seq)
biases, node_list = graph.generate_gat()

model = MTD(hidden_size=opt.hidden_size,
            emb_size=opt.emb_size,
            n_node=n_node,
            method=opt.method,
            lr=opt.lr,
            l2=opt.l2,
            decay=opt.lr_dc_step * len(train_data.inputs) / opt.batch_size,
            lr_dc=opt.lr_dc,
            dropout=opt.dropout,
            kg=opt.kg,
            num_head=opt.num_head,
            num_block=opt.num_block,
            nonhybrid=opt.nonhybrid,
Example #15
    def setUp(self):
        self.__graph = Graph()
Example #16
class GA:
    def __init__(self, salesman_num, city_num, start_index, filename, max_gen,
                 distance_weight, balance_weight):
        self.population_size = 30  # population size
        self.population = []  # the population
        # self.all_pop_fitness = []  # optionally record the sum of population fitness per generation
        self.salesman_num = salesman_num  # number of salesmen
        self.city_num = city_num  # number of cities (also the number of checkpoints)
        self.chrom_len = salesman_num + city_num - 2  # chromosome length = (cities - 1) + (salesmen - 1); determined by the encoding scheme
        self.start_index = start_index  # index of the salesmen's start city, in [1, total number of cities]
        self.MAX_GEN = max_gen  # maximum number of iterations
        self.gen_count = 1  # generation counter
        self.dummy_points = [
            x for x in range(self.city_num + 1, self.city_num +
                             self.salesman_num)
        ]  # dummy points; see check_vertex_init_population below for details
        self.fitness_func = None  # fitness function in use

        self.filename = filename  # input file name
        self.graph_obj = Graph(filename)  # for experiment 1 (checkpoint routes): Graph object from utils
        self.china_cities = []  # for experiment 2 (transport-plane routes)

        self.per_pop_best_chrom = []  # best individual of each generation
        self.per_pop_best_chrom_fit = 0  # fitness of the best individual of each generation
        self.per_pop_best_path = []  # route of the best individual of each generation
        self.per_pop_best_dis_sum = INF  # total distance of the best individual of each generation
        self.all_per_pop_best_chrom = []  # history of the per-generation best individual
        self.all_per_pop_best_chrom_fit = []  # history of the per-generation best fitness (optional; check_vertex_fitness_func can recompute it)
        self.all_per_pop_best_dist_sum = []  # history of the per-generation best total distance

        self.best_chrom = []  # global best individual; not necessarily the current generation's best, which may be worse than earlier bests
        self.best_chrom_fit = 0  # fitness of the global best individual
        self.best_path = []  # route of the global best individual
        self.best_dis_sum = INF  # total route length of the global best individual
        self.all_best_chrom = []  # history of the global best individual
        self.all_best_chrom_fit = []  # history of the global best fitness
        self.all_best_dist_sum = []  # history of the global best total distance

        self.cross_prob = 0.8  # crossover probability
        self.mutation_prob = 0.15  # mutation probability
        self.cross_pmx_prob = 0.5  # probability of partially mapped crossover (PMX); unused, only cross_ox_prob is used
        self.cross_ox_prob = 0.5  # probability of order crossover (OX)

        self.mutation_swap_prob = 0.3  # probability of the "swap two elements" mutation
        self.mutation_reverse_prob = 0.4  # probability of the "reverse everything between two elements" mutation
        self.mutation_insert_prob = 1 - self.mutation_swap_prob - self.mutation_reverse_prob  # probability of the "insert one element after another" mutation

        self.distance_weight = distance_weight  # weight of total distance
        self.balance_weight = balance_weight  # weight of the balance degree

    def check_vertex_init_population(self):
        """
        针对实验1:检查点路线
        调用floyd算法得到任意两点的最短路径,初始化种群以及全局最优解:初始化self.population_num个染色体,染色体长度为self.chrom_len
        Returns:
        """
        """
        chrom组成
        如:总共8个城市,3号城市为起始点.下列染色体组成我们把起点和终点省略,看情况是否增加虚点
        1个旅行商:一个chrom = [把3剔除,其余数字由1到8组成]
            如[1,5,4,2,6,8,7]表示旅行商路线为3->1->5->4->2->6->8->7->3
        2个旅行商:一个chrom = [1个9(9代表虚点,其实也是起点3),其余数字由1到8组成]。以此类推到多个旅行商的情况。
            如[1,5,4,9,2,6,8,7]表示:
                旅行商1路线为3->1->5->4->3(9)
                旅行商2路线为3(9)->2->6->8->7->3
        3个旅行商:一个chrom = [9,10,其余数字由1到8组成]
            如[1,5,4,9,2,6,10,8,7]
                旅行商1路线为3->1->5->4->3(9)
                旅行商2路线为3->2->6->3(10)
                旅行商3路线为3->8->7->3
        """
        # 调用floyd算法得到任意两点的最短路径
        self.graph_obj.short_path_floyd()
        # 染色体的适应度函数是实验一检查点路线对应的check_vertex_fitness_func
        self.fitness_func = self.check_vertex_fitness_func

        for i in range(self.population_size):
            # 注意:城市起点从1开始,而不是从0
            chrom = [x for x in range(1, self.city_num + 1)]
            # 把起始点剔除
            chrom.remove(self.start_index)
            # 多个旅行商,增加salesman_num-1个虚点
            chrom.extend(self.dummy_points)
            random.shuffle(chrom)
            self.population.append(chrom)

        # 初始化全局最优个体和它的适应度
        self.best_chrom = self.population[0]
        self.best_chrom_fit = self.fitness_func(self.best_chrom)

    def china_city_init_pop(self):
        """
        针对实验2:运输机路线
        chrom组成
        如:总共8个城市,3号城市为起始点.下列染色体组成我们把起点和终点省略,看情况是否增加虚点
        1个旅行商:一个chrom = [把3剔除,其余数字由1到8组成]
            如[1,5,4,2,6,8,7]表示旅行商路线为3->1->5->4->2->6->8->7->3
        2个旅行商:一个chrom = [1个9(9代表虚点,其实也是起点3),其余数字由1到8组成]。以此类推到多个旅行商的情况。
            如[1,5,4,9,2,6,8,7]表示:
                旅行商1路线为3->1->5->4->3(9)
                旅行商2路线为3(9)->2->6->8->7->3
        3个旅行商:一个chrom = [9,10,其余数字由1到8组成]
            如[1,5,4,9,2,6,10,8,7]
                旅行商1路线为3->1->5->4->3(9)
                旅行商2路线为3->2->6->3(10)
                旅行商3路线为3->8->7->3
        """
        # 读取中国城市文件,并初始化china_cities
        self.china_cities = utils.read_china_cities_coord(self.filename)
        # 染色体的适应度函数是实验二中国城市对应的china_city_fitness_func
        self.fitness_func = self.china_city_fitness_func

        for i in range(self.population_size):
            # 注意:城市起点从1开始,而不是从0
            chrom = [x for x in range(1, self.city_num + 1)]
            # 把起始点剔除
            chrom.remove(self.start_index)
            # 多个旅行商,增加salesman_num-1个虚点
            chrom.extend(self.dummy_points)
            random.shuffle(chrom)
            self.population.append(chrom)
        # 初始化全局最优个体和它的适应度
        self.best_chrom = self.population[0]
        self.best_chrom_fit = self.fitness_func(self.best_chrom)

    def binary_tourment_select(self, population):
        """
        二元锦标赛:从种群中抽取2个个体参与竞争,获胜者个体进入到下一代种群
        Args:
            population: 目前种群

        Returns:
            new_population:下一代种群
        """
        new_population = []  # 下一代种群
        for i in range(self.population_size):
            # 随机选择2个个体
            competitors = random.choices(population, k=2)
            # 选择适应度大的个体
            winner = max(competitors, key=lambda x: self.fitness_func(x))
            new_population.append(winner)
        return new_population

    def cross_ox(self, parent_chrom1, parent_chrom2):
        """
        对两个父代染色体进行OX交叉,得到两个子代染色体
        Args:
            parent_chrom1: 父代染色体1
            parent_chrom2: 父代染色体2

        Returns:
            child_chrom1:子代染色体1
            child_chrom2:子代染色体2
        """
        # random.randint(a,b)返回值域[a,b]
        index1, index2 = random.randint(0, self.chrom_len - 1), random.randint(
            0, self.chrom_len - 1)
        if index1 > index2:
            index1, index2 = index2, index1
        # temp_gene1为parent_chrom1被选中的染色体片段[index1:index2)
        temp_gene1 = parent_chrom1[index1:index2]
        # temp_gene2为parent_chrom2被选中的染色体片段[index1:index2)
        temp_gene2 = parent_chrom2[index1:index2]
        """
        将parent_chrom1被选中的基因片段复制给child_chrom1,
        这里复制是指child_chrom1[index1:index2] = parent_chrom1[index1:index2]
        然后parent_chrom2除了temp_gene1包含的基因,parent_chrom2剩下基因按照顺序放到child_chrom1中
        即(|parent_chrom2|-|temp_gene1|)基因按顺序放到child_chrom1。
        同理,child_chrom2交换一下parent_chrom1和parent_chrom2,也可以得到
        如:
        parent_chrom1 = [1, 2, 3, 4, 5, 6, 7, 8, 9], sel1选中部分[3, 4, 5, 6]
        parent_chrom2 = [5, 7, 4, 9, 1, 3, 6, 2, 8], sel2选中部分[6, 9, 2, 1]
        child_chrom1  = [7, 9, 3, 4, 5, 6, 1, 2, 8]
            1、child_chrom1对应部分放入sel1
            2、遍历parent_chrom2,parent_chrom2不属于sel1部分基因,按照顺序放入
        """
        child_chrom1, child_chrom2 = [], []
        child_p1, child_p2 = 0, 0
        # 得到child_chrom1
        for i in parent_chrom2:
            if child_p1 == index1:
                child_chrom1.extend(temp_gene1)
                child_p1 += 1
            if i not in temp_gene1:
                child_chrom1.append(i)
                child_p1 += 1

        # 得到child_chrom2
        for i in parent_chrom1:
            if child_p2 == index1:
                child_chrom2.extend(temp_gene2)
                child_p2 += 1
            if i not in temp_gene2:
                child_chrom2.append(i)
                child_p2 += 1

        return child_chrom1, child_chrom2

    def cross_pmx(self, parent_chrom1, parent_chrom2):
        """
        pmx部分匹配,里面需要冲突检测
        Args:
            parent_chrom1: 父代1
            parent_chrom2: 父代2

        Returns:
            chrom1, chrom2:子代1,子代2
        """
        index1, index2 = random.randint(0, self.chrom_len - 1), random.randint(
            0, self.chrom_len - 1)
        if index1 > index2:
            index1, index2 = index2, index1
        """
        如:
        index1 = 2, index2 = 6
        parent_chrom1 = [1, 2, 3, 4, 5, 6, 7, 8, 9], 选中部分[3, 4, 5, 6]
        parent_chrom2 = [5, 4, 6, 9, 2, 1, 7, 8, 3], 选中部分[6, 9, 2, 1]
        选中部分的映射关系即1<->6<->3 ; 2<->5 ; 9<->4
        可以看出存在1<->6<->3,说明6在父代1和2选中部分,6后续不需要冲突检测,所以应该1<->3
        """
        parent_part1, parent_part2 = parent_chrom1[
            index1:index2], parent_chrom2[index1:index2]

        child_chrom1, child_chrom2 = [], []
        child_p1, child_p2 = 0, 0  # 指针用来解决复制到指定位置问题
        # 子代1
        for i in parent_chrom1:
            # 指针到达父代的选中部分
            if index1 <= child_p1 < index2:
                # 将父代2选中基因片段复制到子代1指定位置上
                child_chrom1.append(parent_part2[child_p1 - index1])
                child_p1 += 1
                continue
            # 指针未到达父代的选中部分
            if child_p1 < index1 or child_p1 >= index2:
                # 父代1未选中部分含有父代2选中部分基因
                if i in parent_part2:
                    tmp = parent_part1[parent_part2.index(i)]
                    """
                    这里可能出现很长的映射链,如:
                    parent_part1 = [2, 3, 7, 5, 6, 14, 10, 11, 13]
                    parent_part2 = [4, 2, 1, 3, 5, 7,  14, 6,  10]
                    映射链:1 <-> 7 <-> 14 <-> 10 <-> 13 
                    所以采用循环的形式
                    """
                    while tmp in parent_part2:
                        tmp = parent_part1[parent_part2.index(tmp)]
                    child_chrom1.append(tmp)
                elif i not in parent_part2:
                    child_chrom1.append(i)
                child_p1 += 1
        # 子代2
        for i in parent_chrom2:
            # 指针到达父代的选中部分
            if index1 <= child_p2 < index2:
                # 将父代1选中基因片段复制到子代2指定位置上
                child_chrom2.append(parent_part1[child_p2 - index1])
                child_p2 += 1
                continue
            # 指针未到达父代的选中部分
            if child_p2 < index1 or child_p2 >= index2:
                # 父代2未选中部分含有父代1选中部分基因
                if i in parent_part1:
                    tmp = parent_part2[parent_part1.index(i)]
                    # 解决1<->6<->3
                    while tmp in parent_part1:
                        tmp = parent_part2[parent_part1.index(tmp)]
                    child_chrom2.append(tmp)
                elif i not in parent_part1:
                    child_chrom2.append(i)
                child_p2 += 1

        return child_chrom1, child_chrom2

    def crossover(self, population):
        """
        种群按概率执行交叉操作
        Args:
            population: 种群

        Returns:
            new_population:新一代种群
        """
        # 交叉:比较特殊,只有PMX和OX
        new_population = []
        # 二元锦标赛选择出新的一代
        selected_pop = self.binary_tourment_select(population)
        for i in range(self.population_size):
            prob = random.random()  # 随机数,决定是PMX还是OX
            two_chrom = random.choices(selected_pop, k=2)
            if prob <= self.cross_ox_prob:
                # 执行OX
                child_chrom1, child_chrom2 = self.cross_ox(
                    two_chrom[0], two_chrom[1])
                new_population.append(child_chrom1)
                new_population.append(child_chrom2)
            else:
                # 执行PMX
                child_chrom1, child_chrom2 = self.cross_pmx(
                    two_chrom[0], two_chrom[1])
                new_population.append(child_chrom1)
                new_population.append(child_chrom2)
        return new_population

    def mutate_swap(self, parent_chrom):
        """
        交换变异:当前染色体 [1,5,4,2,6,8,7],交换1和5位置上元素变成了[1,8,4,2,6,5,7]
        Args:
            parent_chrom: 父代染色体

        Returns:
            child_chrom:交换变异产生的子代染色体
        """
        # 如果index1和index2相等,则交换变异相当于没有执行
        index1 = random.randint(0, self.chrom_len - 1)
        index2 = random.randint(0, self.chrom_len - 1)
        child_chrom = parent_chrom[:]
        child_chrom[index1], child_chrom[index2] = child_chrom[
            index2], child_chrom[index1]
        return child_chrom

    def mutate_reverse(self, parent_chrom):
        """
        逆转变异:随机选择两点(可能为同一点),逆转其中所有的元素
        parent_chrom = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        child_chrom  = [1, 2, 6, 5, 4, 3, 7, 8, 9]
        Args:
            parent_chrom:父代

        Returns:
            child_chrom:逆转变异后的子代
        """
        index1, index2 = random.randint(0, self.chrom_len - 1), random.randint(
            0, self.chrom_len - 1)
        if index1 > index2:
            index1, index2 = index2, index1
        child_chrom = parent_chrom[:]
        tmp = child_chrom[index1:index2]
        tmp.reverse()
        child_chrom[index1:index2] = tmp
        return child_chrom

    def mutate_insert(self, parent_chrom):
        """
        插入变异:随机选择两个位置,然后将这第二个位置上的元素插入到第一个元素后面。
        parent_chrom = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        child_chrom  = [1, 2, 4, 5, 3, 6, 7, 8, 9]
        Args:
            parent_chrom:父代

        Returns:
            child_chrom:子代
        """
        index1, index2 = random.randint(0, self.chrom_len - 1), random.randint(
            0, self.chrom_len - 1)
        child_chrom = parent_chrom[:]
        child_chrom.pop(index2)
        child_chrom.insert(index1 + 1, parent_chrom[index2])
        return child_chrom

    def mutation(self, population):
        """
        种群按概率执行变异操作
        Args:
            population: 种群

        Returns:
            new_population:新一代种群
        """
        """
        prob_sum表示一种累加和的列表,比如:
        四种变异可能性[0.2, 0.3, 0.4, 0.1]
        prob_sum = [0.2, 0.5, 0.9, 1]
        变异只有三种变异,这里采用了硬编码
        """
        prob_sum = []
        prob_sum.extend([
            self.mutation_swap_prob,
            self.mutation_swap_prob + self.mutation_reverse_prob, 1
        ])
        new_population = []
        for i in range(self.population_size):
            p = random.random()
            if p <= prob_sum[0]:
                # 交换变异
                child_chrom = self.mutate_swap(population[i])
                new_population.append(child_chrom)
            elif p <= prob_sum[1]:
                # 逆序变异
                child_chrom = self.mutate_reverse(population[i])
                new_population.append(child_chrom)
            else:
                # 插入变异
                child_chrom = self.mutate_insert(population[i])
                new_population.append(child_chrom)
        return new_population

    def compute_pop_fitness(self, population):
        """
        计算当前种群所有个体的的适应度
        Args:
            population: 种群

        Returns:
            种群所有个体的的适应度
        """
        return [self.fitness_func(chrom) for chrom in population]

    def get_best_chrom(self, population):
        """
        找到种群中最优个体
        Args:
            population: 种群

        Returns:
            population[index]:最优个体
            index:最优个体在种群中下标
        """
        tmp = self.compute_pop_fitness(population)
        index = tmp.index(max(tmp))
        return population[index], index

    def get_check_vertex_distance(self, chrom):
        """
        针对实验1:检查点路线
        根据染色体解码,得到所有旅行商走的路线以及每条路线总路程
        Args:
            chrom: 染色体

        Returns:
            all_routines:所有旅行商走的路线
            routines_dis:每条路线总路程组成列表
        """
        # 起始点5,城市9个,旅行商3,虚点10,11
        # [4, 6, 11, 9, 2, 1, 10, 7, 8, 3]
        tmp_chrom = chrom[:]
        # 将增加的虚点还原成起始点
        for i in range(len(chrom)):
            if chrom[i] in self.dummy_points:
                tmp_chrom[i] = self.start_index

        # 根据起始点把chrom分成多段
        one_routine = []  # 一个旅行商路线,可以为空
        all_routines = []  # 所有旅行商路线
        for v in tmp_chrom:
            if v == self.start_index:
                all_routines.append(one_routine)
                one_routine = []
            elif v != self.start_index:
                one_routine.append(v)
        # 还有一次需要添加路线
        all_routines.append(one_routine)

        routines_dis = []  # 所有路径总距离组成的列表
        # 计算每一条路总的距离
        for r in all_routines:
            distance = 0
            # 有一个旅行商路线为空列表,即一个旅行商不出门
            if len(r) == 0:
                distance = INF
                routines_dis.append(distance)
            else:
                r_len = len(r)
                for i in range(r_len):
                    # 别忘了最后加上起始点到第一个点的距离
                    if i == 0:
                        distance += self.graph_obj.adj_matrix[
                            self.start_index][r[i]]
                    if i + 1 < r_len:
                        distance += self.graph_obj.adj_matrix[r[i]][r[i + 1]]
                    # 最后一个顶点,下一站是起始点
                    elif i == r_len - 1:
                        distance += self.graph_obj.adj_matrix[r[i]][
                            self.start_index]
                routines_dis.append(distance)
        return all_routines, routines_dis

    def check_vertex_obj_func(self, chrom):
        """
        针对实验1:检查点路线
        计算个体的目标函数值
        目标函数 Z = distance_weight*总路程 + balance_weight*均衡度
        均衡度 = (max(l)-min(l))/ max(l)
        Args:
            chrom: 染色体(个体)

        Returns:
            obj:个体的目标函数值
        """
        all_routines, routines_dis = self.get_check_vertex_distance(chrom)
        sum_path = sum(routines_dis)
        max_path = max(routines_dis)
        min_path = min(routines_dis)
        balance = (max_path - min_path) / max_path
        obj = self.distance_weight * sum_path + \
              self.balance_weight * balance

        return obj

    def check_vertex_fitness_func(self, chrom):
        """
        针对实验 1:检查点路线
        计算个体的适应度值,即个体目标函数值的倒数
        Args:
            chrom:染色体

        Returns:
            个体的适应度
        """
        return math.exp(1.0 / self.check_vertex_obj_func(chrom))

    def check_vertex_ga_process(self):
        """
        针对实验 1:检查点路线
        GA的流程
        Returns:

        """
        self.check_vertex_init_population()
        best_dist_list = []  # 全局最优解每一条路径长度
        self.ga_process_iterator(best_dist_list,
                                 self.get_check_vertex_distance)

    def get_china_city_distance(self, chrom):
        """
        针对实验 2:运输机航线
        Args:
            chrom: 染色体

        Returns:
            all_routines:所有运输机路线组成列表
            routines_dis:每条路线总路程组成列表
        """
        # 起始点5,城市9个,旅行商3,虚点10,11
        # [4, 6, 11, 9, 2, 1, 10, 7, 8, 3]
        tmp_chrom = chrom[:]
        # 将增加的虚点还原成起始点
        for i in range(len(chrom)):
            if chrom[i] in self.dummy_points:
                tmp_chrom[i] = self.start_index
        # 根据起始点把chrom分成多段
        one_routine = []  # 一个旅行商路线,可以为空
        all_routines = []  # 所有旅行商路线
        for v in tmp_chrom:
            if v == self.start_index:
                all_routines.append(one_routine)
                one_routine = []
            elif v != self.start_index:
                one_routine.append(v)
        # 还有一次需要添加路线
        all_routines.append(one_routine)

        routines_dis = []  # 所有路径总距离组成的列表
        # 计算每一条路总的距离
        for r in all_routines:
            distance = 0
            # 有一个旅行商路线为空列表,即一个旅行商不出门
            if len(r) == 0:
                distance = INF
                routines_dis.append(distance)
            else:
                r_len = len(r)
                for i in range(r_len):
                    # 别忘了最后加上起始点到第一个点的距离
                    if i == 0:
                        distance += utils.geo_distance(
                            self.china_cities[self.start_index],
                            self.china_cities[r[i]])
                    if i + 1 < r_len:
                        distance += utils.geo_distance(
                            self.china_cities[r[i]],
                            self.china_cities[r[i + 1]])
                    # 最后一个顶点,下一站是起始点
                    elif i == r_len - 1:
                        distance += utils.geo_distance(
                            self.china_cities[r[i]],
                            self.china_cities[self.start_index])
                routines_dis.append(distance)
        return all_routines, routines_dis

    def china_city_obj_func(self, chrom):
        """
        针对实验 2:运输机航线
        计算个体的目标函数值
        目标函数 Z = distance_weight*总路程 + balance_weight*均衡度
        均衡度 = (max(l)-min(l))/ max(l)
        Args:
            chrom: 染色体(个体)

        Returns:
            obj:个体的目标函数值
        """
        all_routines, routines_dis = self.get_china_city_distance(chrom)
        sum_path = sum(routines_dis)
        max_path = max(routines_dis)
        min_path = min(routines_dis)
        balance = (max_path - min_path) / max_path
        obj = self.distance_weight * sum_path + \
              self.balance_weight * balance

        return obj

    def china_city_fitness_func(self, chrom):
        """
        针对实验 2:运输机航线
        计算个体的适应度值,即个体目标函数值的倒数
        Args:
            chrom:染色体

        Returns:
            个体的适应度
        """
        return math.exp(1.0 / self.china_city_obj_func(chrom))

    def china_city_ga_process(self):
        """
        针对实验 2:运输机航线
        GA流程
        Returns:

        """
        self.china_city_init_pop()
        best_dist_list = []  # 全局最优解每一条路径长度
        self.ga_process_iterator(best_dist_list, self.get_china_city_distance)

    def ga_process_iterator(self, best_dist_list, get_distance_func):
        """
        GA算法的迭代过程
        Args:
            best_dist_list: 全局最优解每一条路径长度
            get_distance_func: 计算距离使用的函数
                * get_check_vertex_distance:实验1
                * get_china_city_distance:实验2
        Returns:

        """
        # 遗传算法的迭代过程
        while self.gen_count <= self.MAX_GEN:
            # 每次迭代记录种群适应度之和,也可以不用记录
            # self.all_pop_fitness.append(sum(self.compute_pop_fitness(self.population)))

            # 锦标赛选择
            pop_new = self.binary_tourment_select(self.population)
            # -------------------交叉------------------------------------------
            # 随机数决定是否交叉
            p_cross = random.random()
            if p_cross <= self.cross_prob:
                pop_new = self.crossover(pop_new)
            # -------------------变异------------------------------------------
            # 随机数决定是否变异
            p_mutate = random.random()
            if p_mutate <= self.mutation_prob:
                pop_new = self.mutation(pop_new)
            # -------------------新的一代有关参数更新-------------------------------
            # *******************新的一代的最优个体有关参数更新**********************
            # 计算种群所有个体的适应度
            pop_fitness_list = self.compute_pop_fitness(pop_new)
            # 每代最优个体per_pop_best_chrom及其在种群中的下标best_index
            self.per_pop_best_chrom, best_index = self.get_best_chrom(pop_new)
            # 每代最优个体的适应度
            self.per_pop_best_chrom_fit = pop_fitness_list[best_index]
            # 每代最优个体最好的路径组成和每条路路径长度per_pop_best_dist_list
            self.per_pop_best_path, per_pop_best_dist_list = get_distance_func(
                self.per_pop_best_chrom)
            # 每代最优个体所有旅行商路线之和
            self.per_pop_best_dis_sum = sum(per_pop_best_dist_list)

            # 记录下每代最优个体
            self.all_per_pop_best_chrom.append(self.per_pop_best_chrom)
            # 记录下每代最优个体的适应度
            self.all_per_pop_best_chrom_fit.append(self.per_pop_best_chrom_fit)
            # 记录每次迭代过程中每代最优个体的总距离变化情况
            self.all_per_pop_best_dist_sum.append(self.per_pop_best_dis_sum)

            # *******************全局最优个体有关参数更新****************************
            # 每代最优个体与全局最优个体根据适应度比较,如果每代最优个体适应度更小,则更新全局最优个体
            if self.per_pop_best_chrom_fit > self.best_chrom_fit:
                self.best_chrom = self.per_pop_best_chrom
                self.best_chrom_fit = self.per_pop_best_chrom_fit
                # 全局最优个体最好的路径组成和每条路路径长度
                self.best_path, best_dist_list = get_distance_func(
                    self.best_chrom)
                # self.best_path = self.per_pop_best_path
                # 全局最优个体的路径之和
                self.best_dis_sum = self.per_pop_best_dis_sum

                # 记录下每次迭代过程中全局最优个体
                self.all_best_chrom.append(self.best_chrom)

            # 记录每次迭代过程中全局最优个体的适应度变化情况
            self.all_best_chrom_fit.append(self.best_chrom_fit)
            # 记录每次迭代过程中全局最优个体的总距离
            self.all_best_dist_sum.append(self.best_dis_sum)

            # 输出
            if self.gen_count % 50 == 0:
                print("经过%d次迭代" % self.gen_count)
                print("全局最优解距离为:%f,全局最优解长度为%d" %
                      (self.best_dis_sum, len(self.best_chrom)))
                print("全局最优解为{}".format(self.best_chrom))
                print("全局最优解路线为{}".format(self.best_path))
                print("全局最优解路线长度列表为{}".format(best_dist_list))
                print(
                    "---------------------------------------------------------"
                )
                print(
                    "每代最优解距离为:%f,每代最优解长度为%d" %
                    (self.per_pop_best_dis_sum, len(self.per_pop_best_chrom)))
                print("每代最优解为{}".format(self.per_pop_best_chrom))
                print("每代最优解路线为{}".format(self.per_pop_best_path))
                print("每代最优解路线长度列表为{}".format(per_pop_best_dist_list))
                print(
                    "**************************************************************************"
                )

            # *******************种群有关参数更新****************************
            # 更新种群
            self.population = pop_new
            # 计数器加1
            self.gen_count += 1
            # -------------------新的一代有关参数更新结束------------------------------------------------

    def plot_dis_sum_diff(self):
        """
        画出每代最优个体的总路程和全局最优个体总路程变化情况
        Returns:

        """
        plt.figure()
        x = [i for i in range(1, self.MAX_GEN + 1)]
        # 每代最优个体的总路程变化情况
        plt.plot(x, self.all_per_pop_best_dist_sum, color='r')
        # 全局最优个体总路程变化情况
        plt.plot(x, self.all_best_dist_sum, color='b')
        plt.legend(['每代最优个体', '全局最优个体'])
        plt.xlabel("迭代次数", fontsize=14)
        plt.ylabel("最优个体总路程", fontsize=14)

        plt.show()

    def plot_chrom_fit_diff(self):
        """
        画出每代最优个体的适应度和全局最优个体适应度变化情况
        Returns:

        """
        plt.figure()
        x = [i for i in range(1, self.MAX_GEN + 1)]
        per_pop_best_fit = [i for i in self.all_per_pop_best_chrom_fit]
        best_fit = [i for i in self.all_best_chrom_fit]
        # 每代最优个体的适应度变化情况
        plt.plot(x, per_pop_best_fit, color='r')
        # 全局最优个体适应度变化情况
        plt.plot(x, best_fit, color='b')
        plt.legend(['每代最优个体', '全局最优个体'])
        plt.xlabel("迭代次数", fontsize=14)
        plt.ylabel("最优个体适应度", fontsize=14)
        plt.show()

    def print_check_best_chrom_routine(self):
        """
        打印最优检查点路线
        Returns:

        """
        print("所有路线长度为:{}".format(self.best_dis_sum))
        best_path, best_dist_list = self.get_check_vertex_distance(
            self.best_chrom)
        # 打印全局最优个体的所有路线(包括起点和终点)
        for i in range(len(best_path)):
            print("第{}个巡检员路线长度为:{}".format(i + 1, best_dist_list[i]))
            print("第{}个巡检员路线为:".format(i + 1), end="")
            if len(best_path[i]) == 0:
                print("该巡检员不出门")  # 这种情况可以通过设置目标函数避免
            else:
                for j in range(len(best_path[i])):
                    if j == 0:
                        print(self.start_index, end="")
                        self.graph_obj.print_path(self.start_index,
                                                  best_path[i][j])
                    if j + 1 < len(best_path[i]):
                        self.graph_obj.print_path(best_path[i][j],
                                                  best_path[i][j + 1])
                    elif j == len(best_path[i]) - 1:
                        self.graph_obj.print_path(best_path[i][j],
                                                  self.start_index)
                print()

    def print_china_city_best_routine(self):
        """
        打印运输机最优航线
        Returns:

        """
        print("运输机所有路线长度为:{}".format(self.best_dis_sum))
        best_path, best_dist_list = self.get_china_city_distance(
            self.best_chrom)
        # 打印全局最优个体的所有路线城市(包括起点和终点)
        for i in range(len(best_path)):
            print("第{}架运输机路线长度为:{}".format(i + 1, best_dist_list[i]))
            print("第{}架运输机路线为:".format(i + 1), end="")
            if len(best_path[i]) == 0:
                print("该运输机不出发")  # 这种情况可以通过设置目标函数避免
            else:
                for j in range(len(best_path[i])):
                    if j == 0:
                        print("{} ——> {} ".format(
                            self.china_cities[self.start_index][0],
                            self.china_cities[best_path[i][j]][0]),
                              end="")
                    if j + 1 < len(best_path[i]):
                        print("——> {} ".format(
                            self.china_cities[best_path[i][j + 1]][0]),
                              end="")
                    elif j == len(best_path[i]) - 1:
                        print("——> {}".format(
                            self.china_cities[self.start_index][0]))
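
A note on Example #16: a hedged sketch of how the GA class appears meant to be driven, using only the methods defined above; the argument values and file name are invented for illustration:

ga = GA(salesman_num=3, city_num=9, start_index=5, filename='checkpoints.txt',
        max_gen=500, distance_weight=1.0, balance_weight=100.0)
ga.check_vertex_ga_process()          # experiment 1: checkpoint routes
ga.print_check_best_chrom_routine()
ga.plot_dis_sum_diff()
ga.plot_chrom_fit_diff()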
Example #17
def run(filename, train_sample, train_label, test_sample, test_label, title, M,
        thresh, CART_step):
    train_sample, train_sample_size = Load.loadSample(train_sample)
    train_label, train_label_size = Load.loadLabel(train_label)
    assert train_sample_size == train_label_size, 'train_sample_size does not match train_label_size'

    test_sample, test_sample_size = Load.loadSample(test_sample)
    test_label, test_label_size = Load.loadLabel(test_label)
    assert test_sample_size == test_label_size, 'test_sample_size does not match test_label_size'

    train_sample = Preprocess.normalize(train_sample,
                                        True).values.tolist()  # list
    test_sample = Preprocess.normalize(test_sample,
                                       True).values.tolist()  # list

    label_to_index = {
        label: index
        for index, label in enumerate(set(train_label['x'].tolist()))
    }
    train_index = Preprocess.labelMap(train_label, label_to_index)  # list
    test_index = Preprocess.labelMap(test_label, label_to_index)  # list

    input_size = len(train_sample[0])
    sample_size = len(train_sample)
    sample_weights = [1 / sample_size for _ in range(sample_size)]
    classifier_weights = []
    classifier_thresholds = []
    threshold_positions = []
    test_corrs = []
    test_times = [i + 1 for i in range(M)]

    for i in range(M):
        threshold, position, errors = Calc.CART(train_sample, train_index,
                                                sample_weights, thresh,
                                                CART_step)
        total_error = Calc.gentleError(np.array(sample_weights),
                                       np.array(errors))
        classifier_weights.append(round(Calc.classifierError(total_error), 3))
        classifier_thresholds.append(threshold)
        threshold_positions.append(position)
        sample_weights = Calc.updateVariableWeights(np.array(sample_weights),
                                                    total_error, errors)
        # print('errors: {}'.format(errors))
        # print('sample_weights: {}'.format(sample_weights))
        # print('classifier_threshold: {} in {}'.format(threshold, position))
        print('total_error: {}'.format(total_error))
        print('threshold_positions:   {}'.format(threshold_positions))
        print('classifier_thresholds: {}'.format(classifier_thresholds))
        print('classifier_weights:    {}'.format(classifier_weights))

        test_corr = 0
        test_size = len(test_sample)
        for sample, index in zip(test_sample, test_index):
            vote = 0
            for threshold, position, weight in zip(classifier_thresholds,
                                                   threshold_positions,
                                                   classifier_weights):
                if sample[position] >= threshold:
                    vote += weight
                elif sample[position] < threshold:
                    vote -= weight
            if vote >= 0 and index == 1:
                test_corr += 1
            elif vote < 0 and index == 0:
                test_corr += 1
        test_corrs.append(round(test_corr / test_size, 3))
        Log.log(filename, 'M: {}; correction: {}\n'.format(M, test_corrs[-1]))
        print(
            '-----------------thresh: {}; CART_step: {}; iter: {}-----------------'
            .format(thresh, CART_step, i + 1))

    Graph.draw(filename, test_times, test_corrs, test_times[-1], 1.0, title)
    return test_corrs
Example #18
        q.enqueue([(player.current_room, d)])
    visited = set()
    while q.size:
        path = q.dequeue()
        curr_room = path[-1][0]
        if "?" in gr.rooms[curr_room].values():
            return [d for _, d in path][1:]
        elif curr_room not in visited:
            visited.add(curr_room)
            for direction in curr_room.get_exits():
                next_room = curr_room.get_room_in_direction(direction)
                q.enqueue(path + [(next_room, direction)])
    return None


gr = Graph()
for room in world.rooms.values():
    gr.add_vertex(room)

while True:
    if not any("?" in d.values() for d in gr.rooms.values()):
        break
    linear_dir = gr.go_in_direction_until_dead_end(player.current_room)
    get_current_room(linear_dir)
    traversal_path += linear_dir
    path_to_unexplored_room = find_unexplored_room()
    if path_to_unexplored_room is not None:
        traversal_path += path_to_unexplored_room
        get_current_room(path_to_unexplored_room)

# TRAVERSAL TEST
Example #19
    def yield_test_questions_K_edges(self,
                                     resampled=False,
                                     K=1,
                                     subset=False,
                                     expand_outfit=False):
        """
        Yields questions, each of them with their own adj matrix.
        Each node on the question will be expanded to create K edges, so the adj
        matrix will have K*N edges.
        Also, the edges between the nodes of the outfit will be also present (except for the correct choice edges).
        The method to get this edges will be BFS.

        Args:
            resampled: if True, use the resampled version
            K: number of edges to expand for each question node
            subset: if true, use only a subset of the outfit as the query, and
                    use the rest as links to the choices.

        Returns:
            yields questions
        """
        assert K >= 0
        from utils import Graph

        # each question consists of N*4 edges to predict
        # self.questions is a list of questions with N elements and 4 possible choices (answers)
        questions = self.questions if not resampled else self.questions_resampled
        n_nodes = self.test_adj.shape[0]
        for question in questions:
            outfit_ids = []
            choices_ids = []
            gt = []
            valid = []
            # keep only a subset of the outfit
            if subset:
                outfit_subset = np.random.choice(question[0], 3, replace=False)
            else:
                outfit_subset = question[0]
            for index in outfit_subset:  # indexes of outfit nodes
                i = 0
                for index_answer in question[
                        1]:  # indexes of possible choices answers
                    outfit_ids.append(index)
                    choices_ids.append(index_answer)
                    gt.append(
                        int(i == 0))  # the correct connection is the first
                    # a link is valid if the candidate item is from the same category as the missing item
                    valid.append(int(question[2][i] == question[3]))
                    i += 1

            # question adj with only the outfit edges
            question_adj = sp.csr_matrix((n_nodes, n_nodes))
            question_adj = question_adj.tolil()
            if not expand_outfit:
                for j, u in enumerate(outfit_subset[:-1]):
                    for v in outfit_subset[j + 1:]:
                        question_adj[u, v] = 1
                        question_adj[v, u] = 1

            if K > 0:
                # the K edges sampled from each node must not belong to the outfit and must not be query edges, so remove them
                available_adj = self.test_adj.copy()
                available_adj = available_adj.tolil()
                for j, u in enumerate(question[0][:-1]):
                    for v in question[0][j + 1:]:
                        available_adj[u, v] = 0
                        available_adj[v, u] = 0
                if expand_outfit:  # activate intra-outfit edges
                    for j, u in enumerate(outfit_subset[:-1]):
                        for v in outfit_subset[j + 1:]:
                            available_adj[u, v] = 1
                            available_adj[v, u] = 1
                for u, v in zip(outfit_ids, choices_ids):
                    available_adj[u, v] = 0
                    available_adj[v, u] = 0
                available_adj = available_adj.tocsr()
                available_adj.eliminate_zeros()

                G = Graph(available_adj)

                extra_edges = []
                # now fill the adj matrix with the expanded edges for each node (only for the choices)
                nodes_to_expand = choices_ids[:4]

                if expand_outfit:  # expand the outfit items as well
                    nodes_to_expand.extend(outfit_subset)

                for node in nodes_to_expand:
                    edges = G.run_K_BFS(node, K)

                    for edge in edges:
                        u, v = edge
                        question_adj[u, v] = 1
                        question_adj[v, u] = 1
                        extra_edges.append(edge)

            question_adj = question_adj.tocsr()

            yield question_adj, np.array(outfit_ids), np.array(
                choices_ids), np.array(gt), np.array(valid)
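
A note on Example #19: utils.Graph.run_K_BFS is not shown. A plausible reading, sketched under the assumption that it returns the first K edges discovered by breadth-first search from a node over a scipy CSR adjacency matrix (the real utility may differ):

from collections import deque

class GraphSketch:
    """Hypothetical stand-in for utils.Graph; only run_K_BFS is sketched."""

    def __init__(self, adj):
        self.adj = adj.tocsr()  # scipy.sparse adjacency matrix

    def run_K_BFS(self, node, K):
        # collect the first K edges reached by BFS from `node`
        edges, seen, queue = [], {node}, deque([node])
        while queue and len(edges) < K:
            u = queue.popleft()
            for v in self.adj[u].indices:
                if v not in seen:
                    seen.add(v)
                    edges.append((u, v))
                    queue.append(v)
                    if len(edges) == K:
                        break
        return edges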
Example #20
def test_compatibility(args):
    args = namedtuple("Args", args.keys())(*args.values())
    load_from = args.load_from
    config_file = load_from + '/results.json'
    log_file = load_from + '/log.json'

    with open(config_file) as f:
        config = json.load(f)
    with open(log_file) as f:
        log = json.load(f)

    # Dataloader
    DATASET = config['dataset']
    if DATASET == 'polyvore':
        # load dataset
        dl = DataLoaderPolyvore()
        orig_train_features, adj_train, train_labels, train_r_indices, train_c_indices = dl.get_phase(
            'train')
        full_train_adj = dl.train_adj
        orig_val_features, adj_val, val_labels, val_r_indices, val_c_indices = dl.get_phase(
            'valid')
        orig_test_features, adj_test, test_labels, test_r_indices, test_c_indices = dl.get_phase(
            'test')
        full_test_adj = dl.test_adj
        dl.setup_test_compatibility(resampled=args.resampled)
    elif DATASET == 'ssense':
        dl = DataLoaderFashionGen()
        orig_train_features, adj_train, train_labels, train_r_indices, train_c_indices = dl.get_phase(
            'train')
        orig_val_features, adj_val, val_labels, val_r_indices, val_c_indices = dl.get_phase(
            'valid')
        orig_test_features, adj_test, test_labels, test_r_indices, test_c_indices = dl.get_phase(
            'test')
        adj_q, q_r_indices, q_c_indices, q_labels, q_ids, q_valid = dl.get_test_questions(
        )
        full_train_adj = dl.train_adj
        full_test_adj = dl.test_adj
        dl.setup_test_compatibility(resampled=args.resampled)
    else:
        raise NotImplementedError(
            'A data loader for dataset {} does not exist'.format(DATASET))

    NUMCLASSES = 2
    BN_AS_TRAIN = False
    ADJ_SELF_CONNECTIONS = True

    def norm_adj(adj_to_norm):
        return normalize_nonsym_adj(adj_to_norm)

    train_features, mean, std = dl.normalize_features(orig_train_features,
                                                      get_moments=True)
    val_features = dl.normalize_features(orig_val_features, mean=mean, std=std)
    test_features = dl.normalize_features(orig_test_features,
                                          mean=mean,
                                          std=std)

    train_support = get_degree_supports(adj_train,
                                        config['degree'],
                                        adj_self_con=ADJ_SELF_CONNECTIONS)
    val_support = get_degree_supports(adj_val,
                                      config['degree'],
                                      adj_self_con=ADJ_SELF_CONNECTIONS)
    test_support = get_degree_supports(adj_test,
                                       config['degree'],
                                       adj_self_con=ADJ_SELF_CONNECTIONS)

    for i in range(1, len(train_support)):
        train_support[i] = norm_adj(train_support[i])
        val_support[i] = norm_adj(val_support[i])
        test_support[i] = norm_adj(test_support[i])

    num_support = len(train_support)
    placeholders = {
        'row_indices':
        tf.compat.v1.placeholder(tf.int32, shape=(None, )),
        'col_indices':
        tf.compat.v1.placeholder(tf.int32, shape=(None, )),
        'dropout':
        tf.compat.v1.placeholder_with_default(0., shape=()),
        'weight_decay':
        tf.compat.v1.placeholder_with_default(0., shape=()),
        'is_train':
        tf.compat.v1.placeholder_with_default(True, shape=()),
        'support': [
            tf.compat.v1.sparse_placeholder(tf.float32, shape=(None, None))
            for sup in range(num_support)
        ],
        'node_features':
        tf.compat.v1.placeholder(tf.float32, shape=(None, None)),
        'labels':
        tf.compat.v1.placeholder(tf.float32, shape=(None, ))
    }

    model = CompatibilityGAE(placeholders,
                             input_dim=train_features.shape[1],
                             num_classes=NUMCLASSES,
                             num_support=num_support,
                             hidden=config['hidden'],
                             learning_rate=config['learning_rate'],
                             logging=True,
                             batch_norm=config['batch_norm'])

    # Construct feed dicts for train, val and test phases
    train_feed_dict = construct_feed_dict(placeholders, train_features,
                                          train_support, train_labels,
                                          train_r_indices, train_c_indices,
                                          config['dropout'])
    val_feed_dict = construct_feed_dict(placeholders,
                                        val_features,
                                        val_support,
                                        val_labels,
                                        val_r_indices,
                                        val_c_indices,
                                        0.,
                                        is_train=BN_AS_TRAIN)
    test_feed_dict = construct_feed_dict(placeholders,
                                         test_features,
                                         test_support,
                                         test_labels,
                                         test_r_indices,
                                         test_c_indices,
                                         0.,
                                         is_train=BN_AS_TRAIN)

    # Add ops to save and restore all the variables.
    saver = tf.compat.v1.train.Saver()

    def run_val_check():
        # control value: if the model was restored correctly, this matches the value in the log
        val_avg_loss, val_acc, conf, pred = sess.run(
            [model.loss, model.accuracy, model.confmat,
             model.predict()],
            feed_dict=val_feed_dict)

        print("val_loss=", "{:.5f}".format(val_avg_loss), "val_acc=",
              "{:.5f}".format(val_acc))

    with tf.compat.v1.Session() as sess:
        saver.restore(sess, load_from + '/' + 'best_epoch.ckpt')

        count = 0
        preds = []
        labels = []

        # evaluate the model as a restoration sanity check
        run_val_check()

        prob_act = tf.nn.sigmoid

        K = args.k
        for outfit in dl.comp_outfits:
            before_item = time.time()
            items, score = outfit

            num_new = test_features.shape[0]

            new_adj = sp.lil_matrix((num_new, num_new))  # no connections yet; lil supports cheap item assignment

            if args.k > 0:
                # add edges to the adj matrix
                available_adj = dl.test_adj.copy()
                available_adj = available_adj.tolil()

                for i, idx_from in enumerate(items[:-1]):
                    for idx_to in items[i + 1:]:
                        # remove outfit edges, they won't be expanded
                        available_adj[idx_to, idx_from] = 0
                        available_adj[idx_from, idx_to] = 0
                available_adj = available_adj.tocsr()
                available_adj.eliminate_zeros()

            if args.subset:  # use only a subset (of size 3) of the outfit
                items = np.random.choice(items, 3, replace=False)  # without replacement, so items stay distinct

            new_features = test_features

            # predict edges between the items
            query_r = []
            query_c = []

            for i, idx_from in enumerate(items[:-1]):
                for idx_to in items[i + 1:]:
                    query_r.append(idx_from)
                    query_c.append(idx_to)

            if args.k > 0:
                G = Graph(available_adj)
                nodes_to_expand = np.unique(items)
                for node in nodes_to_expand:
                    edges = G.run_K_BFS(node, K)
                    for edge in edges:
                        u, v = edge
                        new_adj[u, v] = 1
                        new_adj[v, u] = 1

            query_r = np.array(query_r)
            query_c = np.array(query_c)

            new_adj = new_adj.tocsr()

            new_support = get_degree_supports(
                new_adj,
                config['degree'],
                adj_self_con=ADJ_SELF_CONNECTIONS,
                verbose=False)
            for i in range(1, len(new_support)):
                new_support[i] = norm_adj(new_support[i])
            new_support = [sparse_to_tuple(sup) for sup in new_support]

            new_feed_dict = construct_feed_dict(placeholders,
                                                new_features,
                                                new_support,
                                                train_labels,
                                                query_r,
                                                query_c,
                                                0.,
                                                is_train=BN_AS_TRAIN)

            pred = sess.run(prob_act(model.outputs), feed_dict=new_feed_dict)

            predicted_score = pred.mean()
            print("[{}] Mean scores between outfit: {:.4f}, label: {}".format(
                count, predicted_score, score))
            # TODO: remove this print
            print("Total Elapsed: {:.4f}".format(time.time() - before_item))
            count += 1

            preds.append(predicted_score)
            labels.append(score)

        preds = np.array(preds)
        labels = np.array(labels)

        AUC = compute_auc(preds, labels)

        # control value: if the model is intact, this matches the value stored in the log
        run_val_check()

        print('The AUC compat score is: {}'.format(AUC))

    print('Best val score saved in log: {}'.format(config['best_val_score']))
    print('Last val score saved in log: {}'.format(log['val']['acc'][-1]))

    print("mean positive prediction: {}".format(
        preds[labels.astype(bool)].mean()))
    print("mean negative prediction: {}".format(preds[np.logical_not(
        labels.astype(bool))].mean()))
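
# The K-BFS expansion above relies on utils.Graph.run_K_BFS; a minimal sketch of what
# such a helper plausibly does, inferred only from how it is called here (an assumption,
# not the original implementation): collect up to K edges per node via breadth-first search.
from collections import deque

def run_k_bfs_sketch(adj_csr, start, K):
    """Return up to K edges (u, v) discovered by BFS from start over a csr adjacency."""
    visited = {start}
    edges = []
    q = deque([start])
    while q and len(edges) < K:
        u = q.popleft()
        for v in adj_csr[u].nonzero()[1]:  # column indices of the row's nonzero entries
            if v not in visited:
                visited.add(v)
                edges.append((u, v))
                q.append(v)
                if len(edges) == K:
                    break
    return edges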
Example #21
0
def estimate_extrinsics_pnp(tagpose_estimator,
                            cam_intrinsic,
                            cam_dist,
                            point2d_coord,
                            point2d_cid,
                            point2d_fid,
                            point2d_pid,
                            point2d_mid,
                            verbose=0):
    """ Estimates extrinsic parameters for each camera from the given 2D point correspondences alone.
        It estimates the essential matrix for camera pairs along the observation graph.

    Input:
        tagpose_estimator: custom object, Estimates the pose between a camera and the calibration objects.
        cam_intrinsic: list of 3x3 np.array, Intrinsic calibration of each camera.
        cam_dist: list of 1x5 np.array, Distortion coefficients following the OpenCV pinhole camera model.
        point2d_coord: Nx2 np.array, Array containing 2D coordinates of N points.
        point2d_cid: Nx1 np.array, Array containing the camera id for each of the N points.
        point2d_fid: Nx1 np.array, Array containing the frame id for each of the N points.
        point2d_pid: Nx1 np.array, Array containing a unique point id for each of the N points.
        point2d_mid: Nx1 np.array, Array containing a marker-unique id for each of the N points.

    Returns:
        cam_extrinsic: list of 4x4 np.array, Extrinsic calibration of each camera.
        point3d_coord: Mx3 np.array, 3D points of the calibration object in the world frame.
        pid2d_to_pid3d: dict, Mapping from 2D point ids to the corresponding 3D point ids.
        object_poses: list of 4x4 np.array, Estimated pose of the calibration object per frame.
    """
    assert len(cam_intrinsic) >= 2, "Too few cameras."
    assert len(cam_intrinsic) == len(cam_dist), "Shape mismatch."
    assert len(point2d_cid.shape) == 1, "Shape mismatch."
    assert len(point2d_fid.shape) == 1, "Shape mismatch."
    assert len(point2d_pid.shape) == 1, "Shape mismatch."
    assert len(point2d_mid.shape) == 1, "Shape mismatch."
    assert point2d_coord.shape[0] == point2d_cid.shape[0], "Shape mismatch."
    assert point2d_coord.shape[0] == point2d_fid.shape[0], "Shape mismatch."
    assert point2d_coord.shape[0] == point2d_pid.shape[0], "Shape mismatch."
    assert point2d_coord.shape[0] == point2d_mid.shape[0], "Shape mismatch."
    assert len(cam_intrinsic) == len(
        np.unique(point2d_cid).flatten().tolist()), "Shape mismatch."

    if verbose > 0:
        print('\n\n------------')
        print('- Estimating extrinsic parameters by solving PNP problems')

    num_cams = len(cam_intrinsic)
    num_frames = np.max(point2d_fid) + 1

    # get model shape
    calib_object_points3d = tagpose_estimator.object_points.copy()

    # 1. Iterate cams and estimate relative pose to the calibration object for each frame
    scores_object, T_obj2cam = estimate_and_score_object_poses(
        tagpose_estimator, point2d_coord, point2d_cid, point2d_fid,
        point2d_mid, cam_intrinsic, cam_dist)

    def _calc_score(T1, T2):
        """ Estimates how closely T1 * T2 = eye() holds. """
        R = np.matmul(T1, T2) - np.eye(T1.shape[0])
        return np.sum(np.abs(R))  # entrywise L1 norm of the deviation from identity

    # try to find the pair of frames which worked best --> estimate relative camera pose from there
    scores_rel_calib = dict()  # how good each frame pair seems to be for calibrating a cam pair
    for fid1 in range(num_frames):  # for each pair of frames
        for fid2 in range(fid1, num_frames):
            scores_rel_calib[fid1, fid2] = dict()
            for cid1 in range(num_cams):  # check each pair of cams
                for cid2 in range(cid1 + 1, num_cams):
                    # check if its valid
                    if (T_obj2cam[fid1][cid2] is None) or (T_obj2cam[fid1][cid1] is None) or \
                            (T_obj2cam[fid2][cid2] is None) or (T_obj2cam[fid2][cid1] is None):
                        s_rel = float('inf')
                    else:

                        # calculate the transformation cam1 -> cams2 using fid1
                        T12_fid1 = np.matmul(
                            T_obj2cam[fid1][cid2],
                            np.linalg.inv(T_obj2cam[fid1][cid1]))

                        # calculate the transformation cam2 -> cams1 using fid2
                        T21_fid2 = np.matmul(
                            T_obj2cam[fid2][cid1],
                            np.linalg.inv(T_obj2cam[fid2][cid2]))

                        # for perfect estimations the two mappings should be the inverse of each others
                        s_rel = _calc_score(T12_fid1, T21_fid2)

                    scores_rel_calib[fid1, fid2][cid1, cid2] = s_rel

    # 2. Find out which frames are optimal for a given cam pair
    cam_pair_best_fid = dict()
    for cid1 in range(num_cams):
        for cid2 in range(cid1 + 1, num_cams):
            min_fid = None
            min_v = float('inf')
            for fid_pair, score_dict in scores_rel_calib.items():
                # get an initial value
                if min_fid is None:
                    min_fid = fid_pair
                    min_v = score_dict[cid1, cid2]
                    continue

                # if current best is worse than current item replace
                if min_v > score_dict[cid1, cid2]:
                    min_fid = fid_pair
                    min_v = score_dict[cid1, cid2]

            cam_pair_best_fid[cid1, cid2] = min_fid

    # 3. Build observation graph and use Dijkstra to estimate relative camera poses
    observation_graph = Graph()
    for cid in range(num_cams):
        observation_graph.add_node(cid)

    # populate with edges
    score_accumulated = [0 for _ in range(num_cams)]  # accumulated score for each cam
    for cid1 in range(num_cams):
        for cid2 in range(cid1 + 1, num_cams):
            fid_pair = cam_pair_best_fid[cid1, cid2]
            s = scores_rel_calib[fid_pair][cid1, cid2]
            observation_graph.add_edge(cid1, cid2, s)
            observation_graph.add_edge(cid2, cid1, s)
            score_accumulated[cid1] += s
            score_accumulated[cid2] += s

    # root cam (the one that has the lowest overall score)
    root_cam_id = np.argmin(np.array(score_accumulated))

    if verbose > 1:
        print('- Accumulated score (lower is better): ', score_accumulated)
        print('- Choosing root cam', root_cam_id)

    # 4. Determine which relative poses to estimate
    # use Dijkstra to find the "cheapest" path (i.e. the lowest accumulated inconsistency score)
    # from the root cam to all others
    cam_path = dict()  # cam_path[target_cam] = path from the root cam to target_cam
    for cid in range(num_cams):
        if cid == root_cam_id:
            cam_path[cid] = [cid]
            continue
        cost, camchain = shortest_path(observation_graph, root_cam_id, cid)
        cam_path[cid] = camchain

    if verbose > 1:
        for k, v in cam_path.items():
            print('- Camchain to %d: ' % k, v)

    # 5. Put together the relative camera poses
    relative_pose = dict()  # trafo from the root cam -> target cam
    relative_pose_pair = dict()  # already estimated poses between camera pairs;
    # relative_pose_pair[i, j] is the trafo from j -> i:  xi = relative_pose_pair[i, j] * xj
    for target_camid in range(num_cams):
        if target_camid == root_cam_id:
            relative_pose[target_camid] = np.eye(4)
            continue

        M = np.eye(4)  # this is the trafo from start -> X
        for i in range(len(cam_path[target_camid]) - 1):  # traverse cam path
            # get the current cam pair on the cam_path
            inter_camid1 = cam_path[target_camid][i]      # this is where we currently are
            inter_camid2 = cam_path[target_camid][i + 1]  # this is where we want to transform to

            swapped = False
            if inter_camid2 < inter_camid1:
                inter_camid1, inter_camid2 = inter_camid2, inter_camid1
                swapped = True

            if verbose > 1:
                print(
                    '- Attempting to estimate the relative pose from cam %d --> %d'
                    % (inter_camid1, inter_camid2))

            # calculate only when not calculated yet
            if (inter_camid1, inter_camid2) not in relative_pose_pair.keys():
                fid1, fid2 = cam_pair_best_fid[inter_camid1, inter_camid2]

                msg = "Calibration impossible! There is no way feasible way to calibrate cam%d and cam%d." % \
                (inter_camid1, inter_camid2)
                assert T_obj2cam[fid1][inter_camid1] is not None, msg
                assert T_obj2cam[fid1][inter_camid2] is not None, msg

                # calculate the transformation cam1 -> cams2 using the optimal fids
                T12 = np.matmul(T_obj2cam[fid1][inter_camid1],
                                np.linalg.inv(T_obj2cam[fid1][inter_camid2]))
                relative_pose_pair[inter_camid1, inter_camid2] = T12

            delta = relative_pose_pair[inter_camid1, inter_camid2]
            if swapped:
                delta = np.linalg.inv(delta)

            # accumulate trafos
            M = np.matmul(delta, M)
        relative_pose[target_camid] = M

    if verbose > 0:
        print('- Extrinsics estimated')

    if verbose > 2:
        for cid in range(num_cams):
            print('\n- Trafo Root (%d) --> %d' % (root_cam_id, cid))
            print(relative_pose[cid])
            print('')

    cam_extrinsic = list()
    for cid in range(num_cams):
        cam_extrinsic.append(relative_pose[cid])

    # 6. Figure out the object poses (if there is no observation it's impossible)
    object_poses = greedy_pick_object_pose(scores_object, T_obj2cam,
                                           relative_pose, verbose)

    cam_extrinsic, object_poses = _center_extrinsics(
        cam_extrinsic, object_poses)  # ensure camera 0 is the world center
    point3d_coord, pid2d_to_pid3d = calc_3d_object_points(
        calib_object_points3d, object_poses, point2d_fid, point2d_cid,
        point2d_mid)
    return cam_extrinsic, point3d_coord, pid2d_to_pid3d, object_poses
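
# A small numpy sketch of the inverse-consistency score used by _calc_score above (all
# names and numbers here are illustrative, not from the original pipeline): for perfect
# estimates, the cam1->cam2 trafo from one frame composed with the cam2->cam1 trafo from
# another frame is the identity, so the entrywise L1 deviation from eye(4) measures quality.
import numpy as np

def make_pose(angle_deg, t):
    """Build a 4x4 rigid trafo: rotation around z by angle_deg, translation by t."""
    a = np.deg2rad(angle_deg)
    T = np.eye(4)
    T[:2, :2] = [[np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]]
    T[:3, 3] = t
    return T

T12 = make_pose(30.0, [1.0, 0.0, 0.0])           # cam1 -> cam2, estimated from frame A
T21_good = np.linalg.inv(T12)                    # cam2 -> cam1, perfectly consistent
T21_noisy = make_pose(-29.0, [-0.9, 0.45, 0.0])  # cam2 -> cam1, slightly off

def calc_score(T1, T2):
    return np.sum(np.abs(np.matmul(T1, T2) - np.eye(4)))

print(calc_score(T12, T21_good))   # 0.0  -> consistent pair of frames
print(calc_score(T12, T21_noisy))  # > 0  -> penalized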
Example #22
0
def earliest_ancestor(ancestors, starting_node):
    """
        10
     /
    1   2   4  11
     \ /   / \ /
      3   5   8
       \ / \   \
        6   7   9
    Write a function that, given the dataset and the ID of an individual in the dataset, returns their earliest known ancestor – the one at the farthest distance from the input individual. If there is more than one ancestor tied for "earliest", return the one with the lowest numeric ID. If the input individual has no parents, the function should return -1.
    """
    # UPER plan:
    # - build a graph with edges pointing from child to parent
    # - BFS from the starting node, tracking the longest path found
    # - if the starting node has no parents, return -1
    # - on ties for the longest path, prefer the lower numeric ID

    graph = Graph()
    for pair in ancestors:
        graph.add_vertex(pair[0])
        graph.add_vertex(pair[1])
        # edge from child to parent, so BFS walks toward ancestors
        graph.add_edge(pair[1], pair[0])

    q = Queue()
    q.enqueue([starting_node])
    max_path_len = 1
    earliest_ancestor = -1

    while q.size() > 0:
        path = q.dequeue()
        v = path[-1]

        # if path is longer or equal and value is smaller, or path is longer
        if (len(path) >= max_path_len
                and v < earliest_ancestor) or (len(path) > max_path_len):
            earliest_ancestor = v
            max_path_len = len(path)

        for neighbor in graph.vertices[v]:
            path_copy = list(path)
            path_copy.append(neighbor)
            q.enqueue(path_copy)
    return earliest_ancestor
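
# A minimal usage sketch (assuming the Graph and Queue helpers from utils are available);
# the pairs mirror the family tree drawn in the docstring above.
test_ancestors = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7), (4, 5),
                  (4, 8), (8, 9), (11, 8), (10, 1)]
print(earliest_ancestor(test_ancestors, 6))   # -> 10 (farthest ancestor of 6)
print(earliest_ancestor(test_ancestors, 8))   # -> 4  (tie on distance; lowest ID wins)
print(earliest_ancestor(test_ancestors, 10))  # -> -1 (10 has no parents)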
Example #23
0
from utils import Graph, bGraph
from z3 import *
import argparse

# MetaGraphDef protobuf definitions ship with TensorFlow
from tensorflow.core.protobuf import meta_graph_pb2

parser = argparse.ArgumentParser()
parser.add_argument("meta", nargs='?', default="check_string_for_empty")
args = parser.parse_args()

meta_graph = meta_graph_pb2.MetaGraphDef()
input_file_name = args.meta  # e.g. "myGraph.meta"
with open(input_file_name, 'rb') as fin:
    file_content = fin.read()
meta_graph.ParseFromString(file_content)
graph = meta_graph.graph_def

mg = Graph(len(graph.node))
nodeIdMap={}
idNodeMap={}
predIdMap={}
for idx, node in enumerate(graph.node):
  nodeIdMap[node.name] = idx
  idNodeMap[idx] = node

for node in graph.node:
  nid = nodeIdMap[node.name]
  predIdMap[nid]=[]

for node in graph.node:
  nid = nodeIdMap[node.name]
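
# A plausible continuation (an assumption only; the snippet is cut off above): a node's
# inputs name its predecessors, so predIdMap would typically be filled per node like
#   for inp in node.input:
#       predIdMap[nid].append(nodeIdMap[inp.split(':')[0].lstrip('^')])
# (TF input names may carry a ":<port>" suffix or a "^" control-dependency prefix.)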
Example #24
0
if opt.dataset == 'diginetica':
    n_node = 43098
elif opt.dataset == 'yoochoose1_64' or opt.dataset == 'yoochoose1_4':
    n_node = 37484

train_data = Data(train_data,
                  all_seqs=all_train_seq,
                  method=opt.method,
                  shuffle=True,
                  maxlen=opt.max_len)
test_data = Data(test_data,
                 method=opt.method,
                 shuffle=False,
                 maxlen=opt.max_len)
graph = Graph(all_train_seq, opt.num_length, opt.num_walks, opt.skip_window,
              opt.cide_batch_size)
g_node = graph.num_node

model = LGSR(hidden_size=opt.hidden_size,
             emb_size=opt.emb_size,
             n_node=n_node,
             method=opt.method,
             lr=opt.lr,
             l2=opt.l2,
             step=opt.step,
             decay=0.1 * len(train_data.inputs) / opt.batch_size,
             lr_dc=opt.lr_dc,
             dropout=opt.dropout,
             cide=opt.cide,
             g_node=g_node,
             n_sample=opt.n_sample)
Example #25
0
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input-file-path", default="./input.csv")
    parser.add_argument("-nb", "--bags-count", type=int, default=0)
    # argparse's type=bool is a known pitfall (bool("False") is True), so use a flag instead
    parser.add_argument("-sc", "--show-cost", action="store_true", default=False)

    args = parser.parse_args()
    argsdict = vars(args)
    # Validate input
    assert 0 <= argsdict["bags_count"] <= 2, "The value of bags count should be between 0 and 2"
    assert argsdict["input_file_path"].endswith('.csv'), "the path provided isn't a CSV file"
    try:
        df = pd.read_csv(argsdict["input_file_path"])
        
    except OSError as e:
        print(e)
        print("Using the input.csv")
        argsdict["input_file_path"] = "./input.csv"
        df = pd.read_csv(argsdict["input_file_path"])

    # Creating flight instances
    flights = [Flight(x) for _, x in df.iterrows()]
    # building the graph
    all_places = set()
    for x in flights:
        all_places.add(x.source)
        all_places.add(x.dest)
    graph = Graph(all_places)
    graph.add_flights(flights)
    for place in all_places:
        print(f"Starting Location {place}..")
        passenger = Passenger(argsdict["bags_count"], place)
        graph.find_combinations(passenger, argsdict["show_cost"])
Example #26
0
def create_bags_edgelist_weighted(rules_dict):
    # opening reconstructed from the call below; the original snippet is cut off here
    edgelist = []
    for k, v in rules_dict.items():
        for i in v:
            if i[0] == 'no':
                weight = 0
            else:
                weight = int(i[0])
            edgelist.append((k, i[1], weight))
    return edgelist


# rules_dict = read_rules(fpath='data/07-demo.txt')
# rules_dict = read_rules(fpath='data/07-demo-b.txt')
rules_dict = read_rules(fpath='data/07.txt')

edgelist = create_bags_edgelist_weighted(rules_dict)

g = Graph(directed=True, edgelist=edgelist)

print(g)


def num_contains(g, bag):
    total_bags = 0
    for i in g.g[bag]:
        num_bags_dir_inside = g.weights[bag, i]
        # print('bag:', bag, '; i:', i,
        #     '; num_bags_dir_inside: ', num_bags_dir_inside)
        if num_bags_dir_inside != 0:
            total_bags += (num_bags_dir_inside +
                           num_bags_dir_inside * num_contains(g, i))
    return total_bags
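
# A worked sketch of the recursion above with illustrative numbers (not puzzle data):
# if bag A holds 2 of bag B and each B holds 3 of bag C (which is empty), then
#   num_contains(A) = 2 + 2 * num_contains(B) = 2 + 2 * (3 + 3 * 0) = 8
# i.e. the 2 B bags plus the 6 C bags nested inside them.
# print(num_contains(g, 'shiny gold'))  # hypothetical part-2 style query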
Example #27
0
    # -----------------------------------
    # Training and testing of the network
    # -----------------------------------

    network = student.TinyNet(device, 2).to(device)

    # Loading of the initial weights
    # If none, evaluation of the scratch network (a bit useless)
    if weights_path is not None:
        network.load_state_dict(
            torch.load(weights_path, map_location=args.device))

    # Graph instantiation
    graph_path = save_path + "/graphs/"
    graph_metrics = graph.Graph(graph_path, "Offline")
    graph_metrics.set_title("Evolution on " + str(testingset_size) +
                            " moving test images with border (" +
                            str(args.border[0]) + "," + str(args.border[1]) +
                            ") of ")
    graph_metrics.set_names(
        ["F1", "Jaccard", "Precision", "TPR", "FPR", "Accuracy"])

    counter_start_testset = trainingset_size
    counter_stop_testset = counter_start_testset + testingset_size

    network.eval()

    while True:

        images = dataset_images[counter_start_testset:counter_stop_testset]
Example #28
0
from utils import Graph
from colorama import Fore
from PyInquirer import prompt

graph = Graph()
selection = ""

graph.add_vertex("A")
graph.add_vertex("A#/Bb")
graph.add_vertex("B")
graph.add_vertex("C")
graph.add_vertex("C#/Db")
graph.add_vertex("D")
graph.add_vertex("D#/Eb")
graph.add_vertex("E")
graph.add_vertex("F")
graph.add_vertex("F#/Gb")
graph.add_vertex("G")
graph.add_vertex("G#/Ab")

graph.add_edge("A", "A#/Bb")
graph.add_edge("A#/Bb", "B")
graph.add_edge("B", "C")
graph.add_edge("C", "C#/Db")
graph.add_edge("C#/Db", "D")
graph.add_edge("D", "D#/Eb")
graph.add_edge("D#/Eb", "E")
graph.add_edge("E", "F")
graph.add_edge("F", "F#/Gb")
graph.add_edge("F#/Gb", "G")
graph.add_edge("G", "G#/Ab")
Example #29
0
def test_amazon(args):
    args = namedtuple("Args", args.keys())(*args.values())

    load_from = args.load_from
    config_file = load_from + '/results.json'
    log_file = load_from + '/log.json'

    with open(config_file) as f:
        config = json.load(f)
    with open(log_file) as f:
        log = json.load(f)

    NUMCLASSES = 2
    BN_AS_TRAIN = False
    ADJ_SELF_CONNECTIONS = True

    # evaluate in the specified version
    print("Trained with {}, evaluating with {}".format(config['amz_data'],
                                                       args.amz_data))
    cat_rel = args.amz_data
    dp = DataLoaderAmazon(cat_rel=cat_rel)
    train_features, adj_train, train_labels, train_r_indices, train_c_indices = dp.get_phase(
        'train')
    _, adj_val, val_labels, val_r_indices, val_c_indices = dp.get_phase(
        'valid')
    _, adj_test, test_labels, test_r_indices, test_c_indices = dp.get_phase(
        'test')
    full_adj = dp.adj

    def norm_adj(adj_to_norm):
        return normalize_nonsym_adj(adj_to_norm)

    train_features, mean, std = dp.normalize_features(train_features,
                                                      get_moments=True)

    train_support = get_degree_supports(adj_train,
                                        config['degree'],
                                        adj_self_con=ADJ_SELF_CONNECTIONS)
    val_support = get_degree_supports(adj_val,
                                      config['degree'],
                                      adj_self_con=ADJ_SELF_CONNECTIONS)
    test_support = get_degree_supports(adj_test,
                                       config['degree'],
                                       adj_self_con=ADJ_SELF_CONNECTIONS)

    for i in range(1, len(train_support)):
        train_support[i] = norm_adj(train_support[i])
        val_support[i] = norm_adj(val_support[i])
        test_support[i] = norm_adj(test_support[i])

    num_support = len(train_support)

    placeholders = {
        'row_indices':
        tf.compat.v1.placeholder(tf.int32, shape=(None, )),
        'col_indices':
        tf.compat.v1.placeholder(tf.int32, shape=(None, )),
        'dropout':
        tf.compat.v1.placeholder_with_default(0., shape=()),
        'weight_decay':
        tf.compat.v1.placeholder_with_default(0., shape=()),
        'is_train':
        tf.compat.v1.placeholder_with_default(True, shape=()),
        'support': [
            tf.compat.v1.sparse_placeholder(tf.float32, shape=(None, None))
            for sup in range(num_support)
        ],
        'node_features':
        tf.compat.v1.placeholder(tf.float32, shape=(None, None)),
        'labels':
        tf.compat.v1.placeholder(tf.float32, shape=(None, ))
    }

    model = CompatibilityGAE(placeholders,
                             input_dim=train_features.shape[1],
                             num_classes=NUMCLASSES,
                             num_support=num_support,
                             hidden=config['hidden'],
                             learning_rate=config['learning_rate'],
                             logging=True,
                             batch_norm=config['batch_norm'])

    train_feed_dict = construct_feed_dict(placeholders, train_features,
                                          train_support, train_labels,
                                          train_r_indices, train_c_indices,
                                          config['dropout'])
    # No dropout for validation and test runs
    val_feed_dict = construct_feed_dict(placeholders,
                                        train_features,
                                        val_support,
                                        val_labels,
                                        val_r_indices,
                                        val_c_indices,
                                        0.,
                                        is_train=BN_AS_TRAIN)
    test_feed_dict = construct_feed_dict(placeholders,
                                         train_features,
                                         test_support,
                                         test_labels,
                                         test_r_indices,
                                         test_c_indices,
                                         0.,
                                         is_train=BN_AS_TRAIN)

    # Add ops to save and restore all the variables.
    saver = tf.compat.v1.train.Saver()

    with tf.compat.v1.Session() as sess:
        saver.restore(sess, load_from + '/' + 'best_epoch.ckpt')

        val_avg_loss, val_acc, conf, pred = sess.run(
            [model.loss, model.accuracy, model.confmat,
             model.predict()],
            feed_dict=val_feed_dict)

        print("val_loss=", "{:.5f}".format(val_avg_loss), "val_acc=",
              "{:.5f}".format(val_acc))

        test_avg_loss, test_acc, conf = sess.run(
            [model.loss, model.accuracy, model.confmat],
            feed_dict=test_feed_dict)

        print("test_loss=", "{:.5f}".format(test_avg_loss), "test_acc=",
              "{:.5f}".format(test_acc))

        # rerun for K=0 (all in parallel)
        k_0_adj = sp.csr_matrix(adj_val.shape)
        k_0_support = get_degree_supports(k_0_adj,
                                          config['degree'],
                                          adj_self_con=ADJ_SELF_CONNECTIONS,
                                          verbose=False)
        for i in range(1, len(k_0_support)):
            k_0_support[i] = norm_adj(k_0_support[i])
        k_0_support = [sparse_to_tuple(sup) for sup in k_0_support]

        k_0_val_feed_dict = construct_feed_dict(placeholders,
                                                train_features,
                                                k_0_support,
                                                val_labels,
                                                val_r_indices,
                                                val_c_indices,
                                                0.,
                                                is_train=BN_AS_TRAIN)
        k_0_test_feed_dict = construct_feed_dict(placeholders,
                                                 train_features,
                                                 k_0_support,
                                                 test_labels,
                                                 test_r_indices,
                                                 test_c_indices,
                                                 0.,
                                                 is_train=BN_AS_TRAIN)

        val_avg_loss, val_acc, conf, pred = sess.run(
            [model.loss, model.accuracy, model.confmat,
             model.predict()],
            feed_dict=k_0_val_feed_dict)
        print("for k=0 val_loss=", "{:.5f}".format(val_avg_loss),
              "for k=0 val_acc=", "{:.5f}".format(val_acc))

        test_avg_loss, test_acc, conf = sess.run(
            [model.loss, model.accuracy, model.confmat],
            feed_dict=k_0_test_feed_dict)
        print("for k=0 test_loss=", "{:.5f}".format(test_avg_loss),
              "for k=0 test_acc=", "{:.5f}".format(test_acc))

        K = args.k

        available_adj = dp.full_valid_adj + dp.full_train_adj
        available_adj = available_adj.tolil()
        for r, c in zip(test_r_indices, test_c_indices):
            available_adj[r, c] = 0
            available_adj[c, r] = 0
        available_adj = available_adj.tocsr()
        available_adj.eliminate_zeros()

        G = Graph(available_adj)
        get_edges_func = G.run_K_BFS

        new_adj = sp.csr_matrix(full_adj.shape)
        new_adj = new_adj.tolil()
        for r, c in zip(test_r_indices, test_c_indices):
            if K > 0:  # expand the edges
                nodes_to_expand = [r, c]
                for node in nodes_to_expand:
                    edges = get_edges_func(node, K)
                    for edge in edges:
                        i, j = edge
                        new_adj[i, j] = 1
                        new_adj[j, i] = 1

        new_adj = new_adj.tocsr()

        new_support = get_degree_supports(new_adj,
                                          config['degree'],
                                          adj_self_con=ADJ_SELF_CONNECTIONS,
                                          verbose=False)
        for i in range(1, len(new_support)):
            new_support[i] = norm_adj(new_support[i])
        new_support = [sparse_to_tuple(sup) for sup in new_support]

        new_feed_dict = construct_feed_dict(placeholders,
                                            train_features,
                                            new_support,
                                            test_labels,
                                            test_r_indices,
                                            test_c_indices,
                                            0.,
                                            is_train=BN_AS_TRAIN)

        loss, acc = sess.run([model.loss, model.accuracy],
                             feed_dict=new_feed_dict)

        print("for k={} test_acc=".format(K), "{:.5f}".format(acc))

    print('Best val score saved in log: {}'.format(config['best_val_score']))
    print('Last val score saved in log: {}'.format(log['val']['acc'][-1]))
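
# Both test functions above start with the same trick for turning a plain dict into
# attribute-style args; a standalone sketch of the pattern (keys are illustrative):
from collections import namedtuple

args = {'load_from': './runs/exp1', 'k': 1, 'amz_data': 'also_bought'}
args = namedtuple("Args", args.keys())(*args.values())
print(args.load_from, args.k)  # ./runs/exp1 1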
Example #30
0
        filename = setFileName()
        Log.clearDefaultLog()
        Log.clearLog(filename)

        rec.append(
            run(filename, train_sample, train_label, test_sample, test_label,
                title, M, thresh, CART_step))
        maxi.append(np.max(rec[-1]))
        mini.append(np.min(rec[-1]))
        maxi_M.append(np.argmax(rec[-1]))
        mini_M.append(np.argmin(rec[-1]))
        avrg.append(np.average(rec[-1]))
        stdv.append(np.std(rec[-1], ddof=1))
        Log.log(
            filename,
            'max: {}; min: {}; max M: {}; min M: {}; avg: {}; std: {}'.format(
                maxi[-1], mini[-1], maxi_M[-1], mini_M[-1], avrg[-1],
                stdv[-1]))

    Graph.drawHyper('hypervariant-maxM-{}-{}-{}'.format(
        hypervariant, hyperrange[0], hyperrange[-1]),
                    hyperrange,
                    maxi_M,
                    title='max M when varying {}'.format(hypervariant))
    Graph.drawHyper(
        'hypervariant-maxCorr-{}-{}-{}'.format(hypervariant, hyperrange[0],
                                               hyperrange[-1]),
        hyperrange,
        maxi,
        title='max correction when varying {}'.format(hypervariant))