def addCityStations(catalog, row):
    """
    Build up the city map: each city key holds an ARRAY_LIST of its
    stations, where every station is a dict with its ID, name and dock
    count. Also records the station in the id -> {Name, City} lookup map.
    """
    cityStationsMap = catalog['cities']
    station = {
        'id': row['id'],
        'name': row['name'],
        'dock_count': row['dock_count']
    }
    # BUG FIX: the original used two independent ifs, so after the "city
    # absent" branch created the list and put it, the second check
    # immediately matched and appended the same station a second time.
    # An if/else adds each station exactly once.
    if not map.contains(cityStationsMap, row['city']):
        stationsList = lt.newList(datastructure='ARRAY_LIST')
        lt.addLast(stationsList, station)
        map.put(cityStationsMap, row['city'], stationsList)
    else:
        stationsList = map.get(cityStationsMap, row['city'])['value']
        lt.addLast(stationsList, station)
        map.put(cityStationsMap, row['city'], stationsList)
    # Index the station id -> {Name, City} for fast reverse lookup
    stationsIdName = catalog['stationIds']
    dicct = {'Name': row['name'], 'City': row['city']}
    map.put(stationsIdName, row['id'], dicct)
def test_LoadTable(self):
    """
    Load books-small.csv into a chaining hash table and verify its
    size, membership, retrieval, removal, key set and value set.
    """
    capacity = 171
    books = ht.newMap(capacity=capacity,
                      maptype='CHAINING',
                      comparefunction=self.compare_book_id)
    path = cf.data_dir + 'GoodReads/books-small.csv'
    self.assertEqual(ht.size(books), 0)
    self.assertTrue(ht.isEmpty(books))
    reader = csv.DictReader(open(path))
    for record in reader:
        ht.put(books, record['book_id'], record)
    self.assertEqual(ht.size(books), 149)
    self.assertTrue(ht.contains(books, '100'))
    entry = ht.get(books, '100')
    self.assertIsNotNone(entry)
    self.assertEqual(entry['value']['book_id'], '100')
    ht.remove(books, '100')
    self.assertEqual(ht.size(books), 148)
    self.assertFalse(ht.contains(books, '100'))
    keys = ht.keySet(books)
    self.assertFalse(lt.isEmpty(keys))
    self.assertEqual(lt.size(keys), 148)
    values = ht.valueSet(books)
    self.assertFalse(lt.isEmpty(values))
    self.assertEqual(lt.size(values), 148)
def test_LoadTable(self):
    """
    Exercise put/get/remove/keySet/valueSet on the fixture table,
    passing the compare function explicitly on each call.
    """
    table = self.book_map
    compare = self.compare_book_id
    self.assertEqual(ht.size(table), 0)
    self.assertTrue(ht.isEmpty(table))
    for record in csv.DictReader(open(self.booksfile)):
        ht.put(table, record['book_id'], record, compare)
    self.assertEqual(ht.size(table), 149)
    self.assertTrue(ht.contains(table, '100', compare))
    entry = ht.get(table, '100', compare)
    self.assertIsNotNone(entry)
    self.assertEqual(entry['value']['book_id'], '100')
    ht.remove(table, '100', compare)
    self.assertEqual(ht.size(table), 148)
    self.assertFalse(ht.contains(table, '100', compare))
    keys = ht.keySet(table)
    self.assertFalse(lt.isEmpty(keys))
    self.assertEqual(lt.size(keys), 148)
    values = ht.valueSet(table)
    self.assertFalse(lt.isEmpty(values))
    self.assertEqual(lt.size(values), 148)
def addDate_city_trips(catalog, row):
    """
    Build the RBT of start dates. Each tree node's value is a hash map
    that counts trips per city for that date.
    """
    d = row['start_date']              # row comes from trip.csv
    t = d.split(" ")[0]                # keep only the date part of the timestamp
    date = strToDate(t, '%m/%d/%Y')
    id_station = row['start_station_id']
    city = station_id_city(catalog, id_station)
    city_trip = tree.get(catalog['date_city_trips'], date, greater)
    if city_trip:
        if map.contains(city_trip, city):
            # date and city already seen: bump the per-city counter
            u = map.get(city_trip, city)['value']
            map.put(city_trip, city, u + 1)
        else:
            map.put(city_trip, city, 1)
    else:
        # first trip seen for this date: create its per-city map
        city_trip = map.newMap(capacity=5, prime=3, maptype='CHAINING',
                               comparefunction=compareByKey)
        map.put(city_trip, city, 1)
    # Re-insert the (possibly new) per-city map under this date.
    # The original duplicated this call in every branch; hoisted here.
    catalog['date_city_trips'] = tree.put(catalog['date_city_trips'],
                                          date, city_trip, greater)
def dfs_2(grafo, v, revisados):
    """
    Recursive DFS from vertex v. Each newly reached vertex is recorded
    in 'revisados' with a marked flag and the edge used to reach it.
    """
    adjs = g.adjacents(grafo, v)
    adjs_iter = it.newIterator(adjs)
    while it.hasNext(adjs_iter):
        w = it.next(adjs_iter)
        # Idiom fix: test truthiness instead of comparing '== False'
        if not map.contains(revisados, w):
            map.put(revisados, w, {'marked': True, 'edgeTo': v})
            dfs_2(grafo, w, revisados)
def add_gen(catalog, row):
    """
    Register every genre of this movie row in the genre map, counting
    how many movies belong to each genre.
    """
    generos = catalog['generos']
    # 'genres' is a '|'-separated string, e.g. 'Action|Drama'
    split_gen = row['genres'].split('|')
    for i in split_gen:
        if map.contains(generos, i, compareByKey):
            # genre already known: bump its movie counter
            map.get(generos, i, compareByKey)['peliculas'] += 1
        else:
            # NOTE(review): new_gen receives the full 'genres' string rather
            # than the single genre 'i' being inserted — looks suspicious;
            # verify against new_gen's implementation before changing.
            x = new_gen(row['genres'])
            map.put(generos, i, x, compareByKey)
def dfo(self, graph, marked, pre, post, reversepost):
    """
    Depth First Order traversal: launch a DFS from every vertex of the
    graph that has not been marked yet.
    """
    verts = g.vertices(graph)
    cursor = it.newIterator(verts)
    while it.hasNext(cursor):
        vertex = it.next(cursor)
        if m.contains(marked, vertex):
            continue
        self.dfs(graph, vertex, marked, pre, post, reversepost)
def depth_first_search(Graph, Mapa_de_marcar, node):
    """
    Recursive DFS: record 'node' as visited in the marking map, then
    descend into every unvisited adjacent vertex, storing for each one
    the predecessor it was reached from.
    """
    map.put(Mapa_de_marcar, node,
            {'nodo': node, 'stado': True, 'predecesor': None})
    neighbours = g.adjacents(Graph, node)
    for pos in range(1, lt.size(neighbours) + 1):
        nxt = lt.getElement(neighbours, pos)
        if map.contains(Mapa_de_marcar, nxt):
            continue
        map.put(Mapa_de_marcar, nxt,
                {'nodo': nxt, 'stado': True, 'predecesor': node})
        depth_first_search(Graph, Mapa_de_marcar, nxt)
def depth_first_search(catalog, node):
    """
    Recursive DFS over the delay graph, marking visited vertices in
    catalog['visitedMap'] together with their predecessor.
    """
    map.put(catalog['visitedMap'], node,
            {'nodo': node, 'stado': True, 'predecesor': None})
    neighbours = g.adjacents(catalog['delayGraph'], node)
    for pos in range(1, lt.size(neighbours) + 1):
        nxt = lt.getElement(neighbours, pos)
        if map.contains(catalog['visitedMap'], nxt):
            continue
        map.put(catalog['visitedMap'], nxt,
                {'nodo': nxt, 'stado': True, 'predecesor': node})
        depth_first_search(catalog, nxt)
def dfs(self, graph, vert, marked, pre, post, reversepost):
    """
    DFS visit of 'vert': record it in preorder, mark it, recurse into
    every unmarked neighbour, then record it in postorder and on the
    reverse-postorder stack.
    """
    q.enqueue(pre, vert)
    m.put(marked, vert, True)
    neighbours = g.adjacents(graph, vert)
    cursor = it.newIterator(neighbours)
    while it.hasNext(cursor):
        nxt = it.next(cursor)
        if not m.contains(marked, nxt):
            self.dfs(graph, nxt, marked, pre, post, reversepost)
    q.enqueue(post, vert)
    s.push(reversepost, vert)
def countConnectedComponents(Graph, Mapa_de_marcar):
    """
    Return the number of connected components of the review graph,
    running a DFS from every vertex not yet present in the marking map.
    """
    counter = 0
    vertices = g.vertices(Graph)
    for pos in range(1, lt.size(vertices) + 1):
        vertex = lt.getElement(vertices, pos)
        if map.contains(Mapa_de_marcar, vertex):
            continue
        depth_first_search(Graph, Mapa_de_marcar, vertex)
        counter += 1
    return counter
def sccCount(self, graph, vert, marked, idscc, scc):
    """
    Mark 'vert' as belonging to strongly connected component 'scc' and
    propagate that component id through every unmarked adjacent vertex.
    """
    m.put(marked, vert, True)
    m.put(idscc, vert, scc)
    adj = g.adjacents(graph, vert)
    walker = it.newIterator(adj)
    while it.hasNext(walker):
        neighbour = it.next(walker)
        if m.contains(marked, neighbour):
            continue
        self.sccCount(graph, neighbour, marked, idscc, scc)
def dfs(self, graph, vert, marked, pre, post, reversepost):
    """
    Depth First Search visit of 'vert': enqueue it in preorder, mark
    it, explore each unmarked adjacent vertex, then enqueue it in
    postorder and push it onto the reverse-postorder stack.
    """
    q.enqueue(pre, vert)
    m.put(marked, vert, True)
    adj = g.adjacents(graph, vert)
    walker = it.newIterator(adj)
    while it.hasNext(walker):
        neighbour = it.next(walker)
        if m.contains(marked, neighbour):
            continue
        self.dfs(graph, neighbour, marked, pre, post, reversepost)
    q.enqueue(post, vert)
    s.push(reversepost, vert)
def addDirector_id(catalog, row):
    """
    Index a director by movie id: append this movie to an existing
    director entry, or create a fresh entry keyed by its id.
    """
    key = row['id']
    directors = catalog['Directors_id']
    if map.contains(directors, key, compareByKey):
        entry = map.get(directors, key, compareByKey)
        lt.addLast(entry['DirectorMovies'], row['id'])
    else:
        fresh = newDirector(row['director_name'], row, catalog)
        map.put(directors, fresh['id'], fresh, compareByKey)
def test_kosaraju(self):
    """
    Kosaraju strongly-connected-components test: build a 12-vertex
    directed graph, compute its reverse, run a depth-first order on
    the reverse to obtain a reverse postorder, then count SCCs on the
    original graph following that order, and finally check known
    strong/weak connections between named vertices.
    """
    graph = g.newGraph(12, self.comparenames, directed=True)
    # per-vertex SCC id assigned by sccCount
    idscc = m.newMap(12, maptype='PROBING',
                     comparefunction=self.comparenames)
    pre = q.newQueue()
    post = q.newQueue()
    reversepost = s.newStack()
    marked = m.newMap(12, comparefunction=self.comparenames)
    # structures used for the traversal of the reversed graph
    grmarked = m.newMap(12, maptype='PROBING',
                        comparefunction=self.comparenames)
    grpre = q.newQueue()
    grpost = q.newQueue()
    grreversepost = s.newStack()
    # initialize the graph
    self.loadgraph(graph)
    self.assertEqual(g.numVertex(graph), 12)
    self.assertEqual(g.numEdges(graph), 14)
    # compute the reverse graph of G
    greverse = self.reverse(graph)
    self.assertEqual(g.numVertex(greverse), 12)
    self.assertEqual(g.numEdges(greverse), 14)
    # traverse the reversed graph using DepthFirstOrder
    self.dfo(greverse, grmarked, grpre, grpost, grreversepost)
    # lst holds the vertices returned by reversepost (reversed G)
    lst = self.lstReversePost(grreversepost)
    # traverse the original graph in the order given by reversepost
    iterlst = it.newIterator(lst)
    scc = 1
    while (it.hasNext(iterlst)):
        vert = it.next(iterlst)
        if not m.contains(marked, vert):
            self.sccCount(graph, vert, marked, idscc, scc)
            scc += 1
    self.assertTrue(self.stronglyConnected(idscc, 'Pedro', 'Maria'))
    self.assertTrue(self.stronglyConnected(idscc, 'Martin', 'Gloria'))
    self.assertTrue(self.stronglyConnected(idscc, 'Susana', 'Tere'))
    self.assertFalse(self.stronglyConnected(idscc, 'Pedro', 'Gloria'))
    self.assertFalse(self.stronglyConnected(idscc, 'Camila', 'Jose'))
    self.assertFalse(self.stronglyConnected(idscc, 'Gloria', 'Luz'))
def countConnectedComponents(catalog):
    """
    Return the number of connected components of the delay graph,
    stopping early once every vertex has been visited.
    """
    counter = 0
    vertices = g.vertices(catalog['delayGraph'])
    total = g.numVertex(catalog['delayGraph'])
    for pos in range(1, lt.size(vertices) + 1):
        vertex = lt.getElement(vertices, pos)
        if not map.contains(catalog['visitedMap'], vertex):
            depth_first_search(catalog, vertex)
            counter += 1
        # early exit: all vertices already belong to some component
        if map.size(catalog['visitedMap']) == total:
            break
    return counter
def Add_station_list(catalog, row):
    """
    Prepend the station described by 'row' to its city's station list
    inside catalog['map_station'], creating the list first if the city
    has not been seen yet.
    """
    stations = catalog['map_station']
    city = row['city']
    # BUG FIX: the original compared map.contains(...) == None, which is
    # never true (contains yields a boolean), so the branch that created
    # the per-city list was unreachable for unknown cities.
    if not map.contains(stations, city):
        map.put(stations, city, lt.newList())
    # The dict construction was duplicated in both branches; built once here.
    dic = {
        'city_id': row['id'],
        'dock_count': row['dock_count'],
        'city': row['city']
    }
    lista = map.get(stations, city)
    lt.addFirst(lista, dic)
def test_topological(self):
    """
    Topological order test: build a 10-course prerequisite digraph,
    run a depth-first order from every unmarked vertex, and print the
    resulting topological order (the reverse postorder).
    """
    graph = g.newGraph(10, self.comparenames, directed=True)
    pre = q.newQueue()
    post = q.newQueue()
    reversepost = s.newStack()
    marked = m.newMap(10, comparefunction=self.comparenames)
    g.insertVertex(graph, 'Calculo1')
    g.insertVertex(graph, 'Calculo2')
    g.insertVertex(graph, 'Diseno1')
    g.insertVertex(graph, 'Diseno2')
    g.insertVertex(graph, 'Electiva')
    g.insertVertex(graph, 'Fisica1')
    g.insertVertex(graph, 'Ingles')
    g.insertVertex(graph, 'IP1')
    g.insertVertex(graph, 'IP2')
    g.insertVertex(graph, 'ProyectoFinal')
    # edges point from prerequisite to dependent course
    g.addEdge(graph, 'Calculo1', 'Calculo2')
    g.addEdge(graph, 'Calculo2', 'IP2')
    g.addEdge(graph, 'Calculo2', 'Fisica1')
    g.addEdge(graph, 'Diseno1', 'Diseno2')
    g.addEdge(graph, 'Diseno2', 'ProyectoFinal')
    g.addEdge(graph, 'Electiva', 'ProyectoFinal')
    g.addEdge(graph, 'Fisica1', 'Diseno2')
    g.addEdge(graph, 'Ingles', 'ProyectoFinal')
    g.addEdge(graph, 'IP1', 'Diseno1')
    g.addEdge(graph, 'IP1', 'IP2')
    self.assertEqual(g.numEdges(graph), 10)
    self.assertEqual(g.numVertex(graph), 10)
    # DFO: depth-first order over every unmarked vertex
    lstvert = g.vertices(graph)
    vertiterator = it.newIterator(lstvert)
    while it.hasNext(vertiterator):
        vert = it.next(vertiterator)
        if not (m.contains(marked, vert)):
            self.dfs(graph, vert, marked, pre, post, reversepost)
    self.printTopological(reversepost)
def addDirector_name(catalog, row):
    """
    Index a director by name: accumulate the movie id, the running sum
    of vote averages and the count of movies rated >= 6; create the
    director entry on first sight.
    """
    name = row['director_name']
    directors = catalog['Directors_name']
    if map.contains(directors, name, compareByKey):
        entry = map.get(directors, name, compareByKey)
        lt.addLast(entry['DirectorMovies'], row['id'])
        # Hoisted: the original performed this identical map lookup and
        # float conversion twice.
        vote_average = float(
            map.get(catalog['MovieMap_id'], row['id'],
                    compareByKey)['vote_average'])
        entry['sum_aver'] += vote_average
        if vote_average >= 6:
            entry['Movie_more_6'] += 1
    else:
        fresh = newDirector(name, row, catalog)
        map.put(directors, fresh['name'], fresh, compareByKey)
def addActor(catalog, row):
    """
    Register up to five cast members of this movie row in the actor
    map: append the movie id to existing actors, create a new entry
    otherwise. Empty actor columns are skipped.
    """
    for column in ('actor1_name', 'actor2_name', 'actor3_name',
                   'actor4_name', 'actor5_name'):
        name = row[column]
        if not name:
            continue
        if map.contains(catalog['Actors'], name, compareByKey):
            entry = map.get(catalog['Actors'], name, compareByKey)
            lt.addLast(entry['ActorMovies'], row['id'])
        else:
            fresh = newActor(name, row, catalog)
            map.put(catalog['Actors'], fresh['name'], fresh, compareByKey)
def contains(minPQ, index):
    """Return True if 'index' is present in the priority queue's qp map."""
    qp_map = minPQ['qpMap']
    return map.contains(qp_map, index)